use std::collections::BTreeMap;
use std::sync::Mutex;
use super::backend::{Errno, FsBackend};
use super::protocol::{
RemoveMappingIn, RemoveMappingOne, SetupMappingIn, FUSE_SETUPMAPPING_FLAG_READ,
FUSE_SETUPMAPPING_FLAG_WRITE,
};
/// Guest mapping is readable (derived from `FUSE_SETUPMAPPING_FLAG_READ`).
pub const DAX_PROT_READ: u32 = 1 << 0;
/// Guest mapping is writable (derived from `FUSE_SETUPMAPPING_FLAG_WRITE`).
pub const DAX_PROT_WRITE: u32 = 1 << 1;
/// Abstraction over the hypervisor's guest-memory mapping primitive so the
/// DAX session logic can be driven by a mock in tests.
pub trait HvfMapper: Send + Sync {
    /// Map `len` bytes at host address `host_va` into the guest at physical
    /// address `gpa` with `DAX_PROT_*` permission bits in `prot`.
    fn map(&self, host_va: *mut u8, gpa: u64, len: u64, prot: u32) -> Result<(), Errno>;
    /// Tear down a previously established guest mapping of `len` bytes at `gpa`.
    fn unmap(&self, gpa: u64, len: u64) -> Result<(), Errno>;
}
/// Test double for [`HvfMapper`] that records every call instead of touching
/// a hypervisor.
pub struct MockHvfMapper {
    // Chronological log of map/unmap invocations; Mutex makes it Sync.
    log: Mutex<Vec<MockCall>>,
}
/// One recorded invocation on a [`MockHvfMapper`].
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum MockCall {
    // `host_va` stored as usize (not a raw pointer) so the enum stays Send/Sync.
    Map { host_va: usize, gpa: u64, len: u64, prot: u32 },
    Unmap { gpa: u64, len: u64 },
}
impl MockHvfMapper {
pub fn new() -> Self {
Self {
log: Mutex::new(Vec::new()),
}
}
pub fn calls(&self) -> Vec<MockCall> {
self.log.lock().unwrap().clone()
}
}
impl Default for MockHvfMapper {
    /// Equivalent to [`MockHvfMapper::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl HvfMapper for MockHvfMapper {
    /// Record the map request and unconditionally report success.
    fn map(&self, host_va: *mut u8, gpa: u64, len: u64, prot: u32) -> Result<(), Errno> {
        let entry = MockCall::Map {
            host_va: host_va as usize,
            gpa,
            len,
            prot,
        };
        self.log.lock().unwrap().push(entry);
        Ok(())
    }

    /// Record the unmap request and unconditionally report success.
    fn unmap(&self, gpa: u64, len: u64) -> Result<(), Errno> {
        let entry = MockCall::Unmap { gpa, len };
        self.log.lock().unwrap().push(entry);
        Ok(())
    }
}
/// Book-keeping for one live DAX mapping inside the window.
#[derive(Clone, Debug)]
struct Slot {
    // Host address the backend returned from `dax_map` for this slot; kept
    // only so it can be handed back to `dax_unmap` later.
    host_va: *mut u8,
    // Length of the mapping in bytes.
    len: u64,
}
// SAFETY: `host_va` is never dereferenced through `Slot` in this module; it is
// only stored and later passed back to the backend's `dax_unmap`. Moving the
// raw pointer between threads is therefore sound here.
// NOTE(review): assumes the backend tolerates `dax_unmap` being called from a
// different thread than the one that called `dax_map` — confirm.
unsafe impl Send for Slot {}
// SAFETY: see the `Send` justification above; shared access only reads the
// pointer value, never the pointee.
unsafe impl Sync for Slot {}
/// Tracks virtiofs-style DAX mappings for one FUSE session: validates
/// SETUPMAPPING/REMOVEMAPPING requests, obtains host addresses from the
/// backend, and mirrors them into the guest through an [`HvfMapper`].
pub struct DaxSession {
    // Guest physical address where the DAX window begins.
    dax_base_gpa: u64,
    // Size of the DAX window in bytes; moffset/len are validated against it.
    dax_window_len: u64,
    // Filesystem backend that produces/releases host-side mappings.
    backend: std::sync::Arc<dyn FsBackend>,
    // Hypervisor mapping primitive (real implementation or mock).
    mapper: std::sync::Arc<dyn HvfMapper>,
    // Live mappings keyed by their window offset (moffset).
    active: Mutex<BTreeMap<u64, Slot>>,
}
impl DaxSession {
pub fn new(
dax_base_gpa: u64,
dax_window_len: u64,
backend: std::sync::Arc<dyn FsBackend>,
mapper: std::sync::Arc<dyn HvfMapper>,
) -> Self {
Self {
dax_base_gpa,
dax_window_len,
backend,
mapper,
active: Mutex::new(BTreeMap::new()),
}
}
pub fn setup(&self, nodeid: u64, req: &SetupMappingIn) -> Result<(), Errno> {
log_dax(|| format!(
"SETUPMAPPING: nodeid={nodeid} fh={} foffset={:#x} len={:#x} flags={:#x} moffset={:#x}",
req.fh, req.foffset, req.len, req.flags, req.moffset
));
if (req.moffset & 0x3FFF) != 0 || (req.len & 0x3FFF) != 0 {
return Err(super::backend::EINVAL);
}
if req.moffset.checked_add(req.len).map_or(true, |end| end > self.dax_window_len) {
return Err(super::backend::EINVAL);
}
let mut prot = 0u32;
if req.flags & FUSE_SETUPMAPPING_FLAG_READ != 0 {
prot |= DAX_PROT_READ;
}
if req.flags & FUSE_SETUPMAPPING_FLAG_WRITE != 0 {
prot |= DAX_PROT_WRITE;
}
if prot == 0 {
return Err(super::backend::EINVAL);
}
let host_va = self
.backend
.dax_map(nodeid, req.fh, req.foffset, req.len, prot)
.inspect_err(|e| log_dax(|| format!("backend.dax_map FAILED: errno={e}")))?;
let gpa = self.dax_base_gpa + req.moffset;
log_dax(|| format!(
"hv_vm_map: host_va={:#x} gpa={gpa:#x} len={:#x} prot={prot:#x}",
host_va as usize, req.len
));
self.mapper.map(host_va, gpa, req.len, prot).map_err(|e| {
log_dax(|| format!("mapper.map FAILED: errno={e}"));
let _ = self
.backend
.dax_unmap(nodeid, host_va, req.len);
e
})?;
let mut active = self.active.lock().unwrap();
if active.contains_key(&req.moffset) {
let _ = self.mapper.unmap(gpa, req.len);
let _ = self.backend.dax_unmap(nodeid, host_va, req.len);
return Err(super::backend::EEXIST);
}
active.insert(req.moffset, Slot { host_va, len: req.len });
Ok(())
}
pub fn remove(&self, nodeid: u64, entry: &RemoveMappingOne) -> Result<(), Errno> {
let mut active = self.active.lock().unwrap();
let slot = active.remove(&entry.moffset).ok_or(super::backend::EINVAL)?;
if slot.len != entry.len {
active.insert(entry.moffset, slot);
return Err(super::backend::EINVAL);
}
let gpa = self.dax_base_gpa + entry.moffset;
drop(active);
let mapper_res = self.mapper.unmap(gpa, entry.len);
let backend_res = self.backend.dax_unmap(nodeid, slot.host_va, slot.len);
mapper_res.and(backend_res)
}
pub fn active_slot_count(&self) -> usize {
self.active.lock().unwrap().len()
}
}
/// Emit a DAX trace line when the `SUPERMACHINE_FUSE_TRACE` environment
/// variable is set. A value of `"1"` or `"stderr"` traces to stderr; any
/// other value is treated as a file path to append to (open failures are
/// silently ignored). The message closure only runs when tracing is enabled.
fn log_dax<F: FnOnce() -> String>(make_msg: F) {
    use std::io::Write;
    let target = match std::env::var_os("SUPERMACHINE_FUSE_TRACE") {
        Some(raw) => raw.to_string_lossy().into_owned(),
        None => return,
    };
    let msg = make_msg();
    match target.as_str() {
        "1" | "stderr" => eprintln!("[dax] {msg}"),
        path => {
            if let Ok(mut file) = std::fs::OpenOptions::new()
                .create(true)
                .append(true)
                .open(path)
            {
                let _ = writeln!(file, "[dax] {msg}");
            }
        }
    }
}
/// Parse a FUSE_REMOVEMAPPING payload: a `RemoveMappingIn` header followed by
/// `hdr.count` packed `RemoveMappingOne` entries.
///
/// Returns EINVAL when the payload is too short for the header or for the
/// advertised entry count — including when the advertised total size would
/// overflow `usize` (the previous unchecked `count * size + hdr` could wrap
/// on 32-bit targets, pass the length check, and read out of bounds).
pub fn parse_remove_payload(payload: &[u8]) -> Result<Vec<RemoveMappingOne>, Errno> {
    let hdr_size = core::mem::size_of::<RemoveMappingIn>();
    let one_size = core::mem::size_of::<RemoveMappingOne>();
    if payload.len() < hdr_size {
        return Err(super::backend::EINVAL);
    }
    // SAFETY: the bounds check above guarantees `hdr_size` readable bytes at
    // the start of the slice; `read_unaligned` has no alignment requirement.
    let hdr: RemoveMappingIn =
        unsafe { core::ptr::read_unaligned(payload.as_ptr() as *const RemoveMappingIn) };
    let count = hdr.count as usize;
    // Checked arithmetic so an attacker-controlled `count` cannot wrap
    // `needed` and sneak past the length validation.
    let needed = count
        .checked_mul(one_size)
        .and_then(|body| body.checked_add(hdr_size))
        .ok_or(super::backend::EINVAL)?;
    if payload.len() < needed {
        return Err(super::backend::EINVAL);
    }
    // `count` is now bounded by payload.len() / one_size, so this allocation
    // cannot be driven arbitrarily large by the header alone.
    let mut out = Vec::with_capacity(count);
    let base = payload.as_ptr();
    for i in 0..count {
        let off = hdr_size + i * one_size;
        // SAFETY: off + one_size <= needed <= payload.len(), so every read
        // stays inside the slice; read_unaligned tolerates any alignment.
        let one: RemoveMappingOne =
            unsafe { core::ptr::read_unaligned(base.add(off) as *const RemoveMappingOne) };
        out.push(one);
    }
    Ok(out)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::fuse::backend::MemoryFs;
    use std::sync::Arc;

    // Geometry shared by every test: window base/size and the 16 KiB granule.
    const WINDOW_BASE: u64 = 0x80_0000_0000;
    const WINDOW_LEN: u64 = 0x4_0000_0000;
    const PAGE: u64 = 0x4000;

    /// Backend stub that hands out fake, monotonically increasing host
    /// addresses for DAX mappings and delegates everything else to MemoryFs.
    struct StubDaxBackend {
        inner: MemoryFs,
        next_va: Mutex<usize>,
    }

    impl StubDaxBackend {
        fn new() -> Self {
            StubDaxBackend {
                inner: MemoryFs::new(),
                next_va: Mutex::new(0x40_0000_0000usize),
            }
        }
    }

    impl FsBackend for StubDaxBackend {
        fn lookup(&self, p: u64, n: &std::ffi::OsStr) -> Result<crate::fuse::backend::Entry, Errno> {
            self.inner.lookup(p, n)
        }
        fn forget(&self, n: u64, nl: u64) {
            self.inner.forget(n, nl)
        }
        fn getattr(&self, n: u64, f: Option<u64>) -> Result<crate::fuse::protocol::Attr, Errno> {
            self.inner.getattr(n, f)
        }
        fn open(&self, n: u64, f: u32) -> Result<u64, Errno> {
            self.inner.open(n, f)
        }
        fn read(&self, n: u64, h: u64, o: u64, s: u32) -> Result<Vec<u8>, Errno> {
            self.inner.read(n, h, o, s)
        }
        fn release(&self, n: u64, h: u64) -> Result<(), Errno> {
            self.inner.release(n, h)
        }
        fn opendir(&self, n: u64, f: u32) -> Result<u64, Errno> {
            self.inner.opendir(n, f)
        }
        fn readdir(&self, n: u64, h: u64, o: u64, s: u32) -> Result<Vec<crate::fuse::backend::DirEntry>, Errno> {
            self.inner.readdir(n, h, o, s)
        }
        fn releasedir(&self, n: u64, h: u64) -> Result<(), Errno> {
            self.inner.releasedir(n, h)
        }
        fn statfs(&self, n: u64) -> Result<crate::fuse::backend::StatFs, Errno> {
            self.inner.statfs(n)
        }
        fn dax_map(&self, _nodeid: u64, _fh: u64, _foffset: u64, len: u64, _prot: u32) -> Result<*mut u8, Errno> {
            // Hand out the next fake VA and advance the bump allocator.
            let mut next = self.next_va.lock().unwrap();
            let va = *next;
            *next += len as usize;
            Ok(va as *mut u8)
        }
        fn dax_unmap(&self, _nodeid: u64, _host_va: *mut u8, _len: u64) -> Result<(), Errno> {
            Ok(())
        }
    }

    /// Build a session over the stub backend plus a handle to its mock mapper.
    fn make_session() -> (DaxSession, Arc<MockHvfMapper>) {
        let mapper = Arc::new(MockHvfMapper::new());
        let session = DaxSession::new(
            WINDOW_BASE,
            WINDOW_LEN,
            Arc::new(StubDaxBackend::new()),
            mapper.clone(),
        );
        (session, mapper)
    }

    /// Build a SETUPMAPPING request with the fixed fh/foffset used everywhere.
    fn setup_req(moffset: u64, len: u64, flags: u32) -> SetupMappingIn {
        SetupMappingIn {
            fh: 1,
            foffset: 0,
            len,
            flags,
            moffset,
        }
    }

    #[test]
    fn setup_records_mapping_and_calls_mapper() {
        let (sess, mapper) = make_session();
        sess.setup(7, &setup_req(0, PAGE, FUSE_SETUPMAPPING_FLAG_READ))
            .unwrap();
        assert_eq!(sess.active_slot_count(), 1);
        let calls = mapper.calls();
        assert_eq!(calls.len(), 1);
        if let MockCall::Map { gpa, len, prot, .. } = &calls[0] {
            assert_eq!(*gpa, WINDOW_BASE);
            assert_eq!(*len, PAGE);
            assert_eq!(*prot, DAX_PROT_READ);
        } else {
            panic!("expected Map");
        }
    }

    #[test]
    fn remove_undoes_a_prior_setup() {
        let (sess, mapper) = make_session();
        sess.setup(7, &setup_req(0x1_0000, PAGE, FUSE_SETUPMAPPING_FLAG_READ))
            .unwrap();
        let rem = RemoveMappingOne {
            moffset: 0x1_0000,
            len: PAGE,
        };
        sess.remove(7, &rem).unwrap();
        assert_eq!(sess.active_slot_count(), 0);
        let calls = mapper.calls();
        assert_eq!(calls.len(), 2);
        if let MockCall::Unmap { gpa, len } = &calls[1] {
            assert_eq!(*gpa, WINDOW_BASE + 0x1_0000);
            assert_eq!(*len, PAGE);
        } else {
            panic!("expected Unmap");
        }
    }

    #[test]
    fn setup_rejects_unaligned() {
        let (sess, _) = make_session();
        // 0x100 is below the 16 KiB alignment requirement.
        let err = sess
            .setup(7, &setup_req(0x100, PAGE, FUSE_SETUPMAPPING_FLAG_READ))
            .unwrap_err();
        assert_eq!(err, super::super::backend::EINVAL);
    }

    #[test]
    fn setup_rejects_out_of_window() {
        let (sess, _) = make_session();
        // moffset at the window end puts moffset + len past the window.
        let err = sess
            .setup(7, &setup_req(WINDOW_LEN, PAGE, FUSE_SETUPMAPPING_FLAG_READ))
            .unwrap_err();
        assert_eq!(err, super::super::backend::EINVAL);
    }

    #[test]
    fn setup_rejects_overlapping_existing_moffset() {
        let (sess, _) = make_session();
        let req = setup_req(0, PAGE, FUSE_SETUPMAPPING_FLAG_READ);
        sess.setup(7, &req).unwrap();
        assert_eq!(sess.setup(7, &req).unwrap_err(), super::super::backend::EEXIST);
    }

    #[test]
    fn setup_rejects_zero_perms() {
        let (sess, _) = make_session();
        let err = sess.setup(7, &setup_req(0, PAGE, 0)).unwrap_err();
        assert_eq!(err, super::super::backend::EINVAL);
    }

    /// Append the raw in-memory bytes of `value` to `buf`.
    fn push_bytes<T>(buf: &mut Vec<u8>, value: &T) {
        // SAFETY: reading size_of::<T>() bytes from a valid &T is in-bounds.
        let bytes = unsafe {
            std::slice::from_raw_parts(value as *const T as *const u8, core::mem::size_of::<T>())
        };
        buf.extend_from_slice(bytes);
    }

    #[test]
    fn parse_remove_payload_round_trip() {
        let entries = [
            RemoveMappingOne { moffset: 0x4000, len: 0x4000 },
            RemoveMappingOne { moffset: 0x8000, len: 0x8000 },
        ];
        let hdr = RemoveMappingIn {
            count: entries.len() as u32,
            _pad: 0,
        };
        let mut buf = Vec::new();
        push_bytes(&mut buf, &hdr);
        for e in &entries {
            push_bytes(&mut buf, e);
        }
        let parsed = parse_remove_payload(&buf).unwrap();
        assert_eq!(parsed.len(), 2);
        assert_eq!(parsed[0].moffset, 0x4000);
        assert_eq!(parsed[1].moffset, 0x8000);
    }
}