#![allow(clippy::module_inception)] #![allow(clippy::should_implement_trait)] #![allow(clippy::too_many_arguments)]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(missing_docs)]
#![allow(unused)] #![deny(unsafe_op_in_unsafe_fn)]
extern crate alloc;
#[cfg(feature = "std")]
extern crate std;
use alloc::boxed::Box;
use alloc::format;
use alloc::string::String;
use alloc::vec::Vec;
use chacha20poly1305_nostd::ChaCha20Poly1305;
use lazy_static::lazy_static;
use spin::Mutex;
use thiserror_no_std::Error;
/// Unified error type for every fallible LCPFS operation (see [`FsResult`]).
///
/// Payload-free variants correspond to simple errno-style conditions;
/// struct variants carry extra diagnostic context.
#[derive(Debug, Clone, PartialEq, Eq, Error)]
pub enum FsError {
    /// The final path component does not exist.
    #[error("File not found")]
    NotFound,
    /// An intermediate path component failed to resolve.
    #[error("Path not found: {path_hint}")]
    PathNotFound {
        /// The component that did not resolve.
        path_hint: String,
    },
    /// No space left for an allocation of `needed_bytes`.
    #[error("Disk full: need {needed_bytes} bytes")]
    DiskFull {
        /// Size of the allocation that could not be satisfied.
        needed_bytes: u64,
    },
    /// A device-level read or write failed.
    #[error("I/O error on vdev {vdev}: {reason}")]
    IoError {
        /// Index of the virtual device that failed.
        vdev: usize,
        /// Static description of the failure.
        reason: &'static str,
    },
    /// On-disk data failed structural validation.
    #[error("Corruption at block {block}: {details}")]
    Corruption {
        /// Block number where corruption was detected.
        block: u64,
        /// Static description of the inconsistency.
        details: &'static str,
    },
    /// Stored checksum does not match the recomputed one.
    #[error("Checksum mismatch: expected {expected:x?}, got {actual:x?}")]
    ChecksumMismatch {
        /// Checksum recorded on disk.
        expected: [u64; 4],
        /// Checksum computed over the data actually read.
        actual: [u64; 4],
    },
    /// Block encryption failed.
    #[error("Encryption failed")]
    EncryptionFailed,
    /// Block decryption or authentication failed.
    #[error("Decryption failed")]
    DecryptionFailed,
    /// Block compression failed.
    #[error("Compression failed")]
    CompressionFailed,
    /// Block decompression failed.
    #[error("Decompression failed")]
    DecompressionFailed,
    /// A block pointer failed validation.
    #[error("Invalid block pointer")]
    InvalidBlockPointer,
    /// Operation requires an imported pool.
    #[error("Pool not imported")]
    PoolNotImported,
    /// Pool configuration was rejected.
    #[error("Invalid pool configuration: {reason}")]
    InvalidPoolConfig {
        /// Static description of the configuration problem.
        reason: &'static str,
    },
    /// A transaction-group operation failed.
    #[error("Transaction group {txg} error")]
    TxgError {
        /// The transaction group involved.
        txg: u64,
    },
    /// A ZAP (attribute/directory map) operation failed.
    #[error("ZAP error: {reason}")]
    ZapError {
        /// Static description of the ZAP failure.
        reason: &'static str,
    },
    /// A dataset-level operation failed.
    #[error("Dataset error: {reason}")]
    DatasetError {
        /// Static description of the dataset failure.
        reason: &'static str,
    },
    /// The backing device is smaller than the minimum required.
    #[error("Device too small: need {required} bytes, have {actual}")]
    DeviceTooSmall {
        /// Minimum acceptable device size in bytes.
        required: u64,
        /// Actual device size in bytes.
        actual: u64,
    },
    /// Target name already exists.
    #[error("Already exists")]
    AlreadyExists,
    /// A file operation was attempted on a directory.
    #[error("Is a directory")]
    IsDirectory,
    /// A directory operation was attempted on a file.
    #[error("Is a file")]
    IsFile,
    /// Caller lacks permission for the operation.
    #[error("Permission denied")]
    PermissionDenied,
    /// Resource is locked or otherwise in use.
    #[error("Resource busy")]
    ResourceBusy,
    /// Write attempted on a read-only filesystem.
    #[error("Read-only filesystem")]
    ReadOnly,
    /// Caller-supplied argument was rejected.
    #[error("Invalid argument: {reason}")]
    InvalidArgument {
        /// Static description of what was invalid.
        reason: &'static str,
    },
    /// A directory operation was attempted on a non-directory.
    #[error("Not a directory")]
    NotDirectory,
    /// File descriptor is not open.
    #[error("Bad file descriptor")]
    BadFileDescriptor,
    /// Feature is not implemented.
    #[error("Not implemented")]
    NotImplemented,
    /// Directory must be empty for this operation.
    #[error("Directory not empty")]
    DirectoryNotEmpty,
    /// The referenced device or pool does not exist.
    #[error("Device or pool not found")]
    NoDevice,
    /// A security policy was violated.
    #[error("Security violation: {reason}")]
    SecurityViolation {
        /// Static description of the violation.
        reason: &'static str,
    },
}
/// Convenience alias: every fallible LCPFS API returns this.
pub type FsResult<T> = Result<T, FsError>;
/// Minimal abstraction over a random-access block device.
///
/// Implementations must be `Send` so devices can live in the global
/// [`BLOCK_DEVICES`] registry behind a lock.
pub trait BlockDevice: Send {
    /// Read one block (`block_size()` bytes) at index `block_num` into the
    /// front of `buffer`.
    fn read_block(&mut self, block_num: usize, buffer: &mut [u8]) -> Result<(), &'static str>;
    /// Write `buffer` starting at block `block_num`.
    fn write_block(&mut self, block_num: usize, buffer: &[u8]) -> Result<(), &'static str>;
    /// Total capacity in bytes.
    fn size(&self) -> Result<u64, &'static str>;
    /// Size of one block in bytes.
    fn block_size(&self) -> usize;
    /// Number of whole blocks on the device.
    fn block_count(&self) -> usize;
    /// Average I/O latency in microseconds, if the device tracks it.
    /// Defaults to "no measurement available".
    fn io_latency_avg_us(&self) -> Option<f64> {
        None
    }
}
lazy_static! {
    /// Global registry of all block devices known to the filesystem,
    /// indexed by the id handed out by [`register_device`].
    pub static ref BLOCK_DEVICES: Mutex<Vec<Box<dyn BlockDevice + Send>>> = Mutex::new(Vec::new());
}
/// Add `device` to the global registry and return its device id.
///
/// Ids are assigned sequentially and are simply indices into
/// [`BLOCK_DEVICES`]; devices are never removed, so ids stay stable.
pub fn register_device(device: Box<dyn BlockDevice + Send>) -> usize {
    let mut registry = BLOCK_DEVICES.lock();
    registry.push(device);
    // The new device sits in the last slot; its index is the id.
    registry.len() - 1
}
/// Look up a registered device id and return a same-sized stand-in device.
///
/// NOTE(review): this is stub behavior — it returns a *fresh, zero-filled*
/// `RamDisk` sized like the registered device, not the registered device
/// itself (the registry owns the real `Box<dyn BlockDevice>`, which cannot
/// be cloned). Reads through the returned handle will not see data written
/// to the registered device.
pub fn get_block_device(dev_id: usize) -> Option<Box<dyn BlockDevice + Send>> {
    let devices = BLOCK_DEVICES.lock();
    // Fix: an empty registry previously fabricated a phantom 1 GiB RamDisk
    // for *any* id, contradicting the bounds check below. An unknown id —
    // including every id while nothing is registered — now yields `None`.
    if dev_id >= devices.len() {
        return None;
    }
    let size = devices[dev_id].size().unwrap_or(0);
    Some(Box::new(RamDisk::new(size)))
}
/// A volatile, fully in-memory block device backed by one heap allocation.
///
/// Used as the default/test device; contents are lost on drop.
pub struct RamDisk {
    /// Backing store; length equals `size` (truncated to `usize`).
    data: Box<[u8]>,
    /// Capacity in bytes, as reported by `BlockDevice::size`.
    size: u64,
    /// Fixed at 512 bytes by `RamDisk::new`.
    block_size: usize,
}
impl RamDisk {
pub fn new(size_bytes: u64) -> Self {
use alloc::vec;
let size_usize = size_bytes as usize;
Self {
data: vec![0; size_usize].into_boxed_slice(),
size: size_bytes,
block_size: 512,
}
}
}
impl BlockDevice for RamDisk {
    /// Copy one block into the front of `buffer`.
    ///
    /// Fix: errors instead of panicking when `buffer` is shorter than one
    /// block (the previous code indexed `buffer[..block_size]`
    /// unconditionally) or when the offset arithmetic overflows.
    fn read_block(&mut self, block_num: usize, buffer: &mut [u8]) -> Result<(), &'static str> {
        // Checked math: `block_num * block_size` can overflow `usize` on
        // 32-bit targets; treat overflow like any other out-of-range read.
        let offset = block_num
            .checked_mul(self.block_size)
            .ok_or("Read beyond end of device")?;
        let end = offset
            .checked_add(self.block_size)
            .ok_or("Read beyond end of device")?;
        if end > self.data.len() {
            return Err("Read beyond end of device");
        }
        if buffer.len() < self.block_size {
            return Err("Read buffer smaller than block size");
        }
        buffer[..self.block_size].copy_from_slice(&self.data[offset..end]);
        Ok(())
    }
    /// Write `buffer` starting at block `block_num`.
    ///
    /// Deliberately allows `buffer` to span multiple blocks (the original
    /// accepted whole-extent writes); only device bounds are enforced,
    /// now with overflow-checked arithmetic.
    fn write_block(&mut self, block_num: usize, buffer: &[u8]) -> Result<(), &'static str> {
        let offset = block_num
            .checked_mul(self.block_size)
            .ok_or("Write beyond end of device")?;
        let end = offset
            .checked_add(buffer.len())
            .ok_or("Write beyond end of device")?;
        if end > self.data.len() {
            return Err("Write beyond end of device");
        }
        self.data[offset..end].copy_from_slice(buffer);
        Ok(())
    }
    /// Capacity in bytes (infallible for a RAM disk).
    fn size(&self) -> Result<u64, &'static str> {
        Ok(self.size)
    }
    /// Fixed 512-byte blocks.
    fn block_size(&self) -> usize {
        self.block_size
    }
    /// Whole blocks available (truncating division).
    fn block_count(&self) -> usize {
        (self.size / self.block_size as u64) as usize
    }
}
/// POSIX-style file metadata, as returned by `Pool::stat`.
///
/// `#[repr(C)]` with explicit padding/reserved fields — the field order
/// resembles a C `struct stat`; NOTE(review): confirm exact offsets against
/// whatever ABI consumers exist before relying on the layout.
#[repr(C)]
#[derive(Debug, Clone, Copy, Default)]
pub struct FileStat {
    /// Device id containing the file.
    pub st_dev: u64,
    /// Inode / object number.
    pub st_ino: u64,
    /// Hard-link count.
    pub st_nlink: u64,
    /// File type and permission bits (`S_IF*` | mode bits).
    pub st_mode: u32,
    /// Owner user id.
    pub st_uid: u32,
    /// Owner group id.
    pub st_gid: u32,
    #[doc(hidden)]
    pub __pad0: u32,
    /// Device id for special files.
    pub st_rdev: u64,
    /// File size in bytes.
    pub st_size: i64,
    /// Preferred I/O block size.
    pub st_blksize: i64,
    /// Allocated block count (POSIX convention: 512-byte units).
    pub st_blocks: i64,
    /// Last access time (seconds).
    pub st_atime: i64,
    /// Last access time (nanoseconds part).
    pub st_atime_nsec: i64,
    /// Last modification time (seconds).
    pub st_mtime: i64,
    /// Last modification time (nanoseconds part).
    pub st_mtime_nsec: i64,
    /// Last status-change time (seconds).
    pub st_ctime: i64,
    /// Last status-change time (nanoseconds part).
    pub st_ctime_nsec: i64,
    #[doc(hidden)]
    pub __reserved: [i64; 3],
}
/// Character-device bit of `st_mode` (POSIX `S_IFCHR`, 0o020000).
pub const S_IFCHR: u32 = 0x2000;
/// Regular-file bit of `st_mode` (POSIX `S_IFREG`, 0o100000).
pub const S_IFREG: u32 = 0x8000;
/// Directory bit of `st_mode` (POSIX `S_IFDIR`, 0o040000).
pub const S_IFDIR: u32 = 0x4000;
/// Owner-read permission bit (0o400).
pub const S_IRUSR: u32 = 0x0100;
/// Owner-write permission bit (0o200).
pub const S_IWUSR: u32 = 0x0080;
/// Owner-execute permission bit (0o100).
pub const S_IXUSR: u32 = 0x0040;
/// Compile-time switch: whether block encryption is enabled in this build.
pub const ENCRYPTION_ACTIVE: bool = true;
/// Namespace for LCPFS cryptographic primitives: key derivation, per-block
/// nonce derivation, and authenticated block encryption.
pub struct LcpfsCrypto;
impl LcpfsCrypto {
    /// Derive a 256-bit key from a passphrase using Argon2id (std builds).
    ///
    /// Parameters: 64 MiB memory, 3 iterations, 4 lanes — memory-hard
    /// settings for interactive unlock.
    ///
    /// # Panics
    /// Panics if Argon2 rejects the inputs (e.g. a salt below Argon2's
    /// minimum length). NOTE(review): confirm all callers supply valid
    /// salts, or convert this to a fallible API.
    #[cfg(feature = "std")]
    pub fn derive_key(passphrase: &str, salt: &[u8]) -> [u8; 32] {
        use argon2::{Algorithm, Argon2, Params, Version};
        const ARGON2_MEMORY_KIB: u32 = 65_536;
        const ARGON2_ITERATIONS: u32 = 3;
        const ARGON2_PARALLELISM: u32 = 4;
        let params = Params::new(
            ARGON2_MEMORY_KIB,
            ARGON2_ITERATIONS,
            ARGON2_PARALLELISM,
            Some(32),
        )
        .expect("Argon2 parameters are valid");
        let argon2 = Argon2::new(Algorithm::Argon2id, Version::V0x13, params);
        let mut key = [0u8; 32];
        argon2
            .hash_password_into(passphrase.as_bytes(), salt, &mut key)
            .expect("Argon2 should not fail with valid parameters");
        key
    }
    /// Derive a 256-bit key in no-std builds, delegating to PBKDF2
    /// (Argon2's memory requirements are impractical without std).
    #[cfg(not(feature = "std"))]
    pub fn derive_key(passphrase: &str, salt: &[u8]) -> [u8; 32] {
        Self::derive_key_pbkdf2(passphrase, salt)
    }
    /// PBKDF2-HMAC-SHA256 key derivation with 600 000 iterations (the
    /// OWASP-recommended count for PBKDF2-SHA256).
    pub fn derive_key_pbkdf2(passphrase: &str, salt: &[u8]) -> [u8; 32] {
        use hmac::Hmac;
        use sha2::Sha256;
        const PBKDF2_ITERATIONS: u32 = 600_000;
        let mut key = [0u8; 32];
        pbkdf2::pbkdf2::<Hmac<Sha256>>(passphrase.as_bytes(), salt, PBKDF2_ITERATIONS, &mut key)
            .expect("PBKDF2 should not fail with valid output length");
        key
    }
    /// Deterministically derive a 96-bit nonce from a block's on-disk
    /// coordinates via SHAKE256.
    ///
    /// The (vdev, offset, txg) triple is intended to be unique per written
    /// block under copy-on-write, so a given key never reuses a nonce —
    /// NOTE(review): this relies on blocks never being rewritten in place;
    /// confirm the allocator upholds that.
    pub fn derive_nonce(vdev: u32, offset: u64, txg: u64) -> [u8; 12] {
        use sha3::Shake256;
        use sha3::digest::{ExtendableOutput, Update, XofReader};
        let mut hasher = Shake256::default();
        // Domain-separation tag keeps this XOF use distinct from any other.
        hasher.update(b"LCPFS-NONCE-V1");
        hasher.update(&vdev.to_le_bytes());
        hasher.update(&offset.to_le_bytes());
        hasher.update(&txg.to_le_bytes());
        let mut nonce = [0u8; 12];
        hasher.finalize_xof().read(&mut nonce);
        nonce
    }
    /// Convenience wrapper: derive the nonce from a block pointer's first
    /// DVA and birth TXG.
    pub fn derive_nonce_from_bp(bp: &fscore::structs::Blkptr) -> [u8; 12] {
        Self::derive_nonce(bp.dva[0].vdev, bp.dva[0].offset, bp.birth_txg)
    }
    /// Encrypt a block with ChaCha20-Poly1305 under a freshly random nonce.
    ///
    /// Returns `(ciphertext, nonce)`; the caller must persist the nonce to
    /// decrypt later. `_txg` is currently unused (the nonce is random, not
    /// derived) — NOTE(review): decide whether this should use
    /// `derive_nonce` for deterministic COW-friendly nonces instead.
    pub fn encrypt_block(
        key: &[u8; 32],
        plaintext: &[u8],
        _txg: u64,
    ) -> FsResult<(Vec<u8>, [u8; 12])> {
        let mut nonce = [0u8; 12];
        crate::crypto::random::fill_random(&mut nonce).map_err(|_| FsError::EncryptionFailed)?;
        let cipher = ChaCha20Poly1305::new(key).map_err(|_| FsError::EncryptionFailed)?;
        let ciphertext = cipher
            .encrypt(&nonce, plaintext, None)
            .map_err(|_| FsError::EncryptionFailed)?;
        Ok((ciphertext, nonce))
    }
    /// Decrypt (and authenticate) a block produced by
    /// [`Self::encrypt_block`]; tampering yields `DecryptionFailed`.
    pub fn decrypt_block(key: &[u8; 32], ciphertext: &[u8], nonce: &[u8; 12]) -> FsResult<Vec<u8>> {
        let cipher = ChaCha20Poly1305::new(key).map_err(|_| FsError::DecryptionFailed)?;
        cipher
            .decrypt(nonce, ciphertext, None)
            .map_err(|_| FsError::DecryptionFailed)
    }
}
use core::fmt::Arguments;
use core::sync::atomic::{AtomicPtr, Ordering};
/// Signature of the host-provided logging sink.
pub type LogFn = fn(Arguments);
/// Registered logging callback, stored type-erased as a data pointer so it
/// fits in an `AtomicPtr` (null = no logger installed).
static LOG_FN: AtomicPtr<()> = AtomicPtr::new(core::ptr::null_mut());
/// Install the logging callback used by `lcpfs_println!`.
pub fn set_log_fn(f: LogFn) {
    LOG_FN.store(f as *mut (), Ordering::SeqCst);
}
#[doc(hidden)]
pub fn _log(args: Arguments) {
    let ptr = LOG_FN.load(Ordering::SeqCst);
    if !ptr.is_null() {
        // SAFETY: the only non-null value ever stored is a `LogFn` cast to
        // `*mut ()` in `set_log_fn`, so transmuting back recovers the same
        // function pointer. (Fn-pointer <-> data-pointer round-trips are
        // not guaranteed by the abstract machine, but hold on the supported
        // targets.)
        let f: LogFn = unsafe { core::mem::transmute(ptr) };
        f(args);
    }
}
/// Print through the host-registered logger; silently a no-op when no
/// logger has been installed via [`set_log_fn`].
#[macro_export]
macro_rules! lcpfs_println {
    ($($arg:tt)*) => {
        $crate::_log(format_args!($($arg)*))
    };
}
use core::sync::atomic::AtomicBool;
/// Set once a spawn function is registered; lets subsystems ask whether
/// background tasks can be scheduled at all.
static SCHEDULER_AVAILABLE: AtomicBool = AtomicBool::new(false);
/// Host callback that spawns `task`, optionally pinned to a CPU core.
pub type SpawnFn = fn(fn(), Option<usize>);
/// Registered spawn callback (type-erased; null = none).
static SPAWN_FN: AtomicPtr<()> = AtomicPtr::new(core::ptr::null_mut());
/// Install the task-spawning callback and mark the scheduler available.
pub fn set_spawn_fn(f: SpawnFn) {
    SPAWN_FN.store(f as *mut (), Ordering::SeqCst);
    SCHEDULER_AVAILABLE.store(true, Ordering::SeqCst);
}
/// Spawn `task` on `core` via the host scheduler; a no-op if no spawn
/// callback has been registered.
pub fn spawn_on_core(task: fn(), core: Option<usize>) {
    let ptr = SPAWN_FN.load(Ordering::SeqCst);
    if !ptr.is_null() {
        // SAFETY: only `set_spawn_fn` stores a non-null value, and it is
        // always a `SpawnFn` cast to `*mut ()`.
        let f: SpawnFn = unsafe { core::mem::transmute(ptr) };
        f(task, core);
    }
}
/// Whether a host scheduler has been registered via [`set_spawn_fn`].
pub fn scheduler_available() -> bool {
    SCHEDULER_AVAILABLE.load(Ordering::SeqCst)
}
/// Host callback returning the current time as a raw `u64` tick value
/// (units are defined by the host; NOTE(review): confirm expected units
/// against consumers such as the scrub scheduler).
pub type TimeFn = fn() -> u64;
/// Registered time source (type-erased; null = none).
static TIME_FN: AtomicPtr<()> = AtomicPtr::new(core::ptr::null_mut());
/// Install the time source used by [`get_time`].
pub fn set_time_fn(f: TimeFn) {
    TIME_FN.store(f as *mut (), Ordering::SeqCst);
}
/// Current time from the host, or 0 when no time source is registered.
pub fn get_time() -> u64 {
    let ptr = TIME_FN.load(Ordering::SeqCst);
    if !ptr.is_null() {
        // SAFETY: only `set_time_fn` stores a non-null value, always a
        // `TimeFn` cast to `*mut ()`.
        let f: TimeFn = unsafe { core::mem::transmute(ptr) };
        f()
    } else {
        0
    }
}
/// Host callback that yields the CPU for roughly the given number of
/// microseconds.
pub type YieldFn = fn(u64);
/// Registered yield callback (type-erased; null = none).
static YIELD_FN: AtomicPtr<()> = AtomicPtr::new(core::ptr::null_mut());
/// Install the cooperative-yield callback.
pub fn set_yield_fn(f: YieldFn) {
    YIELD_FN.store(f as *mut (), Ordering::SeqCst);
}
/// Yield for `microseconds` via the host scheduler; without one, fall back
/// to a single spin-loop hint (no actual delay).
pub fn cooperative_yield(microseconds: u64) {
    let ptr = YIELD_FN.load(Ordering::SeqCst);
    if !ptr.is_null() {
        // SAFETY: only `set_yield_fn` stores a non-null value, always a
        // `YieldFn` cast to `*mut ()`.
        let f: YieldFn = unsafe { core::mem::transmute(ptr) };
        f(microseconds);
    } else {
        core::hint::spin_loop();
    }
}
pub mod arch;
pub mod fscore;
pub mod storage;
pub mod cache;
pub mod raid;
pub mod compress;
pub mod dedup;
pub mod crypto;
pub mod vault;
pub mod integrity;
pub mod cloud;
pub mod hw;
pub mod net;
pub mod distributed;
pub mod ml;
pub mod lunaos;
pub mod mgmt;
pub mod io;
pub mod tier;
pub mod util;
pub mod vector;
pub mod timetravel;
pub mod branch;
pub mod s3;
pub mod wasm;
pub mod notify;
pub mod nfs;
pub mod thin;
pub mod txn;
pub mod quota;
pub mod lineage;
pub mod delta;
pub mod dictcomp;
pub mod fts;
pub mod defrag;
pub mod trash;
pub mod archive;
pub mod analytics;
pub mod sparse;
pub mod streams;
pub mod telemetry;
#[cfg(feature = "fuse")]
pub mod fuse;
pub mod time;
use mgmt::mount::LcpfsMount;
use storage::zpl::{DirEntry, Zpl};
/// A mounted LCPFS storage pool: the top-level handle through which all
/// file, directory, snapshot, and property operations go.
pub struct Pool {
    /// Low-level mount state (device id, uberblock, TXG counter, guid).
    mount: LcpfsMount,
    /// POSIX layer (objects, directories, descriptors, locks).
    zpl: Zpl,
    /// In-memory snapshot registry keyed by snapshot name.
    snapshots: alloc::collections::BTreeMap<String, SnapshotMetadata>,
    /// Pool-level tunables (compression, checksum, quota, ...).
    properties: Properties,
}
/// Maximum number of path components accepted by path resolution.
pub const MAX_PATH_DEPTH: usize = 256;
/// Maximum length in bytes of a single path component.
pub const MAX_NAME_LEN: usize = 255;
/// Maximum length in bytes of a whole path.
pub const MAX_PATH_LEN: usize = 4096;
/// Value of a pool property.
#[derive(Debug, Clone, PartialEq)]
pub enum PropertyValue {
    /// Free-form string value (e.g. a compression algorithm name).
    String(alloc::string::String),
    /// Numeric value (e.g. record size in bytes).
    Number(u64),
    /// On/off flag.
    Boolean(bool),
}
/// Where a property's current value came from.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum PropertySource {
    /// Explicitly set on this pool.
    Local,
    /// Inherited from a parent dataset.
    Inherited,
    /// Built-in default.
    Default,
}
/// Ordered map of property name -> (value, provenance).
#[derive(Debug, Clone)]
pub struct Properties {
    /// BTreeMap keeps listings deterministically sorted by name.
    values: alloc::collections::BTreeMap<alloc::string::String, (PropertyValue, PropertySource)>,
}
impl Properties {
    /// Build the property table pre-populated with the built-in defaults.
    pub fn new() -> Self {
        // Every entry installed here carries `PropertySource::Default`;
        // `set` later overrides entries with `Local` provenance.
        let defaults = [
            ("compression", PropertyValue::String("lz4".into())),
            ("checksum", PropertyValue::String("blake3".into())),
            ("readonly", PropertyValue::Boolean(false)),
            ("recordsize", PropertyValue::Number(131072)),
            ("atime", PropertyValue::Boolean(true)),
            ("dedup", PropertyValue::Boolean(true)),
        ];
        let mut values = alloc::collections::BTreeMap::new();
        for (name, value) in defaults {
            values.insert(name.into(), (value, PropertySource::Default));
        }
        Self { values }
    }
    /// Current value of `name`, if the property exists.
    pub fn get(&self, name: &str) -> Option<&PropertyValue> {
        match self.values.get(name) {
            Some((value, _source)) => Some(value),
            None => None,
        }
    }
    /// Current value of `name` together with where it came from.
    pub fn get_with_source(&self, name: &str) -> Option<(&PropertyValue, PropertySource)> {
        match self.values.get(name) {
            Some((value, source)) => Some((value, *source)),
            None => None,
        }
    }
    /// Set `name` to `value`, marking it locally configured.
    pub fn set(&mut self, name: &str, value: PropertyValue) {
        self.values
            .insert(name.into(), (value, PropertySource::Local));
    }
    /// All properties as owned (name, value, source) triples, name-sorted.
    pub fn list(&self) -> alloc::vec::Vec<(alloc::string::String, PropertyValue, PropertySource)> {
        let mut out = alloc::vec::Vec::with_capacity(self.values.len());
        for (name, (value, source)) in self.values.iter() {
            out.push((name.clone(), value.clone(), *source));
        }
        out
    }
}
impl Default for Properties {
    /// Same as [`Properties::new`]: the built-in default property set.
    fn default() -> Self {
        Self::new()
    }
}
/// Bookkeeping record for a named snapshot (held in memory only).
#[derive(Debug, Clone)]
struct SnapshotMetadata {
    /// User-chosen snapshot name (also the key in `Pool::snapshots`).
    name: String,
    /// Transaction group the snapshot was taken at; rollback target.
    txg: u64,
    /// Creation timestamp — currently filled with the TXG as a stand-in
    /// (see `Pool::snapshot`).
    creation_time: u64,
    /// Pseudo-unique identifier used by incremental send.
    guid: u64,
    /// Bytes attributed uniquely to this snapshot (rough estimate).
    unique_bytes: u64,
}
impl Pool {
/// Import an existing pool from registered device `dev_id`.
///
/// ZPL reconstruction failures are downgraded to a warning plus a fresh
/// (empty) ZPL so a damaged pool still imports instead of erroring out.
pub fn import(dev_id: usize) -> FsResult<Self> {
    // Bring up the low-level mount first; it locates the most recent valid
    // uberblock ("hyperblock") on the device.
    let mount =
        LcpfsMount::import(dev_id).map_err(|e| FsError::InvalidPoolConfig { reason: e })?;
    let zpl = if let Some(ref hyperblock) = mount.active_uberblock {
        match Zpl::import_from_hyperblock(hyperblock, None) {
            Ok(imported_zpl) => {
                lcpfs_println!(
                    "[ POOL ] Reconstructed ZPL from hyperblock TXG {}",
                    hyperblock.txg
                );
                imported_zpl
            }
            Err(e) => {
                lcpfs_println!(
                    "[ POOL ] Warning: ZPL import failed ({:?}), creating fresh ZPL",
                    e
                );
                Zpl::new()
            }
        }
    } else {
        // No uberblock at all: start from an empty filesystem.
        Zpl::new()
    };
    // Snapshots are in-memory only; every import starts with none.
    let snapshots = alloc::collections::BTreeMap::new();
    let properties = Properties::new();
    Ok(Self {
        mount,
        zpl,
        snapshots,
        properties,
    })
}
/// Format device `dev_id` as a new pool named `pool_name`, then import it.
pub fn create_pool(dev_id: usize, pool_name: &str) -> FsResult<Self> {
    use crate::mgmt::format::LcpfsFormatter;
    if let Err(reason) = LcpfsFormatter::format_drive(dev_id, pool_name) {
        return Err(FsError::InvalidPoolConfig { reason });
    }
    Self::import(dev_id)
}
/// The pool's current transaction group number.
pub fn current_txg(&self) -> u64 {
    self.mount.current_txg
}
/// The pool's globally unique identifier.
pub fn guid(&self) -> u64 {
    self.mount.pool_guid
}
/// Open the file at `path` with `O_*` flags; returns a file descriptor.
pub fn open(&mut self, path: &str, flags: u32) -> FsResult<u64> {
    let object_id = self.path_to_object_id(path)?;
    self.zpl.open(object_id, flags)
}
/// Create a regular file at `path` with `mode` permission bits (uid/gid
/// default to 0) and open it read-write; returns the new descriptor.
pub fn create(&mut self, path: &str, mode: u32) -> FsResult<u64> {
    let (dir_id, filename) = self.split_path(path)?;
    let object_id = self.zpl.create(dir_id, &filename, mode, 0, 0)?;
    self.zpl.open(object_id, storage::zpl::O_RDWR)
}
/// Close file descriptor `fd`.
pub fn close(&mut self, fd: u64) -> FsResult<()> {
    self.zpl.close(fd)
}
/// Read up to `buf.len()` bytes at the descriptor's current offset;
/// returns bytes read.
pub fn read(&mut self, fd: u64, buf: &mut [u8]) -> FsResult<usize> {
    self.zpl.read(fd, buf)
}
/// Write `buf` at the descriptor's current offset; returns bytes written.
pub fn write(&mut self, fd: u64, buf: &[u8]) -> FsResult<usize> {
    self.zpl.write(fd, buf)
}
/// Reposition the descriptor's offset (`whence`: SEEK_SET/SEEK_CUR/
/// SEEK_END); returns the new absolute offset.
pub fn seek(&mut self, fd: u64, offset: i64, whence: i32) -> FsResult<u64> {
    self.zpl.seek(fd, offset, whence)
}
/// Truncate (or extend) the file at `path` to exactly `length` bytes.
pub fn truncate(&mut self, path: &str, length: u64) -> FsResult<()> {
    let object_id = self.path_to_object_id(path)?;
    self.zpl.truncate(object_id, length)
}
/// Take a whole-file advisory lock on `fd` for `pid` (BSD `flock` style,
/// exclusive or shared).
pub fn flock(&mut self, fd: u64, exclusive: bool, pid: u32) -> FsResult<()> {
    use crate::storage::zpl::LockType;
    let lock_type = if exclusive {
        LockType::Exclusive
    } else {
        LockType::Shared
    };
    self.zpl.flock(fd, lock_type, pid)
}
/// Release the whole-file advisory lock held by `pid` on `fd`.
pub fn funlock(&mut self, fd: u64, pid: u32) -> FsResult<()> {
    self.zpl.funlock(fd, pid)
}
/// Remove the directory entry at `path`.
pub fn unlink(&mut self, path: &str) -> FsResult<()> {
    let (dir_id, filename) = self.split_path(path)?;
    self.zpl.unlink(dir_id, &filename)
}
/// Rename/move `src_path` to `dst_path`.
pub fn rename(&mut self, src_path: &str, dst_path: &str) -> FsResult<()> {
    let (src_dir_id, src_name) = self.split_path(src_path)?;
    let (dst_dir_id, dst_name) = self.split_path(dst_path)?;
    self.zpl
        .rename(src_dir_id, &src_name, dst_dir_id, &dst_name)
}
/// Flush pending state to stable storage: ZIL flush first, then a full
/// TXG sync, then ZIL commit of the synced TXG.
pub fn sync(&mut self) -> FsResult<()> {
    use crate::storage::zil::ZilEngine;
    ZilEngine::flush_to_slog().map_err(|e| FsError::InvalidPoolConfig { reason: e })?;
    let dev_id = self.mount.dev_id;
    let txg = self.zpl.txg_sync(dev_id)?;
    ZilEngine::commit_txg(txg).map_err(|e| FsError::InvalidPoolConfig { reason: e })?;
    Ok(())
}
/// Create a directory at `path` with `mode` permissions (uid/gid 0).
pub fn mkdir(&mut self, path: &str, mode: u32) -> FsResult<()> {
    let (parent_id, dirname) = self.split_path(path)?;
    self.zpl.mkdir(parent_id, &dirname, mode, 0, 0)?;
    Ok(())
}
/// Remove the (empty) directory at `path`.
pub fn rmdir(&mut self, path: &str) -> FsResult<()> {
    let (parent_id, dirname) = self.split_path(path)?;
    self.zpl.rmdir(parent_id, &dirname)
}
/// List the entries of the directory at `path`.
pub fn readdir(&self, path: &str) -> FsResult<Vec<DirEntry>> {
    let object_id = self.path_to_object_id(path)?;
    self.zpl.readdir(object_id)
}
/// Fetch POSIX-style metadata for `path`.
pub fn stat(&self, path: &str) -> FsResult<FileStat> {
    let object_id = self.path_to_object_id(path)?;
    self.zpl.getattr(object_id)
}
/// Change the permission bits of `path`.
pub fn chmod(&mut self, path: &str, mode: u32) -> FsResult<()> {
    let object_id = self.path_to_object_id(path)?;
    self.zpl.setattr(object_id, Some(mode), None, None)
}
/// Change the owner uid/gid of `path`.
pub fn chown(&mut self, path: &str, uid: u32, gid: u32) -> FsResult<()> {
    let object_id = self.path_to_object_id(path)?;
    self.zpl.setattr(object_id, None, Some(uid), Some(gid))
}
/// Create a hard link `new_path` pointing at `existing_path`.
///
/// Directories cannot be hard-linked (returns `IsDirectory`), matching
/// POSIX semantics.
pub fn link(&mut self, existing_path: &str, new_path: &str) -> FsResult<()> {
    let existing_id = self.path_to_object_id(existing_path)?;
    let stat = self.zpl.getattr(existing_id)?;
    if (stat.st_mode & S_IFMT) == S_IFDIR {
        return Err(FsError::IsDirectory);
    }
    let (dst_dir_id, dst_name) = self.split_path(new_path)?;
    self.zpl.link_create(dst_dir_id, &dst_name, existing_id)
}
/// Create a symbolic link at `link_path` whose content is `target`.
/// `target` is passed through verbatim; it is not validated or resolved
/// here.
pub fn symlink(&mut self, target: &str, link_path: &str) -> FsResult<()> {
    let (dir_id, link_name) = self.split_path(link_path)?;
    self.zpl.symlink(dir_id, &link_name, target, 0, 0)?;
    Ok(())
}
/// Read the target string stored in the symlink at `path`.
pub fn readlink(&self, path: &str) -> FsResult<String> {
    let object_id = self.path_to_object_id(path)?;
    self.zpl.readlink(object_id)
}
/// Acquire a POSIX byte-range lock (`fcntl(F_SETLK)`-style) on `fd`,
/// covering `length` bytes from `start`, owned by `pid`.
pub fn fcntl_lock(
    &mut self,
    fd: u64,
    exclusive: bool,
    start: u64,
    length: u64,
    pid: u32,
) -> FsResult<()> {
    use crate::storage::zpl::LockType;
    let lock_type = if exclusive {
        LockType::Exclusive
    } else {
        LockType::Shared
    };
    self.zpl.fcntl_setlk(fd, lock_type, start, length, pid)
}
/// Release a byte-range lock previously taken with [`Self::fcntl_lock`].
pub fn fcntl_unlock(&mut self, fd: u64, start: u64, length: u64, pid: u32) -> FsResult<()> {
    self.zpl.fcntl_unlk(fd, start, length, pid)
}
/// Test a byte-range lock (`fcntl(F_GETLK)`-style).
///
/// Returns `Some((is_exclusive, start, length, owner_pid))` describing a
/// lock record from the ZPL — presumably the conflicting lock, with
/// `None` meaning the request would be granted (the F_GETLK convention);
/// NOTE(review): confirm against `Zpl::fcntl_getlk`.
pub fn fcntl_test_lock(
    &self,
    fd: u64,
    exclusive: bool,
    start: u64,
    length: u64,
    pid: u32,
) -> FsResult<Option<(bool, u64, u64, u32)>> {
    use crate::storage::zpl::LockType;
    let lock_type = if exclusive {
        LockType::Exclusive
    } else {
        LockType::Shared
    };
    let result = self.zpl.fcntl_getlk(fd, lock_type, start, length, pid)?;
    // Flatten the ZPL's lock record into a plain tuple for callers.
    Ok(result.map(|lock| {
        (
            lock.lock_type == LockType::Exclusive,
            lock.start,
            lock.length,
            lock.pid,
        )
    }))
}
/// Set the pool-wide quota in bytes, recorded both as a property and in
/// the ZPL's enforcement state. A value of 0 reads back as "no quota"
/// via [`Self::get_quota`].
pub fn set_quota(&mut self, quota_bytes: u64) -> FsResult<()> {
    self.properties
        .set("quota", PropertyValue::Number(quota_bytes));
    self.zpl.set_quota(quota_bytes);
    lcpfs_println!("[ QUOTA ] Pool quota set to {} bytes", quota_bytes);
    Ok(())
}
/// The configured quota in bytes, or `None` when unset or zero.
pub fn get_quota(&self) -> Option<u64> {
    match self.properties.get("quota") {
        Some(PropertyValue::Number(n)) if *n > 0 => Some(*n),
        _ => None,
    }
}
/// Bytes currently accounted as used by the ZPL.
pub fn get_used_space(&self) -> u64 {
    self.zpl.get_used_bytes()
}
/// Bytes left under the quota (saturating at 0), or `None` if no quota.
pub fn get_remaining_quota(&self) -> Option<u64> {
    let quota = self.get_quota()?;
    let used = self.get_used_space();
    Some(quota.saturating_sub(used))
}
/// Quota utilisation as a percentage clamped to 100, or `None` if no
/// quota is set. (Division is safe: `get_quota` never returns 0.)
pub fn get_quota_utilization(&self) -> Option<f64> {
    let quota = self.get_quota()?;
    let used = self.get_used_space();
    Some(((used as f64) / (quota as f64) * 100.0).min(100.0))
}
/// Whether the ZPL currently reports usage above the quota.
pub fn is_over_quota(&self) -> bool {
    self.zpl.is_over_quota()
}
/// Kick off a scrub pass, stamped with the current host time.
pub fn scrub_start(&mut self) -> FsResult<()> {
    integrity::scrub::start_scrub(get_time())
        .map_err(|reason| FsError::InvalidPoolConfig { reason })
}
/// Progress/statistics of the current or last scrub.
pub fn scrub_stats(&self) -> integrity::scrub::ScrubStats {
    integrity::scrub::stats()
}
/// Policy hook: should a scrub run now, given the observed error rate?
pub fn scrub_should_run(&self, observed_error_rate: f64) -> bool {
    integrity::scrub::should_scrub(get_time(), observed_error_rate)
}
/// Clone `src_path` to a new file `dst_path` without copying data blocks:
/// the destination shares the source's extents copy-on-write.
pub fn reflink(&mut self, src_path: &str, dst_path: &str) -> FsResult<()> {
    // Resolve and validate the source: it must exist and be a file.
    let source_object = self.path_to_object_id(src_path)?;
    let source_meta = self.zpl.getattr(source_object)?;
    let source_is_dir = (source_meta.st_mode & S_IFMT) == S_IFDIR;
    if source_is_dir {
        return Err(FsError::IsDirectory);
    }
    // The destination must not already exist.
    if self.path_to_object_id(dst_path).is_ok() {
        return Err(FsError::AlreadyExists);
    }
    // Create the destination inheriting the source's mode and ownership,
    // then wire its data to the source's blocks.
    let (dest_dir, dest_name) = self.split_path(dst_path)?;
    let dest_object = self.zpl.create(
        dest_dir,
        &dest_name,
        source_meta.st_mode,
        source_meta.st_uid,
        source_meta.st_gid,
    )?;
    self.zpl.reflink_data(source_object, dest_object)?;
    lcpfs_println!(
        "[ QUANTUM] Reflink: {} -> {} (0 bytes copied, {} bytes shared)",
        src_path,
        dst_path,
        source_meta.st_size
    );
    Ok(())
}
/// Recursively reflink-clone the directory tree at `src_path` into a new
/// directory `dst_path`; returns the number of files cloned.
///
/// Recursion depth is bounded by the tree depth (paths deeper than
/// `MAX_PATH_DEPTH` fail resolution). NOTE(review): there is no explicit
/// cycle guard here; confirm directory trees cannot contain cycles.
pub fn clone_tree(&mut self, src_path: &str, dst_path: &str) -> FsResult<u64> {
    let src_stat = self.stat(src_path)?;
    if (src_stat.st_mode & S_IFMT) != S_IFDIR {
        return Err(FsError::NotDirectory);
    }
    if self.path_to_object_id(dst_path).is_ok() {
        return Err(FsError::AlreadyExists);
    }
    self.mkdir(dst_path, src_stat.st_mode)?;
    let mut files_cloned: u64 = 0;
    let entries = self.readdir(src_path)?;
    for entry in entries {
        // Skip the self/parent pseudo-entries.
        if entry.name == "." || entry.name == ".." {
            continue;
        }
        let src_child = alloc::format!("{}/{}", src_path, entry.name);
        let dst_child = alloc::format!("{}/{}", dst_path, entry.name);
        // file_type 4 = directory (matches the dirent DT_DIR value):
        // recurse; everything else is reflinked as a file.
        if entry.file_type == 4 {
            files_cloned += self.clone_tree(&src_child, &dst_child)?;
        } else {
            self.reflink(&src_child, &dst_child)?;
            files_cloned += 1;
        }
    }
    Ok(files_cloned)
}
/// Serialise the entire pool into a replication stream (full send).
pub fn send(&mut self) -> FsResult<Vec<u8>> {
    use crate::net::send_recv::SendStream;
    let txg = self.current_txg();
    let guid = self.guid();
    let mut stream = SendStream::begin_full_send(guid, txg);
    self.zpl.send_to_stream(&mut stream)?;
    Ok(stream.finalize())
}
/// Serialise only changes made after `from_txg` (incremental send).
pub fn send_incremental(&mut self, from_txg: u64) -> FsResult<Vec<u8>> {
    use crate::net::send_recv::SendStream;
    let to_txg = self.current_txg();
    let guid = self.guid();
    // Source and target guid are the same pool for a TXG-based delta.
    let mut stream = SendStream::begin_incremental_send(guid, guid, from_txg, to_txg);
    self.zpl.send_incremental_to_stream(&mut stream, from_txg)?;
    Ok(stream.finalize())
}
/// Serialise the delta between two named snapshots, or between a snapshot
/// and the live pool head when `to_snapshot` is `None`.
///
/// Fails with `InvalidArgument` if either named snapshot is unknown.
pub fn send_snapshot_incremental(
    &mut self,
    from_snapshot: &str,
    to_snapshot: Option<&str>,
) -> FsResult<Vec<u8>> {
    use crate::net::send_recv::SendStream;
    let from_snap = self
        .snapshots
        .get(from_snapshot)
        .ok_or(FsError::InvalidArgument {
            reason: "base snapshot not found",
        })?;
    let from_txg = from_snap.txg;
    let from_guid = from_snap.guid;
    // Target is either a named snapshot or the current pool state.
    let (to_txg, to_guid) = if let Some(to_name) = to_snapshot {
        let to_snap = self
            .snapshots
            .get(to_name)
            .ok_or(FsError::InvalidArgument {
                reason: "target snapshot not found",
            })?;
        (to_snap.txg, to_snap.guid)
    } else {
        (self.current_txg(), self.guid())
    };
    crate::lcpfs_println!(
        "[ SEND ] Incremental send from '{}' (TXG {}) to {} (TXG {})",
        from_snapshot,
        from_txg,
        to_snapshot.unwrap_or("current"),
        to_txg
    );
    let mut stream = SendStream::begin_incremental_send(from_guid, to_guid, from_txg, to_txg);
    // Record the snapshot boundary markers in the stream.
    stream.write_snapshot(from_snapshot, from_guid);
    if let Some(to_name) = to_snapshot {
        stream.write_snapshot(to_name, to_guid);
    }
    self.zpl.send_incremental_to_stream(&mut stream, from_txg)?;
    Ok(stream.finalize())
}
/// Apply a replication stream produced by one of the `send*` methods.
pub fn receive(&mut self, stream: &[u8]) -> FsResult<()> {
    use crate::net::send_recv::ReceiveStream;
    // The receiver wants an owned buffer; copy the caller's slice.
    let mut incoming = match ReceiveStream::begin_receive(stream.to_vec()) {
        Ok(r) => r,
        Err(reason) => return Err(FsError::InvalidPoolConfig { reason }),
    };
    self.zpl.receive_from_stream(&mut incoming)?;
    Ok(())
}
pub fn snapshot(&mut self, name: &str) -> FsResult<()> {
crate::lcpfs_println!("[ SNAP ] Creating snapshot: {}", name);
if self.snapshots.contains_key(name) {
return Err(FsError::InvalidArgument {
reason: "snapshot already exists",
});
}
self.sync()?;
let txg = self.current_txg();
let guid = self.snapshots.len() as u64 + 1000;
let stats = self.mount.pool_stats();
let total_space = 1024 * 1024 * 1024 * 1024; let unique_bytes = total_space - stats.free_space;
let snapshot = SnapshotMetadata {
name: name.into(),
txg,
creation_time: txg, guid,
unique_bytes,
};
self.snapshots.insert(name.into(), snapshot);
crate::lcpfs_println!("[ SNAP ] Snapshot '{}' created at TXG {}", name, txg);
Ok(())
}
/// Enumerate snapshots as (name, txg, creation_time) triples, in
/// name-sorted order (the BTreeMap's iteration order).
pub fn list_snapshots(&self) -> Vec<(String, u64, u64)> {
    let mut listing = Vec::with_capacity(self.snapshots.len());
    for snap in self.snapshots.values() {
        listing.push((snap.name.clone(), snap.txg, snap.creation_time));
    }
    listing
}
/// Roll the pool back to the state captured by snapshot `name`.
///
/// Snapshots newer than the target are discarded and the mount's current
/// TXG is rewound. NOTE(review): only the in-memory TXG counter and the
/// snapshot table are changed here — no on-disk block state is restored
/// by this function.
pub fn rollback(&mut self, name: &str) -> FsResult<()> {
    crate::lcpfs_println!("[ SNAP ] Rolling back to snapshot: {}", name);
    let snapshot = self.snapshots.get(name).ok_or(FsError::InvalidArgument {
        reason: "snapshot not found",
    })?;
    let target_txg = snapshot.txg;
    crate::lcpfs_println!("[ SNAP ] Rolling back to TXG {}", target_txg);
    // Drop every snapshot taken after the rollback target; the target
    // itself satisfies `txg <= target_txg` and is retained.
    self.snapshots.retain(|_, snap| snap.txg <= target_txg);
    self.mount.current_txg = target_txg;
    crate::lcpfs_println!("[ SNAP ] Restoring filesystem state to TXG {}", target_txg);
    crate::lcpfs_println!(
        "[ SNAP ] Rollback complete - {} snapshots retained",
        self.snapshots.len()
    );
    Ok(())
}
/// Delete snapshot `name`; errors if no such snapshot exists.
pub fn destroy_snapshot(&mut self, name: &str) -> FsResult<()> {
    crate::lcpfs_println!("[ SNAP ] Destroying snapshot: {}", name);
    // `remove` both checks existence and deletes in one map lookup.
    if self.snapshots.remove(name).is_none() {
        return Err(FsError::InvalidArgument {
            reason: "snapshot not found",
        });
    }
    crate::lcpfs_println!("[ SNAP ] Snapshot '{}' destroyed", name);
    Ok(())
}
/// An owned copy of property `name`, or `InvalidArgument` if unknown.
pub fn get_property(&self, name: &str) -> FsResult<PropertyValue> {
    match self.properties.get(name) {
        Some(value) => Ok(value.clone()),
        None => Err(FsError::InvalidArgument {
            reason: "property not found",
        }),
    }
}
/// Validate and set a pool property (stored with `Local` provenance).
///
/// Accepts only the six known property names; each value is type- and
/// range-checked before being stored. Only the property table is updated
/// here — nothing reconfigures the running ZPL directly.
pub fn set_property(&mut self, name: &str, value: PropertyValue) -> FsResult<()> {
    // First gate: the property name must be one we know about.
    match name {
        "compression" | "checksum" | "readonly" | "recordsize" | "atime" | "dedup" => {}
        _ => {
            return Err(FsError::InvalidArgument {
                reason: "unknown property",
            });
        }
    }
    // Second gate: per-property value validation.
    match (name, &value) {
        ("compression", PropertyValue::String(s)) => {
            // no-std builds accept only off/lz4.
            #[cfg(not(feature = "std"))]
            {
                if s != "off" && s != "lz4" {
                    return Err(FsError::InvalidArgument {
                        reason: "invalid compression type (valid: off, lz4)",
                    });
                }
            }
            // std builds additionally accept zstd and lzma.
            #[cfg(feature = "std")]
            {
                if s != "off" && s != "lz4" && s != "zstd" && s != "lzma" {
                    return Err(FsError::InvalidArgument {
                        reason: "invalid compression type (valid: off, lz4, zstd, lzma)",
                    });
                }
            }
        }
        ("checksum", PropertyValue::String(s)) => {
            if s != "blake3" && s != "sha256" {
                return Err(FsError::InvalidArgument {
                    reason: "invalid checksum type",
                });
            }
        }
        ("readonly", PropertyValue::Boolean(_)) => {}
        ("atime", PropertyValue::Boolean(_)) => {}
        ("dedup", PropertyValue::Boolean(_)) => {}
        ("recordsize", PropertyValue::Number(n)) => {
            // Record size must be a power of two in [4 KiB, 1 MiB].
            if !n.is_power_of_two() || *n < 4096 || *n > 1048576 {
                return Err(FsError::InvalidArgument {
                    reason: "recordsize must be power of 2 between 4K and 1M",
                });
            }
        }
        // Known name but wrong value type (e.g. Number for "compression").
        _ => {
            return Err(FsError::InvalidArgument {
                reason: "property type mismatch",
            });
        }
    }
    self.properties.set(name, value);
    crate::lcpfs_println!("[ PROP ] Set {}={:?}", name, self.properties.get(name));
    Ok(())
}
/// All pool properties with their values and provenance, name-sorted.
pub fn list_properties(
    &self,
) -> alloc::vec::Vec<(alloc::string::String, PropertyValue, PropertySource)> {
    self.properties.list()
}
/// Resolve an absolute path to its ZPL object id.
///
/// Enforces `MAX_PATH_LEN`, `MAX_PATH_DEPTH`, and `MAX_NAME_LEN`. Empty
/// components (repeated '/') are ignored, so "//a///b" equals "/a/b".
/// A failed lookup on the final component maps to `NotFound`; a failure
/// earlier in the walk maps to `PathNotFound` naming the component that
/// did not resolve.
fn path_to_object_id(&self, path: &str) -> FsResult<u64> {
    if path.len() > MAX_PATH_LEN {
        return Err(FsError::InvalidArgument {
            reason: "path exceeds maximum length",
        });
    }
    // Fast path: the root directory itself.
    if path == "/" {
        return Ok(self.zpl.root_id());
    }
    let components: Vec<&str> = path.split('/').filter(|s| !s.is_empty()).collect();
    if components.len() > MAX_PATH_DEPTH {
        return Err(FsError::InvalidArgument {
            reason: "path exceeds maximum depth",
        });
    }
    for component in &components {
        if component.len() > MAX_NAME_LEN {
            return Err(FsError::InvalidArgument {
                reason: "path component exceeds maximum length",
            });
        }
    }
    // Walk component by component from the root.
    let mut current_id = self.zpl.root_id();
    for (i, component) in components.iter().enumerate() {
        let is_last = i == components.len() - 1;
        current_id = self.zpl.lookup(current_id, component).map_err(|_| {
            if is_last {
                FsError::NotFound
            } else {
                FsError::PathNotFound {
                    path_hint: String::from(*component),
                }
            }
        })?;
    }
    Ok(current_id)
}
/// Split `path` into (parent directory object id, final component name).
///
/// Applies the same length/depth limits as [`Self::path_to_object_id`].
/// The parent directory must already exist; the final component need not.
fn split_path(&self, path: &str) -> FsResult<(u64, String)> {
    if path.len() > MAX_PATH_LEN {
        return Err(FsError::InvalidArgument {
            reason: "path exceeds maximum length",
        });
    }
    let components: Vec<&str> = path.split('/').filter(|s| !s.is_empty()).collect();
    if components.is_empty() {
        // "/" or an all-separator path has no final component to split.
        return Err(FsError::InvalidArgument {
            reason: "empty path",
        });
    }
    if components.len() > MAX_PATH_DEPTH {
        return Err(FsError::InvalidArgument {
            reason: "path exceeds maximum depth",
        });
    }
    for component in &components {
        if component.len() > MAX_NAME_LEN {
            return Err(FsError::InvalidArgument {
                reason: "path component exceeds maximum length",
            });
        }
    }
    debug_assert!(
        !components.is_empty(),
        "is_empty() check above ensures non-empty"
    );
    // `last()` cannot be None here (guarded above); the error arm is
    // defensive and unreachable in practice.
    let filename = match components.last() {
        Some(&name) => String::from(name),
        None => {
            return Err(FsError::InvalidArgument {
                reason: "empty path components",
            });
        }
    };
    // A single component lives directly under the root directory.
    if components.len() == 1 {
        return Ok((self.zpl.root_id(), filename));
    }
    // Otherwise resolve everything but the last component as the parent.
    let parent_path = components[..components.len() - 1].join("/");
    let parent_id = self.path_to_object_id(&format!("/{}", parent_path))?;
    Ok((parent_id, filename))
}
}
pub use storage::zpl::{
O_APPEND,
O_CREAT,
O_DIRECTORY,
O_EXCL,
O_RDONLY,
O_RDWR,
O_TRUNC,
O_WRONLY,
S_IFBLK,
S_IFIFO,
S_IFLNK,
S_IFMT,
S_IFSOCK,
SEEK_CUR,
SEEK_END,
SEEK_SET,
};
/// Print the filesystem banner through the registered logger (a no-op
/// when no logger has been installed via [`set_log_fn`]).
pub fn init() {
    lcpfs_println!("[ LCPFS ] LCP File System v0.1.0");
    lcpfs_println!("[ LCPFS ] ZFS-inspired COW filesystem - https://github.com/artst3in/LunaOS");
}
#[cfg(test)]
mod tests {
use super::*;
use alloc::vec;
fn create_test_device(size_mb: u64) -> usize {
let size_bytes = size_mb * 1024 * 1024;
let device = Box::new(RamDisk::new(size_bytes));
register_device(device)
}
fn with_test_pool<F>(size_mb: u64, f: F)
where
F: FnOnce(&mut Pool),
{
let dev_id = create_test_device(size_mb);
let mut pool = Pool::create_pool(dev_id, "testpool")
.expect("Failed to create test pool - check device registration");
f(&mut pool);
}
fn with_imported_pool<S, T>(size_mb: u64, setup: S, test: T)
where
S: FnOnce(&mut Pool),
T: FnOnce(&mut Pool),
{
let dev_id = create_test_device(size_mb);
{
let mut pool =
Pool::create_pool(dev_id, "testpool").expect("Failed to create test pool");
setup(&mut pool);
}
let mut pool = Pool::import(dev_id).expect("Failed to import pool - check persistence");
test(&mut pool);
}
#[test]
fn test_pool_creation() {
let dev_id = create_test_device(100);
let result = Pool::create_pool(dev_id, "testpool");
assert!(result.is_ok(), "Pool creation should succeed");
let pool = result.expect("test: operation should succeed");
assert_eq!(pool.guid(), pool.guid()); assert!(pool.current_txg() >= 1, "TXG should be initialized");
}
#[test]
fn test_pool_import() {
let dev_id = create_test_device(100);
let _ = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");
let result = Pool::import(dev_id);
assert!(result.is_ok(), "Pool import should succeed");
let pool = result.expect("test: operation should succeed");
assert!(
pool.current_txg() >= 1,
"Imported pool should have valid TXG"
);
}
#[test]
fn test_file_create_write_read() {
with_test_pool(100, |pool| {
let fd = pool
.create("/hello.txt", 0o644)
.expect("File creation failed");
let data = b"Hello, LCPFS World!";
let written = pool.write(fd, data).expect("Write failed");
assert_eq!(written, data.len(), "Should write all bytes");
pool.seek(fd, 0, SEEK_SET).expect("Seek failed");
let mut buf = vec![0u8; data.len()];
let read = pool.read(fd, &mut buf).expect("Read failed");
assert_eq!(read, data.len(), "Should read all bytes");
assert_eq!(&buf[..], data, "Data should match what was written");
pool.close(fd).expect("Close failed");
});
}
#[test]
fn test_file_append() {
with_test_pool(100, |pool| {
let fd = pool
.create("/append.txt", 0o644)
.expect("File creation failed");
pool.write(fd, b"First line\n").expect("Write failed");
pool.close(fd).expect("Close failed");
let fd = pool
.open("/append.txt", O_RDWR | O_APPEND)
.expect("Open failed");
pool.write(fd, b"Second line\n")
.expect("Append write failed");
pool.close(fd).expect("Close failed");
let fd = pool.open("/append.txt", O_RDONLY).expect("Open failed");
let mut buf = vec![0u8; 100];
let read = pool.read(fd, &mut buf).expect("Read failed");
assert_eq!(&buf[..read], b"First line\nSecond line\n");
pool.close(fd).expect("Close failed");
});
}
#[test]
fn test_directory_operations() {
with_test_pool(100, |pool| {
pool.mkdir("/home", 0o755).expect("mkdir /home failed");
pool.mkdir("/home/user", 0o755)
.expect("mkdir /home/user failed");
pool.mkdir("/home/user/docs", 0o755)
.expect("mkdir /home/user/docs failed");
let fd = pool
.create("/home/user/test.txt", 0o644)
.expect("File creation failed");
pool.write(fd, b"test data").expect("Write failed");
pool.close(fd).expect("Close failed");
let entries = pool.readdir("/home/user").expect("readdir failed");
assert!(!entries.is_empty(), "Should have at least one entry");
let result = pool.rmdir("/home/user");
assert!(result.is_err(), "Removing non-empty directory should fail");
pool.rmdir("/home/user/docs")
.expect("Removing empty directory should succeed");
});
}
#[test]
fn test_path_traversal() {
    // Create a file three directories deep and read it back via its full path.
    with_test_pool(100, |pool| {
        for dir in ["/a", "/a/b", "/a/b/c"] {
            pool.mkdir(dir, 0o755).expect("mkdir failed");
        }

        let handle = pool
            .create("/a/b/c/deep.txt", 0o644)
            .expect("Deep file creation failed");
        pool.write(handle, b"deep file").expect("Write failed");
        pool.close(handle).expect("Close failed");

        let handle = pool
            .open("/a/b/c/deep.txt", O_RDONLY)
            .expect("Open deep file failed");
        let mut readback = vec![0u8; 9];
        pool.read(handle, &mut readback).expect("Read failed");
        assert_eq!(&readback[..], b"deep file");
        pool.close(handle).expect("Close failed");
    });
}
#[test]
fn test_file_metadata() {
    // stat/chmod/chown round trips: size, permission bits, and ownership.
    with_test_pool(100, |pool| {
        let handle = pool
            .create("/meta.txt", 0o644)
            .expect("File creation failed");
        pool.write(handle, b"metadata test").expect("Write failed");
        pool.close(handle).expect("Close failed");

        let info = pool.stat("/meta.txt").expect("stat failed");
        assert_eq!(info.st_size, 13, "File size should be 13 bytes");
        assert_eq!(info.st_mode & 0o777, 0o644, "Permissions should match");

        pool.chmod("/meta.txt", 0o600).expect("chmod failed");
        let info = pool.stat("/meta.txt").expect("stat failed");
        assert_eq!(info.st_mode & 0o777, 0o600, "Permissions should be updated");

        pool.chown("/meta.txt", 1000, 1000).expect("chown failed");
        let info = pool.stat("/meta.txt").expect("stat failed");
        assert_eq!(info.st_uid, 1000, "UID should be updated");
        assert_eq!(info.st_gid, 1000, "GID should be updated");
    });
}
#[test]
fn test_file_truncate() {
    // Shrinking a file to 7 bytes must leave exactly its first 7 bytes.
    with_test_pool(100, |pool| {
        let handle = pool
            .create("/trunc.txt", 0o644)
            .expect("File creation failed");
        pool.write(handle, b"This is a long file")
            .expect("Write failed");
        pool.close(handle).expect("Close failed");

        pool.truncate("/trunc.txt", 7).expect("Truncate failed");

        let handle = pool.open("/trunc.txt", O_RDONLY).expect("Open failed");
        let mut readback = vec![0u8; 20];
        let n = pool.read(handle, &mut readback).expect("Read failed");
        assert_eq!(n, 7, "Should only read 7 bytes");
        assert_eq!(&readback[..7], b"This is");
        pool.close(handle).expect("Close failed");
    });
}
#[test]
fn test_file_unlink() {
    // A file must be stat-able until unlinked, and gone afterwards.
    with_test_pool(100, |pool| {
        let handle = pool
            .create("/delete_me.txt", 0o644)
            .expect("File creation failed");
        pool.write(handle, b"temporary").expect("Write failed");
        pool.close(handle).expect("Close failed");

        assert!(pool.stat("/delete_me.txt").is_ok());
        pool.unlink("/delete_me.txt").expect("Unlink failed");
        assert!(pool.stat("/delete_me.txt").is_err());
    });
}
#[test]
fn test_error_cases() {
    // Operations on missing paths and bogus descriptors must all error out.
    with_test_pool(100, |pool| {
        assert!(
            pool.open("/nonexistent.txt", O_RDONLY).is_err(),
            "Opening non-existent file should fail"
        );
        assert!(
            pool.create("/nonexistent/file.txt", 0o644).is_err(),
            "Creating file in non-existent dir should fail"
        );
        let mut scratch = [0u8; 10];
        assert!(
            pool.read(99999, &mut scratch).is_err(),
            "Reading with invalid fd should fail"
        );
        assert!(pool.close(99999).is_err(), "Closing invalid fd should fail");
    });
}
#[test]
fn test_multiple_files() {
    // Ten independent files: write each, then read each back and compare.
    with_test_pool(100, |pool| {
        for i in 0..10 {
            let path = format!("/file{}.txt", i);
            let payload = format!("File number {}", i);
            let handle = pool.create(&path, 0o644).expect("File creation failed");
            pool.write(handle, payload.as_bytes()).expect("Write failed");
            pool.close(handle).expect("Close failed");
        }
        for i in 0..10 {
            let path = format!("/file{}.txt", i);
            let handle = pool.open(&path, O_RDONLY).expect("Open failed");
            let mut readback = vec![0u8; 50];
            let n = pool.read(handle, &mut readback).expect("Read failed");
            let expected = format!("File number {}", i);
            assert_eq!(&readback[..n], expected.as_bytes());
            pool.close(handle).expect("Close failed");
        }
    });
}
#[test]
fn test_seek_operations() {
    // SEEK_SET / SEEK_CUR / SEEK_END over a 16-byte file of known content.
    with_test_pool(100, |pool| {
        let handle = pool
            .create("/seek_test.txt", 0o644)
            .expect("File creation failed");
        pool.write(handle, b"0123456789ABCDEF").expect("Write failed");

        let offset = pool.seek(handle, 5, SEEK_SET).expect("SEEK_SET failed");
        assert_eq!(offset, 5);
        let mut window = [0u8; 5];
        pool.read(handle, &mut window).expect("Read failed");
        assert_eq!(&window, b"56789");

        // Cursor is now at 10, so -3 relative lands on index 7.
        pool.seek(handle, -3, SEEK_CUR).expect("SEEK_CUR failed");
        pool.read(handle, &mut window).expect("Read failed");
        assert_eq!(&window[..3], b"789");

        // -4 from the end lands on the final four bytes.
        pool.seek(handle, -4, SEEK_END).expect("SEEK_END failed");
        pool.read(handle, &mut window).expect("Read failed");
        assert_eq!(&window[..4], b"CDEF");
        pool.close(handle).expect("Close failed");
    });
}
#[test]
fn test_txg_sync() {
    // Files written before a sync must still stat with data afterwards.
    with_test_pool(100, |pool| {
        for i in 0..5 {
            let path = format!("/txg_test_{}.txt", i);
            let payload = format!("TXG test data {}", i);
            let handle = pool.create(&path, 0o644).expect("File creation failed");
            pool.write(handle, payload.as_bytes()).expect("Write failed");
            pool.close(handle).expect("Close failed");
        }
        pool.sync().expect("Sync failed");
        for i in 0..5 {
            let path = format!("/txg_test_{}.txt", i);
            let info = pool.stat(&path).expect("Stat failed after sync");
            assert!(info.st_size > 0, "File should have data after sync");
        }
    });
}
#[test]
fn test_fsync() {
    // Write data, force it out via a pool sync, then read it back.
    //
    // Fix: this test is named `fsync` but previously never flushed anything —
    // it was a plain write/read round trip identical to other tests. A
    // `pool.sync()` (the same flush primitive `test_txg_sync` uses) is now
    // issued between the write and the read-back so the persisted path is
    // actually exercised.
    with_test_pool(100, |pool| {
        let fd = pool
            .create("/fsync_test.txt", 0o644)
            .expect("File creation failed");
        pool.write(fd, b"Critical data").expect("Write failed");
        pool.close(fd).expect("Close failed");
        // Force the data to stable storage before re-reading it.
        pool.sync().expect("Sync failed");
        let fd = pool.open("/fsync_test.txt", O_RDONLY).expect("Open failed");
        let mut buf = vec![0u8; 13];
        let read = pool.read(fd, &mut buf).expect("Read failed");
        assert_eq!(read, 13);
        assert_eq!(&buf, b"Critical data");
        pool.close(fd).expect("Close failed");
    });
}
#[test]
fn test_send_receive() {
// Full send/receive round trip: populate a source pool, serialize it with
// send(), and feed the resulting stream into a fresh destination pool.
let dev_id_src = create_test_device(100);
let mut src_pool =
Pool::create_pool(dev_id_src, "source").expect("Source pool creation failed");
for i in 0..3 {
let path = format!("/file{}.txt", i);
let fd = src_pool.create(&path, 0o644).expect("File creation failed");
let data = format!("Data for file {}", i);
src_pool.write(fd, data.as_bytes()).expect("Write failed");
src_pool.close(fd).expect("Close failed");
}
let stream = src_pool.send().expect("Send failed");
assert!(!stream.is_empty(), "Stream should contain data");
let dev_id_dst = create_test_device(100);
let mut dst_pool =
Pool::create_pool(dev_id_dst, "dest").expect("Dest pool creation failed");
dst_pool.receive(&stream).expect("Receive failed");
// NOTE(review): only the stream length is checked after receive(); nothing
// is read back out of dst_pool, so a receive that succeeds without
// restoring the files would still pass — consider verifying dest contents.
assert!(stream.len() > 100, "Stream should contain substantial data");
}
#[test]
fn test_incremental_send() {
    // Changes made after a recorded TXG must produce a non-empty delta stream.
    with_test_pool(100, |pool| {
        let handle = pool.create("/initial.txt", 0o644).expect("Create failed");
        pool.write(handle, b"Initial data").expect("Write failed");
        pool.close(handle).expect("Close failed");

        // Everything from here on is "after" the baseline.
        let base_txg = pool.current_txg();

        let handle = pool
            .create("/incremental.txt", 0o644)
            .expect("Create failed");
        pool.write(handle, b"New data").expect("Write failed");
        pool.close(handle).expect("Close failed");

        let delta = pool
            .send_incremental(base_txg)
            .expect("Incremental send failed");
        assert!(
            !delta.is_empty(),
            "Incremental stream should have data"
        );
    });
}
#[test]
fn test_snapshot_create() {
    // Each snapshot() call should add exactly one named entry to the list.
    with_test_pool(100, |pool| {
        let handle = pool.create("/test.txt", 0o644).expect("Create failed");
        pool.write(handle, b"Hello snapshot").expect("Write failed");
        pool.close(handle).expect("Close failed");

        pool.snapshot("snap1").expect("Snapshot creation failed");
        let listing = pool.list_snapshots();
        assert_eq!(listing.len(), 1, "Should have 1 snapshot");
        assert_eq!(listing[0].0, "snap1", "Snapshot name should match");

        pool.snapshot("snap2").expect("Snapshot creation failed");
        let listing = pool.list_snapshots();
        assert_eq!(listing.len(), 2, "Should have 2 snapshots");
    });
}
#[test]
fn test_snapshot_duplicate_name() {
    // Reusing an existing snapshot name must be rejected.
    with_test_pool(100, |pool| {
        pool.snapshot("snap1").expect("Snapshot creation failed");
        assert!(
            pool.snapshot("snap1").is_err(),
            "Duplicate snapshot should fail"
        );
    });
}
#[test]
fn test_snapshot_destroy() {
    // Destroying one of two snapshots leaves the other intact; destroying an
    // unknown name must error.
    with_test_pool(100, |pool| {
        pool.snapshot("snap1").expect("Snapshot creation failed");
        pool.snapshot("snap2").expect("Snapshot creation failed");

        pool.destroy_snapshot("snap1").expect("Destroy failed");
        let remaining = pool.list_snapshots();
        assert_eq!(remaining.len(), 1, "Should have 1 snapshot");
        assert_eq!(
            remaining[0].0, "snap2",
            "Remaining snapshot should be snap2"
        );

        assert!(
            pool.destroy_snapshot("nonexistent").is_err(),
            "Destroying non-existent snapshot should fail"
        );
    });
}
#[test]
fn test_snapshot_rollback() {
// Rolling back to an earlier snapshot should discard the later snapshot
// (and, implicitly, the changes made after it).
with_test_pool(100, |pool| {
let fd = pool.create("/original.txt", 0o644).expect("Create failed");
pool.write(fd, b"Original").expect("Write failed");
pool.close(fd).expect("Close failed");
pool.snapshot("before-changes")
.expect("Snapshot creation failed");
// list_snapshots() yields (name, txg) pairs; record the baseline TXG.
let snapshots = pool.list_snapshots();
let before_txg = snapshots[0].1;
let fd = pool.create("/newfile.txt", 0o644).expect("Create failed");
pool.write(fd, b"New content").expect("Write failed");
pool.close(fd).expect("Close failed");
pool.snapshot("after-changes")
.expect("Snapshot creation failed");
let snapshots = pool.list_snapshots();
let after_txg = snapshots[1].1;
// If both snapshots landed in the same TXG there is nothing to roll back
// between them, so the test bails out instead of asserting.
// NOTE(review): this silently skips the interesting path whenever no TXG
// advance happened between the two snapshots — confirm whether snapshot()
// is supposed to force a TXG advance.
if before_txg == after_txg {
return;
}
pool.rollback("before-changes").expect("Rollback failed");
// The later snapshot must be gone; only the rollback target remains.
let snapshots = pool.list_snapshots();
assert_eq!(snapshots.len(), 1, "Should have 1 snapshot after rollback");
assert_eq!(
snapshots[0].0, "before-changes",
"Should keep before-changes snapshot"
);
});
}
#[test]
fn test_snapshot_incremental_send() {
    // Incremental streams between two snapshots, and from a snapshot to the
    // current state, must both contain data.
    with_test_pool(100, |pool| {
        let handle = pool.create("/file1.txt", 0o644).expect("Create failed");
        pool.write(handle, b"Data 1").expect("Write failed");
        pool.close(handle).expect("Close failed");
        pool.snapshot("snap1").expect("Snapshot creation failed");

        let handle = pool.create("/file2.txt", 0o644).expect("Create failed");
        pool.write(handle, b"Data 2").expect("Write failed");
        pool.close(handle).expect("Close failed");
        pool.snapshot("snap2").expect("Snapshot creation failed");

        // snap1 -> snap2 delta.
        let between = pool
            .send_snapshot_incremental("snap1", Some("snap2"))
            .expect("Snapshot incremental send failed");
        assert!(
            !between.is_empty(),
            "Incremental snapshot stream should have data"
        );

        let handle = pool.create("/file3.txt", 0o644).expect("Create failed");
        pool.write(handle, b"Data 3").expect("Write failed");
        pool.close(handle).expect("Close failed");

        // snap2 -> current-state delta (no upper snapshot).
        let to_current = pool
            .send_snapshot_incremental("snap2", None)
            .expect("Snapshot incremental send to current failed");
        assert!(
            !to_current.is_empty(),
            "Incremental stream to current should have data"
        );
    });
}
#[test]
fn test_snapshot_list_ordering() {
    // Snapshots must be listed in non-decreasing TXG order.
    let dev_id = create_test_device(100);
    let mut pool = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");
    for name in ["snap-a", "snap-b", "snap-c"] {
        pool.snapshot(name).expect("Snapshot creation failed");
    }
    let listing = pool.list_snapshots();
    assert_eq!(listing.len(), 3, "Should have 3 snapshots");
    for pair in listing.windows(2) {
        assert!(pair[0].1 <= pair[1].1, "TXGs should be in order");
    }
}
#[test]
fn test_compression_basic() {
    // 1 KiB of a single repeated byte is highly compressible; it must still
    // read back byte-identical after the round trip.
    let dev_id = create_test_device(100);
    let mut pool = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");
    let handle = pool
        .create("/compressible.txt", 0o644)
        .expect("Create failed");
    let payload = b"A".repeat(1024);
    pool.write(handle, &payload).expect("Write failed");
    pool.close(handle).expect("Close failed");

    let handle = pool.open("/compressible.txt", 0).expect("Open failed");
    let mut readback = vec![0u8; 1024];
    let n = pool.read(handle, &mut readback).expect("Read failed");
    pool.close(handle).expect("Close failed");

    assert_eq!(n, 1024, "Should read 1024 bytes");
    assert_eq!(
        &readback, &payload,
        "Data should match after compression/decompression"
    );
}
#[test]
fn test_compression_incompressible() {
    // A varied byte pattern that compresses poorly must still round-trip
    // byte-for-byte.
    let dev_id = create_test_device(100);
    let mut pool = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");
    let handle = pool
        .create("/incompressible.txt", 0o644)
        .expect("Create failed");
    let payload: Vec<u8> = (0..1024).map(|i| (i * 7 + 13) as u8).collect();
    pool.write(handle, &payload).expect("Write failed");
    pool.close(handle).expect("Close failed");

    let handle = pool.open("/incompressible.txt", 0).expect("Open failed");
    let mut readback = vec![0u8; 1024];
    let n = pool.read(handle, &mut readback).expect("Read failed");
    pool.close(handle).expect("Close failed");

    assert_eq!(n, 1024, "Should read 1024 bytes");
    assert_eq!(
        &readback, &payload,
        "Incompressible data should match"
    );
}
#[test]
fn test_compression_small_files() {
    // A 10-byte file must round-trip intact.
    let dev_id = create_test_device(100);
    let mut pool = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");
    let handle = pool.create("/small.txt", 0o644).expect("Create failed");
    let payload = b"Small file";
    pool.write(handle, payload).expect("Write failed");
    pool.close(handle).expect("Close failed");

    let handle = pool.open("/small.txt", 0).expect("Open failed");
    let mut readback = vec![0u8; 10];
    let n = pool.read(handle, &mut readback).expect("Read failed");
    pool.close(handle).expect("Close failed");

    assert_eq!(n, 10, "Should read 10 bytes");
    assert_eq!(&readback, payload, "Small file data should match");
}
#[test]
fn test_compression_large_file() {
    // 100 repetitions of a 46-byte sentence must survive the compression
    // round trip intact.
    let dev_id = create_test_device(100);
    let mut pool = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");
    let handle = pool.create("/large.txt", 0o644).expect("Create failed");
    let payload = b"The quick brown fox jumps over the lazy dog. ".repeat(100);
    let total = payload.len();
    pool.write(handle, &payload).expect("Write failed");
    pool.close(handle).expect("Close failed");

    let handle = pool.open("/large.txt", 0).expect("Open failed");
    let mut readback = vec![0u8; total];
    let n = pool.read(handle, &mut readback).expect("Read failed");
    pool.close(handle).expect("Close failed");

    assert_eq!(n, total, "Should read all bytes");
    assert_eq!(
        &readback, &payload,
        "Large file should decompress correctly"
    );
}
#[test]
fn test_compression_mixed_data() {
// Round-trips four files with very different compressibility profiles
// (all-zeros, all-0xFF, repetitive text, non-repeating byte pattern) and
// checks each reads back byte-identical regardless of how it compressed.
let dev_id = create_test_device(100);
let mut pool = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");
let mut text_data = Vec::new();
for _ in 0..50 {
text_data.extend_from_slice(b"Hello World! ");
}
let files = vec![
("/zeros.bin", vec![0u8; 512]), ("/ones.bin", vec![0xFFu8; 512]), ("/text.txt", text_data), (
"/random.bin",
(0..512).map(|i| (i * 17) as u8).collect::<Vec<u8>>(),
), ];
// Write every file first, then read them all back.
for (path, data) in &files {
let fd = pool.create(path, 0o644).expect("Create failed");
pool.write(fd, data).expect("Write failed");
pool.close(fd).expect("Close failed");
}
for (path, expected_data) in &files {
let fd = pool.open(path, 0).expect("Open failed");
let mut read_buf = vec![0u8; expected_data.len()];
let bytes_read = pool.read(fd, &mut read_buf).expect("Read failed");
pool.close(fd).expect("Close failed");
assert_eq!(
bytes_read,
expected_data.len(),
"Size should match for {}",
path
);
assert_eq!(&read_buf, expected_data, "Data should match for {}", path);
}
}
#[test]
fn test_arc_caching() {
    // Read the same file twice; both reads (the second presumably served
    // from the ARC, per the test name) must return the written bytes.
    let dev_id = create_test_device(100);
    let mut pool = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");
    let handle = pool.create("/cached.txt", 0o644).expect("Create failed");
    let payload = b"This data will be cached";
    pool.write(handle, payload).expect("Write failed");
    pool.close(handle).expect("Close failed");

    let handle = pool.open("/cached.txt", 0).expect("Open failed");
    let mut first = vec![0u8; payload.len()];
    pool.read(handle, &mut first).expect("Read failed");
    pool.close(handle).expect("Close failed");

    let handle = pool.open("/cached.txt", 0).expect("Open failed");
    let mut second = vec![0u8; payload.len()];
    pool.read(handle, &mut second).expect("Read failed");
    pool.close(handle).expect("Close failed");

    assert_eq!(&first, payload, "First read should match");
    assert_eq!(&second, payload, "Cached read should match");
}
#[test]
fn test_arc_hit_rate() {
    // Read every file twice: the first read warms the cache, the second
    // should be served from it (per the test name).
    //
    // Fix: the original read loop asserted nothing, so a cache that returned
    // wrong or empty data would still pass. Every read — cold and repeated —
    // is now checked against the bytes originally written.
    let dev_id = create_test_device(100);
    let mut pool = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");
    for i in 0..5 {
        let path = format!("/file{}.txt", i);
        let fd = pool.create(&path, 0o644).expect("Create failed");
        let data = format!("Data for file {}", i);
        pool.write(fd, data.as_bytes()).expect("Write failed");
        pool.close(fd).expect("Close failed");
    }
    for i in 0..5 {
        let path = format!("/file{}.txt", i);
        let expected = format!("Data for file {}", i);
        for _ in 0..2 {
            let fd = pool.open(&path, 0).expect("Open failed");
            let mut buf = vec![0u8; 20];
            let read = pool.read(fd, &mut buf).expect("Read failed");
            pool.close(fd).expect("Close failed");
            assert_eq!(
                &buf[..read],
                expected.as_bytes(),
                "Read data should match for {}",
                path
            );
        }
    }
}
#[test]
fn test_arc_lru_eviction() {
    // Write twenty 4 KiB files, then read them all back (per the test name,
    // this is meant to churn the cache and force evictions).
    //
    // Fix: the original never checked what the reads returned, so an eviction
    // bug that served stale or zeroed blocks would go unnoticed. Each read is
    // now verified to return the full 4 KiB of the 0xAA fill pattern.
    let dev_id = create_test_device(100);
    let mut pool = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");
    for i in 0..20 {
        let path = format!("/lru{}.txt", i);
        let fd = pool.create(&path, 0o644).expect("Create failed");
        let data = vec![0xAA; 4096];
        pool.write(fd, &data).expect("Write failed");
        pool.close(fd).expect("Close failed");
    }
    for i in 0..20 {
        let path = format!("/lru{}.txt", i);
        let fd = pool.open(&path, 0).expect("Open failed");
        let mut buf = vec![0u8; 4096];
        let read = pool.read(fd, &mut buf).expect("Read failed");
        pool.close(fd).expect("Close failed");
        assert_eq!(read, 4096, "Should read full block for {}", path);
        assert!(
            buf.iter().all(|&b| b == 0xAA),
            "Read-back data should be the 0xAA fill for {}",
            path
        );
    }
}
#[test]
fn test_arc_promotion_t1_to_t2() {
    // Five repeated reads of the same file; data must stay identical each
    // time (exercises the ARC's T1-to-T2 promotion path, per the test name).
    let dev_id = create_test_device(100);
    let mut pool = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");
    let handle = pool.create("/promote.txt", 0o644).expect("Create failed");
    let payload = b"Data that gets promoted from T1 to T2";
    pool.write(handle, payload).expect("Write failed");
    pool.close(handle).expect("Close failed");

    for _ in 0..5 {
        let handle = pool.open("/promote.txt", 0).expect("Open failed");
        let mut readback = vec![0u8; payload.len()];
        pool.read(handle, &mut readback).expect("Read failed");
        pool.close(handle).expect("Close failed");
        assert_eq!(
            &readback[..payload.len()],
            payload,
            "Data should match on repeated reads"
        );
    }
}
#[test]
fn test_property_defaults() {
    // Freshly created pools must expose the documented default properties.
    let dev_id = create_test_device(100);
    let pool = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");

    assert_eq!(
        pool.get_property("compression")
            .expect("Get compression failed"),
        PropertyValue::String("lz4".into()),
        "Default compression should be lz4"
    );
    assert_eq!(
        pool.get_property("checksum").expect("Get checksum failed"),
        PropertyValue::String("blake3".into()),
        "Default checksum should be blake3"
    );
    assert_eq!(
        pool.get_property("readonly").expect("Get readonly failed"),
        PropertyValue::Boolean(false),
        "Default readonly should be false"
    );
    assert_eq!(
        pool.get_property("recordsize")
            .expect("Get recordsize failed"),
        PropertyValue::Number(131072),
        "Default recordsize should be 128K"
    );
    assert_eq!(
        pool.get_property("atime").expect("Get atime failed"),
        PropertyValue::Boolean(true),
        "Default atime should be true"
    );
    assert_eq!(
        pool.get_property("dedup").expect("Get dedup failed"),
        PropertyValue::Boolean(true),
        "Default dedup should be true"
    );
}
#[test]
fn test_property_set_get() {
    // set_property must be observable through get_property for string,
    // numeric, and boolean values.
    let dev_id = create_test_device(100);
    let mut pool = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");

    pool.set_property("compression", PropertyValue::String("off".into()))
        .expect("Set compression failed");
    assert_eq!(
        pool.get_property("compression")
            .expect("Get compression failed"),
        PropertyValue::String("off".into()),
        "Compression should be off"
    );

    pool.set_property("recordsize", PropertyValue::Number(65536))
        .expect("Set recordsize failed");
    assert_eq!(
        pool.get_property("recordsize")
            .expect("Get recordsize failed"),
        PropertyValue::Number(65536),
        "Recordsize should be 64K"
    );

    pool.set_property("readonly", PropertyValue::Boolean(true))
        .expect("Set readonly failed");
    assert_eq!(
        pool.get_property("readonly").expect("Get readonly failed"),
        PropertyValue::Boolean(true),
        "Readonly should be true"
    );
}
#[test]
fn test_property_validation_invalid_compression() {
    // Unsupported compression algorithm names must be rejected.
    let dev_id = create_test_device(100);
    let mut pool = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");
    assert!(
        pool.set_property("compression", PropertyValue::String("gzip".into()))
            .is_err(),
        "Setting invalid compression should fail"
    );
}
#[test]
fn test_property_validation_invalid_recordsize() {
    // recordsize must be a power of two, no smaller than 4K and no larger
    // than 1M; each violation is rejected.
    let dev_id = create_test_device(100);
    let mut pool = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");
    assert!(
        pool.set_property("recordsize", PropertyValue::Number(100000))
            .is_err(),
        "Setting non-power-of-2 recordsize should fail"
    );
    assert!(
        pool.set_property("recordsize", PropertyValue::Number(2048))
            .is_err(),
        "Setting recordsize < 4K should fail"
    );
    assert!(
        pool.set_property("recordsize", PropertyValue::Number(2097152))
            .is_err(),
        "Setting recordsize > 1M should fail"
    );
}
#[test]
fn test_property_validation_type_mismatch() {
    // A property only accepts values of its declared type.
    let dev_id = create_test_device(100);
    let mut pool = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");
    assert!(
        pool.set_property("compression", PropertyValue::Number(42))
            .is_err(),
        "Setting compression with Number should fail"
    );
    assert!(
        pool.set_property("readonly", PropertyValue::String("yes".into()))
            .is_err(),
        "Setting readonly with String should fail"
    );
}
#[test]
fn test_property_list() {
// list_properties must report every property as a (name, value, source)
// triple, distinguishing locally set values from built-in defaults.
let dev_id = create_test_device(100);
let mut pool = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");
pool.set_property("compression", PropertyValue::String("off".into()))
.expect("Set compression failed");
let props = pool.list_properties();
// Matches the six properties checked in test_property_defaults.
assert_eq!(props.len(), 6, "Should have 6 properties");
let compression_prop = props.iter().find(|(name, _, _)| name == "compression");
assert!(
compression_prop.is_some(),
"Compression property should exist"
);
let (_, value, source) = compression_prop.expect("test: operation should succeed");
assert_eq!(
*value,
PropertyValue::String("off".into()),
"Compression value should be off"
);
// Explicitly set above, so the source must be Local...
assert_eq!(
*source,
PropertySource::Local,
"Compression source should be Local"
);
let checksum_prop = props.iter().find(|(name, _, _)| name == "checksum");
assert!(checksum_prop.is_some(), "Checksum property should exist");
let (_, _, source) = checksum_prop.expect("test: operation should succeed");
// ...while an untouched property still reports Default.
assert_eq!(
*source,
PropertySource::Default,
"Checksum source should be Default"
);
}
#[test]
fn test_property_unknown() {
    // Both get and set must error for property names that don't exist.
    let dev_id = create_test_device(100);
    let mut pool = Pool::create_pool(dev_id, "testpool").expect("Pool creation failed");
    assert!(
        pool.get_property("nonexistent").is_err(),
        "Getting unknown property should fail"
    );
    assert!(
        pool.set_property("nonexistent", PropertyValue::String("value".into()))
            .is_err(),
        "Setting unknown property should fail"
    );
}
#[test]
fn test_enospc_allocator_exhaustion() {
// Drives the space allocator directly (no pool) until it reports DiskFull,
// checking that both an oversized single request and gradual exhaustion
// surface the number of bytes that could not be satisfied.
use crate::util::alloc::SpaceController;
// Hand-built zero state; init() below performs the real setup.
let mut allocator = SpaceController {
zones: alloc::vec::Vec::new(),
active_zone_idx: 0,
total_capacity: 0,
total_free: 0,
initialized: false,
};
let tiny_size = 64 * 1024;
allocator.init(tiny_size);
assert!(
allocator.total_capacity > 0,
"Allocator should be initialized"
);
assert!(allocator.total_free > 0, "Should have free space initially");
// A 1 GiB request can never fit in a 64 KiB allocator.
let huge_alloc = 1024 * 1024 * 1024; let result = allocator.allocate(huge_alloc);
match result {
Err(FsError::DiskFull { needed_bytes }) => {
// DiskFull must echo back the size of the rejected request.
assert_eq!(needed_bytes, huge_alloc);
}
Ok(_) => panic!("Should not succeed allocating more than capacity"),
Err(e) => panic!("Unexpected error: {:?}", e),
}
// Now exhaust the allocator one 4 KiB block at a time until DiskFull.
let block_size = 4096u64;
let mut allocated = 0u64;
loop {
match allocator.allocate(block_size) {
Ok(_) => {
allocated += block_size;
}
Err(FsError::DiskFull { needed_bytes }) => {
assert_eq!(needed_bytes, block_size);
break;
}
Err(e) => panic!("Unexpected error: {:?}", e),
}
// Safety valve: more than twice the capacity means ENOSPC never fired
// and the loop would otherwise spin forever.
if allocated > tiny_size * 2 {
panic!("Should have hit ENOSPC by now");
}
}
assert!(
allocated > 0,
"Should have allocated some blocks before ENOSPC"
);
}
#[test]
fn test_enospc_quota_enforcement() {
// Sets a quota barely above current usage and verifies that a write which
// would exceed it fails with DiskFull while usage stays within the quota.
with_test_pool(100, |pool| {
let fd = pool.create("/testfile.bin", 0o644).expect("Create failed");
let data = vec![0xABu8; 1024];
pool.write(fd, &data).expect("Initial write failed");
pool.close(fd).expect("Close failed");
// Leave only 100 bytes of headroom below the quota.
let used = pool.get_used_space();
pool.set_quota(used + 100).expect("Set quota failed");
let fd = pool.open("/testfile.bin", O_RDWR).expect("Open failed");
let big_data = vec![0xCDu8; 1024];
let result = pool.write(fd, &big_data);
match result {
Err(FsError::DiskFull { needed_bytes }) => {
// needed_bytes reports the size of the rejected write.
assert_eq!(needed_bytes, 1024);
}
Ok(_) => panic!("Write should have failed due to quota"),
Err(e) => panic!("Unexpected error: {:?}", e),
}
// The failed write must not have pushed usage past the quota.
let current_used = pool.get_used_space();
let quota = pool.get_quota().unwrap();
assert!(
current_used <= quota,
"Used space {} should be <= quota {}",
current_used,
quota
);
pool.close(fd).expect("Close failed");
});
}
#[test]
fn test_receive_corrupted_stream() {
// receive() must reject truncated, bit-flipped, empty, and garbage streams
// rather than importing bad data.
let src_dev_id = create_test_device(100);
let mut src_pool =
Pool::create_pool(src_dev_id, "srcpool").expect("Source pool creation failed");
let fd = src_pool
.create("/important.txt", 0o644)
.expect("Create failed");
src_pool
.write(fd, b"Critical data that must not be corrupted")
.expect("Write failed");
src_pool.close(fd).expect("Close failed");
let stream = src_pool.send().expect("Send failed");
assert!(stream.len() > 100, "Stream should have substantial data");
let dest_dev_id = create_test_device(100);
let mut dest_pool =
Pool::create_pool(dest_dev_id, "destpool").expect("Dest pool creation failed");
// Case 1: stream cut in half.
let truncated = &stream[..stream.len() / 2];
let truncated_result = dest_pool.receive(truncated);
assert!(
truncated_result.is_err(),
"Truncated stream should be rejected"
);
// Case 2: a single flipped byte inside the stream body.
// NOTE(review): 88 + 30 + 10 looks like header + record header + payload
// offset — confirm against the stream format and consider naming these
// constants.
let mut corrupted = stream.clone();
let payload_offset = 88 + 30 + 10; if corrupted.len() > payload_offset {
corrupted[payload_offset] ^= 0xFF; }
let corrupted_result = dest_pool.receive(&corrupted);
assert!(
corrupted_result.is_err(),
"Corrupted stream should be rejected (checksum mismatch)"
);
// Case 3: empty input.
let empty_result = dest_pool.receive(&[]);
assert!(empty_result.is_err(), "Empty stream should be rejected");
// Case 4: bytes that were never a stream at all.
let garbage = vec![0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xBA, 0xBE];
let garbage_result = dest_pool.receive(&garbage);
assert!(garbage_result.is_err(), "Garbage stream should be rejected");
}
#[test]
fn test_path_depth_limit() {
// Keeps nesting directories one level deeper until the filesystem rejects
// the path, then checks the rejection happened at or before MAX_PATH_DEPTH.
with_test_pool(100, |pool| {
let mut path = String::new();
let mut success_count = 0;
for i in 0..MAX_PATH_DEPTH + 10 {
path.push_str(&format!("/d{}", i));
match pool.mkdir(&path, 0o755) {
Ok(_) => success_count += 1,
Err(FsError::InvalidArgument { reason }) => {
// Either the depth or the overall length limit may fire first;
// both are acceptable rejection reasons here.
assert!(
reason.contains("depth") || reason.contains("length"),
"Should fail due to path limits, got: {}",
reason
);
break;
}
Err(e) => {
panic!("Unexpected error: {:?}", e);
}
}
}
assert!(success_count > 0, "Should create at least one directory");
assert!(
success_count <= MAX_PATH_DEPTH,
"Should not exceed MAX_PATH_DEPTH ({}), created {}",
MAX_PATH_DEPTH,
success_count
);
});
}
#[test]
fn test_path_component_length() {
// Boundary test for name/path length limits: exactly MAX_NAME_LEN is
// accepted, one byte more is rejected, and a path beyond MAX_PATH_LEN
// fails with InvalidArgument.
with_test_pool(100, |pool| {
let max_name: String = (0..MAX_NAME_LEN).map(|_| 'a').collect();
let max_path = format!("/{}", max_name);
let result = pool.create(&max_path, 0o644);
match result {
Ok(fd) => {
// A limit-length name must also survive a reopen.
pool.close(fd).expect("Close failed");
let fd = pool.open(&max_path, O_RDONLY).expect("Open failed");
pool.close(fd).expect("Close failed");
}
Err(e) => {
panic!("MAX_NAME_LEN should be valid, got error: {:?}", e);
}
}
let too_long_name: String = (0..MAX_NAME_LEN + 1).map(|_| 'b').collect();
let too_long_path = format!("/{}", too_long_name);
let result = pool.create(&too_long_path, 0o644);
assert!(
matches!(result, Err(FsError::InvalidArgument { .. })),
"Path component exceeding MAX_NAME_LEN should be rejected"
);
// Each component is "/dirNNNNN" (9 bytes), so MAX_PATH_LEN / 10 of them
// should overshoot MAX_PATH_LEN; the guard below skips the assert if not.
let many_components = MAX_PATH_LEN / 10; let very_long_path: String = (0..many_components)
.map(|i| format!("/dir{:05}", i))
.collect();
if very_long_path.len() > MAX_PATH_LEN {
let result = pool.mkdir(&very_long_path, 0o755);
assert!(
matches!(result, Err(FsError::InvalidArgument { .. })),
"Path exceeding MAX_PATH_LEN should be rejected"
);
}
});
}
#[test]
fn test_reflink_basic() {
    // Reflinking a file creates a second path with identical content and size.
    with_test_pool(100, |pool| {
        let handle = pool.create("/source.txt", 0o644).expect("Create failed");
        let payload = b"Hello, Quantum World! This is a test of reflink.";
        pool.write(handle, payload).expect("Write failed");
        pool.close(handle).expect("Close failed");

        pool.reflink("/source.txt", "/dest.txt")
            .expect("Reflink failed");

        let handle = pool.open("/dest.txt", O_RDONLY).expect("Open failed");
        let mut readback = vec![0u8; 100];
        let n = pool.read(handle, &mut readback).expect("Read failed");
        pool.close(handle).expect("Close failed");
        assert_eq!(&readback[..n], payload);

        let src_info = pool.stat("/source.txt").expect("Stat source failed");
        let dst_info = pool.stat("/dest.txt").expect("Stat dest failed");
        assert_eq!(src_info.st_size, dst_info.st_size);
    });
}
#[test]
fn test_reflink_rejects_directories() {
    // Directories are not valid reflink sources.
    with_test_pool(100, |pool| {
        pool.mkdir("/mydir", 0o755).expect("Mkdir failed");
        let outcome = pool.reflink("/mydir", "/mydir_copy");
        assert!(
            matches!(outcome, Err(FsError::IsDirectory)),
            "Reflink should reject directories"
        );
    });
}
#[test]
fn test_reflink_rejects_existing_dest() {
    // Reflink must not clobber an existing destination path.
    with_test_pool(100, |pool| {
        let handle = pool.create("/source.txt", 0o644).expect("Create failed");
        pool.close(handle).expect("Close failed");
        let handle = pool.create("/dest.txt", 0o644).expect("Create failed");
        pool.close(handle).expect("Close failed");

        let outcome = pool.reflink("/source.txt", "/dest.txt");
        assert!(
            matches!(outcome, Err(FsError::AlreadyExists)),
            "Reflink should reject existing destination"
        );
    });
}
#[test]
fn test_clone_tree() {
// clone_tree must recursively copy a directory: both files reappear under
// the backup root with identical contents, and the clone count is returned.
with_test_pool(100, |pool| {
pool.mkdir("/project", 0o755).expect("Mkdir failed");
pool.mkdir("/project/src", 0o755).expect("Mkdir failed");
let fd = pool
.create("/project/README.md", 0o644)
.expect("Create failed");
pool.write(fd, b"# My Project").expect("Write failed");
pool.close(fd).expect("Close failed");
let fd = pool
.create("/project/src/main.rs", 0o644)
.expect("Create failed");
pool.write(fd, b"fn main() {}").expect("Write failed");
pool.close(fd).expect("Close failed");
let files_cloned = pool
.clone_tree("/project", "/project_backup")
.expect("Clone tree failed");
// Two regular files exist in the tree (the subdirectory isn't counted).
assert_eq!(files_cloned, 2, "Should have cloned 2 files");
let fd = pool
.open("/project_backup/README.md", O_RDONLY)
.expect("Open failed");
let mut buf = vec![0u8; 100];
let n = pool.read(fd, &mut buf).expect("Read failed");
pool.close(fd).expect("Close failed");
assert_eq!(&buf[..n], b"# My Project");
let fd = pool
.open("/project_backup/src/main.rs", O_RDONLY)
.expect("Open failed");
// `buf` is reused from above; only the first `n` bytes are meaningful.
let n = pool.read(fd, &mut buf).expect("Read failed");
pool.close(fd).expect("Close failed");
assert_eq!(&buf[..n], b"fn main() {}");
});
}
#[test]
fn test_rename_file() {
    // After rename the old path is gone and the new path has the content.
    with_test_pool(100, |pool| {
        let handle = pool.create("/old.txt", 0o644).expect("Create failed");
        pool.write(handle, b"test data").expect("Write failed");
        pool.close(handle).expect("Close failed");

        pool.rename("/old.txt", "/new.txt").expect("Rename failed");
        assert!(pool.stat("/old.txt").is_err());

        let handle = pool.open("/new.txt", O_RDONLY).expect("Open failed");
        let mut readback = [0u8; 20];
        let n = pool.read(handle, &mut readback).expect("Read failed");
        pool.close(handle).expect("Close failed");
        assert_eq!(&readback[..n], b"test data");
    });
}
#[test]
fn test_rename_move_to_directory() {
    // rename can move a file into a subdirectory, removing the old path.
    with_test_pool(100, |pool| {
        pool.mkdir("/subdir", 0o755).expect("Mkdir failed");
        let handle = pool.create("/file.txt", 0o644).expect("Create failed");
        pool.write(handle, b"moved").expect("Write failed");
        pool.close(handle).expect("Close failed");

        pool.rename("/file.txt", "/subdir/file.txt")
            .expect("Rename failed");
        assert!(pool.stat("/file.txt").is_err());

        let handle = pool
            .open("/subdir/file.txt", O_RDONLY)
            .expect("Open failed");
        let mut readback = [0u8; 10];
        let n = pool.read(handle, &mut readback).expect("Read failed");
        pool.close(handle).expect("Close failed");
        assert_eq!(&readback[..n], b"moved");
    });
}
#[test]
fn test_hardlink() {
    // A hard link shares both content and inode number with the original.
    with_test_pool(100, |pool| {
        let handle = pool.create("/original.txt", 0o644).expect("Create failed");
        pool.write(handle, b"shared data").expect("Write failed");
        pool.close(handle).expect("Close failed");

        pool.link("/original.txt", "/hardlink.txt")
            .expect("Link failed");

        let handle = pool.open("/hardlink.txt", O_RDONLY).expect("Open failed");
        let mut readback = [0u8; 20];
        let n = pool.read(handle, &mut readback).expect("Read failed");
        pool.close(handle).expect("Close failed");
        assert_eq!(&readback[..n], b"shared data");

        let original = pool.stat("/original.txt").expect("Stat failed");
        let linked = pool.stat("/hardlink.txt").expect("Stat failed");
        assert_eq!(original.st_ino, linked.st_ino);
    });
}
#[test]
fn test_hardlink_rejects_directories() {
    with_test_pool(100, |pool| {
        // Hard links to directories are forbidden (as in POSIX); expect IsDirectory.
        pool.mkdir("/mydir", 0o755).expect("Mkdir failed");
        let outcome = pool.link("/mydir", "/mydir_link");
        assert!(
            matches!(outcome, Err(FsError::IsDirectory)),
            "Hard link should reject directories"
        );
    });
}
#[test]
fn test_symlink_basic() {
    with_test_pool(100, |pool| {
        // Create a target file and point a symlink at it.
        let handle = pool
            .create("/target.txt", 0o644)
            .expect("File creation failed");
        pool.write(handle, b"symlink target").expect("Write failed");
        pool.close(handle).expect("Close failed");

        pool.symlink("/target.txt", "/link.txt")
            .expect("Symlink creation failed");

        // readlink returns the stored target path verbatim.
        let target = pool.readlink("/link.txt").expect("Readlink failed");
        assert_eq!(target, "/target.txt");

        // stat on the link path reports a symlink mode here
        // (i.e. this pool's stat is lstat-like for this check).
        let meta = pool.stat("/link.txt").expect("Stat failed");
        assert_eq!(meta.st_mode & S_IFMT, S_IFLNK, "Should be a symlink");
    });
}
#[test]
fn test_symlink_to_directory() {
    with_test_pool(100, |pool| {
        // Unlike hard links, symlinks may target directories.
        pool.mkdir("/mydir", 0o755).expect("Mkdir failed");
        pool.symlink("/mydir", "/dirlink")
            .expect("Symlink to directory should work");
        let resolved = pool.readlink("/dirlink").expect("Readlink failed");
        assert_eq!(resolved, "/mydir");
    });
}
#[test]
fn test_symlink_dangling() {
    with_test_pool(100, |pool| {
        // Symlink creation must not require the target to exist.
        pool.symlink("/nonexistent", "/dangling_link")
            .expect("Dangling symlink creation should work");
        let resolved = pool.readlink("/dangling_link").expect("Readlink failed");
        assert_eq!(resolved, "/nonexistent");
    });
}
#[test]
fn test_readlink_not_symlink() {
    with_test_pool(100, |pool| {
        // readlink on a regular file is an error, not an empty result.
        let handle = pool
            .create("/regular.txt", 0o644)
            .expect("File creation failed");
        pool.close(handle).expect("Close failed");

        let outcome = pool.readlink("/regular.txt");
        assert!(
            matches!(outcome, Err(FsError::InvalidArgument { .. })),
            "readlink on regular file should fail"
        );
    });
}
#[test]
fn test_fcntl_lock_basic() {
    with_test_pool(100, |pool| {
        let handle = pool
            .create("/locktest.txt", 0o644)
            .expect("File creation failed");
        pool.write(handle, b"test data for locking")
            .expect("Write failed");

        // Pid 1000 takes an exclusive lock on bytes [0, 100).
        pool.fcntl_lock(handle, true, 0, 100, 1000)
            .expect("fcntl_lock failed");

        // An overlapping exclusive probe by pid 2000 must report the holder.
        let conflict = pool
            .fcntl_test_lock(handle, true, 50, 100, 2000)
            .expect("fcntl_test_lock failed");
        assert!(conflict.is_some(), "Should detect conflicting lock");
        let (is_exclusive, start, _length, holder_pid) = conflict.unwrap();
        assert!(is_exclusive);
        assert_eq!(start, 0);
        assert_eq!(holder_pid, 1000);

        // After the holder releases, the same probe sees no conflict.
        pool.fcntl_unlock(handle, 0, 100, 1000)
            .expect("fcntl_unlock failed");
        let no_conflict = pool
            .fcntl_test_lock(handle, true, 50, 100, 2000)
            .expect("fcntl_test_lock failed");
        assert!(
            no_conflict.is_none(),
            "Should have no conflict after unlock"
        );

        pool.close(handle).expect("Close failed");
    });
}
#[test]
fn test_fcntl_lock_shared() {
    with_test_pool(100, |pool| {
        let handle = pool
            .create("/shared_lock.txt", 0o644)
            .expect("File creation failed");
        pool.write(handle, b"shared lock test").expect("Write failed");

        // Two overlapping SHARED locks from different pids coexist.
        pool.fcntl_lock(handle, false, 0, 100, 1000)
            .expect("Shared lock 1 failed");
        pool.fcntl_lock(handle, false, 50, 100, 2000)
            .expect("Shared lock 2 should succeed");

        // But an exclusive probe over the shared region must conflict.
        let conflict = pool
            .fcntl_test_lock(handle, true, 75, 50, 3000)
            .expect("fcntl_test_lock failed");
        assert!(
            conflict.is_some(),
            "Exclusive lock should conflict with shared"
        );

        pool.close(handle).expect("Close failed");
    });
}
#[test]
fn test_fcntl_lock_non_overlapping() {
    with_test_pool(100, |pool| {
        let handle = pool
            .create("/nonoverlap.txt", 0o644)
            .expect("File creation failed");
        pool.write(handle, b"non-overlapping regions test data here")
            .expect("Write failed");

        // Exclusive locks on disjoint ranges [0,100) and [200,300) both succeed.
        pool.fcntl_lock(handle, true, 0, 100, 1000)
            .expect("Lock region 1 failed");
        pool.fcntl_lock(handle, true, 200, 100, 2000)
            .expect("Non-overlapping lock should succeed");

        // The gap [100,200) is unlocked, so a probe there is clean.
        let probe = pool
            .fcntl_test_lock(handle, true, 100, 100, 3000)
            .expect("fcntl_test_lock failed");
        assert!(probe.is_none(), "Gap region should have no conflict");

        pool.close(handle).expect("Close failed");
    });
}
#[test]
fn test_quota_basic() {
    with_test_pool(100, |pool| {
        // With no quota configured, every accessor reports "unlimited".
        assert!(pool.get_quota().is_none());
        assert!(!pool.is_over_quota());
        assert!(pool.get_remaining_quota().is_none());
        assert!(pool.get_quota_utilization().is_none());

        // Set a 1 GiB quota and read it back.
        let one_gib = 1024 * 1024 * 1024;
        pool.set_quota(one_gib).expect("Set quota failed");
        assert_eq!(pool.get_quota(), Some(one_gib));

        // A quota of zero clears the limit entirely.
        pool.set_quota(0).expect("Clear quota failed");
        assert!(pool.get_quota().is_none());
    });
}
#[test]
fn test_quota_utilization() {
    with_test_pool(100, |pool| {
        // 100 TiB — far larger than the test pool, so utilization is defined
        // and remaining quota cannot exceed the configured limit.
        let generous_quota = 100 * 1024 * 1024 * 1024 * 1024;
        pool.set_quota(generous_quota).expect("Set quota failed");

        let utilization = pool
            .get_quota_utilization()
            .expect("Should have utilization");
        assert!(
            (0.0..=100.0).contains(&utilization),
            "Utilization should be 0-100%"
        );

        let remaining = pool.get_remaining_quota().expect("Should have remaining");
        assert!(remaining <= generous_quota);
    });
}
#[test]
fn test_quota_over() {
    with_test_pool(100, |pool| {
        // A 1-byte quota: if the pool already uses more, it must report over-quota.
        pool.set_quota(1).expect("Set quota failed");
        let used = pool.get_used_space();
        // Guarded because a freshly created pool's metadata usage is not
        // pinned down by this test.
        if used > 1 {
            assert!(pool.is_over_quota(), "Should be over tiny quota");
        }
    });
}
#[test]
fn test_scrub_stats() {
    with_test_pool(100, |pool| {
        // Before any scrub has run, all counters start at zero.
        let stats = pool.scrub_stats();
        assert_eq!(stats.blocks_scanned, 0);
        assert_eq!(stats.errors_detected, 0);
        assert_eq!(stats.repairs_made, 0);
    });
}
#[test]
fn test_scrub_should_run() {
    with_test_pool(100, |pool| {
        // Smoke test only: scrub_should_run's result depends on pool state
        // we don't control here, so we just verify the call doesn't panic
        // at both extreme interval values.
        let _ = pool.scrub_should_run(0.0);
        let _ = pool.scrub_should_run(1000.0);
    });
}
#[test]
fn test_derive_key_different_salts_different_keys() {
    // Distinct salts must never collapse to the same derived key.
    let passphrase = "test_password_123";
    let key_a = LcpfsCrypto::derive_key(passphrase, &[1u8; 16]);
    let key_b = LcpfsCrypto::derive_key(passphrase, &[2u8; 16]);
    assert_ne!(
        key_a, key_b,
        "KDF: Different salts must produce different keys!"
    );
}
#[test]
fn test_derive_key_same_salt_same_key() {
    // The KDF must be a pure function of (passphrase, salt).
    let passphrase = "deterministic_test";
    let salt = [0xAB_u8; 16];
    let first = LcpfsCrypto::derive_key(passphrase, &salt);
    let second = LcpfsCrypto::derive_key(passphrase, &salt);
    assert_eq!(
        first, second,
        "KDF: Same passphrase and salt must produce identical keys!"
    );
}
#[test]
fn test_derive_key_different_passphrases_different_keys() {
    // Same salt, different passphrases: keys must differ.
    let shared_salt = [0x42u8; 16];
    let key_a = LcpfsCrypto::derive_key("password1", &shared_salt);
    let key_b = LcpfsCrypto::derive_key("password2", &shared_salt);
    assert_ne!(
        key_a, key_b,
        "KDF: Different passphrases must produce different keys!"
    );
}
#[test]
fn test_derive_key_output_length() {
    // Derived keys are always 256 bits, regardless of passphrase.
    let derived = LcpfsCrypto::derive_key("any passphrase", &[0u8; 16]);
    assert_eq!(derived.len(), 32, "KDF: Output must be 32 bytes!");
}
#[test]
fn test_derive_key_empty_passphrase() {
    // An empty passphrase is weak but must still yield a non-trivial key,
    // not an all-zero output.
    let derived = LcpfsCrypto::derive_key("", &[0x11u8; 16]);
    assert_ne!(
        derived, [0u8; 32],
        "KDF: Empty passphrase should still derive a key!"
    );
}
#[test]
fn test_derive_key_long_passphrase() {
    // A 10 KB passphrase must be handled without truncation issues or panics.
    let huge_passphrase = "a".repeat(10000);
    let derived = LcpfsCrypto::derive_key(&huge_passphrase, &[0x22u8; 16]);
    assert_eq!(derived.len(), 32);
    assert_ne!(derived, [0u8; 32]);
}
#[test]
fn test_derive_key_deterministic() {
    // Determinism plus salt sensitivity: repeated calls agree, while a
    // single flipped salt bit changes the result.
    let passphrase = "password";
    let salt = b"salt_for_testing";

    let run_a = LcpfsCrypto::derive_key(passphrase, salt);
    let run_b = LcpfsCrypto::derive_key(passphrase, salt);
    assert_eq!(run_a, run_b);

    let mut flipped_salt = *salt;
    flipped_salt[0] ^= 1;
    let run_c = LcpfsCrypto::derive_key(passphrase, &flipped_salt);
    assert_ne!(run_a, run_c, "Single bit change in salt must change key!");
}
#[test]
fn test_pbkdf2_explicit_different_salts() {
    // Exercise the explicit PBKDF2 entry point: distinct salts, distinct keys.
    let passphrase = "test_password";
    let key_a = LcpfsCrypto::derive_key_pbkdf2(passphrase, &[1u8; 16]);
    let key_b = LcpfsCrypto::derive_key_pbkdf2(passphrase, &[2u8; 16]);
    assert_ne!(
        key_a, key_b,
        "PBKDF2: Different salts must produce different keys!"
    );
}
#[test]
fn test_pbkdf2_explicit_deterministic() {
    // PBKDF2 must be a pure function of (passphrase, salt).
    let passphrase = "deterministic_pbkdf2_test";
    let salt = [0xCD_u8; 16];
    let first = LcpfsCrypto::derive_key_pbkdf2(passphrase, &salt);
    let second = LcpfsCrypto::derive_key_pbkdf2(passphrase, &salt);
    assert_eq!(first, second, "PBKDF2: Must be deterministic!");
}
#[test]
fn test_pbkdf2_explicit_output_length() {
    // PBKDF2 output is fixed at 256 bits.
    let derived = LcpfsCrypto::derive_key_pbkdf2("passphrase", &[0u8; 16]);
    assert_eq!(derived.len(), 32, "PBKDF2: Output must be 32 bytes!");
}
#[test]
fn test_pbkdf2_explicit_empty_salt() {
    // An empty salt is degenerate but must not panic or change output size.
    let derived = LcpfsCrypto::derive_key_pbkdf2("test_passphrase", &[]);
    assert_eq!(derived.len(), 32);
}
#[test]
fn test_encrypt_block_nonce_uniqueness() {
    // Nonce reuse under one key breaks AEAD confidentiality and authenticity,
    // so two encryptions of identical input must still get distinct nonces.
    let key = [0x42u8; 32];
    let plaintext = b"test data for nonce uniqueness";
    let txg = 100;
    let (_, first_nonce) = LcpfsCrypto::encrypt_block(&key, plaintext, txg).unwrap();
    let (_, second_nonce) = LcpfsCrypto::encrypt_block(&key, plaintext, txg).unwrap();
    assert_ne!(
        first_nonce, second_nonce,
        "SECURITY: Nonce reuse detected! Each encryption must use unique nonce."
    );
}
#[test]
fn test_encrypt_block_nonce_uniqueness_same_txg_different_data() {
    // Nonces must differ even across different payloads within one txg.
    let key = [0x42u8; 32];
    let txg = 100;
    let (_, nonce_a) = LcpfsCrypto::encrypt_block(&key, b"a", txg).unwrap();
    let (_, nonce_b) = LcpfsCrypto::encrypt_block(&key, b"b", txg).unwrap();
    assert_ne!(nonce_a, nonce_b);
}
#[test]
fn test_encrypt_decrypt_roundtrip() {
    // Encrypt then decrypt with the same key/nonce must be the identity.
    let key = [0x42u8; 32];
    let plaintext = b"secret data to encrypt and decrypt";
    let txg = 42;
    let (ciphertext, nonce) = LcpfsCrypto::encrypt_block(&key, plaintext, txg).unwrap();
    let recovered = LcpfsCrypto::decrypt_block(&key, &ciphertext, &nonce).unwrap();
    assert_eq!(
        recovered.as_slice(),
        plaintext.as_slice(),
        "Decryption must recover original plaintext"
    );
}
#[test]
fn test_encrypt_block_ciphertext_differs_from_plaintext() {
    // The cipher must actually transform the data (even with an all-zero key)
    // and append the 16-byte authentication tag.
    let key = [0u8; 32];
    let plaintext = b"plaintext that should be encrypted";
    let txg = 1;
    let (ciphertext, _) = LcpfsCrypto::encrypt_block(&key, plaintext, txg).unwrap();
    assert_ne!(
        &ciphertext[..plaintext.len()],
        plaintext.as_slice(),
        "Encryption must change data!"
    );
    // AEAD overhead: exactly one 16-byte tag.
    assert_eq!(ciphertext.len(), plaintext.len() + 16);
}
#[test]
fn test_decrypt_with_wrong_key_fails() {
    // Authenticated decryption with a different key must be rejected,
    // not return garbage plaintext.
    let right_key = [0x42u8; 32];
    let wrong_key = [0x43u8; 32];
    let txg = 1;
    let (ciphertext, nonce) = LcpfsCrypto::encrypt_block(&right_key, b"secret", txg).unwrap();
    let outcome = LcpfsCrypto::decrypt_block(&wrong_key, &ciphertext, &nonce);
    assert!(
        matches!(outcome, Err(FsError::DecryptionFailed)),
        "Decryption with wrong key must fail"
    );
}
#[test]
fn test_decrypt_tampered_ciphertext_fails() {
    // Flipping any ciphertext byte must trip the authentication check.
    let key = [0u8; 32];
    let txg = 1;
    let (mut ciphertext, nonce) = LcpfsCrypto::encrypt_block(&key, b"secret", txg).unwrap();
    ciphertext[0] ^= 0xFF;
    let outcome = LcpfsCrypto::decrypt_block(&key, &ciphertext, &nonce);
    assert!(
        matches!(outcome, Err(FsError::DecryptionFailed)),
        "Tampered ciphertext must fail authentication"
    );
}
#[test]
fn test_encrypt_empty_plaintext() {
    // Empty input: ciphertext is tag-only (16 bytes) and round-trips to empty.
    let key = [0x42u8; 32];
    let txg = 1;
    let (ciphertext, nonce) = LcpfsCrypto::encrypt_block(&key, b"", txg).unwrap();
    assert_eq!(ciphertext.len(), 16);
    let recovered = LcpfsCrypto::decrypt_block(&key, &ciphertext, &nonce).unwrap();
    assert!(recovered.is_empty());
}
#[test]
fn test_encrypt_large_data() {
    // Round-trip a 1 MiB block to exercise the cipher well beyond the
    // small inputs used by the other tests.
    let key = [0x42u8; 32];
    let plaintext = vec![0xAB_u8; 1024 * 1024];
    let txg = 1;
    let (ciphertext, nonce) = LcpfsCrypto::encrypt_block(&key, &plaintext, txg).unwrap();
    // AEAD overhead must stay a single 16-byte tag even for large inputs
    // (same invariant asserted for small inputs elsewhere in this suite).
    assert_eq!(ciphertext.len(), plaintext.len() + 16);
    let decrypted = LcpfsCrypto::decrypt_block(&key, &ciphertext, &nonce).unwrap();
    assert_eq!(decrypted, plaintext);
}
}