use crate::backend::native::{
graph_file::buffers::ReadBuffer,
graph_file::buffers::WriteBuffer,
types::NativeResult,
};
#[cfg(feature = "v2_experimental")]
use crate::backend::native::NativeBackendError;
#[cfg(feature = "v2_experimental")]
use memmap2::{MmapMut, MmapOptions};
use std::io::{Seek, SeekFrom, Write};
/// Low-level file operations for the native graph backend: size validation,
/// file growth, draining of buffered writes, and (feature-gated) memory-map
/// maintenance.
pub struct FileManager;

impl FileManager {
    /// Validates that `file_size` is consistent with the on-disk layout
    /// described by `persistent_header`.
    ///
    /// Thin delegation to `GraphFileValidator::validate_file_size`; see that
    /// function for the actual rules applied.
    pub fn validate_file_size(
        file_size: u64,
        persistent_header: &crate::backend::native::persistent_header::PersistentHeaderV2,
    ) -> NativeResult<()> {
        crate::backend::native::graph_file::validation::GraphFileValidator::validate_file_size(
            file_size,
            persistent_header,
        )
    }

    /// Extends `file` by `additional_bytes`. A request of zero bytes is a
    /// no-op and returns `Ok(())` without touching the file.
    ///
    /// # Errors
    /// Propagates I/O errors from `metadata`/`set_len`, and fails with
    /// `InvalidInput` if the new size would overflow `u64`.
    pub fn grow_file(file: &mut std::fs::File, additional_bytes: u64) -> NativeResult<()> {
        if additional_bytes == 0 {
            return Ok(());
        }
        let current_size = file.metadata()?.len();
        // Guard against u64 wrap-around: `current_size + additional_bytes`
        // silently wraps in release builds, which would *shrink* the file.
        let new_size = current_size.checked_add(additional_bytes).ok_or_else(|| {
            std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                "file size overflow while growing file",
            )
        })?;
        file.set_len(new_size)?;
        // No flush here: `set_len` is a direct syscall and `Write::flush` on
        // `std::fs::File` is a documented no-op.
        Ok(())
    }

    /// Drains `write_buffer` to `file` and flushes.
    ///
    /// On success the buffer's pending operations are empty and all data has
    /// been handed to the OS (no fsync/durability guarantee is made here).
    pub fn flush_complete(
        file: &mut std::fs::File,
        write_buffer: &mut WriteBuffer,
    ) -> NativeResult<()> {
        Self::flush_write_buffer(file, write_buffer)?;
        file.flush()?;
        Ok(())
    }

    /// Writes every pending `(offset, data)` pair from `write_buffer` to
    /// `file`, in ascending offset order so the seek pattern is mostly
    /// sequential.
    fn flush_write_buffer(
        file: &mut std::fs::File,
        write_buffer: &mut WriteBuffer,
    ) -> NativeResult<()> {
        let mut pending: Vec<_> = write_buffer.flush().into_iter().collect();
        // Offsets are assumed unique per buffered operation, so an unstable
        // sort is safe and avoids the stable sort's allocation.
        pending.sort_unstable_by_key(|(offset, _)| *offset);
        for (offset, data) in pending {
            file.seek(SeekFrom::Start(offset))?;
            file.write_all(&data)?;
        }
        Ok(())
    }

    /// Resets `read_buffer` to an empty state so stale cached bytes are never
    /// served after the underlying file changes.
    pub fn invalidate_read_buffer(read_buffer: &mut ReadBuffer) {
        read_buffer.offset = 0;
        read_buffer.size = 0;
    }

    /// Ensures `file` is at least `len` bytes long and that `mmap` covers at
    /// least `len` bytes, growing/remapping as needed.
    ///
    /// A thread-local depth counter rejects runaway re-entrancy (depth > 10)
    /// as a corruption error rather than recursing indefinitely.
    #[cfg(feature = "v2_experimental")]
    pub fn mmap_ensure_size(
        file: &mut std::fs::File,
        file_path: &std::path::Path,
        len: u64,
        mmap: &mut Option<MmapMut>,
    ) -> NativeResult<()> {
        thread_local! {
            static MMAP_DEPTH: std::cell::RefCell<u32> = const { std::cell::RefCell::new(0) };
        }
        // Check *before* incrementing: the previous code incremented first and
        // then returned early via `?` on the depth error, skipping the
        // decrement below and permanently inflating this thread's counter.
        MMAP_DEPTH.with(|d| {
            let mut depth = d.borrow_mut();
            if *depth >= 10 {
                return Err(NativeBackendError::CorruptNodeRecord {
                    node_id: -1,
                    reason: format!("mmap recursion depth exceeded: {}", *depth + 1),
                });
            }
            *depth += 1;
            Ok(())
        })?;
        // The real work runs in a closure so the decrement below executes on
        // both the success and the error path.
        let result = (|| {
            let current_size = file.metadata()?.len();
            if len > current_size {
                Self::grow_file(file, len - current_size)?;
            }
            Self::ensure_mmap_covers(file, file_path, len, mmap)?;
            Ok(())
        })();
        MMAP_DEPTH.with(|d| {
            *d.borrow_mut() -= 1;
        });
        result
    }

    /// Replaces `mmap` when it is absent or smaller than `len` bytes, mapping
    /// `max(len, current file size)` bytes.
    ///
    /// Callers must have grown the file to at least `len` bytes first (as
    /// `mmap_ensure_size` does); mapping beyond EOF risks faults on access.
    #[cfg(feature = "v2_experimental")]
    fn ensure_mmap_covers(
        file: &mut std::fs::File,
        file_path: &std::path::Path,
        len: u64,
        mmap: &mut Option<MmapMut>,
    ) -> NativeResult<()> {
        // NOTE(review): `file_path` is unused here and in the caller's mmap
        // logic — presumably reserved for diagnostics; kept for signature
        // stability. The binding silences the unused-variable warning.
        let _ = file_path;
        let needs_remap = match mmap {
            None => true,
            Some(current_mmap) => len > current_mmap.len() as u64,
        };
        if needs_remap {
            let file_size = file.metadata()?.len();
            let required_size = len.max(file_size);
            // SAFETY: mapping a file is unsafe because external truncation or
            // concurrent modification of the file while mapped is undefined
            // behavior. Assumes this process is the sole writer of the graph
            // file — TODO(review): confirm against backend locking strategy.
            *mmap = unsafe {
                Some(
                    MmapOptions::new()
                        .len(required_size as usize)
                        .map_mut(&file.try_clone()?)?,
                )
            };
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::{Read, Seek, SeekFrom, Write};
    use tempfile::tempfile;

    #[test]
    fn test_grow_file() {
        // A fresh temp file starts empty; growing it sets the exact length.
        let mut file = tempfile().unwrap();
        assert_eq!(file.metadata().unwrap().len(), 0);
        FileManager::grow_file(&mut file, 1024).unwrap();
        assert_eq!(file.metadata().unwrap().len(), 1024);
    }

    #[test]
    fn test_grow_file_zero_bytes() {
        // Growing by zero bytes is a no-op: the size must stay unchanged.
        let mut file = tempfile().unwrap();
        file.write_all(b"test data").unwrap();
        let size_before = file.metadata().unwrap().len();
        FileManager::grow_file(&mut file, 0).unwrap();
        assert_eq!(file.metadata().unwrap().len(), size_before);
    }

    #[test]
    fn test_flush_complete() {
        // Two buffered writes at distinct offsets must land on disk and the
        // buffer must be drained afterwards.
        let mut file = tempfile().unwrap();
        let mut buffer = WriteBuffer::new(10);
        buffer.add(100, b"flush_test".to_vec());
        buffer.add(120, b"data".to_vec());
        FileManager::flush_complete(&mut file, &mut buffer).unwrap();
        assert!(buffer.operations.is_empty());

        let mut first = [0u8; 10];
        file.seek(SeekFrom::Start(100)).unwrap();
        file.read_exact(&mut first).unwrap();
        assert_eq!(&first, b"flush_test");

        let mut second = [0u8; 4];
        file.seek(SeekFrom::Start(120)).unwrap();
        file.read_exact(&mut second).unwrap();
        assert_eq!(&second, b"data");
    }

    #[test]
    fn test_invalidate_read_buffer() {
        // Invalidation zeroes both the cached offset and the cached size.
        let mut rb = ReadBuffer::new();
        rb.offset = 1000;
        rb.size = 512;
        FileManager::invalidate_read_buffer(&mut rb);
        assert_eq!(rb.offset, 0);
        assert_eq!(rb.size, 0);
    }

    #[test]
    fn test_validate_file_size() {
        use crate::backend::native::persistent_header::PersistentHeaderV2;
        // A freshly constructed v2 header should accept a 1 KiB file.
        let header = PersistentHeaderV2::new_v2();
        assert!(FileManager::validate_file_size(1024u64, &header).is_ok());
    }

    #[cfg(feature = "v2_experimental")]
    #[test]
    fn test_mmap_ensure_size() {
        use std::path::PathBuf;
        // Ensuring 2048 bytes on an empty file must grow the file and
        // produce a mapping that covers the requested length.
        let mut file = tempfile().unwrap();
        let path = PathBuf::from("test_mmap");
        let mut mapping: Option<MmapMut> = None;
        FileManager::mmap_ensure_size(&mut file, &path, 2048, &mut mapping).unwrap();
        let map = mapping.as_ref().expect("mapping should exist after ensure");
        assert!(map.len() >= 2048);
        assert!(file.metadata().unwrap().len() >= 2048);
    }
}