use super::types::{ChunkId, ChunkOffset, ChunkRange, VersionMismatch, VersionedResult};
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, RwLock};
use std::time::Instant;
/// Kind of filesystem object a manifest entry represents.
///
/// `#[repr(u8)]` pins the discriminant values so they stay stable if they are
/// ever serialized.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
#[repr(u8)]
pub enum FileType {
    #[default]
    Regular = 0,
    Directory = 1,
    Symlink = 2,
    Hardlink = 3,
    CharDevice = 4,
    BlockDevice = 5,
    Fifo = 6,
    Socket = 7,
}

impl FileType {
    /// Only regular files are guaranteed to carry content chunks.
    pub fn has_content(&self) -> bool {
        *self == Self::Regular
    }

    /// Kinds that *may* carry content: regular files plus device nodes
    /// (devices can optionally capture data, see the device constructors).
    pub fn can_have_content(&self) -> bool {
        matches!(self, Self::Regular | Self::CharDevice | Self::BlockDevice)
    }

    /// Kinds stored purely as metadata, with no content and no target:
    /// FIFOs and sockets.
    pub fn is_metadata_only(&self) -> bool {
        matches!(self, Self::Fifo | Self::Socket)
    }

    /// Symbolic or hard link.
    pub fn is_link(&self) -> bool {
        matches!(self, Self::Symlink | Self::Hardlink)
    }

    /// Character or block device node.
    pub fn is_device(&self) -> bool {
        matches!(self, Self::CharDevice | Self::BlockDevice)
    }
}
/// Unix-style ownership and mode bits attached to a manifest entry.
#[derive(Debug, Clone, Copy, Default)]
pub struct FilePermissions {
    pub uid: u32,
    pub gid: u32,
    pub mode: u32,
}

impl FilePermissions {
    /// Set-user-id mode bit.
    const SETUID_BIT: u32 = 0o4000;
    /// Set-group-id mode bit.
    const SETGID_BIT: u32 = 0o2000;
    /// Sticky mode bit.
    const STICKY_BIT: u32 = 0o1000;

    /// Permissions with explicit owner, group, and mode.
    pub fn new(uid: u32, gid: u32, mode: u32) -> Self {
        Self { uid, gid, mode }
    }

    /// root:root with mode `0o644` — default for regular files.
    pub fn default_file() -> Self {
        Self::new(0, 0, 0o644)
    }

    /// root:root with mode `0o755` — default for directories.
    pub fn default_dir() -> Self {
        Self::new(0, 0, 0o755)
    }

    /// Whether the set-user-id bit is present in `mode`.
    pub fn is_setuid(&self) -> bool {
        self.mode & Self::SETUID_BIT != 0
    }

    /// Whether the set-group-id bit is present in `mode`.
    pub fn is_setgid(&self) -> bool {
        self.mode & Self::SETGID_BIT != 0
    }

    /// Whether the sticky bit is present in `mode`.
    pub fn is_sticky(&self) -> bool {
        self.mode & Self::STICKY_BIT != 0
    }
}
/// One file's record in the manifest: identity, metadata, content chunk ids,
/// and an optimistic-concurrency version number. Deletion is a tombstone
/// (`deleted = true`); entries persist until manifest compaction.
#[derive(Debug, Clone)]
pub struct VersionedFileEntry {
/// Manifest key: the file's path.
pub path: String,
/// Kind of filesystem object (regular, directory, link, device, ...).
pub file_type: FileType,
/// Owner, group, and mode bits.
pub permissions: FilePermissions,
/// Link target when `file_type` is `Symlink`.
pub symlink_target: Option<String>,
/// Link target when `file_type` is `Hardlink`.
pub hardlink_target: Option<String>,
/// `(major, minor)` numbers, set only by the device constructors.
pub device_id: Option<(u32, u32)>,
/// Text-vs-binary flag, supplied by callers.
pub is_text: bool,
/// Stored byte size; for compressed entries this is the *compressed* size.
pub size: usize,
/// Ordered ids of the chunks holding the content.
pub chunks: Vec<ChunkId>,
/// Optional byte-offset index over `chunks` (built by `build_offset_index`);
/// enables binary-search lookups in `find_chunk_at_offset`/`chunks_for_range`.
pub chunk_offsets: Option<Vec<ChunkOffset>>,
/// Tombstone flag; set by `mark_deleted` / `VersionedManifest::remove_file`.
pub deleted: bool,
/// Codec id when the content is compressed.
pub compression_codec: Option<u8>,
/// Original size when the content is compressed.
pub uncompressed_size: Option<usize>,
/// Encoding id set by the `new_holographic*` constructors — opaque here;
/// presumably interpreted by a codec layer elsewhere (TODO confirm).
pub encoding_format: Option<u8>,
/// Per-file optimistic-concurrency version; starts at 0, bumped on update.
pub version: u64,
/// When this entry was first created.
pub created_at: Instant,
/// When this entry was last modified (content change or tombstoning).
pub modified_at: Instant,
}
impl VersionedFileEntry {
    /// Shared scaffold for every constructor: a fresh, non-deleted entry at
    /// file version 0 with no content, no link targets, no device id, and
    /// both timestamps set to "now". The public `new_*` constructors override
    /// only the fields they care about via struct-update syntax, which keeps
    /// the many variants from drifting out of sync with each other.
    fn base(path: String, file_type: FileType, permissions: FilePermissions) -> Self {
        let now = Instant::now();
        Self {
            path,
            file_type,
            permissions,
            symlink_target: None,
            hardlink_target: None,
            device_id: None,
            is_text: false,
            size: 0,
            chunks: Vec::new(),
            chunk_offsets: None,
            deleted: false,
            compression_codec: None,
            uncompressed_size: None,
            encoding_format: None,
            version: 0,
            created_at: now,
            modified_at: now,
        }
    }

    /// Maps the `is_char` flag shared by the device constructors to a `FileType`.
    fn device_type(is_char: bool) -> FileType {
        if is_char {
            FileType::CharDevice
        } else {
            FileType::BlockDevice
        }
    }

    /// Regular file with default permissions (uid 0, gid 0, mode 0o644).
    pub fn new(path: String, is_text: bool, size: usize, chunks: Vec<ChunkId>) -> Self {
        Self {
            is_text,
            size,
            chunks,
            ..Self::base(path, FileType::Regular, FilePermissions::default_file())
        }
    }

    /// Like [`Self::new`] but with an explicit file type and permissions.
    pub fn new_with_metadata(
        path: String,
        file_type: FileType,
        permissions: FilePermissions,
        is_text: bool,
        size: usize,
        chunks: Vec<ChunkId>,
    ) -> Self {
        Self {
            is_text,
            size,
            chunks,
            ..Self::base(path, file_type, permissions)
        }
    }

    /// Symbolic link pointing at `target`; carries no content chunks.
    pub fn new_symlink(path: String, target: String, permissions: FilePermissions) -> Self {
        Self {
            symlink_target: Some(target),
            ..Self::base(path, FileType::Symlink, permissions)
        }
    }

    /// Hard link pointing at `target`; carries no content chunks of its own.
    pub fn new_hardlink(path: String, target: String, permissions: FilePermissions) -> Self {
        Self {
            hardlink_target: Some(target),
            ..Self::base(path, FileType::Hardlink, permissions)
        }
    }

    /// Character (`is_char = true`) or block device node with the given
    /// major/minor numbers and no captured content.
    pub fn new_device(
        path: String,
        is_char: bool,
        major: u32,
        minor: u32,
        permissions: FilePermissions,
    ) -> Self {
        Self {
            device_id: Some((major, minor)),
            ..Self::base(path, Self::device_type(is_char), permissions)
        }
    }

    /// Device node that additionally captures content (`size` bytes in `chunks`).
    pub fn new_device_with_data(
        path: String,
        is_char: bool,
        major: u32,
        minor: u32,
        permissions: FilePermissions,
        size: usize,
        chunks: Vec<ChunkId>,
    ) -> Self {
        Self {
            device_id: Some((major, minor)),
            size,
            chunks,
            ..Self::base(path, Self::device_type(is_char), permissions)
        }
    }

    /// Device node with compressed captured content; `size` stores the
    /// *compressed* byte count.
    #[allow(clippy::too_many_arguments)]
    pub fn new_device_compressed(
        path: String,
        is_char: bool,
        major: u32,
        minor: u32,
        permissions: FilePermissions,
        compressed_size: usize,
        uncompressed_size: usize,
        compression_codec: u8,
        chunks: Vec<ChunkId>,
    ) -> Self {
        Self {
            device_id: Some((major, minor)),
            size: compressed_size,
            chunks,
            compression_codec: Some(compression_codec),
            uncompressed_size: Some(uncompressed_size),
            ..Self::base(path, Self::device_type(is_char), permissions)
        }
    }

    /// Contentless special entry (directories, FIFOs, sockets, ...).
    pub fn new_special(path: String, file_type: FileType, permissions: FilePermissions) -> Self {
        Self::base(path, file_type, permissions)
    }

    /// Compressed regular file with default permissions; `size` stores the
    /// *compressed* byte count.
    pub fn new_compressed(
        path: String,
        is_text: bool,
        compressed_size: usize,
        uncompressed_size: usize,
        compression_codec: u8,
        chunks: Vec<ChunkId>,
    ) -> Self {
        Self {
            is_text,
            size: compressed_size,
            chunks,
            compression_codec: Some(compression_codec),
            uncompressed_size: Some(uncompressed_size),
            ..Self::base(path, FileType::Regular, FilePermissions::default_file())
        }
    }

    /// [`Self::new_compressed`] with explicit permissions.
    pub fn new_compressed_with_metadata(
        path: String,
        permissions: FilePermissions,
        is_text: bool,
        compressed_size: usize,
        uncompressed_size: usize,
        compression_codec: u8,
        chunks: Vec<ChunkId>,
    ) -> Self {
        Self {
            is_text,
            size: compressed_size,
            chunks,
            compression_codec: Some(compression_codec),
            uncompressed_size: Some(uncompressed_size),
            ..Self::base(path, FileType::Regular, permissions)
        }
    }

    /// Successor entry with replaced content and a bumped file version.
    ///
    /// The successor is always `deleted: false` (updating a tombstoned entry
    /// resurrects it), and the offset index is dropped because it described
    /// the old chunk layout. Compression/encoding metadata is carried over
    /// unchanged — use [`Self::update_compressed`] when replacing compressed
    /// content so the codec and uncompressed size stay accurate.
    pub fn update(&self, new_chunks: Vec<ChunkId>, new_size: usize) -> Self {
        Self {
            size: new_size,
            chunks: new_chunks,
            chunk_offsets: None,
            deleted: false,
            version: self.version + 1,
            modified_at: Instant::now(),
            ..self.clone()
        }
    }

    /// Successor entry with replaced *compressed* content and fresh codec
    /// metadata; same versioning/tombstone semantics as [`Self::update`].
    pub fn update_compressed(
        &self,
        new_chunks: Vec<ChunkId>,
        compressed_size: usize,
        uncompressed_size: usize,
        compression_codec: u8,
    ) -> Self {
        Self {
            size: compressed_size,
            chunks: new_chunks,
            chunk_offsets: None,
            deleted: false,
            compression_codec: Some(compression_codec),
            uncompressed_size: Some(uncompressed_size),
            version: self.version + 1,
            modified_at: Instant::now(),
            ..self.clone()
        }
    }

    /// Tombstoned successor: same content, `deleted` set, version bumped.
    pub fn mark_deleted(&self) -> Self {
        let mut updated = self.clone();
        updated.deleted = true;
        updated.version += 1;
        updated.modified_at = Instant::now();
        updated
    }

    /// Regular file whose chunks use the given `encoding_format` id
    /// (opaque at this layer; presumably interpreted by the holographic
    /// codec elsewhere — TODO confirm).
    pub fn new_holographic(
        path: String,
        is_text: bool,
        size: usize,
        chunks: Vec<ChunkId>,
        encoding_format: u8,
    ) -> Self {
        Self {
            is_text,
            size,
            chunks,
            encoding_format: Some(encoding_format),
            ..Self::base(path, FileType::Regular, FilePermissions::default_file())
        }
    }

    /// True for plain `Regular` entries.
    pub fn is_regular_file(&self) -> bool {
        self.file_type == FileType::Regular
    }

    /// True for entry kinds that never carry content (FIFOs, sockets).
    pub fn is_metadata_only(&self) -> bool {
        self.file_type.is_metadata_only()
    }

    /// Builds the byte-offset index from per-chunk sizes, laying chunks out
    /// back to back. Enables O(log n) lookups in
    /// [`Self::find_chunk_at_offset`] and exact results in
    /// [`Self::chunks_for_range`].
    ///
    /// # Panics
    /// Panics if `chunk_sizes.len() != self.chunks.len()`.
    pub fn build_offset_index(&mut self, chunk_sizes: &[usize]) {
        assert_eq!(
            chunk_sizes.len(),
            self.chunks.len(),
            "chunk_sizes must match chunks length"
        );
        let mut offset = 0;
        let offsets: Vec<ChunkOffset> = self
            .chunks
            .iter()
            .zip(chunk_sizes.iter())
            .map(|(&chunk_id, &size)| {
                let co = ChunkOffset::new(chunk_id, offset, size);
                offset += size; // contiguous layout: next chunk starts where this ends
                co
            })
            .collect();
        self.chunk_offsets = Some(offsets);
    }

    /// Convenience: [`Self::new`] plus an offset index built from `chunk_sizes`.
    pub fn new_with_offsets(
        path: String,
        is_text: bool,
        size: usize,
        chunks: Vec<ChunkId>,
        chunk_sizes: Vec<usize>,
    ) -> Self {
        let mut entry = Self::new(path, is_text, size, chunks);
        entry.build_offset_index(&chunk_sizes);
        entry
    }

    /// Convenience: [`Self::new_holographic`] plus an offset index.
    pub fn new_holographic_with_offsets(
        path: String,
        is_text: bool,
        size: usize,
        chunks: Vec<ChunkId>,
        chunk_sizes: Vec<usize>,
        encoding_format: u8,
    ) -> Self {
        let mut entry = Self::new_holographic(path, is_text, size, chunks, encoding_format);
        entry.build_offset_index(&chunk_sizes);
        entry
    }

    /// Whether an offset index has been built for this entry.
    pub fn has_offset_index(&self) -> bool {
        self.chunk_offsets.is_some()
    }

    /// Locates the chunk containing `byte_offset`, returning the chunk id and
    /// the offset *within* that chunk. Returns `None` at or past end-of-file.
    ///
    /// With an offset index this is a binary search; without one it falls back
    /// to the first chunk with the raw offset, which is only meaningful for
    /// single-chunk entries — build the index for exact multi-chunk lookups.
    pub fn find_chunk_at_offset(&self, byte_offset: usize) -> Option<(ChunkId, usize)> {
        if byte_offset >= self.size {
            return None;
        }
        if let Some(ref offsets) = self.chunk_offsets {
            // Comparator orients the search: an entry starting after the target
            // compares Greater, one ending at or before it compares Less.
            match offsets.binary_search_by(|co| {
                if byte_offset < co.byte_offset {
                    std::cmp::Ordering::Greater
                } else if byte_offset >= co.end_offset() {
                    std::cmp::Ordering::Less
                } else {
                    std::cmp::Ordering::Equal
                }
            }) {
                Ok(idx) => {
                    let co = &offsets[idx];
                    Some((co.chunk_id, byte_offset - co.byte_offset))
                }
                // Only reachable if the index does not actually cover `size`
                // (i.e. a stale or inconsistent index).
                Err(_) => None,
            }
        } else {
            self.chunks.first().map(|&id| (id, byte_offset))
        }
    }

    /// Returns per-chunk sub-ranges covering `[start_offset, start_offset + length)`,
    /// clamped to the entry's size. Empty when `length == 0` or the range
    /// starts at/past end-of-file.
    ///
    /// Without an offset index, every chunk is returned as
    /// `ChunkRange::new(id, 0, 0)` — length 0 appears to act as a
    /// "whole chunk" sentinel here; NOTE(review): confirm against the
    /// consumers of `ChunkRange`.
    pub fn chunks_for_range(&self, start_offset: usize, length: usize) -> Vec<ChunkRange> {
        if start_offset >= self.size || length == 0 {
            return Vec::new();
        }
        // saturating_add guards against overflow for pathological lengths;
        // the min() clamp then bounds the range to the file size as before.
        let end_offset = start_offset.saturating_add(length).min(self.size);
        if let Some(ref offsets) = self.chunk_offsets {
            let mut ranges = Vec::new();
            for co in offsets.iter() {
                if co.end_offset() <= start_offset {
                    continue; // wholly before the requested range
                }
                if co.byte_offset >= end_offset {
                    break; // offsets are sorted; nothing further can overlap
                }
                // Intersection of the request with this chunk, in chunk-local bytes.
                let chunk_start = start_offset.saturating_sub(co.byte_offset);
                let chunk_end = (end_offset - co.byte_offset).min(co.byte_length);
                let range_length = chunk_end - chunk_start;
                if range_length > 0 {
                    ranges.push(ChunkRange::new(co.chunk_id, chunk_start, range_length));
                }
            }
            ranges
        } else {
            self.chunks
                .iter()
                .map(|&chunk_id| ChunkRange::new(chunk_id, 0, 0))
                .collect()
        }
    }

    /// Total size implied by the offset index (end of the last chunk), or the
    /// stored `size` field when no index exists.
    pub fn computed_size(&self) -> usize {
        if let Some(ref offsets) = self.chunk_offsets {
            offsets.last().map(|co| co.end_offset()).unwrap_or(0)
        } else {
            self.size
        }
    }
}
/// Thread-safe, versioned file manifest. Entries are added, updated, and
/// tombstoned under optimistic per-file version checks; a global version is
/// bumped on every successful mutation. Cloned handles share the same state
/// via `Arc` (see the manual `Clone` impl).
pub struct VersionedManifest {
/// Entry storage; removal only tombstones entries until `compact` runs.
files: Arc<RwLock<Vec<VersionedFileEntry>>>,
/// Maps path -> index into `files`.
file_index: Arc<RwLock<HashMap<String, usize>>>,
/// Global version counter, bumped on every successful mutation.
global_version: Arc<AtomicU64>,
/// Running chunk count over stored entries; NOT decreased by `remove_file`
/// (tombstones keep their chunks counted) and recomputed exactly by `compact`.
total_chunks: Arc<AtomicU64>,
}
impl VersionedManifest {
    /// Creates an empty manifest at global version 0.
    pub fn new() -> Self {
        Self {
            files: Arc::new(RwLock::new(Vec::new())),
            file_index: Arc::new(RwLock::new(HashMap::new())),
            global_version: Arc::new(AtomicU64::new(0)),
            total_chunks: Arc::new(AtomicU64::new(0)),
        }
    }

    /// Current global version; bumped by every successful mutation.
    pub fn version(&self) -> u64 {
        self.global_version.load(Ordering::Acquire)
    }

    /// Snapshot of the entry at `path` plus the global version it was read at.
    /// Tombstoned entries are still returned; check `entry.deleted`.
    pub fn get_file(&self, path: &str) -> Option<(VersionedFileEntry, u64)> {
        // Lock order everywhere in this type: `files` before `file_index`.
        let files = self.files.read().unwrap();
        let index = self.file_index.read().unwrap();
        let version = self.version();
        index
            .get(path)
            .and_then(|&idx| files.get(idx))
            .map(|entry| (entry.clone(), version))
    }

    /// Whether `path` is indexed (tombstoned entries included).
    pub fn contains(&self, path: &str) -> bool {
        self.file_index.read().unwrap().contains_key(path)
    }

    /// Adds a brand-new file and returns the new global version.
    ///
    /// # Errors
    /// Returns `VersionMismatch { expected: 0, actual: <global version> }`
    /// when `path` is already indexed (even as a tombstone).
    pub fn add_file(&self, entry: VersionedFileEntry) -> VersionedResult<u64> {
        let mut files = self.files.write().unwrap();
        let mut index = self.file_index.write().unwrap();
        if index.contains_key(&entry.path) {
            return Err(VersionMismatch {
                expected: 0,
                actual: self.version(),
            });
        }
        let idx = files.len();
        let chunk_count = entry.chunks.len() as u64;
        // Insert the path first so the entry itself can be moved into storage
        // instead of being cloned wholesale (only the path string is cloned).
        index.insert(entry.path.clone(), idx);
        files.push(entry);
        self.total_chunks.fetch_add(chunk_count, Ordering::AcqRel);
        Ok(self.global_version.fetch_add(1, Ordering::AcqRel) + 1)
    }

    /// Optimistically replaces the entry at `path`, expecting its current
    /// per-file version to equal `expected_file_version`. On success the
    /// stored entry's version becomes `expected_file_version + 1` (the
    /// incoming entry's own `version` field is overwritten), `modified_at` is
    /// refreshed, and the new global version is returned.
    ///
    /// # Errors
    /// `VersionMismatch` with `actual: 0` when the path is unknown, or with
    /// the entry's real version on a concurrent-modification conflict.
    pub fn update_file(
        &self,
        path: &str,
        new_entry: VersionedFileEntry,
        expected_file_version: u64,
    ) -> VersionedResult<u64> {
        let mut files = self.files.write().unwrap();
        let index = self.file_index.read().unwrap();
        let &idx = index.get(path).ok_or(VersionMismatch {
            expected: expected_file_version,
            actual: 0,
        })?;
        let current = &files[idx];
        if current.version != expected_file_version {
            return Err(VersionMismatch {
                expected: expected_file_version,
                actual: current.version,
            });
        }
        // Keep the global chunk counter in sync with the content swap.
        let old_chunks = current.chunks.len() as u64;
        let new_chunks = new_entry.chunks.len() as u64;
        if new_chunks > old_chunks {
            self.total_chunks
                .fetch_add(new_chunks - old_chunks, Ordering::AcqRel);
        } else if old_chunks > new_chunks {
            self.total_chunks
                .fetch_sub(old_chunks - new_chunks, Ordering::AcqRel);
        }
        files[idx] = new_entry;
        files[idx].version = expected_file_version + 1;
        files[idx].modified_at = Instant::now();
        Ok(self.global_version.fetch_add(1, Ordering::AcqRel) + 1)
    }

    /// Tombstones the entry at `path`; its chunks still count toward
    /// `total_chunks` until [`Self::compact`]. Same optimistic-versioning
    /// contract and errors as [`Self::update_file`].
    pub fn remove_file(&self, path: &str, expected_file_version: u64) -> VersionedResult<u64> {
        let mut files = self.files.write().unwrap();
        let index = self.file_index.read().unwrap();
        let &idx = index.get(path).ok_or(VersionMismatch {
            expected: expected_file_version,
            actual: 0,
        })?;
        let current = &files[idx];
        if current.version != expected_file_version {
            return Err(VersionMismatch {
                expected: expected_file_version,
                actual: current.version,
            });
        }
        files[idx].deleted = true;
        files[idx].version = expected_file_version + 1;
        files[idx].modified_at = Instant::now();
        Ok(self.global_version.fetch_add(1, Ordering::AcqRel) + 1)
    }

    /// Paths of all live (non-tombstoned) entries.
    pub fn list_files(&self) -> Vec<String> {
        self.files
            .read()
            .unwrap()
            .iter()
            .filter(|f| !f.deleted)
            .map(|f| f.path.clone())
            .collect()
    }

    /// Snapshot of every stored entry, tombstones included.
    pub fn iter(&self) -> Vec<VersionedFileEntry> {
        self.files.read().unwrap().clone()
    }

    /// Number of stored entries, tombstones included.
    pub fn len(&self) -> usize {
        self.files.read().unwrap().len()
    }

    /// True when no entries are stored at all.
    pub fn is_empty(&self) -> bool {
        self.files.read().unwrap().is_empty()
    }

    /// Running chunk count (see the `total_chunks` field for caveats about
    /// tombstoned entries).
    pub fn total_chunks(&self) -> u64 {
        self.total_chunks.load(Ordering::Acquire)
    }

    /// Physically drops tombstoned entries, rebuilds the path index, and
    /// recomputes `total_chunks` exactly. Guarded by the *global* version:
    /// fails unless it equals `expected_version`. The global version is bumped
    /// only when at least one entry was removed. Returns the removal count.
    pub fn compact(&self, expected_version: u64) -> VersionedResult<usize> {
        let mut files = self.files.write().unwrap();
        let mut index = self.file_index.write().unwrap();
        let current_version = self.version();
        if current_version != expected_version {
            return Err(VersionMismatch {
                expected: expected_version,
                actual: current_version,
            });
        }
        let old_len = files.len();
        files.retain(|f| !f.deleted);
        let removed = old_len - files.len();
        // Indices shift after removal, so the path index is rebuilt from scratch.
        index.clear();
        for (idx, file) in files.iter().enumerate() {
            index.insert(file.path.clone(), idx);
        }
        // Tombstoned chunks were never subtracted; recompute the exact total.
        let total: u64 = files.iter().map(|f| f.chunks.len() as u64).sum();
        self.total_chunks.store(total, Ordering::Release);
        if removed > 0 {
            self.global_version.fetch_add(1, Ordering::AcqRel);
        }
        Ok(removed)
    }

    /// Aggregate counters over the current state. The `files` list is read in
    /// one consistent snapshot; `total_chunks` and `version` are read from
    /// their atomics separately. `total_size_bytes` sums every entry's `size`,
    /// tombstones included.
    pub fn stats(&self) -> ManifestStats {
        let files = self.files.read().unwrap();
        let total_files = files.len();
        let deleted_files = files.iter().filter(|f| f.deleted).count();
        let active_files = total_files - deleted_files;
        let total_size = files.iter().map(|f| f.size).sum();
        ManifestStats {
            total_files,
            active_files,
            deleted_files,
            total_chunks: self.total_chunks(),
            total_size_bytes: total_size,
            version: self.version(),
        }
    }
}
impl Default for VersionedManifest {
fn default() -> Self {
Self::new()
}
}
// Shallow handle clone: the clone shares the same underlying storage, index,
// and counters through `Arc`, so mutations via either handle are visible to
// both. (Equivalent to what `#[derive(Clone)]` would generate, since every
// field is an `Arc`; kept explicit so the sharing semantics are documented.)
impl Clone for VersionedManifest {
fn clone(&self) -> Self {
Self {
files: Arc::clone(&self.files),
file_index: Arc::clone(&self.file_index),
global_version: Arc::clone(&self.global_version),
total_chunks: Arc::clone(&self.total_chunks),
}
}
}
/// Point-in-time counters produced by `VersionedManifest::stats`.
#[derive(Debug, Clone)]
pub struct ManifestStats {
/// Entries stored, tombstones included.
pub total_files: usize,
/// Entries not marked deleted.
pub active_files: usize,
/// Tombstoned entries awaiting compaction.
pub deleted_files: usize,
/// Value of the manifest's running chunk counter at snapshot time.
pub total_chunks: u64,
/// Sum of every stored entry's `size` field (tombstones included).
pub total_size_bytes: usize,
/// Global manifest version at snapshot time.
pub version: u64,
}
#[cfg(test)]
mod tests {
use super::*;
// A fresh manifest starts empty at global version 0.
#[test]
fn test_manifest_creation() {
let manifest = VersionedManifest::new();
assert_eq!(manifest.version(), 0);
assert!(manifest.is_empty());
}
// Adding a file bumps the global version to 1 and counts its chunks.
#[test]
fn test_add_file() {
let manifest = VersionedManifest::new();
let entry = VersionedFileEntry::new("test.txt".to_string(), true, 100, vec![1, 2, 3]);
let version = manifest.add_file(entry).unwrap();
assert_eq!(version, 1);
assert_eq!(manifest.len(), 1);
assert_eq!(manifest.total_chunks(), 3);
}
// Updating with the expected per-file version (0) succeeds, bumps the
// global version to 2 and the per-file version to 1, and swaps the content.
#[test]
fn test_update_file() {
let manifest = VersionedManifest::new();
let entry = VersionedFileEntry::new("test.txt".to_string(), true, 100, vec![1, 2, 3]);
manifest.add_file(entry.clone()).unwrap();
let updated = entry.update(vec![4, 5], 200);
let version = manifest.update_file("test.txt", updated, 0).unwrap();
assert_eq!(version, 2);
let (retrieved, _) = manifest.get_file("test.txt").unwrap();
assert_eq!(retrieved.version, 1);
assert_eq!(retrieved.size, 200);
assert_eq!(retrieved.chunks, vec![4, 5]);
}
// Removal is a tombstone: the entry is still retrievable, flagged deleted,
// with its per-file version bumped.
#[test]
fn test_remove_file() {
let manifest = VersionedManifest::new();
let entry = VersionedFileEntry::new("test.txt".to_string(), true, 100, vec![1, 2, 3]);
manifest.add_file(entry).unwrap();
manifest.remove_file("test.txt", 0).unwrap();
let (retrieved, _) = manifest.get_file("test.txt").unwrap();
assert!(retrieved.deleted);
assert_eq!(retrieved.version, 1);
}
// len() counts tombstones until compact() physically drops them.
#[test]
fn test_compact() {
let manifest = VersionedManifest::new();
for i in 0..10 {
let entry = VersionedFileEntry::new(format!("file{}.txt", i), true, 100, vec![i]);
manifest.add_file(entry).unwrap();
}
for i in 0..5 {
manifest.remove_file(&format!("file{}.txt", i), 0).unwrap();
}
assert_eq!(manifest.len(), 10);
let removed = manifest.compact(manifest.version()).unwrap();
assert_eq!(removed, 5);
assert_eq!(manifest.len(), 5);
}
// stats() reflects file/chunk counts with no deletions involved.
#[test]
fn test_stats() {
let manifest = VersionedManifest::new();
for i in 0..5 {
let entry = VersionedFileEntry::new(format!("file{}.txt", i), true, 100, vec![i]);
manifest.add_file(entry).unwrap();
}
let stats = manifest.stats();
assert_eq!(stats.total_files, 5);
assert_eq!(stats.active_files, 5);
assert_eq!(stats.deleted_files, 0);
assert_eq!(stats.total_chunks, 5);
}
// The offset index lays chunks out back to back (0, 100, 200).
#[test]
fn test_build_offset_index() {
let mut entry = VersionedFileEntry::new(
"test.txt".to_string(),
true,
300, vec![0, 1, 2], );
entry.build_offset_index(&[100, 100, 100]);
assert!(entry.has_offset_index());
let offsets = entry.chunk_offsets.as_ref().unwrap();
assert_eq!(offsets.len(), 3);
assert_eq!(offsets[0].byte_offset, 0);
assert_eq!(offsets[0].byte_length, 100);
assert_eq!(offsets[1].byte_offset, 100);
assert_eq!(offsets[1].byte_length, 100);
assert_eq!(offsets[2].byte_offset, 200);
assert_eq!(offsets[2].byte_length, 100);
}
// Lookup returns (chunk id, offset within chunk); boundaries fall into the
// following chunk; offsets at/after EOF return None.
#[test]
fn test_find_chunk_at_offset() {
let mut entry = VersionedFileEntry::new(
"test.txt".to_string(),
true,
256,
vec![10, 20, 30, 40], );
entry.build_offset_index(&[64, 64, 64, 64]);
let (chunk_id, within) = entry.find_chunk_at_offset(0).unwrap();
assert_eq!(chunk_id, 10);
assert_eq!(within, 0);
let (chunk_id, within) = entry.find_chunk_at_offset(63).unwrap();
assert_eq!(chunk_id, 10);
assert_eq!(within, 63);
let (chunk_id, within) = entry.find_chunk_at_offset(64).unwrap();
assert_eq!(chunk_id, 20);
assert_eq!(within, 0);
let (chunk_id, within) = entry.find_chunk_at_offset(200).unwrap();
assert_eq!(chunk_id, 40);
assert_eq!(within, 8);
assert!(entry.find_chunk_at_offset(256).is_none());
assert!(entry.find_chunk_at_offset(1000).is_none());
}
// Range queries: within one chunk, straddling a boundary, the whole file,
// zero length, and past-EOF starts.
#[test]
fn test_chunks_for_range() {
let mut entry = VersionedFileEntry::new(
"test.txt".to_string(),
true,
256,
vec![10, 20, 30, 40], );
entry.build_offset_index(&[64, 64, 64, 64]);
let ranges = entry.chunks_for_range(10, 20);
assert_eq!(ranges.len(), 1);
assert_eq!(ranges[0].chunk_id, 10);
assert_eq!(ranges[0].start_within_chunk, 10);
assert_eq!(ranges[0].length, 20);
let ranges = entry.chunks_for_range(50, 30);
assert_eq!(ranges.len(), 2);
assert_eq!(ranges[0].chunk_id, 10);
assert_eq!(ranges[0].start_within_chunk, 50);
assert_eq!(ranges[0].length, 14); assert_eq!(ranges[1].chunk_id, 20);
assert_eq!(ranges[1].start_within_chunk, 0);
assert_eq!(ranges[1].length, 16);
let ranges = entry.chunks_for_range(0, 256);
assert_eq!(ranges.len(), 4);
for (i, range) in ranges.iter().enumerate() {
assert_eq!(range.start_within_chunk, 0);
assert_eq!(range.length, 64);
assert!(range.is_full_chunk(64));
assert_eq!(range.chunk_id, [10, 20, 30, 40][i]);
}
let ranges = entry.chunks_for_range(0, 0);
assert!(ranges.is_empty());
let ranges = entry.chunks_for_range(300, 50);
assert!(ranges.is_empty());
}
// Constructor convenience: index built on creation; computed_size matches
// the sum of the chunk sizes.
#[test]
fn test_new_with_offsets() {
let entry = VersionedFileEntry::new_with_offsets(
"test.txt".to_string(),
true,
192,
vec![1, 2, 3],
vec![64, 64, 64],
);
assert!(entry.has_offset_index());
assert_eq!(entry.computed_size(), 192);
}
}