1use std::collections::HashMap;
2use std::sync::Arc;
3use std::time::{SystemTime, UNIX_EPOCH};
4
5use rand::RngCore;
6
7use crate::allocator::{BitmapAllocator, SlotAllocator};
8use crate::block_store::BlockStore;
9use crate::codec::{
10 read_encrypted_object, read_encrypted_raw, write_encrypted_object, write_encrypted_raw,
11 ObjectCodec, PostcardCodec,
12};
13use crate::crypto::CryptoEngine;
14use crate::error::{FsError, FsResult};
15use crate::model::*;
16use crate::transaction::TransactionManager;
17
/// Top-level encrypted copy-on-write filesystem driver.
///
/// All durable state lives in the block store; this struct holds the
/// in-memory view: the committed superblock, a block allocator, and a
/// write buffer of not-yet-flushed file modifications.
pub struct FilesystemCore {
    /// Backing block device.
    store: Arc<dyn BlockStore>,
    /// Encrypts/decrypts block payloads.
    crypto: Arc<dyn CryptoEngine>,
    /// Serialization codec for on-disk objects.
    codec: PostcardCodec,
    /// Tracks which blocks are in use.
    allocator: BitmapAllocator,
    /// Commits superblocks atomically and recovers the latest on open.
    txn: TransactionManager,
    /// Currently committed superblock; `None` until init/open.
    superblock: Option<Superblock>,
    /// Next inode id to hand out (monotonically increasing).
    next_inode_id: InodeId,
    /// Pending file writes keyed by slash-trimmed path.
    write_buffer: HashMap<String, DirtyFile>,
    /// Block holding the previous committed superblock; freed on next commit.
    last_superblock_block: Option<u64>,
}
40
/// One step in a resolved directory chain: a directory's inode and page,
/// plus the index of the child entry the chain descends through.
struct AncestorEntry {
    /// The ancestor directory's inode.
    inode: Inode,
    /// The ancestor's directory page (its entry list).
    dir_page: DirectoryPage,
    /// Index into `dir_page.entries` of the child on the resolved path.
    child_index: usize,
}
48
/// Buffered, not-yet-flushed state of one file with pending writes.
struct DirtyFile {
    /// Modified chunk plaintexts keyed by chunk index.
    dirty_chunks: HashMap<u64, Vec<u8>>,
    /// Inode as last read from disk; flushed metadata derives from it.
    base_inode: Inode,
    /// Extent map as last read from disk, kept sorted by `chunk_index`.
    extent_map: ExtentMap,
    /// Current (buffered) file size in bytes.
    size: u64,
    /// True once any write has touched this file since it was loaded.
    metadata_dirty: bool,
}
66
/// Usable plaintext payload per block after reserving 200 bytes for
/// encryption/codec overhead (nonce, tag, framing).
///
/// Returns 0 when `block_size` is too small to carry any payload;
/// callers treat 0 as "store unusable for file data".
fn max_chunk_payload(block_size: usize) -> usize {
    // saturating_sub expresses the clamp-at-zero branch directly.
    block_size.saturating_sub(200)
}
79
/// Current wall-clock time as whole seconds since the Unix epoch.
/// A system clock set before the epoch yields 0 rather than an error.
fn now_secs() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
86
87impl FilesystemCore {
88 pub fn new(store: Arc<dyn BlockStore>, crypto: Arc<dyn CryptoEngine>) -> Self {
90 let total_blocks = store.total_blocks();
91 Self {
92 store,
93 crypto,
94 codec: PostcardCodec,
95 allocator: BitmapAllocator::new(total_blocks),
96 txn: TransactionManager::new(),
97 superblock: None,
98 next_inode_id: 1,
99 write_buffer: HashMap::new(),
100 last_superblock_block: None,
101 }
102 }
103
    /// Formats the store as a fresh, empty filesystem.
    ///
    /// Writes the plaintext storage header, an empty encrypted root
    /// directory (page + inode), and commits the generation-1 superblock
    /// through the transaction manager.
    pub fn init_filesystem(&mut self) -> FsResult<()> {
        let block_size = self.store.block_size() as u32;
        let total_blocks = self.store.total_blocks();

        // Header block layout: [len: u32 LE][serialized header][fill].
        // The block is pre-filled with random bytes so the unused tail
        // carries no recognizable pattern.
        let header = StorageHeader::new(block_size, total_blocks);
        let header_bytes = self.codec.serialize_object(&header)?;
        let bs = self.store.block_size();
        let mut block = vec![0u8; bs];
        rand::thread_rng().fill_bytes(&mut block);
        let len = header_bytes.len() as u32;
        block[..4].copy_from_slice(&len.to_le_bytes());
        block[4..4 + header_bytes.len()].copy_from_slice(&header_bytes);
        self.store.write_block(BLOCK_STORAGE_HEADER, &block)?;

        // Empty root directory page, written encrypted.
        let root_inode_id = self.alloc_inode_id();
        let dir_page = DirectoryPage::new();
        let dir_page_block = self.allocator.allocate()?;
        write_encrypted_object(
            self.store.as_ref(),
            self.crypto.as_ref(),
            &self.codec,
            dir_page_block,
            ObjectKind::DirectoryPage,
            &dir_page,
        )?;

        // Root directory inode referencing the empty page.
        let ts = now_secs();
        let root_inode = Inode {
            id: root_inode_id,
            kind: InodeKind::Directory,
            size: 0,
            directory_page_ref: ObjectRef::new(dir_page_block),
            extent_map_ref: ObjectRef::null(),
            created_at: ts,
            modified_at: ts,
        };
        let root_inode_block = self.allocator.allocate()?;
        write_encrypted_object(
            self.store.as_ref(),
            self.crypto.as_ref(),
            &self.codec,
            root_inode_block,
            ObjectKind::Inode,
            &root_inode,
        )?;

        // First superblock: generation 1, rooted at the new inode.
        let sb = Superblock {
            generation: 1,
            root_inode_ref: ObjectRef::new(root_inode_block),
        };
        self.superblock = Some(sb.clone());

        self.txn.commit(
            self.store.as_ref(),
            self.crypto.as_ref(),
            &self.codec,
            &self.allocator,
            &sb,
        )?;

        Ok(())
    }
174
    /// Opens an existing filesystem from the store.
    ///
    /// Validates the storage header, recovers the most recent root
    /// pointer, decrypts and checksum-verifies the superblock it
    /// references, then rebuilds the allocator by walking the live tree.
    pub fn open(&mut self) -> FsResult<()> {
        let header = self.read_storage_header()?;
        if !header.is_valid() {
            return Err(FsError::InvalidSuperblock);
        }

        // `was_b` records which root-pointer slot the latest commit used.
        let (rp, was_b) = TransactionManager::recover_latest(self.store.as_ref(), &self.codec)?
            .ok_or(FsError::InvalidRootPointer)?;

        let sb: Superblock = read_encrypted_object(
            self.store.as_ref(),
            self.crypto.as_ref(),
            &self.codec,
            rp.superblock_ref.block_id,
        )?;

        // The root pointer carries a BLAKE3 hash of the serialized
        // superblock; a mismatch means the referenced block is stale
        // or corrupt, so refuse to mount.
        let sb_bytes = self.codec.serialize_object(&sb)?;
        let checksum = blake3::hash(&sb_bytes);
        if *checksum.as_bytes() != rp.checksum {
            return Err(FsError::InvalidSuperblock);
        }

        self.txn = TransactionManager::from_recovered(rp.generation, was_b);
        self.superblock = Some(sb.clone());
        self.last_superblock_block = Some(rp.superblock_ref.block_id);

        self.rebuild_allocator(&sb)?;

        Ok(())
    }
211
212 fn split_path(path: &str) -> FsResult<(Vec<&str>, &str)> {
219 let trimmed = path.trim_matches('/');
220 if trimmed.is_empty() {
221 return Err(FsError::Internal("empty path".into()));
222 }
223 let parts: Vec<&str> = trimmed.split('/').collect();
224 let (dirs, leaf) = parts.split_at(parts.len() - 1);
225 Ok((dirs.to_vec(), leaf[0]))
226 }
227
228 fn split_dir_path(path: &str) -> Vec<&str> {
230 let trimmed = path.trim_matches('/');
231 if trimmed.is_empty() {
232 return Vec::new();
233 }
234 trimmed.split('/').collect()
235 }
236
    /// Walks `components` downward from `root_inode`, loading each
    /// directory's inode and page along the way.
    ///
    /// Returns the ancestor chain (one entry per traversed directory,
    /// each recording which child index was followed) plus the final
    /// directory's inode and page. Fails with `DirectoryNotFound` for a
    /// missing component and `NotADirectory` for a file component.
    fn resolve_dir_chain(
        &self,
        components: &[&str],
        root_inode: &Inode,
    ) -> FsResult<(Vec<AncestorEntry>, Inode, DirectoryPage)> {
        let mut ancestors: Vec<AncestorEntry> = Vec::new();
        let mut current_inode = root_inode.clone();
        let mut current_dir_page: DirectoryPage =
            self.read_obj(current_inode.directory_page_ref.block_id)?;

        for component in components {
            let idx = current_dir_page
                .entries
                .iter()
                .position(|e| e.name == *component)
                .ok_or_else(|| FsError::DirectoryNotFound(component.to_string()))?;

            let entry = &current_dir_page.entries[idx];
            if entry.kind != InodeKind::Directory {
                return Err(FsError::NotADirectory(component.to_string()));
            }

            let child_inode: Inode = self.read_obj(entry.inode_ref.block_id)?;
            let child_dir_page: DirectoryPage =
                self.read_obj(child_inode.directory_page_ref.block_id)?;

            // Record the step we are about to take, then descend.
            ancestors.push(AncestorEntry {
                inode: current_inode,
                dir_page: current_dir_page,
                child_index: idx,
            });

            current_inode = child_inode;
            current_dir_page = child_dir_page;
        }

        Ok((ancestors, current_inode, current_dir_page))
    }
281
    /// Copy-on-write commit of a modified directory page up to the root.
    ///
    /// Writes `new_dir_page` plus a new inode for `target_inode` to fresh
    /// blocks, then rewrites every ancestor (page + inode) leaf-to-root so
    /// each parent references its child's new block, commits a new
    /// superblock (generation + 1), and finally frees every superseded
    /// block. Old blocks are freed only *after* the superblock commit, so
    /// a crash mid-way leaves the previous tree fully intact.
    fn commit_cow_chain(
        &mut self,
        sb: &Superblock,
        ancestors: &[AncestorEntry],
        target_inode: &Inode,
        new_dir_page: &DirectoryPage,
    ) -> FsResult<()> {
        // Blocks superseded by this commit; freed after the new
        // superblock is durable.
        let mut stale_blocks: Vec<u64> = Vec::new();

        stale_blocks.push(target_inode.directory_page_ref.block_id);
        if ancestors.is_empty() {
            // Target *is* the root: its inode block is the root ref.
            stale_blocks.push(sb.root_inode_ref.block_id);
        } else {
            // Otherwise the deepest ancestor's entry names the old block.
            let last = ancestors.last().unwrap();
            stale_blocks.push(last.dir_page.entries[last.child_index].inode_ref.block_id);
        }

        let mut new_dp_block = self.allocator.allocate()?;
        self.write_obj(new_dp_block, ObjectKind::DirectoryPage, new_dir_page)?;

        let mut new_inode = target_inode.clone();
        new_inode.directory_page_ref = ObjectRef::new(new_dp_block);
        new_inode.modified_at = now_secs();
        let mut new_inode_block = self.allocator.allocate()?;
        self.write_obj(new_inode_block, ObjectKind::Inode, &new_inode)?;

        // Walk leaf-to-root; rev_idx is the ancestor's position in
        // `ancestors` (0 == root).
        for (i, ancestor) in ancestors.iter().rev().enumerate() {
            stale_blocks.push(ancestor.inode.directory_page_ref.block_id);
            let rev_idx = ancestors.len() - 1 - i;
            if rev_idx == 0 {
                stale_blocks.push(sb.root_inode_ref.block_id);
            } else {
                let parent = &ancestors[rev_idx - 1];
                stale_blocks.push(
                    parent.dir_page.entries[parent.child_index]
                        .inode_ref
                        .block_id,
                );
            }

            // Re-point this ancestor's entry at the freshly written child.
            let mut parent_dp = ancestor.dir_page.clone();
            parent_dp.entries[ancestor.child_index].inode_ref = ObjectRef::new(new_inode_block);

            new_dp_block = self.allocator.allocate()?;
            self.write_obj(new_dp_block, ObjectKind::DirectoryPage, &parent_dp)?;

            let mut parent_inode = ancestor.inode.clone();
            parent_inode.directory_page_ref = ObjectRef::new(new_dp_block);
            parent_inode.modified_at = now_secs();
            new_inode_block = self.allocator.allocate()?;
            self.write_obj(new_inode_block, ObjectKind::Inode, &parent_inode)?;
        }

        // `new_inode_block` is now the new root inode.
        let new_sb = Superblock {
            generation: sb.generation + 1,
            root_inode_ref: ObjectRef::new(new_inode_block),
        };
        self.commit_superblock(new_sb)?;

        // Safe to reclaim the old chain now the commit is durable.
        for block_id in stale_blocks {
            let _ = self.allocator.free(block_id);
        }

        Ok(())
    }
365
    /// COW-rewrites a directory subchain *without* committing a superblock.
    ///
    /// Like `commit_cow_chain`, but stops at the top of `sub_ancestors`
    /// and returns the new topmost inode block so the caller (`rename`)
    /// can splice it into another page before performing one real commit.
    /// Superseded block ids are appended to `stale_blocks`; the caller
    /// frees them after its commit succeeds.
    fn cow_subchain(
        &mut self,
        sub_ancestors: &[AncestorEntry],
        target_inode: &Inode,
        new_dir_page: &DirectoryPage,
        stale_blocks: &mut Vec<u64>,
    ) -> FsResult<u64> {
        stale_blocks.push(target_inode.directory_page_ref.block_id);
        let mut new_dp_block = self.allocator.allocate()?;
        self.write_obj(new_dp_block, ObjectKind::DirectoryPage, new_dir_page)?;

        let mut new_inode = target_inode.clone();
        new_inode.directory_page_ref = ObjectRef::new(new_dp_block);
        new_inode.modified_at = now_secs();
        let mut new_inode_block = self.allocator.allocate()?;
        self.write_obj(new_inode_block, ObjectKind::Inode, &new_inode)?;

        // Rewrite each ancestor leaf-to-top, re-pointing its child entry.
        for ancestor in sub_ancestors.iter().rev() {
            stale_blocks.push(
                ancestor.dir_page.entries[ancestor.child_index]
                    .inode_ref
                    .block_id,
            );
            stale_blocks.push(ancestor.inode.directory_page_ref.block_id);

            let mut parent_dp = ancestor.dir_page.clone();
            parent_dp.entries[ancestor.child_index].inode_ref = ObjectRef::new(new_inode_block);

            new_dp_block = self.allocator.allocate()?;
            self.write_obj(new_dp_block, ObjectKind::DirectoryPage, &parent_dp)?;

            let mut parent_inode = ancestor.inode.clone();
            parent_inode.directory_page_ref = ObjectRef::new(new_dp_block);
            parent_inode.modified_at = now_secs();
            new_inode_block = self.allocator.allocate()?;
            self.write_obj(new_inode_block, ObjectKind::Inode, &parent_inode)?;
        }

        Ok(new_inode_block)
    }
420
    /// Creates an empty file at `path`.
    ///
    /// Buffered writes are flushed first because the COW commit below
    /// re-reads the on-disk tree. Allocates an empty extent map and a new
    /// inode, appends a directory entry, and commits the modified
    /// directory chain. Fails with `FileAlreadyExists` if the name exists.
    pub fn create_file(&mut self, path: &str) -> FsResult<()> {
        let (dir_parts, leaf) = Self::split_path(path)?;
        self.validate_name(leaf)?;
        self.flush_all()?;
        let sb = self
            .superblock
            .as_ref()
            .ok_or(FsError::NotInitialized)?
            .clone();

        let root_inode: Inode = self.read_obj(sb.root_inode_ref.block_id)?;
        let (ancestors, target_inode, mut dir_page) =
            self.resolve_dir_chain(&dir_parts, &root_inode)?;

        if dir_page.entries.iter().any(|e| e.name == leaf) {
            return Err(FsError::FileAlreadyExists(leaf.to_string()));
        }

        // New files start with an empty (but allocated) extent map.
        let extent_map = ExtentMap::new();
        let em_block = self.allocator.allocate()?;
        self.write_obj(em_block, ObjectKind::ExtentMap, &extent_map)?;

        let inode_id = self.alloc_inode_id();
        let ts = now_secs();
        let file_inode = Inode {
            id: inode_id,
            kind: InodeKind::File,
            size: 0,
            directory_page_ref: ObjectRef::null(),
            extent_map_ref: ObjectRef::new(em_block),
            created_at: ts,
            modified_at: ts,
        };
        let inode_block = self.allocator.allocate()?;
        self.write_obj(inode_block, ObjectKind::Inode, &file_inode)?;

        dir_page.entries.push(DirectoryEntry {
            name: leaf.to_string(),
            inode_ref: ObjectRef::new(inode_block),
            inode_id,
            kind: InodeKind::File,
        });

        self.commit_cow_chain(&sb, &ancestors, &target_inode, &dir_page)?;
        Ok(())
    }
475
    /// Buffered write of `data` at byte `offset` into the file at `path`.
    ///
    /// Nothing is persisted here: touched chunks are staged in
    /// `write_buffer` and written out by `flush_file`/`flush_all` (on
    /// `sync` or before any namespace-changing operation). Writing past
    /// EOF zero-fills the gap. Errors if the file does not exist or the
    /// block size cannot carry any payload.
    pub fn write_file(&mut self, path: &str, offset: u64, data: &[u8]) -> FsResult<()> {
        if data.is_empty() {
            return Ok(());
        }

        let chunk_size = max_chunk_payload(self.store.block_size());
        if chunk_size == 0 {
            return Err(FsError::DataTooLarge(data.len()));
        }

        let path_key = path.trim_matches('/').to_string();

        // Reuse buffered state if present; otherwise load the inode and
        // extent map from disk to seed a fresh DirtyFile.
        let mut dirty = match self.write_buffer.remove(&path_key) {
            Some(d) => d,
            None => {
                let (dir_parts, leaf) = Self::split_path(path)?;
                let sb = self.superblock.as_ref().ok_or(FsError::NotInitialized)?;
                let root_inode: Inode = self.read_obj(sb.root_inode_ref.block_id)?;
                let (_, _, dir_page) = self.resolve_dir_chain(&dir_parts, &root_inode)?;
                let entry = dir_page
                    .entries
                    .iter()
                    .find(|e| e.name == leaf)
                    .ok_or_else(|| FsError::FileNotFound(leaf.to_string()))?;
                if entry.kind != InodeKind::File {
                    return Err(FsError::NotAFile(leaf.to_string()));
                }
                let file_inode: Inode = self.read_obj(entry.inode_ref.block_id)?;
                let mut extent_map: ExtentMap =
                    self.read_obj(file_inode.extent_map_ref.block_id)?;
                // Keep sorted so binary_search_by_key below is valid.
                extent_map.entries.sort_by_key(|e| e.chunk_index);
                DirtyFile {
                    dirty_chunks: HashMap::new(),
                    base_inode: file_inode.clone(),
                    extent_map,
                    size: file_inode.size,
                    metadata_dirty: false,
                }
            }
        };

        let old_size = dirty.size as usize;
        let write_start = offset as usize;
        let write_end = write_start + data.len();
        let new_size = std::cmp::max(old_size, write_end);

        // When appending past EOF, start at the old EOF chunk so every
        // gap chunk is materialized (zero-filled); otherwise start at the
        // first chunk the write touches.
        let first_chunk = if write_start >= old_size {
            old_size / chunk_size
        } else {
            write_start / chunk_size
        };
        let last_chunk = (new_size - 1) / chunk_size;

        for chunk_idx in first_chunk..=last_chunk {
            let chunk_file_start = chunk_idx * chunk_size;
            let chunk_file_end = std::cmp::min(chunk_file_start + chunk_size, new_size);
            let chunk_len = chunk_file_end - chunk_file_start;
            let chunk_idx_u64 = chunk_idx as u64;

            // First touch of this chunk: seed it with existing on-disk
            // data (if any) so the unwritten parts survive.
            if !dirty.dirty_chunks.contains_key(&chunk_idx_u64) {
                let mut buf = vec![0u8; chunk_len];
                if chunk_file_start < old_size {
                    if let Ok(pos) = dirty
                        .extent_map
                        .entries
                        .binary_search_by_key(&chunk_idx_u64, |e| e.chunk_index)
                    {
                        let existing = &dirty.extent_map.entries[pos];
                        let raw = read_encrypted_raw(
                            self.store.as_ref(),
                            self.crypto.as_ref(),
                            &self.codec,
                            existing.data_ref.block_id,
                        )?;
                        // Clamp to both the recorded plaintext length and
                        // what the block actually returned.
                        let copy_len = std::cmp::min(existing.plaintext_len as usize, chunk_len);
                        let src_len = std::cmp::min(copy_len, raw.len());
                        buf[..src_len].copy_from_slice(&raw[..src_len]);
                    }
                }
                dirty.dirty_chunks.insert(chunk_idx_u64, buf);
            }

            // Grow the staged chunk if this write extends it.
            let chunk_buf = dirty.dirty_chunks.get_mut(&chunk_idx_u64).unwrap();
            if chunk_buf.len() < chunk_len {
                chunk_buf.resize(chunk_len, 0);
            }

            // Copy the overlapping slice of `data` into this chunk.
            let overlap_start = std::cmp::max(chunk_file_start, write_start);
            let overlap_end = std::cmp::min(chunk_file_end, write_end);
            if overlap_start < overlap_end {
                let data_off = overlap_start - write_start;
                let chunk_off = overlap_start - chunk_file_start;
                let len = overlap_end - overlap_start;
                chunk_buf[chunk_off..chunk_off + len]
                    .copy_from_slice(&data[data_off..data_off + len]);
            }
        }

        dirty.size = new_size as u64;
        dirty.metadata_dirty = true;

        self.write_buffer.insert(path_key, dirty);
        Ok(())
    }
591
    /// Reads up to `len` bytes from `offset` in the file at `path`.
    ///
    /// Files with unflushed writes are served from the write buffer;
    /// clean files are decrypted from disk. Reads past EOF return the
    /// available prefix (possibly empty).
    ///
    /// NOTE(review): the clean path decrypts the *entire* file via
    /// `read_all_chunks` and then slices — O(file size) per read.
    /// `read_all_chunks` concatenates extents in index order with no
    /// hole-filling, so this assumes extent maps are dense; the write
    /// path does materialize gap chunks, but confirm no other producer
    /// can create a sparse map.
    pub fn read_file(&self, path: &str, offset: u64, len: usize) -> FsResult<Vec<u8>> {
        let path_key = path.trim_matches('/');

        if let Some(dirty) = self.write_buffer.get(path_key) {
            return self.read_file_buffered(dirty, offset, len);
        }

        let (dir_parts, leaf) = Self::split_path(path)?;
        let sb = self.superblock.as_ref().ok_or(FsError::NotInitialized)?;
        let root_inode: Inode = self.read_obj(sb.root_inode_ref.block_id)?;
        let (_, _, dir_page) = self.resolve_dir_chain(&dir_parts, &root_inode)?;

        let entry = dir_page
            .entries
            .iter()
            .find(|e| e.name == leaf)
            .ok_or_else(|| FsError::FileNotFound(leaf.to_string()))?;

        if entry.kind != InodeKind::File {
            return Err(FsError::NotAFile(leaf.to_string()));
        }

        let file_inode: Inode = self.read_obj(entry.inode_ref.block_id)?;
        let extent_map: ExtentMap = self.read_obj(file_inode.extent_map_ref.block_id)?;

        let full_data = self.read_all_chunks(&extent_map)?;

        // Clamp the requested window to the decrypted data.
        let start = offset as usize;
        if start >= full_data.len() {
            return Ok(Vec::new());
        }
        let end = std::cmp::min(start + len, full_data.len());
        Ok(full_data[start..end].to_vec())
    }
630
631 pub fn list_directory(&self, path: &str) -> FsResult<Vec<DirListEntry>> {
635 let sb = self.superblock.as_ref().ok_or(FsError::NotInitialized)?;
636 let root_inode: Inode = self.read_obj(sb.root_inode_ref.block_id)?;
637
638 let components = Self::split_dir_path(path);
639 let (_, _, dir_page) = self.resolve_dir_chain(&components, &root_inode)?;
640
641 let dir_prefix = {
642 let trimmed = path.trim_matches('/');
643 if trimmed.is_empty() {
644 String::new()
645 } else {
646 format!("{}/", trimmed)
647 }
648 };
649
650 let mut result = Vec::new();
651 for entry in &dir_page.entries {
652 let inode: Inode = self.read_obj(entry.inode_ref.block_id)?;
653 let size = if entry.kind == InodeKind::File {
655 let full_path = format!("{}{}", dir_prefix, entry.name);
656 if let Some(dirty) = self.write_buffer.get(&full_path) {
657 dirty.size
658 } else {
659 inode.size
660 }
661 } else {
662 inode.size
663 };
664 result.push(DirListEntry {
665 name: entry.name.clone(),
666 kind: entry.kind,
667 size,
668 inode_id: entry.inode_id,
669 });
670 }
671 Ok(result)
672 }
673
    /// Creates an empty directory at `path`.
    ///
    /// Mirrors `create_file`: flushes buffered writes, allocates an empty
    /// directory page and inode, appends an entry to the parent, and
    /// commits the modified chain. Fails with `DirectoryAlreadyExists`
    /// if the name is taken.
    pub fn create_directory(&mut self, path: &str) -> FsResult<()> {
        let (dir_parts, leaf) = Self::split_path(path)?;
        self.validate_name(leaf)?;
        self.flush_all()?;
        let sb = self
            .superblock
            .as_ref()
            .ok_or(FsError::NotInitialized)?
            .clone();
        let root_inode: Inode = self.read_obj(sb.root_inode_ref.block_id)?;
        let (ancestors, target_inode, mut dir_page) =
            self.resolve_dir_chain(&dir_parts, &root_inode)?;

        if dir_page.entries.iter().any(|e| e.name == leaf) {
            return Err(FsError::DirectoryAlreadyExists(leaf.to_string()));
        }

        // New directories start with an empty page.
        let sub_dp = DirectoryPage::new();
        let sub_dp_block = self.allocator.allocate()?;
        self.write_obj(sub_dp_block, ObjectKind::DirectoryPage, &sub_dp)?;

        let inode_id = self.alloc_inode_id();
        let ts = now_secs();
        let dir_inode = Inode {
            id: inode_id,
            kind: InodeKind::Directory,
            size: 0,
            directory_page_ref: ObjectRef::new(sub_dp_block),
            extent_map_ref: ObjectRef::null(),
            created_at: ts,
            modified_at: ts,
        };
        let inode_block = self.allocator.allocate()?;
        self.write_obj(inode_block, ObjectKind::Inode, &dir_inode)?;

        dir_page.entries.push(DirectoryEntry {
            name: leaf.to_string(),
            inode_ref: ObjectRef::new(inode_block),
            inode_id,
            kind: InodeKind::Directory,
        });

        self.commit_cow_chain(&sb, &ancestors, &target_inode, &dir_page)?;
        Ok(())
    }
723
    /// Removes the file or (empty) directory at `path`.
    ///
    /// Discards any buffered writes for the doomed path, COW-commits the
    /// parent chain with the entry removed, then frees the removed
    /// object's blocks (inode plus page, or extent map plus data chunks).
    /// Non-empty directories are rejected with `DirectoryNotEmpty`.
    pub fn remove_file(&mut self, path: &str) -> FsResult<()> {
        // Drop unflushed data for this path *before* flushing the rest,
        // so it is never written out just to be deleted.
        let path_key = path.trim_matches('/').to_string();
        self.write_buffer.remove(&path_key);
        self.flush_all()?;
        let (dir_parts, leaf) = Self::split_path(path)?;
        let sb = self
            .superblock
            .as_ref()
            .ok_or(FsError::NotInitialized)?
            .clone();
        let root_inode: Inode = self.read_obj(sb.root_inode_ref.block_id)?;
        let (ancestors, target_inode, mut dir_page) =
            self.resolve_dir_chain(&dir_parts, &root_inode)?;

        let idx = dir_page
            .entries
            .iter()
            .position(|e| e.name == leaf)
            .ok_or_else(|| FsError::FileNotFound(leaf.to_string()))?;

        // Collect every block owned by the removed object.
        let removed_entry = &dir_page.entries[idx];
        let mut stale_blocks: Vec<u64> = Vec::new();
        stale_blocks.push(removed_entry.inode_ref.block_id);
        let removed_inode: Inode = self.read_obj(removed_entry.inode_ref.block_id)?;

        match removed_entry.kind {
            InodeKind::Directory => {
                let sub_page: DirectoryPage =
                    self.read_obj(removed_inode.directory_page_ref.block_id)?;
                if !sub_page.entries.is_empty() {
                    return Err(FsError::DirectoryNotEmpty(leaf.to_string()));
                }
                stale_blocks.push(removed_inode.directory_page_ref.block_id);
            }
            InodeKind::File => {
                if !removed_inode.extent_map_ref.is_null() {
                    stale_blocks.push(removed_inode.extent_map_ref.block_id);
                    let extent_map: ExtentMap =
                        self.read_obj(removed_inode.extent_map_ref.block_id)?;
                    for ext in &extent_map.entries {
                        stale_blocks.push(ext.data_ref.block_id);
                    }
                }
            }
        }

        dir_page.entries.remove(idx);
        self.commit_cow_chain(&sb, &ancestors, &target_inode, &dir_page)?;

        // Reclaim only after the commit above has made the removal durable.
        for block_id in stale_blocks {
            let _ = self.allocator.free(block_id);
        }

        Ok(())
    }
783
    /// Renames/moves `old_path` to `new_path` in a single COW commit.
    ///
    /// Same-directory renames just rewrite one entry's name. Cross-
    /// directory moves COW-rewrite the source and destination subtrees
    /// below their deepest common ancestor (via `cow_subchain`), splice
    /// both new subtree roots into the common ancestor's page, and commit
    /// once — so the move is atomic at the superblock level.
    /// Fails if the destination name exists or if the move would place a
    /// directory inside itself.
    pub fn rename(&mut self, old_path: &str, new_path: &str) -> FsResult<()> {
        let (old_dir, old_leaf) = Self::split_path(old_path)?;
        let (new_dir, new_leaf) = Self::split_path(new_path)?;
        self.validate_name(new_leaf)?;
        self.flush_all()?;

        let sb = self
            .superblock
            .as_ref()
            .ok_or(FsError::NotInitialized)?
            .clone();
        let root_inode: Inode = self.read_obj(sb.root_inode_ref.block_id)?;

        if old_dir == new_dir {
            // Simple case: rename within one directory page.
            let (ancestors, target_inode, mut dir_page) =
                self.resolve_dir_chain(&old_dir, &root_inode)?;

            if dir_page.entries.iter().any(|e| e.name == new_leaf) {
                return Err(FsError::FileAlreadyExists(new_leaf.to_string()));
            }

            let entry = dir_page
                .entries
                .iter_mut()
                .find(|e| e.name == old_leaf)
                .ok_or_else(|| FsError::FileNotFound(old_leaf.to_string()))?;

            entry.name = new_leaf.to_string();
            self.commit_cow_chain(&sb, &ancestors, &target_inode, &dir_page)?;
        } else {
            // Reject moving a directory into its own subtree: the
            // destination directory path must not start with the full
            // source path.
            let src_full: Vec<&str> = old_dir
                .iter()
                .copied()
                .chain(std::iter::once(old_leaf))
                .collect();
            if new_dir.len() >= src_full.len() && new_dir[..src_full.len()] == src_full[..] {
                return Err(FsError::Internal(
                    "cannot move a directory into itself".into(),
                ));
            }

            // Deepest common ancestor of source and destination dirs.
            let common_len = old_dir
                .iter()
                .zip(new_dir.iter())
                .take_while(|(a, b)| a == b)
                .count();
            let common_parts = &old_dir[..common_len];
            let src_remaining = &old_dir[common_len..];
            let dst_remaining = &new_dir[common_len..];

            let (common_ancestors, common_inode, common_dir_page) =
                self.resolve_dir_chain(common_parts, &root_inode)?;

            // Source/destination may *be* the common directory.
            let (full_src_ancestors, src_inode, src_dir_page) = if src_remaining.is_empty() {
                (Vec::new(), common_inode.clone(), common_dir_page.clone())
            } else {
                self.resolve_dir_chain(src_remaining, &common_inode)?
            };

            let (full_dst_ancestors, dst_inode, dst_dir_page) = if dst_remaining.is_empty() {
                (Vec::new(), common_inode.clone(), common_dir_page.clone())
            } else {
                self.resolve_dir_chain(dst_remaining, &common_inode)?
            };

            if dst_dir_page.entries.iter().any(|e| e.name == new_leaf) {
                return Err(FsError::FileAlreadyExists(new_leaf.to_string()));
            }
            let src_idx = src_dir_page
                .entries
                .iter()
                .position(|e| e.name == old_leaf)
                .ok_or_else(|| FsError::FileNotFound(old_leaf.to_string()))?;

            let mut moved_entry = src_dir_page.entries[src_idx].clone();
            moved_entry.name = new_leaf.to_string();

            let mut stale_blocks: Vec<u64> = Vec::new();

            // Both subtree splices land in this copy of the common page,
            // which the final commit_cow_chain writes out.
            let mut merged_common_dp = common_dir_page.clone();

            if src_remaining.is_empty() {
                // Source lives directly in the common page: drop it there.
                let idx = merged_common_dp
                    .entries
                    .iter()
                    .position(|e| e.name == old_leaf)
                    .ok_or_else(|| FsError::FileNotFound(old_leaf.to_string()))?;
                merged_common_dp.entries.remove(idx);
            } else {
                // The chain's first ancestor is the common directory
                // itself, which commit_cow_chain rewrites later — skip it.
                let src_sub = &full_src_ancestors[1..];
                let mut new_src_dp = src_dir_page.clone();
                new_src_dp.entries.remove(src_idx);

                let new_src_child =
                    self.cow_subchain(src_sub, &src_inode, &new_src_dp, &mut stale_blocks)?;

                // Re-point the common page at the rewritten source subtree.
                let src_child_name = src_remaining[0];
                let ci = merged_common_dp
                    .entries
                    .iter()
                    .position(|e| e.name == src_child_name)
                    .ok_or_else(|| FsError::DirectoryNotFound(src_child_name.to_string()))?;
                stale_blocks.push(merged_common_dp.entries[ci].inode_ref.block_id);
                merged_common_dp.entries[ci].inode_ref = ObjectRef::new(new_src_child);
            }

            if dst_remaining.is_empty() {
                // Destination is the common page: insert directly.
                merged_common_dp.entries.push(moved_entry);
            } else {
                // Same skip-first-ancestor logic as the source side.
                let dst_sub = &full_dst_ancestors[1..];
                let mut new_dst_dp = dst_dir_page.clone();
                new_dst_dp.entries.push(moved_entry);

                let new_dst_child =
                    self.cow_subchain(dst_sub, &dst_inode, &new_dst_dp, &mut stale_blocks)?;

                let dst_child_name = dst_remaining[0];
                let ci = merged_common_dp
                    .entries
                    .iter()
                    .position(|e| e.name == dst_child_name)
                    .ok_or_else(|| FsError::DirectoryNotFound(dst_child_name.to_string()))?;
                stale_blocks.push(merged_common_dp.entries[ci].inode_ref.block_id);
                merged_common_dp.entries[ci].inode_ref = ObjectRef::new(new_dst_child);
            }

            // One commit makes both halves of the move visible together.
            self.commit_cow_chain(&sb, &common_ancestors, &common_inode, &merged_common_dp)?;

            for block_id in stale_blocks {
                let _ = self.allocator.free(block_id);
            }
        }

        Ok(())
    }
945
946 pub fn sync(&mut self) -> FsResult<()> {
949 self.flush_all()?;
950 self.store.sync()
951 }
952
    /// Test-only: number of blocks the allocator currently considers free.
    #[cfg(test)]
    pub fn free_block_count(&self) -> u64 {
        self.allocator.free_count()
    }
958
959 pub fn scrub_free_blocks(&mut self) -> FsResult<()> {
968 self.flush_all()?;
969
970 let free_ids = self.allocator.free_block_ids();
971 if free_ids.is_empty() {
972 return Ok(());
973 }
974
975 let bs = self.store.block_size();
976 let mut rng = rand::thread_rng();
977
978 const BATCH: usize = 64;
980 for chunk in free_ids.chunks(BATCH) {
981 let mut pairs: Vec<(u64, Vec<u8>)> = Vec::with_capacity(chunk.len());
982 for &id in chunk {
983 let mut buf = vec![0u8; bs];
984 rng.fill_bytes(&mut buf);
985 pairs.push((id, buf));
986 }
987 let refs: Vec<(u64, &[u8])> = pairs.iter().map(|(id, d)| (*id, d.as_slice())).collect();
988 self.store.write_blocks(&refs)?;
989 }
990
991 self.store.sync()
992 }
993
    /// Writes one buffered file's dirty chunks and metadata to disk.
    ///
    /// Encrypts each dirty chunk into a fresh block, rebuilds the extent
    /// map and inode, splices the new inode into the parent directory via
    /// a COW commit, then frees every superseded block (old chunk blocks,
    /// old extent map, old inode). A clean buffer entry is dropped without
    /// any I/O.
    fn flush_file(&mut self, path_key: &str) -> FsResult<()> {
        let dirty = match self.write_buffer.remove(path_key) {
            Some(d) => d,
            None => return Ok(()),
        };

        if !dirty.metadata_dirty {
            return Ok(());
        }

        let (dir_parts, leaf) = Self::split_path(path_key)?;
        let sb = self
            .superblock
            .as_ref()
            .ok_or(FsError::NotInitialized)?
            .clone();
        let root_inode: Inode = self.read_obj(sb.root_inode_ref.block_id)?;
        let (ancestors, target_inode, dir_page) =
            self.resolve_dir_chain(&dir_parts, &root_inode)?;

        let mut extent_map = dirty.extent_map;

        // Blocks superseded by this flush; freed after the commit.
        let mut stale_blocks: Vec<u64> = Vec::new();

        // Chunk order here is HashMap iteration order — fine, since each
        // chunk is written to its own independent block.
        for (&chunk_idx, chunk_data) in &dirty.dirty_chunks {
            if let Some(existing) = extent_map
                .entries
                .iter()
                .find(|e| e.chunk_index == chunk_idx)
            {
                stale_blocks.push(existing.data_ref.block_id);
            }

            let data_block = self.allocator.allocate()?;
            write_encrypted_raw(
                self.store.as_ref(),
                self.crypto.as_ref(),
                &self.codec,
                data_block,
                ObjectKind::FileDataChunk,
                chunk_data,
            )?;

            // Update-or-insert the extent entry for this chunk.
            if let Some(entry) = extent_map
                .entries
                .iter_mut()
                .find(|e| e.chunk_index == chunk_idx)
            {
                entry.data_ref = ObjectRef::new(data_block);
                entry.plaintext_len = chunk_data.len() as u32;
            } else {
                extent_map.entries.push(ExtentEntry {
                    chunk_index: chunk_idx,
                    data_ref: ObjectRef::new(data_block),
                    plaintext_len: chunk_data.len() as u32,
                });
            }
        }

        // Restore the sorted invariant readers rely on for binary search.
        extent_map.entries.sort_by_key(|e| e.chunk_index);

        if !dirty.base_inode.extent_map_ref.is_null() {
            stale_blocks.push(dirty.base_inode.extent_map_ref.block_id);
        }

        let new_em_block = self.allocator.allocate()?;
        self.write_obj(new_em_block, ObjectKind::ExtentMap, &extent_map)?;

        let old_inode_block = dir_page
            .entries
            .iter()
            .find(|e| e.name == leaf)
            .map(|e| e.inode_ref.block_id);
        if let Some(blk) = old_inode_block {
            stale_blocks.push(blk);
        }

        // New inode carrying the buffered size and the new extent map.
        let mut new_inode = dirty.base_inode;
        new_inode.size = dirty.size;
        new_inode.extent_map_ref = ObjectRef::new(new_em_block);
        new_inode.modified_at = now_secs();
        let new_inode_block = self.allocator.allocate()?;
        self.write_obj(new_inode_block, ObjectKind::Inode, &new_inode)?;

        // Point the parent directory's entry at the new inode block.
        let mut new_dir_page = dir_page.clone();
        for e in &mut new_dir_page.entries {
            if e.name == leaf {
                e.inode_ref = ObjectRef::new(new_inode_block);
            }
        }

        self.commit_cow_chain(&sb, &ancestors, &target_inode, &new_dir_page)?;

        // Reclaim only after the commit has made the flush durable.
        for block_id in stale_blocks {
            let _ = self.allocator.free(block_id);
        }

        Ok(())
    }
1107
1108 fn flush_all(&mut self) -> FsResult<()> {
1110 let keys: Vec<String> = self.write_buffer.keys().cloned().collect();
1111 for key in keys {
1112 self.flush_file(&key)?;
1113 }
1114 Ok(())
1115 }
1116
    /// Reads from a file that has unflushed writes, merging buffered
    /// chunks with on-disk chunks per chunk index.
    ///
    /// Chunks absent from both the buffer and the extent map read as
    /// zeros. `chunk_size` is nonzero here because a `DirtyFile` can only
    /// exist after `write_file`'s zero-payload guard passed.
    fn read_file_buffered(&self, dirty: &DirtyFile, offset: u64, len: usize) -> FsResult<Vec<u8>> {
        let chunk_size = max_chunk_payload(self.store.block_size());
        let file_size = dirty.size as usize;
        let start = offset as usize;
        if start >= file_size || len == 0 {
            return Ok(Vec::new());
        }
        let end = std::cmp::min(start + len, file_size);
        let mut result = Vec::with_capacity(end - start);

        let first_chunk = start / chunk_size;
        let last_chunk = (end - 1) / chunk_size;

        for chunk_idx in first_chunk..=last_chunk {
            let chunk_file_start = chunk_idx * chunk_size;
            let chunk_file_end = std::cmp::min(chunk_file_start + chunk_size, file_size);
            let chunk_idx_u64 = chunk_idx as u64;

            // Buffered chunk wins over the on-disk extent; a hole in both
            // yields zeros.
            let chunk_data: Vec<u8> = if let Some(buf) = dirty.dirty_chunks.get(&chunk_idx_u64) {
                buf.clone()
            } else if let Ok(pos) = dirty
                .extent_map
                .entries
                .binary_search_by_key(&chunk_idx_u64, |e| e.chunk_index)
            {
                let entry = &dirty.extent_map.entries[pos];
                let raw = read_encrypted_raw(
                    self.store.as_ref(),
                    self.crypto.as_ref(),
                    &self.codec,
                    entry.data_ref.block_id,
                )?;
                // Trust the recorded length only up to what was returned.
                let plain_len = std::cmp::min(entry.plaintext_len as usize, raw.len());
                raw[..plain_len].to_vec()
            } else {
                vec![0u8; chunk_file_end - chunk_file_start]
            };

            // Window of this chunk that overlaps the requested range.
            let read_start = if chunk_idx == first_chunk {
                start - chunk_file_start
            } else {
                0
            };
            let read_end = if chunk_idx == last_chunk {
                end - chunk_file_start
            } else {
                chunk_data.len()
            };
            // Guard against a chunk shorter than its nominal span.
            let read_end = std::cmp::min(read_end, chunk_data.len());

            if read_start < read_end {
                result.extend_from_slice(&chunk_data[read_start..read_end]);
            }
        }

        Ok(result)
    }
1178
1179 fn alloc_inode_id(&mut self) -> InodeId {
1180 let id = self.next_inode_id;
1181 self.next_inode_id += 1;
1182 id
1183 }
1184
1185 fn validate_name(&self, name: &str) -> FsResult<()> {
1186 if name.is_empty() || name.contains('/') || name.contains('\0') {
1187 return Err(FsError::Internal("invalid name".into()));
1188 }
1189 if name.len() > MAX_NAME_LEN {
1190 return Err(FsError::NameTooLong(name.len(), MAX_NAME_LEN));
1191 }
1192 Ok(())
1193 }
1194
    /// Decrypts and deserializes the object stored at `block_id`.
    fn read_obj<T: serde::de::DeserializeOwned>(&self, block_id: u64) -> FsResult<T> {
        read_encrypted_object(
            self.store.as_ref(),
            self.crypto.as_ref(),
            &self.codec,
            block_id,
        )
    }
1203
    /// Serializes and encrypts `obj`, writing it to `block_id` tagged
    /// with `kind`.
    fn write_obj<T: serde::Serialize>(
        &self,
        block_id: u64,
        kind: ObjectKind,
        obj: &T,
    ) -> FsResult<()> {
        write_encrypted_object(
            self.store.as_ref(),
            self.crypto.as_ref(),
            &self.codec,
            block_id,
            kind,
            obj,
        )
    }
1219
1220 fn read_all_chunks(&self, extent_map: &ExtentMap) -> FsResult<Vec<u8>> {
1221 let mut entries = extent_map.entries.clone();
1222 entries.sort_by_key(|e| e.chunk_index);
1223
1224 let mut buf = Vec::new();
1225 for entry in &entries {
1226 let chunk = read_encrypted_raw(
1227 self.store.as_ref(),
1228 self.crypto.as_ref(),
1229 &self.codec,
1230 entry.data_ref.block_id,
1231 )?;
1232 let len = entry.plaintext_len as usize;
1234 if len <= chunk.len() {
1235 buf.extend_from_slice(&chunk[..len]);
1236 } else {
1237 buf.extend_from_slice(&chunk);
1238 }
1239 }
1240 Ok(buf)
1241 }
1242
1243 fn read_storage_header(&self) -> FsResult<StorageHeader> {
1244 let block = self.store.read_block(BLOCK_STORAGE_HEADER)?;
1245 if block.len() < 4 {
1246 return Err(FsError::InvalidSuperblock);
1247 }
1248 let len = u32::from_le_bytes([block[0], block[1], block[2], block[3]]) as usize;
1249 if len == 0 || 4 + len > block.len() {
1250 return Err(FsError::InvalidSuperblock);
1251 }
1252 self.codec
1253 .deserialize_object::<StorageHeader>(&block[4..4 + len])
1254 }
1255
    /// Commits `sb` through the transaction manager and retires the block
    /// that held the previous superblock.
    ///
    /// The old block is freed only after the commit returns successfully,
    /// keeping the prior superblock readable if the commit fails part-way.
    fn commit_superblock(&mut self, sb: Superblock) -> FsResult<()> {
        let new_sb_block = self.txn.commit(
            self.store.as_ref(),
            self.crypto.as_ref(),
            &self.codec,
            &self.allocator,
            &sb,
        )?;
        if let Some(old) = self.last_superblock_block {
            let _ = self.allocator.free(old);
        }
        self.last_superblock_block = Some(new_sb_block);
        self.superblock = Some(sb);
        Ok(())
    }
1272
    /// Reconstructs allocator state after `open`.
    ///
    /// Marks the committed superblock's block plus every block reachable
    /// from the root inode; everything else is treated as free.
    /// NOTE(review): root-pointer blocks themselves are not marked here —
    /// presumably they live at fixed ids outside the bitmap; confirm
    /// against TransactionManager.
    fn rebuild_allocator(&mut self, sb: &Superblock) -> FsResult<()> {
        let (rp, _) = TransactionManager::recover_latest(self.store.as_ref(), &self.codec)?
            .ok_or(FsError::InvalidRootPointer)?;
        self.allocator.mark_allocated(rp.superblock_ref.block_id)?;

        self.mark_inode_tree(sb.root_inode_ref.block_id)?;

        Ok(())
    }
1294
    /// Recursively marks `inode_block` and every block it references as
    /// allocated, and advances `next_inode_id` past each inode id seen.
    ///
    /// Directories recurse into their entries; files mark their extent
    /// map and data chunk blocks. Recursion depth equals directory depth.
    fn mark_inode_tree(&mut self, inode_block: u64) -> FsResult<()> {
        self.allocator.mark_allocated(inode_block)?;
        let inode: Inode = self.read_obj(inode_block)?;

        // Keep the id counter ahead of every id already on disk.
        if inode.id >= self.next_inode_id {
            self.next_inode_id = inode.id + 1;
        }

        match inode.kind {
            InodeKind::Directory => {
                if !inode.directory_page_ref.is_null() {
                    self.allocator
                        .mark_allocated(inode.directory_page_ref.block_id)?;
                    let dir_page: DirectoryPage =
                        self.read_obj(inode.directory_page_ref.block_id)?;
                    for entry in &dir_page.entries {
                        self.mark_inode_tree(entry.inode_ref.block_id)?;
                    }
                }
            }
            InodeKind::File => {
                if !inode.extent_map_ref.is_null() {
                    self.allocator
                        .mark_allocated(inode.extent_map_ref.block_id)?;
                    let extent_map: ExtentMap = self.read_obj(inode.extent_map_ref.block_id)?;
                    for entry in &extent_map.entries {
                        self.allocator.mark_allocated(entry.data_ref.block_id)?;
                    }
                }
            }
        }
        Ok(())
    }
1328}
1329
/// One row returned by `FilesystemCore::list_directory`.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct DirListEntry {
    /// Entry name within its directory.
    pub name: String,
    /// File or directory.
    pub kind: InodeKind,
    /// Size in bytes; for files with buffered writes, the buffered size.
    pub size: u64,
    /// Stable inode id of the entry.
    pub inode_id: InodeId,
}