//! doublecrypt_core/fs.rs — filesystem core.
1use std::collections::HashMap;
2use std::sync::Arc;
3use std::time::{SystemTime, UNIX_EPOCH};
4
5use rand::RngCore;
6
7use crate::allocator::{BitmapAllocator, SlotAllocator};
8use crate::block_store::BlockStore;
9use crate::codec::{
10    read_encrypted_object, read_encrypted_raw, write_encrypted_object, write_encrypted_raw,
11    ObjectCodec, PostcardCodec,
12};
13use crate::crypto::CryptoEngine;
14use crate::error::{FsError, FsResult};
15use crate::model::*;
16use crate::transaction::TransactionManager;
17
/// The main filesystem core. Owns the block store, crypto, codec, allocator,
/// and transaction manager. Provides high-level filesystem operations.
///
/// All path-accepting methods use `/`-separated paths.  An empty string or
/// `"/"` refers to the root directory.  Parent directories must already exist;
/// only `create_file` and `create_directory` create the leaf entry.
pub struct FilesystemCore {
    /// Underlying fixed-size block device abstraction.
    store: Arc<dyn BlockStore>,
    /// Encrypts/decrypts every on-disk object (used via the
    /// `write_encrypted_*` / `read_encrypted_*` codec helpers).
    crypto: Arc<dyn CryptoEngine>,
    /// Serializer for on-disk objects.
    codec: PostcardCodec,
    /// Tracks free/allocated blocks; rebuilt from the metadata tree on `open()`.
    allocator: BitmapAllocator,
    /// Commits superblocks and recovers the latest root pointer on mount.
    txn: TransactionManager,
    /// Cached current superblock; `None` until `init_filesystem` or `open`.
    superblock: Option<Superblock>,
    /// Next inode ID to allocate.
    next_inode_id: InodeId,
    /// Write buffer: dirty file chunks held in memory until flush,
    /// keyed by the `/`-trimmed file path.
    write_buffer: HashMap<String, DirtyFile>,
    /// Block ID of the most recently committed superblock object.
    /// Freed when superseded by a new commit.
    last_superblock_block: Option<u64>,
}
40
/// Tracks one ancestor directory during path resolution, used by
/// `commit_cow_chain` to propagate CoW writes back to the root.
struct AncestorEntry {
    /// The ancestor directory's inode (snapshot taken before any CoW rewrite).
    inode: Inode,
    /// The ancestor's directory page (snapshot taken before any CoW rewrite).
    dir_page: DirectoryPage,
    /// Index within `dir_page.entries` of the child lying on the resolved path.
    child_index: usize,
}
48
/// Tracks in-memory buffered writes for a single file.
///
/// All dirty chunks are held in memory until `sync()` (or the next
/// metadata-mutating operation) flushes them to the block store.
/// This keeps `write_file()` purely in-memory for smooth throughput.
struct DirtyFile {
    /// In-memory chunk data keyed by chunk index (only partial chunks).
    /// Chunks absent from this map are still authoritative on disk.
    dirty_chunks: HashMap<u64, Vec<u8>>,
    /// The file's inode at the time buffering started.
    base_inode: Inode,
    /// The file's extent map (updated in-place when chunks are eagerly flushed).
    /// Kept sorted by `chunk_index` so lookups can binary-search.
    extent_map: ExtentMap,
    /// Current logical file size in bytes (updated on every write).
    size: u64,
    /// Set to `true` when any data has been written (even if eagerly flushed).
    metadata_dirty: bool,
}
66
/// Maximum payload size for a single file data chunk.
///
/// Derived from the store's block size minus a conservative 200-byte reserve
/// for framing overhead (4-byte length prefix, ~60 bytes envelope metadata,
/// 16-byte Poly1305 tag, postcard framing).  Returns 0 when the block is too
/// small to carry any payload at all.
fn max_chunk_payload(block_size: usize) -> usize {
    // Conservative fixed reserve; see the breakdown above.
    const FRAMING_OVERHEAD: usize = 200;
    block_size.saturating_sub(FRAMING_OVERHEAD)
}
79
/// Current wall-clock time as whole seconds since the Unix epoch.
/// Falls back to 0 if the system clock reads before the epoch.
fn now_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|elapsed| elapsed.as_secs())
        .unwrap_or(0)
}
86
87impl FilesystemCore {
88    /// Create a new FilesystemCore backed by the given store and crypto engine.
89    pub fn new(store: Arc<dyn BlockStore>, crypto: Arc<dyn CryptoEngine>) -> Self {
90        let total_blocks = store.total_blocks();
91        Self {
92            store,
93            crypto,
94            codec: PostcardCodec,
95            allocator: BitmapAllocator::new(total_blocks),
96            txn: TransactionManager::new(),
97            superblock: None,
98            next_inode_id: 1,
99            write_buffer: HashMap::new(),
100            last_superblock_block: None,
101        }
102    }
103
104    // ── Initialization ──
105
106    /// Initialize a brand-new filesystem on the block store.
107    /// Writes the storage header, creates the root directory, and commits.
108    pub fn init_filesystem(&mut self) -> FsResult<()> {
109        let block_size = self.store.block_size() as u32;
110        let total_blocks = self.store.total_blocks();
111
112        // Write storage header to block 0 (unencrypted).
113        let header = StorageHeader::new(block_size, total_blocks);
114        let header_bytes = self.codec.serialize_object(&header)?;
115        let bs = self.store.block_size();
116        let mut block = vec![0u8; bs];
117        rand::thread_rng().fill_bytes(&mut block);
118        let len = header_bytes.len() as u32;
119        block[..4].copy_from_slice(&len.to_le_bytes());
120        block[4..4 + header_bytes.len()].copy_from_slice(&header_bytes);
121        self.store.write_block(BLOCK_STORAGE_HEADER, &block)?;
122
123        // Create root directory inode.
124        let root_inode_id = self.alloc_inode_id();
125        let dir_page = DirectoryPage::new();
126        let dir_page_block = self.allocator.allocate()?;
127        write_encrypted_object(
128            self.store.as_ref(),
129            self.crypto.as_ref(),
130            &self.codec,
131            dir_page_block,
132            ObjectKind::DirectoryPage,
133            &dir_page,
134        )?;
135
136        let ts = now_secs();
137        let root_inode = Inode {
138            id: root_inode_id,
139            kind: InodeKind::Directory,
140            size: 0,
141            directory_page_ref: ObjectRef::new(dir_page_block),
142            extent_map_ref: ObjectRef::null(),
143            created_at: ts,
144            modified_at: ts,
145        };
146        let root_inode_block = self.allocator.allocate()?;
147        write_encrypted_object(
148            self.store.as_ref(),
149            self.crypto.as_ref(),
150            &self.codec,
151            root_inode_block,
152            ObjectKind::Inode,
153            &root_inode,
154        )?;
155
156        // Create superblock.
157        let sb = Superblock {
158            generation: 1,
159            root_inode_ref: ObjectRef::new(root_inode_block),
160        };
161        self.superblock = Some(sb.clone());
162
163        // Commit.
164        self.txn.commit(
165            self.store.as_ref(),
166            self.crypto.as_ref(),
167            &self.codec,
168            &self.allocator,
169            &sb,
170        )?;
171
172        Ok(())
173    }
174
    /// Open / mount an existing filesystem by recovering the latest root pointer.
    ///
    /// Validates the storage header, asks the transaction manager for the
    /// newest root pointer, decrypts the superblock it references, and
    /// verifies it against the root pointer's blake3 checksum before trusting
    /// it.  Finally rebuilds the allocator by walking the metadata tree.
    pub fn open(&mut self) -> FsResult<()> {
        // Verify storage header.
        let header = self.read_storage_header()?;
        if !header.is_valid() {
            return Err(FsError::InvalidSuperblock);
        }

        // Recover latest root pointer.  `was_b` records which pointer slot
        // it came from, so commits can continue alternating correctly.
        let (rp, was_b) = TransactionManager::recover_latest(self.store.as_ref(), &self.codec)?
            .ok_or(FsError::InvalidRootPointer)?;

        // Read (decrypt) the superblock the root pointer references.
        let sb: Superblock = read_encrypted_object(
            self.store.as_ref(),
            self.crypto.as_ref(),
            &self.codec,
            rp.superblock_ref.block_id,
        )?;

        // Verify checksum by re-serializing and hashing the superblock.
        // NOTE(review): this assumes serialization is canonical (identical
        // bytes for an identical Superblock) — confirm the codec guarantees it.
        let sb_bytes = self.codec.serialize_object(&sb)?;
        let checksum = blake3::hash(&sb_bytes);
        if *checksum.as_bytes() != rp.checksum {
            return Err(FsError::InvalidSuperblock);
        }

        self.txn = TransactionManager::from_recovered(rp.generation, was_b);
        self.superblock = Some(sb.clone());
        self.last_superblock_block = Some(rp.superblock_ref.block_id);

        // Rebuild allocator knowledge by walking the metadata tree.
        self.rebuild_allocator(&sb)?;

        Ok(())
    }
211
    // ── Path helpers ──────────────────────────────────────────
215
216    /// Split a path into its directory components and the leaf name.
217    /// Returns `(["a","b"], "c")` for `"a/b/c"`, or `([], "c")` for `"c"`.
218    fn split_path(path: &str) -> FsResult<(Vec<&str>, &str)> {
219        let trimmed = path.trim_matches('/');
220        if trimmed.is_empty() {
221            return Err(FsError::Internal("empty path".into()));
222        }
223        let parts: Vec<&str> = trimmed.split('/').collect();
224        let (dirs, leaf) = parts.split_at(parts.len() - 1);
225        Ok((dirs.to_vec(), leaf[0]))
226    }
227
228    /// Parse a directory path (may be empty / "/" for root) into components.
229    fn split_dir_path(path: &str) -> Vec<&str> {
230        let trimmed = path.trim_matches('/');
231        if trimmed.is_empty() {
232            return Vec::new();
233        }
234        trimmed.split('/').collect()
235    }
236
    /// Resolve a sequence of directory components starting from the root inode,
    /// returning the ancestor chain needed for CoW commit propagation.
    ///
    /// Returns `(ancestors, target_inode, target_dir_page)` where `ancestors`
    /// is a list of `(Inode, DirectoryPage, entry_index_in_parent)` from root
    /// down to (but not including) the final resolved directory.
    ///
    /// Errors with `DirectoryNotFound` when a component is missing and
    /// `NotADirectory` when a component names a file.
    fn resolve_dir_chain(
        &self,
        components: &[&str],
        root_inode: &Inode,
    ) -> FsResult<(Vec<AncestorEntry>, Inode, DirectoryPage)> {
        let mut ancestors: Vec<AncestorEntry> = Vec::new();
        let mut current_inode = root_inode.clone();
        let mut current_dir_page: DirectoryPage =
            self.read_obj(current_inode.directory_page_ref.block_id)?;

        for component in components {
            // Linear scan of the current directory page for the component.
            let idx = current_dir_page
                .entries
                .iter()
                .position(|e| e.name == *component)
                .ok_or_else(|| FsError::DirectoryNotFound(component.to_string()))?;

            let entry = &current_dir_page.entries[idx];
            if entry.kind != InodeKind::Directory {
                return Err(FsError::NotADirectory(component.to_string()));
            }

            // Read the child first; only then move the current inode/page
            // into the ancestor list (they are consumed by the push).
            let child_inode: Inode = self.read_obj(entry.inode_ref.block_id)?;
            let child_dir_page: DirectoryPage =
                self.read_obj(child_inode.directory_page_ref.block_id)?;

            ancestors.push(AncestorEntry {
                inode: current_inode,
                dir_page: current_dir_page,
                child_index: idx,
            });

            current_inode = child_inode;
            current_dir_page = child_dir_page;
        }

        Ok((ancestors, current_inode, current_dir_page))
    }
281
    /// After mutating a directory's page, propagate CoW changes up through
    /// the ancestor chain to the root, then commit a new superblock.
    ///
    /// `new_dir_page` is the already-modified DirectoryPage of the target dir.
    /// `target_inode` is the inode of the directory that owns `new_dir_page`.
    /// `ancestors` is the chain from root down to (but not including) target.
    ///
    /// Every rewritten node gets a freshly allocated block; the superseded
    /// blocks are freed only after the superblock commit succeeds, so a
    /// failed commit never invalidates the previous on-disk tree.
    fn commit_cow_chain(
        &mut self,
        sb: &Superblock,
        ancestors: &[AncestorEntry],
        target_inode: &Inode,
        new_dir_page: &DirectoryPage,
    ) -> FsResult<()> {
        // Collect old block IDs replaced by CoW.  Freed after commit succeeds.
        let mut stale_blocks: Vec<u64> = Vec::new();

        // Target directory: old dir page block.
        stale_blocks.push(target_inode.directory_page_ref.block_id);
        // Target inode block (its block ID is derived from the chain):
        if ancestors.is_empty() {
            // target IS the root inode.
            stale_blocks.push(sb.root_inode_ref.block_id);
        } else {
            let last = ancestors.last().unwrap();
            stale_blocks.push(last.dir_page.entries[last.child_index].inode_ref.block_id);
        }

        // Write the modified directory page.
        let mut new_dp_block = self.allocator.allocate()?;
        self.write_obj(new_dp_block, ObjectKind::DirectoryPage, new_dir_page)?;

        // Write the modified directory inode.
        let mut new_inode = target_inode.clone();
        new_inode.directory_page_ref = ObjectRef::new(new_dp_block);
        new_inode.modified_at = now_secs();
        let mut new_inode_block = self.allocator.allocate()?;
        self.write_obj(new_inode_block, ObjectKind::Inode, &new_inode)?;

        // Propagate upward through ancestors (bottom to top).  Invariant:
        // `new_inode_block` always holds the freshly written inode block of
        // the level just below the ancestor being processed.
        for (i, ancestor) in ancestors.iter().rev().enumerate() {
            // Old ancestor dir page block.
            stale_blocks.push(ancestor.inode.directory_page_ref.block_id);
            // Old ancestor inode block.
            let rev_idx = ancestors.len() - 1 - i;
            if rev_idx == 0 {
                // This is the root — its block is in the superblock.
                stale_blocks.push(sb.root_inode_ref.block_id);
            } else {
                let parent = &ancestors[rev_idx - 1];
                stale_blocks.push(
                    parent.dir_page.entries[parent.child_index]
                        .inode_ref
                        .block_id,
                );
            }

            // Re-point the ancestor's entry at the new child inode block.
            let mut parent_dp = ancestor.dir_page.clone();
            parent_dp.entries[ancestor.child_index].inode_ref = ObjectRef::new(new_inode_block);

            new_dp_block = self.allocator.allocate()?;
            self.write_obj(new_dp_block, ObjectKind::DirectoryPage, &parent_dp)?;

            let mut parent_inode = ancestor.inode.clone();
            parent_inode.directory_page_ref = ObjectRef::new(new_dp_block);
            parent_inode.modified_at = now_secs();
            new_inode_block = self.allocator.allocate()?;
            self.write_obj(new_inode_block, ObjectKind::Inode, &parent_inode)?;
        }

        // new_inode_block is now the new root inode block.
        let new_sb = Superblock {
            generation: sb.generation + 1,
            root_inode_ref: ObjectRef::new(new_inode_block),
        };
        self.commit_superblock(new_sb)?;

        // Free stale blocks after the commit has succeeded.
        for block_id in stale_blocks {
            let _ = self.allocator.free(block_id);
        }

        Ok(())
    }
365
    /// CoW-propagate a modified directory page upward through a sub-chain of
    /// ancestors, stopping before the common ancestor level.
    ///
    /// Returns the new top-level inode block ID.  The caller must update the
    /// common ancestor's dir-page entry to point to this block.
    ///
    /// `stale_blocks` collects old blocks replaced by CoW.  The old inode
    /// block of the topmost node (referenced by the common ancestor's
    /// directory entry) is **not** added — the caller handles that.
    ///
    /// Unlike `commit_cow_chain`, this does not commit a superblock or free
    /// anything; it only rewrites nodes and records the superseded blocks.
    fn cow_subchain(
        &mut self,
        sub_ancestors: &[AncestorEntry],
        target_inode: &Inode,
        new_dir_page: &DirectoryPage,
        stale_blocks: &mut Vec<u64>,
    ) -> FsResult<u64> {
        // Write the modified directory page.
        stale_blocks.push(target_inode.directory_page_ref.block_id);
        let mut new_dp_block = self.allocator.allocate()?;
        self.write_obj(new_dp_block, ObjectKind::DirectoryPage, new_dir_page)?;

        // Write the updated target inode (now referencing the fresh page).
        let mut new_inode = target_inode.clone();
        new_inode.directory_page_ref = ObjectRef::new(new_dp_block);
        new_inode.modified_at = now_secs();
        let mut new_inode_block = self.allocator.allocate()?;
        self.write_obj(new_inode_block, ObjectKind::Inode, &new_inode)?;

        // Propagate upward through sub-ancestors (bottom to top).  Invariant:
        // `new_inode_block` is the freshly written inode block of the level
        // below the ancestor being processed.
        for ancestor in sub_ancestors.iter().rev() {
            // Old child inode block (the one we just replaced).
            stale_blocks.push(
                ancestor.dir_page.entries[ancestor.child_index]
                    .inode_ref
                    .block_id,
            );
            // Old ancestor dir page block.
            stale_blocks.push(ancestor.inode.directory_page_ref.block_id);

            // Re-point the ancestor's entry at the new child inode block.
            let mut parent_dp = ancestor.dir_page.clone();
            parent_dp.entries[ancestor.child_index].inode_ref = ObjectRef::new(new_inode_block);

            new_dp_block = self.allocator.allocate()?;
            self.write_obj(new_dp_block, ObjectKind::DirectoryPage, &parent_dp)?;

            let mut parent_inode = ancestor.inode.clone();
            parent_inode.directory_page_ref = ObjectRef::new(new_dp_block);
            parent_inode.modified_at = now_secs();
            new_inode_block = self.allocator.allocate()?;
            self.write_obj(new_inode_block, ObjectKind::Inode, &parent_inode)?;
        }

        Ok(new_inode_block)
    }
420
421    // ── Public operations ─────────────────────────────────────
422
    /// Create a new empty file at the given path.
    ///
    /// Parent directories must already exist.  The leaf name is created in
    /// the innermost directory.
    ///
    /// Errors with `FileAlreadyExists` if the leaf name is taken.
    pub fn create_file(&mut self, path: &str) -> FsResult<()> {
        let (dir_parts, leaf) = Self::split_path(path)?;
        self.validate_name(leaf)?;
        // Flush buffered writes first: this op commits a new metadata tree.
        self.flush_all()?;
        let sb = self
            .superblock
            .as_ref()
            .ok_or(FsError::NotInitialized)?
            .clone();

        let root_inode: Inode = self.read_obj(sb.root_inode_ref.block_id)?;
        let (ancestors, target_inode, mut dir_page) =
            self.resolve_dir_chain(&dir_parts, &root_inode)?;

        if dir_page.entries.iter().any(|e| e.name == leaf) {
            return Err(FsError::FileAlreadyExists(leaf.to_string()));
        }

        // Create empty extent map.
        let extent_map = ExtentMap::new();
        let em_block = self.allocator.allocate()?;
        self.write_obj(em_block, ObjectKind::ExtentMap, &extent_map)?;

        // Create file inode referencing the empty extent map.
        let inode_id = self.alloc_inode_id();
        let ts = now_secs();
        let file_inode = Inode {
            id: inode_id,
            kind: InodeKind::File,
            size: 0,
            directory_page_ref: ObjectRef::null(),
            extent_map_ref: ObjectRef::new(em_block),
            created_at: ts,
            modified_at: ts,
        };
        let inode_block = self.allocator.allocate()?;
        self.write_obj(inode_block, ObjectKind::Inode, &file_inode)?;

        // Link the new inode into the parent directory page.
        dir_page.entries.push(DirectoryEntry {
            name: leaf.to_string(),
            inode_ref: ObjectRef::new(inode_block),
            inode_id,
            kind: InodeKind::File,
        });

        // CoW-propagate the modified page up to the root and commit.
        // NOTE(review): if this fails, `em_block`/`inode_block` stay marked
        // allocated until the allocator is rebuilt on remount — confirm
        // that leak window is acceptable.
        self.commit_cow_chain(&sb, &ancestors, &target_inode, &dir_page)?;
        Ok(())
    }
475
    /// Write data to a file at the given path.
    ///
    /// Writes are buffered in memory and only flushed to the block store on
    /// `sync()` or when another metadata-mutating operation occurs.
    /// This keeps every `write_file` call purely in-memory for smooth
    /// throughput.  Call `sync()` periodically to bound memory usage.
    ///
    /// Writing past EOF grows the file; any gap chunks are zero-filled.
    pub fn write_file(&mut self, path: &str, offset: u64, data: &[u8]) -> FsResult<()> {
        if data.is_empty() {
            return Ok(());
        }

        let chunk_size = max_chunk_payload(self.store.block_size());
        if chunk_size == 0 {
            // Block too small to hold any payload after framing overhead.
            return Err(FsError::DataTooLarge(data.len()));
        }

        let path_key = path.trim_matches('/').to_string();

        // Take the dirty entry out of the map so `self` is free for other
        // borrows (disk reads, etc.).  We'll put it back at the end.
        let mut dirty = match self.write_buffer.remove(&path_key) {
            Some(d) => d,
            None => {
                // First buffered write — load metadata from disk.
                let (dir_parts, leaf) = Self::split_path(path)?;
                let sb = self.superblock.as_ref().ok_or(FsError::NotInitialized)?;
                let root_inode: Inode = self.read_obj(sb.root_inode_ref.block_id)?;
                let (_, _, dir_page) = self.resolve_dir_chain(&dir_parts, &root_inode)?;
                let entry = dir_page
                    .entries
                    .iter()
                    .find(|e| e.name == leaf)
                    .ok_or_else(|| FsError::FileNotFound(leaf.to_string()))?;
                if entry.kind != InodeKind::File {
                    return Err(FsError::NotAFile(leaf.to_string()));
                }
                let file_inode: Inode = self.read_obj(entry.inode_ref.block_id)?;
                let mut extent_map: ExtentMap =
                    self.read_obj(file_inode.extent_map_ref.block_id)?;
                // Sort once so chunk lookups below can binary-search.
                extent_map.entries.sort_by_key(|e| e.chunk_index);
                DirtyFile {
                    dirty_chunks: HashMap::new(),
                    base_inode: file_inode.clone(),
                    extent_map,
                    size: file_inode.size,
                    metadata_dirty: false,
                }
            }
        };

        let old_size = dirty.size as usize;
        let write_start = offset as usize;
        let write_end = write_start + data.len();
        let new_size = std::cmp::max(old_size, write_end);

        // For appends past EOF, start at the chunk containing old EOF so the
        // tail chunk is extended and any gap chunks are created zero-filled;
        // for in-place writes, start at the chunk containing the offset.
        let first_chunk = if write_start >= old_size {
            old_size / chunk_size
        } else {
            write_start / chunk_size
        };
        let last_chunk = (new_size - 1) / chunk_size;

        for chunk_idx in first_chunk..=last_chunk {
            // Logical byte range this chunk covers in the (grown) file.
            let chunk_file_start = chunk_idx * chunk_size;
            let chunk_file_end = std::cmp::min(chunk_file_start + chunk_size, new_size);
            let chunk_len = chunk_file_end - chunk_file_start;
            let chunk_idx_u64 = chunk_idx as u64;

            // If this chunk isn't buffered yet, load its on-disk content (or zeros).
            if !dirty.dirty_chunks.contains_key(&chunk_idx_u64) {
                let mut buf = vec![0u8; chunk_len];
                if chunk_file_start < old_size {
                    if let Ok(pos) = dirty
                        .extent_map
                        .entries
                        .binary_search_by_key(&chunk_idx_u64, |e| e.chunk_index)
                    {
                        let existing = &dirty.extent_map.entries[pos];
                        let raw = read_encrypted_raw(
                            self.store.as_ref(),
                            self.crypto.as_ref(),
                            &self.codec,
                            existing.data_ref.block_id,
                        )?;
                        // Clamp twice: never copy past the chunk's logical
                        // length, nor past the decrypted payload's length.
                        let copy_len = std::cmp::min(existing.plaintext_len as usize, chunk_len);
                        let src_len = std::cmp::min(copy_len, raw.len());
                        buf[..src_len].copy_from_slice(&raw[..src_len]);
                    }
                }
                dirty.dirty_chunks.insert(chunk_idx_u64, buf);
            }

            // Grow a previously buffered chunk if the file has since grown
            // into it (new bytes are zero-filled).
            let chunk_buf = dirty.dirty_chunks.get_mut(&chunk_idx_u64).unwrap();
            if chunk_buf.len() < chunk_len {
                chunk_buf.resize(chunk_len, 0);
            }

            // Overlay the write data onto the chunk.
            let overlap_start = std::cmp::max(chunk_file_start, write_start);
            let overlap_end = std::cmp::min(chunk_file_end, write_end);
            if overlap_start < overlap_end {
                let data_off = overlap_start - write_start;
                let chunk_off = overlap_start - chunk_file_start;
                let len = overlap_end - overlap_start;
                chunk_buf[chunk_off..chunk_off + len]
                    .copy_from_slice(&data[data_off..data_off + len]);
            }
        }

        dirty.size = new_size as u64;
        dirty.metadata_dirty = true;

        self.write_buffer.insert(path_key, dirty);
        Ok(())
    }
591
    /// Read file data at the given path. Returns the requested slice.
    ///
    /// If the file has buffered (unflushed) writes, reads are served from the
    /// in-memory buffer merged with on-disk data.
    ///
    /// Reads starting at or past EOF return an empty vector; reads crossing
    /// EOF return a short result rather than an error.
    pub fn read_file(&self, path: &str, offset: u64, len: usize) -> FsResult<Vec<u8>> {
        let path_key = path.trim_matches('/');

        // Buffered path: serve from the write-buffer overlay.
        if let Some(dirty) = self.write_buffer.get(path_key) {
            return self.read_file_buffered(dirty, offset, len);
        }

        let (dir_parts, leaf) = Self::split_path(path)?;
        let sb = self.superblock.as_ref().ok_or(FsError::NotInitialized)?;
        let root_inode: Inode = self.read_obj(sb.root_inode_ref.block_id)?;
        let (_, _, dir_page) = self.resolve_dir_chain(&dir_parts, &root_inode)?;

        let entry = dir_page
            .entries
            .iter()
            .find(|e| e.name == leaf)
            .ok_or_else(|| FsError::FileNotFound(leaf.to_string()))?;

        if entry.kind != InodeKind::File {
            return Err(FsError::NotAFile(leaf.to_string()));
        }

        let file_inode: Inode = self.read_obj(entry.inode_ref.block_id)?;
        let extent_map: ExtentMap = self.read_obj(file_inode.extent_map_ref.block_id)?;

        // NOTE(review): this decrypts and assembles the ENTIRE file before
        // slicing — O(file size) per read regardless of `len`.  Consider
        // reading only the chunks covering [offset, offset+len).
        let full_data = self.read_all_chunks(&extent_map)?;

        let start = offset as usize;
        if start >= full_data.len() {
            return Ok(Vec::new());
        }
        let end = std::cmp::min(start + len, full_data.len());
        Ok(full_data[start..end].to_vec())
    }
630
631    /// List entries in a directory at the given path.
632    ///
633    /// Pass `""` or `"/"` to list the root directory.
634    pub fn list_directory(&self, path: &str) -> FsResult<Vec<DirListEntry>> {
635        let sb = self.superblock.as_ref().ok_or(FsError::NotInitialized)?;
636        let root_inode: Inode = self.read_obj(sb.root_inode_ref.block_id)?;
637
638        let components = Self::split_dir_path(path);
639        let (_, _, dir_page) = self.resolve_dir_chain(&components, &root_inode)?;
640
641        let dir_prefix = {
642            let trimmed = path.trim_matches('/');
643            if trimmed.is_empty() {
644                String::new()
645            } else {
646                format!("{}/", trimmed)
647            }
648        };
649
650        let mut result = Vec::new();
651        for entry in &dir_page.entries {
652            let inode: Inode = self.read_obj(entry.inode_ref.block_id)?;
653            // Use buffered size if this file has pending writes.
654            let size = if entry.kind == InodeKind::File {
655                let full_path = format!("{}{}", dir_prefix, entry.name);
656                if let Some(dirty) = self.write_buffer.get(&full_path) {
657                    dirty.size
658                } else {
659                    inode.size
660                }
661            } else {
662                inode.size
663            };
664            result.push(DirListEntry {
665                name: entry.name.clone(),
666                kind: entry.kind,
667                size,
668                inode_id: entry.inode_id,
669            });
670        }
671        Ok(result)
672    }
673
    /// Create a subdirectory at the given path.
    ///
    /// Parent directories must already exist; only the leaf is created.
    ///
    /// Errors with `DirectoryAlreadyExists` if the leaf name is taken.
    pub fn create_directory(&mut self, path: &str) -> FsResult<()> {
        let (dir_parts, leaf) = Self::split_path(path)?;
        self.validate_name(leaf)?;
        // Flush buffered writes first: this op commits a new metadata tree.
        self.flush_all()?;
        let sb = self
            .superblock
            .as_ref()
            .ok_or(FsError::NotInitialized)?
            .clone();
        let root_inode: Inode = self.read_obj(sb.root_inode_ref.block_id)?;
        let (ancestors, target_inode, mut dir_page) =
            self.resolve_dir_chain(&dir_parts, &root_inode)?;

        if dir_page.entries.iter().any(|e| e.name == leaf) {
            return Err(FsError::DirectoryAlreadyExists(leaf.to_string()));
        }

        // Create empty directory page for the new subdirectory.
        let sub_dp = DirectoryPage::new();
        let sub_dp_block = self.allocator.allocate()?;
        self.write_obj(sub_dp_block, ObjectKind::DirectoryPage, &sub_dp)?;

        // Create the directory inode referencing the empty page.
        let inode_id = self.alloc_inode_id();
        let ts = now_secs();
        let dir_inode = Inode {
            id: inode_id,
            kind: InodeKind::Directory,
            size: 0,
            directory_page_ref: ObjectRef::new(sub_dp_block),
            extent_map_ref: ObjectRef::null(),
            created_at: ts,
            modified_at: ts,
        };
        let inode_block = self.allocator.allocate()?;
        self.write_obj(inode_block, ObjectKind::Inode, &dir_inode)?;

        // Link the new subdirectory into the parent directory page.
        dir_page.entries.push(DirectoryEntry {
            name: leaf.to_string(),
            inode_ref: ObjectRef::new(inode_block),
            inode_id,
            kind: InodeKind::Directory,
        });

        // CoW-propagate the modified page up to the root and commit.
        self.commit_cow_chain(&sb, &ancestors, &target_inode, &dir_page)?;
        Ok(())
    }
723
    /// Remove a file or empty directory at the given path.
    ///
    /// Any buffered (unflushed) writes for the removed path are discarded.
    /// Errors with `DirectoryNotEmpty` when removing a non-empty directory.
    pub fn remove_file(&mut self, path: &str) -> FsResult<()> {
        // Drop pending buffered writes for this path, then flush the rest
        // before committing a new metadata tree.
        let path_key = path.trim_matches('/').to_string();
        self.write_buffer.remove(&path_key);
        self.flush_all()?;
        let (dir_parts, leaf) = Self::split_path(path)?;
        let sb = self
            .superblock
            .as_ref()
            .ok_or(FsError::NotInitialized)?
            .clone();
        let root_inode: Inode = self.read_obj(sb.root_inode_ref.block_id)?;
        let (ancestors, target_inode, mut dir_page) =
            self.resolve_dir_chain(&dir_parts, &root_inode)?;

        let idx = dir_page
            .entries
            .iter()
            .position(|e| e.name == leaf)
            .ok_or_else(|| FsError::FileNotFound(leaf.to_string()))?;

        // Collect all blocks owned by the removed entry so we can free them.
        let removed_entry = &dir_page.entries[idx];
        let mut stale_blocks: Vec<u64> = Vec::new();
        stale_blocks.push(removed_entry.inode_ref.block_id);
        let removed_inode: Inode = self.read_obj(removed_entry.inode_ref.block_id)?;

        match removed_entry.kind {
            InodeKind::Directory => {
                // Only empty directories may be removed.
                let sub_page: DirectoryPage =
                    self.read_obj(removed_inode.directory_page_ref.block_id)?;
                if !sub_page.entries.is_empty() {
                    return Err(FsError::DirectoryNotEmpty(leaf.to_string()));
                }
                stale_blocks.push(removed_inode.directory_page_ref.block_id);
            }
            InodeKind::File => {
                // Free the extent map plus every data chunk it references.
                if !removed_inode.extent_map_ref.is_null() {
                    stale_blocks.push(removed_inode.extent_map_ref.block_id);
                    let extent_map: ExtentMap =
                        self.read_obj(removed_inode.extent_map_ref.block_id)?;
                    for ext in &extent_map.entries {
                        stale_blocks.push(ext.data_ref.block_id);
                    }
                }
            }
        }

        dir_page.entries.remove(idx);
        // commit_cow_chain frees its own stale CoW blocks.
        self.commit_cow_chain(&sb, &ancestors, &target_inode, &dir_page)?;

        // Free blocks that belonged to the removed entry.
        for block_id in stale_blocks {
            let _ = self.allocator.free(block_id);
        }

        Ok(())
    }
783
    /// Rename or move a file or directory.  Supports both same-directory
    /// renames and cross-directory moves.
    ///
    /// Both parent directories must already exist and the destination name
    /// must not.  Cross-directory moves CoW both affected sub-chains, merge
    /// the updates into the Least Common Ancestor's directory page, and
    /// commit everything through a single `commit_cow_chain` call, so the
    /// whole move lands in one superblock commit.
    ///
    /// # Errors
    /// - [`FsError::NotInitialized`] if no superblock has been loaded.
    /// - [`FsError::FileNotFound`] if `old_path`'s leaf does not exist.
    /// - [`FsError::FileAlreadyExists`] if `new_path`'s leaf already exists.
    /// - [`FsError::Internal`] when moving a directory into its own subtree.
    pub fn rename(&mut self, old_path: &str, new_path: &str) -> FsResult<()> {
        let (old_dir, old_leaf) = Self::split_path(old_path)?;
        let (new_dir, new_leaf) = Self::split_path(new_path)?;
        self.validate_name(new_leaf)?;
        // Flush buffered writes so the on-disk tree reflects all file data
        // before we start CoW-ing directory pages.
        self.flush_all()?;

        let sb = self
            .superblock
            .as_ref()
            .ok_or(FsError::NotInitialized)?
            .clone();
        let root_inode: Inode = self.read_obj(sb.root_inode_ref.block_id)?;

        if old_dir == new_dir {
            // ── Same-directory rename: just change the name in-place. ──
            let (ancestors, target_inode, mut dir_page) =
                self.resolve_dir_chain(&old_dir, &root_inode)?;

            // Check for a name collision BEFORE looking up the source entry
            // so an existing destination wins over a missing source error.
            if dir_page.entries.iter().any(|e| e.name == new_leaf) {
                return Err(FsError::FileAlreadyExists(new_leaf.to_string()));
            }

            let entry = dir_page
                .entries
                .iter_mut()
                .find(|e| e.name == old_leaf)
                .ok_or_else(|| FsError::FileNotFound(old_leaf.to_string()))?;

            entry.name = new_leaf.to_string();
            self.commit_cow_chain(&sb, &ancestors, &target_inode, &dir_page)?;
        } else {
            // ── Cross-directory rename / move. ──

            // Prevent moving a directory into its own subtree.
            // (Also runs for files, where it is harmless: a file path can
            // only be a prefix of `new_dir` if the "directory" component is
            // actually the file, and resolution would fail later anyway.)
            let src_full: Vec<&str> = old_dir
                .iter()
                .copied()
                .chain(std::iter::once(old_leaf))
                .collect();
            if new_dir.len() >= src_full.len() && new_dir[..src_full.len()] == src_full[..] {
                return Err(FsError::Internal(
                    "cannot move a directory into itself".into(),
                ));
            }

            // Find the Least Common Ancestor (LCA) of the two parent dirs.
            let common_len = old_dir
                .iter()
                .zip(new_dir.iter())
                .take_while(|(a, b)| a == b)
                .count();
            let common_parts = &old_dir[..common_len];
            let src_remaining = &old_dir[common_len..];
            let dst_remaining = &new_dir[common_len..];

            // Resolve the common ancestor chain from root.
            let (common_ancestors, common_inode, common_dir_page) =
                self.resolve_dir_chain(common_parts, &root_inode)?;

            // Resolve source sub-chain below the common ancestor.
            let (full_src_ancestors, src_inode, src_dir_page) = if src_remaining.is_empty() {
                (Vec::new(), common_inode.clone(), common_dir_page.clone())
            } else {
                self.resolve_dir_chain(src_remaining, &common_inode)?
            };

            // Resolve destination sub-chain below the common ancestor.
            let (full_dst_ancestors, dst_inode, dst_dir_page) = if dst_remaining.is_empty() {
                (Vec::new(), common_inode.clone(), common_dir_page.clone())
            } else {
                self.resolve_dir_chain(dst_remaining, &common_inode)?
            };

            // Validate: source must exist, destination name must not.
            if dst_dir_page.entries.iter().any(|e| e.name == new_leaf) {
                return Err(FsError::FileAlreadyExists(new_leaf.to_string()));
            }
            let src_idx = src_dir_page
                .entries
                .iter()
                .position(|e| e.name == old_leaf)
                .ok_or_else(|| FsError::FileNotFound(old_leaf.to_string()))?;

            // Build the moved entry with its new name.
            let mut moved_entry = src_dir_page.entries[src_idx].clone();
            moved_entry.name = new_leaf.to_string();

            // Blocks superseded by the sub-chain CoWs; freed after commit.
            let mut stale_blocks: Vec<u64> = Vec::new();

            // Start with the common ancestor's dir page; both sides
            // accumulate updates into this copy.
            let mut merged_common_dp = common_dir_page.clone();

            // ── Source side ──
            if src_remaining.is_empty() {
                // Source dir IS the common ancestor — remove directly.
                let idx = merged_common_dp
                    .entries
                    .iter()
                    .position(|e| e.name == old_leaf)
                    .ok_or_else(|| FsError::FileNotFound(old_leaf.to_string()))?;
                merged_common_dp.entries.remove(idx);
            } else {
                // CoW the source sub-chain with the entry removed.
                // full_src_ancestors[0] is the common ancestor itself;
                // pass only the levels below it.
                // NOTE(review): assumes resolve_dir_chain returns at least
                // one ancestor for a non-empty path — confirm, else this
                // slice indexing panics.
                let src_sub = &full_src_ancestors[1..];
                let mut new_src_dp = src_dir_page.clone();
                new_src_dp.entries.remove(src_idx);

                let new_src_child =
                    self.cow_subchain(src_sub, &src_inode, &new_src_dp, &mut stale_blocks)?;

                // Update the common ancestor's entry for the source branch.
                let src_child_name = src_remaining[0];
                let ci = merged_common_dp
                    .entries
                    .iter()
                    .position(|e| e.name == src_child_name)
                    .ok_or_else(|| FsError::DirectoryNotFound(src_child_name.to_string()))?;
                stale_blocks.push(merged_common_dp.entries[ci].inode_ref.block_id);
                merged_common_dp.entries[ci].inode_ref = ObjectRef::new(new_src_child);
            }

            // ── Destination side ──
            if dst_remaining.is_empty() {
                // Destination dir IS the common ancestor — add directly.
                merged_common_dp.entries.push(moved_entry);
            } else {
                // CoW the destination sub-chain with the entry added.
                // NOTE(review): same [1..] slicing assumption as the source
                // side above — full_dst_ancestors[0] is the common ancestor.
                let dst_sub = &full_dst_ancestors[1..];
                let mut new_dst_dp = dst_dir_page.clone();
                new_dst_dp.entries.push(moved_entry);

                let new_dst_child =
                    self.cow_subchain(dst_sub, &dst_inode, &new_dst_dp, &mut stale_blocks)?;

                // Update the common ancestor's entry for the dest branch.
                let dst_child_name = dst_remaining[0];
                let ci = merged_common_dp
                    .entries
                    .iter()
                    .position(|e| e.name == dst_child_name)
                    .ok_or_else(|| FsError::DirectoryNotFound(dst_child_name.to_string()))?;
                stale_blocks.push(merged_common_dp.entries[ci].inode_ref.block_id);
                merged_common_dp.entries[ci].inode_ref = ObjectRef::new(new_dst_child);
            }

            // CoW from the common ancestor up to root and commit.
            self.commit_cow_chain(&sb, &common_ancestors, &common_inode, &merged_common_dp)?;

            // Free sub-chain stale blocks (commit_cow_chain frees its own).
            // Done only after a successful commit so failure leaks rather
            // than frees live blocks.
            for block_id in stale_blocks {
                let _ = self.allocator.free(block_id);
            }
        }

        Ok(())
    }
945
946    /// Sync / flush. Writes all buffered data to blocks and calls through
947    /// to the block store sync.
948    pub fn sync(&mut self) -> FsResult<()> {
949        self.flush_all()?;
950        self.store.sync()
951    }
952
    /// Returns the number of free blocks in the allocator.
    ///
    /// Test-only helper, used to assert that CoW operations free exactly
    /// the blocks they supersede (i.e. nothing leaks across operations).
    #[cfg(test)]
    pub fn free_block_count(&self) -> u64 {
        self.allocator.free_count()
    }
958
959    // ── Internal helpers ──
960
    /// Flush a single file's buffered writes to the block store.
    ///
    /// Writes each dirty chunk to a freshly allocated block, rewrites the
    /// extent map and inode (CoW), updates the parent directory entry, and
    /// commits the change through `commit_cow_chain`.  Superseded blocks
    /// (old data chunks, old extent map, old inode) are freed only after
    /// the commit succeeds.  A no-op when the path has no buffered entry.
    fn flush_file(&mut self, path_key: &str) -> FsResult<()> {
        let dirty = match self.write_buffer.remove(path_key) {
            Some(d) => d,
            None => return Ok(()),
        };

        // Nothing to persist.
        // NOTE(review): this drops `dirty` (already removed from the buffer)
        // — assumes metadata_dirty is always set when dirty_chunks is
        // non-empty, otherwise buffered data would be silently discarded;
        // confirm against the write path.
        if !dirty.metadata_dirty {
            return Ok(());
        }

        // Re-resolve path from the current superblock.
        let (dir_parts, leaf) = Self::split_path(path_key)?;
        let sb = self
            .superblock
            .as_ref()
            .ok_or(FsError::NotInitialized)?
            .clone();
        let root_inode: Inode = self.read_obj(sb.root_inode_ref.block_id)?;
        let (ancestors, target_inode, dir_page) =
            self.resolve_dir_chain(&dir_parts, &root_inode)?;

        let mut extent_map = dirty.extent_map;

        // Collect stale data chunk blocks being overwritten.
        let mut stale_blocks: Vec<u64> = Vec::new();

        // Write each dirty chunk to a new block.
        for (&chunk_idx, chunk_data) in &dirty.dirty_chunks {
            // If this chunk already existed, the old data block is stale.
            if let Some(existing) = extent_map
                .entries
                .iter()
                .find(|e| e.chunk_index == chunk_idx)
            {
                stale_blocks.push(existing.data_ref.block_id);
            }

            let data_block = self.allocator.allocate()?;
            write_encrypted_raw(
                self.store.as_ref(),
                self.crypto.as_ref(),
                &self.codec,
                data_block,
                ObjectKind::FileDataChunk,
                chunk_data,
            )?;

            // Point the extent entry at the new block, creating the entry
            // if this chunk index was not mapped before.
            if let Some(entry) = extent_map
                .entries
                .iter_mut()
                .find(|e| e.chunk_index == chunk_idx)
            {
                entry.data_ref = ObjectRef::new(data_block);
                entry.plaintext_len = chunk_data.len() as u32;
            } else {
                extent_map.entries.push(ExtentEntry {
                    chunk_index: chunk_idx,
                    data_ref: ObjectRef::new(data_block),
                    plaintext_len: chunk_data.len() as u32,
                });
            }
        }

        // Keep entries ordered so readers can binary-search by chunk index.
        extent_map.entries.sort_by_key(|e| e.chunk_index);

        // Old extent map block is stale.
        if !dirty.base_inode.extent_map_ref.is_null() {
            stale_blocks.push(dirty.base_inode.extent_map_ref.block_id);
        }

        // Write extent map.
        let new_em_block = self.allocator.allocate()?;
        self.write_obj(new_em_block, ObjectKind::ExtentMap, &extent_map)?;

        // Old file inode block is stale — find it from the dir entry.
        let old_inode_block = dir_page
            .entries
            .iter()
            .find(|e| e.name == leaf)
            .map(|e| e.inode_ref.block_id);
        if let Some(blk) = old_inode_block {
            stale_blocks.push(blk);
        }

        // Write inode.
        let mut new_inode = dirty.base_inode;
        new_inode.size = dirty.size;
        new_inode.extent_map_ref = ObjectRef::new(new_em_block);
        new_inode.modified_at = now_secs();
        let new_inode_block = self.allocator.allocate()?;
        self.write_obj(new_inode_block, ObjectKind::Inode, &new_inode)?;

        // Update dir entry.
        let mut new_dir_page = dir_page.clone();
        for e in &mut new_dir_page.entries {
            if e.name == leaf {
                e.inode_ref = ObjectRef::new(new_inode_block);
            }
        }

        // commit_cow_chain frees its own stale blocks (dir pages, ancestor inodes).
        self.commit_cow_chain(&sb, &ancestors, &target_inode, &new_dir_page)?;

        // Free file-level stale blocks (data chunks, old extent map, old inode)
        // only after the commit, so a failed commit cannot free live blocks.
        for block_id in stale_blocks {
            let _ = self.allocator.free(block_id);
        }

        Ok(())
    }
1072
1073    /// Flush all buffered file writes to the block store.
1074    fn flush_all(&mut self) -> FsResult<()> {
1075        let keys: Vec<String> = self.write_buffer.keys().cloned().collect();
1076        for key in keys {
1077            self.flush_file(&key)?;
1078        }
1079        Ok(())
1080    }
1081
    /// Read from a file that has dirty (buffered) chunks, merging in-memory
    /// data with on-disk data.
    ///
    /// For each chunk overlapping `[offset, offset + len)`, data comes from
    /// (in priority order): the dirty in-memory chunk, the on-disk extent,
    /// or zeros (a hole with no extent entry).  The result is clamped to
    /// the file size recorded in `dirty.size` and may therefore be shorter
    /// than `len`.
    fn read_file_buffered(&self, dirty: &DirtyFile, offset: u64, len: usize) -> FsResult<Vec<u8>> {
        let chunk_size = max_chunk_payload(self.store.block_size());
        let file_size = dirty.size as usize;
        let start = offset as usize;
        // Reads at/after EOF, or zero-length reads, return empty.
        if start >= file_size || len == 0 {
            return Ok(Vec::new());
        }
        let end = std::cmp::min(start + len, file_size);
        let mut result = Vec::with_capacity(end - start);

        let first_chunk = start / chunk_size;
        let last_chunk = (end - 1) / chunk_size;

        for chunk_idx in first_chunk..=last_chunk {
            let chunk_file_start = chunk_idx * chunk_size;
            let chunk_file_end = std::cmp::min(chunk_file_start + chunk_size, file_size);
            let chunk_idx_u64 = chunk_idx as u64;

            // Get chunk data from buffer or disk.
            let chunk_data: Vec<u8> = if let Some(buf) = dirty.dirty_chunks.get(&chunk_idx_u64) {
                buf.clone()
            } else if let Ok(pos) = dirty
                .extent_map
                .entries
                .binary_search_by_key(&chunk_idx_u64, |e| e.chunk_index)
            {
                // On-disk chunk: decrypt, then trim block padding down to
                // the recorded plaintext length.
                let entry = &dirty.extent_map.entries[pos];
                let raw = read_encrypted_raw(
                    self.store.as_ref(),
                    self.crypto.as_ref(),
                    &self.codec,
                    entry.data_ref.block_id,
                )?;
                let plain_len = std::cmp::min(entry.plaintext_len as usize, raw.len());
                raw[..plain_len].to_vec()
            } else {
                // Hole: no buffered data and no extent entry — reads as zeros.
                vec![0u8; chunk_file_end - chunk_file_start]
            };

            // Slice to the requested range within this chunk.
            let read_start = if chunk_idx == first_chunk {
                start - chunk_file_start
            } else {
                0
            };
            let read_end = if chunk_idx == last_chunk {
                end - chunk_file_start
            } else {
                chunk_data.len()
            };
            // Clamp again: a chunk may be shorter than its logical extent
            // (e.g. a short dirty buffer or trimmed plaintext).
            let read_end = std::cmp::min(read_end, chunk_data.len());

            if read_start < read_end {
                result.extend_from_slice(&chunk_data[read_start..read_end]);
            }
        }

        Ok(result)
    }
1143
1144    fn alloc_inode_id(&mut self) -> InodeId {
1145        let id = self.next_inode_id;
1146        self.next_inode_id += 1;
1147        id
1148    }
1149
1150    fn validate_name(&self, name: &str) -> FsResult<()> {
1151        if name.is_empty() || name.contains('/') || name.contains('\0') {
1152            return Err(FsError::Internal("invalid name".into()));
1153        }
1154        if name.len() > MAX_NAME_LEN {
1155            return Err(FsError::NameTooLong(name.len(), MAX_NAME_LEN));
1156        }
1157        Ok(())
1158    }
1159
1160    fn read_obj<T: serde::de::DeserializeOwned>(&self, block_id: u64) -> FsResult<T> {
1161        read_encrypted_object(
1162            self.store.as_ref(),
1163            self.crypto.as_ref(),
1164            &self.codec,
1165            block_id,
1166        )
1167    }
1168
1169    fn write_obj<T: serde::Serialize>(
1170        &self,
1171        block_id: u64,
1172        kind: ObjectKind,
1173        obj: &T,
1174    ) -> FsResult<()> {
1175        write_encrypted_object(
1176            self.store.as_ref(),
1177            self.crypto.as_ref(),
1178            &self.codec,
1179            block_id,
1180            kind,
1181            obj,
1182        )
1183    }
1184
1185    fn read_all_chunks(&self, extent_map: &ExtentMap) -> FsResult<Vec<u8>> {
1186        let mut entries = extent_map.entries.clone();
1187        entries.sort_by_key(|e| e.chunk_index);
1188
1189        let mut buf = Vec::new();
1190        for entry in &entries {
1191            let chunk = read_encrypted_raw(
1192                self.store.as_ref(),
1193                self.crypto.as_ref(),
1194                &self.codec,
1195                entry.data_ref.block_id,
1196            )?;
1197            // Only take plaintext_len bytes (chunk may have been decrypted from padded block).
1198            let len = entry.plaintext_len as usize;
1199            if len <= chunk.len() {
1200                buf.extend_from_slice(&chunk[..len]);
1201            } else {
1202                buf.extend_from_slice(&chunk);
1203            }
1204        }
1205        Ok(buf)
1206    }
1207
1208    fn read_storage_header(&self) -> FsResult<StorageHeader> {
1209        let block = self.store.read_block(BLOCK_STORAGE_HEADER)?;
1210        if block.len() < 4 {
1211            return Err(FsError::InvalidSuperblock);
1212        }
1213        let len = u32::from_le_bytes([block[0], block[1], block[2], block[3]]) as usize;
1214        if len == 0 || 4 + len > block.len() {
1215            return Err(FsError::InvalidSuperblock);
1216        }
1217        self.codec
1218            .deserialize_object::<StorageHeader>(&block[4..4 + len])
1219    }
1220
1221    fn commit_superblock(&mut self, sb: Superblock) -> FsResult<()> {
1222        let new_sb_block = self.txn.commit(
1223            self.store.as_ref(),
1224            self.crypto.as_ref(),
1225            &self.codec,
1226            &self.allocator,
1227            &sb,
1228        )?;
1229        // Free the previous superblock block now that the new one is committed.
1230        if let Some(old) = self.last_superblock_block {
1231            let _ = self.allocator.free(old);
1232        }
1233        self.last_superblock_block = Some(new_sb_block);
1234        self.superblock = Some(sb);
1235        Ok(())
1236    }
1237
    /// Walk the metadata tree from the superblock and mark all referenced blocks
    /// as allocated in the allocator. Used during open/mount.
    ///
    /// Also recovers `next_inode_id` as a side effect: `mark_inode_tree`
    /// raises it above every inode ID seen during the walk.  Reserved root
    /// pointer blocks are assumed to be pre-marked elsewhere — TODO confirm.
    fn rebuild_allocator(&mut self, sb: &Superblock) -> FsResult<()> {
        // The superblock object lives in a block chosen by the transaction
        // manager; recover the latest root pointer to learn which one, and
        // mark it allocated so it is never handed out for new data.
        let (rp, _) = TransactionManager::recover_latest(self.store.as_ref(), &self.codec)?
            .ok_or(FsError::InvalidRootPointer)?;
        self.allocator.mark_allocated(rp.superblock_ref.block_id)?;

        // Walk the whole tree starting at the root inode; this marks every
        // inode, directory page, extent map, and data chunk block.
        self.mark_inode_tree(sb.root_inode_ref.block_id)?;

        Ok(())
    }
1259
1260    fn mark_inode_tree(&mut self, inode_block: u64) -> FsResult<()> {
1261        self.allocator.mark_allocated(inode_block)?;
1262        let inode: Inode = self.read_obj(inode_block)?;
1263
1264        if inode.id >= self.next_inode_id {
1265            self.next_inode_id = inode.id + 1;
1266        }
1267
1268        match inode.kind {
1269            InodeKind::Directory => {
1270                if !inode.directory_page_ref.is_null() {
1271                    self.allocator
1272                        .mark_allocated(inode.directory_page_ref.block_id)?;
1273                    let dir_page: DirectoryPage =
1274                        self.read_obj(inode.directory_page_ref.block_id)?;
1275                    for entry in &dir_page.entries {
1276                        self.mark_inode_tree(entry.inode_ref.block_id)?;
1277                    }
1278                }
1279            }
1280            InodeKind::File => {
1281                if !inode.extent_map_ref.is_null() {
1282                    self.allocator
1283                        .mark_allocated(inode.extent_map_ref.block_id)?;
1284                    let extent_map: ExtentMap = self.read_obj(inode.extent_map_ref.block_id)?;
1285                    for entry in &extent_map.entries {
1286                        self.allocator.mark_allocated(entry.data_ref.block_id)?;
1287                    }
1288                }
1289            }
1290        }
1291        Ok(())
1292    }
1293}
1294
/// Return type for directory listings (used by FFI and public API).
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct DirListEntry {
    /// Entry name (leaf component only, no path separators).
    pub name: String,
    /// Whether the entry is a file or a directory.
    pub kind: InodeKind,
    /// Size in bytes.
    // NOTE(review): presumably copied from the inode's `size` field —
    // directory semantics not established here; confirm at the call site.
    pub size: u64,
    /// Inode identifier of the entry.
    pub inode_id: InodeId,
}