Skip to main content

arcbox_ext4/
formatter.rs

1// Core ext4 image formatter.
2//
3// Creates ext4 filesystem images from scratch using a single-pass sequential
4// write strategy.  The public entry point is `Formatter::new()` followed by
5// repeated `create()` / `link()` / `unlink()` calls, finalized with `close()`.
6
7use std::collections::HashMap;
8use std::fs::{File, OpenOptions};
9use std::io::{self, Read, Seek, SeekFrom, Write};
10use std::path::Path;
11
12use uuid::Uuid;
13
14use crate::constants::*;
15use crate::dir;
16use crate::error::{FormatError, FormatResult};
17use crate::extent;
18use crate::file_tree::{BlockRange, FileTree, FileTreeNode, InodeNumber};
19use crate::types::*;
20use crate::xattr::{ExtendedAttribute, XattrState};
21
22/// Maximum bytes that fit in the ext4 superblock's `volume_name` field.
23const VOLUME_NAME_LEN: usize = 16;
24
25// ---------------------------------------------------------------------------
26// FormatOptions
27// ---------------------------------------------------------------------------
28
/// Parameters for creating a new ext4 image.
///
/// Construct via [`FormatOptions::new`] and layer on [`uuid`] / [`label`] as
/// needed. The resulting options are passed to [`Formatter::with_options`],
/// which validates them (block size, label) before touching the filesystem.
///
/// [`uuid`]: Self::uuid
/// [`label`]: Self::label
#[derive(Clone, Debug)]
pub struct FormatOptions {
    /// Block size in bytes. Only 4096 is currently accepted; the field is
    /// explicit so future versions can widen the accepted set without another
    /// API break.
    pub block_size: u32,
    /// Total size of the image file in bytes. The formatter may grow the
    /// image beyond this to reach a block-group boundary at close() time.
    pub size: u64,
    /// UUID written to the superblock. `None` picks a random v4 UUID at
    /// format time.
    pub uuid: Option<Uuid>,
    /// Volume label. Must be ≤ 16 bytes UTF-8 and contain no NUL bytes
    /// (checked by `validate_label`). `None` leaves the field zeroed.
    pub label: Option<String>,
}
51
52impl FormatOptions {
53    /// Create options for an image of the given size, with default
54    /// `block_size = 4096`, random UUID, no label.
55    pub fn new(size: u64) -> Self {
56        Self {
57            block_size: 4096,
58            size,
59            uuid: None,
60            label: None,
61        }
62    }
63
64    /// Set an explicit UUID instead of a randomly generated one.
65    pub fn uuid(mut self, uuid: Uuid) -> Self {
66        self.uuid = Some(uuid);
67        self
68    }
69
70    /// Set the volume label.
71    pub fn label(mut self, label: impl Into<String>) -> Self {
72        self.label = Some(label.into());
73        self
74    }
75}
76
77fn validate_label(label: &str) -> FormatResult<()> {
78    if label.as_bytes().len() > VOLUME_NAME_LEN {
79        return Err(FormatError::InvalidLabel(format!(
80            "label {label:?} is {} bytes, ext4 limit is {VOLUME_NAME_LEN}",
81            label.as_bytes().len()
82        )));
83    }
84    if label.contains('\0') {
85        return Err(FormatError::InvalidLabel(format!(
86            "label {label:?} contains a NUL byte"
87        )));
88    }
89    Ok(())
90}
91
92// ---------------------------------------------------------------------------
93// FileTimestamps
94// ---------------------------------------------------------------------------
95
/// Timestamps to apply to a newly created inode.
///
/// Each pair is `(seconds_lo, extra)` in ext4 format -- the same layout
/// returned by `timestamp_now()`. `create()` maps the pairs onto the
/// inode fields as documented per-field below.
pub struct FileTimestamps {
    /// Written to the inode's `atime` / `atime_extra` (last access).
    pub access_lo: u32,
    pub access_hi: u32,
    /// Written to `mtime` / `mtime_extra` (content modification).
    pub modification_lo: u32,
    pub modification_hi: u32,
    /// Written to `crtime` / `crtime_extra` (creation).
    pub creation_lo: u32,
    pub creation_hi: u32,
    /// Written to `ctime` / `ctime_extra` (inode change time).
    pub now_lo: u32,
    pub now_hi: u32,
}
110
111impl Default for FileTimestamps {
112    fn default() -> Self {
113        let (lo, hi) = timestamp_now();
114        Self {
115            access_lo: lo,
116            access_hi: hi,
117            modification_lo: lo,
118            modification_hi: hi,
119            creation_lo: lo,
120            creation_hi: hi,
121            now_lo: lo,
122            now_hi: hi,
123        }
124    }
125}
126
127// ---------------------------------------------------------------------------
128// Formatter
129// ---------------------------------------------------------------------------
130
/// Single-pass ext4 image builder.
///
/// Data blocks are written sequentially as entries are created; metadata
/// (directory entries, inode table, bitmaps, group descriptors, superblock)
/// is written by `close()`.
pub struct Formatter {
    /// Backing image file; the write cursor doubles as the "next free byte"
    /// pointer for the sequential allocator.
    file: File,
    /// Filesystem block size in bytes (currently always 4096).
    block_size: u32,
    /// Total image size in bytes; may be grown at close() to align to a
    /// block-group boundary.
    size: u64,
    /// All inodes, indexed by `inode_number - 1` (inode numbers are 1-based).
    inodes: Vec<Inode>,
    /// In-memory path tree mapping names to inodes and owned block ranges.
    tree: FileTree,
    /// Block ranges released by `unlink()` once their inode's link count
    /// reached zero.
    deleted_blocks: Vec<BlockRange>,
    /// Block ranges owned by inodes that still have links_count > 0 but
    /// whose original tree node has been detached.  Keyed by inode number.
    /// Used to reclaim blocks when the last hard-link reference is removed.
    deferred_blocks: HashMap<u32, Vec<BlockRange>>,
    /// UUID to write into the superblock. `None` triggers a random v4 at
    /// format time.
    uuid: Option<Uuid>,
    /// Volume label to copy into the superblock's `volume_name` field. Validated
    /// on construction.
    label: Option<String>,
}
150
151impl Formatter {
152    // -- Computed properties ------------------------------------------------
153
154    #[inline]
155    fn blocks_per_group(&self) -> u32 {
156        self.block_size * 8
157    }
158
159    #[inline]
160    fn max_inodes_per_group(&self) -> u32 {
161        self.block_size * 8
162    }
163
164    #[inline]
165    fn groups_per_descriptor_block(&self) -> u32 {
166        self.block_size / 32
167    }
168
169    #[inline]
170    fn block_count(&self) -> u32 {
171        ((self.size - 1) / self.block_size as u64 + 1) as u32
172    }
173
174    #[inline]
175    fn group_count(&self) -> u32 {
176        (self.block_count() - 1) / self.blocks_per_group() + 1
177    }
178
    /// Number of blocks reserved for the group descriptor table.
    ///
    /// NOTE(review): the ceil-divide result is multiplied by 32, reserving
    /// 32x the descriptor blocks strictly required for the current group
    /// count.  The final group count is only known at close(), which checks
    /// that it still fits in this reservation, so the factor looks like
    /// deliberate growth headroom -- confirm before changing.
    #[inline]
    fn group_descriptor_blocks(&self) -> u32 {
        ((self.group_count() - 1) / self.groups_per_descriptor_block() + 1) * 32
    }
183
    /// Current byte offset of the file cursor.
    ///
    /// NOTE(review): an I/O error from `stream_position()` is silently
    /// mapped to offset 0 instead of being propagated.  Callers treat this
    /// as infallible, so a real seek failure would silently corrupt layout
    /// math -- consider surfacing the error.
    #[inline]
    fn pos(&mut self) -> u64 {
        self.file.stream_position().unwrap_or(0)
    }
188
189    #[inline]
190    fn current_block(&mut self) -> u32 {
191        let p = self.pos();
192        (p / self.block_size as u64) as u32
193    }
194
195    fn seek_to_block(&mut self, block: u32) -> io::Result<()> {
196        self.file
197            .seek(SeekFrom::Start(block as u64 * self.block_size as u64))?;
198        Ok(())
199    }
200
201    fn align_to_block(&mut self) -> io::Result<()> {
202        let p = self.pos();
203        if p % self.block_size as u64 != 0 {
204            let blk = self.current_block() + 1;
205            self.seek_to_block(blk)?;
206        }
207        Ok(())
208    }
209
210    // -- Constructor -------------------------------------------------------
211
    /// Create a new formatter that writes an ext4 image to `path`.
    ///
    /// The file is truncated and re-created as a sparse file of `opts.size`
    /// bytes. The root directory (inode 2) and the `/lost+found` directory
    /// (required by e2fsck) are created automatically.
    ///
    /// Only `opts.block_size == 4096` is currently accepted; other values
    /// return [`FormatError::UnsupportedBlockSize`]. Labels are validated
    /// eagerly — an oversize or NUL-containing label returns
    /// [`FormatError::InvalidLabel`] before any file work happens.
    pub fn with_options(path: &Path, opts: FormatOptions) -> FormatResult<Self> {
        // Only 4096-byte blocks are supported.  Supporting smaller block sizes
        // (1024, 2048) would require first_data_block=1 and different group
        // descriptor offset calculations throughout the formatter and reader.
        if opts.block_size != 4096 {
            return Err(FormatError::UnsupportedBlockSize(opts.block_size));
        }

        // Validate the label before any filesystem side effects.
        if let Some(label) = &opts.label {
            validate_label(label)?;
        }

        let file = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .truncate(true)
            .open(path)?;

        // Create a sparse file.
        file.set_len(opts.size)?;

        // Reserve the first 10 inodes (indices 0..9  =>  inode numbers 1..10).
        //   [0] = defective block inode  (empty / default)
        //   [1] = root inode             (inode 2)
        //   [2..9] = reserved            (empty / default)
        let mut inodes = Vec::with_capacity(16);
        inodes.push(Inode::default()); // inode 1 -- defective blocks
        inodes.push(Inode::root_inode()); // inode 2 -- root directory
        for _ in 2..10 {
            inodes.push(Inode::default()); // inodes 3..10
        }

        let tree = FileTree::new(ROOT_INODE, "/");

        let mut fmt = Self {
            file,
            block_size: opts.block_size,
            size: opts.size,
            inodes,
            tree,
            deleted_blocks: Vec::new(),
            deferred_blocks: HashMap::new(),
            uuid: opts.uuid,
            label: opts.label,
        };

        // Seek past the superblock (block 0) and the group descriptor table;
        // data blocks written by create() start immediately after.
        let gdb = fmt.group_descriptor_blocks();
        fmt.seek_to_block(gdb + 1)?;

        // /lost+found is required by e2fsck.  Created with mode 0700, no
        // data, no xattrs; uid/gid inherit from the root inode.
        fmt.create(
            "/lost+found",
            make_mode(file_mode::S_IFDIR, 0o700),
            None,
            None,
            None,
            None,
            None,
            None,
        )?;

        Ok(fmt)
    }
287
288    /// Create a new formatter with defaults for UUID (random) and label (empty).
289    ///
290    /// Thin shim over [`Formatter::with_options`] for the common case where
291    /// only the image size matters. Callers that need a specific UUID or
292    /// volume label should use `with_options` directly.
293    pub fn new(path: &Path, block_size: u32, min_disk_size: u64) -> FormatResult<Self> {
294        Self::with_options(
295            path,
296            FormatOptions {
297                block_size,
298                size: min_disk_size,
299                uuid: None,
300                label: None,
301            },
302        )
303    }
304
305    // -- create() ----------------------------------------------------------
306
    /// Create a file, directory, or symlink at `path`.
    ///
    /// Parent directories are created recursively with mode 0755, inheriting
    /// uid/gid from the nearest existing parent (like `mkdir -p`).
    ///
    /// * `mode` -- file-type + permission bits (see `file_mode`).
    /// * `link` -- `Some(target)` creates a symbolic link to `target`.
    /// * `ts` -- inode timestamps; `None` stamps everything with "now".
    /// * `data` -- content for regular files; `None` creates an empty file.
    /// * `uid` / `gid` -- ownership; `None` inherits from the parent inode.
    /// * `xattrs` -- extended attributes, stored inline and/or in a
    ///   separate xattr block.
    #[allow(clippy::too_many_arguments)]
    pub fn create(
        &mut self,
        path: &str,
        mode: u16,
        link: Option<&str>,
        ts: Option<FileTimestamps>,
        data: Option<&mut dyn Read>,
        uid: Option<u32>,
        gid: Option<u32>,
        xattrs: Option<&HashMap<String, Vec<u8>>>,
    ) -> FormatResult<()> {
        let path_buf = std::path::PathBuf::from(path);

        // ------- Handle "already exists" cases -------
        if let Some(existing_idx) = self.tree.lookup(&path_buf) {
            let existing_inode_num = self.tree.node(existing_idx).inode;
            let existing_inode = &self.inodes[(existing_inode_num - 1) as usize];

            if is_dir(mode) {
                if existing_inode.is_dir() {
                    // mkdir -p: directory already exists.  Only update mode
                    // and ownership when the caller explicitly provided them
                    // (uid/gid != None).  This prevents recursive parent-
                    // creation from silently downgrading permissions set by
                    // an earlier explicit create() call.
                    //
                    // NOTE(review): when the resolved node's name differs
                    // from basename(path), control falls through to the
                    // normal creation path below -- confirm this is the
                    // intended handling; it is not obvious from this file
                    // alone when lookup() can return such a node.
                    if self.tree.node(existing_idx).name == basename(path) {
                        if uid.is_some() || gid.is_some() {
                            let inode =
                                &mut self.inodes[(existing_inode_num - 1) as usize];
                            inode.mode = mode;
                            if let Some(u) = uid {
                                inode.set_uid(u);
                            }
                            if let Some(g) = gid {
                                inode.set_gid(g);
                            }
                        }
                        return Ok(());
                    }
                } else {
                    // A regular file (or other non-dir) blocks directory creation.
                    return Err(FormatError::NotDirectory(path_buf));
                }
            } else if self.tree.node(existing_idx).link.is_some() {
                // Hard-link entries can always be overwritten.
                self.unlink(path, false)?;
            } else {
                // File or symlink replacing an existing entry.
                if existing_inode.is_dir() && !is_link(mode) {
                    return Err(FormatError::NotFile(path_buf));
                }
                self.unlink(path, false)?;
            }
        }

        // ------- Ensure parent directories exist -------
        // parent_of("/") == "/", so the recursion terminates at the root.
        let parent_path = parent_of(path);
        if parent_path != path {
            self.create(
                parent_path,
                make_mode(file_mode::S_IFDIR, 0o755),
                None,
                None,
                None,
                None,
                None,
                None,
            )?;
        }

        let parent_path_buf = std::path::PathBuf::from(parent_path);
        let parent_idx = self
            .tree
            .lookup(&parent_path_buf)
            .ok_or_else(|| FormatError::NotFound(parent_path_buf.clone()))?;
        let parent_inode_num = self.tree.node(parent_idx).inode;
        let parent_inode = &self.inodes[(parent_inode_num - 1) as usize];

        if parent_inode.links_count as u32 >= MAX_LINKS {
            return Err(FormatError::MaximumLinksExceeded(parent_path_buf));
        }

        // ------- Build the child inode -------
        let mut child_inode = Inode::default();
        child_inode.mode = mode;

        // uid / gid -- inherit from parent when not specified.
        if let Some(u) = uid {
            child_inode.set_uid(u);
        } else {
            child_inode.uid = parent_inode.uid;
            child_inode.uid_hi = parent_inode.uid_hi;
        }
        if let Some(g) = gid {
            child_inode.set_gid(g);
        } else {
            child_inode.gid = parent_inode.gid;
            child_inode.gid_hi = parent_inode.gid_hi;
        }

        // Extended attributes.
        if let Some(xattr_map) = xattrs {
            if !xattr_map.is_empty() {
                // The child inode is pushed onto `self.inodes` at the end of
                // this function, so its 1-based inode number is len() + 1.
                let child_ino = self.inodes.len() as u32 + 1;
                let mut state =
                    XattrState::new(child_ino, INODE_EXTRA_SIZE, self.block_size);
                // The reference implementation adds a sentinel "system.data" attribute.
                state.add(ExtendedAttribute::new("system.data", Vec::new()))?;
                for (name, value) in xattr_map {
                    state.add(ExtendedAttribute::new(name, value.clone()))?;
                }
                if state.has_inline() {
                    let buf = state.write_inline()?;
                    child_inode.inline_xattrs.copy_from_slice(&buf);
                }
                if state.has_block() {
                    let block_buf = state.write_block()?;
                    self.align_to_block()?;
                    child_inode.xattr_block_lo = self.current_block();
                    self.file.write_all(&block_buf)?;
                    // Account for the xattr block.  When HUGE_FILE is set,
                    // blocks_lo is in filesystem-block units; otherwise sectors.
                    if child_inode.flags & inode_flags::HUGE_FILE != 0 {
                        child_inode.blocks_lo += 1;
                    } else {
                        child_inode.blocks_lo += (self.block_size / 512) as u32;
                    }
                }
            }
        }

        // Timestamps.  `ts.now_*` feeds ctime (inode-change time); the
        // other pairs map 1:1 to atime/mtime/crtime.
        let ts = ts.unwrap_or_default();
        child_inode.atime = ts.access_lo;
        child_inode.atime_extra = ts.access_hi;
        child_inode.ctime = ts.now_lo;
        child_inode.ctime_extra = ts.now_hi;
        child_inode.mtime = ts.modification_lo;
        child_inode.mtime_extra = ts.modification_hi;
        child_inode.crtime = ts.creation_lo;
        child_inode.crtime_extra = ts.creation_hi;
        child_inode.links_count = 1;
        child_inode.extra_isize = EXTRA_ISIZE;
        // HUGE_FILE by default; cleared again below for symlinks.
        child_inode.flags = inode_flags::HUGE_FILE;

        // Align to block boundary before writing data.
        self.align_to_block()?;

        // start_block == end_block later means "no data blocks owned".
        let mut start_block: u32 = 0;
        let mut end_block: u32 = 0;

        // ------- Handle by file type -------
        if is_dir(mode) {
            // Directory: bump link counts; directory entries are deferred to
            // close() so they can be sorted.
            child_inode.links_count = 2;
            self.inodes[(parent_inode_num - 1) as usize].links_count += 1;
        } else if let Some(link_target) = link {
            // Symbolic link.
            start_block = self.current_block();
            let link_bytes = link_target.as_bytes();

            let size = if link_bytes.len() < 60 {
                // Short symlink: store inline in the inode's block field.
                child_inode.block[..link_bytes.len()].copy_from_slice(link_bytes);
                link_bytes.len() as u64
            } else {
                // Long symlink: write to data blocks.
                self.file.write_all(link_bytes)?;
                link_bytes.len() as u64
            };

            self.align_to_block()?;
            end_block = self.current_block();

            child_inode.set_file_size(size);
            child_inode.mode |= 0o777;
            child_inode.flags = 0;

            if link_bytes.len() < 60 {
                // Inline target: no data blocks charged to the inode.
                child_inode.blocks_lo = 0;
            } else {
                let blocks = BlockRange {
                    start: start_block,
                    end: end_block,
                };
                let mut cur = self.current_block();
                extent::write_extents(
                    &mut child_inode,
                    blocks,
                    self.block_size,
                    &mut self.file,
                    &mut cur,
                )?;
            }
        } else if is_reg(mode) {
            // Regular file.
            start_block = self.current_block();
            let mut size = 0u64;

            if let Some(reader) = data {
                // Stream block-sized chunks straight into the image,
                // enforcing the maximum file size as we go.
                let mut buf = vec![0u8; self.block_size as usize];
                loop {
                    let n = reader.read(&mut buf)?;
                    if n == 0 {
                        break;
                    }
                    size += n as u64;
                    if size > MAX_FILE_SIZE {
                        return Err(FormatError::FileTooBig(size));
                    }
                    self.file.write_all(&buf[..n])?;
                }
            }

            self.align_to_block()?;
            end_block = self.current_block();

            child_inode.set_file_size(size);

            let blocks = BlockRange {
                start: start_block,
                end: end_block,
            };
            let mut cur = self.current_block();
            extent::write_extents(
                &mut child_inode,
                blocks,
                self.block_size,
                &mut self.file,
                &mut cur,
            )?;
        } else {
            return Err(FormatError::UnsupportedFiletype);
        }

        // ------- Register the new inode and tree node -------
        self.inodes.push(child_inode);
        let child_inode_num = self.inodes.len() as InodeNumber;

        let child_node = FileTreeNode {
            inode: child_inode_num,
            name: basename(path).to_string(),
            children: Vec::new(),
            parent: None,
            // Empty range (start == end) means the entry owns no data blocks.
            blocks: if start_block != end_block {
                Some(BlockRange {
                    start: start_block,
                    end: end_block,
                })
            } else {
                None
            },
            additional_blocks: Vec::new(),
            link: None,
        };
        self.tree.add_child(parent_idx, child_node);

        Ok(())
    }
572
573    // -- Query helpers (for Ext4Writer / layer.rs) --------------------------
574
575    /// Check whether `path` exists in the in-memory file tree.
576    pub fn exists(&self, path: &str) -> bool {
577        self.tree.lookup(&std::path::PathBuf::from(path)).is_some()
578    }
579
580    /// Check whether `path` exists and is a directory.
581    pub fn is_dir(&self, path: &str) -> bool {
582        let path_buf = std::path::PathBuf::from(path);
583        if let Some(idx) = self.tree.lookup(&path_buf) {
584            let ino = self.tree.node(idx).inode;
585            self.inodes[(ino - 1) as usize].is_dir()
586        } else {
587            false
588        }
589    }
590
591    /// List the names of immediate children of a directory (excluding `.`
592    /// and `..`).  Returns an empty vec if `path` is not a directory.
593    pub fn list_dir(&self, path: &str) -> Vec<String> {
594        let path_buf = std::path::PathBuf::from(path);
595        let Some(idx) = self.tree.lookup(&path_buf) else {
596            return Vec::new();
597        };
598        self.tree
599            .node(idx)
600            .children
601            .iter()
602            .map(|&ci| self.tree.node(ci).name.clone())
603            .collect()
604    }
605
606    /// Update the permission bits of an existing entry.
607    pub fn set_permissions(&mut self, path: &str, mode: u16) -> FormatResult<()> {
608        let path_buf = std::path::PathBuf::from(path);
609        let idx = self
610            .tree
611            .lookup(&path_buf)
612            .ok_or_else(|| FormatError::NotFound(path_buf))?;
613        let ino = self.tree.node(idx).inode;
614        let inode = &mut self.inodes[(ino - 1) as usize];
615        // Preserve the file-type bits, replace the permission bits.
616        inode.mode = (inode.mode & file_mode::TYPE_MASK) | (mode & !file_mode::TYPE_MASK);
617        Ok(())
618    }
619
620    /// Update the owner uid/gid of an existing entry.
621    pub fn set_owner(&mut self, path: &str, uid: u32, gid: u32) -> FormatResult<()> {
622        let path_buf = std::path::PathBuf::from(path);
623        let idx = self
624            .tree
625            .lookup(&path_buf)
626            .ok_or_else(|| FormatError::NotFound(path_buf))?;
627        let ino = self.tree.node(idx).inode;
628        let inode = &mut self.inodes[(ino - 1) as usize];
629        inode.set_uid(uid);
630        inode.set_gid(gid);
631        Ok(())
632    }
633
634    // -- link() ------------------------------------------------------------
635
636    /// Create a hard link at `link_path` pointing to `target_path`.
637    pub fn link(&mut self, link_path: &str, target_path: &str) -> FormatResult<()> {
638        let target_buf = std::path::PathBuf::from(target_path);
639        let target_idx = self
640            .tree
641            .lookup(&target_buf)
642            .ok_or_else(|| FormatError::NotFound(target_buf.clone()))?;
643        let target_inode_num = self.tree.node(target_idx).inode;
644        let target_inode = &self.inodes[(target_inode_num - 1) as usize];
645
646        if target_inode.is_dir() {
647            return Err(FormatError::CannotHardlinkDirectory(target_buf));
648        }
649
650        // Increment link count on the target inode.
651        self.inodes[(target_inode_num - 1) as usize].links_count += 1;
652
653        // If the link path already exists, remove it first.
654        let link_buf = std::path::PathBuf::from(link_path);
655        if self.tree.lookup(&link_buf).is_some() {
656            self.unlink(link_path, false)?;
657        }
658
659        let parent_path = parent_of(link_path);
660        let parent_path_buf = std::path::PathBuf::from(parent_path);
661        let parent_idx = self
662            .tree
663            .lookup(&parent_path_buf)
664            .ok_or_else(|| FormatError::NotFound(parent_path_buf.clone()))?;
665
666        let parent_inode_num = self.tree.node(parent_idx).inode;
667        if self.inodes[(parent_inode_num - 1) as usize].links_count as u32 >= MAX_LINKS {
668            return Err(FormatError::MaximumLinksExceeded(parent_path_buf));
669        }
670
671        let link_node = FileTreeNode {
672            inode: ROOT_INODE, // placeholder, not used for hard links
673            name: basename(link_path).to_string(),
674            children: Vec::new(),
675            parent: None,
676            blocks: None,
677            additional_blocks: Vec::new(),
678            link: Some(target_inode_num),
679        };
680        self.tree.add_child(parent_idx, link_node);
681
682        Ok(())
683    }
684
685    // -- unlink() ----------------------------------------------------------
686
    /// Remove the entry at `path`.
    ///
    /// When `directory_whiteout` is true, only children are removed and the
    /// directory entry itself is kept (used for overlay whiteouts).
    ///
    /// Unlinking a nonexistent path is a no-op (returns `Ok`).  Children
    /// are removed recursively first.  An inode's data blocks are reclaimed
    /// (pushed onto `deleted_blocks`) only when its link count reaches
    /// zero; while other hard links remain, the ranges are parked in
    /// `deferred_blocks` keyed by inode number.
    pub fn unlink(&mut self, path: &str, directory_whiteout: bool) -> FormatResult<()> {
        let path_buf = std::path::PathBuf::from(path);
        let node_idx = match self.tree.lookup(&path_buf) {
            Some(idx) => idx,
            None => return Ok(()), // nothing to unlink
        };

        let inode_num = self.tree.node(node_idx).inode;
        let inode_idx = (inode_num - 1) as usize;

        // Whiteout only makes sense for directories.
        if directory_whiteout && !self.inodes[inode_idx].is_dir() {
            return Err(FormatError::NotDirectory(path_buf));
        }

        // Recursively unlink children.  Names are collected up front so the
        // tree is not borrowed while the recursive calls mutate it.
        let child_names: Vec<String> = self
            .tree
            .node(node_idx)
            .children
            .iter()
            .map(|&ci| self.tree.node(ci).name.clone())
            .collect();
        for child_name in child_names {
            let child_path = if path == "/" {
                format!("/{child_name}")
            } else {
                format!("{path}/{child_name}")
            };
            self.unlink(&child_path, false)?;
        }

        if directory_whiteout {
            return Ok(());
        }

        // Detach from parent and update parent's link count for directories.
        // A child directory contributes one ".." link to its parent; the
        // guard keeps the parent's count from dropping below 2.
        let parent_path = parent_of(path);
        let parent_path_buf = std::path::PathBuf::from(parent_path);
        if let Some(parent_idx) = self.tree.lookup(&parent_path_buf) {
            let parent_inode_num = self.tree.node(parent_idx).inode;
            if self.inodes[inode_idx].is_dir()
                && self.inodes[(parent_inode_num - 1) as usize].links_count > 2
            {
                self.inodes[(parent_inode_num - 1) as usize].links_count -= 1;
            }
            self.tree.remove_child(parent_idx, basename(path));
        }

        // Determine which inode to decrement and potentially reclaim.
        // For hard-link entries the target inode is the one that matters;
        // for regular entries it is the node's own inode.
        // NOTE(review): `node_idx` is read again after remove_child() above;
        // this assumes FileTree indices stay stable across removal --
        // confirm against file_tree.rs.
        let (target_ino, target_idx) =
            if let Some(linked_ino) = self.tree.node(node_idx).link {
                (linked_ino, (linked_ino - 1) as usize)
            } else {
                (inode_num, inode_idx)
            };

        // Reserved inodes (numbers below FIRST_INODE) are never decremented
        // or reclaimed.
        if target_ino > FIRST_INODE - 1 {
            // Saturating decrement: never underflow a count that is already 0.
            if self.inodes[target_idx].links_count > 0 {
                self.inodes[target_idx].links_count -= 1;
            }

            // Collect block ranges from this tree node (if any).  An empty
            // range (start == end) owns no blocks and is skipped.
            let node = self.tree.node(node_idx);
            let mut node_blocks: Vec<BlockRange> = Vec::new();
            if let Some(b) = node.blocks {
                if b.start != b.end {
                    node_blocks.push(b);
                }
            }
            for blk in &node.additional_blocks {
                node_blocks.push(*blk);
            }

            if self.inodes[target_idx].links_count == 0 {
                // Last reference removed -- reclaim blocks and mark deleted.
                // Blocks may come from this node (if it is the original) or
                // from deferred_blocks (if the original was unlinked earlier
                // while hard links still existed).
                for blk in &node_blocks {
                    self.deleted_blocks.push(*blk);
                }
                if let Some(deferred) = self.deferred_blocks.remove(&target_ino) {
                    for blk in deferred {
                        self.deleted_blocks.push(blk);
                    }
                }

                // Reset the inode and stamp its deletion time.
                let (now_lo, _) = timestamp_now();
                self.inodes[target_idx] = Inode::default();
                self.inodes[target_idx].dtime = now_lo;
            } else if !node_blocks.is_empty() {
                // The original node is being removed but the inode still has
                // remaining hard-link references.  Stash the block ranges so
                // they can be reclaimed when the last link is finally deleted.
                self.deferred_blocks
                    .entry(target_ino)
                    .or_default()
                    .extend(node_blocks);
            }
        }

        Ok(())
    }
796
797    // -- close() -----------------------------------------------------------
798
    /// Finalize the ext4 image.
    ///
    /// Writes directory entries (BFS order), the inode table, block/inode
    /// bitmaps, group descriptors, and the superblock.  Consumes `self`.
    ///
    /// Data blocks were already streamed out sequentially by earlier
    /// `create()` calls; this pass appends the directory blocks, inode
    /// table, and bitmaps at the current write position, then seeks back to
    /// block 1 (group descriptors) and block 0 (superblock) to finish.
    pub fn close(mut self) -> FormatResult<()> {
        // -- Step 1: BFS-commit directory entries --
        self.commit_directories()?;

        // -- Step 2: Optimize block group layout --
        // The layout is chosen *after* all data has been written so the
        // optimizer knows the exact block and inode counts it must cover.
        let current_blk = self.current_block();
        let inode_count = self.inodes.len() as u32;
        let (block_groups, inodes_per_group) =
            self.optimize_block_group_layout(current_blk, inode_count);

        // -- Step 3: Write inode table --
        // Returns the block number where the (contiguous) inode table starts.
        let inode_table_offset = self.commit_inode_table(block_groups, inodes_per_group)?;

        self.align_to_block()?;

        // -- Step 4: Write bitmaps and build group descriptors --
        // `data_size` is the first block past everything allocated so far:
        // data + directories + inode table + the bitmap blocks about to be
        // written.  Blocks below it are "used" in the block bitmaps.
        let bitmap_offset = self.current_block();
        let bitmap_size = block_groups * 2; // block bitmap + inode bitmap per group
        let data_size = bitmap_offset + bitmap_size;

        // Ensure the disk is large enough.
        // Minimum: every group except the last must be full; the last needs
        // at least one block.
        let minimum_disk_size = if block_groups == 1 {
            self.blocks_per_group()
        } else {
            (block_groups - 1) * self.blocks_per_group() + 1
        };

        if self.size < minimum_disk_size as u64 * self.block_size as u64 {
            self.size = minimum_disk_size as u64 * self.block_size as u64;
        }

        // Blocks occupied by one group's slice of the inode table.
        // NOTE(review): assumes `inodes_per_group * INODE_SIZE` is a multiple
        // of the block size — the increment used by
        // `optimize_block_group_layout` appears to guarantee this; confirm.
        let inode_table_size_per_group = inodes_per_group * INODE_SIZE / self.block_size;

        // Check if we need more groups for the full disk size.
        // `min_groups` = ceil(current file position in blocks / blocks-per-group).
        let min_groups =
            ((self.pos() / self.block_size as u64) - 1) / self.blocks_per_group() as u64 + 1;
        if self.size < min_groups * self.blocks_per_group() as u64 * self.block_size as u64 {
            self.size = min_groups * self.blocks_per_group() as u64 * self.block_size as u64;
            let saved_pos = self.pos();
            // set_len() may move the OS file cursor on some platforms, so
            // save and restore the logical write position explicitly.
            self.file.set_len(self.size)?;
            self.file.seek(SeekFrom::Start(saved_pos))?;
        }

        let total_groups =
            ((self.size / self.block_size as u64) - 1) / self.blocks_per_group() as u64 + 1;

        // Align disk size to block-group boundary.
        if self.size < total_groups * self.blocks_per_group() as u64 * self.block_size as u64 {
            self.size = total_groups * self.blocks_per_group() as u64 * self.block_size as u64;
            let saved_pos = self.pos();
            self.file.set_len(self.size)?;
            self.file.seek(SeekFrom::Start(saved_pos))?;
        }

        let total_groups_u32 = total_groups as u32;

        // Verify group descriptor space.
        // All group descriptors live in the blocks reserved after the
        // superblock; if the final group count needs more descriptor blocks
        // than were reserved up front, the image cannot be produced.
        let gd_block_count =
            (total_groups_u32 - 1) / self.groups_per_descriptor_block() + 1;
        if gd_block_count > self.group_descriptor_blocks() {
            return Err(FormatError::InsufficientSpaceForGroupDescriptorBlocks);
        }

        // Accumulators for the superblock's free-space summary.
        let mut total_used_blocks: u32 = 0;
        let mut total_used_inodes: u32 = 0;
        let mut group_descriptors = Vec::with_capacity(total_groups_u32 as usize);

        // Write bitmaps for groups that contain data.
        // These bitmap pairs are written sequentially at the current file
        // position (starting at `bitmap_offset`), i.e. *outside* their own
        // groups — the group descriptors point back at them.
        for group in 0..block_groups {
            let mut dirs: u32 = 0;
            let mut used_inodes: u32 = 0;
            let mut used_blocks: u32 = 0;

            // Two bitmaps per group: block bitmap + inode bitmap, each one
            // block in size.
            let mut bitmap = vec![0u8; self.block_size as usize * 2];

            // -- Block bitmap --
            let group_start = group * self.blocks_per_group();
            let group_end = group_start + self.blocks_per_group();

            if group_end <= data_size {
                // Fully allocated group.
                for byte in bitmap[..self.block_size as usize].iter_mut() {
                    *byte = 0xFF;
                }
                used_blocks = self.blocks_per_group();
            } else if group_start < data_size {
                // Partially allocated group.
                // ext4 bitmaps are little-endian bit order: bit `i % 8` of
                // byte `i / 8` covers block `group_start + i`.
                let used = data_size - group_start;
                for i in 0..used {
                    bitmap[(i / 8) as usize] |= 1 << (i % 8);
                    used_blocks += 1;
                }
            }

            // Group 0: adjust for unused group descriptor blocks.
            if group == 0 {
                let used_gd_blocks =
                    (total_groups_u32 - 1) / self.groups_per_descriptor_block() + 1;
                // Mark used GD blocks (block 0 = superblock, 1..=used_gd_blocks).
                for i in 0..=used_gd_blocks {
                    bitmap[(i / 8) as usize] |= 1 << (i % 8);
                }
                // Free the reserved-but-unused GD blocks.
                // Only decrement `used_blocks` for bits that were actually
                // set, so the free-block accounting stays exact.
                let gd_reserved = self.group_descriptor_blocks();
                for i in (used_gd_blocks + 1)..=gd_reserved {
                    let was_set =
                        (bitmap[(i / 8) as usize] >> (i % 8)) & 1;
                    bitmap[(i / 8) as usize] &= !(1 << (i % 8));
                    if was_set != 0 {
                        used_blocks -= 1;
                    }
                }
            }

            // Mark deleted blocks as free.
            // `deleted_blocks` holds ranges reclaimed by unlink(); clearing
            // them here lets the kernel reuse the space after mount.
            for deleted in &self.deleted_blocks {
                for blk in deleted.start..deleted.end {
                    if blk / self.blocks_per_group() == group {
                        let j = blk % self.blocks_per_group();
                        let was_set =
                            (bitmap[(j / 8) as usize] >> (j % 8)) & 1;
                        bitmap[(j / 8) as usize] &= !(1 << (j % 8));
                        if was_set != 0 {
                            used_blocks -= 1;
                        }
                    }
                }
            }

            // -- Inode bitmap (stored in the second block of the pair) --
            let inode_bitmap_start = self.block_size as usize;
            for i in 0..inodes_per_group {
                // Inode numbers are 1-based on disk.
                let ino = 1 + group * inodes_per_group + i;
                if ino > self.inodes.len() as u32 {
                    continue;
                }
                let inode = &self.inodes[(ino - 1) as usize];
                // Reserved inodes (1..10) are always marked used.
                // links_count == 0 means the inode was deleted via unlink().
                if ino > 10 && inode.links_count == 0 {
                    continue;
                }
                bitmap[inode_bitmap_start + (i / 8) as usize] |= 1 << (i % 8);
                used_inodes += 1;
                if inode.is_dir() {
                    dirs += 1;
                }
            }
            // Mark remaining inode-bitmap bits (past inodes_per_group) as occupied
            // so the kernel does not try to allocate them.
            // NOTE(review): starts at byte `inodes_per_group / 8`; if
            // `inodes_per_group` were not a multiple of 8 this would also
            // flip in-range bits — confirm the layout optimizer rules that out.
            for i in (inodes_per_group / 8)..self.block_size {
                bitmap[inode_bitmap_start + i as usize] = 0xFF;
            }

            self.file.write_all(&bitmap)?;

            // -- Build group descriptor --
            // Guard against underflow in case accounting ever over-counts.
            let free_blocks = if self.blocks_per_group() >= used_blocks {
                self.blocks_per_group() - used_blocks
            } else {
                0
            };
            let free_inodes = inodes_per_group - used_inodes;
            // Bitmap pair for group N sits at bitmap_offset + 2N / +2N+1.
            let block_bitmap_lo = (bitmap_offset + 2 * group) as u32;
            let inode_bitmap_lo = block_bitmap_lo + 1;
            let inode_table_lo =
                (inode_table_offset as u32) + group * inode_table_size_per_group;

            group_descriptors.push(GroupDescriptor {
                block_bitmap_lo,
                inode_bitmap_lo,
                inode_table_lo,
                free_blocks_count_lo: free_blocks as u16,
                free_inodes_count_lo: free_inodes as u16,
                used_dirs_count_lo: dirs as u16,
                flags: 0,
                exclude_bitmap_lo: 0,
                block_bitmap_csum_lo: 0,
                inode_bitmap_csum_lo: 0,
                itable_unused_lo: 0,
                checksum: 0,
            });

            total_used_blocks += used_blocks;
            total_used_inodes += used_inodes;
        }

        // -- Step 5: Extra (empty) block groups beyond data --
        // Empty groups carry only metadata: their inode-table blocks plus
        // the two bitmap blocks, placed at the *start* of each group.
        let empty_block_bitmap = {
            let mut bm = vec![0u8; self.blocks_per_group() as usize / 8];
            // Mark inode-table blocks + 2 bitmap blocks as used.
            for i in 0..(inode_table_size_per_group + 2) {
                bm[(i / 8) as usize] |= 1 << (i % 8);
            }
            bm
        };
        // Empty inode bitmap: all bits set (unallocatable) except the first
        // `inodes_per_group` slots, which are genuinely free.
        let empty_inode_bitmap = {
            let mut bm = vec![0xFFu8; self.blocks_per_group() as usize / 8];
            for i in 0..inodes_per_group as u16 {
                bm[(i / 8) as usize] &= !(1 << (i % 8));
            }
            bm
        };

        for group in block_groups..total_groups_u32 {
            // The final group may be truncated if the disk size is not a
            // multiple of the group size.
            let blocks_in_group = if group == total_groups_u32 - 1 {
                let rem = (self.size / self.block_size as u64) % self.blocks_per_group() as u64;
                if rem == 0 {
                    self.blocks_per_group()
                } else {
                    rem as u32
                }
            } else {
                self.blocks_per_group()
            };

            // Layout inside an empty group: [inode table][block bm][inode bm]…
            let bb_offset = group * self.blocks_per_group() + inode_table_size_per_group;
            let ib_offset = bb_offset + 1;
            let it_offset = group as u64 * self.blocks_per_group() as u64;
            let free_blocks_count = blocks_in_group.saturating_sub(inode_table_size_per_group + 2);
            let free_inodes_count = inodes_per_group;

            group_descriptors.push(GroupDescriptor {
                block_bitmap_lo: bb_offset,
                inode_bitmap_lo: ib_offset,
                inode_table_lo: it_offset as u32,
                free_blocks_count_lo: free_blocks_count as u16,
                free_inodes_count_lo: free_inodes_count as u16,
                used_dirs_count_lo: 0,
                flags: 0,
                exclude_bitmap_lo: 0,
                block_bitmap_csum_lo: 0,
                inode_bitmap_csum_lo: 0,
                itable_unused_lo: 0,
                checksum: 0,
            });

            total_used_blocks += inode_table_size_per_group + 2;

            // Write block bitmap + inode bitmap at the right offset.
            // NOTE: the inode tables of empty groups are never explicitly
            // written; set_len() above left those regions zero-filled.
            self.seek_to_block(bb_offset)?;

            if group == total_groups_u32 - 1
                && blocks_in_group < self.blocks_per_group()
            {
                // Last partial group: mark out-of-range blocks as used.
                let mut partial_bb =
                    vec![0u8; self.blocks_per_group() as usize / 8];
                for i in blocks_in_group..self.blocks_per_group() {
                    partial_bb[(i / 8) as usize] |= 1 << (i % 8);
                }
                for i in 0..(inode_table_size_per_group + 2) {
                    partial_bb[(i / 8) as usize] |= 1 << (i % 8);
                }
                self.file.write_all(&partial_bb)?;
                self.file.write_all(&empty_inode_bitmap)?;
            } else {
                self.file.write_all(&empty_block_bitmap)?;
                self.file.write_all(&empty_inode_bitmap)?;
            }
        }

        // -- Step 6: Write group descriptors at block 1 --
        self.seek_to_block(1)?;
        let mut gd_buf = vec![0u8; GroupDescriptor::SIZE];
        for gd in &group_descriptors {
            gd.write_to(&mut gd_buf);
            self.file.write_all(&gd_buf)?;
        }

        // -- Step 7: Write superblock --
        let computed_inodes = total_groups_u32 as u64 * inodes_per_group as u64;
        let mut blocks_count =
            total_groups_u32 as u64 * self.blocks_per_group() as u64;
        // Never report fewer total blocks than used blocks.
        if blocks_count < total_used_blocks as u64 {
            blocks_count = total_used_blocks as u64;
        }
        let total_free_blocks = blocks_count.saturating_sub(total_used_blocks as u64);
        let free_inodes = computed_inodes as u32 - total_used_inodes;

        let mut sb = SuperBlock::default();
        sb.inodes_count = computed_inodes as u32;
        sb.blocks_count_lo = blocks_count as u32;
        sb.blocks_count_hi = (blocks_count >> 32) as u32;
        sb.free_blocks_count_lo = total_free_blocks as u32;
        sb.free_blocks_count_hi = (total_free_blocks >> 32) as u32;
        sb.free_inodes_count = free_inodes;
        sb.first_data_block = 0;
        // log_block_size = log2(block_size / 1024).  E.g. 1024->0, 2048->1, 4096->2.
        let log_bs = (self.block_size / 1024).trailing_zeros();
        sb.log_block_size = log_bs;
        sb.log_cluster_size = log_bs;
        sb.blocks_per_group = self.blocks_per_group();
        sb.clusters_per_group = self.blocks_per_group();
        sb.inodes_per_group = inodes_per_group;
        sb.magic = SUPERBLOCK_MAGIC;
        sb.state = 1; // cleanly unmounted
        sb.errors = 1; // continue on error
        sb.creator_os = 3; // FreeBSD (matches Apple's implementation)
        sb.rev_level = 1; // dynamic inode sizes
        sb.first_ino = FIRST_INODE;
        sb.lpf_ino = LOST_AND_FOUND_INODE;
        sb.inode_size = INODE_SIZE as u16;
        sb.feature_compat = compat::SPARSE_SUPER2 | compat::EXT_ATTR;
        sb.feature_incompat =
            incompat::FILETYPE | incompat::EXTENTS | incompat::FLEX_BG;
        sb.feature_ro_compat =
            ro_compat::LARGE_FILE | ro_compat::HUGE_FILE | ro_compat::EXTRA_ISIZE;
        sb.min_extra_isize = EXTRA_ISIZE;
        sb.want_extra_isize = EXTRA_ISIZE;
        // 2^31 groups per flex group — effectively one flex group for the
        // whole filesystem, which matches the non-local bitmap placement
        // above.  NOTE(review): presumably intentional; verify with e2fsck.
        sb.log_groups_per_flex = 31;
        sb.uuid = *self.uuid.unwrap_or_else(Uuid::new_v4).as_bytes();
        if let Some(label) = &self.label {
            // Label length is validated at option-construction time
            // (<= VOLUME_NAME_LEN bytes) — TODO confirm against the setter.
            let bytes = label.as_bytes();
            sb.volume_name[..bytes.len()].copy_from_slice(bytes);
        }

        // Write: 1024 zero bytes (boot sector) + superblock + 2048 zero bytes.
        self.seek_to_block(0)?;
        self.file.write_all(&[0u8; 1024])?;
        let mut sb_buf = [0u8; SUPERBLOCK_SIZE];
        sb.write_to(&mut sb_buf);
        self.file.write_all(&sb_buf)?;
        self.file.write_all(&[0u8; 2048])?;

        self.file.flush()?;
        Ok(())
    }
1132
1133    // -----------------------------------------------------------------------
1134    // Private helpers
1135    // -----------------------------------------------------------------------
1136
    /// BFS-walk the file tree and write directory entries for every directory
    /// inode.  Updates each directory inode's size and extent tree.
    ///
    /// The BFS order is collected up-front into `bfs_order` so the second
    /// loop can mutably borrow `self` (file writes, inode updates) without
    /// fighting the tree traversal's borrows.
    fn commit_directories(&mut self) -> FormatResult<()> {
        // Collect BFS order: (parent_idx_opt, node_idx).
        let mut queue: std::collections::VecDeque<(Option<usize>, usize)> =
            std::collections::VecDeque::new();
        queue.push_back((None, self.tree.root()));

        let mut bfs_order: Vec<(Option<usize>, usize)> = Vec::new();
        while let Some((parent, node)) = queue.pop_front() {
            bfs_order.push((parent, node));
            // Skip hardlink nodes (no children to descend into).
            if self.tree.node(node).link.is_some() {
                continue;
            }
            // Clone the child list so we don't hold a borrow of the tree
            // while pushing into the queue.
            let children: Vec<usize> = self.tree.node(node).children.clone();
            for &child in &children {
                queue.push_back((Some(node), child));
            }
        }

        // Commit each directory.
        for (parent_opt, node_idx) in bfs_order {
            let inode_num = self.tree.node(node_idx).inode;
            let inode = &self.inodes[(inode_num - 1) as usize];

            // Skip deleted inodes, hardlink aliases, and non-directories —
            // only real, live directories get entry blocks.
            if inode.links_count == 0 {
                continue;
            }
            if self.tree.node(node_idx).link.is_some() {
                continue;
            }
            if !inode.is_dir() {
                continue;
            }

            // Directory entry blocks start on a block boundary.
            self.align_to_block()?;
            let start_block = self.current_block();

            // Bytes left in the current directory block; `write_dir_entry`
            // decrements it and presumably starts a new block when an entry
            // does not fit — TODO confirm against `dir::write_dir_entry`.
            let mut left = self.block_size as i32;

            // Write "." entry.
            dir::write_dir_entry(
                &mut self.file,
                ".",
                inode_num,
                self.inodes[(inode_num - 1) as usize].mode,
                None,
                None,
                self.block_size,
                &mut left,
            )?;

            // Write ".." entry.
            let parent_ino = match parent_opt {
                Some(pidx) => self.tree.node(pidx).inode,
                None => inode_num, // root's ".." points to itself
            };
            dir::write_dir_entry(
                &mut self.file,
                "..",
                parent_ino,
                self.inodes[(parent_ino - 1) as usize].mode,
                None,
                None,
                self.block_size,
                &mut left,
            )?;

            // Sort children by inode number for e2fsck compatibility.
            let mut sorted_children: Vec<usize> =
                self.tree.node(node_idx).children.clone();
            sorted_children.sort_by_key(|&ci| self.tree.node(ci).inode);

            for &child_idx in &sorted_children {
                let child_ino = self.tree.node(child_idx).inode;
                let child_name = self.tree.node(child_idx).name.clone();

                // Check that the inode has links (not deleted).
                // For hardlink nodes, the entry must reference the *target*
                // inode, so resolve through `link` first.
                let effective_ino;
                let effective_mode;
                if let Some(linked_ino) = self.tree.node(child_idx).link {
                    if self.inodes[(linked_ino - 1) as usize].links_count == 0 {
                        continue;
                    }
                    effective_ino = linked_ino;
                    effective_mode = self.inodes[(linked_ino - 1) as usize].mode;
                } else {
                    if self.inodes[(child_ino - 1) as usize].links_count == 0 {
                        continue;
                    }
                    effective_ino = child_ino;
                    effective_mode = self.inodes[(child_ino - 1) as usize].mode;
                }

                // For hardlink children pass the resolved target as the
                // link override; plain children pass None.
                let (link_inode, link_mode) =
                    if self.tree.node(child_idx).link.is_some() {
                        (Some(effective_ino), Some(effective_mode))
                    } else {
                        (None, None)
                    };

                // `.max(1)` guards the index when a hardlink node carries
                // inode 0; in that case the entry's real identity comes from
                // `link_inode`/`link_mode` — TODO confirm with write_dir_entry.
                dir::write_dir_entry(
                    &mut self.file,
                    &child_name,
                    child_ino,
                    self.inodes[(child_ino.max(1) - 1) as usize].mode,
                    link_inode,
                    link_mode,
                    self.block_size,
                    &mut left,
                )?;
            }

            // Pad out the final, partially-filled directory block.
            dir::finish_dir_entry_block(&mut self.file, &mut left, self.block_size)?;

            // Directory size is always a whole number of blocks.
            let end_block = self.current_block();
            let size = (end_block - start_block) as u64 * self.block_size as u64;
            self.inodes[(inode_num - 1) as usize].set_file_size(size);

            // Store block range in the tree node.
            self.tree.node_mut(node_idx).blocks = Some(BlockRange {
                start: start_block,
                end: end_block,
            });

            // Write extent tree for this directory.
            // Extents that don't fit inline in the inode are written at the
            // current block, hence the align + cursor handoff.
            self.align_to_block()?;
            let blocks = BlockRange {
                start: start_block,
                end: end_block,
            };
            let mut cur = self.current_block();
            extent::write_extents(
                &mut self.inodes[(inode_num - 1) as usize],
                blocks,
                self.block_size,
                &mut self.file,
                &mut cur,
            )?;
        }

        Ok(())
    }
1281
1282    /// Write all inodes sequentially, padding to fill inode tables for all
1283    /// block groups.
1284    fn commit_inode_table(
1285        &mut self,
1286        block_groups: u32,
1287        inodes_per_group: u32,
1288    ) -> FormatResult<u64> {
1289        self.align_to_block()?;
1290        let inode_table_offset = self.pos() / self.block_size as u64;
1291
1292        // Write the actual inodes.
1293        let mut inode_buf = [0u8; Inode::SIZE];
1294        for inode in &self.inodes {
1295            inode.write_to(&mut inode_buf);
1296            self.file.write_all(&inode_buf)?;
1297        }
1298
1299        // Pad the rest of the table with zeros.
1300        let table_size =
1301            INODE_SIZE as u64 * block_groups as u64 * inodes_per_group as u64;
1302        let written = self.inodes.len() as u64 * INODE_SIZE as u64;
1303        let rest = table_size - written;
1304        if rest > 0 {
1305            let zero_block = vec![0u8; self.block_size as usize];
1306            let full_blocks = rest / self.block_size as u64;
1307            for _ in 0..full_blocks {
1308                self.file.write_all(&zero_block)?;
1309            }
1310            let remainder = (rest % self.block_size as u64) as usize;
1311            if remainder > 0 {
1312                self.file.write_all(&vec![0u8; remainder])?;
1313            }
1314        }
1315
1316        Ok(inode_table_offset)
1317    }
1318
1319    /// Find the (block_groups, inodes_per_group) pair that minimizes the
1320    /// number of block groups needed to hold all inodes and all data blocks.
1321    fn optimize_block_group_layout(
1322        &self,
1323        blocks: u32,
1324        inodes: u32,
1325    ) -> (u32, u32) {
1326        let group_count =
1327            |blocks: u32, inodes: u32, ipg: u32| -> u32 {
1328                let inode_blocks_per_group = ipg * INODE_SIZE / self.block_size;
1329                // 2 blocks reserved for bitmaps.
1330                let data_blocks_per_group =
1331                    self.blocks_per_group() - inode_blocks_per_group - 2;
1332                // Ensure enough groups for all the inodes.
1333                let min_blocks =
1334                    (inodes.saturating_sub(1)) / ipg * data_blocks_per_group + 1;
1335                let effective_blocks = blocks.max(min_blocks);
1336                (effective_blocks + data_blocks_per_group - 1)
1337                    / data_blocks_per_group
1338            };
1339
1340        let inc = (self.block_size * 512 / INODE_SIZE) as usize;
1341        let mut best_groups = u32::MAX;
1342        let mut best_ipg: u32 = inc as u32;
1343
1344        let mut ipg = inc;
1345        while ipg <= self.max_inodes_per_group() as usize {
1346            let g = group_count(blocks, inodes, ipg as u32);
1347            if g < best_groups {
1348                best_groups = g;
1349                best_ipg = ipg as u32;
1350            }
1351            ipg += inc;
1352        }
1353
1354        (best_groups, best_ipg)
1355    }
1356}
1357
1358// ---------------------------------------------------------------------------
1359// Free-standing helpers
1360// ---------------------------------------------------------------------------
1361
/// Return the parent directory of `path`.
///
/// Trailing slashes are ignored, so `"/foo/bar"` and `"/foo/bar/"` both
/// yield `"/foo"`.  `"/foo"` -> `"/"`, and the parent of `"/"` (or of any
/// path without a named parent) is `"/"` itself.
fn parent_of(path: &str) -> &str {
    let trimmed = path.trim_end_matches('/');
    match trimmed.rfind('/') {
        // A separator past position 0 splits off a real parent component.
        Some(pos) if pos > 0 => &trimmed[..pos],
        // Separator at position 0 ("/foo") or no separator at all: the
        // parent is the root.
        _ => "/",
    }
}
1376
/// Return the last path component.
///
/// Trailing slashes are ignored, so `"/foo/bar"` and `"/foo/bar/"` both
/// yield `"bar"`.  The root path `"/"` is its own basename.
fn basename(path: &str) -> &str {
    if path == "/" {
        return "/";
    }
    let trimmed = path.trim_end_matches('/');
    // Everything after the final separator; with no separator the trimmed
    // path is already a bare component.
    trimmed
        .rfind('/')
        .map_or(trimmed, |pos| &trimmed[pos + 1..])
}
1390
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a formatter over an image file inside a fresh temporary
    /// directory.  The returned `TempDir` must be kept alive by the caller
    /// for the duration of the test.
    fn scratch(image_bytes: u64) -> (tempfile::TempDir, std::path::PathBuf, Formatter) {
        let tmp = tempfile::tempdir().unwrap();
        let img = tmp.path().join("test.ext4");
        let fmt = Formatter::new(&img, 4096, image_bytes).unwrap();
        (tmp, img, fmt)
    }

    #[test]
    fn test_parent_of() {
        assert_eq!(parent_of("/"), "/");
        assert_eq!(parent_of("/foo"), "/");
        assert_eq!(parent_of("/foo/bar"), "/foo");
        assert_eq!(parent_of("/a/b/c"), "/a/b");
    }

    #[test]
    fn test_basename() {
        assert_eq!(basename("/"), "/");
        assert_eq!(basename("/foo"), "foo");
        assert_eq!(basename("/foo/bar"), "bar");
        assert_eq!(basename("/a/b/c"), "c");
    }

    #[test]
    fn test_formatter_new_and_close() {
        let (_tmp, img, fmt) = scratch(256 * 1024);
        fmt.close().unwrap();

        // A freshly formatted image must exist and be non-empty.
        let meta = std::fs::metadata(&img).unwrap();
        assert!(meta.len() > 0);
    }

    #[test]
    fn test_create_file_and_directory() {
        let (_tmp, _img, mut fmt) = scratch(1024 * 1024);

        // A nested directory.
        fmt.create(
            "/etc",
            make_mode(file_mode::S_IFDIR, 0o755),
            None, None, None, None, None, None,
        ).unwrap();

        // A regular file with content.
        let payload = b"hello world\n";
        let mut reader = std::io::Cursor::new(&payload[..]);
        fmt.create(
            "/etc/motd",
            make_mode(file_mode::S_IFREG, 0o644),
            None, None, Some(&mut reader), None, None, None,
        ).unwrap();

        // A symlink pointing at the file.
        fmt.create(
            "/etc/motd.link",
            make_mode(file_mode::S_IFLNK, 0o777),
            Some("motd"),
            None, None, None, None, None,
        ).unwrap();

        fmt.close().unwrap();
    }

    #[test]
    fn test_hard_link() {
        let (_tmp, _img, mut fmt) = scratch(1024 * 1024);

        let payload = b"content";
        let mut reader = std::io::Cursor::new(&payload[..]);
        fmt.create(
            "/file",
            make_mode(file_mode::S_IFREG, 0o644),
            None, None, Some(&mut reader), None, None, None,
        ).unwrap();

        fmt.link("/hardlink", "/file").unwrap();
        fmt.close().unwrap();
    }

    #[test]
    fn test_unlink() {
        let (_tmp, _img, mut fmt) = scratch(1024 * 1024);

        fmt.create(
            "/dir/nested",
            make_mode(file_mode::S_IFDIR, 0o755),
            None, None, None, None, None, None,
        ).unwrap();

        fmt.unlink("/dir", false).unwrap();
        fmt.close().unwrap();
    }

    #[test]
    fn test_mkdir_p_idempotent() {
        let (_tmp, _img, mut fmt) = scratch(1024 * 1024);

        // mkdir -p semantics: re-creating an existing directory succeeds.
        for _ in 0..2 {
            fmt.create(
                "/var/log",
                make_mode(file_mode::S_IFDIR, 0o755),
                None, None, None, None, None, None,
            ).unwrap();
        }

        fmt.close().unwrap();
    }

    #[test]
    fn test_large_file() {
        let (_tmp, _img, mut fmt) = scratch(4 * 1024 * 1024);

        // A file spanning more than one 4 KiB block.
        let payload = vec![0xABu8; 8192];
        let mut reader = std::io::Cursor::new(&payload[..]);
        fmt.create(
            "/big",
            make_mode(file_mode::S_IFREG, 0o644),
            None, None, Some(&mut reader), None, None, None,
        ).unwrap();

        fmt.close().unwrap();
    }
}