// arcbox_ext4/formatter.rs
// Core ext4 image formatter.
//
// Creates ext4 filesystem images from scratch using a single-pass sequential
// write strategy.  The public entry point is `Formatter::new()` followed by
// repeated `create()` / `link()` / `unlink()` calls, finalized with `close()`.

7use std::collections::HashMap;
8use std::fs::{File, OpenOptions};
9use std::io::{self, Read, Seek, SeekFrom, Write};
10use std::path::Path;
11
12use crate::constants::*;
13use crate::dir;
14use crate::error::{FormatError, FormatResult};
15use crate::extent;
16use crate::file_tree::{BlockRange, FileTree, FileTreeNode, InodeNumber};
17use crate::types::*;
18use crate::xattr::{ExtendedAttribute, XattrState};
19
// ---------------------------------------------------------------------------
// FileTimestamps
// ---------------------------------------------------------------------------

/// Timestamps to apply to a newly created inode.
///
/// Each pair is `(seconds_lo, extra)` in ext4 format -- the same layout
/// returned by `timestamp_now()`.  The mapping onto inode fields is done
/// in `Formatter::create()`.
pub struct FileTimestamps {
    /// Last-access time, stored as `atime` / `atime_extra`.
    pub access_lo: u32,
    pub access_hi: u32,
    /// Last-modification time, stored as `mtime` / `mtime_extra`.
    pub modification_lo: u32,
    pub modification_hi: u32,
    /// Creation time, stored as `crtime` / `crtime_extra`.
    pub creation_lo: u32,
    pub creation_hi: u32,
    /// "Now", applied as the inode change time `ctime` / `ctime_extra`.
    pub now_lo: u32,
    pub now_hi: u32,
}
38
39impl Default for FileTimestamps {
40    fn default() -> Self {
41        let (lo, hi) = timestamp_now();
42        Self {
43            access_lo: lo,
44            access_hi: hi,
45            modification_lo: lo,
46            modification_hi: hi,
47            creation_lo: lo,
48            creation_hi: hi,
49            now_lo: lo,
50            now_hi: hi,
51        }
52    }
53}
54
// ---------------------------------------------------------------------------
// Formatter
// ---------------------------------------------------------------------------

/// Single-pass ext4 image builder.
pub struct Formatter {
    /// Backing image file; data is written sequentially, block-aligned.
    file: File,
    /// Filesystem block size in bytes (only 4096 is accepted by `new()`).
    block_size: u32,
    /// Total image size in bytes; may be grown during `close()` to align
    /// to a block-group boundary.
    size: u64,
    /// All inodes, indexed by `inode_number - 1`.
    inodes: Vec<Inode>,
    /// In-memory directory tree mirroring the image contents.
    tree: FileTree,
    /// Block ranges freed by `unlink()`; cleared from the block bitmaps
    /// when `close()` writes them out.
    deleted_blocks: Vec<BlockRange>,
    /// Block ranges owned by inodes that still have links_count > 0 but
    /// whose original tree node has been detached.  Keyed by inode number.
    /// Used to reclaim blocks when the last hard-link reference is removed.
    deferred_blocks: HashMap<u32, Vec<BlockRange>>,
}
72
73impl Formatter {
    // -- Computed properties ------------------------------------------------

76    #[inline]
77    fn blocks_per_group(&self) -> u32 {
78        self.block_size * 8
79    }
80
81    #[inline]
82    fn max_inodes_per_group(&self) -> u32 {
83        self.block_size * 8
84    }
85
86    #[inline]
87    fn groups_per_descriptor_block(&self) -> u32 {
88        self.block_size / 32
89    }
90
91    #[inline]
92    fn block_count(&self) -> u32 {
93        ((self.size - 1) / self.block_size as u64 + 1) as u32
94    }
95
96    #[inline]
97    fn group_count(&self) -> u32 {
98        (self.block_count() - 1) / self.blocks_per_group() + 1
99    }
100
101    #[inline]
102    fn group_descriptor_blocks(&self) -> u32 {
103        ((self.group_count() - 1) / self.groups_per_descriptor_block() + 1) * 32
104    }
105
106    #[inline]
107    fn pos(&mut self) -> u64 {
108        self.file.stream_position().unwrap_or(0)
109    }
110
111    #[inline]
112    fn current_block(&mut self) -> u32 {
113        let p = self.pos();
114        (p / self.block_size as u64) as u32
115    }
116
117    fn seek_to_block(&mut self, block: u32) -> io::Result<()> {
118        self.file
119            .seek(SeekFrom::Start(block as u64 * self.block_size as u64))?;
120        Ok(())
121    }
122
123    fn align_to_block(&mut self) -> io::Result<()> {
124        let p = self.pos();
125        if p % self.block_size as u64 != 0 {
126            let blk = self.current_block() + 1;
127            self.seek_to_block(blk)?;
128        }
129        Ok(())
130    }
131
    // -- Constructor -------------------------------------------------------

    /// Create a new formatter that writes an ext4 image to `path`.
    ///
    /// The file is truncated and re-created as a sparse file of
    /// `min_disk_size` bytes.  The root directory (inode 2) and the
    /// `/lost+found` directory (required by e2fsck) are created
    /// automatically.
    ///
    /// # Errors
    ///
    /// Returns `FormatError::UnsupportedBlockSize` unless `block_size` is
    /// 4096, and propagates I/O errors from creating or sizing the file.
    pub fn new(path: &Path, block_size: u32, min_disk_size: u64) -> FormatResult<Self> {
        // Only 4096-byte blocks are supported.  Supporting smaller block sizes
        // (1024, 2048) would require first_data_block=1 and different group
        // descriptor offset calculations throughout the formatter and reader.
        if block_size != 4096 {
            return Err(FormatError::UnsupportedBlockSize(block_size));
        }

        let file = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .truncate(true)
            .open(path)?;

        // Create a sparse file.
        file.set_len(min_disk_size)?;

        // Reserve the first 10 inodes (indices 0..9  =>  inode numbers 1..10).
        //   [0] = defective block inode  (empty / default)
        //   [1] = root inode             (inode 2)
        //   [2..9] = reserved            (empty / default)
        let mut inodes = Vec::with_capacity(16);
        inodes.push(Inode::default()); // inode 1 -- defective blocks
        inodes.push(Inode::root_inode()); // inode 2 -- root directory
        for _ in 2..10 {
            inodes.push(Inode::default()); // inodes 3..10
        }

        let tree = FileTree::new(ROOT_INODE, "/");

        let mut fmt = Self {
            file,
            block_size,
            size: min_disk_size,
            inodes,
            tree,
            deleted_blocks: Vec::new(),
            deferred_blocks: HashMap::new(),
        };

        // Seek past the superblock (block 0) and the group descriptor table,
        // so subsequent sequential writes land in the data area.
        let gdb = fmt.group_descriptor_blocks();
        fmt.seek_to_block(gdb + 1)?;

        // /lost+found is required by e2fsck.
        fmt.create(
            "/lost+found",
            make_mode(file_mode::S_IFDIR, 0o700),
            None,
            None,
            None,
            None,
            None,
            None,
        )?;

        Ok(fmt)
    }
199
    // -- create() ----------------------------------------------------------

    /// Create a file, directory, or symlink at `path`.
    ///
    /// Parent directories are created recursively with mode 0755, inheriting
    /// uid/gid from the nearest existing parent (like `mkdir -p`).
    ///
    /// * `mode` -- file type + permission bits (see `file_mode`).
    /// * `link` -- `Some(target)` makes this a symbolic link to `target`.
    /// * `ts` -- timestamps; defaults to "now" for all fields.
    /// * `data` -- contents for regular files; `None` creates an empty file.
    /// * `uid` / `gid` -- owner; inherited from the parent when `None`.
    /// * `xattrs` -- extended attributes to attach to the new inode.
    #[allow(clippy::too_many_arguments)]
    pub fn create(
        &mut self,
        path: &str,
        mode: u16,
        link: Option<&str>,
        ts: Option<FileTimestamps>,
        data: Option<&mut dyn Read>,
        uid: Option<u32>,
        gid: Option<u32>,
        xattrs: Option<&HashMap<String, Vec<u8>>>,
    ) -> FormatResult<()> {
        let path_buf = std::path::PathBuf::from(path);

        // ------- Handle "already exists" cases -------
        if let Some(existing_idx) = self.tree.lookup(&path_buf) {
            let existing_inode_num = self.tree.node(existing_idx).inode;
            let existing_inode = &self.inodes[(existing_inode_num - 1) as usize];

            if is_dir(mode) {
                if existing_inode.is_dir() {
                    // mkdir -p: directory already exists.  Only update mode
                    // and ownership when the caller explicitly provided them
                    // (uid/gid != None).  This prevents recursive parent-
                    // creation from silently downgrading permissions set by
                    // an earlier explicit create() call.
                    if self.tree.node(existing_idx).name == basename(path) {
                        if uid.is_some() || gid.is_some() {
                            let inode =
                                &mut self.inodes[(existing_inode_num - 1) as usize];
                            inode.mode = mode;
                            if let Some(u) = uid {
                                inode.set_uid(u);
                            }
                            if let Some(g) = gid {
                                inode.set_gid(g);
                            }
                        }
                        return Ok(());
                    }
                } else {
                    // A regular file (or other non-dir) blocks directory creation.
                    return Err(FormatError::NotDirectory(path_buf));
                }
            } else if self.tree.node(existing_idx).link.is_some() {
                // Hard-link entries can always be overwritten.
                self.unlink(path, false)?;
            } else {
                // File or symlink replacing an existing entry.
                if existing_inode.is_dir() && !is_link(mode) {
                    return Err(FormatError::NotFile(path_buf));
                }
                self.unlink(path, false)?;
            }
        }

        // ------- Ensure parent directories exist -------
        // `parent_path == path` only at the root, which always exists.
        let parent_path = parent_of(path);
        if parent_path != path {
            self.create(
                parent_path,
                make_mode(file_mode::S_IFDIR, 0o755),
                None,
                None,
                None,
                None,
                None,
                None,
            )?;
        }

        let parent_path_buf = std::path::PathBuf::from(parent_path);
        let parent_idx = self
            .tree
            .lookup(&parent_path_buf)
            .ok_or_else(|| FormatError::NotFound(parent_path_buf.clone()))?;
        let parent_inode_num = self.tree.node(parent_idx).inode;
        let parent_inode = &self.inodes[(parent_inode_num - 1) as usize];

        if parent_inode.links_count as u32 >= MAX_LINKS {
            return Err(FormatError::MaximumLinksExceeded(parent_path_buf));
        }

        // ------- Build the child inode -------
        let mut child_inode = Inode::default();
        child_inode.mode = mode;

        // uid / gid -- inherit from parent when not specified.
        if let Some(u) = uid {
            child_inode.set_uid(u);
        } else {
            child_inode.uid = parent_inode.uid;
            child_inode.uid_hi = parent_inode.uid_hi;
        }
        if let Some(g) = gid {
            child_inode.set_gid(g);
        } else {
            child_inode.gid = parent_inode.gid;
            child_inode.gid_hi = parent_inode.gid_hi;
        }

        // Extended attributes.
        if let Some(xattr_map) = xattrs {
            if !xattr_map.is_empty() {
                // The inode number the child will receive when pushed below.
                let child_ino = self.inodes.len() as u32 + 1;
                let mut state =
                    XattrState::new(child_ino, INODE_EXTRA_SIZE, self.block_size);
                // The reference implementation adds a sentinel "system.data" attribute.
                state.add(ExtendedAttribute::new("system.data", Vec::new()))?;
                for (name, value) in xattr_map {
                    state.add(ExtendedAttribute::new(name, value.clone()))?;
                }
                if state.has_inline() {
                    let buf = state.write_inline()?;
                    child_inode.inline_xattrs.copy_from_slice(&buf);
                }
                if state.has_block() {
                    let block_buf = state.write_block()?;
                    self.align_to_block()?;
                    child_inode.xattr_block_lo = self.current_block();
                    self.file.write_all(&block_buf)?;
                    // Account for the xattr block.  When HUGE_FILE is set,
                    // blocks_lo is in filesystem-block units; otherwise sectors.
                    // NOTE(review): `flags` is not assigned HUGE_FILE until
                    // further below, so this branch always takes the sector
                    // path here even for inodes that end up with HUGE_FILE
                    // set -- confirm the i_blocks accounting is intended.
                    if child_inode.flags & inode_flags::HUGE_FILE != 0 {
                        child_inode.blocks_lo += 1;
                    } else {
                        child_inode.blocks_lo += (self.block_size / 512) as u32;
                    }
                }
            }
        }

        // Timestamps.  `now` is applied as ctime (inode change time).
        let ts = ts.unwrap_or_default();
        child_inode.atime = ts.access_lo;
        child_inode.atime_extra = ts.access_hi;
        child_inode.ctime = ts.now_lo;
        child_inode.ctime_extra = ts.now_hi;
        child_inode.mtime = ts.modification_lo;
        child_inode.mtime_extra = ts.modification_hi;
        child_inode.crtime = ts.creation_lo;
        child_inode.crtime_extra = ts.creation_hi;
        child_inode.links_count = 1;
        child_inode.extra_isize = EXTRA_ISIZE;
        child_inode.flags = inode_flags::HUGE_FILE;

        // Align to block boundary before writing data.
        self.align_to_block()?;

        let mut start_block: u32 = 0;
        let mut end_block: u32 = 0;

        // ------- Handle by file type -------
        if is_dir(mode) {
            // Directory: bump link counts; directory entries are deferred to
            // close() so they can be sorted.
            child_inode.links_count = 2;
            self.inodes[(parent_inode_num - 1) as usize].links_count += 1;
        } else if let Some(link_target) = link {
            // Symbolic link.
            start_block = self.current_block();
            let link_bytes = link_target.as_bytes();

            let size = if link_bytes.len() < 60 {
                // Short symlink: store inline in the inode's block field.
                child_inode.block[..link_bytes.len()].copy_from_slice(link_bytes);
                link_bytes.len() as u64
            } else {
                // Long symlink: write to data blocks.
                self.file.write_all(link_bytes)?;
                link_bytes.len() as u64
            };

            self.align_to_block()?;
            end_block = self.current_block();

            child_inode.set_file_size(size);
            // Symlink permission bits are forced to 0777.
            child_inode.mode |= 0o777;
            child_inode.flags = 0;

            if link_bytes.len() < 60 {
                // Inline symlink occupies no data blocks.
                // NOTE(review): this also discards any xattr-block
                // accounting added to blocks_lo above -- confirm.
                child_inode.blocks_lo = 0;
            } else {
                let blocks = BlockRange {
                    start: start_block,
                    end: end_block,
                };
                let mut cur = self.current_block();
                extent::write_extents(
                    &mut child_inode,
                    blocks,
                    self.block_size,
                    &mut self.file,
                    &mut cur,
                )?;
            }
        } else if is_reg(mode) {
            // Regular file: stream the contents one block-sized chunk at a
            // time, enforcing the maximum file size as we go.
            start_block = self.current_block();
            let mut size = 0u64;

            if let Some(reader) = data {
                let mut buf = vec![0u8; self.block_size as usize];
                loop {
                    let n = reader.read(&mut buf)?;
                    if n == 0 {
                        break;
                    }
                    size += n as u64;
                    if size > MAX_FILE_SIZE {
                        return Err(FormatError::FileTooBig(size));
                    }
                    self.file.write_all(&buf[..n])?;
                }
            }

            self.align_to_block()?;
            end_block = self.current_block();

            child_inode.set_file_size(size);

            let blocks = BlockRange {
                start: start_block,
                end: end_block,
            };
            let mut cur = self.current_block();
            extent::write_extents(
                &mut child_inode,
                blocks,
                self.block_size,
                &mut self.file,
                &mut cur,
            )?;
        } else {
            return Err(FormatError::UnsupportedFiletype);
        }

        // ------- Register the new inode and tree node -------
        self.inodes.push(child_inode);
        let child_inode_num = self.inodes.len() as InodeNumber;

        let child_node = FileTreeNode {
            inode: child_inode_num,
            name: basename(path).to_string(),
            children: Vec::new(),
            parent: None,
            // An empty range (start == end) means the entry owns no data
            // blocks (directory, empty file, inline symlink).
            blocks: if start_block != end_block {
                Some(BlockRange {
                    start: start_block,
                    end: end_block,
                })
            } else {
                None
            },
            additional_blocks: Vec::new(),
            link: None,
        };
        self.tree.add_child(parent_idx, child_node);

        Ok(())
    }
467
    // -- Query helpers (for Ext4Writer / layer.rs) --------------------------

470    /// Check whether `path` exists in the in-memory file tree.
471    pub fn exists(&self, path: &str) -> bool {
472        self.tree.lookup(&std::path::PathBuf::from(path)).is_some()
473    }
474
475    /// Check whether `path` exists and is a directory.
476    pub fn is_dir(&self, path: &str) -> bool {
477        let path_buf = std::path::PathBuf::from(path);
478        if let Some(idx) = self.tree.lookup(&path_buf) {
479            let ino = self.tree.node(idx).inode;
480            self.inodes[(ino - 1) as usize].is_dir()
481        } else {
482            false
483        }
484    }
485
486    /// List the names of immediate children of a directory (excluding `.`
487    /// and `..`).  Returns an empty vec if `path` is not a directory.
488    pub fn list_dir(&self, path: &str) -> Vec<String> {
489        let path_buf = std::path::PathBuf::from(path);
490        let Some(idx) = self.tree.lookup(&path_buf) else {
491            return Vec::new();
492        };
493        self.tree
494            .node(idx)
495            .children
496            .iter()
497            .map(|&ci| self.tree.node(ci).name.clone())
498            .collect()
499    }
500
501    /// Update the permission bits of an existing entry.
502    pub fn set_permissions(&mut self, path: &str, mode: u16) -> FormatResult<()> {
503        let path_buf = std::path::PathBuf::from(path);
504        let idx = self
505            .tree
506            .lookup(&path_buf)
507            .ok_or_else(|| FormatError::NotFound(path_buf))?;
508        let ino = self.tree.node(idx).inode;
509        let inode = &mut self.inodes[(ino - 1) as usize];
510        // Preserve the file-type bits, replace the permission bits.
511        inode.mode = (inode.mode & file_mode::TYPE_MASK) | (mode & !file_mode::TYPE_MASK);
512        Ok(())
513    }
514
515    /// Update the owner uid/gid of an existing entry.
516    pub fn set_owner(&mut self, path: &str, uid: u32, gid: u32) -> FormatResult<()> {
517        let path_buf = std::path::PathBuf::from(path);
518        let idx = self
519            .tree
520            .lookup(&path_buf)
521            .ok_or_else(|| FormatError::NotFound(path_buf))?;
522        let ino = self.tree.node(idx).inode;
523        let inode = &mut self.inodes[(ino - 1) as usize];
524        inode.set_uid(uid);
525        inode.set_gid(gid);
526        Ok(())
527    }
528
    // -- link() ------------------------------------------------------------

531    /// Create a hard link at `link_path` pointing to `target_path`.
532    pub fn link(&mut self, link_path: &str, target_path: &str) -> FormatResult<()> {
533        let target_buf = std::path::PathBuf::from(target_path);
534        let target_idx = self
535            .tree
536            .lookup(&target_buf)
537            .ok_or_else(|| FormatError::NotFound(target_buf.clone()))?;
538        let target_inode_num = self.tree.node(target_idx).inode;
539        let target_inode = &self.inodes[(target_inode_num - 1) as usize];
540
541        if target_inode.is_dir() {
542            return Err(FormatError::CannotHardlinkDirectory(target_buf));
543        }
544
545        // Increment link count on the target inode.
546        self.inodes[(target_inode_num - 1) as usize].links_count += 1;
547
548        // If the link path already exists, remove it first.
549        let link_buf = std::path::PathBuf::from(link_path);
550        if self.tree.lookup(&link_buf).is_some() {
551            self.unlink(link_path, false)?;
552        }
553
554        let parent_path = parent_of(link_path);
555        let parent_path_buf = std::path::PathBuf::from(parent_path);
556        let parent_idx = self
557            .tree
558            .lookup(&parent_path_buf)
559            .ok_or_else(|| FormatError::NotFound(parent_path_buf.clone()))?;
560
561        let parent_inode_num = self.tree.node(parent_idx).inode;
562        if self.inodes[(parent_inode_num - 1) as usize].links_count as u32 >= MAX_LINKS {
563            return Err(FormatError::MaximumLinksExceeded(parent_path_buf));
564        }
565
566        let link_node = FileTreeNode {
567            inode: ROOT_INODE, // placeholder, not used for hard links
568            name: basename(link_path).to_string(),
569            children: Vec::new(),
570            parent: None,
571            blocks: None,
572            additional_blocks: Vec::new(),
573            link: Some(target_inode_num),
574        };
575        self.tree.add_child(parent_idx, link_node);
576
577        Ok(())
578    }
579
    // -- unlink() ----------------------------------------------------------

    /// Remove the entry at `path`.
    ///
    /// When `directory_whiteout` is true, only children are removed and the
    /// directory entry itself is kept (used for overlay whiteouts).
    ///
    /// Unlinking a path that does not exist is a no-op, not an error.
    /// Directories are removed recursively.
    pub fn unlink(&mut self, path: &str, directory_whiteout: bool) -> FormatResult<()> {
        let path_buf = std::path::PathBuf::from(path);
        let node_idx = match self.tree.lookup(&path_buf) {
            Some(idx) => idx,
            None => return Ok(()), // nothing to unlink
        };

        let inode_num = self.tree.node(node_idx).inode;
        let inode_idx = (inode_num - 1) as usize;

        if directory_whiteout && !self.inodes[inode_idx].is_dir() {
            return Err(FormatError::NotDirectory(path_buf));
        }

        // Recursively unlink children.  Names are snapshotted first because
        // the recursive calls mutate the tree while we iterate.
        let child_names: Vec<String> = self
            .tree
            .node(node_idx)
            .children
            .iter()
            .map(|&ci| self.tree.node(ci).name.clone())
            .collect();
        for child_name in child_names {
            let child_path = if path == "/" {
                format!("/{child_name}")
            } else {
                format!("{path}/{child_name}")
            };
            self.unlink(&child_path, false)?;
        }

        if directory_whiteout {
            return Ok(());
        }

        // Detach from parent and update parent's link count for directories.
        // The `> 2` guard keeps the parent at its baseline link count
        // (a directory starts at 2: its own entry plus ".").
        let parent_path = parent_of(path);
        let parent_path_buf = std::path::PathBuf::from(parent_path);
        if let Some(parent_idx) = self.tree.lookup(&parent_path_buf) {
            let parent_inode_num = self.tree.node(parent_idx).inode;
            if self.inodes[inode_idx].is_dir()
                && self.inodes[(parent_inode_num - 1) as usize].links_count > 2
            {
                self.inodes[(parent_inode_num - 1) as usize].links_count -= 1;
            }
            self.tree.remove_child(parent_idx, basename(path));
        }

        // Determine which inode to decrement and potentially reclaim.
        // For hard-link entries the target inode is the one that matters;
        // for regular entries it is the node's own inode.
        // NOTE(review): `node_idx` is read after remove_child() above; this
        // relies on FileTree node indices staying stable across removal --
        // confirm against the FileTree implementation.
        let (target_ino, target_idx) =
            if let Some(linked_ino) = self.tree.node(node_idx).link {
                (linked_ino, (linked_ino - 1) as usize)
            } else {
                (inode_num, inode_idx)
            };

        // Inodes below FIRST_INODE are reserved and never reclaimed.
        if target_ino > FIRST_INODE - 1 {
            if self.inodes[target_idx].links_count > 0 {
                self.inodes[target_idx].links_count -= 1;
            }

            // Collect block ranges from this tree node (if any).  An empty
            // range (start == end) owns no blocks and is skipped.
            let node = self.tree.node(node_idx);
            let mut node_blocks: Vec<BlockRange> = Vec::new();
            if let Some(b) = node.blocks {
                if b.start != b.end {
                    node_blocks.push(b);
                }
            }
            for blk in &node.additional_blocks {
                node_blocks.push(*blk);
            }

            if self.inodes[target_idx].links_count == 0 {
                // Last reference removed -- reclaim blocks and mark deleted.
                // Blocks may come from this node (if it is the original) or
                // from deferred_blocks (if the original was unlinked earlier
                // while hard links still existed).
                for blk in &node_blocks {
                    self.deleted_blocks.push(*blk);
                }
                if let Some(deferred) = self.deferred_blocks.remove(&target_ino) {
                    for blk in deferred {
                        self.deleted_blocks.push(blk);
                    }
                }

                // Reset the inode and stamp its deletion time.
                let (now_lo, _) = timestamp_now();
                self.inodes[target_idx] = Inode::default();
                self.inodes[target_idx].dtime = now_lo;
            } else if !node_blocks.is_empty() {
                // The original node is being removed but the inode still has
                // remaining hard-link references.  Stash the block ranges so
                // they can be reclaimed when the last link is finally deleted.
                self.deferred_blocks
                    .entry(target_ino)
                    .or_default()
                    .extend(node_blocks);
            }
        }

        Ok(())
    }
691
692    // -- close() -----------------------------------------------------------
693
694    /// Finalize the ext4 image.
695    ///
696    /// Writes directory entries (BFS order), the inode table, block/inode
697    /// bitmaps, group descriptors, and the superblock.  Consumes `self`.
698    pub fn close(mut self) -> FormatResult<()> {
699        // -- Step 1: BFS-commit directory entries --
700        self.commit_directories()?;
701
702        // -- Step 2: Optimize block group layout --
703        let current_blk = self.current_block();
704        let inode_count = self.inodes.len() as u32;
705        let (block_groups, inodes_per_group) =
706            self.optimize_block_group_layout(current_blk, inode_count);
707
708        // -- Step 3: Write inode table --
709        let inode_table_offset = self.commit_inode_table(block_groups, inodes_per_group)?;
710
711        self.align_to_block()?;
712
713        // -- Step 4: Write bitmaps and build group descriptors --
714        let bitmap_offset = self.current_block();
715        let bitmap_size = block_groups * 2; // block bitmap + inode bitmap per group
716        let data_size = bitmap_offset + bitmap_size;
717
718        // Ensure the disk is large enough.
719        let minimum_disk_size = if block_groups == 1 {
720            self.blocks_per_group()
721        } else {
722            (block_groups - 1) * self.blocks_per_group() + 1
723        };
724
725        if self.size < minimum_disk_size as u64 * self.block_size as u64 {
726            self.size = minimum_disk_size as u64 * self.block_size as u64;
727        }
728
729        let inode_table_size_per_group = inodes_per_group * INODE_SIZE / self.block_size;
730
731        // Check if we need more groups for the full disk size.
732        let min_groups =
733            ((self.pos() / self.block_size as u64) - 1) / self.blocks_per_group() as u64 + 1;
734        if self.size < min_groups * self.blocks_per_group() as u64 * self.block_size as u64 {
735            self.size = min_groups * self.blocks_per_group() as u64 * self.block_size as u64;
736            let saved_pos = self.pos();
737            self.file.set_len(self.size)?;
738            self.file.seek(SeekFrom::Start(saved_pos))?;
739        }
740
741        let total_groups =
742            ((self.size / self.block_size as u64) - 1) / self.blocks_per_group() as u64 + 1;
743
744        // Align disk size to block-group boundary.
745        if self.size < total_groups * self.blocks_per_group() as u64 * self.block_size as u64 {
746            self.size = total_groups * self.blocks_per_group() as u64 * self.block_size as u64;
747            let saved_pos = self.pos();
748            self.file.set_len(self.size)?;
749            self.file.seek(SeekFrom::Start(saved_pos))?;
750        }
751
752        let total_groups_u32 = total_groups as u32;
753
754        // Verify group descriptor space.
755        let gd_block_count =
756            (total_groups_u32 - 1) / self.groups_per_descriptor_block() + 1;
757        if gd_block_count > self.group_descriptor_blocks() {
758            return Err(FormatError::InsufficientSpaceForGroupDescriptorBlocks);
759        }
760
761        let mut total_used_blocks: u32 = 0;
762        let mut total_used_inodes: u32 = 0;
763        let mut group_descriptors = Vec::with_capacity(total_groups_u32 as usize);
764
765        // Write bitmaps for groups that contain data.
766        for group in 0..block_groups {
767            let mut dirs: u32 = 0;
768            let mut used_inodes: u32 = 0;
769            let mut used_blocks: u32 = 0;
770
771            // Two bitmaps per group: block bitmap + inode bitmap, each one
772            // block in size.
773            let mut bitmap = vec![0u8; self.block_size as usize * 2];
774
775            // -- Block bitmap --
776            let group_start = group * self.blocks_per_group();
777            let group_end = group_start + self.blocks_per_group();
778
779            if group_end <= data_size {
780                // Fully allocated group.
781                for byte in bitmap[..self.block_size as usize].iter_mut() {
782                    *byte = 0xFF;
783                }
784                used_blocks = self.blocks_per_group();
785            } else if group_start < data_size {
786                // Partially allocated group.
787                let used = data_size - group_start;
788                for i in 0..used {
789                    bitmap[(i / 8) as usize] |= 1 << (i % 8);
790                    used_blocks += 1;
791                }
792            }
793
794            // Group 0: adjust for unused group descriptor blocks.
795            if group == 0 {
796                let used_gd_blocks =
797                    (total_groups_u32 - 1) / self.groups_per_descriptor_block() + 1;
798                // Mark used GD blocks (block 0 = superblock, 1..=used_gd_blocks).
799                for i in 0..=used_gd_blocks {
800                    bitmap[(i / 8) as usize] |= 1 << (i % 8);
801                }
802                // Free the reserved-but-unused GD blocks.
803                let gd_reserved = self.group_descriptor_blocks();
804                for i in (used_gd_blocks + 1)..=gd_reserved {
805                    let was_set =
806                        (bitmap[(i / 8) as usize] >> (i % 8)) & 1;
807                    bitmap[(i / 8) as usize] &= !(1 << (i % 8));
808                    if was_set != 0 {
809                        used_blocks -= 1;
810                    }
811                }
812            }
813
814            // Mark deleted blocks as free.
815            for deleted in &self.deleted_blocks {
816                for blk in deleted.start..deleted.end {
817                    if blk / self.blocks_per_group() == group {
818                        let j = blk % self.blocks_per_group();
819                        let was_set =
820                            (bitmap[(j / 8) as usize] >> (j % 8)) & 1;
821                        bitmap[(j / 8) as usize] &= !(1 << (j % 8));
822                        if was_set != 0 {
823                            used_blocks -= 1;
824                        }
825                    }
826                }
827            }
828
829            // -- Inode bitmap (stored in the second block of the pair) --
830            let inode_bitmap_start = self.block_size as usize;
831            for i in 0..inodes_per_group {
832                let ino = 1 + group * inodes_per_group + i;
833                if ino > self.inodes.len() as u32 {
834                    continue;
835                }
836                let inode = &self.inodes[(ino - 1) as usize];
837                // Reserved inodes (1..10) are always marked used.
838                if ino > 10 && inode.links_count == 0 {
839                    continue;
840                }
841                bitmap[inode_bitmap_start + (i / 8) as usize] |= 1 << (i % 8);
842                used_inodes += 1;
843                if inode.is_dir() {
844                    dirs += 1;
845                }
846            }
847            // Mark remaining inode-bitmap bits (past inodes_per_group) as occupied
848            // so the kernel does not try to allocate them.
849            for i in (inodes_per_group / 8)..self.block_size {
850                bitmap[inode_bitmap_start + i as usize] = 0xFF;
851            }
852
853            self.file.write_all(&bitmap)?;
854
855            // -- Build group descriptor --
856            let free_blocks = if self.blocks_per_group() >= used_blocks {
857                self.blocks_per_group() - used_blocks
858            } else {
859                0
860            };
861            let free_inodes = inodes_per_group - used_inodes;
862            let block_bitmap_lo = (bitmap_offset + 2 * group) as u32;
863            let inode_bitmap_lo = block_bitmap_lo + 1;
864            let inode_table_lo =
865                (inode_table_offset as u32) + group * inode_table_size_per_group;
866
867            group_descriptors.push(GroupDescriptor {
868                block_bitmap_lo,
869                inode_bitmap_lo,
870                inode_table_lo,
871                free_blocks_count_lo: free_blocks as u16,
872                free_inodes_count_lo: free_inodes as u16,
873                used_dirs_count_lo: dirs as u16,
874                flags: 0,
875                exclude_bitmap_lo: 0,
876                block_bitmap_csum_lo: 0,
877                inode_bitmap_csum_lo: 0,
878                itable_unused_lo: 0,
879                checksum: 0,
880            });
881
882            total_used_blocks += used_blocks;
883            total_used_inodes += used_inodes;
884        }
885
886        // -- Step 5: Extra (empty) block groups beyond data --
887        let empty_block_bitmap = {
888            let mut bm = vec![0u8; self.blocks_per_group() as usize / 8];
889            // Mark inode-table blocks + 2 bitmap blocks as used.
890            for i in 0..(inode_table_size_per_group + 2) {
891                bm[(i / 8) as usize] |= 1 << (i % 8);
892            }
893            bm
894        };
895        let empty_inode_bitmap = {
896            let mut bm = vec![0xFFu8; self.blocks_per_group() as usize / 8];
897            for i in 0..inodes_per_group as u16 {
898                bm[(i / 8) as usize] &= !(1 << (i % 8));
899            }
900            bm
901        };
902
903        for group in block_groups..total_groups_u32 {
904            let blocks_in_group = if group == total_groups_u32 - 1 {
905                let rem = (self.size / self.block_size as u64) % self.blocks_per_group() as u64;
906                if rem == 0 {
907                    self.blocks_per_group()
908                } else {
909                    rem as u32
910                }
911            } else {
912                self.blocks_per_group()
913            };
914
915            let bb_offset = group * self.blocks_per_group() + inode_table_size_per_group;
916            let ib_offset = bb_offset + 1;
917            let it_offset = group as u64 * self.blocks_per_group() as u64;
918            let free_blocks_count = blocks_in_group.saturating_sub(inode_table_size_per_group + 2);
919            let free_inodes_count = inodes_per_group;
920
921            group_descriptors.push(GroupDescriptor {
922                block_bitmap_lo: bb_offset,
923                inode_bitmap_lo: ib_offset,
924                inode_table_lo: it_offset as u32,
925                free_blocks_count_lo: free_blocks_count as u16,
926                free_inodes_count_lo: free_inodes_count as u16,
927                used_dirs_count_lo: 0,
928                flags: 0,
929                exclude_bitmap_lo: 0,
930                block_bitmap_csum_lo: 0,
931                inode_bitmap_csum_lo: 0,
932                itable_unused_lo: 0,
933                checksum: 0,
934            });
935
936            total_used_blocks += inode_table_size_per_group + 2;
937
938            // Write block bitmap + inode bitmap at the right offset.
939            self.seek_to_block(bb_offset)?;
940
941            if group == total_groups_u32 - 1
942                && blocks_in_group < self.blocks_per_group()
943            {
944                // Last partial group: mark out-of-range blocks as used.
945                let mut partial_bb =
946                    vec![0u8; self.blocks_per_group() as usize / 8];
947                for i in blocks_in_group..self.blocks_per_group() {
948                    partial_bb[(i / 8) as usize] |= 1 << (i % 8);
949                }
950                for i in 0..(inode_table_size_per_group + 2) {
951                    partial_bb[(i / 8) as usize] |= 1 << (i % 8);
952                }
953                self.file.write_all(&partial_bb)?;
954                self.file.write_all(&empty_inode_bitmap)?;
955            } else {
956                self.file.write_all(&empty_block_bitmap)?;
957                self.file.write_all(&empty_inode_bitmap)?;
958            }
959        }
960
961        // -- Step 6: Write group descriptors at block 1 --
962        self.seek_to_block(1)?;
963        let mut gd_buf = vec![0u8; GroupDescriptor::SIZE];
964        for gd in &group_descriptors {
965            gd.write_to(&mut gd_buf);
966            self.file.write_all(&gd_buf)?;
967        }
968
969        // -- Step 7: Write superblock --
970        let computed_inodes = total_groups_u32 as u64 * inodes_per_group as u64;
971        let mut blocks_count =
972            total_groups_u32 as u64 * self.blocks_per_group() as u64;
973        if blocks_count < total_used_blocks as u64 {
974            blocks_count = total_used_blocks as u64;
975        }
976        let total_free_blocks = blocks_count.saturating_sub(total_used_blocks as u64);
977        let free_inodes = computed_inodes as u32 - total_used_inodes;
978
979        let mut sb = SuperBlock::default();
980        sb.inodes_count = computed_inodes as u32;
981        sb.blocks_count_lo = blocks_count as u32;
982        sb.blocks_count_hi = (blocks_count >> 32) as u32;
983        sb.free_blocks_count_lo = total_free_blocks as u32;
984        sb.free_blocks_count_hi = (total_free_blocks >> 32) as u32;
985        sb.free_inodes_count = free_inodes;
986        sb.first_data_block = 0;
987        // log_block_size = log2(block_size / 1024).  E.g. 1024->0, 2048->1, 4096->2.
988        let log_bs = (self.block_size / 1024).trailing_zeros();
989        sb.log_block_size = log_bs;
990        sb.log_cluster_size = log_bs;
991        sb.blocks_per_group = self.blocks_per_group();
992        sb.clusters_per_group = self.blocks_per_group();
993        sb.inodes_per_group = inodes_per_group;
994        sb.magic = SUPERBLOCK_MAGIC;
995        sb.state = 1; // cleanly unmounted
996        sb.errors = 1; // continue on error
997        sb.creator_os = 3; // FreeBSD (matches Apple's implementation)
998        sb.rev_level = 1; // dynamic inode sizes
999        sb.first_ino = FIRST_INODE;
1000        sb.lpf_ino = LOST_AND_FOUND_INODE;
1001        sb.inode_size = INODE_SIZE as u16;
1002        sb.feature_compat = compat::SPARSE_SUPER2 | compat::EXT_ATTR;
1003        sb.feature_incompat =
1004            incompat::FILETYPE | incompat::EXTENTS | incompat::FLEX_BG;
1005        sb.feature_ro_compat =
1006            ro_compat::LARGE_FILE | ro_compat::HUGE_FILE | ro_compat::EXTRA_ISIZE;
1007        sb.min_extra_isize = EXTRA_ISIZE;
1008        sb.want_extra_isize = EXTRA_ISIZE;
1009        sb.log_groups_per_flex = 31;
1010        sb.uuid = *uuid::Uuid::new_v4().as_bytes();
1011
1012        // Write: 1024 zero bytes (boot sector) + superblock + 2048 zero bytes.
1013        self.seek_to_block(0)?;
1014        self.file.write_all(&[0u8; 1024])?;
1015        let mut sb_buf = [0u8; SUPERBLOCK_SIZE];
1016        sb.write_to(&mut sb_buf);
1017        self.file.write_all(&sb_buf)?;
1018        self.file.write_all(&[0u8; 2048])?;
1019
1020        self.file.flush()?;
1021        Ok(())
1022    }
1023
1024    // -----------------------------------------------------------------------
1025    // Private helpers
1026    // -----------------------------------------------------------------------
1027
    /// BFS-walk the file tree and write directory entries for every directory
    /// inode.  Updates each directory inode's size and extent tree.
    ///
    /// Directory data blocks are appended at the current write position
    /// (block-aligned), each directory's extent-tree blocks following its
    /// entry blocks.  BFS order guarantees a parent is visited before its
    /// children.
    fn commit_directories(&mut self) -> FormatResult<()> {
        // Collect BFS order: (parent_idx_opt, node_idx).  The root has no
        // parent (None); its ".." entry points back to itself below.
        let mut queue: std::collections::VecDeque<(Option<usize>, usize)> =
            std::collections::VecDeque::new();
        queue.push_back((None, self.tree.root()));

        let mut bfs_order: Vec<(Option<usize>, usize)> = Vec::new();
        while let Some((parent, node)) = queue.pop_front() {
            bfs_order.push((parent, node));
            // Skip hardlink nodes (no children to descend into).
            if self.tree.node(node).link.is_some() {
                continue;
            }
            // Clone the child list so we can grow `queue` while iterating.
            let children: Vec<usize> = self.tree.node(node).children.clone();
            for &child in &children {
                queue.push_back((Some(node), child));
            }
        }

        // Commit each directory.
        for (parent_opt, node_idx) in bfs_order {
            let inode_num = self.tree.node(node_idx).inode;
            let inode = &self.inodes[(inode_num - 1) as usize];

            // Unlinked (deleted) inodes get no directory data.
            if inode.links_count == 0 {
                continue;
            }
            // Hardlink nodes are entries in their parent, not directories.
            if self.tree.node(node_idx).link.is_some() {
                continue;
            }
            // Only directory inodes carry entry blocks.
            if !inode.is_dir() {
                continue;
            }

            self.align_to_block()?;
            let start_block = self.current_block();

            // Bytes left in the current directory block; the dir writer
            // decrements this and rolls over to a fresh block when exhausted.
            let mut left = self.block_size as i32;

            // Write "." entry.
            dir::write_dir_entry(
                &mut self.file,
                ".",
                inode_num,
                self.inodes[(inode_num - 1) as usize].mode,
                None,
                None,
                self.block_size,
                &mut left,
            )?;

            // Write ".." entry.
            let parent_ino = match parent_opt {
                Some(pidx) => self.tree.node(pidx).inode,
                None => inode_num, // root's ".." points to itself
            };
            dir::write_dir_entry(
                &mut self.file,
                "..",
                parent_ino,
                self.inodes[(parent_ino - 1) as usize].mode,
                None,
                None,
                self.block_size,
                &mut left,
            )?;

            // Sort children by inode number for e2fsck compatibility.
            let mut sorted_children: Vec<usize> =
                self.tree.node(node_idx).children.clone();
            sorted_children.sort_by_key(|&ci| self.tree.node(ci).inode);

            for &child_idx in &sorted_children {
                let child_ino = self.tree.node(child_idx).inode;
                let child_name = self.tree.node(child_idx).name.clone();

                // Check that the inode has links (not deleted).  For a
                // hardlink node, resolve the target's inode/mode rather
                // than the node's own slot.
                let effective_ino;
                let effective_mode;
                if let Some(linked_ino) = self.tree.node(child_idx).link {
                    if self.inodes[(linked_ino - 1) as usize].links_count == 0 {
                        continue;
                    }
                    effective_ino = linked_ino;
                    effective_mode = self.inodes[(linked_ino - 1) as usize].mode;
                } else {
                    if self.inodes[(child_ino - 1) as usize].links_count == 0 {
                        continue;
                    }
                    effective_ino = child_ino;
                    effective_mode = self.inodes[(child_ino - 1) as usize].mode;
                }

                let (link_inode, link_mode) =
                    if self.tree.node(child_idx).link.is_some() {
                        (Some(effective_ino), Some(effective_mode))
                    } else {
                        (None, None)
                    };

                // NOTE(review): `.max(1)` guards the index against a zero
                // `child_ino` (hardlink nodes may carry no inode of their
                // own), in which case the mode read here is slot 0's and
                // `link_mode` presumably takes precedence inside
                // `write_dir_entry` -- confirm against dir.rs.
                dir::write_dir_entry(
                    &mut self.file,
                    &child_name,
                    child_ino,
                    self.inodes[(child_ino.max(1) - 1) as usize].mode,
                    link_inode,
                    link_mode,
                    self.block_size,
                    &mut left,
                )?;
            }

            // Terminate/pad the final directory block.
            dir::finish_dir_entry_block(&mut self.file, &mut left, self.block_size)?;

            // Directory size is the whole-block span just written.
            let end_block = self.current_block();
            let size = (end_block - start_block) as u64 * self.block_size as u64;
            self.inodes[(inode_num - 1) as usize].set_file_size(size);

            // Store block range in the tree node.
            self.tree.node_mut(node_idx).blocks = Some(BlockRange {
                start: start_block,
                end: end_block,
            });

            // Write extent tree for this directory.
            self.align_to_block()?;
            let blocks = BlockRange {
                start: start_block,
                end: end_block,
            };
            let mut cur = self.current_block();
            extent::write_extents(
                &mut self.inodes[(inode_num - 1) as usize],
                blocks,
                self.block_size,
                &mut self.file,
                &mut cur,
            )?;
        }

        Ok(())
    }
1172
1173    /// Write all inodes sequentially, padding to fill inode tables for all
1174    /// block groups.
1175    fn commit_inode_table(
1176        &mut self,
1177        block_groups: u32,
1178        inodes_per_group: u32,
1179    ) -> FormatResult<u64> {
1180        self.align_to_block()?;
1181        let inode_table_offset = self.pos() / self.block_size as u64;
1182
1183        // Write the actual inodes.
1184        let mut inode_buf = [0u8; Inode::SIZE];
1185        for inode in &self.inodes {
1186            inode.write_to(&mut inode_buf);
1187            self.file.write_all(&inode_buf)?;
1188        }
1189
1190        // Pad the rest of the table with zeros.
1191        let table_size =
1192            INODE_SIZE as u64 * block_groups as u64 * inodes_per_group as u64;
1193        let written = self.inodes.len() as u64 * INODE_SIZE as u64;
1194        let rest = table_size - written;
1195        if rest > 0 {
1196            let zero_block = vec![0u8; self.block_size as usize];
1197            let full_blocks = rest / self.block_size as u64;
1198            for _ in 0..full_blocks {
1199                self.file.write_all(&zero_block)?;
1200            }
1201            let remainder = (rest % self.block_size as u64) as usize;
1202            if remainder > 0 {
1203                self.file.write_all(&vec![0u8; remainder])?;
1204            }
1205        }
1206
1207        Ok(inode_table_offset)
1208    }
1209
1210    /// Find the (block_groups, inodes_per_group) pair that minimizes the
1211    /// number of block groups needed to hold all inodes and all data blocks.
1212    fn optimize_block_group_layout(
1213        &self,
1214        blocks: u32,
1215        inodes: u32,
1216    ) -> (u32, u32) {
1217        let group_count =
1218            |blocks: u32, inodes: u32, ipg: u32| -> u32 {
1219                let inode_blocks_per_group = ipg * INODE_SIZE / self.block_size;
1220                // 2 blocks reserved for bitmaps.
1221                let data_blocks_per_group =
1222                    self.blocks_per_group() - inode_blocks_per_group - 2;
1223                // Ensure enough groups for all the inodes.
1224                let min_blocks =
1225                    (inodes.saturating_sub(1)) / ipg * data_blocks_per_group + 1;
1226                let effective_blocks = blocks.max(min_blocks);
1227                (effective_blocks + data_blocks_per_group - 1)
1228                    / data_blocks_per_group
1229            };
1230
1231        let inc = (self.block_size * 512 / INODE_SIZE) as usize;
1232        let mut best_groups = u32::MAX;
1233        let mut best_ipg: u32 = inc as u32;
1234
1235        let mut ipg = inc;
1236        while ipg <= self.max_inodes_per_group() as usize {
1237            let g = group_count(blocks, inodes, ipg as u32);
1238            if g < best_groups {
1239                best_groups = g;
1240                best_ipg = ipg as u32;
1241            }
1242            ipg += inc;
1243        }
1244
1245        (best_groups, best_ipg)
1246    }
1247}
1248
1249// ---------------------------------------------------------------------------
1250// Free-standing helpers
1251// ---------------------------------------------------------------------------
1252
/// Return the parent directory of `path`.
///
/// `"/foo/bar"` -> `"/foo"`, `"/foo"` -> `"/"`, `"/"` -> `"/"`.
/// Trailing slashes are ignored: `"/foo/bar/"` -> `"/foo"`.
fn parent_of(path: &str) -> &str {
    if path == "/" {
        return "/";
    }
    // Drop trailing slashes, then cut at the last remaining separator.
    let stripped = path.trim_end_matches('/');
    match stripped.rfind('/') {
        // Separator at position 0, or no separator at all: parent is root.
        Some(0) | None => "/",
        Some(cut) => &stripped[..cut],
    }
}
1267
/// Return the last path component.
///
/// `"/foo/bar"` -> `"bar"`, `"/"` -> `"/"`.
/// Trailing slashes are ignored: `"/foo/bar/"` -> `"bar"`.
fn basename(path: &str) -> &str {
    if path == "/" {
        return "/";
    }
    // Drop trailing slashes, then keep everything after the last separator
    // (or the whole remainder when there is none).
    let stripped = path.trim_end_matches('/');
    stripped
        .rfind('/')
        .map_or(stripped, |cut| &stripped[cut + 1..])
}
1281
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parent_of() {
        // Root is its own parent; any other path drops its final component.
        assert_eq!(parent_of("/"), "/");
        assert_eq!(parent_of("/foo"), "/");
        assert_eq!(parent_of("/foo/bar"), "/foo");
        assert_eq!(parent_of("/a/b/c"), "/a/b");
    }

    #[test]
    fn test_basename() {
        // Root is its own basename; any other path keeps only the final
        // component.
        assert_eq!(basename("/"), "/");
        assert_eq!(basename("/foo"), "foo");
        assert_eq!(basename("/foo/bar"), "bar");
        assert_eq!(basename("/a/b/c"), "c");
    }

    #[test]
    fn test_formatter_new_and_close() {
        let tmp = tempfile::tempdir().unwrap();
        let image = tmp.path().join("test.ext4");
        let formatter = Formatter::new(&image, 4096, 256 * 1024).unwrap();
        formatter.close().unwrap();

        // Even an empty filesystem must produce a non-empty image file.
        let info = std::fs::metadata(&image).unwrap();
        assert!(info.len() > 0);
    }

    #[test]
    fn test_create_file_and_directory() {
        let tmp = tempfile::tempdir().unwrap();
        let image = tmp.path().join("test.ext4");
        let mut formatter = Formatter::new(&image, 4096, 1024 * 1024).unwrap();

        // A nested directory.
        formatter.create(
            "/etc",
            make_mode(file_mode::S_IFDIR, 0o755),
            None, None, None, None, None, None,
        ).unwrap();

        // A regular file with content.
        let payload = b"hello world\n";
        let mut reader = std::io::Cursor::new(&payload[..]);
        formatter.create(
            "/etc/motd",
            make_mode(file_mode::S_IFREG, 0o644),
            None, None, Some(&mut reader), None, None, None,
        ).unwrap();

        // A symlink to the file above.
        formatter.create(
            "/etc/motd.link",
            make_mode(file_mode::S_IFLNK, 0o777),
            Some("motd"),
            None, None, None, None, None,
        ).unwrap();

        formatter.close().unwrap();
    }

    #[test]
    fn test_hard_link() {
        let tmp = tempfile::tempdir().unwrap();
        let image = tmp.path().join("test.ext4");
        let mut formatter = Formatter::new(&image, 4096, 1024 * 1024).unwrap();

        let payload = b"content";
        let mut reader = std::io::Cursor::new(&payload[..]);
        formatter.create(
            "/file",
            make_mode(file_mode::S_IFREG, 0o644),
            None, None, Some(&mut reader), None, None, None,
        ).unwrap();

        // A second name for the same inode.
        formatter.link("/hardlink", "/file").unwrap();
        formatter.close().unwrap();
    }

    #[test]
    fn test_unlink() {
        let tmp = tempfile::tempdir().unwrap();
        let image = tmp.path().join("test.ext4");
        let mut formatter = Formatter::new(&image, 4096, 1024 * 1024).unwrap();

        formatter.create(
            "/dir/nested",
            make_mode(file_mode::S_IFDIR, 0o755),
            None, None, None, None, None, None,
        ).unwrap();

        // Unlink the directory created above.
        formatter.unlink("/dir", false).unwrap();
        formatter.close().unwrap();
    }

    #[test]
    fn test_mkdir_p_idempotent() {
        let tmp = tempfile::tempdir().unwrap();
        let image = tmp.path().join("test.ext4");
        let mut formatter = Formatter::new(&image, 4096, 1024 * 1024).unwrap();

        // Creating the same directory twice should succeed (mkdir -p semantics).
        for _ in 0..2 {
            formatter.create(
                "/var/log",
                make_mode(file_mode::S_IFDIR, 0o755),
                None, None, None, None, None, None,
            ).unwrap();
        }

        formatter.close().unwrap();
    }

    #[test]
    fn test_large_file() {
        let tmp = tempfile::tempdir().unwrap();
        let image = tmp.path().join("test.ext4");
        let mut formatter = Formatter::new(&image, 4096, 4 * 1024 * 1024).unwrap();

        // A file spanning more than one 4 KiB block.
        let payload = vec![0xABu8; 8192];
        let mut reader = std::io::Cursor::new(&payload[..]);
        formatter.create(
            "/big",
            make_mode(file_mode::S_IFREG, 0o644),
            None, None, Some(&mut reader), None, None, None,
        ).unwrap();

        formatter.close().unwrap();
    }
}