backhand 0.25.1

Library for reading, creating, and modifying SquashFS file systems
use core::num::NonZeroUsize;
use no_std_io2::io::{Read, Seek, Write};
use std::ffi::OsStr;
use std::io::{Cursor, SeekFrom};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::Mutex;
use std::time::{SystemTime, UNIX_EPOCH};

use deku::prelude::*;
use tracing::{error, info, trace};

use crate::error::BackhandError;
use crate::kinds::Kind;
use crate::kinds::LE_V4_0;
use crate::v4::compressor::{CompressionOptions, Compressor};
use crate::v4::data::DataWriter;
use crate::v4::entry::Entry;
use crate::v4::filesystem::node::SquashfsSymlink;
use crate::v4::filesystem::node::{InnerNode, Nodes};
use crate::v4::filesystem::normalize_squashfs_path;
use crate::v4::fragment;
use crate::v4::id::Id;
use crate::v4::metadata::{self, METADATA_MAXSIZE, MetadataWriter};
use crate::v4::reader::WriteSeek;
use crate::v4::squashfs::SuperBlock;
use crate::{
    DEFAULT_BLOCK_SIZE, DEFAULT_PAD_LEN, FilesystemReader, Flags, MAX_BLOCK_SIZE, MIN_BLOCK_SIZE,
    Node, NodeHeader, SquashfsBlockDevice, SquashfsCharacterDevice, SquashfsDir,
    SquashfsFileWriter,
};

/// Representation of a SquashFS filesystem to be written back to an image
/// - Use [`Self::from_fs_reader`] to write with the data from a previous SquashFS image
/// - Use [`Self::default`] to create an empty SquashFS image without an original image. For example:
/// ```rust
/// # use std::time::SystemTime;
/// # use backhand::{NodeHeader, Id, FilesystemCompressor, FilesystemWriter, SquashfsDir, compression::Compressor, kind, DEFAULT_BLOCK_SIZE, ExtraXz, CompressionExtra, kind::Kind};
/// // Create an empty default FilesystemWriter
/// let mut fs = FilesystemWriter::default();
/// fs.set_current_time();
/// fs.set_block_size(DEFAULT_BLOCK_SIZE);
/// fs.set_only_root_id();
/// fs.set_kind(Kind::from_const(kind::LE_V4_0).unwrap());
///
/// // header used for the dirs and file pushed below
/// let header = NodeHeader {
///     permissions: 0o755,
///     ..NodeHeader::default()
/// };
///
/// // set root image permissions
/// fs.set_root_mode(0o777);
///
/// // set extra compression options
/// let mut xz_extra = ExtraXz::default();
/// xz_extra.level(9).unwrap();
/// let extra = CompressionExtra::Xz(xz_extra);
/// let mut compressor = FilesystemCompressor::new(Compressor::Xz, None).unwrap();
/// compressor.extra(extra).unwrap();
/// fs.set_compressor(compressor);
///
/// // push some dirs and a file
/// fs.push_dir("usr", header);
/// fs.push_dir("usr/bin", header);
/// fs.push_file(std::io::Cursor::new(vec![0x00, 0x01]), "usr/bin/file", header);
/// ```
#[derive(Debug)]
pub struct FilesystemWriter<'a, 'b, 'c> {
    pub(crate) kind: Kind,
    /// The size of a data block in bytes. Must be a power of two between 4096 (4 KiB) and 1048576 (1 MiB).
    pub(crate) block_size: u32,
    /// Last modification time of the archive, counted in seconds since 00:00, Jan 1st 1970 UTC (not counting leap seconds).
    /// This is unsigned, so it expires in the year 2106 (as opposed to 2038).
    pub(crate) mod_time: u32,
    /// 32 bit user and group IDs
    pub(crate) id_table: Vec<Id>,
    /// Compressor used when writing
    pub(crate) fs_compressor: FilesystemCompressor,
    /// All files and directories in filesystem, including root
    pub(crate) root: Nodes<SquashfsFileWriter<'a, 'b, 'c>>,
    /// The log2 of the block size. If the two fields do not agree, the archive is considered corrupted.
    pub(crate) block_log: u16,
    pub(crate) pad_len: u32,
    /// Superblock flag indicating that duplicate files were removed
    pub(crate) no_duplicate_files: bool,
    pub(crate) emit_compression_options: bool,
}

impl Default for FilesystemWriter<'_, '_, '_> {
    /// Create default FilesystemWriter
    ///
    /// block_size: [`DEFAULT_BLOCK_SIZE`], compressor: default XZ compression, no nodes,
    /// kind: [`LE_V4_0`], and mod_time: `0`.
    fn default() -> Self {
        let block_size = DEFAULT_BLOCK_SIZE;
        Self {
            block_size,
            mod_time: 0,
            id_table: Id::root(),
            fs_compressor: FilesystemCompressor::default(),
            kind: Kind { inner: Arc::new(LE_V4_0) },
            root: Nodes::new_root(NodeHeader::default()),
            block_log: block_size.ilog2() as u16,
            pad_len: DEFAULT_PAD_LEN,
            no_duplicate_files: true,
            emit_compression_options: true,
        }
    }
}

impl<'a, 'b, 'c> FilesystemWriter<'a, 'b, 'c> {
    /// Set block size
    ///
    /// # Panics
    /// Panics if `block_size` is not a power of two or is outside the range
    /// [`MIN_BLOCK_SIZE`]`..=`[`MAX_BLOCK_SIZE`]
    pub fn set_block_size(&mut self, block_size: u32) {
        let power_of_two = block_size != 0 && (block_size & (block_size - 1)) == 0;
        if !(MIN_BLOCK_SIZE..=MAX_BLOCK_SIZE).contains(&block_size) || !power_of_two {
            panic!("invalid block_size");
        }
        self.block_size = block_size;
        self.block_log = block_size.ilog2() as u16;
    }

    /// Set time of image as `mod_time`
    ///
    /// # Example: Set to `Wed Oct 19 01:26:15 2022`
    /// ```rust
    /// # use backhand::{FilesystemWriter, kind};
    /// let mut fs = FilesystemWriter::default();
    /// fs.set_time(0x634f_5237);
    /// ```
    pub fn set_time(&mut self, mod_time: u32) {
        self.mod_time = mod_time;
    }

    /// Set time of image as current time
    pub fn set_current_time(&mut self) {
        self.mod_time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() as u32;
    }

    /// Set kind as `kind`
    ///
    /// # Example: Set kind to default V4.0
    /// ```rust
    /// # use backhand::{FilesystemWriter, kind::Kind, kind};
    /// let mut fs = FilesystemWriter::default();
    /// fs.set_kind(Kind::from_const(kind::LE_V4_0).unwrap());
    /// ```
    pub fn set_kind(&mut self, kind: Kind) {
        self.kind = kind;
    }

    /// Set root mode as `mode`
    ///
    /// # Example
    /// ```rust
    /// # use backhand::FilesystemWriter;
    /// let mut fs = FilesystemWriter::default();
    /// fs.set_root_mode(0o777);
    /// ```
    pub fn set_root_mode(&mut self, mode: u16) {
        self.root.root_mut().header.permissions = mode;
    }

    /// Set root uid as `uid`
    pub fn set_root_uid(&mut self, uid: u32) {
        self.root.root_mut().header.uid = uid;
    }

    /// Set root gid as `gid`
    pub fn set_root_gid(&mut self, gid: u32) {
        self.root.root_mut().header.gid = gid;
    }

    /// Set compressor as `compressor`
    ///
    /// ```rust
    /// # use backhand::{FilesystemWriter, FilesystemCompressor, compression::Compressor};
    /// let mut fs = FilesystemWriter::default();
    /// let compressor = FilesystemCompressor::new(Compressor::Xz, None).unwrap();
    /// fs.set_compressor(compressor);
    /// ```
    pub fn set_compressor(&mut self, compressor: FilesystemCompressor) {
        self.fs_compressor = compressor;
    }

    /// Set id_table to [`Id::root`], removing old entries
    pub fn set_only_root_id(&mut self) {
        self.id_table = Id::root();
    }

    /// Set padding (zero bytes) added to the end of the image after calling [`Self::write`].
    ///
    /// For example, if given a `pad_kib` of 8, an 8 KiB padding will be added to the end of the image.
    ///
    /// Default: [`DEFAULT_PAD_LEN`]
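    ///
    /// # Example
    /// A short sketch using only this writer:
    /// ```rust
    /// # use backhand::FilesystemWriter;
    /// let mut fs = FilesystemWriter::default();
    /// // pad the resulting image out to the next 8 KiB boundary
    /// fs.set_kib_padding(8);
    /// ```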
    pub fn set_kib_padding(&mut self, pad_kib: u32) {
        self.pad_len = pad_kib * 1024;
    }

    /// Set *no* padding (zero bytes) to be added to the end of the image after calling [`Self::write`].
    pub fn set_no_padding(&mut self) {
        self.pad_len = 0;
    }

    /// Set whether duplicate file checking is performed; on by default
    pub fn set_no_duplicate_files(&mut self, value: bool) {
        self.no_duplicate_files = value;
    }

    /// Set whether compression options are written to the image
    pub fn set_emit_compression_options(&mut self, value: bool) {
        self.emit_compression_options = value;
    }

    /// Inherit filesystem structure and properties from `reader`
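    ///
    /// # Example
    /// A minimal sketch of a read-modify-write round trip; the file names are illustrative:
    /// ```rust,no_run
    /// # use std::fs::File;
    /// # use std::io::BufReader;
    /// # use backhand::{FilesystemReader, FilesystemWriter};
    /// // read an existing image
    /// let file = BufReader::new(File::open("old.squashfs").unwrap());
    /// let read_filesystem = FilesystemReader::from_reader(file).unwrap();
    /// // inherit its structure and properties
    /// let mut write_filesystem = FilesystemWriter::from_fs_reader(&read_filesystem).unwrap();
    /// // ...modify nodes here...
    /// write_filesystem.write(&mut File::create("new.squashfs").unwrap()).unwrap();
    /// ```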
    pub fn from_fs_reader(reader: &'a FilesystemReader<'b>) -> Result<Self, BackhandError> {
        let mut root: Vec<Node<_>> = reader
            .root
            .nodes
            .iter()
            .map(|node| {
                let inner = match &node.inner {
                    InnerNode::File(file) => {
                        let reader = reader.file(file);
                        InnerNode::File(SquashfsFileWriter::SquashfsFile(reader))
                    }
                    InnerNode::Symlink(x) => InnerNode::Symlink(x.clone()),
                    InnerNode::Dir(x) => InnerNode::Dir(*x),
                    InnerNode::CharacterDevice(x) => InnerNode::CharacterDevice(*x),
                    InnerNode::BlockDevice(x) => InnerNode::BlockDevice(*x),
                    InnerNode::NamedPipe => InnerNode::NamedPipe,
                    InnerNode::Socket => InnerNode::Socket,
                };
                Node { fullpath: node.fullpath.clone(), header: node.header, inner }
            })
            .collect();
        root.sort();
        Ok(Self {
            kind: Kind { inner: reader.kind.inner.clone() },
            block_size: reader.block_size,
            block_log: reader.block_log,
            fs_compressor: FilesystemCompressor::new(
                reader.compressor,
                reader.compression_options,
            )?,
            mod_time: reader.mod_time,
            id_table: reader.id_table.clone(),
            root: Nodes { nodes: root },
            pad_len: DEFAULT_PAD_LEN,
            no_duplicate_files: reader.no_duplicate_files,
            emit_compression_options: true,
        })
    }

    //find the node relative to this path and return a mutable reference
    fn mut_node<S>(&mut self, find_path: S) -> Option<&mut Node<SquashfsFileWriter<'a, 'b, 'c>>>
    where
        S: AsRef<Path>,
    {
        //the search path root prefix is optional, so remove it if present to
        //not affect the search
        let find_path = normalize_squashfs_path(find_path.as_ref()).ok()?;
        self.root.node_mut(find_path)
    }

    fn insert_node<P>(
        &mut self,
        path: P,
        header: NodeHeader,
        node: InnerNode<SquashfsFileWriter<'a, 'b, 'c>>,
    ) -> Result<(), BackhandError>
    where
        P: AsRef<Path>,
    {
        // create gid id
        self.lookup_add_id(header.gid);
        // create uid id
        self.lookup_add_id(header.uid);

        let path = normalize_squashfs_path(path.as_ref())?;
        let node = Node::new(path, header, node);
        self.root.insert(node)
    }

    /// Insert `reader` into filesystem with `path` and metadata `header`.
    ///
    /// The `uid` and `gid` in `header` are added to this `FilesystemWriter`'s id table
    pub fn push_file<P>(
        &mut self,
        reader: impl Read + 'c,
        path: P,
        header: NodeHeader,
    ) -> Result<(), BackhandError>
    where
        P: AsRef<Path>,
    {
        let reader = Arc::new(Mutex::new(reader));
        let new_file = InnerNode::File(SquashfsFileWriter::UserDefined(reader));
        self.insert_node(path, header, new_file)?;
        Ok(())
    }

    /// Take a mutable reference to existing file at `find_path`
    pub fn mut_file<S>(&mut self, find_path: S) -> Option<&mut SquashfsFileWriter<'a, 'b, 'c>>
    where
        S: AsRef<Path>,
    {
        self.mut_node(find_path).and_then(|node| {
            if let InnerNode::File(file) = &mut node.inner { Some(file) } else { None }
        })
    }

    /// Replace an existing file
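    ///
    /// # Example
    /// A sketch, assuming `etc/passwd` was already pushed into (or read into) this writer;
    /// the path and contents are illustrative:
    /// ```rust
    /// # use backhand::{FilesystemWriter, NodeHeader};
    /// # let mut fs = FilesystemWriter::default();
    /// # fs.push_dir("etc", NodeHeader::default()).unwrap();
    /// # fs.push_file(std::io::Cursor::new(vec![0x00]), "etc/passwd", NodeHeader::default()).unwrap();
    /// fs.replace_file("etc/passwd", std::io::Cursor::new(b"root:x:0:0:".to_vec())).unwrap();
    /// ```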
    pub fn replace_file<S>(
        &mut self,
        find_path: S,
        reader: impl Read + 'c,
    ) -> Result<(), BackhandError>
    where
        S: AsRef<Path>,
    {
        let file = self.mut_file(find_path).ok_or(BackhandError::FileNotFound)?;
        let reader = Arc::new(Mutex::new(reader));
        *file = SquashfsFileWriter::UserDefined(reader);
        Ok(())
    }

    /// Insert symlink `path` -> `link`
    ///
    /// The `uid` and `gid` in `header` are added to this `FilesystemWriter`'s id table
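    ///
    /// # Example
    /// A short sketch; the paths are illustrative:
    /// ```rust
    /// # use backhand::{FilesystemWriter, NodeHeader};
    /// let mut fs = FilesystemWriter::default();
    /// fs.push_dir("bin", NodeHeader::default()).unwrap();
    /// // create "bin/sh" pointing at "bash"
    /// fs.push_symlink("bash", "bin/sh", NodeHeader::default()).unwrap();
    /// ```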
    pub fn push_symlink<P, S>(
        &mut self,
        link: S,
        path: P,
        header: NodeHeader,
    ) -> Result<(), BackhandError>
    where
        P: AsRef<Path>,
        S: Into<PathBuf>,
    {
        let new_symlink = InnerNode::Symlink(SquashfsSymlink { link: link.into() });
        self.insert_node(path, header, new_symlink)?;
        Ok(())
    }

    /// Insert empty `dir` at `path`
    ///
    /// The `uid` and `gid` in `header` are added to this `FilesystemWriter`'s id table
    pub fn push_dir<P>(&mut self, path: P, header: NodeHeader) -> Result<(), BackhandError>
    where
        P: AsRef<Path>,
    {
        let new_dir = InnerNode::Dir(SquashfsDir::default());
        self.insert_node(path, header, new_dir)?;
        Ok(())
    }

    /// Recursively create an empty directory and all of its parent components
    /// if they are missing.
    ///
    /// The `uid` and `gid` in `header` are added to this `FilesystemWriter`'s id table
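    ///
    /// # Example
    /// A short sketch; the path is illustrative:
    /// ```rust
    /// # use backhand::{FilesystemWriter, NodeHeader};
    /// let mut fs = FilesystemWriter::default();
    /// let header = NodeHeader { permissions: 0o755, ..NodeHeader::default() };
    /// // creates "usr", "usr/share", and "usr/share/man" in one call
    /// fs.push_dir_all("usr/share/man", header).unwrap();
    /// ```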
    pub fn push_dir_all<P>(&mut self, path: P, header: NodeHeader) -> Result<(), BackhandError>
    where
        P: AsRef<Path>,
    {
        //the search path root prefix is optional, so remove it if present to
        //not affect the search
        let path = normalize_squashfs_path(path.as_ref())?;
        //TODO this is not elegant, find a better solution
        let ancestors: Vec<&Path> = path.ancestors().collect();

        for file in ancestors.iter().rev() {
            match self.root.nodes.binary_search_by(|node| node.fullpath.as_path().cmp(file)) {
                Ok(index) => {
                    //if it exists but is not a directory, return an error
                    let node = &self.root.nodes[index];
                    if !matches!(&node.inner, InnerNode::Dir(_)) {
                        return Err(BackhandError::InvalidFilePath);
                    }
                }
                //if the dir doesn't exist, create it
                Err(_index) => self.push_dir(file, header)?,
            }
        }
        Ok(())
    }

    /// Insert character device with `device_number` at `path`
    ///
    /// The `uid` and `gid` in `header` are added to this `FilesystemWriter`'s id table
    pub fn push_char_device<P>(
        &mut self,
        device_number: u32,
        path: P,
        header: NodeHeader,
    ) -> Result<(), BackhandError>
    where
        P: AsRef<Path>,
    {
        let new_device = InnerNode::CharacterDevice(SquashfsCharacterDevice { device_number });
        self.insert_node(path, header, new_device)?;
        Ok(())
    }

    /// Insert block device with `device_number` at `path`
    ///
    /// The `uid` and `gid` in `header` are added to this `FilesystemWriter`'s id table
    pub fn push_block_device<P>(
        &mut self,
        device_number: u32,
        path: P,
        header: NodeHeader,
    ) -> Result<(), BackhandError>
    where
        P: AsRef<Path>,
    {
        let new_device = InnerNode::BlockDevice(SquashfsBlockDevice { device_number });
        self.insert_node(path, header, new_device)?;
        Ok(())
    }

    /// Insert FIFO (named pipe)
    ///
    /// The `uid` and `gid` in `header` are added to this `FilesystemWriter`'s id table
    pub fn push_fifo<P>(&mut self, path: P, header: NodeHeader) -> Result<(), BackhandError>
    where
        P: AsRef<Path>,
    {
        let new_device = InnerNode::NamedPipe;
        self.insert_node(path, header, new_device)?;
        Ok(())
    }

    /// Insert Socket (UNIX domain socket)
    ///
    /// The `uid` and `gid` in `header` are added to this `FilesystemWriter`'s id table
    pub fn push_socket<P>(&mut self, path: P, header: NodeHeader) -> Result<(), BackhandError>
    where
        P: AsRef<Path>,
    {
        let new_device = InnerNode::Socket;
        self.insert_node(path, header, new_device)?;
        Ok(())
    }

    /// Same as [`Self::write`], but seeking to `offset` in `w` before writing. This offset
    /// is treated as the base image offset.
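    ///
    /// # Example
    /// A sketch writing the image starting at byte `0x2000` of the output; the offset and
    /// file name are illustrative:
    /// ```rust,no_run
    /// # use backhand::FilesystemWriter;
    /// # let mut fs = FilesystemWriter::default();
    /// let mut out = std::fs::File::create("padded.bin").unwrap();
    /// let (_superblock, _bytes_written) = fs.write_with_offset(&mut out, 0x2000).unwrap();
    /// ```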
    pub fn write_with_offset<W>(
        &mut self,
        w: W,
        offset: u64,
    ) -> Result<(SuperBlock, u64), BackhandError>
    where
        W: Write + Seek,
    {
        let mut writer = WriterWithOffset::new(w, offset)?;
        self.write(&mut writer)
    }

    fn write_data<W>(
        &mut self,
        compressor: FilesystemCompressor,
        block_size: u32,
        mut writer: W,
        data_writer: &mut DataWriter<'b>,
    ) -> Result<(), BackhandError>
    where
        W: WriteSeek,
    {
        let files = self.root.nodes.iter_mut().filter_map(|node| match &mut node.inner {
            InnerNode::File(file) => Some(file),
            _ => None,
        });
        for file in files {
            let (filesize, added) = match file {
                SquashfsFileWriter::UserDefined(file) => {
                    let file_ptr = Arc::clone(file);
                    let mut file_lock =
                        file_ptr.lock().map_err(|_| BackhandError::MutexPoisoned)?;
                    data_writer.add_bytes(&mut *file_lock, &mut writer)?
                }
                SquashfsFileWriter::SquashfsFile(file) => {
                    // if the source and destination are both squashfs files that
                    // use the same compressor and block_size, just copy the data
                    // instead of doing a decompress->compress round trip
                    if file.system.compressor == compressor.id
                        && file.system.compression_options == compressor.options
                        && file.system.block_size == block_size
                    {
                        data_writer.just_copy_it(file.raw_data_reader(), &mut writer)?
                    } else {
                        data_writer.add_bytes(file.reader(), &mut writer)?
                    }
                }
                SquashfsFileWriter::Consumed(_, _) => unreachable!(),
            };
            *file = SquashfsFileWriter::Consumed(filesize, added);
        }
        Ok(())
    }

    /// Create SquashFS inodes and directory tables from each node of the tree
    ///
    /// This works by recursively creating inodes and dirs for each node in the tree, while
    /// tracking parent directories: for every directory this function is called on each direct
    /// child, recursing further whenever a child is itself a directory.
    #[allow(clippy::too_many_arguments)]
    fn write_inode_dir<'slf>(
        &'slf self,
        inode_writer: &'_ mut MetadataWriter,
        dir_writer: &'_ mut MetadataWriter,
        parent_node_id: u32,
        node_id: NonZeroUsize,
        superblock: &SuperBlock,
        kind: &Kind,
        id_table: &Vec<Id>,
    ) -> Result<Entry<'slf>, BackhandError> {
        let node = &self
            .root
            .node(node_id)
            .ok_or(BackhandError::InternalState("node not found".to_string()))?;
        let filename = node.fullpath.file_name().unwrap_or(OsStr::new("/"));
        //if not a dir, return the entry
        match &node.inner {
            InnerNode::File(SquashfsFileWriter::Consumed(filesize, added)) => {
                return Entry::file(
                    filename,
                    node.header,
                    node_id.get().try_into().map_err(|e: std::num::TryFromIntError| {
                        BackhandError::NumericConversion(format!("file node id: {}", e))
                    })?,
                    inode_writer,
                    *filesize,
                    added,
                    superblock,
                    kind,
                    id_table,
                );
            }
            InnerNode::File(_) => unreachable!(),
            InnerNode::Symlink(symlink) => {
                return Entry::symlink(
                    filename,
                    node.header,
                    symlink,
                    node_id.get().try_into().map_err(|e: std::num::TryFromIntError| {
                        BackhandError::NumericConversion(format!("symlink node id: {}", e))
                    })?,
                    inode_writer,
                    superblock,
                    kind,
                    id_table,
                );
            }
            InnerNode::CharacterDevice(char) => {
                return Entry::char(
                    filename,
                    node.header,
                    char,
                    node_id.get().try_into().map_err(|e: std::num::TryFromIntError| {
                        BackhandError::NumericConversion(format!("character device node id: {}", e))
                    })?,
                    inode_writer,
                    superblock,
                    kind,
                    id_table,
                );
            }
            InnerNode::BlockDevice(block) => {
                return Entry::block_device(
                    filename,
                    node.header,
                    block,
                    node_id.get().try_into().map_err(|e: std::num::TryFromIntError| {
                        BackhandError::NumericConversion(format!("block device node id: {}", e))
                    })?,
                    inode_writer,
                    superblock,
                    kind,
                    id_table,
                );
            }
            InnerNode::NamedPipe => {
                return Entry::named_pipe(
                    filename,
                    node.header,
                    node_id.get().try_into().map_err(|e: std::num::TryFromIntError| {
                        BackhandError::NumericConversion(format!("named pipe node id: {}", e))
                    })?,
                    inode_writer,
                    superblock,
                    kind,
                    id_table,
                );
            }
            InnerNode::Socket => {
                return Entry::socket(
                    filename,
                    node.header,
                    node_id.get().try_into().map_err(|e: std::num::TryFromIntError| {
                        BackhandError::NumericConversion(format!("socket node id: {}", e))
                    })?,
                    inode_writer,
                    superblock,
                    kind,
                    id_table,
                );
            }
            // if dir, fall through
            InnerNode::Dir(_) => (),
        };

        // ladies and gentlemen, we have a directory
        let entries: Vec<_> = self
            .root
            .children_of(node_id)
            //only direct children
            .filter(|(_child_id, child)| {
                child.fullpath.parent().map(|child| child == node.fullpath).unwrap_or(false)
            })
            .map(|(child_id, _child)| {
                self.write_inode_dir(
                    inode_writer,
                    dir_writer,
                    node_id.get().try_into().map_err(|e: std::num::TryFromIntError| {
                        BackhandError::NumericConversion(format!(
                            "parent node id for directory: {}",
                            e
                        ))
                    })?,
                    child_id,
                    superblock,
                    kind,
                    id_table,
                )
            })
            .collect::<Result<_, _>>()?;
        let children_num = entries.len();

        // write dir
        let block_index = dir_writer.metadata_start;
        let block_offset = dir_writer.uncompressed_bytes.len() as u16;
        trace!("WRITING DIR: {block_offset:#02x?}");
        let mut total_size: usize = 3;
        for dir in Entry::into_dir(entries)? {
            let mut bytes = Cursor::new(vec![]);
            let mut writer = Writer::new(&mut bytes);
            dir.to_writer(&mut writer, kind.inner.type_endian)?;
            total_size += bytes.get_ref().len();
            dir_writer.write_all(bytes.get_ref())?;
        }
        let entry = Entry::path(
            filename,
            node.header,
            node_id.get().try_into().map_err(|e: std::num::TryFromIntError| {
                BackhandError::NumericConversion(format!("directory node id: {}", e))
            })?,
            children_num,
            parent_node_id,
            inode_writer,
            total_size,
            block_offset,
            block_index,
            superblock,
            kind,
            id_table,
        )?;
        trace!("[{:?}] entries: {:#02x?}", filename, &entry);
        Ok(entry)
    }

    /// Generate and write the resulting squashfs image to `w`
    ///
    /// # Returns
    /// The written, populated [`SuperBlock`] and the total number of bytes written, including padding
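    ///
    /// # Example
    /// A minimal sketch; the file name is illustrative:
    /// ```rust,no_run
    /// # use backhand::FilesystemWriter;
    /// # let mut fs = FilesystemWriter::default();
    /// let mut out = std::fs::File::create("image.squashfs").unwrap();
    /// let (superblock, bytes_written) = fs.write(&mut out).unwrap();
    /// ```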
    pub fn write<W: Write + Seek>(&mut self, mut w: W) -> Result<(SuperBlock, u64), BackhandError> {
        let mut superblock =
            SuperBlock::new(self.fs_compressor.id, Kind { inner: self.kind.inner.clone() });

        if self.no_duplicate_files {
            superblock.flags |= Flags::DataHasBeenDeduplicated as u16;
        }

        trace!("{:#02x?}", self.root);

        let v4_compressor = match &self.kind.inner.compressor {
            crate::kinds::VersionedCompressor::V4(compressor) => *compressor,
            crate::kinds::VersionedCompressor::CustomV4(compressor) => *compressor,
            #[allow(unreachable_patterns)]
            _ => panic!("v4 filesystem writer requires v4 compressor"),
        };

        // Empty Squashfs Superblock
        w.write_all(&[0x00; SuperBlock::SIZE])?;

        if self.emit_compression_options && self.fs_compressor.options.is_some() {
            trace!("writing compression options");
            // Use the full CompressionAction trait, which properly handles the
            // compression options and updates `superblock` as needed
            let options = v4_compressor.compression_options(
                &mut superblock,
                &self.kind,
                self.fs_compressor,
            )?;

            if let Some(options_bytes) = &options {
                w.write_all(options_bytes)?;
            }
        }

        let mut data_writer = DataWriter::new(
            v4_compressor,
            self.fs_compressor,
            self.block_size,
            self.no_duplicate_files,
        );
        let mut inode_writer = MetadataWriter::new(
            v4_compressor,
            self.fs_compressor,
            self.block_size,
            self.kind.inner.data_endian,
        );
        let mut dir_writer = MetadataWriter::new(
            v4_compressor,
            self.fs_compressor,
            self.block_size,
            self.kind.inner.data_endian,
        );

        info!("Creating Inodes and Dirs");
        //trace!("TREE: {:#02x?}", &self.root);
        info!("Writing Data");
        self.write_data(self.fs_compressor, self.block_size, &mut w, &mut data_writer)?;
        info!("Writing Data Fragments");
        // Compress fragments and write
        data_writer.finalize(&mut w)?;

        info!("Writing Other stuff");
        let root = self.write_inode_dir(
            &mut inode_writer,
            &mut dir_writer,
            0,
            1.try_into().map_err(|e: std::num::TryFromIntError| {
                BackhandError::NumericConversion(e.to_string())
            })?,
            &superblock,
            &self.kind,
            &self.id_table,
        )?;
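        // the root inode reference packs the metadata block start in the upper
        // bits and the byte offset within that block in the lower 16 bits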
        superblock.root_inode = ((root.start as u64) << 16) | ((root.offset as u64) & 0xffff);
        superblock.inode_count =
            self.root.nodes.len().try_into().map_err(|e: std::num::TryFromIntError| {
                BackhandError::NumericConversion(format!("inode count: {}", e))
            })?;
        superblock.block_size = self.block_size;
        superblock.block_log = self.block_log;
        superblock.mod_time = self.mod_time;

        info!("Writing Inodes");
        superblock.inode_table = w.stream_position()?;
        inode_writer.finalize(&mut w)?;

        info!("Writing Dirs");
        superblock.dir_table = w.stream_position()?;
        dir_writer.finalize(&mut w)?;

        info!("Writing Frag Lookup Table");
        let (table_position, count) =
            self.write_lookup_table(&mut w, &data_writer.fragment_table, fragment::SIZE)?;
        superblock.frag_table = table_position;
        superblock.frag_count = count;

        info!("Writing Id Lookup Table");
        let (table_position, count) = self.write_lookup_table(&mut w, &self.id_table, Id::SIZE)?;
        superblock.id_table = table_position;
        superblock.id_count = count.try_into().map_err(|e: std::num::TryFromIntError| {
            BackhandError::NumericConversion(format!("id count: {}", e))
        })?;

        info!("Finalize Superblock and End Bytes");
        let bytes_written = self.finalize(w, &mut superblock)?;

        info!("Success");
        Ok((superblock, bytes_written))
    }

    fn finalize<W>(&self, mut w: W, superblock: &mut SuperBlock) -> Result<u64, BackhandError>
    where
        W: Write + Seek,
    {
        superblock.bytes_used = w.stream_position()?;

        // pad bytes if required
        let mut pad_len = 0;
        if self.pad_len != 0 {
            // Pad the image out to the next multiple of pad_len
            info!("Writing Padding");
            let blocks_used: u64 = superblock.bytes_used / (self.pad_len as u64);
            let total_pad_len = (blocks_used + 1) * (self.pad_len as u64);
            pad_len = total_pad_len - superblock.bytes_used;

            // Write 1K at a time
            let mut total_written = 0;
            while w.stream_position()? < (superblock.bytes_used + pad_len) {
                let arr = &[0x00; 1024];

                // check if last block to write
                let len = if (pad_len - total_written) < 1024 {
                    (pad_len - total_written) % 1024
                } else {
                    // else, full 1K
                    1024
                };

                w.write_all(
                    &arr[..len.try_into().map_err(|e: std::num::TryFromIntError| {
                        BackhandError::NumericConversion(format!("padding chunk length: {}", e))
                    })?],
                )?;
                total_written += len;
            }
        }

        // Seek back to the beginning and write the superblock
        info!("Writing Superblock");
        w.rewind()?;
        let mut writer = Writer::new(&mut w);
        superblock.to_writer(
            &mut writer,
            (
                self.kind.inner.magic,
                self.kind.inner.version_major,
                self.kind.inner.version_minor,
                self.kind.inner.type_endian,
            ),
        )?;
        info!("Writing Finished");

        //clean any cache, make sure the output is on disk
        w.flush()?;
        Ok(superblock.bytes_used + pad_len)
    }

    /// Write a lookup table (e.g. the fragment or id table) as a series of metadata
    /// blocks, followed by a list of pointers to those blocks.
    ///
    /// For example, writing a fragment table:
    /// ```text
    ///  ┌──────────────────────────────┐
    ///  │Metadata                      │◄───┐
    ///  │┌────────────────────────────┐│    │
    ///  ││pointer to fragment block   ││    │
    ///  │├────────────────────────────┤│    │
    ///  ││pointer to fragment block   ││    │
    ///  │└────────────────────────────┘│    │
    ///  └──────────────────────────────┘    │
    ///  ┌──────────────────────────────┐    │
    ///  │Metadata                      │◄─┐ │
    ///  │┌────────────────────────────┐│  │ │
    ///  ││pointer to fragment block   ││  │ │
    ///  │├────────────────────────────┤│  │ │
    ///  ││pointer to fragment block   ││  │ │
    ///  │└────────────────────────────┘│  │ │
    ///  └──────────────────────────────┘  │ │
    ///  ┌──────────────────────────────┐──│─│───►superblock.frag_table
    ///  │Frag Table                    │  │ │
    ///  │┌────────────────────────────┐│  │ │
    ///  ││fragment0(u64)         ─────────│─┘
    ///  │├────────────────────────────┤│  │
    ///  ││fragment1(u64)         ─────────┘
    ///  │└────────────────────────────┘│
    ///  └──────────────────────────────┘
    ///  ```
    fn write_lookup_table<D, W>(
        &self,
        mut w: W,
        table: &[D],
        element_size: usize,
    ) -> Result<(u64, u32), BackhandError>
    where
        D: DekuWriter<deku::ctx::Endian>,
        W: Write + Seek,
    {
        let mut ptrs: Vec<u64> = vec![];
        let mut table_bytes = Cursor::new(Vec::with_capacity(table.len() * element_size));
        let mut iter = table.iter().peekable();
        while let Some(t) = iter.next() {
            // convert fragment ptr to bytes
            let mut table_writer = Writer::new(&mut table_bytes);
            t.to_writer(&mut table_writer, self.kind.inner.type_endian)?;

            // once table_bytes + next is over the maximum size of a metadata block, write
            if ((table_bytes.get_ref().len() + element_size) > METADATA_MAXSIZE)
                || iter.peek().is_none()
            {
                ptrs.push(w.stream_position()?);

                // write metadata len
                let len = metadata::set_if_uncompressed(table_bytes.get_ref().len() as u16);
                let mut writer = Writer::new(&mut w);
                len.to_writer(&mut writer, self.kind.inner.data_endian)?;
                // write metadata bytes
                w.write_all(table_bytes.get_ref())?;

                table_bytes.get_mut().clear();
                table_bytes.rewind()?;
            }
        }

        let table_position = w.stream_position()?;
        let count = table.len() as u32;

        // write ptr
        for ptr in ptrs {
            let mut writer = Writer::new(&mut w);
            ptr.to_writer(&mut writer, self.kind.inner.type_endian)?;
        }

        Ok((table_position, count))
    }

    /// Return index of id, adding if required
    fn lookup_add_id(&mut self, id: u32) -> u32 {
        let found = self.id_table.iter().position(|a| a.num == id);

        match found {
            Some(found) => found as u32,
            None => {
                self.id_table.push(Id::new(id));
                self.id_table.len() as u32 - 1
            }
        }
    }
}

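/// Writer adapter that offsets all `SeekFrom::Start` seeks by a fixed base
/// `offset`, so an image can be written into a larger file at that offset.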
struct WriterWithOffset<W: WriteSeek> {
    w: W,
    offset: u64,
}

impl<W: WriteSeek> WriterWithOffset<W> {
    pub fn new(mut w: W, offset: u64) -> std::io::Result<Self> {
        w.seek(SeekFrom::Start(offset))?;
        Ok(Self { w, offset })
    }
}

impl<W> Write for WriterWithOffset<W>
where
    W: WriteSeek,
{
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.w.write(buf)
    }

    fn flush(&mut self) -> std::io::Result<()> {
        self.w.flush()
    }
}

impl<W> Seek for WriterWithOffset<W>
where
    W: Write + Seek,
{
    fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
        let seek = match pos {
            SeekFrom::Start(start) => SeekFrom::Start(self.offset + start),
            seek => seek,
        };
        self.w.seek(seek).map(|x| x - self.offset)
    }
}

/// All compression options for [`FilesystemWriter`]
#[derive(Debug, Copy, Clone, Default)]
pub struct FilesystemCompressor {
    pub(crate) id: Compressor,
    pub(crate) options: Option<CompressionOptions>,
    pub(crate) extra: Option<CompressionExtra>,
}

impl FilesystemCompressor {
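    /// Create a compressor, validating that `options` (when present) match `id`
    ///
    /// A short sketch:
    /// ```rust
    /// # use backhand::{FilesystemCompressor, compression::Compressor};
    /// let compressor = FilesystemCompressor::new(Compressor::Xz, None).unwrap();
    /// ```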
    pub fn new(id: Compressor, options: Option<CompressionOptions>) -> Result<Self, BackhandError> {
        match (id, options) {
            // lz4 always requires options
            (Compressor::Lz4, None) => {
                error!("Lz4 compression options missing");
                return Err(BackhandError::InvalidCompressionOption);
            }
            //others having no options is always valid
            (_, None) => {}
            //only the corresponding options are valid
            (Compressor::Gzip, Some(CompressionOptions::Gzip(_)))
            | (Compressor::Lzma, Some(CompressionOptions::Lzma))
            | (Compressor::Lzo, Some(CompressionOptions::Lzo(_)))
            | (Compressor::Xz, Some(CompressionOptions::Xz(_)))
            | (Compressor::Lz4, Some(CompressionOptions::Lz4(_)))
            | (Compressor::Zstd, Some(CompressionOptions::Zstd(_))) => {}
            //other combinations are invalid
            _ => {
                error!("invalid compression settings");
                return Err(BackhandError::InvalidCompressionOption);
            }
        }
        Ok(Self { id, options, extra: None })
    }

    /// Set options that were originally derived from the image when created from a [`FilesystemReader`].
    /// These options will be written to the image when
    /// <https://github.com/wcampbell0x2a/backhand/issues/53> is fixed.
    pub fn options(&mut self, options: CompressionOptions) -> Result<(), BackhandError> {
        self.options = Some(options);
        Ok(())
    }

    /// Extra options that are *only* used during compression and are *not* stored in the
    /// resulting image
    pub fn extra(&mut self, extra: CompressionExtra) -> Result<(), BackhandError> {
        if matches!(extra, CompressionExtra::Xz(_)) && matches!(self.id, Compressor::Xz) {
            self.extra = Some(extra);
            return Ok(());
        }

        error!("invalid extra compression settings");
        Err(BackhandError::InvalidCompressionOption)
    }
}

/// Compression options only for [`FilesystemWriter`]
#[derive(Debug, Copy, Clone)]
pub enum CompressionExtra {
    Xz(ExtraXz),
}

/// Xz compression option for [`FilesystemWriter`]
#[derive(Debug, Copy, Clone, Default)]
pub struct ExtraXz {
    pub(crate) level: Option<u32>,
}

impl ExtraXz {
    /// Set the compression preset level. Must be in the range `0..=9`
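    ///
    /// A short sketch:
    /// ```rust
    /// # use backhand::ExtraXz;
    /// let mut xz = ExtraXz::default();
    /// xz.level(6).unwrap();
    /// assert!(xz.level(10).is_err());
    /// ```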
    pub fn level(&mut self, level: u32) -> Result<(), BackhandError> {
        if level > 9 {
            return Err(BackhandError::InvalidCompressionOption);
        }
        self.level = Some(level);

        Ok(())
    }
}