backhand 0.25.1

Library for reading, creating, and modifying SquashFS file systems.
Documentation
//! Reader traits

use no_std_io2::io::{Read, Seek};
use std::collections::HashMap;
use std::io::{BufRead, Cursor, SeekFrom, Write};

use deku::prelude::*;
use solana_nohash_hasher::IntMap;
use tracing::{error, trace};

use super::export::Export;
use super::fragment::Fragment;
use super::inode::Inode;
use super::metadata::METADATA_MAXSIZE;
use super::squashfs::SuperBlock;
use super::{fragment, metadata};
use crate::error::BackhandError;
use crate::kinds::Kind;

/// Private struct containing logic to read the `Squashfs` section from a file
///
/// Wraps any [`BufReadSeek`] and translates positions so that the squashfs
/// image embedded at `offset` within a larger file appears to start at
/// position 0 (see the `Seek` impl).
#[derive(Debug)]
pub(crate) struct SquashfsReaderWithOffset<R: BufReadSeek> {
    /// Underlying stream; positioned at `offset` by [`Self::new`]
    io: R,
    /// Offset from start of file to squashfs
    offset: u64,
}

impl<R: BufReadSeek> SquashfsReaderWithOffset<R> {
    /// Build a reader view that treats `offset` as position zero.
    ///
    /// Eagerly seeks the underlying stream to `offset` so the first read
    /// already starts inside the squashfs image; any I/O error from that
    /// seek is propagated to the caller.
    pub fn new(mut io: R, offset: u64) -> std::io::Result<Self> {
        let _ = io.seek(SeekFrom::Start(offset))?;
        Ok(Self { offset, io })
    }
}

// Buffered reads delegate straight to the inner stream: only `seek` needs
// offset translation (see the `Seek` impl), since relative reads continue
// from wherever the translated seek left the stream.
impl<R: BufReadSeek> BufRead for SquashfsReaderWithOffset<R> {
    fn fill_buf(&mut self) -> std::io::Result<&[u8]> {
        self.io.fill_buf()
    }

    fn consume(&mut self, amt: usize) {
        self.io.consume(amt)
    }
}

// Plain reads also pass through untouched; `new` positioned the stream at
// `offset`, so sequential reads are already inside the image.
impl<R: BufReadSeek> Read for SquashfsReaderWithOffset<R> {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        self.io.read(buf)
    }
}

impl<R: BufReadSeek> Seek for SquashfsReaderWithOffset<R> {
    /// Seek within the embedded squashfs image.
    ///
    /// `Start` positions are shifted forward by `self.offset`; `Current` and
    /// `End` seeks pass through unchanged. The resulting absolute position is
    /// translated back into image-relative coordinates before returning.
    ///
    /// NOTE(review): `x - self.offset` underflows if a `Current`/`End` seek
    /// resolves to a point before `offset`; callers appear to stay within the
    /// image — confirm.
    fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
        let seek = match pos {
            SeekFrom::Start(start) => SeekFrom::Start(self.offset + start),
            seek => seek,
        };
        self.io.seek(seek).map(|x| x - self.offset)
    }
}

/// Pseudo-Trait for BufRead + Seek
///
/// Blanket-implemented for every `BufRead + Seek + Send` type, so it acts as
/// a trait alias: use it as a bound where both capabilities are required.
pub trait BufReadSeek: BufRead + Seek + Send {}
impl<T: BufRead + Seek + Send> BufReadSeek for T {}

/// Pseudo-Trait for Write + Seek
///
/// Blanket-implemented for every `Write + Seek` type; the writing-side
/// counterpart to [`BufReadSeek`].
pub trait WriteSeek: Write + Seek {}
impl<T: Write + Seek> WriteSeek for T {}

// Blanket impl: any buffered, seekable reader automatically gains the
// squashfs table-parsing methods of `SquashFsReader` below.
impl<T: BufReadSeek> SquashFsReader for T {}

/// Squashfs data extraction methods implemented over [`Read`] and [`Seek`]
///
/// Each method seeks to a table location recorded in the [`SuperBlock`] and
/// decodes it with deku, using the endianness and bit order carried by
/// [`Kind`].
pub trait SquashFsReader: BufReadSeek {
    /// Parse Inode Table into `Vec<(position_read, Inode)>`
    ///
    /// Walks the metadata blocks between `inode_table_start` and
    /// `directory_table_start`, decoding as many inodes as possible from each
    /// decompressed block. Undecoded trailing bytes are carried into the next
    /// block, since a single inode may straddle a metadata-block boundary.
    ///
    /// # Errors
    /// [`BackhandError::CorruptedOrInvalidSquashfs`] when the number of parsed
    /// inodes does not match `superblock.inode_count`; otherwise any deku or
    /// I/O error hit while decoding.
    fn inodes(
        &mut self,
        superblock: &SuperBlock,
        kind: &Kind,
    ) -> Result<IntMap<u32, Inode>, BackhandError> {
        self.seek(SeekFrom::Start(superblock.inode_table_start))?;

        // The directory inodes store the total, uncompressed size of the entire listing, including headers.
        // Using this size, a SquashFS reader can determine if another header with further entries
        // should be following once it reaches the end of a run.

        // Leftover bytes from the previous metadata block (a partially-read inode).
        let mut next = vec![];

        // NOTE(review): `metadata_offsets` is populated but never read in this
        // function — looks like leftover bookkeeping; confirm before removing.
        let mut metadata_offsets = vec![];
        let mut ret_vec = HashMap::default();
        let start = self.stream_position()?;

        // The inode table ends where the directory table begins.
        while self.stream_position()? < superblock.directory_table_start {
            metadata_offsets.push(self.stream_position()? - start);
            // parse into metadata
            let mut bytes = metadata::read_block(self, superblock, kind)?;

            // parse as many inodes as you can
            let mut inode_bytes = next;
            inode_bytes.append(&mut bytes);
            let mut c_inode_bytes = Cursor::new(inode_bytes.clone());
            let mut container = Reader::new(&mut c_inode_bytes);

            // store last successful read position
            let mut container_bits_read = container.bits_read;
            loop {
                match Inode::from_reader_with_ctx(
                    &mut container,
                    (
                        superblock.bytes_used,
                        superblock.block_size,
                        superblock.block_log,
                        kind.inner.type_endian,
                        // assumes bit_order is always Some for this kind — TODO confirm
                        kind.inner.bit_order.unwrap(),
                    ),
                ) {
                    Ok(inode) => {
                        ret_vec.insert(inode.header.inode_number, inode);
                        // advance the "last good" marker past the decoded inode
                        container_bits_read = container.bits_read;
                    }
                    Err(e) => {
                        if matches!(e, DekuError::Incomplete(_)) {
                            // try next block, inodes can span multiple blocks!
                            // carry the undecoded tail (from the last good byte
                            // boundary) into the next iteration
                            next = inode_bytes.clone()[(container_bits_read / 8)..].to_vec();
                            break;
                        } else {
                            error!("Fatal error parsing inode: {:?}", e);
                            return Err(BackhandError::Deku(e));
                        }
                    }
                }
            }
        }

        // Sanity check against the inode count recorded in the superblock.
        if ret_vec.len() != superblock.inode_count.try_into().unwrap() {
            error!("Parsed {} inodes, expected {}", ret_vec.len(), superblock.inode_count);
            return Err(BackhandError::CorruptedOrInvalidSquashfs);
        }

        Ok(ret_vec)
    }

    /// Extract the root `Inode` as a `BasicDirectory`
    ///
    /// `superblock.root_inode` is a packed reference: the bits above 16 give
    /// the byte offset of the metadata block within the inode table, the low
    /// 16 bits the offset inside that block once decompressed. One metadata
    /// block is read first; a second is appended only if the first parse
    /// fails (root inode straddling a block boundary).
    fn root_inode(&mut self, superblock: &SuperBlock, kind: &Kind) -> Result<Inode, BackhandError> {
        let root_inode_start = (superblock.root_inode >> 16) as usize;
        let root_inode_offset = (superblock.root_inode & 0xffff) as usize;
        tracing::info!("root_inode_start:  0x{root_inode_start:02x?}");
        tracing::info!("root_inode_offset: 0x{root_inode_offset:02x?}");
        // Reject references that point past the end of the image.
        if (root_inode_start as u64) > superblock.bytes_used {
            error!("root_inode_offset > bytes_used");
            return Err(BackhandError::CorruptedOrInvalidSquashfs);
        }

        // Assumptions are made here that the root inode fits within two metadatas
        let seek = superblock.inode_table_start + root_inode_start as u64;
        self.seek(SeekFrom::Start(seek))?;
        let mut bytes_01 = metadata::read_block(self, superblock, kind)?;

        // try reading just one metadata block
        if root_inode_offset > bytes_01.len() {
            error!("root_inode_offset > bytes.len()");
            return Err(BackhandError::CorruptedOrInvalidSquashfs);
        }
        let mut cursor = Cursor::new(&bytes_01[root_inode_offset..]);
        let mut new_bytes = Reader::new(&mut cursor);
        if let Ok(inode) = Inode::from_reader_with_ctx(
            &mut new_bytes,
            (
                superblock.bytes_used,
                superblock.block_size,
                superblock.block_log,
                kind.inner.type_endian,
                kind.inner.bit_order.unwrap(),
            ),
        ) {
            tracing::info!("ROOT: {:?}", inode);
            return Ok(inode);
        }

        // if that doesn't work, we need another block: append it and retry
        // the parse from the same offset with the longer buffer
        let bytes_02 = metadata::read_block(self, superblock, kind)?;
        bytes_01.write_all(&bytes_02)?;
        if root_inode_offset > bytes_01.len() {
            error!("root_inode_offset > bytes.len()");
            return Err(BackhandError::CorruptedOrInvalidSquashfs);
        }

        let mut cursor = Cursor::new(&bytes_01[root_inode_offset..]);
        let mut new_bytes = Reader::new(&mut cursor);
        match Inode::from_reader_with_ctx(
            &mut new_bytes,
            (
                superblock.bytes_used,
                superblock.block_size,
                superblock.block_log,
                kind.inner.type_endian,
                kind.inner.bit_order.unwrap(),
            ),
        ) {
            Ok(inode) => Ok(inode),
            Err(e) => Err(e.into()),
        }
    }

    /// Parse required number of `Metadata`s uncompressed blocks required for `Dir`s
    ///
    /// Reads metadata blocks from `directory_table_start` up to `end_ptr`.
    /// Returns a map from compressed offset (relative to the table start) to
    /// the matching offset inside the concatenated decompressed bytes, plus
    /// those bytes; directory inodes reference entries via the compressed
    /// offsets.
    fn dir_blocks(
        &mut self,
        superblock: &SuperBlock,
        end_ptr: u64,
        kind: &Kind,
    ) -> Result<(IntMap<u64, u64>, Vec<u8>), BackhandError> {
        let seek = superblock.directory_table_start;
        self.seek(SeekFrom::Start(seek))?;
        let mut block_map = IntMap::default();
        let mut dir_data = Vec::new();

        // NOTE(review): `!=` assumes block reads land exactly on `end_ptr`;
        // a corrupt table that overshoots it would loop until an I/O error.
        while self.stream_position()? != end_ptr {
            let metadata_start = self.stream_position()?;
            let bytes = metadata::read_block(self, superblock, kind)?;
            let compressed_offset = metadata_start - seek;
            let decompressed_offset = dir_data.len() as u64;
            block_map.insert(compressed_offset, decompressed_offset);
            dir_data.extend(bytes);
        }

        Ok((block_map, dir_data))
    }

    /// Parse Fragment Table
    ///
    /// Delegates to [`Self::fragment_lookup_table`], sizing the table from
    /// `superblock.fragments`. Returns the table pointer alongside the parsed
    /// entries (always `Some` on success).
    fn fragments(
        &mut self,
        superblock: &SuperBlock,
        kind: &Kind,
    ) -> Result<Option<(u64, Vec<Fragment>)>, BackhandError> {
        let (ptr, table) = self.fragment_lookup_table(
            superblock,
            superblock.fragment_table_start,
            u64::from(superblock.fragments) * fragment::SIZE as u64,
            kind,
        )?;
        trace!("{:02x?}", table);
        Ok(Some((ptr, table)))
    }

    /// Parse Export Table
    ///
    /// Stub: always returns `Ok(None)` — no export (NFS) table support in
    /// this reader.
    fn export(
        &mut self,
        _superblock: &SuperBlock,
        _kind: &Kind,
    ) -> Result<Option<(u64, Vec<Export>)>, BackhandError> {
        Ok(None)
    }

    /// Parse UID Table
    ///
    /// Reads `no_uids` raw `u16` entries starting at `uid_start`, decoded
    /// with the kind's endianness and bit order.
    fn uid(&mut self, superblock: &SuperBlock, kind: &Kind) -> Result<Vec<u16>, BackhandError> {
        let ptr = superblock.uid_start;
        let count = superblock.no_uids as u64;
        self.seek(SeekFrom::Start(ptr))?;

        // Bulk-read the whole table, then decode from an in-memory cursor.
        let mut buf = vec![0u8; count as usize * core::mem::size_of::<u16>()];
        self.read_exact(&mut buf)?;

        let mut cursor = Cursor::new(buf);
        let mut deku_reader = Reader::new(&mut cursor);
        let mut table = Vec::with_capacity(count as usize);
        for _ in 0..count {
            let v = u16::from_reader_with_ctx(
                &mut deku_reader,
                (kind.inner.type_endian, kind.inner.bit_order.unwrap()),
            )?;
            table.push(v);
        }

        Ok(table)
    }

    /// Parse GUID Table
    ///
    /// Reads `no_guids` raw `u16` entries starting at `guid_start`; mirrors
    /// [`Self::uid`].
    fn guid(&mut self, superblock: &SuperBlock, kind: &Kind) -> Result<Vec<u16>, BackhandError> {
        let ptr = superblock.guid_start;
        let count = superblock.no_guids as u64;
        self.seek(SeekFrom::Start(ptr))?;

        // Bulk-read the whole table, then decode from an in-memory cursor.
        let mut buf = vec![0u8; count as usize * core::mem::size_of::<u16>()];
        self.read_exact(&mut buf)?;

        let mut cursor = Cursor::new(buf);
        let mut deku_reader = Reader::new(&mut cursor);
        let mut table = Vec::with_capacity(count as usize);
        for _ in 0..count {
            let v = u16::from_reader_with_ctx(
                &mut deku_reader,
                (kind.inner.type_endian, kind.inner.bit_order.unwrap()),
            )?;
            table.push(v);
        }

        Ok(table)
    }

    /// Parse Fragment Lookup Table (specialized for Fragment context)
    ///
    /// Two-level layout: at `seek` sits an index of `u64` pointers, one per
    /// metadata block needed to hold all fragment entries; each pointer is
    /// followed to read that block and decode its fragments. Returns `seek`
    /// unchanged together with the collected entries.
    fn fragment_lookup_table(
        &mut self,
        superblock: &SuperBlock,
        seek: u64,
        size: u64,
        kind: &Kind,
    ) -> Result<(u64, Vec<Fragment>), BackhandError> {
        trace!(
            "fragment_lookup_table: seek=0x{:x}, size={}, fragments={}",
            seek, size, superblock.fragments
        );

        // V3 fragment table parsing follows the same pattern as v4:
        // 1. Read index table that points to metadata blocks
        // 2. Read metadata blocks to get fragment entries

        // Calculate number of metadata blocks needed
        let fragment_count = superblock.fragments as u64;
        let fragment_bytes = fragment_count * fragment::SIZE as u64;
        let metadata_block_count = fragment_bytes.div_ceil(METADATA_MAXSIZE as u64);

        trace!(
            "fragment_lookup_table: {} fragments need {} metadata blocks",
            fragment_count, metadata_block_count
        );

        // Read the index table (pointers to metadata blocks)
        self.seek(SeekFrom::Start(seek))?;
        let index_size = metadata_block_count * core::mem::size_of::<u64>() as u64;
        let mut index_buf = vec![0u8; index_size as usize];
        self.read_exact(&mut index_buf)?;

        // Parse the index table
        let mut index_ptrs = vec![];
        let mut cursor = Cursor::new(&index_buf);
        let mut reader = Reader::new(&mut cursor);

        for i in 0..metadata_block_count {
            let ptr = u64::from_reader_with_ctx(
                &mut reader,
                (kind.inner.type_endian, kind.inner.bit_order.unwrap()),
            )?;
            trace!("Fragment metadata block {}: pointer 0x{:x}", i, ptr);
            index_ptrs.push(ptr);
        }

        // Read fragments from metadata blocks
        let mut ret_vec = vec![];
        let mut fragments_read = 0;

        for (i, &ptr) in index_ptrs.iter().enumerate() {
            if fragments_read >= fragment_count {
                break;
            }

            // How many entries this block should contribute: either a full
            // block's worth or whatever remains of the total.
            let fragments_in_this_block = core::cmp::min(
                fragment_count - fragments_read,
                METADATA_MAXSIZE as u64 / fragment::SIZE as u64,
            );

            trace!(
                "Reading {} fragments from metadata block {} at 0x{:x}",
                fragments_in_this_block, i, ptr
            );

            self.seek(SeekFrom::Start(ptr))?;
            // One metadata block per index pointer (count = 1)
            let block_fragments = self.fragment_metadata_with_count(superblock, ptr, 1, kind)?;

            // Only take the fragments we need
            let take_count =
                core::cmp::min(block_fragments.len(), fragments_in_this_block as usize);
            ret_vec.extend_from_slice(&block_fragments[..take_count]);
            fragments_read += take_count as u64;
        }

        trace!("fragment_lookup_table: successfully read {} fragments", ret_vec.len());
        Ok((seek, ret_vec))
    }

    /// Parse count of Fragment `Metadata` blocks
    ///
    /// Decompresses `count` metadata blocks starting at `seek`, concatenates
    /// their bytes, and decodes `Fragment` entries from them until a decode
    /// fails (the tail of the last block may not hold a full entry).
    fn fragment_metadata_with_count(
        &mut self,
        superblock: &SuperBlock,
        seek: u64,
        count: u64,
        kind: &Kind,
    ) -> Result<Vec<Fragment>, BackhandError> {
        trace!("fragment_metadata_with_count: seek=0x{:02x}, count={}", seek, count);
        self.seek(SeekFrom::Start(seek))?;

        let mut all_bytes = vec![];
        for i in 0..count {
            let pos_before = self.stream_position()?;
            let mut bytes = metadata::read_block(self, superblock, kind)?;
            let pos_after = self.stream_position()?;
            trace!(
                "fragment metadata block {}: pos 0x{:x} -> 0x{:x}, read {} decompressed bytes, first 20: {:02x?}",
                i,
                pos_before,
                pos_after,
                bytes.len(),
                &bytes[..core::cmp::min(20, bytes.len())]
            );
            all_bytes.append(&mut bytes);
        }

        trace!(
            "fragment_metadata_with_count: total decompressed bytes: {}, content: {:02x?}",
            all_bytes.len(),
            &all_bytes[..core::cmp::min(50, all_bytes.len())]
        );

        let mut ret_vec = vec![];
        // Read until we fail to turn bytes into Fragment
        let mut cursor = Cursor::new(&all_bytes);
        let mut container = Reader::new(&mut cursor);
        loop {
            match Fragment::from_reader_with_ctx(
                &mut container,
                (kind.inner.type_endian, kind.inner.bit_order.unwrap()),
            ) {
                Ok(t) => {
                    trace!("Parsed fragment: {:?}", t);
                    ret_vec.push(t);
                }
                Err(e) => {
                    // Expected at end-of-data; treated as "no more fragments"
                    trace!("Failed to parse more fragments: {:?}", e);
                    break;
                }
            }
        }

        Ok(ret_vec)
    }
}