use core::num::NonZeroUsize;
use no_std_io2::io::{Read, Seek, Write};
use std::ffi::OsStr;
use std::io::{Cursor, SeekFrom};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::Mutex;
use std::time::{SystemTime, UNIX_EPOCH};
use deku::prelude::*;
use tracing::{error, info, trace};
use crate::error::BackhandError;
use crate::kinds::Kind;
use crate::kinds::LE_V4_0;
use crate::v4::compressor::{CompressionOptions, Compressor};
use crate::v4::data::DataWriter;
use crate::v4::entry::Entry;
use crate::v4::filesystem::node::SquashfsSymlink;
use crate::v4::filesystem::node::{InnerNode, Nodes};
use crate::v4::filesystem::normalize_squashfs_path;
use crate::v4::fragment;
use crate::v4::id::Id;
use crate::v4::metadata::{self, METADATA_MAXSIZE, MetadataWriter};
use crate::v4::reader::WriteSeek;
use crate::v4::squashfs::SuperBlock;
use crate::{
DEFAULT_BLOCK_SIZE, DEFAULT_PAD_LEN, FilesystemReader, Flags, MAX_BLOCK_SIZE, MIN_BLOCK_SIZE,
Node, NodeHeader, SquashfsBlockDevice, SquashfsCharacterDevice, SquashfsDir,
SquashfsFileWriter,
};
/// In-memory builder for a SquashFS v4 image.
///
/// Nodes (files, directories, devices, ...) are accumulated via the `push_*`
/// methods and serialized in one pass by [`FilesystemWriter::write`].
#[derive(Debug)]
pub struct FilesystemWriter<'a, 'b, 'c> {
    /// Target image format description (magic, endianness, version)
    pub(crate) kind: Kind,
    /// Data block size in bytes; a power of two within
    /// `MIN_BLOCK_SIZE..=MAX_BLOCK_SIZE` (enforced by `set_block_size`)
    pub(crate) block_size: u32,
    /// Superblock modification time, seconds since the Unix epoch
    pub(crate) mod_time: u32,
    /// uid/gid lookup table written into the image
    pub(crate) id_table: Vec<Id>,
    /// Compressor id, optional options, and optional extra tuning
    pub(crate) fs_compressor: FilesystemCompressor,
    /// All filesystem nodes, kept sorted by full path (see `from_fs_reader`)
    pub(crate) root: Nodes<SquashfsFileWriter<'a, 'b, 'c>>,
    /// log2(block_size), stored alongside block_size in the superblock
    pub(crate) block_log: u16,
    /// Pad the finished image to a multiple of this many bytes; 0 disables
    pub(crate) pad_len: u32,
    /// When true, identical file data is stored once (deduplication)
    pub(crate) no_duplicate_files: bool,
    /// When true, compressor options are written right after the superblock
    pub(crate) emit_compression_options: bool,
}
impl Default for FilesystemWriter<'_, '_, '_> {
    /// Writer with sensible defaults: little-endian v4.0 kind, default block
    /// size and padding, root-only id table, deduplication and compression
    /// options emission enabled, mod time of 0.
    fn default() -> Self {
        Self {
            kind: Kind { inner: Arc::new(LE_V4_0) },
            block_size: DEFAULT_BLOCK_SIZE,
            block_log: DEFAULT_BLOCK_SIZE.ilog2() as u16,
            mod_time: 0,
            id_table: Id::root(),
            fs_compressor: FilesystemCompressor::default(),
            root: Nodes::new_root(NodeHeader::default()),
            pad_len: DEFAULT_PAD_LEN,
            no_duplicate_files: true,
            emit_compression_options: true,
        }
    }
}
impl<'a, 'b, 'c> FilesystemWriter<'a, 'b, 'c> {
/// Set the data block size used when writing the image.
///
/// # Panics
/// Panics if `block_size` is not a power of two inside
/// `MIN_BLOCK_SIZE..=MAX_BLOCK_SIZE`.
pub fn set_block_size(&mut self, block_size: u32) {
    assert!(
        block_size.is_power_of_two() && (MIN_BLOCK_SIZE..=MAX_BLOCK_SIZE).contains(&block_size),
        "invalid block_size"
    );
    self.block_size = block_size;
    // block_log must always mirror log2(block_size)
    self.block_log = block_size.ilog2() as u16;
}
/// Set the superblock modification time (seconds since the Unix epoch).
pub fn set_time(&mut self, mod_time: u32) {
    self.mod_time = mod_time;
}
/// Stamp the image with the current wall-clock time.
pub fn set_current_time(&mut self) {
    // unwrap: duration_since only fails if the clock is set before 1970
    self.mod_time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() as u32;
}
/// Set the target image format (endianness/magic/version).
pub fn set_kind(&mut self, kind: Kind) {
    self.kind = kind;
}
/// Set the permission bits of the root directory.
pub fn set_root_mode(&mut self, mode: u16) {
    self.root.root_mut().header.permissions = mode;
}
/// Set the owner uid of the root directory.
pub fn set_root_uid(&mut self, uid: u32) {
    self.root.root_mut().header.uid = uid;
}
/// Set the owner gid of the root directory.
pub fn set_root_gid(&mut self, gid: u32) {
    self.root.root_mut().header.gid = gid;
}
/// Set the compressor (and its options) used when writing.
pub fn set_compressor(&mut self, compressor: FilesystemCompressor) {
    self.fs_compressor = compressor;
}
/// Reset the id lookup table so it contains only the root id.
pub fn set_only_root_id(&mut self) {
    self.id_table = Id::root();
}
/// Pad the end of the image to a multiple of `pad_kib` KiB.
pub fn set_kib_padding(&mut self, pad_kib: u32) {
    self.pad_len = pad_kib * 1024;
}
/// Disable end-of-image padding entirely.
pub fn set_no_padding(&mut self) {
    self.pad_len = 0;
}
/// When `value` is true, identical file data is stored only once.
pub fn set_no_duplicate_files(&mut self, value: bool) {
    self.no_duplicate_files = value;
}
/// When `value` is true, compressor options are written after the superblock.
pub fn set_emit_compression_options(&mut self, value: bool) {
    self.emit_compression_options = value;
}
/// Seed a writer from an image opened with a [`FilesystemReader`].
///
/// File nodes keep a handle back into the source image so their data can be
/// copied (or recompressed) lazily at write time; all other node kinds are
/// duplicated. Padding is reset to the default and compression-options
/// emission is enabled.
pub fn from_fs_reader(reader: &'a FilesystemReader<'b>) -> Result<Self, BackhandError> {
    let mut nodes: Vec<Node<_>> = Vec::with_capacity(reader.root.nodes.len());
    for node in &reader.root.nodes {
        let inner = match &node.inner {
            // keep a lazy handle into the source image's data
            InnerNode::File(file) => {
                InnerNode::File(SquashfsFileWriter::SquashfsFile(reader.file(file)))
            }
            InnerNode::Symlink(symlink) => InnerNode::Symlink(symlink.clone()),
            InnerNode::Dir(dir) => InnerNode::Dir(*dir),
            InnerNode::CharacterDevice(dev) => InnerNode::CharacterDevice(*dev),
            InnerNode::BlockDevice(dev) => InnerNode::BlockDevice(*dev),
            InnerNode::NamedPipe => InnerNode::NamedPipe,
            InnerNode::Socket => InnerNode::Socket,
        };
        nodes.push(Node { fullpath: node.fullpath.clone(), header: node.header, inner });
    }
    // insertion/lookup elsewhere relies on path-sorted order
    nodes.sort();
    Ok(Self {
        kind: Kind { inner: reader.kind.inner.clone() },
        block_size: reader.block_size,
        block_log: reader.block_log,
        fs_compressor: FilesystemCompressor::new(reader.compressor, reader.compression_options)?,
        mod_time: reader.mod_time,
        id_table: reader.id_table.clone(),
        root: Nodes { nodes },
        pad_len: DEFAULT_PAD_LEN,
        no_duplicate_files: reader.no_duplicate_files,
        emit_compression_options: true,
    })
}
/// Look up a node by absolute path, returning mutable access if it exists.
fn mut_node<S>(&mut self, find_path: S) -> Option<&mut Node<SquashfsFileWriter<'a, 'b, 'c>>>
where
    S: AsRef<Path>,
{
    normalize_squashfs_path(find_path.as_ref())
        .ok()
        .and_then(move |path| self.root.node_mut(path))
}

/// Register the header's uid/gid in the id table, normalize `path`, and
/// insert `node` into the tree.
fn insert_node<P>(
    &mut self,
    path: P,
    header: NodeHeader,
    node: InnerNode<SquashfsFileWriter<'a, 'b, 'c>>,
) -> Result<(), BackhandError>
where
    P: AsRef<Path>,
{
    // record both ids so inode generation can resolve them later
    self.lookup_add_id(header.gid);
    self.lookup_add_id(header.uid);
    let fullpath = normalize_squashfs_path(path.as_ref())?;
    self.root.insert(Node::new(fullpath, header, node))
}
/// Insert `reader` as a regular file at `path`.
pub fn push_file<P>(
    &mut self,
    reader: impl Read + 'c,
    path: P,
    header: NodeHeader,
) -> Result<(), BackhandError>
where
    P: AsRef<Path>,
{
    // shared + locked so the bytes can be pulled lazily during write()
    let shared = Arc::new(Mutex::new(reader));
    self.insert_node(path, header, InnerNode::File(SquashfsFileWriter::UserDefined(shared)))
}
/// Mutable access to the file at `find_path`, if it exists and is a file.
pub fn mut_file<S>(&mut self, find_path: S) -> Option<&mut SquashfsFileWriter<'a, 'b, 'c>>
where
    S: AsRef<Path>,
{
    match self.mut_node(find_path)?.inner {
        InnerNode::File(ref mut file) => Some(file),
        _ => None,
    }
}
/// Replace the contents of the existing file at `find_path` with `reader`.
///
/// # Errors
/// [`BackhandError::FileNotFound`] if no file node exists at that path.
pub fn replace_file<S>(
    &mut self,
    find_path: S,
    reader: impl Read + 'c,
) -> Result<(), BackhandError>
where
    S: AsRef<Path>,
{
    match self.mut_file(find_path) {
        Some(file) => {
            *file = SquashfsFileWriter::UserDefined(Arc::new(Mutex::new(reader)));
            Ok(())
        }
        None => Err(BackhandError::FileNotFound),
    }
}
/// Insert a symlink at `path` pointing at `link`.
pub fn push_symlink<P, S>(
    &mut self,
    link: S,
    path: P,
    header: NodeHeader,
) -> Result<(), BackhandError>
where
    P: AsRef<Path>,
    S: Into<PathBuf>,
{
    let inner = InnerNode::Symlink(SquashfsSymlink { link: link.into() });
    self.insert_node(path, header, inner)
}
/// Insert an empty directory at `path`.
pub fn push_dir<P>(&mut self, path: P, header: NodeHeader) -> Result<(), BackhandError>
where
    P: AsRef<Path>,
{
    self.insert_node(path, header, InnerNode::Dir(SquashfsDir::default()))
}
pub fn push_dir_all<P>(&mut self, path: P, header: NodeHeader) -> Result<(), BackhandError>
where
P: AsRef<Path>,
{
let path = normalize_squashfs_path(path.as_ref())?;
let ancestors: Vec<&Path> = path.ancestors().collect();
for file in ancestors.iter().rev() {
match self.root.nodes.binary_search_by(|node| node.fullpath.as_path().cmp(file)) {
Ok(index) => {
let node = &self.root.nodes[index];
if !matches!(&node.inner, InnerNode::Dir(_)) {
return Err(BackhandError::InvalidFilePath);
}
}
Err(_index) => self.push_dir(file, header)?,
}
}
Ok(())
}
/// Insert a character device with `device_number` at `path`.
pub fn push_char_device<P>(
    &mut self,
    device_number: u32,
    path: P,
    header: NodeHeader,
) -> Result<(), BackhandError>
where
    P: AsRef<Path>,
{
    let inner = InnerNode::CharacterDevice(SquashfsCharacterDevice { device_number });
    self.insert_node(path, header, inner)
}
/// Insert a block device with `device_number` at `path`.
pub fn push_block_device<P>(
    &mut self,
    device_number: u32,
    path: P,
    header: NodeHeader,
) -> Result<(), BackhandError>
where
    P: AsRef<Path>,
{
    let inner = InnerNode::BlockDevice(SquashfsBlockDevice { device_number });
    self.insert_node(path, header, inner)
}
/// Insert a named pipe (FIFO) at `path`.
pub fn push_fifo<P>(&mut self, path: P, header: NodeHeader) -> Result<(), BackhandError>
where
    P: AsRef<Path>,
{
    self.insert_node(path, header, InnerNode::NamedPipe)
}
/// Insert a unix socket at `path`.
pub fn push_socket<P>(&mut self, path: P, header: NodeHeader) -> Result<(), BackhandError>
where
    P: AsRef<Path>,
{
    self.insert_node(path, header, InnerNode::Socket)
}
/// Same as [`Self::write`], but the image is laid out starting `offset`
/// bytes into `w`; returned positions are relative to that offset.
pub fn write_with_offset<W>(
    &mut self,
    w: W,
    offset: u64,
) -> Result<(SuperBlock, u64), BackhandError>
where
    W: Write + Seek,
{
    self.write(&mut WriterWithOffset::new(w, offset)?)
}
/// Compress and write every file node's data blocks/fragments through
/// `data_writer` into `writer`, replacing each node's state with
/// [`SquashfsFileWriter::Consumed`] (file size + on-disk block info) for
/// the later inode-generation pass.
fn write_data<W>(
    &mut self,
    compressor: FilesystemCompressor,
    block_size: u32,
    mut writer: W,
    data_writer: &mut DataWriter<'b>,
) -> Result<(), BackhandError>
where
    W: WriteSeek,
{
    // iterate only the file nodes; everything else carries no data
    let files = self.root.nodes.iter_mut().filter_map(|node| match &mut node.inner {
        InnerNode::File(file) => Some(file),
        _ => None,
    });
    for file in files {
        let (filesize, added) = match file {
            // caller-supplied reader: lock it and compress its bytes
            SquashfsFileWriter::UserDefined(file) => {
                let file_ptr = Arc::clone(file);
                let mut file_lock =
                    file_ptr.lock().map_err(|_| BackhandError::MutexPoisoned)?;
                data_writer.add_bytes(&mut *file_lock, &mut writer)?
            }
            // file from a source image: when compressor, options, and block
            // size all match the target, copy the still-compressed raw
            // blocks; otherwise decompress and recompress
            SquashfsFileWriter::SquashfsFile(file) => {
                if file.system.compressor == compressor.id
                    && file.system.compression_options == compressor.options
                    && file.system.block_size == block_size
                {
                    data_writer.just_copy_it(file.raw_data_reader(), &mut writer)?
                } else {
                    data_writer.add_bytes(file.reader(), &mut writer)?
                }
            }
            // this pass runs once per write(), so nothing is Consumed yet
            SquashfsFileWriter::Consumed(_, _) => unreachable!(),
        };
        *file = SquashfsFileWriter::Consumed(filesize, added);
    }
    Ok(())
}
/// Recursively serialize the inode for `node_id` — and, for directories,
/// its children — into `inode_writer`/`dir_writer`, returning the
/// directory-table [`Entry`] that describes this node.
///
/// Leaf node kinds (files, symlinks, devices, pipes, sockets) emit their
/// inode and return immediately. Directories first recurse into each direct
/// child (depth-first), then write the children's directory entries to
/// `dir_writer`, and finally emit their own path inode.
#[allow(clippy::too_many_arguments)]
fn write_inode_dir<'slf>(
    &'slf self,
    inode_writer: &'_ mut MetadataWriter,
    dir_writer: &'_ mut MetadataWriter,
    parent_node_id: u32,
    node_id: NonZeroUsize,
    superblock: &SuperBlock,
    kind: &Kind,
    id_table: &Vec<Id>,
) -> Result<Entry<'slf>, BackhandError> {
    let node = &self
        .root
        .node(node_id)
        .ok_or(BackhandError::InternalState("node not found".to_string()))?;
    // the root node has no file name component; use "/" in its place
    let filename = node.fullpath.file_name().unwrap_or(OsStr::new("/"));
    match &node.inner {
        // files must have been Consumed by write_data() before this pass
        InnerNode::File(SquashfsFileWriter::Consumed(filesize, added)) => {
            return Entry::file(
                filename,
                node.header,
                node_id.get().try_into().map_err(|e: std::num::TryFromIntError| {
                    BackhandError::NumericConversion(format!("file node id: {}", e))
                })?,
                inode_writer,
                *filesize,
                added,
                superblock,
                kind,
                id_table,
            );
        }
        // any other file state means write_data() was skipped — a bug
        InnerNode::File(_) => unreachable!(),
        InnerNode::Symlink(symlink) => {
            return Entry::symlink(
                filename,
                node.header,
                symlink,
                node_id.get().try_into().map_err(|e: std::num::TryFromIntError| {
                    BackhandError::NumericConversion(format!("symlink node id: {}", e))
                })?,
                inode_writer,
                superblock,
                kind,
                id_table,
            );
        }
        InnerNode::CharacterDevice(char) => {
            return Entry::char(
                filename,
                node.header,
                char,
                node_id.get().try_into().map_err(|e: std::num::TryFromIntError| {
                    BackhandError::NumericConversion(format!("character device node id: {}", e))
                })?,
                inode_writer,
                superblock,
                kind,
                id_table,
            );
        }
        InnerNode::BlockDevice(block) => {
            return Entry::block_device(
                filename,
                node.header,
                block,
                node_id.get().try_into().map_err(|e: std::num::TryFromIntError| {
                    BackhandError::NumericConversion(format!("block device node id: {}", e))
                })?,
                inode_writer,
                superblock,
                kind,
                id_table,
            );
        }
        InnerNode::NamedPipe => {
            return Entry::named_pipe(
                filename,
                node.header,
                node_id.get().try_into().map_err(|e: std::num::TryFromIntError| {
                    BackhandError::NumericConversion(format!("named pipe node id: {}", e))
                })?,
                inode_writer,
                superblock,
                kind,
                id_table,
            );
        }
        InnerNode::Socket => {
            return Entry::socket(
                filename,
                node.header,
                node_id.get().try_into().map_err(|e: std::num::TryFromIntError| {
                    BackhandError::NumericConversion(format!("socket node id: {}", e))
                })?,
                inode_writer,
                superblock,
                kind,
                id_table,
            );
        }
        // directories fall through to the recursive child handling below
        InnerNode::Dir(_) => (),
    };
    // recurse into direct children only (children_of may yield deeper
    // descendants; the parent check filters them out)
    let entries: Vec<_> = self
        .root
        .children_of(node_id)
        .filter(|(_child_id, child)| {
            child.fullpath.parent().map(|child| child == node.fullpath).unwrap_or(false)
        })
        .map(|(child_id, _child)| {
            self.write_inode_dir(
                inode_writer,
                dir_writer,
                // this node becomes the parent of each child entry
                node_id.get().try_into().map_err(|e: std::num::TryFromIntError| {
                    BackhandError::NumericConversion(format!(
                        "parent node id for directory: {}",
                        e
                    ))
                })?,
                child_id,
                superblock,
                kind,
                id_table,
            )
        })
        .collect::<Result<_, _>>()?;
    let children_num = entries.len();
    // record where this directory's listing starts inside the dir table,
    // so the path inode can point back at it
    let block_index = dir_writer.metadata_start;
    let block_offset = dir_writer.uncompressed_bytes.len() as u16;
    trace!("WRITING DIR: {block_offset:#02x?}");
    // NOTE(review): squashfs stores directory sizes 3 bytes larger than the
    // actual listing data, hence the non-zero start — confirm against spec
    let mut total_size: usize = 3;
    for dir in Entry::into_dir(entries)? {
        // serialize each directory header + entries into the dir table
        let mut bytes = Cursor::new(vec![]);
        let mut writer = Writer::new(&mut bytes);
        dir.to_writer(&mut writer, kind.inner.type_endian)?;
        total_size += bytes.get_ref().len();
        dir_writer.write_all(bytes.get_ref())?;
    }
    // finally emit this directory's own inode, linking back to its parent
    let entry = Entry::path(
        filename,
        node.header,
        node_id.get().try_into().map_err(|e: std::num::TryFromIntError| {
            BackhandError::NumericConversion(format!("directory node id: {}", e))
        })?,
        children_num,
        parent_node_id,
        inode_writer,
        total_size,
        block_offset,
        block_index,
        superblock,
        kind,
        id_table,
    )?;
    trace!("[{:?}] entries: {:#02x?}", filename, &entry);
    Ok(entry)
}
/// Serialize the whole filesystem into `w` as a SquashFS v4 image.
///
/// Write order: zeroed superblock placeholder, optional compression
/// options, file data + fragments, inode table, directory table, fragment
/// lookup table, id lookup table, padding — and finally the real
/// superblock rewritten over the placeholder (in `finalize`).
///
/// Returns the finished superblock and the total number of bytes written,
/// including padding.
pub fn write<W: Write + Seek>(&mut self, mut w: W) -> Result<(SuperBlock, u64), BackhandError> {
    let mut superblock =
        SuperBlock::new(self.fs_compressor.id, Kind { inner: self.kind.inner.clone() });
    if self.no_duplicate_files {
        superblock.flags |= Flags::DataHasBeenDeduplicated as u16;
    }
    trace!("{:#02x?}", self.root);
    // this writer only understands v4 compressors; other kinds are a misuse
    let v4_compressor = match &self.kind.inner.compressor {
        crate::kinds::VersionedCompressor::V4(compressor) => *compressor,
        crate::kinds::VersionedCompressor::CustomV4(compressor) => *compressor,
        #[allow(unreachable_patterns)]
        _ => panic!("v4 filesystem writer requires v4 compressor"),
    };
    // reserve space for the superblock; its fields are only known at the end
    w.write_all(&[0x00; SuperBlock::SIZE])?;
    if self.emit_compression_options && self.fs_compressor.options.is_some() {
        trace!("writing compression options");
        let options = v4_compressor.compression_options(
            &mut superblock,
            &self.kind,
            self.fs_compressor,
        )?;
        if let Some(options_bytes) = &options {
            w.write_all(options_bytes)?;
        }
    }
    let mut data_writer = DataWriter::new(
        v4_compressor,
        self.fs_compressor,
        self.block_size,
        self.no_duplicate_files,
    );
    let mut inode_writer = MetadataWriter::new(
        v4_compressor,
        self.fs_compressor,
        self.block_size,
        self.kind.inner.data_endian,
    );
    let mut dir_writer = MetadataWriter::new(
        v4_compressor,
        self.fs_compressor,
        self.block_size,
        self.kind.inner.data_endian,
    );
    info!("Creating Inodes and Dirs");
    info!("Writing Data");
    self.write_data(self.fs_compressor, self.block_size, &mut w, &mut data_writer)?;
    info!("Writing Data Fragments");
    data_writer.finalize(&mut w)?;
    info!("Writing Other stuff");
    // recursive pass over the node tree, starting at the root (node id 1,
    // parent id 0); fills inode_writer and dir_writer in memory
    let root = self.write_inode_dir(
        &mut inode_writer,
        &mut dir_writer,
        0,
        1.try_into().map_err(|e: std::num::TryFromIntError| {
            BackhandError::NumericConversion(e.to_string())
        })?,
        &superblock,
        &self.kind,
        &self.id_table,
    )?;
    // root inode reference: metadata block start in the upper bits,
    // offset within that block in the low 16 bits
    superblock.root_inode = ((root.start as u64) << 16) | ((root.offset as u64) & 0xffff);
    superblock.inode_count =
        self.root.nodes.len().try_into().map_err(|e: std::num::TryFromIntError| {
            BackhandError::NumericConversion(format!("inode count: {}", e))
        })?;
    superblock.block_size = self.block_size;
    superblock.block_log = self.block_log;
    superblock.mod_time = self.mod_time;
    info!("Writing Inodes");
    superblock.inode_table = w.stream_position()?;
    inode_writer.finalize(&mut w)?;
    info!("Writing Dirs");
    superblock.dir_table = w.stream_position()?;
    dir_writer.finalize(&mut w)?;
    info!("Writing Frag Lookup Table");
    let (table_position, count) =
        self.write_lookup_table(&mut w, &data_writer.fragment_table, fragment::SIZE)?;
    superblock.frag_table = table_position;
    superblock.frag_count = count;
    info!("Writing Id Lookup Table");
    let (table_position, count) = self.write_lookup_table(&mut w, &self.id_table, Id::SIZE)?;
    superblock.id_table = table_position;
    superblock.id_count = count.try_into().map_err(|e: std::num::TryFromIntError| {
        BackhandError::NumericConversion(format!("id count: {}", e))
    })?;
    info!("Finalize Superblock and End Bytes");
    let bytes_written = self.finalize(w, &mut superblock)?;
    info!("Success");
    Ok((superblock, bytes_written))
}
/// Pad the image up to the next multiple of `pad_len` (when enabled), then
/// rewind and overwrite the placeholder superblock with the finished one.
///
/// Returns the total image length: `bytes_used` plus padding.
fn finalize<W>(&self, mut w: W, superblock: &mut SuperBlock) -> Result<u64, BackhandError>
where
    W: Write + Seek,
{
    superblock.bytes_used = w.stream_position()?;
    let mut pad_len = 0;
    if self.pad_len != 0 {
        info!("Writing Padding");
        // round bytes_used up to the next multiple of pad_len; since
        // blocks_used is rounded down and then incremented, at least one
        // byte of padding is always written
        let blocks_used: u64 = superblock.bytes_used / (self.pad_len as u64);
        let total_pad_len = (blocks_used + 1) * (self.pad_len as u64);
        pad_len = total_pad_len - superblock.bytes_used;
        // emit zeros in 1 KiB chunks until the target length is reached
        let mut total_written = 0;
        while w.stream_position()? < (superblock.bytes_used + pad_len) {
            let arr = &[0x00; 1024];
            // the final chunk may be shorter than 1024; the `% 1024` is a
            // no-op here since the remainder is already < 1024
            let len = if (pad_len - total_written) < 1024 {
                (pad_len - total_written) % 1024
            } else {
                1024
            };
            w.write_all(
                &arr[..len.try_into().map_err(|e: std::num::TryFromIntError| {
                    BackhandError::NumericConversion(format!("padding chunk length: {}", e))
                })?],
            )?;
            total_written += len;
        }
    }
    info!("Writing Superblock");
    // go back to offset 0 and write the real superblock over the zeros
    w.rewind()?;
    let mut writer = Writer::new(&mut w);
    superblock.to_writer(
        &mut writer,
        (
            self.kind.inner.magic,
            self.kind.inner.version_major,
            self.kind.inner.version_minor,
            self.kind.inner.type_endian,
        ),
    )?;
    info!("Writing Finished");
    w.flush()?;
    Ok(superblock.bytes_used + pad_len)
}
/// Write `table` as a sequence of metadata blocks followed by a list of
/// `u64` pointers to those blocks (the squashfs lookup-table layout).
///
/// Returns `(position of the pointer list, element count)` for recording in
/// the superblock.
fn write_lookup_table<D, W>(
    &self,
    mut w: W,
    table: &[D],
    element_size: usize,
) -> Result<(u64, u32), BackhandError>
where
    D: DekuWriter<deku::ctx::Endian>,
    W: Write + Seek,
{
    let mut ptrs: Vec<u64> = vec![];
    let mut table_bytes = Cursor::new(Vec::with_capacity(table.len() * element_size));
    let mut iter = table.iter().peekable();
    while let Some(t) = iter.next() {
        // serialize the element into the in-memory metadata buffer
        let mut table_writer = Writer::new(&mut table_bytes);
        t.to_writer(&mut table_writer, self.kind.inner.type_endian)?;
        // flush the buffer as one metadata block when the next element
        // would overflow METADATA_MAXSIZE, or when this was the last one
        if ((table_bytes.get_ref().len() + element_size) > METADATA_MAXSIZE)
            || iter.peek().is_none()
        {
            ptrs.push(w.stream_position()?);
            // metadata blocks are prefixed by a length with the
            // "uncompressed" bit set (this table data is never compressed)
            let len = metadata::set_if_uncompressed(table_bytes.get_ref().len() as u16);
            let mut writer = Writer::new(&mut w);
            len.to_writer(&mut writer, self.kind.inner.data_endian)?;
            w.write_all(table_bytes.get_ref())?;
            table_bytes.get_mut().clear();
            table_bytes.rewind()?;
        }
    }
    // the pointer list follows the blocks; its position goes in the superblock
    let table_position = w.stream_position()?;
    let count = table.len() as u32;
    for ptr in ptrs {
        let mut writer = Writer::new(&mut w);
        ptr.to_writer(&mut writer, self.kind.inner.type_endian)?;
    }
    Ok((table_position, count))
}
/// Return the index of `id` in the id table, appending it first if absent.
fn lookup_add_id(&mut self, id: u32) -> u32 {
    if let Some(index) = self.id_table.iter().position(|entry| entry.num == id) {
        index as u32
    } else {
        self.id_table.push(Id::new(id));
        (self.id_table.len() - 1) as u32
    }
}
}
/// Writer adapter that treats byte `offset` of the wrapped stream as
/// position 0, so an image can be embedded inside a larger file.
struct WriterWithOffset<W: WriteSeek> {
    // underlying writer
    w: W,
    // absolute position in `w` that maps to logical position 0
    offset: u64,
}
impl<W: WriteSeek> WriterWithOffset<W> {
    /// Seek `w` to `offset` and wrap it; all positions reported afterwards
    /// are relative to that offset.
    pub fn new(mut w: W, offset: u64) -> std::io::Result<Self> {
        w.seek(SeekFrom::Start(offset))?;
        Ok(Self { w, offset })
    }
}
impl<W> Write for WriterWithOffset<W>
where
    W: WriteSeek,
{
    // writes pass straight through; only seeking is offset-translated
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.w.write(buf)
    }
    fn flush(&mut self) -> std::io::Result<()> {
        self.w.flush()
    }
}
impl<W> Seek for WriterWithOffset<W>
where
    W: Write + Seek,
{
    /// Seek relative to the logical start (`self.offset`) of the wrapped
    /// stream, reporting the resulting position relative to that offset.
    fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
        let seek = match pos {
            // translate logical start-relative seeks to the real stream
            SeekFrom::Start(start) => SeekFrom::Start(self.offset + start),
            // Current/End are relative to the real stream already
            seek => seek,
        };
        let abs = self.w.seek(seek)?;
        // Previously `abs - self.offset` could underflow (debug panic /
        // release wrap) if a Current/End seek landed before `offset`;
        // report that as an error instead.
        abs.checked_sub(self.offset).ok_or_else(|| {
            std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                "seeked before start of offset writer",
            )
        })
    }
}
/// Compression configuration used when writing an image.
#[derive(Debug, Copy, Clone, Default)]
pub struct FilesystemCompressor {
    /// Which compressor to use
    pub(crate) id: Compressor,
    /// Optional on-disk compression options (validated against `id`)
    pub(crate) options: Option<CompressionOptions>,
    /// Optional extra encoder tuning not written to the image
    pub(crate) extra: Option<CompressionExtra>,
}
impl FilesystemCompressor {
    /// Create compression settings, validating that `options` matches `id`.
    ///
    /// # Errors
    /// [`BackhandError::InvalidCompressionOption`] when Lz4 is selected
    /// without options, or when the options variant does not correspond to
    /// the chosen compressor.
    pub fn new(id: Compressor, options: Option<CompressionOptions>) -> Result<Self, BackhandError> {
        match (id, options) {
            // Lz4 always requires explicit options
            (Compressor::Lz4, None) => {
                error!("Lz4 compression options missing");
                Err(BackhandError::InvalidCompressionOption)
            }
            // no options is valid for every other compressor, and options
            // are valid when their variant matches the compressor
            (_, None)
            | (Compressor::Gzip, Some(CompressionOptions::Gzip(_)))
            | (Compressor::Lzma, Some(CompressionOptions::Lzma))
            | (Compressor::Lzo, Some(CompressionOptions::Lzo(_)))
            | (Compressor::Xz, Some(CompressionOptions::Xz(_)))
            | (Compressor::Lz4, Some(CompressionOptions::Lz4(_)))
            | (Compressor::Zstd, Some(CompressionOptions::Zstd(_))) => {
                Ok(Self { id, options, extra: None })
            }
            // mismatched compressor/options pair
            _ => {
                error!("invalid compression settings");
                Err(BackhandError::InvalidCompressionOption)
            }
        }
    }
    /// Set the compression options written to the image.
    pub fn options(&mut self, options: CompressionOptions) -> Result<(), BackhandError> {
        self.options = Some(options);
        Ok(())
    }
    /// Attach extra encoder tuning; only Xz extras on an Xz compressor are
    /// accepted.
    pub fn extra(&mut self, extra: CompressionExtra) -> Result<(), BackhandError> {
        match (extra, self.id) {
            (CompressionExtra::Xz(_), Compressor::Xz) => {
                self.extra = Some(extra);
                Ok(())
            }
            _ => {
                error!("invalid extra compression settings");
                Err(BackhandError::InvalidCompressionOption)
            }
        }
    }
}
/// Extra compressor tuning that is not serialized into the image.
#[derive(Debug, Copy, Clone)]
pub enum CompressionExtra {
    /// Xz-specific tuning
    Xz(ExtraXz),
}
/// Extra settings for the Xz encoder.
#[derive(Debug, Copy, Clone, Default)]
pub struct ExtraXz {
    /// Compression level override; `None` uses the encoder default
    pub(crate) level: Option<u32>,
}
impl ExtraXz {
    /// Set the xz compression level.
    ///
    /// # Errors
    /// [`BackhandError::InvalidCompressionOption`] if `level` is above 9
    /// (valid xz levels are `0..=9`).
    pub fn level(&mut self, level: u32) -> Result<(), BackhandError> {
        match level {
            0..=9 => {
                self.level = Some(level);
                Ok(())
            }
            _ => Err(BackhandError::InvalidCompressionOption),
        }
    }
}