use no_std_io2::io::Read;
use std::io::{Seek, SeekFrom};
use super::reader::{BlockFragment, BlockIterator, FilesystemReaderFile};
use crate::error::BackhandError;
/// Metadata describing a block of bytes produced by
/// [`SquashfsRawData::next_block`]: where the bytes came from and whether
/// they still need a decompression pass.
#[derive(Clone, Copy)]
pub(crate) struct RawDataBlock {
    /// `true` when the bytes came from a fragment block (shared tail-end
    /// storage) rather than a regular data block.
    pub(crate) fragment: bool,
    /// `true` when the bytes are already uncompressed and `decompress` can
    /// hand them through without invoking the decompressor.
    pub(crate) uncompressed: bool,
}
/// Sequential reader over the raw (still possibly compressed) data blocks
/// and trailing fragment of a single file inside a squashfs image.
pub(crate) struct SquashfsRawData<'a, 'b> {
    pub(crate) file: FilesystemReaderFile<'a, 'b>,
    /// Iterator over the file's remaining blocks/fragment.
    current_block: BlockIterator<'a>,
    /// Absolute offset into the underlying reader where the next sequential
    /// data block starts (initialized from `blocks_start`, advanced by reads
    /// and skips).
    pub(crate) pos: u64,
}
impl<'a, 'b> SquashfsRawData<'a, 'b> {
    /// Creates a reader positioned at the start of `file`'s data blocks.
    pub fn new(file: FilesystemReaderFile<'a, 'b>) -> Self {
        let pos = file.file.blocks_start();
        let current_block = file.into_iter();
        Self { file, current_block, pos }
    }

    /// Reads the raw bytes of `block` into `data` and returns metadata
    /// describing what `data` now holds (fragment or not, compressed or not).
    fn read_raw_data(
        &mut self,
        data: &mut Vec<u8>,
        block: &BlockFragment<'a>,
    ) -> Result<RawDataBlock, BackhandError> {
        match block {
            BlockFragment::Block(block) => {
                let block_size = block.size() as usize;
                // An on-disk size of 0 marks a sparse block: nothing is
                // stored, the whole block is zeros.
                if block_size == 0 {
                    *data = vec![0; self.file.system.block_size as usize];
                    return Ok(RawDataBlock { fragment: false, uncompressed: true });
                }
                data.resize(block_size, 0);
                // Scope the shared-reader lock to just the seek + read.
                {
                    let mut reader = self.file.system.reader.lock().unwrap();
                    reader.seek(SeekFrom::Start(self.pos))?;
                    reader.read_exact(data)?;
                    // Remember where the next sequential block begins.
                    self.pos = reader.stream_position()?;
                }
                Ok(RawDataBlock { fragment: false, uncompressed: block.uncompressed() })
            }
            BlockFragment::Fragment(fragment) => {
                // Fast path: the uncompressed fragment block may already be
                // cached; if so, copy out only this file's slice of it.
                {
                    let cache = self.file.system.cache.read().unwrap();
                    if let Some(cache_bytes) = cache.fragment_cache.get(&fragment.start) {
                        let range = self.fragment_range();
                        tracing::trace!("fragment in cache: {:02x}:{range:02x?}", fragment.start);
                        data.resize(range.end - range.start, 0);
                        data.copy_from_slice(&cache_bytes[range]);
                        // Cached fragment bytes are always stored uncompressed.
                        return Ok(RawDataBlock { fragment: true, uncompressed: true });
                    }
                }
                tracing::trace!("fragment: reading from data");
                let frag_size = fragment.size.size() as usize;
                data.resize(frag_size, 0);
                // Scope the shared-reader lock to just the seek + read.
                {
                    let mut reader = self.file.system.reader.lock().unwrap();
                    reader.seek(SeekFrom::Start(fragment.start))?;
                    reader.read_exact(data)?;
                }
                if fragment.size.uncompressed() {
                    // Fragment is stored uncompressed on disk: cache the whole
                    // fragment block, then trim `data` down to the range that
                    // belongs to this file.
                    self.file
                        .system
                        .cache
                        .write()
                        .unwrap()
                        .fragment_cache
                        .insert(self.file.fragment().unwrap().start, data.clone());
                    let range = self.fragment_range();
                    // Drain the tail first so `range.start` stays valid.
                    data.drain(range.end..);
                    data.drain(..range.start);
                }
                Ok(RawDataBlock { fragment: true, uncompressed: fragment.size.uncompressed() })
            }
        }
    }

    /// Reads the next block/fragment into `buf`; `None` once the block
    /// iterator is exhausted.
    #[inline]
    pub fn next_block(&mut self, buf: &mut Vec<u8>) -> Option<Result<RawDataBlock, BackhandError>> {
        self.current_block.next().map(|next| self.read_raw_data(buf, &next))
    }

    /// Advances past the next block without reading its bytes.
    /// Returns `false` when there are no blocks left.
    #[inline]
    pub(crate) fn skip_block(&mut self) -> bool {
        match self.current_block.next() {
            Some(BlockFragment::Block(block)) => {
                // A regular block occupies `size()` bytes in the data stream
                // (0 for sparse blocks, so `pos` is unchanged for those).
                self.pos += block.size() as u64; true
            }
            // Fragments are stored elsewhere, so `pos` is unaffected.
            Some(BlockFragment::Fragment(_)) => true, None => false,
        }
    }

    /// Byte range of this file's data within its (uncompressed) fragment
    /// block.
    #[inline]
    fn fragment_range(&self) -> std::ops::Range<usize> {
        let block_len = self.file.system.block_size as usize;
        let block_num = self.file.file.block_sizes().len();
        let file_size = self.file.file.file_len();
        // Bytes of the file that did not fit into full data blocks.
        // NOTE(review): assumes the file actually ends in a fragment
        // (file_size >= block_num * block_len); would underflow otherwise —
        // confirm callers only reach here for fragment blocks.
        let frag_len = file_size - (block_num * block_len);
        let frag_start = self.file.file.block_offset() as usize;
        let frag_end = frag_start + frag_len;
        frag_start..frag_end
    }

    /// Produces the final bytes for a block previously filled by
    /// [`Self::next_block`]: either moves `input_buf` into `output_buf`
    /// (already-uncompressed data, zero-copy swap) or decompresses into
    /// `output_buf`. For compressed fragments the whole decompressed
    /// fragment block is cached before trimming to this file's range.
    pub fn decompress(
        &self,
        data: RawDataBlock,
        input_buf: &mut Vec<u8>,
        output_buf: &mut Vec<u8>,
    ) -> Result<(), BackhandError> {
        // Caller must hand us an empty output buffer.
        assert!(output_buf.is_empty());
        if data.uncompressed {
            // Zero-copy: just exchange the two buffers.
            std::mem::swap(input_buf, output_buf);
        } else {
            output_buf.reserve(self.file.system.block_size as usize);
            self.file.system.kind.inner.compressor.decompress(
                input_buf,
                output_buf,
                self.file.system.compressor,
            )?;
            if data.fragment {
                // Cache the whole decompressed fragment block, then trim the
                // output down to the slice belonging to this file.
                self.file
                    .system
                    .cache
                    .write()
                    .unwrap()
                    .fragment_cache
                    .insert(self.file.fragment().unwrap().start, output_buf.clone());
                let range = self.fragment_range();
                // Drain the tail first so `range.start` stays valid.
                output_buf.drain(range.end..);
                output_buf.drain(..range.start);
            }
        }
        Ok(())
    }

    /// Wraps this raw reader into a `Read`/`Seek` adapter.
    #[inline]
    pub fn into_reader(self) -> SquashfsReadFile<'a, 'b> {
        SquashfsReadFile::new(self)
    }
}
/// `Read`/`Seek` adapter over [`SquashfsRawData`], decompressing one block
/// at a time into an internal buffer.
pub struct SquashfsReadFile<'a, 'b> {
    raw_data: SquashfsRawData<'a, 'b>,
    /// Scratch buffer holding the raw (possibly compressed) bytes of the
    /// current block.
    buf_read: Vec<u8>,
    /// Decompressed bytes of the current block.
    buf_decompress: Vec<u8>,
    /// Read offset within `buf_decompress`.
    current_block_position: usize,
    /// Logical position within the file, in bytes.
    cursor_pos: u64,
}
impl<'a, 'b> SquashfsReadFile<'a, 'b> {
    /// Builds the adapter, pre-sizing both scratch buffers to one filesystem
    /// block so the common case never reallocates.
    fn new(raw_data: SquashfsRawData<'a, 'b>) -> Self {
        let capacity = raw_data.file.system.block_size as usize;
        Self {
            raw_data,
            buf_read: Vec::with_capacity(capacity),
            buf_decompress: Vec::with_capacity(capacity),
            current_block_position: 0,
            cursor_pos: 0,
        }
    }

    /// Logical length of the file, in bytes.
    #[inline]
    fn file_len64(&self) -> u64 {
        self.raw_data.file.file.file_len() as u64
    }

    /// Unconsumed portion of the currently decompressed block.
    #[inline]
    fn available(&self) -> &[u8] {
        &self.buf_decompress[self.current_block_position..]
    }

    /// Copies bytes from the current decompressed block into `buf`, clamped
    /// to the logical end of the file, and advances both cursors.
    #[inline]
    fn read_available(&mut self, buf: &mut [u8]) -> usize {
        let remaining = self.file_len64().saturating_sub(self.cursor_pos);
        let chunk = self.available();
        // Hand out no more than: the caller's buffer, the current block, or
        // the bytes left before logical EOF.
        let n = remaining.min(buf.len().min(chunk.len()) as u64) as usize;
        buf[..n].copy_from_slice(&chunk[..n]);
        self.cursor_pos += n as u64;
        self.current_block_position += n;
        n
    }

    /// Decompresses the next block into `buf_decompress` and rewinds the
    /// in-block cursor. A no-op once the block stream is exhausted.
    #[inline]
    fn read_next_block(&mut self) -> Result<(), BackhandError> {
        if let Some(block) = self.raw_data.next_block(&mut self.buf_read) {
            let block = block?;
            self.buf_decompress.clear();
            self.raw_data.decompress(block, &mut self.buf_read, &mut self.buf_decompress)?;
            self.current_block_position = 0;
        }
        Ok(())
    }
}
impl Read for SquashfsReadFile<'_, '_> {
    /// Serves bytes from the decompressed-block buffer, refilling it from
    /// the underlying squashfs data on demand.
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        let len = self.file_len64();
        // Past the logical end of the file: signal EOF.
        if self.cursor_pos >= len {
            return Ok(0);
        }
        // Current block fully consumed; decompress the next one.
        if self.available().is_empty() {
            self.read_next_block()?;
        }
        Ok(self.read_available(buf))
    }
}
impl Seek for SquashfsReadFile<'_, '_> {
    fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
        let file_len = self.file_len64();

        // Resolve the target as a signed offset, then reject anything that
        // ends up negative.
        let target = match pos {
            SeekFrom::Start(n) => n as i64,
            SeekFrom::End(n) => file_len as i64 + n,
            SeekFrom::Current(n) => self.cursor_pos as i64 + n,
        };
        let new_pos = u64::try_from(target)
            .map_err(|_| std::io::Error::from(std::io::ErrorKind::InvalidInput))?;

        // Already there: nothing to do.
        if new_pos == self.cursor_pos {
            return Ok(new_pos);
        }

        // Fast path: the target may fall inside the block that is already
        // decompressed in `buf_decompress`.
        if self.cursor_pos <= file_len && !self.buf_decompress.is_empty() {
            // Rewind the cursor to the start of the current block.
            self.cursor_pos -= self.current_block_position as u64;
            self.current_block_position = 0;
            if new_pos >= self.cursor_pos
                && new_pos - self.cursor_pos <= self.buf_decompress.len() as u64
            {
                self.current_block_position = (new_pos - self.cursor_pos) as usize;
                self.cursor_pos = new_pos;
                return Ok(new_pos);
            }
        }

        // Slow path: restart the raw reader from the beginning of the file.
        self.raw_data = self.raw_data.file.raw_data_reader();
        self.buf_read.clear();
        self.buf_decompress.clear();
        self.current_block_position = 0;
        self.cursor_pos = 0;

        if new_pos < file_len {
            // Skip whole blocks (no read, no decompress) until the one that
            // contains the target.
            let block_size = 1u64 << self.raw_data.file.system.block_log;
            while new_pos >= self.cursor_pos + block_size {
                let _skipped = self.raw_data.skip_block();
                debug_assert!(_skipped);
                self.cursor_pos += block_size;
            }
            if new_pos != self.cursor_pos {
                // Decompress the block containing the target and place the
                // in-block cursor at the target offset.
                self.read_next_block()?;
                debug_assert!(new_pos <= self.cursor_pos + self.buf_decompress.len() as u64);
                self.current_block_position = (new_pos - self.cursor_pos) as usize;
                self.cursor_pos = new_pos;
            }
        } else {
            // Seeking at or past EOF: drain the remaining blocks so later
            // reads return 0, then park the cursor at the requested position.
            while self.raw_data.skip_block() {}
            self.cursor_pos = new_pos;
        }
        Ok(new_pos)
    }
}