use bincode::Options;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use serde::{Deserialize, Serialize};
use crate::layers::traits::{
InnerWriterTrait, InnerWriterType, LayerFailSafeReader, LayerReader, LayerWriter,
};
use crate::{Error, BINCODE_MAX_DESERIALIZE};
use std::io;
use std::io::{Read, Seek, SeekFrom, Write};
use crate::config::{ArchiveWriterConfig, ConfigResult};
use crate::errors::ConfigError;
/// Uncompressed block size: input is compressed in independent 4 MiB chunks
const UNCOMPRESSED_DATA_SIZE: u32 = 4 * 1024 * 1024;
/// Default Brotli quality level (valid range 0-11, see `with_compression_level`)
const DEFAULT_COMPRESSION_LEVEL: u32 = 5;
/// Brotli LZ77 window size, as log2 of the window in bytes
const BROTLI_LOG_WINDOW: u32 = 22;
/// User-tunable configuration for the compression layer
pub struct CompressionConfig {
    // Brotli quality level (0-11); set through `with_compression_level`
    compression_level: u32,
}
impl std::default::Default for CompressionConfig {
    /// Configuration with the default Brotli quality level.
    fn default() -> Self {
        let compression_level = DEFAULT_COMPRESSION_LEVEL;
        CompressionConfig { compression_level }
    }
}
impl ArchiveWriterConfig {
    /// Set the Brotli compression level.
    ///
    /// Valid levels are 0 (fastest) through 11 (best ratio); anything above
    /// 11 is rejected with `ConfigError::CompressionLevelOutOfRange`.
    pub fn with_compression_level(&mut self, compression_level: u32) -> ConfigResult {
        if compression_level <= 11 {
            self.compress.compression_level = compression_level;
            Ok(self)
        } else {
            Err(ConfigError::CompressionLevelOutOfRange)
        }
    }
}
/// Internal state machine of the compression readers.
///
/// `Empty` is a transient placeholder used while ownership of the inner
/// stream is moved between states (`std::mem::replace`); observing it
/// outside such a swap is a logic error.
enum CompressionLayerReaderState<R: Read> {
    // At a block boundary, no decompression in progress
    Ready(R),
    InData {
        // Uncompressed bytes already read from the current block
        read: u32,
        // Total uncompressed size of the current block
        uncompressed_size: u32,
        decompressor: brotli::Decompressor<R>,
    },
    Empty,
}
/// Footer metadata: per-block compressed sizes, serialized at the end of the
/// layer by the writer and loaded by the reader to enable seeking
#[derive(Serialize, Deserialize, Debug)]
pub struct SizesInfo {
    /// Compressed size of each block, in stream order
    pub compressed_sizes: Vec<u32>,
    // Uncompressed size of the last (possibly partial) block
    last_block_size: u32,
}
impl SizesInfo {
    /// Uncompressed size of block `block_num`.
    ///
    /// Every block but the last holds exactly `UNCOMPRESSED_DATA_SIZE` bytes;
    /// the last holds the remainder (`last_block_size`). The comparison is
    /// written as `block_num + 1 < len` instead of `block_num < len - 1` so
    /// an empty `compressed_sizes` (empty archive) cannot underflow.
    fn uncompressed_block_size_at(&self, block_num: usize) -> u32 {
        if block_num + 1 < self.compressed_sizes.len() {
            UNCOMPRESSED_DATA_SIZE
        } else {
            self.last_block_size
        }
    }
    /// Compressed size of the block containing `uncompressed_pos`.
    ///
    /// Panics if the position maps past the last recorded block; callers
    /// check `pos_in_stream` first.
    fn compressed_block_size_at(&self, uncompressed_pos: u64) -> u32 {
        let block_num = uncompressed_pos / (UNCOMPRESSED_DATA_SIZE as u64);
        self.compressed_sizes[block_num as usize]
    }
    /// Total uncompressed length of the stream (one past the last valid
    /// position). `saturating_sub` keeps an empty archive at 0 instead of
    /// underflowing on `len - 1`.
    fn max_uncompressed_pos(&self) -> u64 {
        (self.compressed_sizes.len() as u64).saturating_sub(1) * UNCOMPRESSED_DATA_SIZE as u64
            + self.last_block_size as u64
    }
    /// Total compressed payload size (footer excluded).
    pub fn get_compressed_size(&self) -> u64 {
        self.compressed_sizes.iter().map(|v| *v as u64).sum()
    }
}
/// Seekable compression layer reader; requires the sizes footer, loaded by
/// `initialize`, to locate block boundaries
pub struct CompressionLayerReader<'a, R: 'a + Read> {
    state: CompressionLayerReaderState<Box<dyn 'a + LayerReader<'a, R>>>,
    // Footer data; `None` until `initialize` succeeds
    pub sizes_info: Option<SizesInfo>,
    // Current position in the *uncompressed* stream
    underlayer_pos: u64,
}
impl<R: Read> CompressionLayerReaderState<R> {
fn into_inner(self) -> R {
match self {
CompressionLayerReaderState::Ready(inner) => inner,
CompressionLayerReaderState::InData { decompressor, .. } => decompressor.into_inner(),
_ => panic!("[Reader] Empty type to inner is impossible"),
}
}
}
impl<'a, R: 'a + Read> CompressionLayerReader<'a, R> {
    /// Wrap `inner`; the footer is not read until `initialize` is called.
    pub fn new(mut inner: Box<dyn 'a + LayerReader<'a, R>>) -> Result<Self, Error> {
        // NOTE(review): this records the inner (compressed) stream offset,
        // while the rest of the code treats `underlayer_pos` as an
        // *uncompressed* position; the two agree only when construction
        // happens at offset 0 — confirm with callers
        let underlayer_pos = inner.seek(SeekFrom::Current(0))? as u64;
        Ok(Self {
            state: CompressionLayerReaderState::Ready(inner),
            sizes_info: None,
            underlayer_pos,
        })
    }
    /// Whether `uncompressed_pos` lies before the end of the uncompressed
    /// stream. Without footer info yet (`None`), optimistically answer true.
    fn pos_in_stream(&self, uncompressed_pos: u64) -> bool {
        match &self.sizes_info {
            Some(sizes_info) => {
                let pos_max = sizes_info.max_uncompressed_pos();
                uncompressed_pos < pos_max
            }
            None => true,
        }
    }
    /// Build a decompressor for the block starting at `uncompressed_pos`,
    /// which must be an exact block boundary. The decompressor's internal
    /// buffer is sized from the block's compressed size in the footer.
    ///
    /// # Errors
    /// `BadAPIArgument` if not a block boundary, `EndOfStream` if past the
    /// end, `MissingMetadata` if `initialize` has not run yet.
    fn new_decompressor_at<S: Read + Seek>(
        &self,
        inner: S,
        uncompressed_pos: u64,
    ) -> Result<brotli::Decompressor<S>, Error> {
        if uncompressed_pos % (UNCOMPRESSED_DATA_SIZE as u64) != 0 {
            return Err(Error::BadAPIArgument(
                "[new_decompressor_at] not a starting position".to_string(),
            ));
        }
        if !self.pos_in_stream(uncompressed_pos) {
            return Err(Error::EndOfStream);
        }
        match &self.sizes_info {
            Some(sizes_info) => {
                Ok(brotli::Decompressor::new(
                    inner,
                    sizes_info.compressed_block_size_at(uncompressed_pos) as usize,
                ))
            }
            None => Err(Error::MissingMetadata),
        }
    }
    /// Uncompressed size of the block starting at `uncompressed_pos` (must
    /// be a block boundary); same error contract as `new_decompressor_at`.
    fn uncompressed_block_size_at(&self, uncompressed_pos: u64) -> Result<u32, Error> {
        if uncompressed_pos % (UNCOMPRESSED_DATA_SIZE as u64) != 0 {
            return Err(Error::BadAPIArgument(
                "[uncompressed_block_size_at] not a starting position".to_string(),
            ));
        }
        if !self.pos_in_stream(uncompressed_pos) {
            return Err(Error::EndOfStream);
        }
        match &self.sizes_info {
            Some(sizes_info) => {
                let block_num = uncompressed_pos / (UNCOMPRESSED_DATA_SIZE as u64);
                Ok(sizes_info.uncompressed_block_size_at(block_num as usize))
            }
            None => Err(Error::MissingMetadata),
        }
    }
    /// Seek the inner (compressed) stream to the first byte of the block
    /// starting at `uncompressed_pos`, computed as the sum of compressed
    /// sizes of all preceding blocks; same error contract as
    /// `new_decompressor_at`. Assumes the layer's data begins at inner
    /// offset 0.
    fn sync_inner_with_uncompressed_pos<S: Read + Seek>(
        &self,
        inner: &mut S,
        uncompressed_pos: u64,
    ) -> Result<(), Error> {
        if uncompressed_pos % (UNCOMPRESSED_DATA_SIZE as u64) != 0 {
            return Err(Error::BadAPIArgument(
                "[sync_inner_with_uncompressed_pos] not a starting position".to_string(),
            ));
        }
        if !self.pos_in_stream(uncompressed_pos) {
            return Err(Error::EndOfStream);
        }
        let block_num = uncompressed_pos / (UNCOMPRESSED_DATA_SIZE as u64);
        match &self.sizes_info {
            Some(SizesInfo {
                compressed_sizes, ..
            }) => {
                // Offset of the block = sum of all previous compressed sizes
                let start_position = compressed_sizes
                    .iter()
                    .take(block_num as usize)
                    .map(|size| *size as u64)
                    .sum();
                inner.seek(SeekFrom::Start(start_position))?;
            }
            None => {
                return Err(Error::MissingMetadata);
            }
        }
        Ok(())
    }
}
impl<'a, R: 'a + Read + Seek> LayerReader<'a, R> for CompressionLayerReader<'a, R> {
fn into_inner(self) -> Option<Box<dyn 'a + LayerReader<'a, R>>> {
Some(self.state.into_inner())
}
fn into_raw(self: Box<Self>) -> R {
self.state.into_inner().into_raw()
}
fn initialize(&mut self) -> Result<(), Error> {
match &mut self.state {
CompressionLayerReaderState::Ready(inner) => {
inner.initialize()?;
let pos = inner.seek(SeekFrom::End(-4))?;
let len = inner.read_u32::<LittleEndian>()? as u64;
inner.seek(SeekFrom::Start(pos - len))?;
self.sizes_info = match bincode::options()
.with_limit(BINCODE_MAX_DESERIALIZE)
.with_fixint_encoding()
.deserialize_from(inner.take(len))
{
Ok(sinfo) => Some(sinfo),
_ => {
return Err(Error::DeserializationError);
}
};
Ok(())
}
_ => {
Err(Error::WrongReaderState(
"[Compression Layer]: on initialization, must be in Ready state".to_string(),
))
}
}
}
}
impl<'a, R: 'a + Read + Seek> Read for CompressionLayerReader<'a, R> {
    /// Read decompressed bytes, never crossing the current block boundary
    /// in a single call.
    ///
    /// State machine: `Ready` (at a block boundary) positions the inner
    /// stream, builds a decompressor for the block at `underlayer_pos`, and
    /// recurses; `InData` serves bytes from the current decompressor and
    /// flips back to `Ready` once the block is exhausted.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // Past the end of the uncompressed stream: EOF
        if !self.pos_in_stream(self.underlayer_pos) {
            return Ok(0);
        }
        // Take ownership of the state; `Empty` is left behind only during
        // this transition and is restored before returning
        let old_state = std::mem::replace(&mut self.state, CompressionLayerReaderState::Empty);
        match old_state {
            CompressionLayerReaderState::Ready(mut inner) => {
                self.sync_inner_with_uncompressed_pos(&mut inner, self.underlayer_pos)?;
                let decompressor = self.new_decompressor_at(inner, self.underlayer_pos)?;
                let uncompressed_size = self.uncompressed_block_size_at(self.underlayer_pos)?;
                self.state = CompressionLayerReaderState::InData {
                    read: 0,
                    uncompressed_size,
                    decompressor,
                };
                self.read(buf)
            }
            CompressionLayerReaderState::InData {
                read,
                uncompressed_size,
                mut decompressor,
            } => {
                // Invariant check: can never have consumed more than a block
                if read > uncompressed_size {
                    return Err(Error::WrongReaderState(
                        "[Compression Layer] Too much data read".to_string(),
                    )
                    .into());
                }
                if read == uncompressed_size {
                    // Block exhausted: back to `Ready`, recurse for the next
                    // block (or EOF)
                    self.state = CompressionLayerReaderState::Ready(decompressor.into_inner());
                    return self.read(buf);
                }
                // Cap the read at the end of the current block
                let size = std::cmp::min((uncompressed_size - read) as usize, buf.len());
                let read_add = decompressor.read(&mut buf[..size])?;
                self.underlayer_pos += read_add as u64;
                self.state = CompressionLayerReaderState::InData {
                    read: read + read_add as u32,
                    uncompressed_size,
                    decompressor,
                };
                Ok(read_add)
            }
            CompressionLayerReaderState::Empty => Err(Error::WrongReaderState(
                "[Compression Layer] Should never happens, unless an error already occurs before"
                    .to_string(),
            )
            .into()),
        }
    }
}
impl<'a, R: Read + Seek> Seek for CompressionLayerReader<'a, R> {
    /// Seek within the *uncompressed* stream.
    ///
    /// Implemented by restarting decompression at the enclosing block's
    /// boundary and discarding bytes up to the in-block offset, so a seek
    /// costs O(block size). Requires the footer (`initialize` must have run).
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        match &self.sizes_info {
            Some(_sizes_info) => {
                match pos {
                    SeekFrom::Start(pos) => {
                        // Round down to the enclosing block boundary
                        let inside_block = pos % (UNCOMPRESSED_DATA_SIZE as u64);
                        let rounded_pos = pos - inside_block;
                        let old_state =
                            std::mem::replace(&mut self.state, CompressionLayerReaderState::Empty);
                        let mut inner = old_state.into_inner();
                        self.sync_inner_with_uncompressed_pos(&mut inner, rounded_pos)?;
                        let mut decompressor = self.new_decompressor_at(inner, rounded_pos)?;
                        let uncompressed_size = self.uncompressed_block_size_at(rounded_pos)?;
                        // Discard decompressed bytes up to the in-block offset
                        io::copy(&mut (&mut decompressor).take(inside_block), &mut io::sink())?;
                        self.state = CompressionLayerReaderState::InData {
                            read: inside_block as u32,
                            uncompressed_size,
                            decompressor,
                        };
                        self.underlayer_pos = pos;
                        Ok(pos)
                    }
                    SeekFrom::Current(pos) => {
                        if pos == 0 {
                            // Fast path: pure position query, no state change
                            Ok(self.underlayer_pos)
                        } else {
                            // NOTE(review): `pos + underlayer_pos` can go
                            // negative on a large backward seek; the `as u64`
                            // cast would then wrap — confirm callers never
                            // seek before the stream start
                            self.seek(SeekFrom::Start((pos + self.underlayer_pos as i64) as u64))
                        }
                    }
                    SeekFrom::End(pos) => {
                        if pos > 0 {
                            return Err(Error::EndOfStream.into());
                        }
                        let end_pos = (&self.sizes_info).as_ref().unwrap().max_uncompressed_pos();
                        let distance_from_end = -pos;
                        // NOTE(review): underflows (debug panic) when seeking
                        // further back than the stream start — confirm whether
                        // an io error would be preferable
                        self.seek(SeekFrom::Start(end_pos - distance_from_end as u64))
                    }
                }
            }
            None => Err(Error::MissingMetadata.into()),
        }
    }
}
/// `Write` adapter that counts how many bytes pass through it; used to
/// measure the compressed size of each block.
struct WriterWithCount<W: Write> {
    inner: W,
    pos: u32,
}
impl<W: Write> WriterWithCount<W> {
    /// Wrap `inner` with the byte counter reset to zero.
    fn new(inner: W) -> Self {
        WriterWithCount { inner, pos: 0 }
    }
    /// Unwrap, discarding the counter.
    fn into_inner(self) -> W {
        self.inner
    }
}
impl<W: Write> Write for WriterWithCount<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let written = self.inner.write(buf)?;
        // Only bytes actually accepted by the inner writer are counted
        self.pos += written as u32;
        Ok(written)
    }
    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}
/// Internal state machine of the compression writer.
///
/// `Empty` is a transient placeholder used while ownership of the inner
/// writer is moved between states; reaching it otherwise is a logic error.
enum CompressionLayerWriterState<W: Write> {
    // At a block boundary, no compressor active
    Ready(W),
    // (uncompressed bytes written into the current block, active compressor)
    InData(u32, brotli::CompressorWriter<WriterWithCount<W>>),
    Empty,
}
/// Compression layer writer: splits input into fixed-size blocks, compresses
/// each independently, and records per-block compressed sizes for the footer
/// emitted by `finalize`
pub struct CompressionLayerWriter<'a, W: 'a + InnerWriterTrait> {
    state: CompressionLayerWriterState<InnerWriterType<'a, W>>,
    // Compressed size of each completed block, in stream order
    compressed_sizes: Vec<u32>,
    // Brotli quality level used for every block
    compression_level: u32,
}
impl<W: InnerWriterTrait> CompressionLayerWriterState<W> {
    /// Consume the state and return the wrapped destination writer.
    ///
    /// Panics on `Empty`, which only exists transiently during state swaps.
    fn into_inner(self) -> W {
        match self {
            CompressionLayerWriterState::Empty => {
                panic!("[Writer] Empty type to inner is impossible")
            }
            CompressionLayerWriterState::Ready(destination) => destination,
            CompressionLayerWriterState::InData(_, compressor) => {
                // Unwrap compressor, then the byte counter
                compressor.into_inner().into_inner()
            }
        }
    }
}
impl<'a, W: 'a + InnerWriterTrait> CompressionLayerWriter<'a, W> {
pub fn new(
inner: InnerWriterType<'a, W>,
config: &CompressionConfig,
) -> CompressionLayerWriter<'a, W> {
Self {
state: CompressionLayerWriterState::Ready(inner),
compressed_sizes: Vec::new(),
compression_level: config.compression_level,
}
}
}
impl<'a, W: 'a + InnerWriterTrait> LayerWriter<'a, W> for CompressionLayerWriter<'a, W> {
    /// Recover the inner writer, whatever the current state
    fn into_inner(self) -> Option<InnerWriterType<'a, W>> {
        Some(self.state.into_inner())
    }
    /// Unwind the whole layer stack down to the raw destination
    fn into_raw(self: Box<Self>) -> W {
        self.state.into_inner().into_raw()
    }
    /// Close the in-progress block (if any), then append the footer:
    /// `[bincode-serialized SizesInfo][u32 LE: serialized length]`,
    /// and finalize the inner layer. Leaves the writer in `Ready` state.
    fn finalize(&mut self) -> Result<(), Error> {
        let old_state = std::mem::replace(&mut self.state, CompressionLayerWriterState::Empty);
        let mut last_block_size = 0;
        let mut inner = match old_state {
            CompressionLayerWriterState::Ready(inner) => inner,
            CompressionLayerWriterState::InData(written, compress) => {
                // Flush the current compressor and record the final block's
                // compressed and uncompressed sizes
                let inner_count = compress.into_inner();
                self.compressed_sizes.push(inner_count.pos);
                last_block_size = written;
                inner_count.into_inner()
            }
            CompressionLayerWriterState::Empty => {
                return Err(Error::WrongReaderState("[Compression Layer] bad state in finalization, an error may already occurs before".to_string()));
            }
        };
        // Move the sizes out to build the footer without cloning; they are
        // restored below so they stay observable after finalization
        let compressed_sizes = std::mem::take(&mut self.compressed_sizes);
        let sinfo = SizesInfo {
            compressed_sizes,
            last_block_size,
        };
        if bincode::options()
            .with_limit(BINCODE_MAX_DESERIALIZE)
            .with_fixint_encoding()
            .serialize_into(&mut inner, &sinfo)
            .is_err()
        {
            return Err(Error::SerializationError);
        };
        // NOTE(review): `bincode::serialized_size` uses bincode's legacy
        // (fixint) config; assumed to match the `options()` encoding used
        // above — confirm the two stay in sync if the config ever changes
        match bincode::serialized_size(&sinfo) {
            Ok(size) => {
                inner.write_u32::<LittleEndian>(size as u32)?;
            }
            Err(_) => {
                return Err(Error::SerializationError);
            }
        };
        self.compressed_sizes = sinfo.compressed_sizes;
        inner.finalize()?;
        self.state = CompressionLayerWriterState::Ready(inner);
        Ok(())
    }
}
impl<'a, W: 'a + InnerWriterTrait> Write for CompressionLayerWriter<'a, W> {
    /// Write `buf`, never crossing a block boundary in a single call.
    ///
    /// State machine: `Ready` starts a new compressed block; `InData` keeps
    /// feeding the current block and rolls over to `Ready` once it holds
    /// `UNCOMPRESSED_DATA_SIZE` bytes.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let old_state = std::mem::replace(&mut self.state, CompressionLayerWriterState::Empty);
        match old_state {
            CompressionLayerWriterState::Ready(inner) => {
                // Count compressed output through `WriterWithCount` so the
                // block's compressed size can be recorded at rollover
                let inner_count = WriterWithCount::new(inner);
                // NOTE(review): buffer size 0 presumably lets brotli pick its
                // default internal buffer — confirm against the crate docs
                let mut compress = brotli::CompressorWriter::new(
                    inner_count,
                    0,
                    self.compression_level,
                    BROTLI_LOG_WINDOW,
                );
                let size = std::cmp::min(UNCOMPRESSED_DATA_SIZE as usize, buf.len());
                let written = compress.write(&buf[..size])?;
                self.state = CompressionLayerWriterState::InData(written as u32, compress);
                Ok(written)
            }
            CompressionLayerWriterState::InData(written, mut compress) => {
                // Invariant check: a block can never exceed its nominal size
                if written > UNCOMPRESSED_DATA_SIZE {
                    return Err(Error::WrongReaderState(
                        "[Compression Layer] Too much written".to_string(),
                    ).into());
                }
                if written == UNCOMPRESSED_DATA_SIZE {
                    // Block full: record its compressed size, return to
                    // `Ready`, and recurse to start the next block
                    let inner_count = compress.into_inner();
                    self.compressed_sizes.push(inner_count.pos);
                    self.state = CompressionLayerWriterState::Ready(inner_count.into_inner());
                    return self.write(buf);
                }
                // Cap the write at the end of the current block
                let size = std::cmp::min((UNCOMPRESSED_DATA_SIZE - written) as usize, buf.len());
                let written_add = compress.write(&buf[..size])?;
                self.state =
                    CompressionLayerWriterState::InData(written + written_add as u32, compress);
                Ok(written_add)
            }
            CompressionLayerWriterState::Empty => {
                Err(Error::WrongReaderState("[Compression Layer] On write, should never happens, unless an error already occurs before".to_string()).into())
            }
        }
    }
    /// Forward the flush to whichever writer is currently active.
    fn flush(&mut self) -> io::Result<()> {
        match &mut self.state {
            CompressionLayerWriterState::Ready(inner) => inner.flush(),
            CompressionLayerWriterState::InData(_written, compress) => compress.flush(),
            CompressionLayerWriterState::Empty => {
                Err(Error::WrongReaderState("[Compression Layer] On flush, should never happens, unless an error already occurs before".to_string()).into())
            }
        }
    }
}
/// Fail-safe (repair mode) reader: decompresses linearly without the sizes
/// footer, recovering as much data as possible from a possibly-truncated
/// stream; no seeking
pub struct CompressionLayerFailSafeReader<'a, R: 'a + Read> {
    state: CompressionLayerReaderState<Box<dyn 'a + LayerFailSafeReader<'a, R>>>,
}
impl<'a, R: 'a + Read> CompressionLayerFailSafeReader<'a, R> {
    /// Wrap `inner` for linear, footer-less decompression.
    pub fn new(inner: Box<dyn 'a + LayerFailSafeReader<'a, R>>) -> Result<Self, Error> {
        let state = CompressionLayerReaderState::Ready(inner);
        Ok(Self { state })
    }
}
impl<'a, R: 'a + Read> LayerFailSafeReader<'a, R> for CompressionLayerFailSafeReader<'a, R> {
    /// Recover the inner layer, whatever the current state.
    fn into_inner(self) -> Option<Box<dyn 'a + LayerFailSafeReader<'a, R>>> {
        let inner = self.state.into_inner();
        Some(inner)
    }
    /// Unwind the whole layer stack down to the raw source.
    fn into_raw(self: Box<Self>) -> R {
        let inner = self.state.into_inner();
        inner.into_raw()
    }
}
impl<'a, R: 'a + Read> Read for CompressionLayerFailSafeReader<'a, R> {
    /// Linearly decompress block after block without footer metadata,
    /// assuming every block holds `UNCOMPRESSED_DATA_SIZE` uncompressed
    /// bytes (the last, shorter block simply ends with the stream).
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let old_state = std::mem::replace(&mut self.state, CompressionLayerReaderState::Empty);
        match old_state {
            CompressionLayerReaderState::Ready(inner) => {
                // No footer: the compressed block size is unknown, so use a
                // minimal (1 byte) internal buffer
                let decompressor = brotli::Decompressor::new(inner, 1);
                self.state = CompressionLayerReaderState::InData {
                    read: 0,
                    // Assume a full-size block; a shorter last block just
                    // hits EOF early
                    uncompressed_size: UNCOMPRESSED_DATA_SIZE,
                    decompressor,
                };
                self.read(buf)
            }
            CompressionLayerReaderState::InData {
                read,
                uncompressed_size,
                mut decompressor,
            } => {
                // Invariant check: can never have consumed more than a block
                if read > uncompressed_size {
                    return Err(Error::WrongReaderState(
                        "[Compress FailSafe Layer] Too much data read".to_string(),
                    )
                    .into());
                }
                if read == uncompressed_size {
                    // Block exhausted: drain any trailing bytes of the brotli
                    // stream so the inner reader is positioned at the start
                    // of the next block, then recurse
                    io::copy(&mut decompressor, &mut io::sink())?;
                    self.state = CompressionLayerReaderState::Ready(decompressor.into_inner());
                    return self.read(buf);
                }
                // Cap the read at the end of the current block
                let size = std::cmp::min((uncompressed_size - read) as usize, buf.len());
                let read_add = decompressor.read(&mut buf[..size])?;
                self.state = CompressionLayerReaderState::InData {
                    read: read + read_add as u32,
                    uncompressed_size,
                    decompressor,
                };
                Ok(read_add)
            }
            CompressionLayerReaderState::Empty => Err(Error::WrongReaderState(
                "[Compression Layer] Should never happens, unless an error already occurs before"
                    .to_string(),
            )
            .into()),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::Layers;
    use crate::layers::raw::{RawLayerFailSafeReader, RawLayerReader, RawLayerWriter};
    use rand::distributions::{Alphanumeric, Distribution, Standard};
    use rand::SeedableRng;
    use std::io::{Cursor, Read, Write};
    use std::time::Instant;

    // 2.5 blocks of data: exercises two full blocks plus a partial last block
    static SIZE: usize = (UNCOMPRESSED_DATA_SIZE * 2 + UNCOMPRESSED_DATA_SIZE / 2) as usize;

    // Deterministic, compressible (alphanumeric) test data
    fn get_data() -> Vec<u8> {
        let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(0);
        let data: Vec<u8> = Alphanumeric
            .sample_iter(&mut rng)
            .take(SIZE)
            .map(|c| c as u8)
            .collect();
        assert_eq!(data.len(), SIZE);
        data
    }

    // Deterministic, incompressible (uniform random) test data
    fn get_uncompressable_data() -> Vec<u8> {
        let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(0);
        let data: Vec<u8> = Standard.sample_iter(&mut rng).take(SIZE).collect();
        assert_eq!(data.len(), SIZE);
        data
    }

    // A small, un-finalized write must produce a single raw brotli stream
    #[test]
    fn compress_layer_writer() {
        let file = Vec::new();
        let mut comp = Box::new(CompressionLayerWriter::new(
            Box::new(RawLayerWriter::new(file)),
            &CompressionConfig::default(),
        ));
        let mut fake_data = vec![1, 2, 3, 4];
        let fake_data2 = vec![5, 6, 7, 8];
        comp.write_all(fake_data.as_slice()).unwrap();
        comp.write_all(fake_data2.as_slice()).unwrap();
        let file = comp.into_raw();
        let mut src = Cursor::new(file.as_slice());
        let mut reader = brotli::Decompressor::new(&mut src, 0);
        let mut buf = Vec::new();
        reader.read_to_end(&mut buf).unwrap();
        println!("{:?}", buf);
        fake_data.extend(fake_data2);
        assert_eq!(fake_data, buf);
    }

    // Multi-block output must be decodable as consecutive independent
    // brotli streams, one per block
    #[test]
    fn compress_layer_several() {
        let data = get_data();
        let bytes = data.as_slice();
        let file = Vec::new();
        let mut comp = Box::new(CompressionLayerWriter::new(
            Box::new(RawLayerWriter::new(file)),
            &CompressionConfig::default(),
        ));
        let now = Instant::now();
        comp.write_all(bytes).unwrap();
        println!(
            "Compression: {} us for {} bytes",
            now.elapsed().as_micros(),
            bytes.len()
        );
        let file = comp.into_raw();
        println!("{}", file.len());
        let mut src = Cursor::new(file.as_slice());
        let now = Instant::now();
        // First full block
        let mut reader = brotli::Decompressor::new(&mut src, 1);
        let mut buf = vec![0; UNCOMPRESSED_DATA_SIZE as usize];
        reader.read_exact(&mut buf).expect("First buffer");
        assert_eq!(buf.len(), UNCOMPRESSED_DATA_SIZE as usize);
        assert_eq!(buf.as_slice(), &bytes[..(UNCOMPRESSED_DATA_SIZE as usize)]);
        // Second full block
        let mut reader = brotli::Decompressor::new(&mut src, 1);
        let mut buf2 = vec![0; UNCOMPRESSED_DATA_SIZE as usize];
        reader.read_exact(&mut buf2).expect("Second buffer");
        assert_eq!(buf2.len(), UNCOMPRESSED_DATA_SIZE as usize);
        assert_eq!(
            buf2.as_slice(),
            &bytes[(UNCOMPRESSED_DATA_SIZE as usize)..((UNCOMPRESSED_DATA_SIZE * 2) as usize)]
        );
        // Partial last block
        let mut reader = brotli::Decompressor::new(&mut src, 1);
        let mut buf3 = vec![0; SIZE - buf.len() - buf2.len() as usize];
        reader.read_exact(&mut buf3).expect("Last buffer");
        assert_eq!(buf.len() + buf2.len() + buf3.len(), SIZE);
        assert_eq!(
            buf3.as_slice(),
            &bytes[(buf.len() + buf2.len())..(buf.len() + buf2.len() + buf3.len())]
        );
        println!(
            "Decompression: {} us for {} bytes",
            now.elapsed().as_micros(),
            buf.len() + buf2.len() + buf3.len()
        );
        println!("Buf sizes {} {} {}", buf.len(), buf2.len(), buf3.len());
    }

    // Full write/finalize/read round-trip, compressible and incompressible
    #[test]
    fn compress_layer() {
        for data in vec![get_data(), get_uncompressable_data()] {
            let bytes = data.as_slice();
            let file = Vec::new();
            let mut comp = Box::new(CompressionLayerWriter::new(
                Box::new(RawLayerWriter::new(file)),
                &CompressionConfig::default(),
            ));
            let now = Instant::now();
            comp.write_all(bytes).unwrap();
            comp.finalize().unwrap();
            let file = comp.into_raw();
            let buf = Cursor::new(file.as_slice());
            let mut decomp =
                Box::new(CompressionLayerReader::new(Box::new(RawLayerReader::new(buf))).unwrap());
            decomp.initialize().unwrap();
            let mut buf = Vec::new();
            decomp.read_to_end(&mut buf).unwrap();
            println!(
                "Compression / Decompression: {} us for {} bytes ({} compressed)",
                now.elapsed().as_micros(),
                bytes.len(),
                file.len()
            );
            assert_eq!(buf.len(), bytes.len());
            assert_eq!(buf.as_slice(), bytes);
        }
    }

    // Fail-safe reader must fully recover an intact archive without the footer
    #[test]
    fn compress_failsafe_layer() {
        for data in vec![get_data(), get_uncompressable_data()] {
            let bytes = data.as_slice();
            let file = Vec::new();
            let mut comp = Box::new(CompressionLayerWriter::new(
                Box::new(RawLayerWriter::new(file)),
                &CompressionConfig::default(),
            ));
            let now = Instant::now();
            comp.write_all(bytes).unwrap();
            comp.finalize().unwrap();
            let file = comp.into_raw();
            let mut decomp = Box::new(
                CompressionLayerFailSafeReader::new(Box::new(RawLayerFailSafeReader::new(
                    file.as_slice(),
                )))
                .unwrap(),
            );
            let mut buf = Vec::new();
            decomp.read_to_end(&mut buf).unwrap();
            println!(
                "Compression / Decompression (fail-safe): {} us for {} bytes ({} compressed)",
                now.elapsed().as_micros(),
                bytes.len(),
                file.len()
            );
            assert_eq!(buf.len(), bytes.len());
            assert_eq!(buf.as_slice(), bytes);
        }
    }

    // On a truncated archive the fail-safe reader must error but still yield
    // a correct prefix of the original data
    #[test]
    fn compress_failsafe_truncated() {
        for data in vec![get_data(), get_uncompressable_data()] {
            let bytes = data.as_slice();
            let file = Vec::new();
            let mut comp = Box::new(CompressionLayerWriter::new(
                Box::new(RawLayerWriter::new(file)),
                &CompressionConfig::default(),
            ));
            let now = Instant::now();
            comp.write_all(bytes).unwrap();
            comp.finalize().unwrap();
            let file = comp.into_raw();
            // Keep only the first half of the compressed stream
            let stop = file.len() / 2;
            let mut decomp = Box::new(
                CompressionLayerFailSafeReader::new(Box::new(RawLayerFailSafeReader::new(
                    &file[..stop],
                )))
                .unwrap(),
            );
            let mut buf = Vec::new();
            decomp.read_to_end(&mut buf).unwrap_err();
            println!(
                "Compression / Decompression (fail-safe): {} us for {} bytes ({} compressed, {} keeped)",
                now.elapsed().as_micros(),
                bytes.len(),
                file.len(),
                buf.len(),
            );
            assert_eq!(buf.as_slice(), &bytes[..buf.len()]);
            assert!(buf.len() >= bytes.len() / 3);
        }
    }

    // The reader must load back the exact per-block sizes the writer recorded
    #[test]
    fn compress_layer_with_footer() {
        let data = get_data();
        let bytes = data.as_slice();
        let file = Vec::new();
        let mut comp = Box::new(CompressionLayerWriter::new(
            Box::new(RawLayerWriter::new(file)),
            &CompressionConfig::default(),
        ));
        comp.write_all(bytes).unwrap();
        comp.finalize().unwrap();
        let mut compressed_sizes = Vec::new();
        compressed_sizes.extend_from_slice(&comp.compressed_sizes);
        let file = comp.into_raw();
        let buf = Cursor::new(file.as_slice());
        let mut decomp =
            Box::new(CompressionLayerReader::new(Box::new(RawLayerReader::new(buf))).unwrap());
        decomp.initialize().unwrap();
        assert_eq!(
            compressed_sizes,
            decomp.sizes_info.unwrap().compressed_sizes
        );
    }

    // Seek in every direction (Start / Current / End), across block
    // boundaries, and verify reads after each seek
    #[test]
    fn seek_with_footer() {
        for data in vec![get_data(), get_uncompressable_data()] {
            let bytes = data.as_slice();
            let file = Vec::new();
            let mut comp = Box::new(CompressionLayerWriter::new(
                Box::new(RawLayerWriter::new(file)),
                &CompressionConfig::default(),
            ));
            comp.write_all(bytes).unwrap();
            comp.finalize().unwrap();
            let file = comp.into_raw();
            let buf = Cursor::new(file.as_slice());
            let mut decomp =
                Box::new(CompressionLayerReader::new(Box::new(RawLayerReader::new(buf))).unwrap());
            decomp.initialize().unwrap();
            // In-block absolute seek
            let pos = decomp.seek(SeekFrom::Start(5)).unwrap();
            assert_eq!(pos, 5);
            let mut buf = [0u8; 5];
            decomp.read_exact(&mut buf).unwrap();
            assert_eq!(&buf, &bytes[5..10]);
            // Absolute seek into the second block
            let pos = decomp
                .seek(SeekFrom::Start((UNCOMPRESSED_DATA_SIZE + 4).into()))
                .unwrap();
            assert_eq!(pos, (UNCOMPRESSED_DATA_SIZE + 4).into());
            let mut buf = [0u8; 5];
            decomp.read_exact(&mut buf).unwrap();
            assert_eq!(&buf, &bytes[pos as usize..(pos + 5) as usize]);
            // Small relative forward seek within a block
            let pos = decomp.seek(SeekFrom::Current(2)).unwrap();
            assert_eq!(pos, (UNCOMPRESSED_DATA_SIZE + 4 + 5 + 2).into());
            let mut buf = [0u8; 5];
            decomp.read_exact(&mut buf).unwrap();
            assert_eq!(&buf, &bytes[pos as usize..(pos + 5) as usize]);
            // Relative forward seek crossing a block boundary
            let pos = decomp
                .seek(SeekFrom::Current(UNCOMPRESSED_DATA_SIZE.into()))
                .unwrap();
            assert_eq!(pos, (UNCOMPRESSED_DATA_SIZE * 2 + 4 + 5 + 2 + 5).into());
            let mut buf = [0u8; 5];
            decomp.read_exact(&mut buf).unwrap();
            assert_eq!(&buf, &bytes[pos as usize..(pos + 5) as usize]);
            // Relative backward seek (back over the 5 bytes just read)
            let pos = decomp.seek(SeekFrom::Current(-5)).unwrap();
            assert_eq!(pos, (UNCOMPRESSED_DATA_SIZE * 2 + 4 + 5 + 2 + 5).into());
            let mut buf = [0u8; 5];
            decomp.read_exact(&mut buf).unwrap();
            assert_eq!(&buf, &bytes[pos as usize..(pos + 5) as usize]);
            // Seek relative to the end of the stream
            let pos = decomp.seek(SeekFrom::End(-5)).unwrap();
            assert_eq!(pos, (SIZE - 5) as u64);
            let mut buf = [0u8; 5];
            decomp.read_exact(&mut buf).unwrap();
            assert_eq!(&buf, &bytes[pos as usize..(pos + 5) as usize]);
        }
    }

    // Unit checks on the footer arithmetic helpers
    #[test]
    fn sizes_info() {
        let sizes_info = SizesInfo {
            compressed_sizes: vec![1, 2, 5],
            last_block_size: 42,
        };
        assert_eq!(
            sizes_info.uncompressed_block_size_at(1),
            UNCOMPRESSED_DATA_SIZE
        );
        assert_eq!(sizes_info.uncompressed_block_size_at(3), 42);
        assert_eq!(
            sizes_info.max_uncompressed_pos(),
            2 * UNCOMPRESSED_DATA_SIZE as u64 + 42
        );
        assert_eq!(
            sizes_info.compressed_block_size_at(UNCOMPRESSED_DATA_SIZE as u64 + 1),
            2
        );
    }

    // A higher compression level must compress better, and both levels must
    // round-trip to identical plaintext
    #[test]
    fn compress_config() {
        let data = get_data();
        let bytes = data.as_slice();
        let file = Vec::new();
        let mut config = ArchiveWriterConfig::new();
        config
            .enable_layer(Layers::COMPRESS)
            .with_compression_level(0)
            .unwrap();
        let mut comp = Box::new(CompressionLayerWriter::new(
            Box::new(RawLayerWriter::new(file)),
            &config.compress,
        ));
        comp.write_all(bytes).unwrap();
        comp.finalize().unwrap();
        let file2 = Vec::new();
        let mut config2 = ArchiveWriterConfig::new();
        config2
            .enable_layer(Layers::COMPRESS)
            .with_compression_level(5)
            .unwrap();
        let mut comp2 = Box::new(CompressionLayerWriter::new(
            Box::new(RawLayerWriter::new(file2)),
            &config2.compress,
        ));
        comp2.write_all(bytes).unwrap();
        comp2.finalize().unwrap();
        let file = comp.into_raw();
        let file2 = comp2.into_raw();
        // Level 5 should beat level 0 on compressible data
        assert!(file.len() > file2.len());
        let buf = Cursor::new(file.as_slice());
        let mut buf_out = Vec::new();
        let mut decomp =
            Box::new(CompressionLayerReader::new(Box::new(RawLayerReader::new(buf))).unwrap());
        decomp.initialize().unwrap();
        decomp.read_to_end(&mut buf_out).unwrap();
        let buf2 = Cursor::new(file2.as_slice());
        let mut buf2_out = Vec::new();
        let mut decomp =
            Box::new(CompressionLayerReader::new(Box::new(RawLayerReader::new(buf2))).unwrap());
        decomp.initialize().unwrap();
        decomp.read_to_end(&mut buf2_out).unwrap();
        assert_eq!(buf_out, buf2_out);
    }
}