use std::collections::HashMap;
use std::fs::File;
use std::io::{copy, Read, Seek, SeekFrom, Write};
use anyhow::{bail, Context, Result};
use bincode::Options;
use clap::crate_version;
use serde::{Deserialize, Serialize};
use xz2::read::XzDecoder;
use xz2::write::XzEncoder;
use crate::io::*;
use crate::iso9660;
// Magic bytes identifying a miniso data stream ("MINISO" NUL-padded to 8 bytes).
const HEADER_MAGIC: [u8; 8] = *b"MINISO\0\0";
// Format version of the serialized data; bump on incompatible layout changes.
const HEADER_VERSION: u32 = 1;
// Upper bound (1 MiB) on serialized size, enforced on both write and read
// via LimitWriter/LimitReader.
const DATA_MAX_SIZE: u64 = 1024 * 1024;
/// Match table: for each file present in both ISOs, where its data lives in
/// the minimal ISO and where the identical copy lives in the full ISO.
#[derive(Serialize, Deserialize, Debug)]
struct Table {
    // Sorted by position in the minimal ISO; validated to be non-overlapping.
    entries: Vec<TableEntry>,
}
impl Table {
    /// Build the match table pairing each file in the minimal ISO with its
    /// identical copy in the full ISO.
    ///
    /// Returns the validated table plus the number of extraneous entries
    /// dropped (zero-length files and exact duplicates).  Fails if a minimal
    /// file is missing from the full ISO, if the two copies have different
    /// lengths, or if the resulting table fails validation.
    fn new(
        full_files: &HashMap<String, iso9660::File>,
        minimal_files: &HashMap<String, iso9660::File>,
    ) -> Result<(Self, usize)> {
        let mut entries: Vec<TableEntry> = Vec::new();
        for (path, minimal_entry) in minimal_files {
            let full_entry = full_files
                .get(path)
                .with_context(|| format!("missing minimal file {path} in full ISO"))?;
            // The copies must be byte-identical, so their lengths must match.
            if full_entry.length != minimal_entry.length {
                bail!("File {path} has different lengths in full and minimal ISOs");
            }
            entries.push(TableEntry {
                minimal: minimal_entry.address,
                full: full_entry.address,
                length: full_entry.length,
            });
        }
        // Sort by position in the minimal ISO so packing can walk it linearly.
        entries.sort_by_key(|e| e.minimal.as_sector());
        let total = entries.len();
        // Drop zero-length files (nothing to substitute) and consecutive
        // duplicates; retain() filters in place instead of the
        // drain().filter().collect() round trip.
        entries.retain(|e| e.length > 0);
        entries.dedup();
        let extraneous = total - entries.len();
        let table = Table { entries };
        table.validate().context("validating table")?;
        Ok((table, extraneous))
    }

    /// Check table invariants: non-empty, and no two entries overlap in the
    /// minimal ISO.  Assumes entries are already sorted by minimal offset.
    fn validate(&self) -> Result<()> {
        if self.entries.is_empty() {
            bail!("table is empty; ISOs have no files in common?");
        }
        // Walk adjacent pairs; each entry must end at or before the start
        // of the next one.
        for pair in self.entries.windows(2) {
            let (e, next_e) = (&pair[0], &pair[1]);
            if e.minimal.as_offset() + e.length as u64 > next_e.minimal.as_offset() {
                bail!(
                    "Files at offsets {} and {} overlap",
                    e.minimal.as_offset(),
                    next_e.minimal.as_offset(),
                );
            }
        }
        Ok(())
    }
}
/// One matched file: the same content stored in both ISOs.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
struct TableEntry {
    // Location of the file's data in the minimal ISO.
    minimal: iso9660::Address,
    // Location of the identical data in the full ISO.
    full: iso9660::Address,
    // File length in bytes (equal in both ISOs by construction).
    length: u32,
}
/// Fixed preamble serialized ahead of `Data`, used to recognize miniso
/// streams and reject incompatible format versions.
#[derive(Serialize, Deserialize, Debug)]
struct Header {
    // Must equal HEADER_MAGIC.
    magic: [u8; 8],
    // Must equal HEADER_VERSION.
    version: u32,
    // Version of the application that created the stream; informational,
    // reported in version-mismatch errors.
    app_version: String,
}
impl Default for Header {
fn default() -> Self {
Self {
magic: HEADER_MAGIC,
version: HEADER_VERSION,
app_version: crate_version!().into(),
}
}
}
impl Header {
    /// Verify that this header describes a miniso stream we can read:
    /// correct magic bytes and a matching format version.
    pub fn validate(&self) -> Result<()> {
        // Guard: wrong magic means this isn't our format at all.
        if self.magic != HEADER_MAGIC {
            bail!("not a miniso file!");
        }
        // Matching version: nothing further to check.
        if self.version == HEADER_VERSION {
            return Ok(());
        }
        bail!(
            "incompatible miniso file version: {} vs {} (created by {})",
            HEADER_VERSION,
            self.version,
            self.app_version,
        );
    }
}
/// Serialized payload of a miniso stream: the match table, the expected
/// digest of the reassembled minimal ISO, and the xz-compressed remainder
/// of the minimal ISO with matched file contents elided.
#[derive(Serialize, Deserialize, Debug)]
pub struct Data {
    table: Table,
    // SHA-256 of the complete minimal ISO, checked after unpacking.
    digest: Sha256Digest,
    // xz stream of the minimal ISO minus the byte ranges covered by `table`.
    xzpacked: Vec<u8>,
}
impl Data {
    /// Pack the minimal ISO for reconstruction against the full ISO.
    ///
    /// Records the minimal ISO's SHA-256 digest, then xz-compresses it with
    /// the contents of matched files skipped — on unpack those bytes are
    /// read back out of the full ISO instead.
    ///
    /// Returns `(data, matched_file_count, bytes_skipped,
    /// bytes_written_uncompressed, bytes_written_compressed)`.
    pub fn xzpack(
        miniso: &mut File,
        full_files: &HashMap<String, iso9660::File>,
        minimal_files: &HashMap<String, iso9660::File>,
    ) -> Result<(Self, usize, u64, u64, u64)> {
        let (table, extraneous) = Table::new(full_files, minimal_files)?;
        // Digest the whole minimal ISO up front so unpacking can verify the
        // reassembled output byte-for-byte.
        let digest = Sha256Digest::from_file(miniso)?;
        miniso.rewind().context("seeking back to miniso start")?;
        // Current read position in the minimal ISO.
        let mut offset = 0;
        let mut xzw = XzEncoder::new(Vec::new(), 9);
        let mut buf = [0u8; BUFFER_SIZE];
        // Total bytes elided from the stream (recoverable from the full ISO).
        let mut skipped: u64 = 0;
        for entry in &table.entries {
            let addr: u64 = entry.minimal.as_offset();
            // Entries are sorted and validated non-overlapping, so we
            // should never need to move backward.
            assert!(offset <= addr);
            if addr > offset {
                // Compress the gap between the previous entry and this one.
                copy_exactly_n(miniso, &mut xzw, addr - offset, &mut buf).with_context(|| {
                    format!(
                        "copying {} miniso bytes at offset {}",
                        addr - offset,
                        offset
                    )
                })?;
            }
            // Seek past the file's contents instead of compressing them.
            offset = miniso
                .seek(SeekFrom::Current(entry.length as i64))
                .with_context(|| format!("skipping miniso file at offset {addr}"))?;
            skipped += entry.length as u64;
        }
        // Compress everything after the last matched file.
        copy(miniso, &mut xzw).context("copying remaining miniso bytes")?;
        xzw.try_finish().context("trying to finish xz stream")?;
        // Matched files include the extraneous (zero-length/duplicate)
        // entries dropped while building the table.
        let matches = table.entries.len() + extraneous;
        // Read the byte counters before finish() consumes the encoder.
        let written = xzw.total_in();
        let written_compressed = xzw.total_out();
        Ok((
            Self {
                table,
                digest,
                xzpacked: xzw.finish().context("finishing xz stream")?,
            },
            matches,
            skipped,
            written,
            written_compressed,
        ))
    }

    /// Serialize the header followed by `self` with bincode, enforcing
    /// DATA_MAX_SIZE on the total output via LimitWriter.
    pub fn serialize(&self, w: impl Write) -> Result<()> {
        let mut limiter = LimitWriter::new(w, DATA_MAX_SIZE, "data size limit".into());
        let header = Header::default();
        let coder = &mut bincoder();
        coder
            .serialize_into(&mut limiter, &header)
            .context("failed to serialize header")?;
        coder
            .serialize_into(&mut limiter, &self)
            .context("failed to serialize data")?;
        Ok(())
    }

    /// Deserialize and validate a header plus `Data` from `r`, enforcing
    /// DATA_MAX_SIZE on the total input via LimitReader.
    pub fn deserialize(r: impl Read) -> Result<Self> {
        let mut limiter = LimitReader::new(r, DATA_MAX_SIZE, "data size limit".into());
        let coder = &mut bincoder();
        // Validate the header before trusting the rest of the stream.
        let header: Header = coder
            .deserialize_from(&mut limiter)
            .context("failed to deserialize header")?;
        header.validate().context("validating header")?;
        let data: Self = coder
            .deserialize_from(&mut limiter)
            .context("failed to deserialize data")?;
        // Re-check table invariants; the serialized form is untrusted input.
        data.table.validate().context("validating table")?;
        Ok(data)
    }

    /// Reconstruct the minimal ISO into `w` by interleaving decompressed
    /// packed bytes with matched file contents read from the full ISO, then
    /// verify the output against the stored SHA-256 digest.
    pub fn unxzpack(&self, fulliso: &mut File, w: impl Write) -> Result<()> {
        let mut xzr = XzDecoder::new(self.xzpacked.as_slice());
        // Hash the output as it's written so the digest check at the end
        // covers exactly what the caller received.
        let mut w = WriteHasher::new_sha256(w)?;
        let mut buf = [0u8; BUFFER_SIZE];
        // Current write position in the reconstructed minimal ISO.
        let mut offset = 0;
        for entry in &self.table.entries {
            let minimal_addr = entry.minimal.as_offset();
            let fulliso_addr = entry.full.as_offset();
            if minimal_addr > offset {
                // Emit the packed bytes between the previous entry and this one.
                offset += copy_exactly_n(&mut xzr, &mut w, minimal_addr - offset, &mut buf)
                    .with_context(|| {
                        format!(
                            "copying {} packed bytes at offset {}",
                            minimal_addr - offset,
                            offset
                        )
                    })?;
            }
            // Substitute the file's contents from the full ISO.
            fulliso
                .seek(SeekFrom::Start(fulliso_addr))
                .with_context(|| format!("seeking to full ISO file at offset {fulliso_addr}"))?;
            offset += copy_exactly_n(fulliso, &mut w, entry.length as u64, &mut buf)
                .with_context(|| format!("copying full ISO file at offset {fulliso_addr}"))?;
        }
        // Emit whatever packed bytes follow the last matched file.
        copy(&mut xzr, &mut w).context("copying remaining packed bytes")?;
        let digest = w.try_into()?;
        if self.digest != digest {
            bail!(
                "wrong final digest: expected {}, found {}",
                self.digest.to_hex_string()?,
                digest.to_hex_string()?
            );
        }
        Ok(())
    }
}