pub mod checksum;
pub mod error;
pub mod format;
pub mod reader;
pub mod writer;
pub use checksum::{CHECKSUM_TYPE_CRC32, CHECKSUM_TYPE_NONE, crc32};
pub use error::{DppError, Result};
pub use format::{BlockType, KolyHeader, MishHeader, PartitionEntry};
pub use reader::{CompressionInfo, DmgReader, DmgReaderOptions, DmgStats, is_dmg, open};
pub use writer::{CompressionMethod, DmgWriter, create, create_from_data, create_from_file};
/// Filesystem family of a DMG partition, derived from its partition-map name.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PartitionType {
    /// Partition whose name contains `Apple_HFS`.
    Hfs,
    /// Partition whose name contains `Apple_HFSX` (case-sensitive HFS+).
    Hfsx,
    /// Partition whose name contains `Apple_APFS`.
    Apfs,
    /// Any other partition (free space, partition map, drivers, ...).
    Other,
}

impl PartitionType {
    /// Classifies a partition by substring-matching its partition-map name.
    pub fn from_partition_name(name: &str) -> Self {
        // Order matters: "Apple_HFS" is a prefix of "Apple_HFSX", so the
        // longer marker must be tested first.
        const MARKERS: [(&str, PartitionType); 3] = [
            ("Apple_HFSX", PartitionType::Hfsx),
            ("Apple_HFS", PartitionType::Hfs),
            ("Apple_APFS", PartitionType::Apfs),
        ];
        for (marker, kind) in MARKERS {
            if name.contains(marker) {
                return kind;
            }
        }
        PartitionType::Other
    }

    /// `true` for the HFS family (`Hfs` and the case-sensitive `Hfsx`).
    pub fn is_hfs_compatible(&self) -> bool {
        matches!(self, PartitionType::Hfs | PartitionType::Hfsx)
    }
}
use std::fs::File;
use std::io::BufReader;
use std::path::Path;
/// High-level read handle for a DMG file on disk.
///
/// Thin convenience wrapper around [`DmgReader`] using buffered file I/O.
pub struct DmgArchive {
reader: DmgReader<BufReader<File>>,
}
/// Summary of one partition inside a DMG, as returned by
/// [`DmgArchive::partitions`] / [`DmgArchive::partition`].
#[derive(Debug, Clone)]
pub struct PartitionInfo {
// Partition name as stored in the partition map (e.g. contains "Apple_HFS").
pub name: String,
// Partition id; usable with `DmgArchive::extract_partition` and friends.
pub id: i32,
// Sector count taken from the partition's block map.
pub sectors: u64,
// Uncompressed size in bytes (from the block map).
pub size: u64,
// Compressed (on-disk) size in bytes (from the block map).
pub compressed_size: u64,
// Filesystem family inferred from `name`.
pub partition_type: PartitionType,
}
impl DmgArchive {
    /// Opens a DMG file with default reader options.
    ///
    /// # Errors
    /// Returns any error produced by [`DmgReader::open`] (I/O, parse,
    /// checksum verification).
    pub fn open<P: AsRef<Path>>(path: P) -> Result<Self> {
        let reader = DmgReader::open(path)?;
        Ok(DmgArchive { reader })
    }

    /// Opens a DMG file with explicit [`reader::DmgReaderOptions`]
    /// (e.g. to disable checksum verification).
    pub fn open_with_options<P: AsRef<Path>>(
        path: P,
        options: reader::DmgReaderOptions,
    ) -> Result<Self> {
        let reader = DmgReader::open_with_options(path, options)?;
        Ok(DmgArchive { reader })
    }

    /// Returns aggregate statistics for the image.
    pub fn stats(&self) -> DmgStats {
        self.reader.stats()
    }

    /// Returns information about the compression used in the image.
    pub fn compression_info(&self) -> CompressionInfo {
        self.reader.compression_info()
    }

    /// Lists every partition in the image.
    pub fn partitions(&self) -> Vec<PartitionInfo> {
        self.reader
            .partitions()
            .iter()
            .map(|p| PartitionInfo {
                name: p.name.clone(),
                id: p.id,
                sectors: p.block_map.sector_count,
                size: p.block_map.uncompressed_size(),
                compressed_size: p.block_map.compressed_size(),
                partition_type: PartitionType::from_partition_name(&p.name),
            })
            .collect()
    }

    /// Looks up a single partition by name; `None` if no match.
    pub fn partition(&self, name: &str) -> Option<PartitionInfo> {
        self.reader.partition(name).map(|p| PartitionInfo {
            name: p.name.clone(),
            id: p.id,
            sectors: p.block_map.sector_count,
            size: p.block_map.uncompressed_size(),
            compressed_size: p.block_map.compressed_size(),
            partition_type: PartitionType::from_partition_name(&p.name),
        })
    }

    /// Decompresses the partition with the given id into memory.
    pub fn extract_partition(&mut self, id: i32) -> Result<Vec<u8>> {
        self.reader.decompress_partition_auto(id)
    }

    /// Decompresses the named partition into memory.
    ///
    /// # Errors
    /// Returns [`DppError::FileNotFound`] if no partition has that name.
    pub fn extract_partition_by_name(&mut self, name: &str) -> Result<Vec<u8>> {
        let partition = self
            .reader
            .partition(name)
            .ok_or_else(|| DppError::FileNotFound(name.to_string()))?;
        self.reader.decompress_partition_auto(partition.id)
    }

    /// Decompresses the main partition into memory.
    pub fn extract_main_partition(&mut self) -> Result<Vec<u8>> {
        self.reader.decompress_main_partition_auto()
    }

    /// Decompresses every partition into a single buffer.
    pub fn extract_all(&mut self) -> Result<Vec<u8>> {
        self.reader.decompress_all()
    }

    /// Streams the decompressed partition into `writer`; returns the number
    /// of bytes written.
    pub fn extract_partition_to<W: std::io::Write>(
        &mut self,
        id: i32,
        writer: &mut W,
    ) -> Result<u64> {
        self.reader.decompress_partition_to(id, writer)
    }

    /// Streams the decompressed main partition into `writer`; returns the
    /// number of bytes written.
    pub fn extract_main_partition_to<W: std::io::Write>(&mut self, writer: &mut W) -> Result<u64> {
        self.reader.decompress_main_partition_to_auto(writer)
    }

    /// Id of the main partition.
    pub fn main_partition_id(&self) -> Result<i32> {
        self.reader.main_partition_id()
    }

    /// Id of the HFS partition.
    pub fn hfs_partition_id(&self) -> Result<i32> {
        self.reader.hfs_partition_id()
    }

    /// Decompresses the partition with the given id directly into a file.
    ///
    /// Streams through a `BufWriter` instead of materializing the whole
    /// partition in memory first (the previous implementation buffered the
    /// entire decompressed partition in a `Vec` before writing it out).
    pub fn extract_partition_to_file<P: AsRef<Path>>(&mut self, id: i32, path: P) -> Result<()> {
        let mut writer = std::io::BufWriter::new(File::create(path)?);
        self.reader.decompress_partition_to(id, &mut writer)?;
        // BufWriter's Drop swallows flush errors; flush explicitly.
        std::io::Write::flush(&mut writer)?;
        Ok(())
    }

    /// Decompresses the main partition directly into a file (streaming,
    /// see [`DmgArchive::extract_partition_to_file`]).
    pub fn extract_main_partition_to_file<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let mut writer = std::io::BufWriter::new(File::create(path)?);
        self.reader.decompress_main_partition_to_auto(&mut writer)?;
        std::io::Write::flush(&mut writer)?;
        Ok(())
    }

    /// Borrows the parsed koly trailer.
    pub fn koly(&self) -> &KolyHeader {
        self.reader.koly()
    }
}
/// Builder for creating DMG images from in-memory partition data.
pub struct DmgBuilder {
// Compression method applied to the whole image (default: zlib).
compression: CompressionMethod,
// Compression level forwarded to `DmgWriter` (default: 6).
compression_level: u32,
// Chunk size in bytes forwarded to `DmgWriter` (default: 1 MiB).
chunk_size: usize,
// Queued (name, data) pairs, written in insertion order by `build`.
partitions: Vec<(String, Vec<u8>)>,
// When true, checksum generation is skipped (default: false).
skip_checksums: bool,
}
impl Default for DmgBuilder {
fn default() -> Self {
Self::new()
}
}
impl DmgBuilder {
    /// Creates a builder with the defaults: zlib compression at level 6,
    /// 1 MiB chunks, checksums enabled, and no partitions queued.
    pub fn new() -> Self {
        DmgBuilder {
            compression: CompressionMethod::Zlib,
            compression_level: 6,
            chunk_size: 1024 * 1024,
            partitions: Vec::new(),
            skip_checksums: false,
        }
    }

    /// Sets the compression method.
    pub fn compression(mut self, method: CompressionMethod) -> Self {
        self.compression = method;
        self
    }

    /// Sets the compression level.
    pub fn compression_level(mut self, level: u32) -> Self {
        self.compression_level = level;
        self
    }

    /// Sets the chunk size in bytes.
    pub fn chunk_size(mut self, size: usize) -> Self {
        self.chunk_size = size;
        self
    }

    /// Enables or disables checksum generation.
    pub fn skip_checksums(mut self, skip: bool) -> Self {
        self.skip_checksums = skip;
        self
    }

    /// Queues a partition to be written by [`DmgBuilder::build`].
    pub fn add_partition(mut self, name: &str, data: Vec<u8>) -> Self {
        self.partitions.push((name.to_owned(), data));
        self
    }

    /// Writes all queued partitions to `path` and finalizes the image.
    pub fn build<P: AsRef<Path>>(self, path: P) -> Result<()> {
        let DmgBuilder {
            compression,
            compression_level,
            chunk_size,
            partitions,
            skip_checksums,
        } = self;
        let mut writer = DmgWriter::create(path)?
            .compression(compression)
            .compression_level(compression_level)
            .chunk_size(chunk_size)
            .skip_checksums(skip_checksums);
        for (name, data) in partitions {
            writer.add_partition(&name, &data)?;
        }
        writer.finish()
    }
}
/// Returns `true` if the file at `path` looks like a DMG
/// (delegates to the re-exported [`reader::is_dmg`]).
pub fn check_dmg<P: AsRef<Path>>(path: P) -> bool {
is_dmg(path)
}
/// Opens the DMG at `path` and returns its aggregate statistics.
///
/// # Errors
/// Propagates any error from [`DmgReader::open`].
pub fn stats<P: AsRef<Path>>(path: P) -> Result<DmgStats> {
    DmgReader::open(path).map(|reader| reader.stats())
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::Cursor;
// Spot-checks the blkx type discriminants against their enum variants and
// verifies an unknown discriminant is rejected.
#[test]
fn test_block_type_conversion() {
assert_eq!(
BlockType::try_from(0x00000000).unwrap(),
BlockType::ZeroFill
);
assert_eq!(BlockType::try_from(0x80000005).unwrap(), BlockType::Zlib);
assert_eq!(BlockType::try_from(0x80000006).unwrap(), BlockType::Bzip2);
assert_eq!(BlockType::try_from(0x80000007).unwrap(), BlockType::Lzfse);
assert_eq!(BlockType::try_from(0x80000008).unwrap(), BlockType::Xz);
assert_eq!(BlockType::try_from(0xFFFFFFFF).unwrap(), BlockType::End);
assert_eq!(BlockType::try_from(0x7FFFFFFE).unwrap(), BlockType::Comment);
assert!(BlockType::try_from(0x12345678).is_err());
}
// The writer's default compression method must be zlib.
#[test]
fn test_compression_method() {
assert_eq!(CompressionMethod::default(), CompressionMethod::Zlib);
}
// Serializing a koly trailer must yield exactly 512 bytes (KOLY_SIZE).
#[test]
fn test_koly_header_size_is_512() {
use crate::format::{KOLY_MAGIC, KOLY_SIZE, KolyHeader};
assert_eq!(KOLY_SIZE, 512, "KOLY_SIZE constant must be 512");
let koly = KolyHeader {
magic: *KOLY_MAGIC,
version: 4,
header_size: 512,
flags: 1,
running_data_fork_offset: 0,
data_fork_offset: 0,
data_fork_length: 1000,
rsrc_fork_offset: 0,
rsrc_fork_length: 0,
segment_number: 1,
segment_count: 1,
segment_id: [0u8; 16],
data_checksum_type: 2,
data_checksum_size: 32,
data_checksum: [0u8; 128],
plist_offset: 1000,
plist_length: 500,
reserved: [0u8; 64],
master_checksum_type: 2,
master_checksum_size: 32,
master_checksum: [0u8; 128],
image_variant: 1,
sector_count: 100,
};
let mut buf = Vec::new();
koly.write(&mut buf).unwrap();
assert_eq!(
buf.len(),
512,
"Koly header serialization must be exactly 512 bytes"
);
}
// `KolyHeader::write` appends the trailer to existing data, so the "koly"
// magic must land exactly 512 bytes before end-of-file.
#[test]
fn test_koly_magic_position() {
use crate::format::{KOLY_MAGIC, KolyHeader};
let mut dmg_data = vec![0u8; 1024];
let plist = b"<?xml version=\"1.0\"?><plist></plist>";
let plist_offset = dmg_data.len() as u64;
dmg_data.extend_from_slice(plist);
let plist_length = plist.len() as u64;
let koly = KolyHeader {
magic: *KOLY_MAGIC,
version: 4,
header_size: 512,
flags: 1,
running_data_fork_offset: 0,
data_fork_offset: 0,
data_fork_length: plist_offset,
rsrc_fork_offset: 0,
rsrc_fork_length: 0,
segment_number: 1,
segment_count: 1,
segment_id: [0u8; 16],
data_checksum_type: 2,
data_checksum_size: 32,
data_checksum: [0u8; 128],
plist_offset,
plist_length,
reserved: [0u8; 64],
master_checksum_type: 2,
master_checksum_size: 32,
master_checksum: [0u8; 128],
image_variant: 1,
sector_count: 2,
};
koly.write(&mut dmg_data).unwrap();
let total_len = dmg_data.len();
// Trailer occupies the last 512 bytes; magic is its first 4 bytes.
let koly_start = total_len - 512;
assert_eq!(&dmg_data[koly_start..koly_start + 4], b"koly");
}
// Builds a mish header whose descriptor-count field (written early in the
// header) says 999, while the run count stored at byte offset 200 says 2;
// the parser must trust the offset-200 count for the actual run table.
#[test]
fn test_mish_block_count_at_offset_200() {
    use crate::format::{MISH_MAGIC, MishHeader};
    use byteorder::{BigEndian, WriteBytesExt};
    let mut mish_data = Vec::new();
    // Fixed-size header portion.
    mish_data.extend_from_slice(MISH_MAGIC);
    mish_data.write_u32::<BigEndian>(1).unwrap();
    mish_data.write_u64::<BigEndian>(0).unwrap();
    mish_data.write_u64::<BigEndian>(10).unwrap();
    mish_data.write_u64::<BigEndian>(0).unwrap();
    mish_data.write_u32::<BigEndian>(0).unwrap();
    // Bogus descriptor count (999) — must be exposed but not trusted.
    mish_data.write_u32::<BigEndian>(999).unwrap();
    mish_data.extend_from_slice(&[0u8; 24]);
    mish_data.write_u32::<BigEndian>(2).unwrap();
    mish_data.write_u32::<BigEndian>(32).unwrap();
    mish_data.extend_from_slice(&[0u8; 128]);
    // Actual run count at offset 200.
    mish_data.write_u32::<BigEndian>(2).unwrap();
    // Run 1: zero-fill over 10 sectors.
    mish_data.write_u32::<BigEndian>(0x00000000).unwrap();
    mish_data.write_u32::<BigEndian>(0).unwrap();
    mish_data.write_u64::<BigEndian>(0).unwrap();
    mish_data.write_u64::<BigEndian>(10).unwrap();
    mish_data.write_u64::<BigEndian>(0).unwrap();
    mish_data.write_u64::<BigEndian>(0).unwrap();
    // Run 2: end marker.
    mish_data.write_u32::<BigEndian>(0xFFFFFFFF).unwrap();
    mish_data.write_u32::<BigEndian>(0).unwrap();
    mish_data.write_u64::<BigEndian>(10).unwrap();
    mish_data.write_u64::<BigEndian>(0).unwrap();
    mish_data.write_u64::<BigEndian>(0).unwrap();
    mish_data.write_u64::<BigEndian>(0).unwrap();
    let mish = MishHeader::from_bytes(&mish_data).unwrap();
    assert_eq!(mish.block_runs.len(), 2);
    assert_eq!(mish.actual_block_count, 2);
    assert_eq!(mish.block_descriptor_count, 999);
}
// A mish header with zero block runs is exactly 204 bytes and parses to an
// empty run table.
#[test]
fn test_mish_header_size_is_204() {
use crate::format::{MISH_MAGIC, MishHeader};
use byteorder::{BigEndian, WriteBytesExt};
let mut mish_data = Vec::new();
mish_data.extend_from_slice(MISH_MAGIC);
mish_data.write_u32::<BigEndian>(1).unwrap();
mish_data.write_u64::<BigEndian>(0).unwrap();
mish_data.write_u64::<BigEndian>(1).unwrap();
mish_data.write_u64::<BigEndian>(0).unwrap();
mish_data.write_u32::<BigEndian>(0).unwrap();
mish_data.write_u32::<BigEndian>(0).unwrap();
mish_data.extend_from_slice(&[0u8; 24]);
mish_data.write_u32::<BigEndian>(2).unwrap();
mish_data.write_u32::<BigEndian>(32).unwrap();
mish_data.extend_from_slice(&[0u8; 128]);
// Zero block runs — nothing follows the fixed header.
mish_data.write_u32::<BigEndian>(0).unwrap();
assert_eq!(mish_data.len(), 204);
let mish = MishHeader::from_bytes(&mish_data).unwrap();
assert_eq!(mish.block_runs.len(), 0);
}
// lzfse::decode_buffer can fail when handed a destination buffer sized
// exactly to the output; documents that a larger scratch buffer succeeds.
#[test]
fn test_lzfse_needs_larger_buffer() {
let original = b"Hello, World! This is a test of LZFSE compression. ".repeat(10);
let mut compressed = vec![0u8; original.len() + 4096];
let compressed_len = lzfse::encode_buffer(&original, &mut compressed).unwrap();
compressed.truncate(compressed_len);
let mut exact_buf = vec![0u8; original.len()];
let result = lzfse::decode_buffer(&compressed, &mut exact_buf);
// If the exact-size buffer was rejected, retry with 2x headroom.
if result.is_err() {
let mut large_buf = vec![0u8; original.len() * 2];
let decoded_len = lzfse::decode_buffer(&compressed, &mut large_buf).unwrap();
assert_eq!(decoded_len, original.len());
assert_eq!(&large_buf[..decoded_len], &original[..]);
}
}
// A zlib stream shorter than one 512-byte sector must decode into the start
// of the sector buffer, leaving the remainder zero-filled.
#[test]
fn test_zlib_partial_sector() {
    use flate2::Compression;
    use flate2::read::ZlibDecoder;
    use flate2::write::ZlibEncoder;
    use std::io::{Read, Write};
    let original = b"This is test data that is not aligned to 512-byte sectors!";
    let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
    encoder.write_all(original).unwrap();
    let compressed = encoder.finish().unwrap();
    let mut sector_buf = vec![0u8; 512];
    let mut decoder = ZlibDecoder::new(&compressed[..]);
    let bytes_read = decoder.read(&mut sector_buf).unwrap();
    assert_eq!(bytes_read, original.len());
    // Fix: the original line contained mojibake (`§or_buf` in place of
    // `&sector_buf`), which does not compile.
    assert_eq!(&sector_buf[..bytes_read], &original[..]);
    assert!(sector_buf[bytes_read..].iter().all(|&b| b == 0));
}
// Write/read roundtrip for data that is already a multiple of 512 bytes;
// extracted output may carry sector padding, so compare the prefix.
#[test]
fn test_roundtrip_sector_aligned() {
let original = vec![0x42u8; 1024];
let mut dmg_buf = Vec::new();
{
let mut writer = DmgWriter::new(Cursor::new(&mut dmg_buf));
writer.add_partition("test", &original).unwrap();
writer.finish().unwrap();
}
let mut reader = DmgReader::new(Cursor::new(&dmg_buf)).unwrap();
let extracted = reader.decompress_partition(0).unwrap();
assert!(extracted.len() >= original.len());
assert_eq!(&extracted[..original.len()], &original[..]);
}
// Roundtrip for data shorter than one sector (writer must pad).
#[test]
fn test_roundtrip_non_sector_aligned() {
let original = b"Short test data that is not sector aligned".to_vec();
let mut dmg_buf = Vec::new();
{
let mut writer = DmgWriter::new(Cursor::new(&mut dmg_buf));
writer.add_partition("test", &original).unwrap();
writer.finish().unwrap();
}
let mut reader = DmgReader::new(Cursor::new(&dmg_buf)).unwrap();
let extracted = reader.decompress_partition(0).unwrap();
assert!(extracted.len() >= original.len());
assert_eq!(&extracted[..original.len()], &original[..]);
}
// A zero-byte partition must still be written and listed on read-back.
#[test]
fn test_roundtrip_empty_data() {
let original: Vec<u8> = vec![];
let mut dmg_buf = Vec::new();
{
let mut writer = DmgWriter::new(Cursor::new(&mut dmg_buf));
writer.add_partition("empty", &original).unwrap();
writer.finish().unwrap();
}
let reader = DmgReader::new(Cursor::new(&dmg_buf)).unwrap();
let partitions = reader.partitions();
assert_eq!(partitions.len(), 1);
}
// All-zero data roundtrips exactly (exercises zero-fill block handling).
#[test]
fn test_roundtrip_zeros() {
let original = vec![0u8; 2048];
let mut dmg_buf = Vec::new();
{
let mut writer = DmgWriter::new(Cursor::new(&mut dmg_buf));
writer.add_partition("zeros", &original).unwrap();
writer.finish().unwrap();
}
let mut reader = DmgReader::new(Cursor::new(&dmg_buf)).unwrap();
let extracted = reader.decompress_partition(0).unwrap();
assert_eq!(extracted.len(), original.len());
assert!(extracted.iter().all(|&b| b == 0));
}
// A serialized blkx run entry is exactly 40 bytes and roundtrips through
// to_bytes / from_bytes without loss.
#[test]
fn test_block_run_size() {
use crate::format::{BlockRun, BlockType};
let block_run = BlockRun {
block_type: BlockType::Zlib,
comment: 0,
sector_number: 100,
sector_count: 50,
compressed_offset: 1000,
compressed_length: 500,
};
let bytes = block_run.to_bytes();
assert_eq!(bytes.len(), 40, "Block run must be exactly 40 bytes");
let parsed = BlockRun::from_bytes(&bytes).unwrap();
assert_eq!(parsed.block_type, BlockType::Zlib);
assert_eq!(parsed.sector_number, 100);
assert_eq!(parsed.sector_count, 50);
assert_eq!(parsed.compressed_offset, 1000);
assert_eq!(parsed.compressed_length, 500);
}
// format::is_dmg must only accept "koly" when it sits 512 bytes before
// EOF (start of the trailer), not at any other position.
#[test]
fn test_is_dmg_checks_correct_offset() {
use crate::format::is_dmg;
let mut fake_dmg = vec![0u8; 600];
let len = fake_dmg.len();
// Magic at the very end of the file — wrong position.
fake_dmg[len - 4..].copy_from_slice(b"koly");
let mut cursor = Cursor::new(&fake_dmg);
assert!(
!is_dmg(&mut cursor),
"Should not detect koly at wrong offset"
);
let mut real_dmg = vec![0u8; 600];
let len = real_dmg.len();
// Magic at EOF-512 — start of a well-placed koly trailer.
real_dmg[len - 512..len - 508].copy_from_slice(b"koly");
let mut cursor = Cursor::new(&real_dmg);
assert!(is_dmg(&mut cursor), "Should detect koly at correct offset");
}
// Write/read roundtrip across the always-available compression methods.
#[test]
fn test_compression_methods() {
let original = b"Test data for compression testing. ".repeat(100);
for method in [
CompressionMethod::Raw,
CompressionMethod::Zlib,
CompressionMethod::Bzip2,
] {
let mut dmg_buf = Vec::new();
{
let mut writer = DmgWriter::new(Cursor::new(&mut dmg_buf)).compression(method);
writer.add_partition("test", &original).unwrap();
writer.finish().unwrap();
}
let mut reader = DmgReader::new(Cursor::new(&dmg_buf)).unwrap();
let extracted = reader.decompress_partition(0).unwrap();
assert!(
extracted.len() >= original.len(),
"Extracted should be at least as large as original for {:?}",
method
);
assert_eq!(
&extracted[..original.len()],
&original[..],
"Data mismatch for {:?}",
method
);
}
}
// Dedicated roundtrip for LZFSE-compressed images.
#[test]
fn test_lzfse_compression_roundtrip() {
let original = b"LZFSE compression test data. ".repeat(100);
let mut dmg_buf = Vec::new();
{
let mut writer =
DmgWriter::new(Cursor::new(&mut dmg_buf)).compression(CompressionMethod::Lzfse);
writer.add_partition("test", &original).unwrap();
writer.finish().unwrap();
}
let mut reader = DmgReader::new(Cursor::new(&dmg_buf)).unwrap();
let extracted = reader.decompress_partition(0).unwrap();
assert!(extracted.len() >= original.len());
assert_eq!(&extracted[..original.len()], &original[..]);
}
// XZ encode/decode roundtrip via reader::read_full; also checks the XZ
// stream magic bytes.
#[test]
fn test_xz_roundtrip() {
use std::io::Write;
use xz2::read::XzDecoder;
use xz2::write::XzEncoder;
let original = b"XZ compression roundtrip test data. ".repeat(100);
let mut encoder = XzEncoder::new(Vec::new(), 6);
encoder.write_all(&original).unwrap();
let compressed = encoder.finish().unwrap();
assert_eq!(&compressed[..6], &[0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00]);
let mut decoder = XzDecoder::new(&compressed[..]);
let mut decompressed = vec![0u8; original.len()];
let n = crate::reader::read_full(&mut decoder, &mut decompressed).unwrap();
assert_eq!(n, original.len());
assert_eq!(&decompressed[..], &original[..]);
}
// Smoke test against a real DMG fixture; #[ignore]d because the fixture is
// not checked in.
#[test]
#[ignore]
fn test_real_dmg_if_available() {
let test_dmg = "../tests/kdk.dmg";
let archive = DmgArchive::open(test_dmg).unwrap();
let stats = archive.stats();
assert_eq!(stats.version, 4);
assert!(stats.partition_count > 0);
assert!(stats.total_uncompressed > stats.total_compressed);
let partitions = archive.partitions();
assert!(!partitions.is_empty());
let hfsx = partitions.iter().find(|p| p.name.contains("HFSX"));
assert!(hfsx.is_some(), "Should have HFSX partition");
}
// Buffered vs streaming extraction of a real DMG must be byte-identical;
// both must start with the HFSX volume signature at offset 1024.
#[test]
#[ignore]
fn test_real_dmg_decompress() {
let test_dmg = "../tests/kdk.dmg";
let mut archive = DmgArchive::open(test_dmg).unwrap();
let data = archive.extract_main_partition().unwrap();
assert_eq!(&data[1024..1026], &[0x48, 0x58], "Should be HFSX signature");
let mut archive2 = DmgArchive::open(test_dmg).unwrap();
let mut buf = Vec::new();
let _n = archive2.extract_main_partition_to(&mut buf).unwrap();
assert_eq!(&buf[1024..1026], &[0x48, 0x58], "Should be HFSX signature");
assert_eq!(data.len(), buf.len());
assert_eq!(
data, buf,
"Buffered and streaming should produce identical output"
);
}
// XZ-compressed real-world image (Google Chrome installer); #[ignore]d.
#[test]
#[ignore]
fn test_xz_dmg_googlechrome() {
let test_dmg = "../tests/googlechrome.dmg";
let archive = DmgArchive::open(test_dmg).unwrap();
let comp_info = archive.compression_info();
assert!(comp_info.xz_blocks > 0, "Should have XZ blocks");
let mut archive = DmgArchive::open(test_dmg).unwrap();
let data = archive.extract_main_partition().unwrap();
assert!(
!data.iter().all(|&b| b == 0),
"Decompressed data should not be all zeros"
);
if data.len() > 1026 {
let sig = &data[1024..1026];
assert!(
sig == [0x48, 0x2B] || sig == [0x48, 0x58],
"Should have HFS+/HFSX signature, got {:02X}{:02X}",
sig[0],
sig[1]
);
}
}
// Extraction must still work when checksum verification is turned off.
#[test]
fn test_checksum_verification_disabled() {
let original = b"Test data for checksum verification".repeat(20);
let mut dmg_buf = Vec::new();
{
let mut writer = DmgWriter::new(Cursor::new(&mut dmg_buf));
writer.add_partition("test", &original).unwrap();
writer.finish().unwrap();
}
let options = reader::DmgReaderOptions {
verify_checksums: false,
};
let mut reader = DmgReader::with_options(Cursor::new(&dmg_buf), options).unwrap();
let extracted = reader.decompress_partition(0).unwrap();
assert!(extracted.len() >= original.len());
assert_eq!(&extracted[..original.len()], &original[..]);
}
// Default reader (verification on) must accept a freshly written image.
#[test]
fn test_checksum_verification_enabled_with_zero_checksums() {
let original = b"Test data for checksum verification".repeat(20);
let mut dmg_buf = Vec::new();
{
let mut writer = DmgWriter::new(Cursor::new(&mut dmg_buf));
writer.add_partition("test", &original).unwrap();
writer.finish().unwrap();
}
let mut reader = DmgReader::new(Cursor::new(&dmg_buf)).unwrap();
let extracted = reader.decompress_partition(0).unwrap();
assert!(extracted.len() >= original.len());
assert_eq!(&extracted[..original.len()], &original[..]);
}
// DmgArchive::open_with_options end-to-end through a temp file on disk.
#[test]
fn test_dmg_archive_with_options() {
let original = b"Test data".repeat(10);
let mut dmg_buf = Vec::new();
{
let mut writer = DmgWriter::new(Cursor::new(&mut dmg_buf));
writer.add_partition("test", &original).unwrap();
writer.finish().unwrap();
}
let temp_dir = tempfile::tempdir().unwrap();
let temp_path = temp_dir.path().join("test.dmg");
std::fs::write(&temp_path, &dmg_buf).unwrap();
let options = reader::DmgReaderOptions {
verify_checksums: false,
};
let mut archive = DmgArchive::open_with_options(&temp_path, options).unwrap();
let extracted = archive.extract_partition(0).unwrap();
assert!(extracted.len() >= original.len());
assert_eq!(&extracted[..original.len()], &original[..]);
}
// Roundtrips data and then checks the writer populated non-zero data and
// master checksum fields in the koly trailer.
#[test]
fn test_checksum_roundtrip_with_verification() {
    let original = b"Test data for checksum roundtrip verification. ".repeat(50);
    let mut dmg_buf = Vec::new();
    {
        let mut writer = DmgWriter::new(Cursor::new(&mut dmg_buf));
        writer.add_partition("test", &original).unwrap();
        writer.finish().unwrap();
    }
    let mut reader = DmgReader::new(Cursor::new(&dmg_buf)).unwrap();
    let extracted = reader.decompress_partition(0).unwrap();
    assert!(extracted.len() >= original.len());
    assert_eq!(&extracted[..original.len()], &original[..]);
    let koly = reader.koly();
    // A non-zero leading checksum word proves a checksum was computed.
    assert_eq!(koly.data_checksum_type, 2);
    assert_ne!(&koly.data_checksum[..4], &[0u8; 4]);
    assert_eq!(koly.master_checksum_type, 2);
    assert_ne!(&koly.master_checksum[..4], &[0u8; 4]);
}
// Corrupts the start of a freshly written image and expects open to fail
// with a checksum mismatch carrying differing expected/actual values.
#[test]
fn test_checksum_detection_corrupted_data() {
    let original = b"Test data for corruption detection".repeat(20);
    let mut dmg_buf = Vec::new();
    {
        let mut writer = DmgWriter::new(Cursor::new(&mut dmg_buf));
        writer.add_partition("test", &original).unwrap();
        writer.finish().unwrap();
    }
    // Flip every bit in the first 100 bytes.
    for byte in dmg_buf.iter_mut().take(100) {
        *byte ^= 0xFF;
    }
    let result = DmgReader::new(Cursor::new(&dmg_buf));
    assert!(result.is_err());
    match result {
        Err(DppError::ChecksumMismatch { expected, actual }) => {
            assert_ne!(expected, actual);
        }
        _ => panic!("Expected ChecksumMismatch error"),
    }
}
// Default (verifying) reader must accept writer output for every
// compression method, and data must roundtrip intact.
#[test]
fn test_checksum_all_compression_methods() {
let original = b"Testing checksums with all compressions! ".repeat(100);
for method in [
CompressionMethod::Raw,
CompressionMethod::Zlib,
CompressionMethod::Bzip2,
CompressionMethod::Lzfse,
] {
let mut dmg_buf = Vec::new();
{
let mut writer = DmgWriter::new(Cursor::new(&mut dmg_buf)).compression(method);
writer.add_partition("test", &original).unwrap();
writer.finish().unwrap();
}
let mut reader = DmgReader::new(Cursor::new(&dmg_buf))
.unwrap_or_else(|e| panic!("Failed to open DMG with {:?}: {:?}", method, e));
let extracted = reader
.decompress_partition(0)
.unwrap_or_else(|e| panic!("Failed to decompress with {:?}: {:?}", method, e));
assert!(
extracted.len() >= original.len(),
"Extracted size mismatch for {:?}",
method
);
assert_eq!(
&extracted[..original.len()],
&original[..],
"Data mismatch for {:?}",
method
);
}
}
}
// Tests for the optional "parallel" feature: parallel decompression must be
// a drop-in replacement for the sequential paths.
#[cfg(test)]
#[cfg(feature = "parallel")]
mod parallel_tests {
use super::*;
use std::io::Cursor;
// Parallel output must match sequential output for every method.
#[test]
fn test_parallel_matches_sequential() {
let original = b"Test data for parallel decompression. ".repeat(100);
for method in [
CompressionMethod::Raw,
CompressionMethod::Zlib,
CompressionMethod::Bzip2,
CompressionMethod::Lzfse,
] {
let mut dmg_buf = Vec::new();
{
let mut writer = DmgWriter::new(Cursor::new(&mut dmg_buf)).compression(method);
writer.add_partition("test", &original).unwrap();
writer.finish().unwrap();
}
let mut reader1 = DmgReader::new(Cursor::new(&dmg_buf)).unwrap();
let sequential = reader1.decompress_partition(0).unwrap();
let mut reader2 = DmgReader::new(Cursor::new(&dmg_buf)).unwrap();
let parallel = reader2.decompress_partition_parallel(0).unwrap();
assert_eq!(sequential, parallel, "Parallel mismatch for {:?}", method);
}
}
// Parallel streaming into a writer must match the in-memory result.
#[test]
fn test_parallel_to_writer() {
let original = b"Test data for parallel writer. ".repeat(100);
let mut dmg_buf = Vec::new();
{
let mut writer =
DmgWriter::new(Cursor::new(&mut dmg_buf)).compression(CompressionMethod::Zlib);
writer.add_partition("test", &original).unwrap();
writer.finish().unwrap();
}
let mut reader1 = DmgReader::new(Cursor::new(&dmg_buf)).unwrap();
let sequential = reader1.decompress_partition(0).unwrap();
let mut reader2 = DmgReader::new(Cursor::new(&dmg_buf)).unwrap();
let mut output = Vec::new();
reader2
.decompress_partition_to_parallel(0, &mut output)
.unwrap();
assert_eq!(sequential, output);
}
// Parallel extraction of an empty partition must not panic; any padding
// produced must be all zeros.
#[test]
fn test_parallel_empty_partition() {
let original: Vec<u8> = vec![];
let mut dmg_buf = Vec::new();
{
let mut writer = DmgWriter::new(Cursor::new(&mut dmg_buf));
writer.add_partition("empty", &original).unwrap();
writer.finish().unwrap();
}
let mut reader = DmgReader::new(Cursor::new(&dmg_buf)).unwrap();
let result = reader.decompress_partition_parallel(0).unwrap();
assert!(result.iter().all(|&b| b == 0));
}
// Parallel path must handle zero-fill blocks exactly.
#[test]
fn test_parallel_zeros() {
let original = vec![0u8; 2048];
let mut dmg_buf = Vec::new();
{
let mut writer = DmgWriter::new(Cursor::new(&mut dmg_buf));
writer.add_partition("zeros", &original).unwrap();
writer.finish().unwrap();
}
let mut reader = DmgReader::new(Cursor::new(&dmg_buf)).unwrap();
let result = reader.decompress_partition_parallel(0).unwrap();
assert_eq!(result.len(), original.len());
assert!(result.iter().all(|&b| b == 0));
}
// decompress_partition_auto must agree with the explicit parallel path.
#[test]
fn test_auto_selects_parallel() {
let original = b"Auto-select test data. ".repeat(50);
let mut dmg_buf = Vec::new();
{
let mut writer =
DmgWriter::new(Cursor::new(&mut dmg_buf)).compression(CompressionMethod::Zlib);
writer.add_partition("test", &original).unwrap();
writer.finish().unwrap();
}
let mut reader1 = DmgReader::new(Cursor::new(&dmg_buf)).unwrap();
let auto_result = reader1.decompress_partition_auto(0).unwrap();
let mut reader2 = DmgReader::new(Cursor::new(&dmg_buf)).unwrap();
let parallel_result = reader2.decompress_partition_parallel(0).unwrap();
assert_eq!(auto_result, parallel_result);
}
}