use std::fs::File;
use std::io::{BufReader, Read, Seek, SeekFrom, Write};
use std::path::Path;
#[cfg(feature = "parallel")]
use rayon::prelude::*;
use crate::checksum::{has_checksum, verify_crc32};
use crate::error::{DppError, Result};
use crate::format::{BlockType, KolyHeader, MishHeader, PartitionEntry};
const SECTOR_SIZE: u64 = 512;
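/// Reads from `reader` until `buf` is full or EOF is reached, returning the
/// number of bytes actually read. Unlike `read_exact`, a short read is not an
/// error; callers rely on the destination buffer being pre-zeroed.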
pub(crate) fn read_full<R: Read>(reader: &mut R, buf: &mut [u8]) -> std::io::Result<usize> {
let mut total = 0;
while total < buf.len() {
match reader.read(&mut buf[total..])? {
            0 => break,
            n => total += n,
}
}
Ok(total)
}
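/// Options controlling how a DMG is opened.
///
/// `verify_checksums` (enabled by default) validates the data-fork and master
/// checksums from the `koly` header before any decompression.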
#[derive(Debug, Clone)]
pub struct DmgReaderOptions {
pub verify_checksums: bool,
}
impl Default for DmgReaderOptions {
fn default() -> Self {
Self {
verify_checksums: true,
}
}
}
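/// Reads Apple disk images (DMG) from any source implementing `Read + Seek`.
///
/// Construction parses the `koly` header and the embedded XML property list
/// describing each partition's block map; the `decompress_*` methods then
/// decode block runs (raw, zlib, bzip2, LZFSE, XZ) on demand.
///
/// A minimal usage sketch; the `use` path is hypothetical and should be
/// adjusted to this crate's actual name:
///
/// ```ignore
/// use the_crate::DmgReader; // hypothetical crate/module path
///
/// let mut dmg = DmgReader::open("example.dmg")?;
/// println!("partitions: {:?}", dmg.list_partitions());
/// let bytes = dmg.decompress_main_partition()?;
/// println!("decompressed {} bytes", bytes.len());
/// ```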
pub struct DmgReader<R> {
reader: R,
koly: KolyHeader,
partitions: Vec<PartitionEntry>,
}
impl<R: Read + Seek> DmgReader<R> {
pub fn new(reader: R) -> Result<Self> {
Self::with_options(reader, DmgReaderOptions::default())
}
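    /// Creates a reader with explicit options: reads the `koly` header,
    /// optionally verifies checksums, then parses the partition table from
    /// the embedded property list.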
pub fn with_options(mut reader: R, options: DmgReaderOptions) -> Result<Self> {
let koly = KolyHeader::read(&mut reader)?;
if options.verify_checksums {
Self::verify_data_fork_checksum(&mut reader, &koly)?;
}
reader.seek(SeekFrom::Start(koly.plist_offset))?;
let mut plist_data = vec![0u8; koly.plist_length as usize];
reader.read_exact(&mut plist_data)?;
let partitions = parse_plist(&plist_data)?;
if options.verify_checksums {
Self::verify_master_checksum(&koly, &partitions)?;
}
Ok(DmgReader {
reader,
koly,
partitions,
})
}
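    /// Verifies the CRC-32 of the entire data fork against the `koly` header,
    /// if a checksum is present.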
fn verify_data_fork_checksum(reader: &mut R, koly: &KolyHeader) -> Result<()> {
if !has_checksum(koly.data_checksum_type, &koly.data_checksum) {
return Ok(());
}
reader.seek(SeekFrom::Start(koly.data_fork_offset))?;
let mut data_fork = vec![0u8; koly.data_fork_length as usize];
reader.read_exact(&mut data_fork)?;
verify_crc32(koly.data_checksum_type, &koly.data_checksum, &data_fork)
.map_err(|(expected, actual)| DppError::ChecksumMismatch { expected, actual })
}
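    /// Verifies the master checksum, a CRC-32 computed over the first four
    /// bytes of every partition's block-map checksum, concatenated in order.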
fn verify_master_checksum(koly: &KolyHeader, partitions: &[PartitionEntry]) -> Result<()> {
if !has_checksum(koly.master_checksum_type, &koly.master_checksum) {
return Ok(());
}
let mut all_checksums = Vec::new();
for partition in partitions {
all_checksums.extend_from_slice(&partition.block_map.checksum[..4]);
}
verify_crc32(
koly.master_checksum_type,
&koly.master_checksum,
&all_checksums,
)
.map_err(|(expected, actual)| DppError::ChecksumMismatch { expected, actual })
}
pub fn koly(&self) -> &KolyHeader {
&self.koly
}
pub fn partitions(&self) -> &[PartitionEntry] {
&self.partitions
}
pub fn partition(&self, name: &str) -> Option<&PartitionEntry> {
self.partitions.iter().find(|p| p.name == name)
}
pub fn list_partitions(&self) -> Vec<&str> {
self.partitions.iter().map(|p| p.name.as_str()).collect()
}
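    /// Returns summary statistics: sector counts, partition count, and total
    /// compressed vs. uncompressed sizes.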
pub fn stats(&self) -> DmgStats {
let total_sectors: u64 = self
.partitions
.iter()
.map(|p| p.block_map.sector_count)
.sum();
let total_compressed: u64 = self
.partitions
.iter()
.map(|p| p.block_map.compressed_size())
.sum();
DmgStats {
version: self.koly.version,
sector_count: self.koly.sector_count,
partition_count: self.partitions.len(),
total_uncompressed: total_sectors * SECTOR_SIZE,
total_compressed,
data_fork_length: self.koly.data_fork_length,
}
}
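    /// Decompresses a single partition into memory.
    ///
    /// The output is pre-zeroed to `sector_count * 512` bytes and each block
    /// run is decoded in place according to its block type (raw, zlib, bzip2,
    /// LZFSE, or XZ). ADC-compressed runs are not supported.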
pub fn decompress_partition(&mut self, partition_id: i32) -> Result<Vec<u8>> {
let partition = self
.partitions
.iter()
.find(|p| p.id == partition_id)
.ok_or_else(|| DppError::FileNotFound(format!("partition {}", partition_id)))?
.clone();
let total_size = partition.block_map.sector_count * SECTOR_SIZE;
let mut output = vec![0u8; total_size as usize];
for block_run in &partition.block_map.block_runs {
let out_offset = block_run.sector_number * SECTOR_SIZE;
let out_size = block_run.sector_count * SECTOR_SIZE;
match block_run.block_type {
BlockType::ZeroFill => {
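                    // Nothing to do: the output buffer is pre-zeroed.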
}
BlockType::Raw | BlockType::Ignore => {
if block_run.compressed_length > 0 {
self.reader.seek(SeekFrom::Start(
self.koly.data_fork_offset + block_run.compressed_offset,
))?;
let end = out_offset as usize + block_run.compressed_length as usize;
self.reader
.read_exact(&mut output[out_offset as usize..end])?;
}
}
BlockType::Zlib => {
self.reader.seek(SeekFrom::Start(
self.koly.data_fork_offset + block_run.compressed_offset,
))?;
let mut compressed = vec![0u8; block_run.compressed_length as usize];
self.reader.read_exact(&mut compressed)?;
let mut decoder = flate2::read::ZlibDecoder::new(&compressed[..]);
let slice = &mut output[out_offset as usize..(out_offset + out_size) as usize];
read_full(&mut decoder, slice)?;
}
BlockType::Bzip2 => {
self.reader.seek(SeekFrom::Start(
self.koly.data_fork_offset + block_run.compressed_offset,
))?;
let mut compressed = vec![0u8; block_run.compressed_length as usize];
self.reader.read_exact(&mut compressed)?;
let mut decoder = bzip2::read::BzDecoder::new(&compressed[..]);
let slice = &mut output[out_offset as usize..(out_offset + out_size) as usize];
read_full(&mut decoder, slice)?;
}
BlockType::Lzfse => {
self.reader.seek(SeekFrom::Start(
self.koly.data_fork_offset + block_run.compressed_offset,
))?;
let mut compressed = vec![0u8; block_run.compressed_length as usize];
self.reader.read_exact(&mut compressed)?;
let expected_size = out_size as usize;
let mut temp_buf = vec![0u8; expected_size * 2];
let decoded_size = lzfse::decode_buffer(&compressed, &mut temp_buf)
.map_err(|e| DppError::Decompression(format!("LZFSE: {:?}", e)))?;
let copy_size = decoded_size.min(expected_size);
let end = out_offset as usize + copy_size;
output[out_offset as usize..end].copy_from_slice(&temp_buf[..copy_size]);
}
BlockType::Xz => {
self.reader.seek(SeekFrom::Start(
self.koly.data_fork_offset + block_run.compressed_offset,
))?;
let mut compressed = vec![0u8; block_run.compressed_length as usize];
self.reader.read_exact(&mut compressed)?;
let mut decoder = xz2::read::XzDecoder::new(&compressed[..]);
let slice = &mut output[out_offset as usize..(out_offset + out_size) as usize];
read_full(&mut decoder, slice)?;
}
BlockType::Adc => {
return Err(DppError::Unsupported("ADC compression".into()));
}
BlockType::Comment | BlockType::End => {
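                    // Comment and end-of-map entries carry no sector data.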
}
}
}
Ok(output)
}
pub fn decompress_partition_to<W: Write>(
&mut self,
partition_id: i32,
writer: &mut W,
) -> Result<u64> {
let partition = self
.partitions
.iter()
.find(|p| p.id == partition_id)
.ok_or_else(|| DppError::FileNotFound(format!("partition {}", partition_id)))?
.clone();
        let total_size = partition.block_map.sector_count * SECTOR_SIZE;
let mut bytes_written = 0u64;
for block_run in &partition.block_map.block_runs {
let out_offset = block_run.sector_number * SECTOR_SIZE;
let out_size = block_run.sector_count * SECTOR_SIZE;
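            // Pad any gap before this run with zeros so the output stays
            // aligned to the run's sector offset.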
if out_offset > bytes_written {
let gap = (out_offset - bytes_written) as usize;
let zeros = vec![0u8; gap];
writer.write_all(&zeros)?;
bytes_written += gap as u64;
}
match block_run.block_type {
BlockType::ZeroFill => {
let zeros = vec![0u8; out_size as usize];
writer.write_all(&zeros)?;
bytes_written += out_size;
}
BlockType::Raw | BlockType::Ignore => {
if block_run.compressed_length > 0 {
self.reader.seek(SeekFrom::Start(
self.koly.data_fork_offset + block_run.compressed_offset,
))?;
let mut buf = vec![0u8; block_run.compressed_length as usize];
self.reader.read_exact(&mut buf)?;
writer.write_all(&buf)?;
bytes_written += block_run.compressed_length;
let remaining = out_size - block_run.compressed_length;
if remaining > 0 {
let zeros = vec![0u8; remaining as usize];
writer.write_all(&zeros)?;
bytes_written += remaining;
}
} else {
let zeros = vec![0u8; out_size as usize];
writer.write_all(&zeros)?;
bytes_written += out_size;
}
}
BlockType::Zlib => {
self.reader.seek(SeekFrom::Start(
self.koly.data_fork_offset + block_run.compressed_offset,
))?;
let mut compressed = vec![0u8; block_run.compressed_length as usize];
self.reader.read_exact(&mut compressed)?;
let mut decoder = flate2::read::ZlibDecoder::new(&compressed[..]);
let mut decompressed = vec![0u8; out_size as usize];
read_full(&mut decoder, &mut decompressed)?;
writer.write_all(&decompressed)?;
bytes_written += out_size;
}
BlockType::Bzip2 => {
self.reader.seek(SeekFrom::Start(
self.koly.data_fork_offset + block_run.compressed_offset,
))?;
let mut compressed = vec![0u8; block_run.compressed_length as usize];
self.reader.read_exact(&mut compressed)?;
let mut decoder = bzip2::read::BzDecoder::new(&compressed[..]);
let mut decompressed = vec![0u8; out_size as usize];
read_full(&mut decoder, &mut decompressed)?;
writer.write_all(&decompressed)?;
bytes_written += out_size;
}
BlockType::Lzfse => {
self.reader.seek(SeekFrom::Start(
self.koly.data_fork_offset + block_run.compressed_offset,
))?;
let mut compressed = vec![0u8; block_run.compressed_length as usize];
self.reader.read_exact(&mut compressed)?;
let expected_size = out_size as usize;
let mut temp_buf = vec![0u8; expected_size * 2];
let decoded_size = lzfse::decode_buffer(&compressed, &mut temp_buf)
.map_err(|e| DppError::Decompression(format!("LZFSE: {:?}", e)))?;
let mut block = vec![0u8; expected_size];
let copy_size = decoded_size.min(expected_size);
block[..copy_size].copy_from_slice(&temp_buf[..copy_size]);
writer.write_all(&block)?;
bytes_written += expected_size as u64;
}
BlockType::Xz => {
self.reader.seek(SeekFrom::Start(
self.koly.data_fork_offset + block_run.compressed_offset,
))?;
let mut compressed = vec![0u8; block_run.compressed_length as usize];
self.reader.read_exact(&mut compressed)?;
let mut decoder = xz2::read::XzDecoder::new(&compressed[..]);
let mut decompressed = vec![0u8; out_size as usize];
read_full(&mut decoder, &mut decompressed)?;
writer.write_all(&decompressed)?;
bytes_written += out_size;
}
BlockType::Adc => {
return Err(DppError::Unsupported("ADC compression".into()));
}
BlockType::Comment | BlockType::End => {
}
}
}
        if bytes_written < total_size {
            let remaining = (total_size - bytes_written) as usize;
let zeros = vec![0u8; remaining];
writer.write_all(&zeros)?;
bytes_written += remaining as u64;
}
Ok(bytes_written)
}
pub fn decompress_main_partition(&mut self) -> Result<Vec<u8>> {
let id = self.main_partition_id()?;
self.decompress_partition(id)
}
pub fn decompress_main_partition_to<W: Write>(&mut self, writer: &mut W) -> Result<u64> {
let id = self.main_partition_id()?;
self.decompress_partition_to(id, writer)
}
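    /// Returns the ID of the "main" partition: the largest HFS+/HFSX/APFS
    /// partition, or the largest partition overall if none match.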
pub fn main_partition_id(&self) -> Result<i32> {
let partition = self
.partitions
.iter()
.filter(|p| {
p.name.contains("Apple_HFS")
|| p.name.contains("Apple_HFSX")
|| p.name.contains("Apple_APFS")
})
.max_by_key(|p| p.block_map.sector_count)
.or_else(|| {
self.partitions
.iter()
.max_by_key(|p| p.block_map.sector_count)
})
.ok_or_else(|| DppError::FileNotFound("no partitions found".into()))?;
Ok(partition.id)
}
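    /// Returns the ID of the largest HFS+/HFSX partition, if any.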
pub fn hfs_partition_id(&self) -> Result<i32> {
let partition = self
.partitions
.iter()
.filter(|p| p.name.contains("Apple_HFS") || p.name.contains("Apple_HFSX"))
.max_by_key(|p| p.block_map.sector_count)
.ok_or_else(|| DppError::FileNotFound("no HFS+/HFSX partition found".into()))?;
Ok(partition.id)
}
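    /// Decompresses every partition into a single image of
    /// `koly.sector_count * 512` bytes, placing each block run at
    /// `first_sector + sector_number`. Runs that would extend past the end of
    /// the image are skipped.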
pub fn decompress_all(&mut self) -> Result<Vec<u8>> {
let total_sectors = self.koly.sector_count;
let total_size = total_sectors * SECTOR_SIZE;
let mut output = vec![0u8; total_size as usize];
for partition in self.partitions.clone() {
for block_run in &partition.block_map.block_runs {
let out_offset =
(partition.block_map.first_sector + block_run.sector_number) * SECTOR_SIZE;
let out_size = block_run.sector_count * SECTOR_SIZE;
if out_offset + out_size > total_size {
                    continue;
                }
match block_run.block_type {
BlockType::ZeroFill => {}
BlockType::Raw | BlockType::Ignore => {
if block_run.compressed_length > 0 {
self.reader.seek(SeekFrom::Start(
self.koly.data_fork_offset + block_run.compressed_offset,
))?;
let end = out_offset as usize + block_run.compressed_length as usize;
self.reader
.read_exact(&mut output[out_offset as usize..end])?;
}
}
BlockType::Zlib => {
self.reader.seek(SeekFrom::Start(
self.koly.data_fork_offset + block_run.compressed_offset,
))?;
let mut compressed = vec![0u8; block_run.compressed_length as usize];
self.reader.read_exact(&mut compressed)?;
let mut decoder = flate2::read::ZlibDecoder::new(&compressed[..]);
                        let slice =
                            &mut output[out_offset as usize..(out_offset + out_size) as usize];
                        read_full(&mut decoder, slice)?;
}
BlockType::Bzip2 => {
self.reader.seek(SeekFrom::Start(
self.koly.data_fork_offset + block_run.compressed_offset,
))?;
let mut compressed = vec![0u8; block_run.compressed_length as usize];
self.reader.read_exact(&mut compressed)?;
let mut decoder = bzip2::read::BzDecoder::new(&compressed[..]);
                        let slice =
                            &mut output[out_offset as usize..(out_offset + out_size) as usize];
                        read_full(&mut decoder, slice)?;
}
BlockType::Lzfse => {
self.reader.seek(SeekFrom::Start(
self.koly.data_fork_offset + block_run.compressed_offset,
))?;
let mut compressed = vec![0u8; block_run.compressed_length as usize];
self.reader.read_exact(&mut compressed)?;
let expected_size = out_size as usize;
let mut temp_buf = vec![0u8; expected_size * 2];
let decoded_size = lzfse::decode_buffer(&compressed, &mut temp_buf)
.map_err(|e| DppError::Decompression(format!("LZFSE: {:?}", e)))?;
let copy_size = decoded_size.min(expected_size);
let end = out_offset as usize + copy_size;
output[out_offset as usize..end].copy_from_slice(&temp_buf[..copy_size]);
}
BlockType::Xz => {
self.reader.seek(SeekFrom::Start(
self.koly.data_fork_offset + block_run.compressed_offset,
))?;
let mut compressed = vec![0u8; block_run.compressed_length as usize];
self.reader.read_exact(&mut compressed)?;
let mut decoder = xz2::read::XzDecoder::new(&compressed[..]);
let slice =
&mut output[out_offset as usize..(out_offset + out_size) as usize];
read_full(&mut decoder, slice)?;
}
BlockType::Adc => {
return Err(DppError::Unsupported("ADC compression".into()));
}
BlockType::Comment | BlockType::End => {}
}
}
}
Ok(output)
}
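    /// Decompresses a partition, using the parallel implementation when the
    /// `parallel` feature is enabled and the sequential one otherwise.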
pub fn decompress_partition_auto(&mut self, partition_id: i32) -> Result<Vec<u8>> {
#[cfg(feature = "parallel")]
{
self.decompress_partition_parallel(partition_id)
}
#[cfg(not(feature = "parallel"))]
{
self.decompress_partition(partition_id)
}
}
pub fn decompress_main_partition_auto(&mut self) -> Result<Vec<u8>> {
let id = self.main_partition_id()?;
self.decompress_partition_auto(id)
}
pub fn decompress_main_partition_to_auto<W: Write>(&mut self, writer: &mut W) -> Result<u64> {
#[cfg(feature = "parallel")]
{
let id = self.main_partition_id()?;
self.decompress_partition_to_parallel(id, writer)
}
#[cfg(not(feature = "parallel"))]
{
self.decompress_main_partition_to(writer)
}
}
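    /// Counts how many block runs use each compression type across all
    /// partitions.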
pub fn compression_info(&self) -> CompressionInfo {
let mut info = CompressionInfo::default();
for partition in &self.partitions {
for block_run in &partition.block_map.block_runs {
match block_run.block_type {
BlockType::ZeroFill => info.zero_fill_blocks += 1,
BlockType::Raw => info.raw_blocks += 1,
BlockType::Zlib => info.zlib_blocks += 1,
BlockType::Bzip2 => info.bzip2_blocks += 1,
BlockType::Lzfse => info.lzfse_blocks += 1,
BlockType::Xz => info.xz_blocks += 1,
BlockType::Adc => info.adc_blocks += 1,
_ => {}
}
}
}
info
}
}
impl DmgReader<BufReader<File>> {
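    /// Opens a DMG file from disk with default options, wrapping it in a
    /// `BufReader`.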
pub fn open<P: AsRef<Path>>(path: P) -> Result<Self> {
Self::open_with_options(path, DmgReaderOptions::default())
}
pub fn open_with_options<P: AsRef<Path>>(path: P, options: DmgReaderOptions) -> Result<Self> {
let file = File::open(path)?;
let reader = BufReader::new(file);
Self::with_options(reader, options)
}
}
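/// A block run's compressed bytes together with its destination range,
/// captured during the sequential read pass so decompression can run in
/// parallel.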
#[cfg(feature = "parallel")]
struct ReadBlock {
block_type: BlockType,
data: Vec<u8>,
out_offset: usize,
out_size: usize,
}
#[cfg(feature = "parallel")]
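/// Decompresses one previously read block run into its output slice.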
fn decompress_block(block: &ReadBlock, output: &mut [u8]) -> Result<()> {
match block.block_type {
BlockType::Raw | BlockType::Ignore => {
let copy_len = block.data.len().min(output.len());
output[..copy_len].copy_from_slice(&block.data[..copy_len]);
}
BlockType::Zlib => {
let mut decoder = flate2::read::ZlibDecoder::new(&block.data[..]);
read_full(&mut decoder, output)?;
}
BlockType::Bzip2 => {
let mut decoder = bzip2::read::BzDecoder::new(&block.data[..]);
read_full(&mut decoder, output)?;
}
BlockType::Lzfse => {
let expected_size = output.len();
let mut temp_buf = vec![0u8; expected_size * 2];
let decoded_size = lzfse::decode_buffer(&block.data, &mut temp_buf)
.map_err(|e| DppError::Decompression(format!("LZFSE: {:?}", e)))?;
let copy_size = decoded_size.min(expected_size);
output[..copy_size].copy_from_slice(&temp_buf[..copy_size]);
}
BlockType::Xz => {
let mut decoder = xz2::read::XzDecoder::new(&block.data[..]);
read_full(&mut decoder, output)?;
}
_ => {
}
}
Ok(())
}
#[cfg(feature = "parallel")]
impl<R: Read + Seek> DmgReader<R> {
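    /// Parallel variant of [`decompress_partition`](Self::decompress_partition):
    /// compressed runs are read sequentially, then decompressed concurrently
    /// with rayon into disjoint slices of the output buffer.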
pub fn decompress_partition_parallel(&mut self, partition_id: i32) -> Result<Vec<u8>> {
let partition = self
.partitions
.iter()
.find(|p| p.id == partition_id)
.ok_or_else(|| DppError::FileNotFound(format!("partition {}", partition_id)))?
.clone();
let total_size = (partition.block_map.sector_count * SECTOR_SIZE) as usize;
let mut blocks = Vec::new();
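        // First pass: read each run's compressed bytes sequentially from the
        // single reader.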
for block_run in &partition.block_map.block_runs {
let out_offset = (block_run.sector_number * SECTOR_SIZE) as usize;
let out_size = (block_run.sector_count * SECTOR_SIZE) as usize;
match block_run.block_type {
BlockType::ZeroFill | BlockType::Comment | BlockType::End => {
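                    // Zero-fill, comment, and end-of-map runs carry no stored
                    // data; the pre-zeroed output already covers them.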
}
BlockType::Adc => {
return Err(DppError::Unsupported("ADC compression".into()));
}
BlockType::Raw | BlockType::Ignore => {
if block_run.compressed_length > 0 {
self.reader.seek(SeekFrom::Start(
self.koly.data_fork_offset + block_run.compressed_offset,
))?;
let mut data = vec![0u8; block_run.compressed_length as usize];
self.reader.read_exact(&mut data)?;
blocks.push(ReadBlock {
block_type: block_run.block_type,
data,
out_offset,
out_size,
});
}
}
_ => {
self.reader.seek(SeekFrom::Start(
self.koly.data_fork_offset + block_run.compressed_offset,
))?;
let mut data = vec![0u8; block_run.compressed_length as usize];
self.reader.read_exact(&mut data)?;
blocks.push(ReadBlock {
block_type: block_run.block_type,
data,
out_offset,
out_size,
});
}
}
}
let mut output = vec![0u8; total_size];
blocks.sort_by_key(|b| b.out_offset);
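        // Split the output into disjoint, non-overlapping slices (one per
        // block run) so each can be decompressed on a separate thread.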
let mut slices: Vec<(&ReadBlock, &mut [u8])> = Vec::with_capacity(blocks.len());
let mut remaining = output.as_mut_slice();
let mut current_pos = 0usize;
for block in &blocks {
if block.out_offset > current_pos {
let gap = block.out_offset - current_pos;
remaining = &mut remaining[gap..];
current_pos += gap;
}
let (block_slice, rest) = remaining.split_at_mut(block.out_size);
slices.push((block, block_slice));
remaining = rest;
current_pos += block.out_size;
}
let results: Vec<Result<()>> = slices
.into_par_iter()
.map(|(block, slice)| decompress_block(block, slice))
.collect();
for result in results {
result?;
}
Ok(output)
}
pub fn decompress_partition_to_parallel<W: Write>(
&mut self,
partition_id: i32,
writer: &mut W,
) -> Result<u64> {
let output = self.decompress_partition_parallel(partition_id)?;
let len = output.len() as u64;
writer.write_all(&output)?;
Ok(len)
}
}
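/// Summary statistics for a DMG, as returned by [`DmgReader::stats`].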
#[derive(Debug, Clone)]
pub struct DmgStats {
pub version: u32,
pub sector_count: u64,
pub partition_count: usize,
pub total_uncompressed: u64,
pub total_compressed: u64,
pub data_fork_length: u64,
}
impl DmgStats {
pub fn compression_ratio(&self) -> f64 {
if self.total_uncompressed == 0 {
return 1.0;
}
self.total_compressed as f64 / self.total_uncompressed as f64
}
pub fn space_savings(&self) -> f64 {
(1.0 - self.compression_ratio()) * 100.0
}
}
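/// Per-compression-type block run counts, as returned by
/// [`DmgReader::compression_info`].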
#[derive(Debug, Clone, Default)]
pub struct CompressionInfo {
pub zero_fill_blocks: u32,
pub raw_blocks: u32,
pub zlib_blocks: u32,
pub bzip2_blocks: u32,
pub lzfse_blocks: u32,
pub xz_blocks: u32,
pub adc_blocks: u32,
}
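/// Parses the embedded property list, extracting one `PartitionEntry` per
/// `blkx` entry in the `resource-fork` dictionary.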
fn parse_plist(plist_data: &[u8]) -> Result<Vec<PartitionEntry>> {
let plist: plist::Value = plist::from_bytes(plist_data)
.map_err(|e| DppError::InvalidPlist(format!("plist parse error: {}", e)))?;
let dict = plist
.as_dictionary()
.ok_or_else(|| DppError::InvalidPlist("expected dictionary".into()))?;
let resource_fork = dict
.get("resource-fork")
.and_then(|v| v.as_dictionary())
.ok_or_else(|| DppError::InvalidPlist("missing resource-fork".into()))?;
let blkx = resource_fork
.get("blkx")
.and_then(|v| v.as_array())
.ok_or_else(|| DppError::InvalidPlist("missing blkx array".into()))?;
let mut partitions = Vec::with_capacity(blkx.len());
for entry in blkx {
let entry_dict = entry
.as_dictionary()
.ok_or_else(|| DppError::InvalidPlist("blkx entry not a dictionary".into()))?;
let name = entry_dict
.get("Name")
.and_then(|v| v.as_string())
.unwrap_or("")
.to_string();
let id = entry_dict
.get("ID")
.and_then(|v| v.as_string())
.and_then(|s| s.parse().ok())
.unwrap_or(0);
let attributes = entry_dict
.get("Attributes")
.and_then(|v| v.as_string())
.and_then(|s| {
if let Some(hex) = s.strip_prefix("0x") {
u32::from_str_radix(hex, 16).ok()
} else {
s.parse().ok()
}
})
.unwrap_or(0);
let data = entry_dict
.get("Data")
.and_then(|v| v.as_data())
.ok_or_else(|| DppError::InvalidPlist("missing Data in blkx entry".into()))?;
let block_map = MishHeader::from_bytes(data)?;
partitions.push(PartitionEntry {
name,
id,
attributes,
block_map,
});
}
Ok(partitions)
}
pub fn open<P: AsRef<Path>>(path: P) -> Result<DmgReader<BufReader<File>>> {
DmgReader::open(path)
}
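/// Returns `true` if the file at `path` can be opened and looks like a DMG
/// (delegates to `crate::format::is_dmg`).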
pub fn is_dmg<P: AsRef<Path>>(path: P) -> bool {
File::open(path)
.ok()
.map(BufReader::new)
.map(|mut r| crate::format::is_dmg(&mut r))
.unwrap_or(false)
}