#![no_std]
#![deny(missing_docs)]
#![cfg_attr(not(feature = "std"), deny(clippy::disallowed_types))]
#![doc = include_str!("../README.md")]
#![cfg_attr(test, allow(clippy::uninlined_format_args))]
#[cfg(feature = "std")]
extern crate std;
use core::convert::TryFrom;
use core::fmt;
use core::ops::Range;
// Decodes a little-endian integer of type `$t` from a byte slice. The slice
// length must equal `size_of::<$t>()`; the `unwrap` panics otherwise, so
// callers always pass exactly-sized sub-slices.
macro_rules! le_bytes {
    ($t:ty, $bytes:expr) => {
        <$t>::from_le_bytes($bytes.try_into().unwrap())
    };
}
// Little-endian magic numbers ("PK\x05\x06" etc. read as u32) for the ZIP
// record types this crate parses.
const EOCD_SIGNATURE: u32 = 0x0605_4B50;
const ZIP64_EOCD_SIGNATURE: u32 = 0x0606_4B50;
const ZIP64_LOCATOR_SIGNATURE: u32 = 0x0706_4B50;
const CENTRAL_HEADER_SIGNATURE: u32 = 0x0201_4B50;
const LOCAL_HEADER_SIGNATURE: u32 = 0x0403_4B50;
// Fixed (pre-variable-field) sizes of the corresponding records, in bytes.
const EOCD_LEN: usize = 22;
const ZIP64_LOCATOR_LEN: usize = 20;
const CENTRAL_HEADER_LEN: usize = 46;
const LOCAL_HEADER_LEN: usize = 30;
// The EOCD sits at most a 65535-byte comment away from the end of the file.
const MAX_EOCD_SCAN: usize = EOCD_LEN + u16::MAX as usize;
// Stack-buffer size used when scanning/comparing stored file names.
const PATH_SCAN_CHUNK_LEN: usize = 64;
/// Random-access byte source an [`Archive`] reads from.
pub trait Reader {
    /// Error type surfaced by the underlying I/O.
    type Error;
    /// Total length of the source in bytes.
    fn size(&self) -> Result<u64, Self::Error>;
    /// Fills `buf` with the bytes starting at absolute offset `pos`.
    fn read_exact_at(&self, pos: u64, buf: &mut [u8]) -> Result<(), Self::Error>;
}
/// Error produced by the built-in `&[u8]` reader.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum SliceReaderError {
    /// The requested range does not lie within the slice.
    OutOfBounds,
}
impl fmt::Display for SliceReaderError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Map each variant to its fixed message; the match stays exhaustive
        // so adding a variant forces an update here.
        let msg = match *self {
            Self::OutOfBounds => "byte range out of bounds",
        };
        f.write_str(msg)
    }
}
impl core::error::Error for SliceReaderError {}
impl<E: core::error::Error + 'static> core::error::Error for Error<E> {
    // Only `Io` wraps an underlying cause; every other variant is a
    // terminal parse error.
    fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
        match self {
            Self::Io(e) => Some(e),
            _ => None,
        }
    }
}
impl Reader for &[u8] {
    type Error = SliceReaderError;
    /// Length of the backing slice, widened to `u64`.
    fn size(&self) -> Result<u64, Self::Error> {
        Ok(self.len() as u64)
    }
    /// Copies `buf.len()` bytes starting at `pos`; any read that does not
    /// fit entirely inside the slice fails with `OutOfBounds`.
    fn read_exact_at(&self, pos: u64, buf: &mut [u8]) -> Result<(), Self::Error> {
        let start = usize::try_from(pos).map_err(|_| SliceReaderError::OutOfBounds)?;
        // Both an overflowing end offset and an out-of-range slice index
        // collapse to the same error.
        match start.checked_add(buf.len()).and_then(|end| self.get(start..end)) {
            Some(src) => {
                buf.copy_from_slice(src);
                Ok(())
            }
            None => Err(SliceReaderError::OutOfBounds),
        }
    }
}
/// `std`-only functionality (compiled only when the `std` feature is on).
// NOTE(review): contents live in std_io.rs — presumably `Reader` adapters
// for `std` I/O types; confirm against that module.
#[cfg(feature = "std")]
pub mod std_io;
/// Compression methods this crate recognizes.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Compression {
    /// Method 0: data stored without compression.
    Stored,
    /// Method 8: DEFLATE-compressed data.
    Deflated,
}
impl Compression {
    /// Maps a raw method id from a ZIP header to a supported variant;
    /// `None` for any method this crate cannot decode.
    #[must_use]
    pub fn from_raw(raw: u16) -> Option<Self> {
        if raw == 0 {
            Some(Self::Stored)
        } else if raw == 8 {
            Some(Self::Deflated)
        } else {
            None
        }
    }
    /// The raw method id written into ZIP headers for this variant.
    #[must_use]
    pub fn raw(self) -> u16 {
        match self {
            Self::Deflated => 8,
            Self::Stored => 0,
        }
    }
}
/// Absolute byte ranges locating one entry's local header and payload
/// within the underlying reader.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct DataRange {
    /// The fixed local header plus its variable name/extra fields.
    pub local_header_range: Range<u64>,
    /// The file-name bytes inside the local header.
    pub local_name_range: Range<u64>,
    /// The extra-field bytes inside the local header.
    pub local_extra_range: Range<u64>,
    /// The (possibly compressed) file payload itself.
    pub data_range: Range<u64>,
}
/// Errors raised while opening an archive or walking its entries.
#[derive(Debug, Eq, PartialEq)]
pub enum Error<E> {
    /// The underlying reader failed.
    Io(E),
    /// No end-of-central-directory record was found.
    NotZip,
    /// A record extends past the end of the file.
    Truncated,
    /// A record's magic number did not match.
    InvalidSignature,
    /// An offset or length was inconsistent or overflowed.
    InvalidOffset,
    /// A record's contents were malformed.
    InvalidRecord,
    /// The archive spans multiple disks, which is unsupported.
    MultiDisk,
    /// The entry is encrypted, which is unsupported.
    StrongEncryption,
    /// The local header fields are masked, which is unsupported.
    MaskedLocalHeaders,
    /// The entry uses a compression method this crate cannot decode.
    UnsupportedCompression(u16),
    /// No entry matched the requested path.
    NotFound,
}
impl<E: fmt::Display> fmt::Display for Error<E> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The two payload-carrying variants interpolate their data and
        // return early; everything else maps to a fixed message.
        let msg = match self {
            Self::Io(err) => return write!(f, "I/O error: {err}"),
            Self::UnsupportedCompression(method) => {
                return write!(f, "unsupported ZIP compression method {method}")
            }
            Self::NotZip => "not a ZIP archive",
            Self::Truncated => "truncated ZIP archive",
            Self::InvalidSignature => "invalid ZIP signature",
            Self::InvalidOffset => "invalid ZIP offset",
            Self::InvalidRecord => "invalid ZIP record",
            Self::MultiDisk => "multi-disk ZIP archives are unsupported",
            Self::StrongEncryption => "strong encryption is unsupported",
            Self::MaskedLocalHeaders => "masked local headers are unsupported",
        };
        f.write_str(msg)
    }
}
/// A parsed ZIP archive over a random-access [`Reader`].
pub struct Archive<R> {
    pub(crate) reader: R,
    // Total reader size in bytes, cached at open time.
    size: u64,
    // Bytes of content preceding the archive proper (e.g. a self-extractor
    // stub); stored offsets are relative to this base.
    base_offset: u64,
    // Absolute offset where the central directory must end (the EOCD or
    // the ZIP64 EOCD record).
    directory_end_offset: u64,
    // Central-directory offset as stored (relative to `base_offset`).
    central_directory_offset: u64,
    // Number of entries recorded in the central directory.
    entry_count: u64,
}
impl<R: Reader> Archive<R> {
    /// Opens and validates an archive: locates the EOCD record, follows the
    /// ZIP64 records when present, and resolves the central directory's
    /// absolute position (tolerating data prepended before the archive).
    ///
    /// # Errors
    /// [`Error::NotZip`] when no EOCD is found, [`Error::MultiDisk`] for
    /// split archives, plus offset/signature errors for inconsistent
    /// metadata and [`Error::Io`] for reader failures.
    pub fn open(reader: R) -> Result<Self, Error<R::Error>> {
        let size = reader.size().map_err(Error::Io)?;
        let (eocd_offset, eocd) = find_eocd(&reader, size)?;
        // NOTE(review): a ZIP64 archive may legitimately store 0xFFFF in
        // the EOCD disk fields; this check would reject such archives as
        // MultiDisk — confirm against writers that emit the sentinel.
        ensure_single_disk(
            u32::from(eocd.disk_number),
            u32::from(eocd.central_directory_disk),
        )?;
        // Prefer the ZIP64 EOCD values whenever any classic field is
        // saturated; `payload_end` is where the directory must stop.
        let (entry_count, central_directory_size, central_directory_offset, payload_end) =
            if eocd.needs_zip64() {
                let zip64 = parse_zip64_metadata(&reader, size, eocd_offset)?;
                ensure_single_disk(zip64.disk_number, zip64.central_directory_disk)?;
                (
                    zip64.entry_count,
                    zip64.central_directory_size,
                    zip64.central_directory_offset,
                    zip64.record_offset,
                )
            } else {
                (
                    u64::from(eocd.entry_count),
                    u64::from(eocd.central_directory_size),
                    u64::from(eocd.central_directory_offset),
                    eocd_offset,
                )
            };
        // If bytes were prepended, stored offsets are shifted by a constant
        // base: infer it from where the directory actually ends.
        let used = add(central_directory_offset, central_directory_size)?;
        let inferred_base_offset = payload_end.checked_sub(used).ok_or(Error::InvalidOffset)?;
        let absolute_cd_offset = resolve_central_directory_offset(
            &reader,
            size,
            central_directory_offset,
            inferred_base_offset,
        )?;
        let base_offset = absolute_cd_offset
            .checked_sub(central_directory_offset)
            .ok_or(Error::InvalidOffset)?;
        // The directory must fit between its start and the EOCD record.
        let absolute_cd_end = add(absolute_cd_offset, central_directory_size)?;
        if absolute_cd_end > eocd_offset || eocd_offset + EOCD_LEN as u64 > size {
            return Err(Error::InvalidOffset);
        }
        // An empty directory is only plausible when the EOCD sits at the
        // very start of the file.
        if entry_count == 0
            && central_directory_size == 0
            && central_directory_offset == 0
            && eocd_offset != 0
        {
            return Err(Error::InvalidOffset);
        }
        Ok(Self {
            reader,
            size,
            base_offset,
            directory_end_offset: payload_end,
            central_directory_offset,
            entry_count,
        })
    }
    /// Total size of the underlying reader in bytes.
    pub fn size(&self) -> u64 {
        self.size
    }
    /// Number of entries recorded in the central directory.
    pub fn entry_count(&self) -> u64 {
        self.entry_count
    }
    /// Iterates over the central-directory entries in stored order.
    pub fn entries(&self) -> Entries<'_, R> {
        Entries {
            archive: self,
            next_offset: self.base_offset + self.central_directory_offset,
            remaining: self.entry_count,
            end_offset: self.directory_end_offset,
        }
    }
    /// Linear search for the entry whose full stored path equals `path`.
    ///
    /// # Errors
    /// [`Error::NotFound`] when no entry matches; any parse/I/O error hit
    /// while walking the directory is propagated.
    pub fn find_file(&self, path: impl AsRef<[u8]>) -> Result<Entry<'_, R>, Error<R::Error>> {
        let path = path.as_ref();
        for entry in self.entries() {
            let entry = entry?;
            if entry.path_is(path)? {
                return Ok(entry);
            }
        }
        Err(Error::NotFound)
    }
    // Bounds-checked read: rejects reads past the cached size before
    // touching the reader, mapping them to `Truncated`.
    fn read_exact_at(&self, pos: u64, buf: &mut [u8]) -> Result<(), Error<R::Error>> {
        let len = u64::try_from(buf.len()).map_err(|_| Error::InvalidOffset)?;
        if add(pos, len)? > self.size {
            return Err(Error::Truncated);
        }
        self.reader.read_exact_at(pos, buf).map_err(Error::Io)
    }
}
/// Iterator over the central-directory entries of an [`Archive`].
pub struct Entries<'a, R> {
    archive: &'a Archive<R>,
    // Absolute offset of the next central-directory record.
    next_offset: u64,
    // Entries still to be yielded; doubles as the error fuse.
    remaining: u64,
    // Absolute offset at which the central directory must end.
    end_offset: u64,
}
impl<'a, R: Reader> Iterator for Entries<'a, R> {
    type Item = Result<Entry<'a, R>, Error<R::Error>>;
    fn next(&mut self) -> Option<Self::Item> {
        // `remaining` doubles as the fuse: it reaches zero on exhaustion
        // and is forced to zero after the first error.
        self.remaining = self.remaining.checked_sub(1)?;
        match Entry::parse(self.archive, self.next_offset, self.end_offset) {
            Ok((entry, next)) => {
                self.next_offset = next;
                Some(Ok(entry))
            }
            Err(err) => {
                self.remaining = 0;
                Some(Err(err))
            }
        }
    }
}
/// One central-directory entry, borrowed from its [`Archive`].
pub struct Entry<'a, R> {
    archive: &'a Archive<R>,
    // Absolute range of the stored file name within the central directory.
    name_range: Range<u64>,
    // General-purpose bit flags from the central header.
    flags: u16,
    // Raw compression method id (see `Compression::from_raw`).
    compression_method: u16,
    crc32: u32,
    // Sizes with any ZIP64 extra-field overrides already applied.
    compressed_size: u64,
    uncompressed_size: u64,
    // Local-header offset relative to the archive's base offset.
    local_header_offset: u64,
}
impl<'a, R: Reader> Entry<'a, R> {
    /// Parses the central-directory record at `header_offset`, returning
    /// the entry plus the offset of the record that follows it.
    fn parse(
        archive: &'a Archive<R>,
        header_offset: u64,
        end_offset: u64,
    ) -> Result<(Self, u64), Error<R::Error>> {
        let mut header = [0u8; CENTRAL_HEADER_LEN];
        archive.read_exact_at(header_offset, &mut header)?;
        if le_bytes!(u32, &header[0..4]) != CENTRAL_HEADER_SIGNATURE {
            return Err(Error::InvalidSignature);
        }
        let flags = le_bytes!(u16, &header[8..10]);
        // Reject flag bits 6 and 13 up front.
        // NOTE(review): bit 13 is reported as StrongEncryption here but as
        // MaskedLocalHeaders in `data_range` — confirm the intended variant.
        if flags & (1 << 6) != 0 || flags & (1 << 13) != 0 {
            return Err(Error::StrongEncryption);
        }
        let name_len = u64::from(le_bytes!(u16, &header[28..30]));
        let extra_len = u64::from(le_bytes!(u16, &header[30..32]));
        let comment_len = u64::from(le_bytes!(u16, &header[32..34]));
        let record_len =
            central_record_len(name_len, extra_len, comment_len).ok_or(Error::InvalidOffset)?;
        let next_offset = add(header_offset, record_len)?;
        // The whole record, variable fields included, must end inside the
        // central directory.
        if next_offset > end_offset {
            return Err(Error::Truncated);
        }
        let name_range = (header_offset + CENTRAL_HEADER_LEN as u64)
            ..(header_offset + CENTRAL_HEADER_LEN as u64 + name_len);
        let extra_range = name_range.end..name_range.end + extra_len;
        let raw_compressed_size = le_bytes!(u32, &header[20..24]);
        let raw_uncompressed_size = le_bytes!(u32, &header[24..28]);
        let raw_local_offset = le_bytes!(u32, &header[42..46]);
        let mut compressed_size = u64::from(raw_compressed_size);
        let mut uncompressed_size = u64::from(raw_uncompressed_size);
        let mut local_header_offset = u64::from(raw_local_offset);
        // A saturated 32-bit field means the true 64-bit value lives in the
        // ZIP64 extra field.
        let zip64_needed = raw_compressed_size == u32::MAX
            || raw_uncompressed_size == u32::MAX
            || raw_local_offset == u32::MAX;
        if zip64_needed {
            // Fixed scratch keeps this no_std-friendly; extra areas larger
            // than 256 bytes are rejected rather than heap-allocated.
            let mut scratch = [0u8; 256];
            let extra_len_usize = usize::try_from(extra_len).map_err(|_| Error::InvalidOffset)?;
            if extra_len_usize > scratch.len() {
                return Err(Error::InvalidRecord);
            }
            archive.read_exact_at(extra_range.start, &mut scratch[..extra_len_usize])?;
            let zip64 = find_zip64_extra(
                &scratch[..extra_len_usize],
                raw_uncompressed_size == u32::MAX,
                raw_compressed_size == u32::MAX,
                raw_local_offset == u32::MAX,
            )?;
            if let Some(size) = zip64.uncompressed_size {
                uncompressed_size = size;
            }
            if let Some(size) = zip64.compressed_size {
                compressed_size = size;
            }
            if let Some(offset) = zip64.local_header_offset {
                local_header_offset = offset;
            }
        }
        let compression_method = le_bytes!(u16, &header[10..12]);
        let entry = Self {
            archive,
            name_range,
            flags,
            compression_method,
            crc32: le_bytes!(u32, &header[16..20]),
            compressed_size,
            uncompressed_size,
            local_header_offset,
        };
        Ok((entry, next_offset))
    }
    /// Raw general-purpose bit flags from the central header.
    #[must_use]
    pub fn flags(&self) -> u16 {
        self.flags
    }
    /// The entry's compression method.
    ///
    /// # Errors
    /// [`Error::UnsupportedCompression`] for any method other than stored
    /// or deflated.
    pub fn compression(&self) -> Result<Compression, Error<R::Error>> {
        Compression::from_raw(self.compression_method)
            .ok_or(Error::UnsupportedCompression(self.compression_method))
    }
    /// CRC-32 of the uncompressed data, as recorded in the central header.
    #[must_use]
    pub fn crc32(&self) -> u32 {
        self.crc32
    }
    /// Size of the stored (possibly compressed) data in bytes.
    #[must_use]
    pub fn compressed_size(&self) -> u64 {
        self.compressed_size
    }
    /// Size of the data after decompression, in bytes.
    #[must_use]
    pub fn uncompressed_size(&self) -> u64 {
        self.uncompressed_size
    }
    /// Copies the stored path into `buf` and returns the filled prefix.
    ///
    /// # Errors
    /// [`Error::InvalidOffset`] when `buf` is smaller than the path.
    pub fn read_path<'b>(&self, buf: &'b mut [u8]) -> Result<&'b [u8], Error<R::Error>> {
        read_variable_range(self.archive, self.name_range.clone(), buf)
    }
    /// Whether flag bit 11 declares the stored path to be UTF-8.
    #[must_use]
    pub fn path_is_utf8(&self) -> bool {
        self.flags & (1 << 11) != 0
    }
    /// Compares only the final path component (after the last `/`) against
    /// `file_name`, streaming through a small stack buffer.
    pub fn filename_is(&self, file_name: &[u8]) -> Result<bool, Error<R::Error>> {
        let component_start = find_path_file_name_start(self.archive, self.name_range.clone())?;
        let component_len = self
            .name_range
            .end
            .checked_sub(component_start)
            .ok_or(Error::InvalidOffset)?;
        let file_name_len = u64::try_from(file_name.len()).map_err(|_| Error::InvalidOffset)?;
        // Cheap length comparison before any further reads.
        if component_len != file_name_len {
            return Ok(false);
        }
        let mut scratch = [0u8; PATH_SCAN_CHUNK_LEN];
        let mut compared = 0usize;
        while compared < file_name.len() {
            let chunk_len = (file_name.len() - compared).min(scratch.len());
            let chunk_offset =
                component_start + u64::try_from(compared).map_err(|_| Error::InvalidOffset)?;
            self.archive
                .read_exact_at(chunk_offset, &mut scratch[..chunk_len])?;
            if scratch[..chunk_len] != file_name[compared..compared + chunk_len] {
                return Ok(false);
            }
            compared += chunk_len;
        }
        Ok(true)
    }
    /// Reads the entry's local header and computes the absolute byte ranges
    /// of its header, name, extra field, and data payload.
    ///
    /// # Errors
    /// Rejects encrypted entries, masked local headers, mismatched
    /// signatures, and data running past the end of the reader.
    pub fn data_range(&self) -> Result<DataRange, Error<R::Error>> {
        // Bit 0 = entry is encrypted.
        if self.flags & 1 != 0 {
            return Err(Error::StrongEncryption);
        }
        let local_header_offset = add(self.archive.base_offset, self.local_header_offset)?;
        let mut header = [0u8; LOCAL_HEADER_LEN];
        self.archive
            .read_exact_at(local_header_offset, &mut header)?;
        if le_bytes!(u32, &header[0..4]) != LOCAL_HEADER_SIGNATURE {
            return Err(Error::InvalidSignature);
        }
        // Re-check the flags stored in the local header; they can differ
        // from the central copy.
        let local_flags = le_bytes!(u16, &header[6..8]);
        if local_flags & 1 != 0 {
            return Err(Error::StrongEncryption);
        }
        if local_flags & (1 << 13) != 0 {
            return Err(Error::MaskedLocalHeaders);
        }
        let local_name_len = u64::from(le_bytes!(u16, &header[26..28]));
        let local_extra_len = u64::from(le_bytes!(u16, &header[28..30]));
        let local_name_range = (local_header_offset + LOCAL_HEADER_LEN as u64)
            ..(local_header_offset + LOCAL_HEADER_LEN as u64 + local_name_len);
        let local_extra_range = local_name_range.end..local_name_range.end + local_extra_len;
        let data_start = local_extra_range.end;
        // Sizes come from the central directory (already ZIP64-resolved).
        let data_end = add(data_start, self.compressed_size)?;
        if data_end > self.archive.size() {
            return Err(Error::Truncated);
        }
        Ok(DataRange {
            local_header_range: local_header_offset..data_start,
            local_name_range,
            local_extra_range,
            data_range: data_start..data_end,
        })
    }
    /// Compares the entry's full stored path against `path` without
    /// allocating, streaming through a small stack buffer.
    pub fn path_is(&self, path: impl AsRef<[u8]>) -> Result<bool, Error<R::Error>> {
        let path = path.as_ref();
        let path_len = u64::try_from(path.len()).map_err(|_| Error::InvalidOffset)?;
        let stored_len = self
            .name_range
            .end
            .checked_sub(self.name_range.start)
            .ok_or(Error::InvalidOffset)?;
        if stored_len != path_len {
            return Ok(false);
        }
        let mut scratch = [0u8; PATH_SCAN_CHUNK_LEN];
        // Compares back-to-front (presumably so differing file-name tails
        // fail fast — TODO confirm the intent).
        let mut remaining = path.len();
        while remaining > 0 {
            let chunk_len = remaining.min(scratch.len());
            let offset = remaining - chunk_len;
            let archive_offset =
                self.name_range.start + u64::try_from(offset).map_err(|_| Error::InvalidOffset)?;
            self.archive
                .read_exact_at(archive_offset, &mut scratch[..chunk_len])?;
            if scratch[..chunk_len] != path[offset..offset + chunk_len] {
                return Ok(false);
            }
            remaining = offset;
        }
        Ok(true)
    }
    /// Copies the entry's raw (still compressed) data into `buf`, returning
    /// the filled prefix.
    ///
    /// # Errors
    /// [`Error::InvalidOffset`] when `buf` is smaller than the data.
    pub fn read_to_slice<'b>(&self, buf: &'b mut [u8]) -> Result<&'b [u8], Error<R::Error>> {
        let range = self.data_range()?;
        let len = range_len_usize(&range.data_range)?;
        if buf.len() < len {
            return Err(Error::InvalidOffset);
        }
        self.archive
            .read_exact_at(range.data_range.start, &mut buf[..len])?;
        Ok(&buf[..len])
    }
    /// Returns a chunked streaming reader over the entry's raw data, using
    /// an internal `N`-byte buffer.
    pub fn read_chunks<const N: usize>(&self) -> Result<DataChunks<'a, R, N>, Error<R::Error>> {
        let range = self.data_range()?;
        Ok(DataChunks {
            archive: self.archive,
            buf: [0u8; N],
            pos: range.data_range.start,
            end: range.data_range.end,
        })
    }
}
/// Streaming view of an entry's raw data, read in chunks of at most `N`
/// bytes through an internal buffer.
pub struct DataChunks<'a, R, const N: usize> {
    archive: &'a Archive<R>,
    // Reusable read buffer; each yielded chunk borrows its prefix.
    buf: [u8; N],
    // Absolute offset of the next unread byte.
    pos: u64,
    // Absolute end of the data range.
    end: u64,
}
impl<'a, R: Reader, const N: usize> DataChunks<'a, R, N> {
    /// Reads the next chunk of at most `N` bytes.
    ///
    /// Returns `None` once the data range is exhausted. After a read error
    /// the cursor jumps to the end, so the stream is fused.
    // Not `Iterator::next`: the yielded slice borrows `self.buf`, which a
    // lending iterator would require.
    #[allow(clippy::should_implement_trait)]
    pub fn next(&mut self) -> Option<Result<&[u8], Error<R::Error>>> {
        // Guard against N == 0: chunk_len would always be 0, `pos` would
        // never advance, and the caller would receive empty slices forever.
        if N == 0 || self.pos >= self.end {
            return None;
        }
        // Take the min in u64 before narrowing so a remaining span larger
        // than usize::MAX (possible on 32-bit targets) cannot truncate to a
        // zero-length chunk.
        let chunk_len = (self.end - self.pos).min(N as u64) as usize;
        match self
            .archive
            .read_exact_at(self.pos, &mut self.buf[..chunk_len])
        {
            Ok(()) => {
                self.pos += chunk_len as u64;
                Some(Ok(&self.buf[..chunk_len]))
            }
            Err(e) => {
                // Poison the cursor so callers cannot read past an error.
                self.pos = self.end;
                Some(Err(e))
            }
        }
    }
}
impl<'a, const N: usize> DataChunks<'a, &'a [u8], N> {
    /// Borrows the remaining data directly from the in-memory reader as a
    /// chunk iterator, without copying through the internal buffer.
    ///
    /// # Panics
    /// Panics if `N == 0` (`slice::chunks` requires a non-zero size) or if
    /// the stored range exceeds the slice bounds; ranges produced by
    /// `Entry::read_chunks` have already been validated against the reader.
    pub fn iter(&self) -> core::slice::Chunks<'a, u8> {
        let start = self.pos as usize;
        let end = self.end as usize;
        self.archive.reader[start..end].chunks(N)
    }
}
fn read_variable_range<'a, R: Reader>(
archive: &Archive<R>,
range: Range<u64>,
buf: &'a mut [u8],
) -> Result<&'a [u8], Error<R::Error>> {
let len = range_len_usize(&range)?;
if buf.len() < len {
return Err(Error::InvalidOffset);
}
archive.read_exact_at(range.start, &mut buf[..len])?;
Ok(&buf[..len])
}
/// Scans the stored path backwards for the last `/` and returns the offset
/// just after it (the start of the final path component). Returns the range
/// start when the path contains no separator.
fn find_path_file_name_start<R: Reader>(
    archive: &Archive<R>,
    path_range: Range<u64>,
) -> Result<u64, Error<R::Error>> {
    // Walk from the end of the name toward the front one fixed-size chunk
    // at a time, so arbitrarily long names need only a small stack buffer.
    let mut cursor = path_range.end;
    let mut scratch = [0u8; PATH_SCAN_CHUNK_LEN];
    while cursor > path_range.start {
        let remaining = cursor
            .checked_sub(path_range.start)
            .ok_or(Error::InvalidOffset)?;
        let chunk_len_u64 = remaining.min(PATH_SCAN_CHUNK_LEN as u64);
        // chunk_len_u64 <= PATH_SCAN_CHUNK_LEN, so this conversion succeeds.
        let chunk_len = usize::try_from(chunk_len_u64).map_err(|_| Error::InvalidOffset)?;
        let chunk_start = cursor - chunk_len_u64;
        archive.read_exact_at(chunk_start, &mut scratch[..chunk_len])?;
        // `rposition` finds the separator closest to the end of the path.
        if let Some(index) = scratch[..chunk_len].iter().rposition(|&byte| byte == b'/') {
            return Ok(chunk_start + index as u64 + 1);
        }
        cursor = chunk_start;
    }
    Ok(path_range.start)
}
// Fields read from the classic end-of-central-directory record.
#[derive(Clone, Copy)]
struct Eocd {
    disk_number: u16,
    central_directory_disk: u16,
    entry_count: u16,
    central_directory_size: u32,
    central_directory_offset: u32,
}
impl Eocd {
    /// True when any classic EOCD field carries its all-ones sentinel,
    /// meaning the real value lives in the ZIP64 EOCD record.
    fn needs_zip64(self) -> bool {
        let Self {
            entry_count,
            central_directory_size,
            central_directory_offset,
            ..
        } = self;
        entry_count == u16::MAX
            || central_directory_size == u32::MAX
            || central_directory_offset == u32::MAX
    }
}
// Metadata extracted from the ZIP64 end-of-central-directory record.
#[derive(Clone, Copy)]
struct Zip64Record {
    // Absolute offset of the record itself; the directory must end here.
    record_offset: u64,
    disk_number: u32,
    central_directory_disk: u32,
    entry_count: u64,
    central_directory_size: u64,
    central_directory_offset: u64,
}
// Values recovered from a ZIP64 extra field. `None` means the matching
// 32-bit header field was not saturated, so no override applies.
#[derive(Clone, Copy, Default)]
struct Zip64Extra {
    uncompressed_size: Option<u64>,
    compressed_size: Option<u64>,
    local_header_offset: Option<u64>,
}
/// Scans backwards from the end of the file for the EOCD signature,
/// accepting a candidate only when its declared comment length is
/// consistent with the bytes remaining before EOF. Scans at most
/// `MAX_EOCD_SCAN` bytes (record plus maximum comment).
#[allow(clippy::large_stack_arrays)]
fn find_eocd<R: Reader>(reader: &R, size: u64) -> Result<(u64, Eocd), Error<R::Error>> {
    if size < EOCD_LEN as u64 {
        return Err(Error::NotZip);
    }
    let window_u64 = size.min(MAX_EOCD_SCAN as u64);
    let window = usize::try_from(window_u64).map_err(|_| Error::InvalidOffset)?;
    let start = size - window_u64;
    // ~64 KiB stack buffer: the furthest the EOCD can sit from EOF.
    let mut buffer = [0u8; MAX_EOCD_SCAN];
    let buf = &mut buffer[..window];
    reader.read_exact_at(start, buf).map_err(Error::Io)?;
    // Search rear-first so the candidate closest to EOF wins.
    for idx in (0..=window - EOCD_LEN).rev() {
        if le_bytes!(u32, &buf[idx..idx + 4]) != EOCD_SIGNATURE {
            continue;
        }
        // Skip candidates whose comment would run past EOF — likely a
        // signature embedded in entry data rather than a real record.
        let comment_len = usize::from(le_bytes!(u16, &buf[idx + 20..idx + 22]));
        let end = idx + EOCD_LEN + comment_len;
        if end > window {
            continue;
        }
        let eocd = Eocd {
            disk_number: le_bytes!(u16, &buf[idx + 4..idx + 6]),
            central_directory_disk: le_bytes!(u16, &buf[idx + 6..idx + 8]),
            entry_count: le_bytes!(u16, &buf[idx + 10..idx + 12]),
            central_directory_size: le_bytes!(u32, &buf[idx + 12..idx + 16]),
            central_directory_offset: le_bytes!(u32, &buf[idx + 16..idx + 20]),
        };
        return Ok((start + idx as u64, eocd));
    }
    Err(Error::NotZip)
}
/// Follows the ZIP64 EOCD locator (which sits directly before the classic
/// EOCD) to the ZIP64 EOCD record and extracts its directory metadata.
fn parse_zip64_metadata<R: Reader>(
    reader: &R,
    size: u64,
    eocd_offset: u64,
) -> Result<Zip64Record, Error<R::Error>> {
    // The fixed-size locator immediately precedes the EOCD record.
    let locator_offset = eocd_offset
        .checked_sub(ZIP64_LOCATOR_LEN as u64)
        .ok_or(Error::InvalidRecord)?;
    let mut locator = [0u8; ZIP64_LOCATOR_LEN];
    reader
        .read_exact_at(locator_offset, &mut locator)
        .map_err(Error::Io)?;
    if le_bytes!(u32, &locator[0..4]) != ZIP64_LOCATOR_SIGNATURE {
        return Err(Error::InvalidSignature);
    }
    let disk_number = le_bytes!(u32, &locator[4..8]);
    let zip64_offset = le_bytes!(u64, &locator[8..16]);
    let total_disks = le_bytes!(u32, &locator[16..20]);
    if disk_number != 0 || total_disks != 1 {
        return Err(Error::MultiDisk);
    }
    // The advertised offset may be stale if data was prepended to the
    // archive; resolve the record's real position.
    let zip64_offset = resolve_zip64_record_offset(reader, size, locator_offset, zip64_offset)?;
    // 56 bytes covers the fixed portion of the ZIP64 EOCD record.
    let mut header = [0u8; 56];
    if add(zip64_offset, header.len() as u64)? > size {
        return Err(Error::Truncated);
    }
    reader
        .read_exact_at(zip64_offset, &mut header)
        .map_err(Error::Io)?;
    if le_bytes!(u32, &header[0..4]) != ZIP64_EOCD_SIGNATURE {
        return Err(Error::InvalidSignature);
    }
    // The record's stored size excludes its leading 12 bytes (signature
    // plus the size field itself), hence the +12.
    let record_size = le_bytes!(u64, &header[4..12]);
    let total_len = add(record_size, 12)?;
    if add(zip64_offset, total_len)? > size {
        return Err(Error::Truncated);
    }
    Ok(Zip64Record {
        record_offset: zip64_offset,
        disk_number: le_bytes!(u32, &header[16..20]),
        central_directory_disk: le_bytes!(u32, &header[20..24]),
        entry_count: le_bytes!(u64, &header[32..40]),
        central_directory_size: le_bytes!(u64, &header[40..48]),
        central_directory_offset: le_bytes!(u64, &header[48..56]),
    })
}
/// Locates the ZIP64 EOCD record: trusts `advertised_offset` when a record
/// signature is present there, otherwise scans up to 4 KiB backwards from
/// the locator (covering archives with prepended data, whose stored
/// offsets are shifted).
fn resolve_zip64_record_offset<R: Reader>(
    reader: &R,
    size: u64,
    locator_offset: u64,
    advertised_offset: u64,
) -> Result<u64, Error<R::Error>> {
    const SEARCH_WINDOW: usize = 4096;
    if looks_like_signature(reader, size, advertised_offset, ZIP64_EOCD_SIGNATURE)? {
        return Ok(advertised_offset);
    }
    // Fallback: scan the window that ends at the locator, rear-first.
    let start = locator_offset.saturating_sub(SEARCH_WINDOW as u64);
    let window = locator_offset
        .checked_sub(start)
        .ok_or(Error::InvalidOffset)?;
    let window_usize = usize::try_from(window).map_err(|_| Error::InvalidOffset)?;
    let mut buffer = [0u8; SEARCH_WINDOW];
    reader
        .read_exact_at(start, &mut buffer[..window_usize])
        .map_err(Error::Io)?;
    // A window shorter than one signature cannot contain the record.
    if window_usize < 4 {
        return Err(Error::InvalidSignature);
    }
    for idx in (0..=window_usize - 4).rev() {
        if le_bytes!(u32, &buffer[idx..idx + 4]) == ZIP64_EOCD_SIGNATURE {
            return Ok(start + idx as u64);
        }
    }
    Err(Error::InvalidSignature)
}
/// Decides where the central directory actually starts: prefer whichever of
/// the raw stored offset or the base-adjusted offset points at a central
/// header signature (raw wins when both do); failing both probes, accept
/// the adjusted offset as long as it lies within the file.
fn resolve_central_directory_offset<R: Reader>(
    reader: &R,
    size: u64,
    central_directory_offset: u64,
    inferred_base_offset: u64,
) -> Result<u64, Error<R::Error>> {
    let raw_offset = central_directory_offset;
    let inferred_offset = add(inferred_base_offset, central_directory_offset)?;
    let raw_valid = looks_like_central_header(reader, size, raw_offset)?;
    let inferred_valid = looks_like_central_header(reader, size, inferred_offset)?;
    if raw_valid {
        return Ok(raw_offset);
    }
    if inferred_valid {
        return Ok(inferred_offset);
    }
    // Neither probe matched (e.g. an empty directory has no header to
    // probe); fall back to the inferred position if it is at least in
    // bounds.
    if inferred_offset <= size {
        return Ok(inferred_offset);
    }
    Err(Error::InvalidOffset)
}
// Probe: does `offset` point at a central-directory file header signature?
fn looks_like_central_header<R: Reader>(
    reader: &R,
    size: u64,
    offset: u64,
) -> Result<bool, Error<R::Error>> {
    looks_like_signature(reader, size, offset, CENTRAL_HEADER_SIGNATURE)
}
/// Checks whether the four bytes at `offset` equal `signature`, treating
/// any probe that does not fit inside the file as a mismatch rather than
/// an error.
fn looks_like_signature<R: Reader>(
    reader: &R,
    size: u64,
    offset: u64,
    signature: u32,
) -> Result<bool, Error<R::Error>> {
    match offset.checked_add(4) {
        Some(end) if end <= size => {
            let mut bytes = [0u8; 4];
            reader
                .read_exact_at(offset, &mut bytes)
                .map_err(Error::Io)?;
            Ok(le_bytes!(u32, &bytes[..]) == signature)
        }
        // Overflowing or out-of-file probes simply do not match.
        _ => Ok(false),
    }
}
/// Scans a central-header extra-field area for the ZIP64 block (tag 0x0001)
/// and extracts the values whose 32-bit header fields were saturated. The
/// block stores its values in a fixed order — uncompressed size, compressed
/// size, local-header offset — each present only when needed.
fn find_zip64_extra<E>(
    mut extra: &[u8],
    need_uncompressed: bool,
    need_compressed: bool,
    need_offset: bool,
) -> Result<Zip64Extra, Error<E>> {
    let mut out = Zip64Extra::default();
    // Each extra field is a (tag, len) header followed by `len` data bytes.
    while extra.len() >= 4 {
        let kind = le_bytes!(u16, &extra[0..2]);
        let len = usize::from(le_bytes!(u16, &extra[2..4]));
        extra = &extra[4..];
        if len > extra.len() {
            return Err(Error::InvalidRecord);
        }
        let field = &extra[..len];
        extra = &extra[len..];
        if kind != 0x0001 {
            continue;
        }
        let mut pos = 0usize;
        if need_uncompressed {
            out.uncompressed_size = Some(read_extra_u64(field, &mut pos)?);
        }
        if need_compressed {
            out.compressed_size = Some(read_extra_u64(field, &mut pos)?);
        }
        if need_offset {
            out.local_header_offset = Some(read_extra_u64(field, &mut pos)?);
        }
        return Ok(out);
    }
    // A header demanded ZIP64 values but no ZIP64 extra field exists.
    Err(Error::InvalidRecord)
}
fn read_extra_u64<E>(extra: &[u8], pos: &mut usize) -> Result<u64, Error<E>> {
let end = pos.checked_add(8).ok_or(Error::InvalidOffset)?;
if end > extra.len() {
return Err(Error::InvalidRecord);
}
let value = le_bytes!(u64, &extra[*pos..end]);
*pos = end;
Ok(value)
}
fn ensure_single_disk<E>(disk_number: u32, central_directory_disk: u32) -> Result<(), Error<E>> {
if disk_number != 0 || central_directory_disk != 0 {
return Err(Error::MultiDisk);
}
Ok(())
}
fn add<E>(lhs: u64, rhs: u64) -> Result<u64, Error<E>> {
lhs.checked_add(rhs).ok_or(Error::InvalidOffset)
}
/// Total on-disk length of a central-directory record: the fixed header
/// plus the three trailing variable-length areas. `None` on overflow.
fn central_record_len(name_len: u64, extra_len: u64, comment_len: u64) -> Option<u64> {
    [name_len, extra_len, comment_len]
        .iter()
        .try_fold(CENTRAL_HEADER_LEN as u64, |acc, &part| acc.checked_add(part))
}
fn range_len_usize<E>(range: &Range<u64>) -> Result<usize, Error<E>> {
let len = range
.end
.checked_sub(range.start)
.ok_or(Error::InvalidOffset)?;
usize::try_from(len).map_err(|_| Error::InvalidOffset)
}