pub mod bsdiff;
pub mod pwr;
pub mod tlc;
use md5::{Digest, Md5};
use std::io::{BufRead, Read};
/// Magic number expected at the very start of a patch file (read little-endian).
const PATCH_MAGIC: u32 = 0x0FEF5F00;
/// Magic number expected at the very start of a signature file (patch magic + 1).
const SIGNATURE_MAGIC: u32 = PATCH_MAGIC + 1;
/// Default Unix permission mask; currently unused (hence the leading underscore).
const _MODE_MASK: u32 = 0o644;
/// Size in bytes of one content block for hashing/verification (64 KiB).
const BLOCK_SIZE: usize = 64 * 1024;
/// Maximum encoded length in bytes of a protobuf varint.
const PROTOBUF_VARINT_MAX_LENGTH: usize = 10;
/// Upper bound on files kept open simultaneously while applying a patch.
// NOTE: `Option::unwrap` in const context requires a reasonably recent toolchain.
const MAX_OPEN_FILES_PATCH: std::num::NonZeroUsize = std::num::NonZeroUsize::new(16).unwrap();
/// A parsed signature file: header, the described build's layout, and a
/// streaming iterator over the per-block strong hashes that follow.
#[derive(Debug, Clone, PartialEq)]
pub struct Signature<R> {
    /// Signature-wide metadata (e.g. the compression of the remaining stream).
    pub header: pwr::SignatureHeader,
    /// Directory/file listing of the build this signature describes.
    pub container_new: tlc::Container,
    /// Lazily decodes one `pwr::BlockHash` message per content block.
    pub block_hash_iter: BlockHashIter<R>,
}
/// A parsed patch file: header, the old and new build layouts, and a
/// streaming iterator over the sync entries that transform old into new.
#[derive(Debug, Clone, PartialEq)]
pub struct Patch<R> {
    /// Patch-wide metadata (e.g. the compression of the remaining stream).
    pub header: pwr::PatchHeader,
    /// Directory/file listing of the build being patched from.
    pub container_old: tlc::Container,
    /// Directory/file listing of the build being patched to.
    pub container_new: tlc::Container,
    /// Lazily yields one sync-entry header (plus its operations) at a time.
    pub sync_op_iter: SyncEntryIter<R>,
}
/// Streaming decoder of `pwr::BlockHash` messages; owns the (decompressed)
/// reader positioned right after the signature's container message.
#[derive(Debug, Clone, PartialEq)]
pub struct BlockHashIter<R> {
    reader: R,
}
impl<R> Iterator for BlockHashIter<R>
where
    R: BufRead,
{
    type Item = Result<pwr::BlockHash, String>;

    /// Decodes the next `BlockHash` message, yielding `None` on clean EOF and
    /// `Some(Err(_))` when the reader or the decoder fails.
    fn next(&mut self) -> Option<Self::Item> {
        // Peek at the buffer first: an empty fill on success means we have
        // consumed every hash in the stream.
        let buffered = match self.reader.fill_buf() {
            Ok(bytes) => bytes,
            Err(e) => return Some(Err(format!("Couldn't read from reader into buffer!\n{e}"))),
        };
        if buffered.is_empty() {
            return None;
        }
        Some(decode_protobuf::<pwr::BlockHash>(&mut self.reader))
    }
}
/// Streaming decoder of the Rsync operations belonging to one sync entry;
/// borrows the patch reader for the duration of the entry.
#[derive(Debug, PartialEq)]
pub struct RsyncOpIter<'a, R> {
    reader: &'a mut R,
}
impl<'a, R> Iterator for RsyncOpIter<'a, R>
where
    R: BufRead,
{
    type Item = Result<pwr::SyncOp, String>;

    /// Decodes the next `SyncOp`. The sentinel `HeyYouDidIt` op terminates the
    /// entry and is swallowed (yields `None`); decode failures surface as
    /// `Some(Err(_))`.
    fn next(&mut self) -> Option<Self::Item> {
        let sync_op = match decode_protobuf::<pwr::SyncOp>(&mut self.reader) {
            Ok(op) => op,
            Err(e) => {
                return Some(Err(format!(
                    "Couldn't decode Rsync SyncOp message from reader!\n{e}"
                )))
            }
        };
        // The terminator op is consumed here and never handed to the caller.
        if matches!(sync_op.r#type(), pwr::sync_op::Type::HeyYouDidIt) {
            None
        } else {
            Some(Ok(sync_op))
        }
    }
}
/// Streaming decoder of the Bsdiff control operations belonging to one sync
/// entry; borrows the patch reader for the duration of the entry.
#[derive(Debug, PartialEq)]
pub struct BsdiffOpIter<'a, R> {
    reader: &'a mut R,
}
impl<'a, R> Iterator for BsdiffOpIter<'a, R>
where
    R: BufRead,
{
    type Item = Result<bsdiff::Control, String>;

    /// Decodes the next Bsdiff `Control` message.
    ///
    /// A control message with `eof` set terminates the series; the stream then
    /// must contain a trailing Rsync `HeyYouDidIt` sync op, which is consumed
    /// and validated here. A clean termination yields `None`; any decode
    /// failure or a missing terminator yields `Some(Err(_))`.
    fn next(&mut self) -> Option<Self::Item> {
        let control_op = match decode_protobuf::<bsdiff::Control>(&mut self.reader) {
            Ok(op) => op,
            Err(e) => {
                return Some(Err(format!(
                    "Couldn't decode Bsdiff Control message from reader!\n{e}"
                )))
            }
        };
        if !control_op.eof {
            return Some(Ok(control_op));
        }
        // The bsdiff series must be closed by a HeyYouDidIt sync op.
        match decode_protobuf::<pwr::SyncOp>(&mut self.reader) {
            Err(e) => Some(Err(format!(
                "Couldn't decode Rsync SyncOp message from reader!\n{e}"
            ))),
            Ok(sync_op) => {
                if matches!(sync_op.r#type(), pwr::sync_op::Type::HeyYouDidIt) {
                    None
                } else {
                    // Error text fixed: previously read "did not found it".
                    Some(Err(
                        "Expected a Rsync HeyYouDidIt sync operation, but did not find it!"
                            .to_string(),
                    ))
                }
            }
        }
    }
}
/// Header of one sync entry in a patch, tagged with the diffing scheme used
/// for the target file, paired with the matching operation iterator.
#[derive(Debug, PartialEq)]
pub enum SyncHeader<'a, R> {
    /// The entry is encoded as a series of Rsync sync operations.
    Rsync {
        // Index of the file in the new container this entry produces.
        file_index: i64,
        op_iter: RsyncOpIter<'a, R>,
    },
    /// The entry is encoded as a series of Bsdiff control operations.
    Bsdiff {
        // Index of the file in the new container this entry produces.
        file_index: i64,
        // Index of the old-container file the bsdiff is applied against.
        target_index: i64,
        op_iter: BsdiffOpIter<'a, R>,
    },
}
/// Streaming reader of sync entries; owns the (decompressed) patch reader
/// positioned right after the two container messages. Not an `Iterator`
/// because each yielded header mutably borrows the reader (see `next_header`).
#[derive(Debug, Clone, PartialEq)]
pub struct SyncEntryIter<R> {
    reader: R,
}
impl<'a, R> SyncEntryIter<R>
where
    R: BufRead,
{
    /// Reads the next sync-entry header from the stream.
    ///
    /// Returns `None` on clean EOF, `Some(Err(_))` on read/decode failure,
    /// and otherwise a [`SyncHeader`] whose operation iterator borrows this
    /// reader until the entry has been fully consumed.
    pub fn next_header(&'a mut self) -> Option<Result<SyncHeader<'a, R>, String>> {
        // Probe the reader first: a successful empty fill means no entries remain.
        match self.reader.fill_buf() {
            Err(e) => return Some(Err(format!("Couldn't read from reader into buffer!\n{e}"))),
            Ok([]) => return None,
            Ok(_) => {}
        }
        let header = match decode_protobuf::<pwr::SyncHeader>(&mut self.reader) {
            Ok(header) => header,
            Err(e) => return Some(Err(e)),
        };
        // Build the variant matching the declared diffing scheme; Bsdiff
        // entries carry one extra header message on the wire.
        let entry = match header.r#type() {
            pwr::sync_header::Type::Rsync => SyncHeader::Rsync {
                file_index: header.file_index,
                op_iter: RsyncOpIter {
                    reader: &mut self.reader,
                },
            },
            pwr::sync_header::Type::Bsdiff => {
                let bsdiff = match decode_protobuf::<pwr::BsdiffHeader>(&mut self.reader) {
                    Ok(bsdiff) => bsdiff,
                    Err(e) => return Some(Err(e)),
                };
                SyncHeader::Bsdiff {
                    file_index: header.file_index,
                    target_index: bsdiff.target_index,
                    op_iter: BsdiffOpIter {
                        reader: &mut self.reader,
                    },
                }
            }
        };
        Some(Ok(entry))
    }
}
/// Reads a protobuf varint length delimiter from `reader`.
///
/// Consumes one byte at a time until a byte without the continuation bit
/// (0x80) arrives, or the 10-byte maximum varint length is reached, then lets
/// prost decode the collected bytes.
///
/// # Errors
/// Returns a message when the reader fails or the bytes are not a valid
/// length delimiter.
fn read_length_delimiter(reader: &mut impl Read) -> Result<usize, String> {
    let mut varint = [0u8; PROTOBUF_VARINT_MAX_LENGTH];
    for current_byte in &mut varint {
        let mut byte = [0u8; 1];
        reader
            .read_exact(&mut byte)
            .map_err(|e| format!("Couldn't read from reader into buffer!\n{e}"))?;
        *current_byte = byte[0];
        // Continuation bit clear: this was the final byte of the varint.
        if (byte[0] & 0x80) == 0 {
            break;
        }
    }
    // Unfilled trailing zero bytes are harmless: the decoder stops at the
    // first byte whose continuation bit is clear.
    prost::decode_length_delimiter(varint.as_slice())
        // This helper prefixes every message (patch headers, sync ops, ...),
        // so the error must not claim it is specific to the signature header.
        .map_err(|e| format!("Couldn't decode the length delimiter!\n{e}"))
}
/// Decodes one length-delimited protobuf message of type `T` from `reader`:
/// first the varint length prefix, then exactly that many payload bytes.
fn decode_protobuf<T: prost::Message + Default>(reader: &mut impl Read) -> Result<T, String> {
    let length = read_length_delimiter(reader)?;
    // Pull exactly the announced number of payload bytes off the stream.
    let mut payload = vec![0u8; length];
    reader
        .read_exact(payload.as_mut_slice())
        .map_err(|e| format!("Couldn't read from reader into buffer!\n{e}"))?;
    T::decode(&payload[..]).map_err(|e| format!("Couldn't decode Protobuf message!\n{e}"))
}
/// Reads four bytes from `reader` and compares them (little-endian) against
/// `expected_magic`.
///
/// # Errors
/// Returns a message when the read fails or the magic value differs.
fn check_magic_bytes(reader: &mut impl Read, expected_magic: u32) -> Result<(), String> {
    // `[0u8; _]` needs const-argument inference, which only very recent
    // toolchains accept; spell the u32 width out so older stable Rust builds.
    let mut magic_bytes = [0u8; std::mem::size_of::<u32>()];
    reader
        .read_exact(&mut magic_bytes)
        .map_err(|e| format!("Couldn't read magic bytes!\n{e}"))?;
    let actual_magic = u32::from_le_bytes(magic_bytes);
    if actual_magic == expected_magic {
        Ok(())
    } else {
        Err("The magic bytes don't match! The binary file is corrupted!".to_string())
    }
}
fn decompress_stream(
reader: &mut impl BufRead,
algorithm: pwr::CompressionAlgorithm,
) -> Result<Box<dyn std::io::BufRead + '_>, String> {
match algorithm {
pwr::CompressionAlgorithm::None => Ok(Box::new(reader)),
pwr::CompressionAlgorithm::Brotli => {
#[cfg(feature = "brotli")]
{
Ok(Box::new(std::io::BufReader::new(
brotli::Decompressor::new(reader, 0),
)))
}
#[cfg(not(feature = "brotli"))]
{
Err(
"This binary was built without Brotli support. Recompile with `--features brotli` to be able to decompress the stream",
)
}
}
pwr::CompressionAlgorithm::Gzip => {
#[cfg(feature = "gzip")]
{
Ok(Box::new(std::io::BufReader::new(
flate2::bufread::GzDecoder::new(reader),
)))
}
#[cfg(not(feature = "gzip"))]
{
Err(
"This binary was built without gzip support. Recompile with `--features gzip` to be able to decompress the stream",
)
}
}
pwr::CompressionAlgorithm::Zstd => {
#[cfg(feature = "zstd")]
{
Ok(Box::new(std::io::BufReader::new(
zstd::Decoder::with_buffer(reader)
.map_err(|e| format!("Couldn't create zstd decoder!\n{e}"))?,
)))
}
#[cfg(not(feature = "zstd"))]
{
Err(
"This binary was built without Zstd support. Recompile with `--features zstd` to be able to decompress the stream",
)
}
}
}
}
/// Parses a signature file: magic bytes, plaintext header, then the
/// (possibly compressed) container listing followed by the block hashes.
///
/// The returned [`Signature`] keeps the hashes unread; consume them through
/// `block_hash_iter`.
///
/// # Errors
/// Returns a message on wrong magic bytes, decode failures, a missing
/// compression field, or an unavailable decompressor.
pub fn read_signature(reader: &mut impl BufRead) -> Result<Signature<impl BufRead>, String> {
    check_magic_bytes(reader, SIGNATURE_MAGIC)?;
    let header = decode_protobuf::<pwr::SignatureHeader>(reader)?;
    let compression_algorithm = header
        .compression
        // Error text fixed: previously read "compressing field".
        .ok_or("Missing compression field in Signature Header!")?
        .algorithm();
    // Everything after the header is compressed with the declared algorithm.
    let mut decompressed = decompress_stream(reader, compression_algorithm)?;
    let container_new = decode_protobuf::<tlc::Container>(&mut decompressed)?;
    let block_hash_iter = BlockHashIter {
        reader: decompressed,
    };
    Ok(Signature {
        header,
        container_new,
        block_hash_iter,
    })
}
/// Verifies the files on disk under `build_folder` against the block hashes
/// streamed from `signature`.
///
/// For every file listed in the signature's container the size is checked
/// first, then each `BLOCK_SIZE` chunk is MD5-hashed and compared with the
/// next strong hash pulled from the signature stream.
///
/// # Errors
/// Returns a message when a file can't be opened or read, when the size
/// differs from the signature, when the hash stream ends early, or when a
/// block hash doesn't match.
pub fn verify_files(
    build_folder: &std::path::Path,
    signature: &mut Signature<impl BufRead>,
) -> Result<(), String> {
    // One reusable BLOCK_SIZE scratch buffer shared across all files.
    let mut buffer = vec![0u8; BLOCK_SIZE];
    let mut hasher = Md5::new();
    for container_file in &signature.container_new.files {
        let file_path = build_folder.join(&container_file.path);
        let file = std::fs::File::open(&file_path).map_err(|e| {
            format!(
                "Couldn't open file: \"{}\"\n{e}",
                file_path.to_string_lossy()
            )
        })?;
        let metadata = file.metadata().map_err(|e| {
            format!(
                "Couldn't get file metadata: \"{}\"\n{e}",
                file_path.to_string_lossy()
            )
        })?;
        // Size check up front: a mismatch makes block comparison pointless.
        // NOTE(review): `metadata.len() as i64` assumes sizes fit in i64, as
        // the container stores them signed — confirm against the format spec.
        if metadata.len() as i64 != container_file.size {
            return Err(format!(
                "The signature and the in-disk size of \"{}\" don't match!",
                file_path.to_string_lossy()
            ));
        }
        let mut file_bufreader = std::io::BufReader::new(file);
        let mut block_index: usize = 0;
        loop {
            let block_start: usize = block_index * BLOCK_SIZE;
            // The final block may be shorter than BLOCK_SIZE.
            let block_end: usize = std::cmp::min(block_start + BLOCK_SIZE, container_file.size as usize);
            let buf = &mut buffer[..block_end - block_start];
            file_bufreader.read_exact(buf).map_err(|e| {
                format!(
                    "Couldn't read file data into buffer: \"{}\"\n{e}",
                    file_path.to_string_lossy()
                )
            })?;
            hasher.update(buf);
            // finalize_reset leaves the hasher ready for the next block.
            let hash = hasher.finalize_reset();
            let signature_hash = signature.block_hash_iter.next().ok_or_else(|| {
                "Expected a block hash message in the signature, but EOF was encountered!".to_string()
            })??;
            // Compare as byte slices (signature hash vs freshly computed MD5).
            if *signature_hash.strong_hash != *hash {
                return Err(format!(
                    "Hash mismatch!
Signature: {:X?}
In-disk: {:X?}",
                    signature_hash.strong_hash, hash,
                ));
            }
            // NOTE(review): a zero-byte file still hashes one empty block and
            // consumes one hash entry here — assumed to match what the
            // signature writer emits; confirm against the writer.
            if block_end == container_file.size as usize {
                break;
            }
            block_index += 1;
        }
    }
    Ok(())
}
/// Parses a patch file: magic bytes, plaintext header, then the (possibly
/// compressed) old and new container listings followed by the sync entries.
///
/// The returned [`Patch`] keeps the sync entries unread; consume them through
/// `sync_op_iter`.
///
/// # Errors
/// Returns a message on wrong magic bytes, decode failures, a missing
/// compression field, or an unavailable decompressor.
pub fn read_patch(reader: &mut impl BufRead) -> Result<Patch<impl BufRead>, String> {
    check_magic_bytes(reader, PATCH_MAGIC)?;
    let header = decode_protobuf::<pwr::PatchHeader>(reader)?;
    let compression_algorithm = header
        .compression
        // Error text fixed: previously read "compressing field".
        .ok_or("Missing compression field in Patch Header!")?
        .algorithm();
    // Everything after the header is compressed with the declared algorithm.
    let mut decompressed = decompress_stream(reader, compression_algorithm)?;
    let container_old = decode_protobuf::<tlc::Container>(&mut decompressed)?;
    let container_new = decode_protobuf::<tlc::Container>(&mut decompressed)?;
    let sync_op_iter = SyncEntryIter {
        reader: decompressed,
    };
    Ok(Patch {
        header,
        container_old,
        container_new,
        sync_op_iter,
    })
}
/// Applies `patch` on top of `_old_build_folder`, writing the result into
/// `new_build_folder`.
///
/// NOTE(review): this is visibly a work in progress — the directory tree is
/// created and the sync-entry headers are drained, but both the Rsync and
/// Bsdiff match arms are empty placeholders and the LRU cache of old files is
/// never used, so no file data is produced yet.
///
/// # Errors
/// Returns a message when a target directory can't be created or a sync
/// header can't be decoded.
pub fn apply_patch(
    _old_build_folder: &std::path::Path,
    new_build_folder: &std::path::Path,
    patch: &mut Patch<impl BufRead>,
) -> Result<(), String> {
    // Materialize the new build's directory tree before writing any files.
    for folder in &patch.container_new.dirs {
        std::fs::create_dir_all(new_build_folder.join(&folder.path)).map_err(|e| {
            format!(
                "Couldn't create folder: \"{}\"\n{e}",
                new_build_folder.join(&folder.path).to_string_lossy()
            )
        })?;
    }
    // Bounded cache of open old-build file handles, keyed by file index —
    // reserved for the unimplemented operation handling below.
    let _old_files_cache: lru::LruCache<u64, std::fs::File> =
        lru::LruCache::new(MAX_OPEN_FILES_PATCH);
    while let Some(header) = patch.sync_op_iter.next_header() {
        let header = header.map_err(|e| format!("Couldn't get next patch sync operation!\n{e}"))?;
        match header {
            // TODO: apply the Rsync operations — currently skipped.
            SyncHeader::Rsync {
                file_index: _,
                op_iter: _,
            } => {}
            // TODO: apply the Bsdiff operations — currently skipped.
            SyncHeader::Bsdiff {
                file_index: _,
                target_index: _,
                op_iter: _,
            } => {}
        }
    }
    Ok(())
}