#![allow(clippy::cast_possible_truncation, clippy::too_many_lines)]
use std::collections::VecDeque;
use std::fs::OpenOptions;
use std::io::{Seek, SeekFrom, Write};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicUsize, Ordering};
use rayon::prelude::*;
use super::{
CompressionMethod, MAGIC, MAX_VERSION, PATH_LENGTH, PakPhase, PakProgress, TABLE_ENTRY_SIZE,
};
use crate::error::{Error, Result};
/// Progress callback invoked while writing a pak archive.
///
/// Must be `Sync + Send` because it is invoked from rayon worker threads
/// during the parallel compression phase.
pub type WriteProgressCallback<'a> = &'a (dyn Fn(&PakProgress) + Sync + Send);
/// A file discovered on disk, staged for packing.
struct FileEntry {
    // Path relative to the root directory handed to `LspkWriter::new`.
    relative_path: PathBuf,
    // Full uncompressed file contents, read eagerly at collection time.
    data: Vec<u8>,
}
/// Bookkeeping for a blob already written to the output file; used to build
/// the trailing file table.
struct WrittenEntry {
    // Relative path recorded in the table.
    path: PathBuf,
    // Byte offset of the blob within the archive (stored as 6 bytes in the table).
    offset: u64,
    // On-disk (possibly compressed) size of the blob.
    size_compressed: u32,
    // Original size of the file before compression.
    size_decompressed: u32,
    // Compression method applied to this blob.
    compression: CompressionMethod,
}
/// Result of the parallel compression phase, awaiting serialization.
struct CompressedEntry {
    // Relative path of the source file.
    path: PathBuf,
    // Compressed bytes (or a verbatim copy when `CompressionMethod::None`).
    compressed_data: Vec<u8>,
    // Original size of the file before compression.
    size_decompressed: u32,
}
/// Builder that packs a directory tree into an LSPK archive.
///
/// All file contents are held in memory between collection and writing.
pub struct LspkWriter {
    // Files gathered from the root directory, with their full contents.
    files: Vec<FileEntry>,
    // Archive format version written into the header.
    version: u32,
    // Per-file compression method (the file table is always LZ4-compressed).
    compression: CompressionMethod,
}
impl LspkWriter {
/// Creates a writer over every regular file under `root_path`, defaulting to
/// the newest supported format version and LZ4 compression.
///
/// # Errors
/// Propagates any I/O or path error from scanning the directory tree.
pub fn new(root_path: impl Into<PathBuf>) -> Result<Self> {
    let root = root_path.into();
    Ok(Self {
        files: Self::collect_files(&root)?,
        version: MAX_VERSION,
        compression: CompressionMethod::Lz4,
    })
}
#[must_use]
pub fn with_compression(mut self, compression: CompressionMethod) -> Self {
self.compression = compression;
self
}
/// Recursively gathers every regular file under `root`, reading each file's
/// full contents into memory.
///
/// Symlinks are never followed and `.DS_Store` entries are skipped. Results
/// are sorted by relative path so the produced archive layout is
/// deterministic regardless of the platform-specific `read_dir` ordering.
///
/// # Errors
/// Returns an I/O error if any directory or file cannot be read, or
/// `Error::InvalidPath` if an entry is somehow not under `root`.
fn collect_files(root: &Path) -> Result<Vec<FileEntry>> {
    let mut files = Vec::new();
    let mut pending = VecDeque::from([root.to_path_buf()]);
    // Breadth-first walk of the directory tree.
    while let Some(dir) = pending.pop_front() {
        for entry in std::fs::read_dir(&dir)? {
            let entry = entry?;
            let file_type = entry.file_type()?;
            let path = entry.path();
            // Skip symlinks (avoids cycles / duplicated content) and
            // macOS Finder metadata files.
            if file_type.is_symlink() || entry.file_name() == ".DS_Store" {
                continue;
            }
            if file_type.is_dir() {
                pending.push_back(path);
            } else {
                let relative_path = path
                    .strip_prefix(root)
                    .map_err(|_| Error::InvalidPath(path.display().to_string()))?
                    .to_path_buf();
                let data = std::fs::read(&path)?;
                files.push(FileEntry {
                    relative_path,
                    data,
                });
            }
        }
    }
    // Fix: `read_dir` yields entries in a platform/filesystem dependent
    // order, which made the archive bytes non-reproducible. Sort by
    // relative path for deterministic output.
    files.sort_by(|a, b| a.relative_path.cmp(&b.relative_path));
    Ok(files)
}
#[allow(dead_code)] pub fn write(self, output_path: impl AsRef<Path>) -> Result<()> {
self.write_with_progress(output_path, &|_| {})
}
pub fn write_with_progress(
self,
output_path: impl AsRef<Path>,
progress: WriteProgressCallback,
) -> Result<()> {
let output_path = output_path.as_ref();
if let Some(parent) = output_path.parent() {
std::fs::create_dir_all(parent)?;
}
let total_files = self.files.len();
let processed = AtomicUsize::new(0);
progress(&PakProgress {
phase: PakPhase::CompressingFiles,
current: 0,
total: total_files,
current_file: None,
});
let compression_results: Vec<std::result::Result<CompressedEntry, String>> = self
.files
.par_iter()
.map(|file| {
let file_name = file.relative_path.file_name().map_or_else(
|| file.relative_path.to_string_lossy().to_string(),
|n| n.to_string_lossy().to_string(),
);
let current = processed.fetch_add(1, Ordering::SeqCst) + 1;
progress(&PakProgress {
phase: PakPhase::CompressingFiles,
current,
total: total_files,
current_file: Some(file_name.clone()),
});
let size_decompressed = file.data.len();
let size_decompressed: u32 = size_decompressed.try_into().map_err(|_| {
format!("File {file_name} is too large: {size_decompressed} bytes")
})?;
let compressed_data = match self.compression {
CompressionMethod::None => file.data.clone(),
CompressionMethod::Lz4 => lz4_flex::block::compress(&file.data),
CompressionMethod::Zlib => {
use flate2::Compression;
use flate2::write::ZlibEncoder;
let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
encoder
.write_all(&file.data)
.map_err(|e| format!("Failed to compress {file_name}: {e}"))?;
encoder.finish().map_err(|e| {
format!("Failed to finish compression for {file_name}: {e}")
})?
}
};
Ok(CompressedEntry {
path: file.relative_path.clone(),
compressed_data,
size_decompressed,
})
})
.collect();
let mut compressed_entries = Vec::with_capacity(total_files);
for result in compression_results {
match result {
Ok(entry) => compressed_entries.push(entry),
Err(e) => return Err(Error::ConversionError(e)),
}
}
progress(&PakProgress {
phase: PakPhase::WritingFiles,
current: total_files,
total: total_files,
current_file: None,
});
let mut output = OpenOptions::new()
.create(true)
.truncate(true)
.write(true)
.open(output_path)?;
output.write_all(&MAGIC)?;
output.write_all(&self.version.to_le_bytes())?;
output.write_all(&0u64.to_le_bytes())?;
let mut written_entries = Vec::with_capacity(compressed_entries.len());
for entry in compressed_entries {
let size_compressed: u32 = entry.compressed_data.len().try_into().map_err(|_| {
let path_display = entry.path.display();
let len = entry.compressed_data.len();
Error::ConversionError(format!(
"Compressed file {path_display} is too large: {len} bytes"
))
})?;
let offset = output.stream_position()?;
output.write_all(&entry.compressed_data)?;
written_entries.push(WrittenEntry {
path: entry.path,
offset,
size_compressed,
size_decompressed: entry.size_decompressed,
compression: self.compression,
});
}
let footer_offset = output.stream_position()?;
let num_files: u32 = written_entries.len().try_into().map_err(|_| {
let count = written_entries.len();
Error::ConversionError(format!("Too many files: {count}"))
})?;
output.write_all(&num_files.to_le_bytes())?;
progress(&PakProgress {
phase: PakPhase::WritingTable,
current: total_files,
total: total_files,
current_file: None,
});
let mut table_data = Vec::with_capacity(TABLE_ENTRY_SIZE * written_entries.len());
for entry in &written_entries {
let entry_start = table_data.len();
let path_bytes = entry.path.as_os_str().as_encoded_bytes();
table_data.extend_from_slice(path_bytes);
table_data.resize(entry_start + PATH_LENGTH, 0);
let offset_bytes = entry.offset.to_le_bytes();
table_data.extend_from_slice(&offset_bytes[0..6]);
table_data.push(0);
table_data.push(entry.compression.to_flags());
table_data.extend_from_slice(&entry.size_compressed.to_le_bytes());
table_data.extend_from_slice(&entry.size_decompressed.to_le_bytes());
}
let compressed_table = lz4_flex::block::compress(&table_data);
let table_size: u32 = compressed_table.len().try_into().map_err(|_| {
let len = compressed_table.len();
Error::ConversionError(format!("File table too large: {len} bytes"))
})?;
output.write_all(&table_size.to_le_bytes())?;
output.write_all(&compressed_table)?;
output.seek(SeekFrom::Start(8))?;
output.write_all(&footer_offset.to_le_bytes())?;
progress(&PakProgress {
phase: PakPhase::Complete,
current: total_files,
total: total_files,
current_file: None,
});
Ok(())
}
}