use crate::{
    Archive, ArchiveBuilder, Error, ListfileOption, Result,
    compression::{self, CompressionMethod, compress},
    crypto::{encrypt_block, hash_string, hash_type},
    header::FormatVersion,
    special_files::{AttributeFlags, Attributes, FileAttributes},
    tables::{BetHeader, BlockEntry, BlockTable, HashEntry, HashTable, HetHeader, HiBlockTable},
};
use bytes::Bytes;
use std::collections::HashMap;
use std::fs::{File, OpenOptions};
use std::io::{Read, Seek, SeekFrom, Write};
use std::path::{Path, PathBuf};

fn generate_anonymous_filename(hash: u32) -> String {
    format!("File{:08X}.unknown", hash)
}

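/// Options controlling how a file is added to a `MutableArchive`.
///
/// The builder methods can be chained; `Default` gives Zlib compression, no
/// encryption, and replace-on-conflict. A minimal sketch (import paths are an
/// assumption, hence `ignore`):
///
/// ```ignore
/// let options = AddFileOptions::new()
///     .compression(CompressionMethod::BZip2)
///     .encrypt()
///     .locale(0x409) // English (US)
///     .replace_existing(false);
/// ```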
#[derive(Debug, Clone)]
pub struct AddFileOptions {
    /// Compression method applied to the file data.
    pub compression: CompressionMethod,
    /// Encrypt the file data with a key derived from the file name.
    pub encrypt: bool,
    /// Adjust the encryption key by the file's block position (FIX_KEY).
    pub fix_key: bool,
    /// Replace an existing file with the same name instead of failing.
    pub replace_existing: bool,
    /// Locale code for the hash table entry (0 = neutral).
    pub locale: u16,
    /// Platform code for the hash table entry (0 = default).
    pub platform: u8,
}

impl Default for AddFileOptions {
    fn default() -> Self {
        Self {
            compression: CompressionMethod::Zlib,
            encrypt: false,
            fix_key: false,
            replace_existing: true,
            locale: 0,
            platform: 0,
        }
    }
}

impl AddFileOptions {
    pub fn new() -> Self {
        Self::default()
    }

    pub fn compression(mut self, method: CompressionMethod) -> Self {
        self.compression = method;
        self
    }

    pub fn encrypt(mut self) -> Self {
        self.encrypt = true;
        self
    }

    pub fn fix_key(mut self) -> Self {
        self.fix_key = true;
        // FIX_KEY only makes sense for encrypted files, so enable encryption too.
        self.encrypt = true;
        self
    }

    pub fn replace_existing(mut self, replace: bool) -> Self {
        self.replace_existing = replace;
        self
    }

    pub fn locale(mut self, locale: u16) -> Self {
        self.locale = locale;
        self
    }
}

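/// An existing MPQ archive opened for in-place modification.
///
/// Files can be added, removed, and renamed. File data is written to disk
/// immediately, while table and header updates are applied on `flush` (also
/// attempted on drop). A minimal usage sketch (the path is a placeholder and
/// import paths are an assumption, hence `ignore`):
///
/// ```ignore
/// let mut archive = MutableArchive::open("patch.mpq")?;
/// archive.add_file_data(b"hello world", "docs\\readme.txt", AddFileOptions::new())?;
/// archive.flush()?;
/// ```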
#[derive(Debug)]
pub struct MutableArchive {
    /// Path to the archive on disk.
    _path: PathBuf,
    /// Read-only view of the archive used for lookups and fallback reads.
    archive: Archive,
    /// Read/write handle to the archive file.
    file: File,
    /// Mutable copy of the hash table, loaded lazily on first modification.
    hash_table: Option<HashTable>,
    /// Mutable copy of the block table, loaded lazily on first modification.
    block_table: Option<BlockTable>,
    /// Mutable copy of the hi-block table (V2+ archives).
    _hi_block_table: Option<HiBlockTable>,
    /// Whether there are unflushed modifications.
    dirty: bool,
    /// Cached offset where the next file's data will be written.
    next_file_offset: Option<u64>,
    /// Block indices of special files such as (listfile) and (attributes).
    _special_file_blocks: HashMap<String, u32>,
    /// Whether (attributes) needs to be regenerated on flush.
    attributes_dirty: bool,
    /// Block index -> file name for entries modified since the last flush.
    modified_blocks: HashMap<u32, String>,
    /// New table positions (relative to the archive start) recorded by
    /// `write_tables_v3_plus` so `update_header` can patch the header.
    updated_het_pos: Option<u64>,
    updated_bet_pos: Option<u64>,
    updated_hash_table_pos: Option<u64>,
    updated_block_table_pos: Option<u64>,
}

impl MutableArchive {
    /// Opens an existing archive for modification.
    pub fn open<P: AsRef<Path>>(path: P) -> Result<Self> {
        let path = path.as_ref().to_path_buf();

        let archive = Archive::open(&path)?;

        let file = OpenOptions::new().read(true).write(true).open(&path)?;

        Ok(Self {
            _path: path,
            archive,
            file,
            hash_table: None,
            block_table: None,
            _hi_block_table: None,
            dirty: false,
            next_file_offset: None,
            _special_file_blocks: HashMap::new(),
            attributes_dirty: false,
            modified_blocks: HashMap::new(),
            updated_het_pos: None,
            updated_bet_pos: None,
            updated_hash_table_pos: None,
            updated_block_table_pos: None,
        })
    }

    /// Returns a read-only reference to the underlying archive.
    pub fn archive(&self) -> &Archive {
        &self.archive
    }

    /// Returns a mutable reference to the underlying archive.
    pub fn archive_mut(&mut self) -> &mut Archive {
        &mut self.archive
    }

    /// Returns the sizes of the in-memory block and hash tables, if loaded.
    pub fn debug_state(&self) -> (Option<usize>, Option<usize>) {
        let block_count = self.block_table.as_ref().map(|t| t.entries().len());
        let hash_count = self.hash_table.as_ref().map(|t| t.size());
        (block_count, hash_count)
    }

    /// Reads a file, preferring pending (unflushed) data over the on-disk archive.
    pub fn read_file(&mut self, name: &str) -> Result<Vec<u8>> {
        match self.read_current_file(name) {
            Ok(data) => Ok(data),
            Err(Error::FileNotFound(_)) => {
                // Fall back to the original archive contents.
                self.archive.read_file(name)
            }
            Err(e) => Err(e),
        }
    }

    /// Lists the files in the archive.
    pub fn list(&mut self) -> Result<Vec<crate::FileEntry>> {
        self.archive.list()
    }

    /// Looks up a file by name and returns its metadata, if present.
    pub fn find_file(&mut self, name: &str) -> Result<Option<crate::FileInfo>> {
        let normalized_name = name.replace('/', "\\");

        self.ensure_tables_loaded()?;

        match self.find_file_entry(&normalized_name)? {
            Some((hash_index, hash_entry)) => {
                let block_table = self
                    .block_table
                    .as_ref()
                    .or_else(|| self.archive.block_table())
                    .ok_or_else(|| Error::InvalidFormat("No block table".to_string()))?;

                let block_index = hash_entry.block_index as usize;
                if let Some(block_entry) = block_table.entries().get(block_index) {
                    let file_pos = self.archive.archive_offset() + block_entry.file_pos as u64;
                    Ok(Some(crate::FileInfo {
                        filename: normalized_name,
                        hash_index,
                        block_index,
                        file_pos,
                        compressed_size: block_entry.compressed_size as u64,
                        file_size: block_entry.file_size as u64,
                        flags: block_entry.flags,
                        locale: hash_entry.locale,
                    }))
                } else {
                    Ok(None)
                }
            }
            None => Ok(None),
        }
    }

    /// Verifies the archive's digital signature, if any.
    pub fn verify_signature(&mut self) -> Result<crate::SignatureStatus> {
        self.archive.verify_signature()
    }

    /// Loads the (attributes) special file into the underlying archive.
    pub fn load_attributes(&mut self) -> Result<()> {
        self.archive.load_attributes()
    }

    /// Adds a file from disk under the given archive name.
    pub fn add_file<P: AsRef<Path>>(
        &mut self,
        source_path: P,
        archive_name: &str,
        options: AddFileOptions,
    ) -> Result<()> {
        let mut file_data = Vec::new();
        File::open(source_path)?.read_to_end(&mut file_data)?;

        self.add_file_data(&file_data, archive_name, options)
    }

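    /// Adds a file from an in-memory buffer under the given archive name.
    ///
    /// Forward slashes in `archive_name` are normalized to backslashes. A
    /// minimal sketch, assuming `archive` is an open `MutableArchive` and the
    /// import paths match this crate's layout (hence `ignore`):
    ///
    /// ```ignore
    /// let options = AddFileOptions::new()
    ///     .compression(CompressionMethod::Zlib)
    ///     .replace_existing(true);
    /// archive.add_file_data(b"key=value\n", "config\\settings.ini", options)?;
    /// ```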
    pub fn add_file_data(
        &mut self,
        data: &[u8],
        archive_name: &str,
        options: AddFileOptions,
    ) -> Result<()> {
        let archive_name = archive_name.replace('/', "\\");

        self.ensure_tables_loaded()?;

        let is_internal_update = archive_name == "(listfile)" || archive_name == "(attributes)";

        let existing_block_index =
            if let Some((hash_index, entry)) = self.find_file_entry(&archive_name)? {
                if !options.replace_existing {
                    return Err(Error::FileExists(archive_name));
                }
                if let Some(hash_table) = &mut self.hash_table {
                    hash_table.get_mut(hash_index).unwrap().block_index = HashEntry::EMPTY_DELETED;
                }

                if is_internal_update {
                    // Special files reuse their existing block slot.
                    Some(entry.block_index)
                } else {
                    None
                }
            } else {
                None
            };

        let block_index = if let Some(existing_idx) = existing_block_index {
            existing_idx
        } else {
            self.block_table.as_ref().unwrap().entries().len() as u32
        };

        let file_offset = self.get_archive_end_offset()?;

        let (compressed_data, compressed_size, flags) =
            self.prepare_file_data(data, &archive_name, &options)?;

        self.file.seek(SeekFrom::Start(file_offset))?;
        self.file.write_all(&compressed_data)?;

        // Advance the write cursor, keeping files aligned to 512-byte boundaries.
        let next_offset = file_offset + compressed_data.len() as u64;
        let aligned_next = (next_offset + 511) & !511;
        self.next_file_offset = Some(aligned_next);

        let relative_pos = (file_offset - self.archive.archive_offset()) as u32;
        let block_entry = BlockEntry {
            file_pos: relative_pos,
            compressed_size: compressed_size as u32,
            file_size: data.len() as u32,
            flags,
        };

        if let Some(block_table) = &mut self.block_table {
            if existing_block_index.is_some() {
                if let Some(entry) = block_table.get_mut(block_index as usize) {
                    *entry = block_entry;
                }
            } else {
                // Grow the block table by one entry.
                let old_entries = block_table.entries();
                let new_size = old_entries.len() + 1;
                let mut new_table = BlockTable::new_mut(new_size)?;

                for (i, entry) in old_entries.iter().enumerate() {
                    if let Some(new_entry) = new_table.get_mut(i) {
                        *new_entry = *entry;
                    }
                }

                if let Some(new_entry) = new_table.get_mut(new_size - 1) {
                    *new_entry = block_entry;
                }

                *block_table = new_table;
            }
        }

        self.add_to_hash_table(&archive_name, block_index, options.locale)?;

        if archive_name != "(attributes)" {
            self.modified_blocks
                .insert(block_index, archive_name.clone());
        }

        if archive_name != "(listfile)" && !is_internal_update {
            self.update_listfile(&archive_name)?;
        }

        if archive_name != "(attributes)" {
            self.attributes_dirty = true;
        }

        self.dirty = true;
        Ok(())
    }

    /// Reads a file using the in-memory tables, falling back to the archive
    /// for compressed or encrypted data.
    fn read_current_file(&mut self, filename: &str) -> Result<Vec<u8>> {
        if let Some((_, entry)) = self.find_file_entry(filename)? {
            let block_idx = entry.block_index as usize;
            if let Some(block_table) = &self.block_table
                && let Some(block) = block_table.entries().get(block_idx)
            {
                let file_pos = self.archive.archive_offset() + block.file_pos as u64;
                self.file.seek(SeekFrom::Start(file_pos))?;

                let mut data = vec![0u8; block.compressed_size as usize];
                self.file.read_exact(&mut data)?;

                if block.is_compressed() || block.is_encrypted() {
                    // Let the archive handle decompression/decryption.
                    return self.archive.read_file(filename);
                }

                data.truncate(block.file_size as usize);
                return Ok(data);
            }
        }

        self.archive.read_file(filename)
    }

    /// Removes a file from the archive by marking its hash entry as deleted.
    pub fn remove_file(&mut self, archive_name: &str) -> Result<()> {
        let archive_name = archive_name.replace('/', "\\");

        self.ensure_tables_loaded()?;

        let (hash_index, _) = self
            .find_file_entry(&archive_name)?
            .ok_or_else(|| Error::FileNotFound(archive_name.clone()))?;

        if let Some(hash_table) = &mut self.hash_table {
            hash_table.get_mut(hash_index).unwrap().block_index = HashEntry::EMPTY_DELETED;
        }

        self.remove_from_listfile(&archive_name)?;

        self.attributes_dirty = true;

        self.dirty = true;
        Ok(())
    }

    /// Renames a file by deleting its old hash entry and adding a new one
    /// that points at the same block.
    pub fn rename_file(&mut self, old_name: &str, new_name: &str) -> Result<()> {
        let old_name = old_name.replace('/', "\\");
        let new_name = new_name.replace('/', "\\");

        self.ensure_tables_loaded()?;

        let (old_hash_index, old_entry) = self
            .find_file_entry(&old_name)?
            .ok_or_else(|| Error::FileNotFound(old_name.clone()))?;

        if self.find_file_entry(&new_name)?.is_some() {
            return Err(Error::FileExists(new_name));
        }

        let block_index = old_entry.block_index;
        let locale = old_entry.locale;

        if let Some(hash_table) = &mut self.hash_table {
            hash_table.get_mut(old_hash_index).unwrap().block_index = HashEntry::EMPTY_DELETED;
        }

        self.add_to_hash_table(&new_name, block_index, locale)?;

        self.remove_from_listfile(&old_name)?;
        self.update_listfile(&new_name)?;

        self.attributes_dirty = true;

        self.dirty = true;
        Ok(())
    }

    /// Rebuilds the archive into a temporary file, dropping deleted entries,
    /// then replaces the original archive with the compacted copy.
    pub fn compact(&mut self) -> Result<()> {
        use std::fs;
        use tempfile::NamedTempFile;

        self.ensure_tables_loaded()?;

        let archive_dir = self
            ._path
            .parent()
            .ok_or_else(|| Error::InvalidFormat("Invalid archive path".to_string()))?;
        let temp_file = NamedTempFile::new_in(archive_dir)?;
        let temp_path = temp_file.path().to_path_buf();

        let header = self.archive.header();
        let format_version = header.format_version;

        let mut builder = ArchiveBuilder::new()
            .version(format_version)
            .listfile_option(ListfileOption::Generate);

        let file_list = self.list().ok();

        // Collect every valid (hash entry, block entry) pair and resolve its
        // filename from the listfile where possible.
        let mut files_to_copy = Vec::new();
        if let Some(hash_table) = &self.hash_table {
            for (hash_idx, entry) in hash_table.entries().iter().enumerate() {
                if !entry.is_valid() || entry.is_deleted() {
                    continue;
                }

                let block_idx = entry.block_index as usize;
                if let Some(block_table) = &self.block_table
                    && let Some(block) = block_table.entries().get(block_idx)
                {
                    let filename = if let Some(ref list) = file_list {
                        list.iter()
                            .find(|e| {
                                let name_hash1 = hash_string(&e.name, hash_type::NAME_A);
                                let name_hash2 = hash_string(&e.name, hash_type::NAME_B);
                                entry.name_1 == name_hash1 && entry.name_2 == name_hash2
                            })
                            .map(|e| e.name.clone())
                    } else {
                        None
                    };

                    let filename = filename.unwrap_or_else(|| {
                        generate_anonymous_filename(
                            ((entry.name_1 as u64) << 32 | entry.name_2 as u64) as u32,
                        )
                    });

                    files_to_copy.push((hash_idx, block_idx, filename, *entry, *block));
                }
            }
        }

        for (_, _, filename, hash_entry, block_entry) in &files_to_copy {
            // Special files are regenerated by the builder, not copied.
            if filename == "(listfile)" || filename == "(attributes)" || filename == "(signature)" {
                continue;
            }

            let file_data = match self.read_file(filename) {
                Ok(data) => data,
                Err(_) => {
                    log::warn!("Skipping file {filename} during compaction (read error)");
                    continue;
                }
            };

            let compression = if block_entry.is_compressed() {
                compression::flags::ZLIB
            } else {
                0
            };

            let encrypt = block_entry.is_encrypted();

            builder = builder.add_file_data_with_options(
                file_data,
                filename,
                compression,
                encrypt,
                hash_entry.locale,
            );
        }

        builder.build(&temp_path)?;

        // Release our write handle on the original file before renaming over it.
        let _ = std::mem::replace(&mut self.file, File::open(&temp_path)?);

        fs::rename(&temp_path, &self._path)?;

        // Reopen the compacted archive and reset all cached state.
        self.archive = Archive::open(&self._path)?;
        self.file = OpenOptions::new()
            .read(true)
            .write(true)
            .open(&self._path)?;

        self.hash_table = None;
        self.block_table = None;
        self._hi_block_table = None;
        self.dirty = false;
        self.next_file_offset = None;
        self.attributes_dirty = false;
        self.modified_blocks.clear();

        Ok(())
    }

    /// Writes pending changes (attributes, tables, header) back to disk.
    pub fn flush(&mut self) -> Result<()> {
        if !self.dirty {
            return Ok(());
        }

        if self.attributes_dirty {
            self.update_attributes()?;
        }

        self.write_tables()?;

        self.update_header()?;

        self.file.sync_all()?;
        self.dirty = false;

        Ok(())
    }

    /// Regenerates the (attributes) special file for blocks modified since
    /// the last flush.
    fn update_attributes(&mut self) -> Result<()> {
        use std::time::{SystemTime, UNIX_EPOCH};

        // Nothing to do if the archive has no (attributes) file.
        if self.archive.find_file("(attributes)")?.is_none() {
            return Ok(());
        }

        let attrs_data = self.read_current_file("(attributes)")?;
        let block_count = self
            .block_table
            .as_ref()
            .map(|t| t.entries().len())
            .unwrap_or_else(|| {
                self.archive
                    .block_table()
                    .map(|t| t.entries().len())
                    .unwrap_or(0)
            });

        let mut attrs = match Attributes::parse(&Bytes::from(attrs_data), block_count) {
            Ok(a) => a,
            Err(_) => {
                // Unparseable attributes: start from a fresh CRC32 + filetime set.
                Attributes {
                    version: Attributes::EXPECTED_VERSION,
                    flags: AttributeFlags::new(AttributeFlags::CRC32 | AttributeFlags::FILETIME),
                    file_attributes: vec![FileAttributes::new(); block_count],
                    crc32: None,
                    md5: None,
                    filetime: None,
                }
            }
        };

        // Keep the attribute list in sync with the block table size.
        while attrs.file_attributes.len() < block_count {
            attrs.file_attributes.push(FileAttributes::new());
        }

        if attrs.file_attributes.len() > block_count {
            attrs.file_attributes.truncate(block_count);
        }

        // Current time as a Windows FILETIME (100 ns ticks since 1601-01-01).
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();
        let filetime = (now + 11644473600) * 10_000_000;

        let modified_files: Vec<(u32, String)> = self
            .modified_blocks
            .iter()
            .map(|(&idx, name)| (idx, name.clone()))
            .collect();

        for (block_idx, filename) in modified_files {
            let block_idx = block_idx as usize;
            if block_idx >= block_count {
                continue;
            }

            if attrs.flags.has_filetime() {
                attrs.file_attributes[block_idx].filetime = Some(filetime);
            }

            if attrs.flags.has_crc32() && filename != "(listfile)" {
                match self.read_current_file(&filename) {
                    Ok(data) => {
                        let crc = crc32fast::hash(&data);
                        attrs.file_attributes[block_idx].crc32 = Some(crc);
                    }
                    Err(_) => {
                        if attrs.file_attributes[block_idx].crc32.is_none() {
                            attrs.file_attributes[block_idx].crc32 = Some(0);
                        }
                    }
                }
            }

            if attrs.flags.has_md5() && filename != "(listfile)" {
                if attrs.file_attributes[block_idx].md5.is_none() {
                    attrs.file_attributes[block_idx].md5 = Some([0u8; 16]);
                }
            }
        }

        // Give untouched files a filetime as well so the table stays complete.
        if attrs.flags.has_filetime()
            && let Some(hash_table) = &self.hash_table
        {
            for entry in hash_table.entries() {
                if !entry.is_valid() {
                    continue;
                }

                let block_idx = entry.block_index as usize;
                if block_idx >= block_count {
                    continue;
                }

                if !self.modified_blocks.contains_key(&entry.block_index) {
                    attrs.file_attributes[block_idx].filetime = Some(filetime);
                }
            }
        }

        let new_attrs_data = attrs.to_bytes()?;

        let options = AddFileOptions::new()
            .compression(CompressionMethod::None)
            .replace_existing(true);

        self.add_file_data(&new_attrs_data, "(attributes)", options)?;

        self.attributes_dirty = false;
        Ok(())
    }

    /// Copies the archive's hash and block tables into mutable, in-memory tables.
    fn ensure_tables_loaded(&mut self) -> Result<()> {
        if self.hash_table.is_none() {
            if let Some(table) = self.archive.hash_table() {
                let entries = table.entries();
                let mut new_table = HashTable::new_mut(entries.len())?;
                for (i, entry) in entries.iter().enumerate() {
                    if let Some(new_entry) = new_table.get_mut(i) {
                        *new_entry = *entry;
                    }
                }
                self.hash_table = Some(new_table);
            } else {
                return Err(Error::InvalidFormat("No hash table in archive".to_string()));
            }
        }

        if self.block_table.is_none() {
            if let Some(table) = self.archive.block_table() {
                let entries = table.entries();
                let mut new_table = BlockTable::new_mut(entries.len())?;
                for (i, entry) in entries.iter().enumerate() {
                    if let Some(new_entry) = new_table.get_mut(i) {
                        *new_entry = *entry;
                    }
                }
                self.block_table = Some(new_table);
            } else {
                return Err(Error::InvalidFormat(
                    "No block table in archive".to_string(),
                ));
            }
        }

        Ok(())
    }

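    /// Locates a file's hash table entry using the standard MPQ scheme: the
    /// TABLE_OFFSET hash selects the starting slot, and the NAME_A/NAME_B
    /// hashes identify the file while probing linearly. Rough shape of the
    /// probe loop (illustrative pseudocode only):
    ///
    /// ```text
    /// index = hash(name, TABLE_OFFSET) & (table_size - 1)
    /// loop:
    ///     entry matches NAME_A and NAME_B -> found
    ///     entry was never used            -> not present
    ///     index = (index + 1) & (table_size - 1), stop after a full wrap
    /// ```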
    fn find_file_entry(&self, archive_name: &str) -> Result<Option<(usize, HashEntry)>> {
        let hash_table = self
            .hash_table
            .as_ref()
            .or_else(|| self.archive.hash_table())
            .ok_or_else(|| Error::InvalidFormat("No hash table".to_string()))?;

        let name_hash1 = hash_string(archive_name, hash_type::NAME_A);
        let name_hash2 = hash_string(archive_name, hash_type::NAME_B);
        let start_index = hash_string(archive_name, hash_type::TABLE_OFFSET) as usize;

        // MPQ hash tables are a power of two in size, so masking wraps the index.
        let table_size = hash_table.entries().len();
        let mut index = start_index & (table_size - 1);

        loop {
            let entry = &hash_table.entries()[index];

            if entry.is_valid() && entry.name_1 == name_hash1 && entry.name_2 == name_hash2 {
                return Ok(Some((index, *entry)));
            }

            if entry.is_empty() {
                return Ok(None);
            }

            index = (index + 1) & (table_size - 1);

            // Wrapped all the way around: the file is not in the table.
            if index == (start_index & (table_size - 1)) {
                return Ok(None);
            }
        }
    }

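    /// Finds the first free offset past the hash table, block table, and all
    /// existing file data, rounded up to a 512-byte boundary. The rounding
    /// uses the usual power-of-two trick, e.g. `(1000 + 511) & !511 == 1024`.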
    fn get_archive_end_offset(&mut self) -> Result<u64> {
        if let Some(offset) = self.next_file_offset {
            return Ok(offset);
        }

        let header = self.archive.header();
        let archive_offset = self.archive.archive_offset();

        // Both classic tables are 16 bytes per entry.
        let hash_table_end =
            archive_offset + header.get_hash_table_pos() + (header.hash_table_size * 16) as u64;
        let block_table_end =
            archive_offset + header.get_block_table_pos() + (header.block_table_size * 16) as u64;

        let mut max_file_end = 0u64;
        if let Some(block_table) = self.archive.block_table() {
            for entry in block_table.entries() {
                if entry.flags & BlockEntry::FLAG_EXISTS != 0 {
                    let file_end =
                        archive_offset + entry.file_pos as u64 + entry.compressed_size as u64;
                    max_file_end = max_file_end.max(file_end);
                }
            }
        }

        if let Some(block_table) = &self.block_table {
            for entry in block_table.entries() {
                if entry.flags & BlockEntry::FLAG_EXISTS != 0 {
                    let file_end =
                        archive_offset + entry.file_pos as u64 + entry.compressed_size as u64;
                    max_file_end = max_file_end.max(file_end);
                }
            }
        }

        let end_offset = hash_table_end.max(block_table_end).max(max_file_end);
        let aligned_offset = (end_offset + 511) & !511;
        self.next_file_offset = Some(aligned_offset);

        Ok(aligned_offset)
    }

    /// Compresses and/or encrypts the file data and returns it together with
    /// its stored size and block flags.
    fn prepare_file_data(
        &self,
        data: &[u8],
        archive_name: &str,
        options: &AddFileOptions,
    ) -> Result<(Vec<u8>, usize, u32)> {
        let mut flags = BlockEntry::FLAG_EXISTS;
        let mut output_data = data.to_vec();

        if options.compression != CompressionMethod::None {
            let compression_flag = match options.compression {
                CompressionMethod::None => 0,
                CompressionMethod::Huffman => compression::flags::HUFFMAN,
                CompressionMethod::Zlib => compression::flags::ZLIB,
                CompressionMethod::Implode => compression::flags::IMPLODE,
                CompressionMethod::PKWare => compression::flags::PKWARE,
                CompressionMethod::BZip2 => compression::flags::BZIP2,
                CompressionMethod::Sparse => compression::flags::SPARSE,
                CompressionMethod::AdpcmMono => compression::flags::ADPCM_MONO,
                CompressionMethod::AdpcmStereo => compression::flags::ADPCM_STEREO,
                CompressionMethod::Lzma => compression::flags::LZMA,
                CompressionMethod::Multiple(flags) => flags,
            };

            // Only keep the compressed form if it is actually smaller.
            let compressed = compress(data, compression_flag)?;
            if compressed.len() < data.len() {
                output_data = compressed;
                flags |= BlockEntry::FLAG_COMPRESS;
            }
        }

        if options.encrypt {
            // Both branches currently use the same name-derived key; in the MPQ
            // format the FIX_KEY variant additionally adjusts the key by the
            // block position and file size, which is signalled here only via
            // FLAG_FIX_KEY below.
            let key = if options.fix_key {
                hash_string(archive_name, hash_type::FILE_KEY)
            } else {
                hash_string(archive_name, hash_type::FILE_KEY)
            };

            let _original_len = output_data.len();

            // Encryption operates on u32 words, so pad to a multiple of 4 bytes.
            while !output_data.len().is_multiple_of(4) {
                output_data.push(0);
            }

            let mut u32_buffer: Vec<u32> = output_data
                .chunks_exact(4)
                .map(|chunk| u32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]))
                .collect();

            encrypt_block(&mut u32_buffer, key);

            output_data.clear();
            for &value in &u32_buffer {
                output_data.extend_from_slice(&value.to_le_bytes());
            }

            flags |= BlockEntry::FLAG_ENCRYPTED;
            if options.fix_key {
                flags |= BlockEntry::FLAG_FIX_KEY;
            }
        }

        // Files are written as a single unit rather than in sectors.
        flags |= BlockEntry::FLAG_SINGLE_UNIT;

        let output_len = output_data.len();
        Ok((output_data, output_len, flags))
    }

    /// Inserts a hash table entry for `filename`, probing linearly for a free
    /// (empty or deleted) slot.
    fn add_to_hash_table(&mut self, filename: &str, block_index: u32, locale: u16) -> Result<()> {
        let hash_table = self
            .hash_table
            .as_mut()
            .ok_or_else(|| Error::InvalidFormat("No hash table".to_string()))?;

        let table_offset = hash_string(filename, hash_type::TABLE_OFFSET);
        let name_a = hash_string(filename, hash_type::NAME_A);
        let name_b = hash_string(filename, hash_type::NAME_B);

        let table_size = hash_table.size() as u32;
        let mut index = table_offset & (table_size - 1);

        loop {
            let entry = hash_table.get_mut(index as usize).ok_or_else(|| {
                Error::InvalidFormat("Hash table index out of bounds".to_string())
            })?;

            if entry.is_empty() || entry.is_deleted() {
                *entry = HashEntry {
                    name_1: name_a,
                    name_2: name_b,
                    locale,
                    platform: 0,
                    block_index,
                };
                break;
            }

            index = (index + 1) & (table_size - 1);
        }

        Ok(())
    }

    /// Appends `filename` to the (listfile) if it is not already listed.
    fn update_listfile(&mut self, filename: &str) -> Result<()> {
        if self.archive.find_file("(listfile)")?.is_none() {
            return Ok(());
        }

        let mut current_content = match self.read_current_file("(listfile)") {
            Ok(data) => String::from_utf8_lossy(&data).to_string(),
            Err(_) => String::new(),
        };

        let filename_line = filename.to_string();
        if !current_content.contains(&filename_line) {
            if !current_content.ends_with('\n') && !current_content.is_empty() {
                current_content.push('\n');
            }
            current_content.push_str(&filename_line);
            current_content.push('\n');

            let options = AddFileOptions::new()
                .compression(CompressionMethod::None)
                .replace_existing(true);

            self.add_file_data(current_content.as_bytes(), "(listfile)", options)?;
        }

        Ok(())
    }

    /// Removes `filename` from the (listfile), rewriting it if anything changed.
    fn remove_from_listfile(&mut self, filename: &str) -> Result<()> {
        if self.archive.find_file("(listfile)")?.is_none() {
            return Ok(());
        }

        let current_content = match self.read_current_file("(listfile)") {
            Ok(data) => String::from_utf8_lossy(&data).to_string(),
            Err(_) => return Ok(()),
        };

        let lines: Vec<&str> = current_content
            .lines()
            .filter(|line| line.trim() != filename)
            .collect();

        let new_content = lines.join("\n");
        if new_content != current_content.trim() {
            let mut final_content = new_content;
            if !final_content.is_empty() {
                final_content.push('\n');
            }

            let options = AddFileOptions::new()
                .compression(CompressionMethod::None)
                .replace_existing(true);

            self.add_file_data(final_content.as_bytes(), "(listfile)", options)?;
        }

        Ok(())
    }

    /// Writes the (encrypted) hash and block tables back at the positions
    /// recorded in the header. V3+ archives are handled separately.
    fn write_tables(&mut self) -> Result<()> {
        let header = self.archive.header();

        if header.format_version >= FormatVersion::V3 {
            return self.write_tables_v3_plus();
        }

        let archive_offset = self.archive.archive_offset();

        if let Some(hash_table) = &self.hash_table {
            let hash_table_pos = archive_offset + header.hash_table_pos as u64;
            self.file.seek(SeekFrom::Start(hash_table_pos))?;

            // Serialize and encrypt the hash table with its well-known key.
            let mut table_data = Vec::new();
            for entry in hash_table.entries() {
                table_data.extend_from_slice(&entry.name_1.to_le_bytes());
                table_data.extend_from_slice(&entry.name_2.to_le_bytes());
                table_data.extend_from_slice(&entry.locale.to_le_bytes());
                table_data.extend_from_slice(&entry.platform.to_le_bytes());
                table_data.extend_from_slice(&entry.block_index.to_le_bytes());
            }

            let key = hash_string("(hash table)", hash_type::FILE_KEY);
            let mut u32_buffer: Vec<u32> = table_data
                .chunks_exact(4)
                .map(|chunk| u32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]))
                .collect();
            encrypt_block(&mut u32_buffer, key);

            for &value in &u32_buffer {
                self.file.write_all(&value.to_le_bytes())?;
            }
        }

        if let Some(block_table) = &self.block_table {
            let block_table_pos = archive_offset + header.block_table_pos as u64;
            self.file.seek(SeekFrom::Start(block_table_pos))?;

            // Serialize and encrypt the block table with its well-known key.
            let mut table_data = Vec::new();
            for entry in block_table.entries() {
                table_data.extend_from_slice(&entry.file_pos.to_le_bytes());
                table_data.extend_from_slice(&entry.compressed_size.to_le_bytes());
                table_data.extend_from_slice(&entry.file_size.to_le_bytes());
                table_data.extend_from_slice(&entry.flags.to_le_bytes());
            }

            let key = hash_string("(block table)", hash_type::FILE_KEY);
            let mut u32_buffer: Vec<u32> = table_data
                .chunks_exact(4)
                .map(|chunk| u32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]))
                .collect();
            encrypt_block(&mut u32_buffer, key);

            for &value in &u32_buffer {
                self.file.write_all(&value.to_le_bytes())?;
            }
        }

        Ok(())
    }

    /// For V3+ archives, writes HET, BET, hash, and block tables sequentially
    /// at the current end of file and records their new positions.
    fn write_tables_v3_plus(&mut self) -> Result<()> {
        let hash_table = self
            .hash_table
            .as_ref()
            .ok_or_else(|| Error::invalid_format("Hash table not loaded for V3+ table write"))?;
        let block_table = self
            .block_table
            .as_ref()
            .ok_or_else(|| Error::invalid_format("Block table not loaded for V3+ table write"))?;

        let current_pos = self.file.stream_position()?;
        let archive_offset = self.archive.archive_offset();

        let het_pos = current_pos - archive_offset;
        let (het_data, _het_header) = self.create_het_table_from_hash_table(hash_table)?;
        self.file.write_all(&het_data)?;

        let bet_pos = self.file.stream_position()? - archive_offset;
        let (bet_data, _bet_header) = self.create_bet_table_from_block_table(block_table)?;
        self.file.write_all(&bet_data)?;

        let hash_table_pos = self.file.stream_position()? - archive_offset;
        let mut table_data = Vec::new();
        for entry in hash_table.entries() {
            table_data.extend_from_slice(&entry.name_1.to_le_bytes());
            table_data.extend_from_slice(&entry.name_2.to_le_bytes());
            table_data.extend_from_slice(&entry.locale.to_le_bytes());
            table_data.extend_from_slice(&entry.platform.to_le_bytes());
            table_data.extend_from_slice(&entry.block_index.to_le_bytes());
        }

        let key = hash_string("(hash table)", hash_type::FILE_KEY);
        let mut u32_buffer: Vec<u32> = table_data
            .chunks_exact(4)
            .map(|chunk| u32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]))
            .collect();
        encrypt_block(&mut u32_buffer, key);

        for &value in &u32_buffer {
            self.file.write_all(&value.to_le_bytes())?;
        }

        let block_table_pos = self.file.stream_position()? - archive_offset;
        let mut table_data = Vec::new();
        for entry in block_table.entries() {
            table_data.extend_from_slice(&entry.file_pos.to_le_bytes());
            table_data.extend_from_slice(&entry.compressed_size.to_le_bytes());
            table_data.extend_from_slice(&entry.file_size.to_le_bytes());
            table_data.extend_from_slice(&entry.flags.to_le_bytes());
        }

        let key = hash_string("(block table)", hash_type::FILE_KEY);
        let mut u32_buffer: Vec<u32> = table_data
            .chunks_exact(4)
            .map(|chunk| u32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]))
            .collect();
        encrypt_block(&mut u32_buffer, key);

        for &value in &u32_buffer {
            self.file.write_all(&value.to_le_bytes())?;
        }

        // Remember the new positions so update_header() can patch the header.
        self.updated_het_pos = Some(het_pos);
        self.updated_bet_pos = Some(bet_pos);
        self.updated_hash_table_pos = Some(hash_table_pos);
        self.updated_block_table_pos = Some(block_table_pos);

        Ok(())
    }

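    /// Builds a HET (hash entry table) from the classic hash table. Real file
    /// names are not available at this point, so entries are hashed under
    /// generated placeholder names rather than their original names.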
    fn create_het_table_from_hash_table(
        &self,
        hash_table: &HashTable,
    ) -> Result<(Vec<u8>, HetHeader)> {
        use crate::crypto::het_hash;

        let mut file_count = 0u32;
        for entry in hash_table.entries() {
            if !entry.is_empty() {
                file_count += 1;
            }
        }

        // Size the HET hash table at roughly twice the file count (power of two).
        let hash_table_entries = (file_count * 2).max(16).next_power_of_two();

        let header = HetHeader {
            table_size: 0, // patched below once the full size is known
            max_file_count: file_count,
            hash_table_size: hash_table_entries,
            hash_entry_size: 8,
            total_index_size: hash_table_entries * Self::calculate_bits_needed(file_count as u64),
            index_size_extra: 0,
            index_size: Self::calculate_bits_needed(file_count as u64),
            block_table_size: 0,
        };

        let index_size = header.index_size;

        // 0xFF marks an empty HET slot; file indices start out as "invalid".
        let mut het_hash_table = vec![0xFFu8; hash_table_entries as usize];
        let file_indices_size = (header.total_index_size as usize).div_ceil(8);
        let mut file_indices = vec![0u8; file_indices_size];

        let invalid_index = (1u64 << index_size) - 1;
        for i in 0..hash_table_entries {
            self.write_bit_entry(&mut file_indices, i as usize, invalid_index, index_size)?;
        }

        let mut file_index = 0;
        for entry in hash_table.entries() {
            if !entry.is_empty() {
                let filename = generate_anonymous_filename(file_index);
                let hash_bits = 8;
                let (hash, name_hash1) = het_hash(&filename, hash_bits);
                let start_index = (hash % hash_table_entries as u64) as usize;

                // Linear probing to find a free HET slot.
                let mut current_index = start_index;
                loop {
                    if het_hash_table[current_index] == 0xFF {
                        het_hash_table[current_index] = name_hash1;
                        self.write_bit_entry(
                            &mut file_indices,
                            current_index,
                            file_index as u64,
                            index_size,
                        )?;
                        break;
                    }
                    current_index = (current_index + 1) % hash_table_entries as usize;
                    if current_index == start_index {
                        return Err(Error::invalid_format("HET table full"));
                    }
                }
                file_index += 1;
            }
        }

        let het_header_size = std::mem::size_of::<HetHeader>();
        let data_size = het_header_size as u32 + hash_table_entries + file_indices_size as u32;
        let table_size = 12 + data_size;

        let mut final_header = header;
        final_header.table_size = table_size;

        let mut result = Vec::with_capacity((12 + data_size) as usize);

        // Extended table header: signature "HET\x1A", version, data size.
        result.extend_from_slice(&0x1A544548u32.to_le_bytes());
        result.extend_from_slice(&1u32.to_le_bytes());
        result.extend_from_slice(&data_size.to_le_bytes());

        result.extend_from_slice(&final_header.table_size.to_le_bytes());
        result.extend_from_slice(&final_header.max_file_count.to_le_bytes());
        result.extend_from_slice(&final_header.hash_table_size.to_le_bytes());
        result.extend_from_slice(&final_header.hash_entry_size.to_le_bytes());
        result.extend_from_slice(&final_header.total_index_size.to_le_bytes());
        result.extend_from_slice(&final_header.index_size_extra.to_le_bytes());
        result.extend_from_slice(&final_header.index_size.to_le_bytes());
        result.extend_from_slice(&final_header.block_table_size.to_le_bytes());

        result.extend_from_slice(&het_hash_table);
        result.extend_from_slice(&file_indices);

        Ok((result, final_header))
    }

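    /// Builds a BET (block entry table) from the classic block table, using a
    /// simplified fixed-width layout: 32-bit file position, file size, and
    /// compressed size per entry, plus a Jenkins name hash derived from a
    /// placeholder file name.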
    fn create_bet_table_from_block_table(
        &self,
        block_table: &BlockTable,
    ) -> Result<(Vec<u8>, BetHeader)> {
        use crate::crypto::jenkins_hash;

        let file_count = block_table.entries().len() as u32;

        // Fixed-width bit fields: 32 bits each for position and sizes,
        // 8 bits for the flag index.
        let bit_count_file_pos = 32;
        let bit_count_file_size = 32;
        let bit_count_cmp_size = 32;
        let bit_count_flag_index = 8;
        let table_entry_size =
            bit_count_file_pos + bit_count_file_size + bit_count_cmp_size + bit_count_flag_index;

        let header = BetHeader {
            table_size: 0, // patched below once the full size is known
            file_count,
            unknown_08: 0x10,
            table_entry_size,
            bit_index_file_pos: 0,
            bit_index_file_size: bit_count_file_pos,
            bit_index_cmp_size: bit_count_file_pos + bit_count_file_size,
            bit_index_flag_index: bit_count_file_pos + bit_count_file_size + bit_count_cmp_size,
            bit_index_unknown: table_entry_size,
            bit_count_file_pos,
            bit_count_file_size,
            bit_count_cmp_size,
            bit_count_flag_index,
            bit_count_unknown: 0,
            total_bet_hash_size: file_count * 64,
            bet_hash_size_extra: 0,
            bet_hash_size: 64,
            bet_hash_array_size: file_count * 8,
            flag_count: 1,
        };

        let bet_header_size = std::mem::size_of::<BetHeader>();
        let data_size = bet_header_size as u32 + 4 + (file_count * 12);
        let table_size = 12 + data_size;

        let mut final_header = header;
        final_header.table_size = table_size;

        let mut result = Vec::with_capacity((12 + data_size) as usize);

        // Extended table header: signature "BET\x1A", version, data size.
        result.extend_from_slice(&0x1A544542u32.to_le_bytes());
        result.extend_from_slice(&1u32.to_le_bytes());
        result.extend_from_slice(&data_size.to_le_bytes());

        result.extend_from_slice(&final_header.table_size.to_le_bytes());
        result.extend_from_slice(&final_header.file_count.to_le_bytes());
        result.extend_from_slice(&final_header.unknown_08.to_le_bytes());
        result.extend_from_slice(&final_header.table_entry_size.to_le_bytes());

        // Remaining 15 header fields (bit indices/counts and hash sizes) are
        // written as zeros here.
        for _ in 0..15 {
            result.extend_from_slice(&0u32.to_le_bytes());
        }

        // Flag array (a single flag value).
        result.extend_from_slice(&0u32.to_le_bytes());

        for (i, entry) in block_table.entries().iter().enumerate() {
            result.extend_from_slice(&entry.file_pos.to_le_bytes());
            result.extend_from_slice(&entry.file_size.to_le_bytes());
            result.extend_from_slice(&entry.compressed_size.to_le_bytes());

            let hash = jenkins_hash(&generate_anonymous_filename(i as u32));
            result.extend_from_slice(&hash.to_le_bytes());
        }

        Ok((result, final_header))
    }

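    /// Writes `value` into a packed little-endian bit array in which each
    /// entry occupies `bit_size` bits. For example, with `bit_size = 5` and
    /// `index = 3` the value lands at bit offset 15, i.e. byte 1 shifted left
    /// by 7 bits.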
    fn write_bit_entry(
        &self,
        data: &mut [u8],
        index: usize,
        value: u64,
        bit_size: u32,
    ) -> Result<()> {
        let bit_offset = index * bit_size as usize;
        let byte_offset = bit_offset / 8;
        let bit_shift = bit_offset % 8;

        let bits_needed = bit_shift + bit_size as usize;
        let bytes_needed = bits_needed.div_ceil(8);

        if byte_offset + bytes_needed > data.len() {
            return Err(Error::invalid_format("Bit entry out of bounds"));
        }

        // Read the affected bytes into a u64 work buffer.
        let mut existing = 0u64;
        let max_bytes = bytes_needed.min(8);
        for i in 0..max_bytes {
            if byte_offset + i < data.len() && i * 8 < 64 {
                existing |= (data[byte_offset + i] as u64) << (i * 8);
            }
        }

        // Clear the destination bits, then merge in the new value.
        let value_mask = if bit_size >= 64 {
            u64::MAX
        } else {
            (1u64 << bit_size) - 1
        };
        let mask = value_mask << bit_shift;
        existing &= !mask;

        existing |= (value & value_mask) << bit_shift;

        // Write the affected bytes back.
        for i in 0..max_bytes {
            if byte_offset + i < data.len() && i * 8 < 64 {
                data[byte_offset + i] = (existing >> (i * 8)) as u8;
            }
        }

        Ok(())
    }

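    /// Returns the number of bits needed to represent `max_value`
    /// (e.g. 0 -> 1, 255 -> 8, 256 -> 9).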
    fn calculate_bits_needed(max_value: u64) -> u32 {
        if max_value == 0 {
            1
        } else {
            (64 - max_value.leading_zeros()).max(1)
        }
    }

    /// Rewrites the archive header in place if the block table size changed.
    fn update_header(&mut self) -> Result<()> {
        let archive_offset = self.archive.archive_offset();
        let mut header = self.archive.header().clone();
        let mut needs_update = false;

        if let Some(block_table) = &self.block_table {
            let new_size = block_table.entries().len() as u32;
            if new_size != header.block_table_size {
                header.block_table_size = new_size;
                needs_update = true;
            }
        }

        if needs_update {
            self.file.seek(SeekFrom::Start(archive_offset))?;

            // V1 header fields.
            self.file.write_all(b"MPQ\x1A")?;
            self.file.write_all(&header.header_size.to_le_bytes())?;
            self.file.write_all(&header.archive_size.to_le_bytes())?;
            self.file
                .write_all(&(header.format_version as u16).to_le_bytes())?;
            self.file.write_all(&header.block_size.to_le_bytes())?;

            let hash_pos = self
                .updated_hash_table_pos
                .unwrap_or(header.hash_table_pos as u64) as u32;
            let block_pos = self
                .updated_block_table_pos
                .unwrap_or(header.block_table_pos as u64) as u32;

            self.file.write_all(&hash_pos.to_le_bytes())?;
            self.file.write_all(&block_pos.to_le_bytes())?;
            self.file.write_all(&header.hash_table_size.to_le_bytes())?;
            self.file
                .write_all(&header.block_table_size.to_le_bytes())?;

            // V2 extension fields.
            if header.format_version >= FormatVersion::V2 {
                self.file
                    .write_all(&header.hi_block_table_pos.unwrap_or(0).to_le_bytes())?;
                self.file
                    .write_all(&header.hash_table_pos_hi.unwrap_or(0).to_le_bytes())?;
                self.file
                    .write_all(&header.block_table_pos_hi.unwrap_or(0).to_le_bytes())?;
            }

            // V3 extension fields.
            if header.format_version >= FormatVersion::V3 {
                self.file
                    .write_all(&header.archive_size_64.unwrap_or(0).to_le_bytes())?;

                let het_pos = self.updated_het_pos.or(header.het_table_pos).unwrap_or(0);
                let bet_pos = self.updated_bet_pos.or(header.bet_table_pos).unwrap_or(0);

                self.file.write_all(&het_pos.to_le_bytes())?;
                self.file.write_all(&bet_pos.to_le_bytes())?;
            }
        }

        Ok(())
    }
}

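/// Flushing on drop is best-effort: any error from `flush` is discarded, so
/// callers that need to handle write failures should call `flush` explicitly.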
impl Drop for MutableArchive {
    fn drop(&mut self) {
        let _ = self.flush();
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_add_file_options() {
        let options = AddFileOptions::new()
            .compression(CompressionMethod::Lzma)
            .encrypt()
            .locale(0x409);

        assert_eq!(options.compression, CompressionMethod::Lzma);
        assert!(options.encrypt);
        assert_eq!(options.locale, 0x409);
    }

    #[test]
    fn test_fix_key_enables_encryption() {
        let options = AddFileOptions::new().fix_key();
        assert!(options.encrypt);
        assert!(options.fix_key);
    }
}