1use crate::{Error, Result};
18use std::path::{Component, Path};
19use std::sync::Arc;
20use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
21use std::time::{Duration, Instant};
22
/// Configurable limits used to defend MPQ parsing and decompression against
/// malicious or corrupt archives (memory exhaustion, compression bombs, DoS).
#[derive(Debug, Clone)]
pub struct SecurityLimits {
    /// Maximum accepted archive size in bytes.
    pub max_archive_size: u64,
    /// Maximum number of hash table entries.
    pub max_hash_entries: u32,
    /// Maximum number of block table entries.
    pub max_block_entries: u32,
    /// Maximum sector shift; larger values would create excessive sector sizes.
    pub max_sector_shift: u16,
    /// Maximum length, in bytes, of a file path inside the archive.
    pub max_path_length: usize,
    /// Maximum allowed decompressed:compressed ratio before a file is treated
    /// as a compression bomb.
    pub max_compression_ratio: u32,
    /// Maximum decompressed size of a single file, in bytes.
    pub max_decompressed_size: u64,
    /// Maximum number of table entries (files) in an archive.
    pub max_file_count: u32,
    /// Maximum total bytes decompressed across a whole session.
    pub max_session_decompressed: u64,
    /// Wall-clock budget for a single decompression operation.
    pub max_decompression_time: Duration,
    /// Enables heuristic compression-bomb pattern detection.
    pub enable_pattern_detection: bool,
    /// Enables size/method-adaptive compression-ratio limits.
    pub enable_adaptive_limits: bool,
}
51
52impl Default for SecurityLimits {
53 fn default() -> Self {
54 Self {
55 max_archive_size: 4 * 1024 * 1024 * 1024, max_hash_entries: 1_000_000,
57 max_block_entries: 1_000_000,
58 max_sector_shift: 20, max_path_length: 260, max_compression_ratio: 1000,
61 max_decompressed_size: 100 * 1024 * 1024, max_file_count: 100_000,
63 max_session_decompressed: 1024 * 1024 * 1024, max_decompression_time: Duration::from_secs(30),
65 enable_pattern_detection: true,
66 enable_adaptive_limits: true,
67 }
68 }
69}
70
/// Tracks cumulative decompression activity across an entire session so that
/// per-session resource limits can be enforced.
///
/// Counters are `Arc`-shared atomics, so clones of a tracker observe and
/// update the same totals.
#[derive(Debug, Clone)]
pub struct SessionTracker {
    /// Total bytes decompressed so far in this session.
    pub total_decompressed: Arc<AtomicU64>,
    /// Number of files decompressed so far in this session.
    pub files_decompressed: Arc<AtomicUsize>,
    /// When the session began (used for elapsed-time reporting).
    pub session_start: Instant,
}
81
82impl Default for SessionTracker {
83 fn default() -> Self {
84 Self::new()
85 }
86}
87
88impl SessionTracker {
89 pub fn new() -> Self {
91 Self {
92 total_decompressed: Arc::new(AtomicU64::new(0)),
93 files_decompressed: Arc::new(AtomicUsize::new(0)),
94 session_start: Instant::now(),
95 }
96 }
97
98 pub fn record_decompression(&self, bytes: u64) {
100 self.total_decompressed.fetch_add(bytes, Ordering::Relaxed);
101 self.files_decompressed.fetch_add(1, Ordering::Relaxed);
102 }
103
104 pub fn get_stats(&self) -> (u64, usize, Duration) {
106 (
107 self.total_decompressed.load(Ordering::Relaxed),
108 self.files_decompressed.load(Ordering::Relaxed),
109 self.session_start.elapsed(),
110 )
111 }
112
113 pub fn check_session_limits(&self, limits: &SecurityLimits) -> Result<()> {
115 let total = self.total_decompressed.load(Ordering::Relaxed);
116 if total > limits.max_session_decompressed {
117 return Err(Error::resource_exhaustion(
118 "Session decompression limit exceeded - potential resource exhaustion attack",
119 ));
120 }
121 Ok(())
122 }
123
124 pub fn check_session_limits_with_addition(
126 &self,
127 additional_bytes: u64,
128 limits: &SecurityLimits,
129 ) -> Result<()> {
130 let current_total = self.total_decompressed.load(Ordering::Relaxed);
131 let projected_total = current_total.saturating_add(additional_bytes);
132 if projected_total > limits.max_session_decompressed {
133 return Err(Error::resource_exhaustion(
134 "Session decompression limit would be exceeded - potential resource exhaustion attack",
135 ));
136 }
137 Ok(())
138 }
139}
140
/// Monitors a single decompression operation against a size cap and a
/// wall-clock time budget, with cooperative cancellation.
#[derive(Debug)]
pub struct DecompressionMonitor {
    /// Maximum permitted output size in bytes.
    pub max_size: u64,
    /// Maximum permitted wall-clock duration.
    pub max_time: Duration,
    /// When the monitored operation started.
    pub start_time: Instant,
    /// Bytes produced so far; updated by `check_progress`.
    pub bytes_decompressed: Arc<AtomicU64>,
    /// Cancellation flag: non-zero means cancel. Stored as `AtomicU64` even
    /// though it is only ever used as a boolean (0/1).
    pub should_cancel: Arc<AtomicU64>,
}
155
156impl DecompressionMonitor {
157 pub fn new(max_size: u64, max_time: Duration) -> Self {
159 Self {
160 max_size,
161 max_time,
162 start_time: Instant::now(),
163 bytes_decompressed: Arc::new(AtomicU64::new(0)),
164 should_cancel: Arc::new(AtomicU64::new(0)),
165 }
166 }
167
168 pub fn check_progress(&self, current_output_size: u64) -> Result<()> {
170 if current_output_size > self.max_size {
172 return Err(Error::resource_exhaustion(
173 "Decompression size limit exceeded - potential compression bomb",
174 ));
175 }
176
177 if self.start_time.elapsed() > self.max_time {
179 return Err(Error::resource_exhaustion(
180 "Decompression time limit exceeded - potential DoS attack",
181 ));
182 }
183
184 if self.should_cancel.load(Ordering::Relaxed) != 0 {
186 return Err(Error::resource_exhaustion(
187 "Decompression cancelled due to security limits",
188 ));
189 }
190
191 self.bytes_decompressed
193 .store(current_output_size, Ordering::Relaxed);
194
195 Ok(())
196 }
197
198 pub fn request_cancellation(&self) {
200 self.should_cancel.store(1, Ordering::Relaxed);
201 }
202
203 pub fn get_stats(&self) -> (u64, Duration) {
205 (
206 self.bytes_decompressed.load(Ordering::Relaxed),
207 self.start_time.elapsed(),
208 )
209 }
210}
211
/// Adaptive compression-ratio limits that scale the permitted
/// decompressed:compressed ratio by input size and compression method.
#[derive(Debug, Clone)]
pub struct AdaptiveCompressionLimits {
    /// Baseline ratio limit; returned as-is when adaptation is disabled.
    pub base_limit: u32,
    /// Whether adaptive scaling is applied at all.
    pub enabled: bool,
}

impl AdaptiveCompressionLimits {
    /// Creates a new limit calculator with the given baseline and mode.
    pub fn new(base_limit: u32, enabled: bool) -> Self {
        Self {
            base_limit,
            enabled,
        }
    }

    /// Computes the maximum allowed compression ratio for a single file.
    ///
    /// Small inputs legitimately reach very high ratios, so the limit is
    /// relaxed for them and tightened for large inputs; the compression
    /// method then scales the result further. The final value is clamped
    /// to `50..=50000`.
    ///
    /// Fix: uses saturating arithmetic — the previous unchecked `*` could
    /// overflow `u32` (a panic in debug builds) for very large `base_limit`
    /// values. Results are unchanged for all non-overflowing inputs.
    pub fn calculate_limit(&self, compressed_size: u64, compression_method: u8) -> u32 {
        if !self.enabled {
            return self.base_limit;
        }

        // Scale by input size: the smaller the input, the more generous the cap.
        let size_based_limit = match compressed_size {
            0..=512 => self.base_limit.saturating_mul(10),
            513..=4096 => self.base_limit.saturating_mul(5),
            4097..=65536 => self.base_limit.saturating_mul(2),
            65537..=1048576 => self.base_limit,
            _ => self.base_limit / 2,
        };

        // Scale by compression method. NOTE(review): these constants are
        // presumably the MPQ compression flags (0x02 zlib, 0x08 PKWARE,
        // 0x10 bzip2, 0x12 lzma, 0x20 sparse, 0x01 huffman, 0x40/0x80 ADPCM)
        // — confirm against the compression module.
        let method_based_limit = match compression_method {
            0x02 => size_based_limit.saturating_mul(2),
            0x10 => size_based_limit.saturating_mul(3),
            0x12 => size_based_limit.saturating_mul(4),
            0x20 => size_based_limit / 2,
            0x08 => size_based_limit,
            0x01 => size_based_limit / 2,
            0x40 | 0x80 => size_based_limit.saturating_mul(2),
            _ => size_based_limit,
        };

        // Keep the final limit inside a sane absolute window.
        method_based_limit.clamp(50, 50000)
    }
}
263
264impl SecurityLimits {
265 pub fn strict() -> Self {
267 Self {
268 max_archive_size: 1024 * 1024 * 1024, max_hash_entries: 100_000,
270 max_block_entries: 100_000,
271 max_sector_shift: 16, max_path_length: 128,
273 max_compression_ratio: 100,
274 max_decompressed_size: 10 * 1024 * 1024, max_file_count: 10_000,
276 max_session_decompressed: 100 * 1024 * 1024, max_decompression_time: Duration::from_secs(10),
278 enable_pattern_detection: true,
279 enable_adaptive_limits: true,
280 }
281 }
282
283 pub fn permissive() -> Self {
285 Self {
286 max_archive_size: 16 * 1024 * 1024 * 1024, max_hash_entries: 10_000_000,
288 max_block_entries: 10_000_000,
289 max_sector_shift: 24, max_path_length: 1024,
291 max_compression_ratio: 10000,
292 max_decompressed_size: 1024 * 1024 * 1024, max_file_count: 1_000_000,
294 max_session_decompressed: 16 * 1024 * 1024 * 1024, max_decompression_time: Duration::from_secs(300),
296 enable_pattern_detection: true,
297 enable_adaptive_limits: true,
298 }
299 }
300}
301
302#[allow(clippy::too_many_arguments)]
304pub fn validate_header_security(
305 signature: u32,
306 header_size: u32,
307 archive_size: u32,
308 format_version: u16,
309 sector_shift: u16,
310 hash_table_offset: u32,
311 block_table_offset: u32,
312 hash_table_size: u32,
313 block_table_size: u32,
314 limits: &SecurityLimits,
315) -> Result<()> {
316 if signature != crate::signatures::MPQ_ARCHIVE {
318 return Err(Error::invalid_format(
319 "Invalid MPQ signature - not a valid MPQ archive",
320 ));
321 }
322
323 if !(32..=1024).contains(&header_size) {
325 return Err(Error::invalid_format(
326 "Invalid header size - must be between 32 and 1024 bytes",
327 ));
328 }
329
330 if archive_size == 0 || archive_size as u64 > limits.max_archive_size {
332 return Err(Error::invalid_format(
333 "Invalid archive size - too large or zero",
334 ));
335 }
336
337 if format_version > 4 {
339 return Err(Error::UnsupportedVersion(format_version));
340 }
341
342 if sector_shift > limits.max_sector_shift {
344 return Err(Error::invalid_format(
345 "Invalid sector shift - would create excessive sector size",
346 ));
347 }
348
349 if hash_table_offset >= archive_size {
351 return Err(Error::invalid_format(
352 "Hash table offset exceeds archive size",
353 ));
354 }
355
356 if block_table_size == 0 && block_table_offset == archive_size {
359 } else if block_table_offset > archive_size {
361 return Err(Error::invalid_format(
362 "Block table offset exceeds archive size",
363 ));
364 }
365
366 if hash_table_size > limits.max_hash_entries {
368 return Err(Error::resource_exhaustion(
369 "Hash table too large - potential memory exhaustion attack",
370 ));
371 }
372
373 if block_table_size > limits.max_block_entries {
374 return Err(Error::invalid_format(
375 "Block table too large - potential memory exhaustion attack",
376 ));
377 }
378
379 let hash_table_bytes = hash_table_size
381 .checked_mul(16) .ok_or_else(|| Error::invalid_format("Hash table size causes integer overflow"))?;
383
384 let block_table_bytes = block_table_size
385 .checked_mul(16) .ok_or_else(|| Error::invalid_format("Block table size causes integer overflow"))?;
387
388 if let Some(end_pos) = hash_table_offset.checked_add(hash_table_bytes) {
390 if end_pos > archive_size.saturating_add(65536) {
392 return Err(Error::invalid_format(
393 "Hash table extends beyond archive bounds",
394 ));
395 }
396 } else {
397 return Err(Error::invalid_format(
398 "Hash table size calculation overflows",
399 ));
400 }
401
402 if let Some(end_pos) = block_table_offset.checked_add(block_table_bytes) {
404 if end_pos > archive_size.saturating_add(65536) {
407 return Err(Error::invalid_format(
408 "Block table extends beyond archive bounds",
409 ));
410 }
411 } else {
412 return Err(Error::invalid_format(
413 "Block table size calculation overflows",
414 ));
415 }
416
417 if hash_table_size == 0 || !crate::is_power_of_two(hash_table_size) {
419 return Err(Error::invalid_format(
420 "Hash table size must be a non-zero power of 2",
421 ));
422 }
423
424 Ok(())
425}
426
427pub fn validate_file_path(path: &str, limits: &SecurityLimits) -> Result<()> {
429 if path.len() > limits.max_path_length {
431 return Err(Error::invalid_format(
432 "File path too long - potential buffer overflow",
433 ));
434 }
435
436 if path.is_empty() {
438 return Err(Error::invalid_format("Empty file path not allowed"));
439 }
440
441 if path.contains('\0') {
443 return Err(Error::invalid_format(
444 "File path contains null bytes - potential security issue",
445 ));
446 }
447
448 let normalized_path = Path::new(path);
450
451 for component in normalized_path.components() {
452 match component {
453 Component::ParentDir => {
455 return Err(Error::directory_traversal(
456 "File path contains parent directory reference",
457 ));
458 }
459 Component::RootDir => {
461 return Err(Error::invalid_format(
462 "Absolute file paths not allowed in MPQ archives",
463 ));
464 }
465 Component::Normal(name) => {
467 let name_str = name.to_string_lossy();
468
469 let reserved_names = [
471 "CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6",
472 "COM7", "COM8", "COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7",
473 "LPT8", "LPT9",
474 ];
475
476 let name_upper = name_str.to_uppercase();
477 for &reserved in &reserved_names {
479 if name_upper == reserved || name_upper.starts_with(&format!("{reserved}.")) {
480 return Err(Error::invalid_format(
481 "File path contains Windows reserved name",
482 ));
483 }
484 }
485
486 for ch in name_str.chars() {
488 match ch {
489 '\0'..='\x1f' | '\x7f' => {
491 return Err(Error::invalid_format(
492 "File path contains control characters",
493 ));
494 }
495 '<' | '>' | '|' | '"' | '?' | '*' => {
497 return Err(Error::invalid_format(
498 "File path contains dangerous characters",
499 ));
500 }
501 _ => {} }
503 }
504 }
505 _ => {} }
507 }
508
509 Ok(())
510}
511
512pub fn validate_file_bounds(
514 file_offset: u64,
515 file_size: u64,
516 compressed_size: u64,
517 archive_size: u64,
518 limits: &SecurityLimits,
519) -> Result<()> {
520 if compressed_size == 0 {
522 return Err(Error::invalid_format("Compressed file size cannot be zero"));
523 }
524
525 if file_size > limits.max_decompressed_size {
527 return Err(Error::resource_exhaustion(
528 "File size exceeds maximum allowed limit",
529 ));
530 }
531
532 let file_end = file_offset
534 .checked_add(compressed_size)
535 .ok_or_else(|| Error::invalid_format("File offset causes integer overflow"))?;
536
537 if file_end > archive_size {
538 return Err(Error::invalid_format(
539 "File data extends beyond archive bounds",
540 ));
541 }
542
543 if file_size > 0 && compressed_size > 0 {
545 let compression_ratio = file_size / compressed_size;
546 if compression_ratio > limits.max_compression_ratio as u64 {
547 let ratio = file_size / compressed_size;
548 return Err(Error::compression_bomb(
549 ratio,
550 limits.max_compression_ratio as u64,
551 ));
552 }
553 }
554
555 Ok(())
556}
557
558pub fn validate_sector_data(
560 sector_index: u32,
561 sector_size: u32,
562 data_size: usize,
563 expected_crc: Option<u32>,
564) -> Result<()> {
565 if sector_size == 0 || sector_size > 16 * 1024 * 1024 {
567 return Err(Error::invalid_format(
568 "Invalid sector size - must be between 1 byte and 16MB",
569 ));
570 }
571
572 if data_size > sector_size as usize {
574 return Err(Error::invalid_format(
575 "Sector data size exceeds sector size limit",
576 ));
577 }
578
579 if sector_index > 1_000_000 {
581 return Err(Error::invalid_format(
582 "Sector index too high - potential memory exhaustion",
583 ));
584 }
585
586 if let Some(_crc) = expected_crc {
589 }
591
592 Ok(())
593}
594
595pub fn validate_table_entry(
597 entry_index: u32,
598 file_offset: u32,
599 file_size: u32,
600 compressed_size: u32,
601 archive_size: u32,
602 limits: &SecurityLimits,
603) -> Result<()> {
604 if entry_index >= limits.max_file_count {
606 return Err(Error::invalid_format(
607 "Table entry index too high - potential memory exhaustion",
608 ));
609 }
610
611 validate_file_bounds(
613 file_offset as u64,
614 file_size as u64,
615 compressed_size as u64,
616 archive_size as u64,
617 limits,
618 )?;
619
620 if compressed_size > file_size && file_size > 0 {
622 let size_diff = compressed_size - file_size;
625 if size_diff > 1024 && size_diff > file_size {
626 return Err(Error::invalid_format(
627 "Compressed size significantly larger than uncompressed - suspicious",
628 ));
629 }
630 }
631
632 Ok(())
633}
634
635pub fn detect_compression_bomb_patterns(
637 compressed_size: u64,
638 decompressed_size: u64,
639 compression_method: u8,
640 file_path: Option<&str>,
641 limits: &SecurityLimits,
642) -> Result<()> {
643 if !limits.enable_pattern_detection {
644 return Ok(());
645 }
646
647 let adaptive_limits =
649 AdaptiveCompressionLimits::new(limits.max_compression_ratio, limits.enable_adaptive_limits);
650 let max_ratio = adaptive_limits.calculate_limit(compressed_size, compression_method);
651
652 if decompressed_size > 0 && compressed_size > 0 {
654 let ratio = decompressed_size / compressed_size;
655 if ratio > max_ratio as u64 {
656 return Err(Error::compression_bomb(ratio, max_ratio as u64));
657 }
658 }
659
660 if compressed_size < 100 && decompressed_size > 10 * 1024 * 1024 {
662 return Err(Error::malicious_content(
663 "Suspicious compression pattern: tiny compressed data with huge output",
664 ));
665 }
666
667 if let Some(path) = file_path {
669 let path_lower = path.to_lowercase();
670 if (path_lower.ends_with(".mpq")
671 || path_lower.ends_with(".zip")
672 || path_lower.ends_with(".rar")
673 || path_lower.ends_with(".7z"))
674 && decompressed_size > 50 * 1024 * 1024
675 {
677 return Err(Error::malicious_content(
678 "Suspicious nested archive with large decompressed size",
679 ));
680 }
681 }
682
683 if compression_method > 0x80 {
685 let expected_multi_ratio = max_ratio / 2; if decompressed_size > 0 && compressed_size > 0 {
688 let ratio = decompressed_size / compressed_size;
689 if ratio > expected_multi_ratio as u64 {
690 return Err(Error::compression_bomb(ratio, expected_multi_ratio as u64));
691 }
692 }
693 }
694
695 if decompressed_size > limits.max_decompressed_size * 3 / 4 {
697 log::warn!(
698 "Large decompression detected: {} bytes ({}% of limit)",
699 decompressed_size,
700 (decompressed_size * 100) / limits.max_decompressed_size
701 );
702 }
703
704 Ok(())
705}
706
707pub fn validate_decompression_operation(
709 compressed_size: u64,
710 expected_decompressed_size: u64,
711 compression_method: u8,
712 file_path: Option<&str>,
713 session_tracker: &SessionTracker,
714 limits: &SecurityLimits,
715) -> Result<DecompressionMonitor> {
716 session_tracker.check_session_limits_with_addition(expected_decompressed_size, limits)?;
718
719 validate_file_bounds(
721 0, expected_decompressed_size,
723 compressed_size,
724 u64::MAX, limits,
726 )?;
727
728 detect_compression_bomb_patterns(
730 compressed_size,
731 expected_decompressed_size,
732 compression_method,
733 file_path,
734 limits,
735 )?;
736
737 let monitor = DecompressionMonitor::new(
739 expected_decompressed_size.min(limits.max_decompressed_size),
740 limits.max_decompression_time,
741 );
742
743 if expected_decompressed_size > 10 * 1024 * 1024 {
745 log::info!(
747 "Large decompression: {} -> {} bytes ({}:1 ratio) method=0x{:02X} path={}",
748 compressed_size,
749 expected_decompressed_size,
750 if compressed_size > 0 {
751 expected_decompressed_size / compressed_size
752 } else {
753 0
754 },
755 compression_method,
756 file_path.unwrap_or("<unknown>")
757 );
758 }
759
760 Ok(monitor)
761}
762
763pub fn validate_decompression_result(
765 expected_size: u64,
766 actual_size: u64,
767 tolerance_percent: u8,
768) -> Result<()> {
769 if expected_size == 0 {
770 return Ok(()); }
772
773 let tolerance = (expected_size * tolerance_percent as u64) / 100;
774 let min_size = expected_size.saturating_sub(tolerance);
775 let max_size = expected_size.saturating_add(tolerance);
776
777 if actual_size < min_size || actual_size > max_size {
778 return Err(Error::compression(format!(
779 "Decompression size mismatch: expected {}, got {} (±{}% tolerance)",
780 expected_size, actual_size, tolerance_percent
781 )));
782 }
783
784 Ok(())
785}
786
/// Convenience constructor for a security-violation [`Error`] from any
/// string-like message.
pub fn security_error<S: Into<String>>(message: S) -> Error {
    Error::security_violation(message.into())
}
791
#[cfg(test)]
mod tests {
    use super::*;

    /// Spot-checks representative values of the default limit set.
    #[test]
    fn test_security_limits_defaults() {
        let limits = SecurityLimits::default();
        assert_eq!(limits.max_archive_size, 4 * 1024 * 1024 * 1024);
        assert_eq!(limits.max_hash_entries, 1_000_000);
        assert_eq!(limits.max_compression_ratio, 1000);
    }

    /// A structurally sound header passes every check.
    #[test]
    fn test_valid_header() {
        let limits = SecurityLimits::default();

        let result = validate_header_security(
            crate::signatures::MPQ_ARCHIVE,
            32,          // header_size
            1024 * 1024, // archive_size
            1,           // format_version
            3,           // sector_shift
            32,          // hash_table_offset
            512,         // block_table_offset
            16,          // hash_table_size
            16,          // block_table_size
            &limits,
        );

        assert!(result.is_ok());
    }

    /// A wrong magic number is rejected with the signature error.
    #[test]
    fn test_invalid_signature() {
        let limits = SecurityLimits::default();

        let result = validate_header_security(
            0x12345678, // not the MPQ signature
            32,
            1024 * 1024,
            1,
            3,
            32,
            512,
            16,
            16,
            &limits,
        );

        assert!(result.is_err());
        assert!(
            result
                .unwrap_err()
                .to_string()
                .contains("Invalid MPQ signature")
        );
    }

    /// A hash table one entry past the cap is rejected as too large.
    #[test]
    fn test_oversized_tables() {
        let limits = SecurityLimits::default();

        let result = validate_header_security(
            crate::signatures::MPQ_ARCHIVE,
            32,
            1024 * 1024,
            1,
            3,
            32,
            512,
            limits.max_hash_entries + 1, // one past the limit
            16,
            &limits,
        );

        assert!(result.is_err());
        assert!(
            result
                .unwrap_err()
                .to_string()
                .contains("Hash table too large")
        );
    }

    /// Ordinary relative paths are accepted.
    #[test]
    fn test_valid_file_path() {
        let limits = SecurityLimits::default();

        assert!(validate_file_path("data/models/character.m2", &limits).is_ok());
        assert!(validate_file_path("sounds/music/theme.mp3", &limits).is_ok());
        assert!(validate_file_path("world/maps/area.adt", &limits).is_ok());
    }

    /// Parent-directory components and absolute paths are rejected.
    #[test]
    fn test_directory_traversal_attack() {
        let limits = SecurityLimits::default();

        assert!(validate_file_path("../../../etc/passwd", &limits).is_err());
        assert!(validate_file_path("data/../../../secret", &limits).is_err());
        assert!(validate_file_path("/absolute/path", &limits).is_err());
    }

    /// Windows reserved device names, dangerous characters, and embedded
    /// NULs are rejected.
    #[test]
    fn test_dangerous_file_names() {
        let limits = SecurityLimits::default();

        assert!(validate_file_path("data/CON", &limits).is_err());
        assert!(validate_file_path("data/PRN.txt", &limits).is_err());
        assert!(validate_file_path("data/file<script>", &limits).is_err());
        assert!(validate_file_path("data/file\x00.txt", &limits).is_err());
    }

    /// In-bounds data passes; out-of-bounds or oversized data fails.
    #[test]
    fn test_file_bounds_validation() {
        let limits = SecurityLimits::default();

        // Fits comfortably inside the archive.
        assert!(
            validate_file_bounds(
                1000,   // file_offset
                2048,   // file_size
                1024,   // compressed_size
                100000, // archive_size
                &limits,
            )
            .is_ok()
        );

        // Compressed data would run past the end of the archive.
        assert!(
            validate_file_bounds(
                99000,  // file_offset
                2048,   // file_size
                2000,   // compressed_size
                100000, // archive_size
                &limits,
            )
            .is_err()
        );

        // 10000:1 ratio exceeds the default compression-ratio limit.
        assert!(
            validate_file_bounds(
                1000,    // file_offset
                1000000, // file_size
                100,     // compressed_size
                100000,  // archive_size
                &limits,
            )
            .is_err()
        );
    }

    /// Ratios at and below the limit pass; well above it they fail.
    #[test]
    fn test_compression_ratio_validation() {
        let limits = SecurityLimits::default();

        // 10:1 — fine.
        assert!(validate_file_bounds(1000, 10240, 1024, 100000, &limits).is_ok());

        // 100:1 — still within the default 1000:1 limit.
        assert!(validate_file_bounds(1000, 102400, 1024, 200000, &limits).is_ok());

        // 10000:1 — rejected as a compression bomb.
        assert!(validate_file_bounds(1000, 10240000, 1024, 20000000, &limits).is_err());
    }

    /// Sector metadata checks: size range, data-vs-sector size, index cap.
    #[test]
    fn test_sector_validation() {
        assert!(
            validate_sector_data(
                0,    // sector_index
                4096, // sector_size
                2048, // data_size
                None, // expected_crc
            )
            .is_ok()
        );

        // Zero sector size is invalid.
        assert!(validate_sector_data(0, 0, 1024, None).is_err());

        // Data larger than the sector is invalid.
        assert!(validate_sector_data(0, 1024, 2048, None).is_err());

        // Sector index past the 1M cap is invalid.
        assert!(validate_sector_data(2_000_000, 4096, 2048, None).is_err());
    }

    /// Counters accumulate and the default limit is not tripped.
    #[test]
    fn test_session_tracker() {
        let tracker = SessionTracker::new();
        let limits = SecurityLimits::default();

        // Fresh tracker starts at zero.
        let (total, count, _duration) = tracker.get_stats();
        assert_eq!(total, 0);
        assert_eq!(count, 0);

        tracker.record_decompression(1024);
        tracker.record_decompression(2048);

        let (total, count, _duration) = tracker.get_stats();
        assert_eq!(total, 3072);
        assert_eq!(count, 2);

        assert!(tracker.check_session_limits(&limits).is_ok());
    }

    /// Exceeding the strict session budget trips the limit check.
    #[test]
    fn test_session_tracker_limit_exceeded() {
        let tracker = SessionTracker::new();
        let limits = SecurityLimits::strict();

        tracker.record_decompression(limits.max_session_decompressed + 1);

        assert!(tracker.check_session_limits(&limits).is_err());
    }

    /// Progress within limits passes; oversize or cancellation fails.
    #[test]
    fn test_decompression_monitor() {
        let monitor = DecompressionMonitor::new(
            1024 * 1024,            // max_size: 1 MiB
            Duration::from_secs(5), // max_time
        );

        assert!(monitor.check_progress(1024).is_ok());

        // Output twice the cap is rejected.
        assert!(monitor.check_progress(2 * 1024 * 1024).is_err());

        // After a cancellation request, any further progress check fails.
        monitor.request_cancellation();
        assert!(monitor.check_progress(512).is_err());
    }

    /// Smaller inputs get more generous limits; stronger methods scale up.
    #[test]
    fn test_adaptive_compression_limits() {
        let adaptive = AdaptiveCompressionLimits::new(1000, true);

        let small_limit = adaptive.calculate_limit(100, 0x02);
        let large_limit = adaptive.calculate_limit(100_000, 0x02);
        assert!(small_limit > large_limit);
        assert!(small_limit >= 1000);

        // lzma (0x12) is allowed a higher ratio than zlib (0x02).
        let zlib_limit = adaptive.calculate_limit(1024, 0x02);
        let lzma_limit = adaptive.calculate_limit(1024, 0x12);

        assert!(lzma_limit > zlib_limit);
    }

    /// Heuristic bomb detection: normal files pass, extreme ratios, tiny
    /// inputs with huge outputs, and big nested archives fail.
    #[test]
    fn test_compression_bomb_pattern_detection() {
        let limits = SecurityLimits::default();

        assert!(
            detect_compression_bomb_patterns(1024, 10240, 0x02, Some("data/file.txt"), &limits)
                .is_ok()
        );

        // 1,000,000:1 ratio — rejected.
        assert!(
            detect_compression_bomb_patterns(
                100,
                100_000_000,
                0x02,
                Some("data/file.txt"),
                &limits
            )
            .is_err()
        );

        // Tiny compressed payload producing >10 MiB — rejected.
        assert!(
            detect_compression_bomb_patterns(50, 20_000_000, 0x02, Some("data/file.txt"), &limits)
                .is_err()
        );

        // Nested archive extension with a large decompressed size — rejected.
        assert!(
            detect_compression_bomb_patterns(
                1_000_000,
                100_000_000,
                0x02,
                Some("nested.mpq"),
                &limits
            )
            .is_err()
        );
    }

    /// Full pre-decompression pipeline: sane request passes, bomb-like
    /// request fails.
    #[test]
    fn test_decompression_operation_validation() {
        let session = SessionTracker::new();
        let limits = SecurityLimits::default();

        let result = validate_decompression_operation(
            1024,
            10240,
            0x02,
            Some("data/file.txt"),
            &session,
            &limits,
        );
        assert!(result.is_ok());

        let result = validate_decompression_operation(
            100,
            100_000_000,
            0x02,
            Some("bomb.txt"),
            &session,
            &limits,
        );
        assert!(result.is_err());
    }

    /// Size-mismatch tolerance: within ±5% passes, outside fails, and an
    /// expected size of 0 (unknown) always passes.
    #[test]
    fn test_decompression_result_validation() {
        assert!(validate_decompression_result(1024, 1024, 5).is_ok());

        assert!(validate_decompression_result(1024, 1000, 5).is_ok());
        assert!(validate_decompression_result(1024, 1050, 5).is_ok());
        assert!(validate_decompression_result(1024, 900, 5).is_err());
        assert!(validate_decompression_result(1024, 1150, 5).is_err());
        assert!(validate_decompression_result(0, 999999, 5).is_ok());
    }

    /// strict < default < permissive for session budget and time budget;
    /// detection features default to enabled.
    #[test]
    fn test_security_limits_extended() {
        let default_limits = SecurityLimits::default();
        let strict_limits = SecurityLimits::strict();
        let permissive_limits = SecurityLimits::permissive();

        assert!(strict_limits.max_session_decompressed < default_limits.max_session_decompressed);
        assert!(
            permissive_limits.max_session_decompressed > default_limits.max_session_decompressed
        );

        assert!(strict_limits.max_decompression_time < default_limits.max_decompression_time);
        assert!(permissive_limits.max_decompression_time > default_limits.max_decompression_time);

        assert!(default_limits.enable_pattern_detection);
        assert!(default_limits.enable_adaptive_limits);
    }
}
1158}