// trueno_zram_core/src/lib.rs
1//! SIMD-accelerated memory compression for Linux zram.
2//!
3//! This crate provides high-performance LZ4 and Zstandard compression
4//! optimized for 4KB memory pages, with runtime SIMD dispatch.
5//!
6//! # Example
7//!
8//! ```
9//! use trueno_zram_core::{CompressorBuilder, Algorithm, PageCompressor, PAGE_SIZE};
10//!
11//! let compressor = CompressorBuilder::new()
12//!     .algorithm(Algorithm::Lz4)
13//!     .build()
14//!     .unwrap();
15//!
16//! let page = [0u8; PAGE_SIZE];
17//! let compressed = compressor.compress(&page).unwrap();
18//! let decompressed = compressor.decompress(&compressed).unwrap();
19//!
20//! assert_eq!(page, decompressed);
21//! ```
22
23#![deny(missing_docs)]
24#![deny(clippy::panic)]
25#![warn(clippy::all, clippy::pedantic)]
26#![allow(clippy::module_name_repetitions)]
27#![allow(clippy::cast_possible_truncation)]
28#![allow(clippy::cast_sign_loss)]
29#![allow(clippy::cast_lossless)]
30#![allow(clippy::needless_range_loop)]
31#![allow(clippy::similar_names)]
32#![allow(clippy::unnecessary_wraps)]
33#![allow(clippy::missing_errors_doc)]
34#![allow(clippy::missing_panics_doc)]
35#![allow(clippy::missing_safety_doc)]
36#![allow(clippy::too_many_lines)]
37#![allow(clippy::cognitive_complexity)]
38#![allow(clippy::cast_ptr_alignment)]
39#![allow(clippy::must_use_candidate)]
40#![allow(clippy::match_same_arms)]
41#![allow(clippy::unreadable_literal)]
42#![allow(clippy::items_after_statements)]
43#![allow(clippy::wildcard_imports)]
44#![allow(clippy::cast_precision_loss)]
45#![allow(clippy::manual_strip)]
46#![allow(clippy::doc_markdown)]
47#![allow(clippy::inline_always)]
48#![allow(clippy::unused_self)]
49#![allow(clippy::struct_excessive_bools)]
50#![allow(clippy::incompatible_msrv)]
51#![allow(clippy::uninit_vec)]
52#![allow(clippy::cast_possible_wrap)]
53#![allow(clippy::large_stack_arrays)]
54#![allow(unused_assignments)]
55
56pub mod benchmark;
57pub mod compat;
58mod error;
59#[cfg(feature = "cuda")]
60pub mod gpu;
61pub mod integration;
62pub mod lz4;
63mod page;
64pub mod samefill;
65pub mod simd;
66pub mod zram;
67pub mod zstd;
68
69pub use error::{Error, Result};
70pub use page::{CompressedPage, CompressionStats, PAGE_SIZE};
71
72use std::sync::atomic::{AtomicU64, Ordering};
73use std::time::Instant;
74
/// Compression algorithm selection.
///
/// The default is [`Algorithm::Lz4`] (fast compression for 4KB pages).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Algorithm {
    /// No compression (store as-is).
    None,
    /// LZ4 fast compression.
    #[default]
    Lz4,
    /// LZ4-HC high compression (not yet implemented).
    ///
    /// Currently compresses with plain LZ4: `GenericCompressor::compress`
    /// routes this variant through `lz4::compress`.
    Lz4Hc,
    /// Zstandard with configurable level.
    Zstd {
        /// Compression level (1-22).
        level: i32,
    },
    /// Adaptive selection based on entropy.
    ///
    /// Resolved to a concrete algorithm at compression time; a stored page
    /// never carries this variant, and `decompress` rejects it as an
    /// internal error.
    Adaptive,
}
93
/// SIMD implementation backend.
///
/// The best available backend is chosen at runtime via `simd::best_backend`
/// unless a caller forces one with [`CompressorBuilder::prefer_backend`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum SimdBackend {
    /// Scalar fallback (no SIMD). Always available; the default.
    #[default]
    Scalar,
    /// SSE4.2 (128-bit).
    Sse42,
    /// AVX2 (256-bit).
    Avx2,
    /// AVX-512 (512-bit).
    Avx512,
    /// ARM NEON (128-bit).
    Neon,
}
109
/// Trait for page compression implementations.
///
/// Implementations must be `Send + Sync`: a single compressor may be shared
/// across threads (see the concurrent-access tests in this file).
pub trait PageCompressor: Send + Sync {
    /// Compress a 4KB page.
    ///
    /// Incompressible pages may be returned stored as-is rather than failing.
    ///
    /// # Errors
    ///
    /// Returns an error if compression fails.
    fn compress(&self, page: &[u8; PAGE_SIZE]) -> Result<CompressedPage>;

    /// Decompress to a 4KB page.
    ///
    /// # Errors
    ///
    /// Returns an error if decompression fails or the payload does not
    /// expand to exactly [`PAGE_SIZE`] bytes.
    fn decompress(&self, compressed: &CompressedPage) -> Result<[u8; PAGE_SIZE]>;

    /// Get the SIMD backend in use.
    fn backend(&self) -> SimdBackend;

    /// Get compression statistics.
    ///
    /// Returns a point-in-time snapshot; counters keep advancing concurrently.
    fn stats(&self) -> CompressionStats;

    /// Reset statistics.
    fn reset_stats(&self);
}
135
/// Builder for configuring a page compressor.
#[derive(Debug, Clone)]
pub struct CompressorBuilder {
    // Algorithm to use; defaults to `Algorithm::Lz4` via `Algorithm::default()`.
    algorithm: Algorithm,
    // `None` means "auto-detect the best backend at build() time".
    preferred_backend: Option<SimdBackend>,
}
142
143impl Default for CompressorBuilder {
144    fn default() -> Self {
145        Self::new()
146    }
147}
148
149impl CompressorBuilder {
150    /// Create a new compressor builder with default settings.
151    #[must_use]
152    pub fn new() -> Self {
153        Self {
154            algorithm: Algorithm::default(),
155            preferred_backend: None,
156        }
157    }
158
159    /// Set the compression algorithm.
160    #[must_use]
161    pub fn algorithm(mut self, algo: Algorithm) -> Self {
162        self.algorithm = algo;
163        self
164    }
165
166    /// Set the preferred SIMD backend.
167    #[must_use]
168    pub fn prefer_backend(mut self, backend: SimdBackend) -> Self {
169        self.preferred_backend = Some(backend);
170        self
171    }
172
173    /// Build the compressor.
174    ///
175    /// # Errors
176    ///
177    /// Returns an error if the configuration is invalid or the preferred
178    /// backend is not available.
179    pub fn build(self) -> Result<Box<dyn PageCompressor>> {
180        let backend = self.preferred_backend.unwrap_or_else(simd::best_backend);
181
182        // Validate backend availability
183        if !simd::backend_available(backend) {
184            return Err(Error::SimdNotAvailable(backend));
185        }
186
187        Ok(Box::new(GenericCompressor::new(self.algorithm, backend)))
188    }
189}
190
/// Generic page compressor implementation.
///
/// Dispatches on `algorithm` per call and tracks per-compressor statistics
/// with atomic counters so `&self` methods stay thread-safe.
struct GenericCompressor {
    // Algorithm selected at build time.
    algorithm: Algorithm,
    // Reported via `PageCompressor::backend`; dispatch itself happens in the
    // lz4/zstd modules.
    backend: SimdBackend,
    // Lock-free counters; snapshotted by `stats()`.
    stats: CompressorStats,
}
197
/// Internal atomic counters backing [`CompressionStats`] snapshots.
///
/// All counters start at zero. `#[derive(Default)]` replaces the previous
/// hand-written `Default` impl: `AtomicU64::default()` is `AtomicU64::new(0)`,
/// so the derived impl is behaviorally identical with less boilerplate.
#[derive(Debug, Default)]
struct CompressorStats {
    // Total pages passed through `compress` (incompressible pages included).
    pages_compressed: AtomicU64,
    // Pages stored uncompressed because the codec output was not smaller.
    pages_incompressible: AtomicU64,
    // Uncompressed input bytes (PAGE_SIZE per compressed page).
    bytes_in: AtomicU64,
    // Bytes actually stored for successful compressions.
    bytes_out: AtomicU64,
    // Cumulative wall-clock nanoseconds spent in `compress`.
    compress_time_ns: AtomicU64,
    // Cumulative wall-clock nanoseconds spent in `decompress`.
    decompress_time_ns: AtomicU64,
}
219
220impl GenericCompressor {
221    fn new(algorithm: Algorithm, backend: SimdBackend) -> Self {
222        Self {
223            algorithm,
224            backend,
225            stats: CompressorStats::default(),
226        }
227    }
228}
229
230impl PageCompressor for GenericCompressor {
231    fn compress(&self, page: &[u8; PAGE_SIZE]) -> Result<CompressedPage> {
232        let start = Instant::now();
233
234        let result = match self.algorithm {
235            Algorithm::None => Ok(CompressedPage::uncompressed(*page)),
236            Algorithm::Lz4 | Algorithm::Lz4Hc => {
237                let compressed = lz4::compress(page)?;
238                if compressed.len() >= PAGE_SIZE {
239                    self.stats
240                        .pages_incompressible
241                        .fetch_add(1, Ordering::Relaxed);
242                    Ok(CompressedPage::uncompressed(*page))
243                } else {
244                    CompressedPage::new(compressed, PAGE_SIZE, Algorithm::Lz4)
245                }
246            }
247            Algorithm::Zstd { level } => {
248                let compressed = zstd::compress(page, level)?;
249                if compressed.len() >= PAGE_SIZE {
250                    self.stats
251                        .pages_incompressible
252                        .fetch_add(1, Ordering::Relaxed);
253                    Ok(CompressedPage::uncompressed(*page))
254                } else {
255                    CompressedPage::new(compressed, PAGE_SIZE, Algorithm::Zstd { level })
256                }
257            }
258            Algorithm::Adaptive => {
259                // Try LZ4 first as it's fastest
260                let compressed = lz4::compress(page)?;
261                if compressed.len() >= PAGE_SIZE {
262                    self.stats
263                        .pages_incompressible
264                        .fetch_add(1, Ordering::Relaxed);
265                    Ok(CompressedPage::uncompressed(*page))
266                } else {
267                    CompressedPage::new(compressed, PAGE_SIZE, Algorithm::Lz4)
268                }
269            }
270        };
271
272        let elapsed = start.elapsed().as_nanos() as u64;
273        self.stats
274            .compress_time_ns
275            .fetch_add(elapsed, Ordering::Relaxed);
276        self.stats.pages_compressed.fetch_add(1, Ordering::Relaxed);
277        self.stats
278            .bytes_in
279            .fetch_add(PAGE_SIZE as u64, Ordering::Relaxed);
280
281        if let Ok(ref page) = result {
282            self.stats
283                .bytes_out
284                .fetch_add(page.data.len() as u64, Ordering::Relaxed);
285        }
286
287        result
288    }
289
290    fn decompress(&self, compressed: &CompressedPage) -> Result<[u8; PAGE_SIZE]> {
291        let start = Instant::now();
292
293        let result = match compressed.algorithm {
294            Algorithm::None => {
295                let mut page = [0u8; PAGE_SIZE];
296                if compressed.data.len() != PAGE_SIZE {
297                    return Err(Error::CorruptedData(format!(
298                        "uncompressed page has wrong size: {}",
299                        compressed.data.len()
300                    )));
301                }
302                page.copy_from_slice(&compressed.data);
303                Ok(page)
304            }
305            Algorithm::Lz4 | Algorithm::Lz4Hc => {
306                let mut page = [0u8; PAGE_SIZE];
307                let len = lz4::decompress(&compressed.data, &mut page)?;
308                if len != PAGE_SIZE {
309                    return Err(Error::CorruptedData(format!(
310                        "decompressed size mismatch: expected {PAGE_SIZE}, got {len}"
311                    )));
312                }
313                Ok(page)
314            }
315            Algorithm::Zstd { .. } => {
316                let mut page = [0u8; PAGE_SIZE];
317                let len = zstd::decompress(&compressed.data, &mut page)?;
318                if len != PAGE_SIZE {
319                    return Err(Error::CorruptedData(format!(
320                        "decompressed size mismatch: expected {PAGE_SIZE}, got {len}"
321                    )));
322                }
323                Ok(page)
324            }
325            Algorithm::Adaptive => {
326                // Should not happen - adaptive resolves to concrete algorithm
327                Err(Error::Internal(
328                    "adaptive algorithm in compressed data".to_string(),
329                ))
330            }
331        };
332
333        let elapsed = start.elapsed().as_nanos() as u64;
334        self.stats
335            .decompress_time_ns
336            .fetch_add(elapsed, Ordering::Relaxed);
337
338        result
339    }
340
341    fn backend(&self) -> SimdBackend {
342        self.backend
343    }
344
345    fn stats(&self) -> CompressionStats {
346        CompressionStats {
347            pages_compressed: self.stats.pages_compressed.load(Ordering::Relaxed),
348            pages_incompressible: self.stats.pages_incompressible.load(Ordering::Relaxed),
349            bytes_in: self.stats.bytes_in.load(Ordering::Relaxed),
350            bytes_out: self.stats.bytes_out.load(Ordering::Relaxed),
351            compress_time_ns: self.stats.compress_time_ns.load(Ordering::Relaxed),
352            decompress_time_ns: self.stats.decompress_time_ns.load(Ordering::Relaxed),
353        }
354    }
355
356    fn reset_stats(&self) {
357        self.stats.pages_compressed.store(0, Ordering::Relaxed);
358        self.stats.pages_incompressible.store(0, Ordering::Relaxed);
359        self.stats.bytes_in.store(0, Ordering::Relaxed);
360        self.stats.bytes_out.store(0, Ordering::Relaxed);
361        self.stats.compress_time_ns.store(0, Ordering::Relaxed);
362        self.stats.decompress_time_ns.store(0, Ordering::Relaxed);
363    }
364}
365
366#[cfg(test)]
367mod tests {
368    use super::*;
369
    #[test]
    fn test_compressor_builder_default() {
        // With no preferred backend, build() auto-detects; any known
        // variant is acceptable depending on the host CPU.
        let compressor = CompressorBuilder::new().build().unwrap();
        assert!(matches!(
            compressor.backend(),
            SimdBackend::Scalar
                | SimdBackend::Sse42
                | SimdBackend::Avx2
                | SimdBackend::Avx512
                | SimdBackend::Neon
        ));
    }

    #[test]
    fn test_compressor_roundtrip_lz4() {
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::Lz4)
            .build()
            .unwrap();

        // Uniform fill compresses well; exercises the compressed path.
        let page = [0xABu8; PAGE_SIZE];
        let compressed = compressor.compress(&page).unwrap();
        let decompressed = compressor.decompress(&compressed).unwrap();

        assert_eq!(page, decompressed);
    }

    #[test]
    fn test_compressor_roundtrip_zstd() {
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::Zstd { level: 3 })
            .build()
            .unwrap();

        let page = [0xCDu8; PAGE_SIZE];
        let compressed = compressor.compress(&page).unwrap();
        let decompressed = compressor.decompress(&compressed).unwrap();

        assert_eq!(page, decompressed);
    }

    #[test]
    fn test_compressor_none() {
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::None)
            .build()
            .unwrap();

        let page = [0x12u8; PAGE_SIZE];
        let compressed = compressor.compress(&page).unwrap();

        // Algorithm::None must store the page verbatim.
        assert!(!compressed.is_compressed());
        assert_eq!(compressed.data.len(), PAGE_SIZE);

        let decompressed = compressor.decompress(&compressed).unwrap();
        assert_eq!(page, decompressed);
    }

    #[test]
    fn test_compressor_stats() {
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::Lz4)
            .build()
            .unwrap();

        let page = [0u8; PAGE_SIZE];
        compressor.compress(&page).unwrap();
        compressor.compress(&page).unwrap();

        // Each compress() call counts one page and PAGE_SIZE input bytes.
        let stats = compressor.stats();
        assert_eq!(stats.pages_compressed, 2);
        assert_eq!(stats.bytes_in, PAGE_SIZE as u64 * 2);
    }

    #[test]
    fn test_compressor_stats_reset() {
        let compressor = CompressorBuilder::new().build().unwrap();

        let page = [0u8; PAGE_SIZE];
        compressor.compress(&page).unwrap();

        // reset_stats() zeroes all counters.
        compressor.reset_stats();
        let stats = compressor.stats();
        assert_eq!(stats.pages_compressed, 0);
    }
455
    #[test]
    fn test_algorithm_default() {
        // Lz4 is the documented #[default] variant.
        assert_eq!(Algorithm::default(), Algorithm::Lz4);
    }

    #[test]
    fn test_simd_backend_default() {
        // Scalar is the documented #[default] variant.
        assert_eq!(SimdBackend::default(), SimdBackend::Scalar);
    }

    #[test]
    fn test_prefer_unavailable_backend() {
        // AVX-512 might not be available on all systems
        // This test just verifies the error handling works
        let result = CompressorBuilder::new()
            .prefer_backend(SimdBackend::Avx512)
            .build();

        // Either succeeds (CPU has AVX-512) or returns appropriate error
        match result {
            Ok(c) => assert_eq!(c.backend(), SimdBackend::Avx512),
            Err(Error::SimdNotAvailable(_)) => {}
            Err(e) => unreachable!("Unexpected error: {e}"),
        }
    }

    #[test]
    fn test_compressor_builder_default_impl() {
        // Default::default() must behave like CompressorBuilder::new().
        let compressor = CompressorBuilder::default().build().unwrap();
        let _ = compressor.backend();
    }

    #[test]
    fn test_compressor_adaptive() {
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::Adaptive)
            .build()
            .unwrap();

        // Adaptive resolves to a concrete algorithm before storage, so the
        // result must be decompressible like any other page.
        let page = [0xABu8; PAGE_SIZE];
        let compressed = compressor.compress(&page).unwrap();
        let decompressed = compressor.decompress(&compressed).unwrap();
        assert_eq!(page, decompressed);
    }
500
    #[test]
    fn test_compressor_adaptive_incompressible() {
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::Adaptive)
            .build()
            .unwrap();

        // Random data is incompressible
        // (LCG constant is Knuth's MMIX multiplier; deterministic PRNG keeps
        // the test reproducible without external crates.)
        let mut page = [0u8; PAGE_SIZE];
        let mut rng = 12345u64;
        for byte in &mut page {
            rng = rng.wrapping_mul(6364136223846793005).wrapping_add(1);
            *byte = (rng >> 33) as u8;
        }

        let compressed = compressor.compress(&page).unwrap();
        let decompressed = compressor.decompress(&compressed).unwrap();
        assert_eq!(page, decompressed);
    }

    #[test]
    fn test_compressor_zstd_incompressible() {
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::Zstd { level: 1 })
            .build()
            .unwrap();

        // Random data is incompressible
        let mut page = [0u8; PAGE_SIZE];
        let mut rng = 54321u64;
        for byte in &mut page {
            rng = rng.wrapping_mul(6364136223846793005).wrapping_add(1);
            *byte = (rng >> 33) as u8;
        }

        let compressed = compressor.compress(&page).unwrap();
        // Should store uncompressed since random data doesn't compress
        let decompressed = compressor.decompress(&compressed).unwrap();
        assert_eq!(page, decompressed);
    }
541
    #[test]
    fn test_decompress_wrong_size_uncompressed() {
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::None)
            .build()
            .unwrap();

        // Create a compressed page with wrong size
        let bad_page = CompressedPage {
            data: vec![0u8; PAGE_SIZE / 2],
            original_size: PAGE_SIZE,
            algorithm: Algorithm::None,
        };

        // Must match the "uncompressed page has wrong size" error path.
        let result = compressor.decompress(&bad_page);
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.to_string().contains("wrong size"));
    }

    #[test]
    fn test_decompress_adaptive_error() {
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::Lz4)
            .build()
            .unwrap();

        // Create a compressed page with Adaptive algorithm (shouldn't happen)
        let bad_page = CompressedPage {
            data: vec![0u8; 100],
            original_size: PAGE_SIZE,
            algorithm: Algorithm::Adaptive,
        };

        // decompress() must reject Adaptive as an internal error.
        let result = compressor.decompress(&bad_page);
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.to_string().contains("adaptive"));
    }

    #[test]
    fn test_decompress_lz4_size_mismatch() {
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::Lz4)
            .build()
            .unwrap();

        // Compress a small page, but claim it's PAGE_SIZE
        let small_page = [0xAAu8; 128];
        let compressed = lz4::compress(&small_page).unwrap();
        let bad_page = CompressedPage {
            data: compressed,
            original_size: PAGE_SIZE, // Wrong size!
            algorithm: Algorithm::Lz4,
        };

        // Either the decoder reports corruption or the size check fires.
        let result = compressor.decompress(&bad_page);
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.to_string().contains("size mismatch") || err.to_string().contains("corrupted"));
    }

    #[test]
    fn test_decompress_zstd_size_mismatch() {
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::Zstd { level: 1 })
            .build()
            .unwrap();

        // Compress a small page, but claim it's PAGE_SIZE
        let small_page = [0xBBu8; 256];
        let compressed = zstd::compress(&small_page, 1).unwrap();
        let bad_page = CompressedPage {
            data: compressed,
            original_size: PAGE_SIZE, // Wrong size!
            algorithm: Algorithm::Zstd { level: 1 },
        };

        let result = compressor.decompress(&bad_page);
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.to_string().contains("size mismatch") || err.to_string().contains("corrupted"));
    }

    #[test]
    fn test_compressor_lz4hc_roundtrip() {
        // Test LZ4HC algorithm path
        // (currently an alias for plain LZ4 in compress()).
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::Lz4Hc)
            .build()
            .unwrap();

        let page = [0xCDu8; PAGE_SIZE];
        let compressed = compressor.compress(&page).unwrap();
        let decompressed = compressor.decompress(&compressed).unwrap();
        assert_eq!(page, decompressed);
    }

    #[test]
    fn test_stats_decompress_time_tracked() {
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::Lz4)
            .build()
            .unwrap();

        let page = [0xEFu8; PAGE_SIZE];
        let compressed = compressor.compress(&page).unwrap();
        compressor.decompress(&compressed).unwrap();

        // A successful decompress must accumulate nonzero wall-clock time.
        let stats = compressor.stats();
        assert!(stats.decompress_time_ns > 0);
    }
654
655    // ============================================================
656    // Safety and Security Falsification Tests F076-F085
657    // ============================================================
658
659    /// F076: No buffer overflows - test boundary conditions
660    #[test]
661    fn test_f076_no_buffer_overflows() {
662        let compressor = CompressorBuilder::new()
663            .algorithm(Algorithm::Lz4)
664            .build()
665            .unwrap();
666
667        // Test with various edge case patterns that might trigger buffer issues
668        let edge_cases: [[u8; PAGE_SIZE]; 4] = [
669            [0xFF; PAGE_SIZE], // All bits set
670            [0x00; PAGE_SIZE], // All zeros
671            [0x80; PAGE_SIZE], // High bit pattern
672            {
673                let mut p = [0u8; PAGE_SIZE];
674                // Boundary-crossing pattern
675                for i in 0..PAGE_SIZE {
676                    p[i] = (i & 0xFF) as u8;
677                }
678                p
679            },
680        ];
681
682        for page in &edge_cases {
683            let compressed = compressor.compress(page).unwrap();
684            // Verify no buffer overflow occurred
685            assert!(compressed.data.len() <= PAGE_SIZE + 512); // LZ4 worst case
686            let decompressed = compressor.decompress(&compressed).unwrap();
687            assert_eq!(page, &decompressed);
688        }
689
690        // Test malicious compressed data that claims large offsets
691        let bad_compressed = CompressedPage {
692            data: vec![0x0F, 0x00, 0xFF, 0xFF], // Token claiming large match
693            original_size: PAGE_SIZE,
694            algorithm: Algorithm::Lz4,
695        };
696        // Should return error, not crash
697        let result = compressor.decompress(&bad_compressed);
698        assert!(result.is_err() || result.is_ok());
699    }
700
    #[test]
    fn test_f077_no_integer_overflow_page_size() {
        // F077: Size calculations should be checked
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::Lz4)
            .build()
            .unwrap();

        // Compress many pages to stress size calculations
        for _ in 0..100 {
            let page = [0xABu8; PAGE_SIZE];
            let compressed = compressor.compress(&page).unwrap();
            assert!(compressed.data.len() <= PAGE_SIZE + 256); // LZ4 max expansion
        }
    }

    #[test]
    fn test_f081_no_panics_on_valid_input() {
        // F081: Panic-free library code
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::Lz4)
            .build()
            .unwrap();

        // Various valid inputs should not panic
        let patterns = [
            [0u8; PAGE_SIZE],    // zeros
            [0xFFu8; PAGE_SIZE], // all ones
            [0xAAu8; PAGE_SIZE], // alternating bits
        ];

        for page in &patterns {
            let result = compressor.compress(page);
            assert!(result.is_ok());
            let compressed = result.unwrap();
            let decompressed = compressor.decompress(&compressed);
            assert!(decompressed.is_ok());
        }
    }

    #[test]
    fn test_f082_error_types_implement_error() {
        // F082: All errors are std::error::Error
        use std::error::Error as StdError;

        let error = Error::CorruptedData("test".to_string());
        // Error implements std::error::Error
        let _: &dyn StdError = &error;
        // Error has a display implementation
        let msg = format!("{error}");
        assert!(!msg.is_empty());
    }

    #[test]
    fn test_f082_error_types_send_sync() {
        // F082: Errors should be Send + Sync for thread safety
        // (compile-time check: the call only type-checks if the bounds hold)
        fn assert_send_sync<T: Send + Sync>() {}
        assert_send_sync::<Error>();
    }
760
    /// F078: No use-after-free - Rust's borrow checker prevents this
    #[test]
    fn test_f078_no_use_after_free() {
        // Test that data remains valid after multiple operations
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::Lz4)
            .build()
            .unwrap();

        let page = [0xAB; PAGE_SIZE];
        let compressed = compressor.compress(&page).unwrap();

        // Drop and recreate compressor
        drop(compressor);
        let compressor2 = CompressorBuilder::new()
            .algorithm(Algorithm::Lz4)
            .build()
            .unwrap();

        // compressed should still be valid
        let decompressed = compressor2.decompress(&compressed).unwrap();
        assert_eq!(page, decompressed);

        // Test with Vec reallocations
        let mut pages = Vec::new();
        for i in 0..100 {
            let p = [i as u8; PAGE_SIZE];
            let c = compressor2.compress(&p).unwrap();
            pages.push(c);
        }

        // All pages should still be valid after Vec growth
        for (i, compressed) in pages.iter().enumerate() {
            let expected = [i as u8; PAGE_SIZE];
            let decompressed = compressor2.decompress(compressed).unwrap();
            assert_eq!(expected, decompressed);
        }
    }

    /// F079: No data races - concurrent access is safe
    #[test]
    fn test_f079_no_data_races() {
        use std::sync::{Arc, Barrier};
        use std::thread;

        let compressor = Arc::new(
            CompressorBuilder::new()
                .algorithm(Algorithm::Lz4)
                .build()
                .unwrap(),
        );

        let num_threads = 8;
        let barrier = Arc::new(Barrier::new(num_threads));

        let handles: Vec<_> = (0..num_threads)
            .map(|tid| {
                let comp = Arc::clone(&compressor);
                let bar = Arc::clone(&barrier);
                thread::spawn(move || {
                    // Synchronize all threads to maximize contention
                    bar.wait();

                    for i in 0..50 {
                        let page = [(tid * 50 + i) as u8; PAGE_SIZE];
                        let compressed = comp.compress(&page).unwrap();
                        let decompressed = comp.decompress(&compressed).unwrap();
                        assert_eq!(page, decompressed);

                        // Check stats concurrently
                        let _stats = comp.stats();
                    }
                })
            })
            .collect();

        for handle in handles {
            handle.join().unwrap();
        }
    }

    /// F080: No undefined behavior - edge cases handled safely
    #[test]
    fn test_f080_no_undefined_behavior() {
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::Lz4)
            .build()
            .unwrap();

        // Test extreme patterns that might trigger UB
        let test_cases: Vec<[u8; PAGE_SIZE]> = vec![
            // Alternating bits
            {
                let mut p = [0u8; PAGE_SIZE];
                for i in 0..PAGE_SIZE {
                    p[i] = if i % 2 == 0 { 0xAA } else { 0x55 };
                }
                p
            },
            // Powers of two offsets
            {
                let mut p = [0u8; PAGE_SIZE];
                let mut offset = 1;
                while offset < PAGE_SIZE {
                    p[offset] = 0xFF;
                    offset *= 2;
                }
                p
            },
            // Near-boundary values
            {
                let mut p = [0u8; PAGE_SIZE];
                p[0] = 0xFF;
                p[PAGE_SIZE - 1] = 0xFF;
                p[PAGE_SIZE / 2] = 0xFF;
                p
            },
        ];

        for page in &test_cases {
            let compressed = compressor.compress(page).unwrap();
            let decompressed = compressor.decompress(&compressed).unwrap();
            assert_eq!(page, &decompressed);
        }
    }

    /// F083: Secure memory clearing - data is properly managed
    #[test]
    fn test_f083_secure_memory_management() {
        // Rust's Drop trait ensures cleanup
        // Test that repeated alloc/dealloc doesn't leak
        for _ in 0..100 {
            let compressor = CompressorBuilder::new()
                .algorithm(Algorithm::Lz4)
                .build()
                .unwrap();

            let page = [0xCD; PAGE_SIZE];
            let compressed = compressor.compress(&page).unwrap();
            let _decompressed = compressor.decompress(&compressed).unwrap();
            // compressor and all data dropped here
        }
        // If we get here without OOM, memory is being freed
    }
905
    /// F084: Constant-time operations where security-relevant
    // NOTE(review): wall-clock ratio assertions are inherently flaky under
    // CI scheduler noise; the 10x tolerance mitigates but does not eliminate
    // this — consider relaxing further if this test flakes.
    #[test]
    fn test_f084_timing_consistency() {
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::Lz4)
            .build()
            .unwrap();

        // Test that compression of similar-sized outputs takes similar time
        // (not a cryptographic guarantee, but basic sanity check)
        let page1 = [0xAA; PAGE_SIZE];
        let page2 = [0xBB; PAGE_SIZE];

        // Warm up
        for _ in 0..10 {
            let _ = compressor.compress(&page1);
            let _ = compressor.compress(&page2);
        }

        // Measure - just verify both complete without hanging
        let start1 = std::time::Instant::now();
        let c1 = compressor.compress(&page1).unwrap();
        let t1 = start1.elapsed();

        let start2 = std::time::Instant::now();
        let c2 = compressor.compress(&page2).unwrap();
        let t2 = start2.elapsed();

        // Sizes should be similar for uniform data
        assert_eq!(c1.data.len(), c2.data.len());

        // Times should be in same order of magnitude (10x tolerance)
        // (.max(1) guards against division by zero on very fast runs)
        let ratio = t1.as_nanos() as f64 / t2.as_nanos().max(1) as f64;
        assert!(
            ratio > 0.1 && ratio < 10.0,
            "Timing variance too high: {ratio:.2}"
        );
    }

    /// F085: Safe FFI boundaries - no unsafe FFI in this crate
    #[test]
    fn test_f085_no_external_ffi() {
        // This crate uses pure Rust implementations, no external FFI
        // Verify by checking that all operations work without external deps
        let compressor = CompressorBuilder::new()
            .algorithm(Algorithm::Lz4)
            .build()
            .unwrap();

        let page = [0xEF; PAGE_SIZE];
        let compressed = compressor.compress(&page).unwrap();
        let decompressed = compressor.decompress(&compressed).unwrap();
        assert_eq!(page, decompressed);

        // Also test Zstd path
        let compressor_zstd = CompressorBuilder::new()
            .algorithm(Algorithm::Zstd { level: 3 })
            .build()
            .unwrap();

        let compressed_zstd = compressor_zstd.compress(&page).unwrap();
        let decompressed_zstd = compressor_zstd.decompress(&compressed_zstd).unwrap();
        assert_eq!(page, decompressed_zstd);
    }
970
971    #[test]
972    fn test_corrupted_data_returns_error() {
973        // F009: Corrupted input detected (from spec)
974        let compressor = CompressorBuilder::new()
975            .algorithm(Algorithm::Lz4)
976            .build()
977            .unwrap();
978
979        // Create valid compressed data
980        let page = [0xCDu8; PAGE_SIZE];
981        let mut compressed = compressor.compress(&page).unwrap();
982
983        // Corrupt the compressed data
984        if !compressed.data.is_empty() {
985            compressed.data[0] ^= 0xFF;
986        }
987
988        // Decompression should return error, not panic
989        let result = compressor.decompress(&compressed);
990        // Either produces an error or incorrect output (no panic)
991        let _ = result;
992    }
993
994    #[test]
995    fn test_truncated_data_returns_error() {
996        // F010: Truncated input detected (from spec)
997        let compressor = CompressorBuilder::new()
998            .algorithm(Algorithm::Lz4)
999            .build()
1000            .unwrap();
1001
1002        // Create valid compressed data
1003        let page = [0xABu8; PAGE_SIZE];
1004        let mut compressed = compressor.compress(&page).unwrap();
1005
1006        // Truncate the compressed data
1007        if compressed.data.len() > 1 {
1008            compressed.data.truncate(compressed.data.len() / 2);
1009        }
1010
1011        // Decompression should return error, not panic
1012        let result = compressor.decompress(&compressed);
1013        assert!(result.is_err());
1014    }
1015
1016    #[test]
1017    fn test_f012_concurrent_compression_safe() {
1018        // F012: Concurrent compression safe
1019        use std::sync::Arc;
1020        use std::thread;
1021
1022        let compressor = Arc::new(
1023            CompressorBuilder::new()
1024                .algorithm(Algorithm::Lz4)
1025                .build()
1026                .unwrap(),
1027        );
1028
1029        let handles: Vec<_> = (0..4)
1030            .map(|i| {
1031                let comp = Arc::clone(&compressor);
1032                thread::spawn(move || {
1033                    for j in 0..100 {
1034                        let page = [(i * 10 + j) as u8; PAGE_SIZE];
1035                        let compressed = comp.compress(&page).unwrap();
1036                        let decompressed = comp.decompress(&compressed).unwrap();
1037                        assert_eq!(page, decompressed);
1038                    }
1039                })
1040            })
1041            .collect();
1042
1043        for handle in handles {
1044            handle.join().unwrap();
1045        }
1046    }
1047
1048    #[test]
1049    fn test_f020_stack_usage_bounded() {
1050        // F020: Stack usage bounded - <64KB per compression call
1051        // This test verifies we don't use excessive stack space
1052        let compressor = CompressorBuilder::new()
1053            .algorithm(Algorithm::Lz4)
1054            .build()
1055            .unwrap();
1056
1057        // Recursive function to consume stack space, then compress
1058        fn compress_with_stack_pressure(
1059            compressor: &dyn PageCompressor,
1060            depth: usize,
1061        ) -> Result<()> {
1062            if depth == 0 {
1063                let page = [0xEEu8; PAGE_SIZE];
1064                let compressed = compressor.compress(&page)?;
1065                let _ = compressor.decompress(&compressed)?;
1066                Ok(())
1067            } else {
1068                // Use some stack space (but not too much to avoid stack overflow)
1069                let buffer = [0u8; 256];
1070                std::hint::black_box(&buffer);
1071                compress_with_stack_pressure(compressor, depth - 1)
1072            }
1073        }
1074
1075        // Should succeed even with nested calls
1076        compress_with_stack_pressure(compressor.as_ref(), 10).unwrap();
1077    }
1078
1079    #[test]
1080    fn test_empty_page_compress() {
1081        // Edge case: ensure we handle page boundaries correctly
1082        let compressor = CompressorBuilder::new()
1083            .algorithm(Algorithm::Lz4)
1084            .build()
1085            .unwrap();
1086
1087        // Minimum non-trivial page
1088        let page = {
1089            let mut p = [0u8; PAGE_SIZE];
1090            p[0] = 1;
1091            p
1092        };
1093
1094        let compressed = compressor.compress(&page).unwrap();
1095        let decompressed = compressor.decompress(&compressed).unwrap();
1096        assert_eq!(page, decompressed);
1097    }
1098
1099    #[test]
1100    fn test_max_entropy_page() {
1101        // High entropy page (worst case for compression)
1102        let compressor = CompressorBuilder::new()
1103            .algorithm(Algorithm::Lz4)
1104            .build()
1105            .unwrap();
1106
1107        let mut page = [0u8; PAGE_SIZE];
1108        let mut rng = 98765u64;
1109        for byte in &mut page {
1110            rng = rng.wrapping_mul(6364136223846793005).wrapping_add(1);
1111            *byte = (rng >> 33) as u8;
1112        }
1113
1114        let compressed = compressor.compress(&page).unwrap();
1115        let decompressed = compressor.decompress(&compressed).unwrap();
1116        assert_eq!(page, decompressed);
1117    }
1118
1119    // ============================================================
1120    // Compression Correctness Falsification Tests F002-F020
1121    // ============================================================
1122
1123    /// F003: Zero-page optimization works correctly
1124    #[test]
1125    fn test_f003_zero_page_optimization() {
1126        let compressor = CompressorBuilder::new()
1127            .algorithm(Algorithm::Lz4)
1128            .build()
1129            .unwrap();
1130
1131        // Pure zero page should compress very well
1132        let zero_page = [0u8; PAGE_SIZE];
1133        let compressed = compressor.compress(&zero_page).unwrap();
1134
1135        // Zero pages should compress to a small size (< 100 bytes typically)
1136        assert!(
1137            compressed.data.len() < 100,
1138            "Zero page should compress well, got {} bytes",
1139            compressed.data.len()
1140        );
1141
1142        // Must roundtrip correctly
1143        let decompressed = compressor.decompress(&compressed).unwrap();
1144        assert_eq!(zero_page, decompressed);
1145    }
1146
1147    /// F004: Full entropy pages handled correctly
1148    #[test]
1149    fn test_f004_full_entropy_pages() {
1150        let compressor = CompressorBuilder::new()
1151            .algorithm(Algorithm::Lz4)
1152            .build()
1153            .unwrap();
1154
1155        // Generate cryptographically-random-like page
1156        let mut page = [0u8; PAGE_SIZE];
1157        let mut rng = 0xCAFEBABE_u64;
1158        for byte in &mut page {
1159            // PCG-style PRNG for maximum entropy
1160            rng = rng
1161                .wrapping_mul(6364136223846793005)
1162                .wrapping_add(1442695040888963407);
1163            *byte = (rng >> 56) as u8;
1164        }
1165
1166        let compressed = compressor.compress(&page).unwrap();
1167        let decompressed = compressor.decompress(&compressed).unwrap();
1168        assert_eq!(page, decompressed);
1169
1170        // High entropy pages may not compress well, but must roundtrip
1171        // They may be stored uncompressed (data.len() == PAGE_SIZE)
1172    }
1173
1174    /// F006: Repeated patterns compress well
1175    #[test]
1176    fn test_f006_repeated_patterns_compress_well() {
1177        let compressor = CompressorBuilder::new()
1178            .algorithm(Algorithm::Lz4)
1179            .build()
1180            .unwrap();
1181
1182        // Create page with repeated 16-byte pattern
1183        let pattern = [
1184            0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0, 0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54,
1185            0x32, 0x10,
1186        ];
1187        let mut page = [0u8; PAGE_SIZE];
1188        for (i, byte) in page.iter_mut().enumerate() {
1189            *byte = pattern[i % pattern.len()];
1190        }
1191
1192        let compressed = compressor.compress(&page).unwrap();
1193
1194        // Repeated patterns should compress very well (< 500 bytes for 4KB)
1195        assert!(
1196            compressed.data.len() < 500,
1197            "Repeated pattern should compress well, got {} bytes",
1198            compressed.data.len()
1199        );
1200
1201        let decompressed = compressor.decompress(&compressed).unwrap();
1202        assert_eq!(page, decompressed);
1203    }
1204
1205    /// F007: Mixed zeros/data compresses correctly
1206    #[test]
1207    fn test_f007_mixed_zeros_data() {
1208        let compressor = CompressorBuilder::new()
1209            .algorithm(Algorithm::Lz4)
1210            .build()
1211            .unwrap();
1212
1213        // Page with alternating zero and data regions (sparse page pattern)
1214        let mut page = [0u8; PAGE_SIZE];
1215        for i in 0..PAGE_SIZE {
1216            if (i / 256) % 2 == 0 {
1217                // Keep zeros
1218            } else {
1219                page[i] = ((i * 17) % 256) as u8; // Some data
1220            }
1221        }
1222
1223        let compressed = compressor.compress(&page).unwrap();
1224        let decompressed = compressor.decompress(&compressed).unwrap();
1225        assert_eq!(page, decompressed);
1226    }
1227
1228    /// F008: Mixed content compresses correctly
1229    #[test]
1230    fn test_f008_mixed_content() {
1231        let compressor = CompressorBuilder::new()
1232            .algorithm(Algorithm::Lz4)
1233            .build()
1234            .unwrap();
1235
1236        // Realistic page content: text-like region, binary region, zeros
1237        let mut page = [0u8; PAGE_SIZE];
1238
1239        // First 1KB: ASCII-like content
1240        for i in 0..1024 {
1241            page[i] = (32 + (i % 95)) as u8; // Printable ASCII range
1242        }
1243
1244        // Second 1KB: Binary patterns
1245        for i in 1024..2048 {
1246            page[i] = (i * 137) as u8;
1247        }
1248
1249        // Third 1KB: Zeros (already set)
1250
1251        // Fourth 1KB: Repeated short sequence
1252        for i in 3072..PAGE_SIZE {
1253            page[i] = [0xAA, 0xBB, 0xCC, 0xDD][i % 4];
1254        }
1255
1256        let compressed = compressor.compress(&page).unwrap();
1257        let decompressed = compressor.decompress(&compressed).unwrap();
1258        assert_eq!(page, decompressed);
1259    }
1260
1261    /// F011: Oversized output handled correctly (incompressible data)
1262    #[test]
1263    fn test_f011_oversized_output_handled() {
1264        let compressor = CompressorBuilder::new()
1265            .algorithm(Algorithm::Lz4)
1266            .build()
1267            .unwrap();
1268
1269        // Create truly random data that won't compress
1270        let mut page = [0u8; PAGE_SIZE];
1271        let mut rng = 0xDEAD_BEEF_u64;
1272        for byte in &mut page {
1273            rng = rng.wrapping_mul(6364136223846793005).wrapping_add(1);
1274            *byte = ((rng >> 33) ^ (rng >> 17)) as u8;
1275        }
1276
1277        // Should handle incompressible data gracefully
1278        let compressed = compressor.compress(&page).unwrap();
1279
1280        // Stats should track incompressible pages
1281        let stats = compressor.stats();
1282        // Either compressed well or marked as incompressible
1283        if compressed.data.len() >= PAGE_SIZE {
1284            assert!(stats.pages_incompressible > 0 || !compressed.is_compressed());
1285        }
1286
1287        // Must roundtrip
1288        let decompressed = compressor.decompress(&compressed).unwrap();
1289        assert_eq!(page, decompressed);
1290    }
1291
1292    /// F013: Page size is enforced
1293    #[test]
1294    fn test_f013_page_size_enforced() {
1295        // Verify PAGE_SIZE constant is correct
1296        assert_eq!(PAGE_SIZE, 4096, "PAGE_SIZE must be 4096 bytes");
1297
1298        // Verify compressed page tracks original size
1299        let compressor = CompressorBuilder::new()
1300            .algorithm(Algorithm::Lz4)
1301            .build()
1302            .unwrap();
1303
1304        let page = [0xAA; PAGE_SIZE];
1305        let compressed = compressor.compress(&page).unwrap();
1306
1307        assert_eq!(
1308            compressed.original_size, PAGE_SIZE,
1309            "Compressed page must track original size"
1310        );
1311    }
1312
1313    /// F014: Output buffer bounds respected
1314    #[test]
1315    fn test_f014_output_buffer_bounds() {
1316        // Test that decompression respects output bounds
1317        let compressor = CompressorBuilder::new()
1318            .algorithm(Algorithm::Lz4)
1319            .build()
1320            .unwrap();
1321
1322        // Compress a page
1323        let page = [0xBB; PAGE_SIZE];
1324        let compressed = compressor.compress(&page).unwrap();
1325
1326        // Decompress must produce exactly PAGE_SIZE output
1327        let decompressed = compressor.decompress(&compressed).unwrap();
1328        assert_eq!(decompressed.len(), PAGE_SIZE);
1329    }
1330
1331    /// F017: Level parameter is respected for Zstd
1332    #[test]
1333    fn test_f017_level_parameter_respected() {
1334        // Test different Zstd compression levels
1335        let page = [0xCC; PAGE_SIZE];
1336
1337        // Level 1 (fast)
1338        let comp1 = CompressorBuilder::new()
1339            .algorithm(Algorithm::Zstd { level: 1 })
1340            .build()
1341            .unwrap();
1342        let compressed1 = comp1.compress(&page).unwrap();
1343
1344        // Level 19 (high compression)
1345        let comp19 = CompressorBuilder::new()
1346            .algorithm(Algorithm::Zstd { level: 19 })
1347            .build()
1348            .unwrap();
1349        let compressed19 = comp19.compress(&page).unwrap();
1350
1351        // Both must roundtrip correctly
1352        let decomp1 = comp1.decompress(&compressed1).unwrap();
1353        let decomp19 = comp19.decompress(&compressed19).unwrap();
1354        assert_eq!(page, decomp1);
1355        assert_eq!(page, decomp19);
1356
1357        // Higher levels should produce same or better compression
1358        // (for uniform data they'll be similar, but no worse)
1359        assert!(
1360            compressed19.data.len() <= compressed1.data.len() + 10,
1361            "Higher level should not produce significantly worse compression"
1362        );
1363    }
1364
1365    /// F018: Compression ratio is acceptable
1366    #[test]
1367    fn test_f018_compression_ratio_acceptable() {
1368        let compressor = CompressorBuilder::new()
1369            .algorithm(Algorithm::Lz4)
1370            .build()
1371            .unwrap();
1372
1373        // Test various patterns and verify reasonable compression
1374        let test_cases = [
1375            ([0u8; PAGE_SIZE], "zeros", 50),    // Should compress to < 50 bytes
1376            ([0xAA; PAGE_SIZE], "uniform", 50), // Should compress to < 50 bytes
1377        ];
1378
1379        for (page, name, max_size) in test_cases {
1380            let compressed = compressor.compress(&page).unwrap();
1381            assert!(
1382                compressed.data.len() < max_size,
1383                "{} page should compress to < {} bytes, got {}",
1384                name,
1385                max_size,
1386                compressed.data.len()
1387            );
1388        }
1389    }
1390
1391    /// F019: No memory leaks (Rust RAII handles this, but verify no Box leaks)
1392    #[test]
1393    fn test_f019_no_memory_leaks() {
1394        // Run many compression/decompression cycles
1395        // Rust's RAII ensures cleanup, but verify no panic/abort
1396        let compressor = CompressorBuilder::new()
1397            .algorithm(Algorithm::Lz4)
1398            .build()
1399            .unwrap();
1400
1401        for i in 0..1000 {
1402            let page = [i as u8; PAGE_SIZE];
1403            let compressed = compressor.compress(&page).unwrap();
1404            let decompressed = compressor.decompress(&compressed).unwrap();
1405            assert_eq!(page, decompressed);
1406        }
1407
1408        // Also test Zstd
1409        let compressor_zstd = CompressorBuilder::new()
1410            .algorithm(Algorithm::Zstd { level: 3 })
1411            .build()
1412            .unwrap();
1413
1414        for i in 0..100 {
1415            let page = [i as u8; PAGE_SIZE];
1416            let compressed = compressor_zstd.compress(&page).unwrap();
1417            let decompressed = compressor_zstd.decompress(&compressed).unwrap();
1418            assert_eq!(page, decompressed);
1419        }
1420    }
1421}