// oxihuman_core/compression_pipeline.rs

// Copyright (C) 2026 COOLJAPAN OU (Team KitaSan)
// SPDX-License-Identifier: Apache-2.0
#![allow(dead_code)]

//! Multi-stage compression pipeline stub.

/// Compression algorithm selector.
///
/// A small, fieldless tag; derives `Copy` so values can be passed around
/// freely, and `Hash` so algorithms can key maps/sets.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum CompressAlgo {
    /// Pass-through: the pipeline applies no transformation for this stage.
    None,
    Lz4,
    Zstd,
    Brotli,
    Deflate,
}

impl CompressAlgo {
    /// Returns the canonical lowercase name of the algorithm.
    pub fn name(&self) -> &'static str {
        match self {
            CompressAlgo::None => "none",
            CompressAlgo::Lz4 => "lz4",
            CompressAlgo::Zstd => "zstd",
            CompressAlgo::Brotli => "brotli",
            CompressAlgo::Deflate => "deflate",
        }
    }
}
28
29/// A single pipeline stage.
30#[derive(Debug, Clone)]
31pub struct PipelineStage {
32    pub algo: CompressAlgo,
33    pub level: u8,
34}
35
36impl PipelineStage {
37    pub fn new(algo: CompressAlgo, level: u8) -> Self {
38        PipelineStage {
39            algo,
40            level: level.min(9),
41        }
42    }
43}
44
45/// Multi-stage compression pipeline.
46pub struct CompressionPipeline {
47    stages: Vec<PipelineStage>,
48}
49
50impl CompressionPipeline {
51    pub fn new() -> Self {
52        CompressionPipeline { stages: Vec::new() }
53    }
54
55    pub fn add_stage(&mut self, stage: PipelineStage) {
56        self.stages.push(stage);
57    }
58
59    pub fn stage_count(&self) -> usize {
60        self.stages.len()
61    }
62
63    /// Compress data through all stages (stub: returns same data with metadata).
64    pub fn compress(&self, data: &[u8]) -> CompressResult {
65        let mut out = data.to_vec();
66        let original_len = data.len();
67        for stage in &self.stages {
68            if stage.algo != CompressAlgo::None && !out.is_empty() {
69                /* Stub: simulate compression by reducing size by 10% per stage */
70                let new_len = (out.len() * 9 / 10).max(1);
71                out.truncate(new_len);
72            }
73        }
74        CompressResult {
75            data: out,
76            original_size: original_len,
77            stages_applied: self.stages.len(),
78        }
79    }
80
81    pub fn is_empty(&self) -> bool {
82        self.stages.is_empty()
83    }
84}
85
86impl Default for CompressionPipeline {
87    fn default() -> Self {
88        Self::new()
89    }
90}
/// Result of pipeline compression.
#[derive(Debug, Clone)]
pub struct CompressResult {
    /// Output bytes after all stages ran.
    pub data: Vec<u8>,
    /// Length of the input before compression.
    pub original_size: usize,
    /// Total number of configured stages (the pipeline counts every stage,
    /// including `None` stages).
    pub stages_applied: usize,
}

impl CompressResult {
    /// Compressed/original size ratio.
    ///
    /// Returns 1.0 for zero-length input to avoid division by zero.
    pub fn ratio(&self) -> f64 {
        if self.original_size == 0 {
            1.0
        } else {
            self.data.len() as f64 / self.original_size as f64
        }
    }

    /// Bytes saved relative to the original; saturates to 0 if the output grew.
    pub fn bytes_saved(&self) -> usize {
        self.original_size.saturating_sub(self.data.len())
    }
}
112
113/// Build a default Zstd pipeline.
114pub fn zstd_pipeline(level: u8) -> CompressionPipeline {
115    let mut p = CompressionPipeline::new();
116    p.add_stage(PipelineStage::new(CompressAlgo::Zstd, level));
117    p
118}
119
120/// Build a two-stage LZ4 + Brotli pipeline.
121pub fn lz4_brotli_pipeline() -> CompressionPipeline {
122    let mut p = CompressionPipeline::new();
123    p.add_stage(PipelineStage::new(CompressAlgo::Lz4, 1));
124    p.add_stage(PipelineStage::new(CompressAlgo::Brotli, 6));
125    p
126}
127
128/// Compress bytes with a given algorithm at level 6.
129pub fn compress_bytes(algo: CompressAlgo, data: &[u8]) -> Vec<u8> {
130    let mut p = CompressionPipeline::new();
131    p.add_stage(PipelineStage::new(algo, 6));
132    p.compress(data).data
133}
/// Estimate compressed size (stub: 90% of original).
///
/// Computed in 128-bit arithmetic: the previous `original * 9 / 10` overflows
/// `usize` (debug panic / release wrap) for `original > usize::MAX / 9`.
/// The result is at most `original`, so the cast back to `usize` is lossless.
pub fn estimate_compressed_size(original: usize) -> usize {
    (original as u128 * 9 / 10) as usize
}
#[cfg(test)]
mod tests {
    use super::*;

    // A freshly constructed pipeline holds no stages.
    #[test]
    fn test_empty_pipeline() {
        assert!(CompressionPipeline::new().is_empty());
    }

    // With zero stages, compress() must return the input unchanged.
    #[test]
    fn test_compress_passthrough_no_stages() {
        let pipeline = CompressionPipeline::new();
        let result = pipeline.compress(b"hello world");
        assert_eq!(result.stages_applied, 0);
        assert_eq!(result.data, b"hello world");
    }

    #[test]
    fn test_zstd_pipeline_has_one_stage() {
        assert_eq!(zstd_pipeline(3).stage_count(), 1);
    }

    #[test]
    fn test_lz4_brotli_two_stages() {
        assert_eq!(lz4_brotli_pipeline().stage_count(), 2);
    }

    // The stub never grows the buffer.
    #[test]
    fn test_compress_reduces_size() {
        let input = vec![0u8; 100];
        let result = zstd_pipeline(6).compress(&input);
        assert!(result.data.len() <= input.len());
    }

    #[test]
    fn test_compress_result_ratio() {
        let result = CompressResult {
            data: vec![0u8; 90],
            original_size: 100,
            stages_applied: 1,
        };
        assert!((result.ratio() - 0.9).abs() < 0.01);
    }

    #[test]
    fn test_bytes_saved() {
        let result = CompressResult {
            data: vec![0u8; 80],
            original_size: 100,
            stages_applied: 1,
        };
        assert_eq!(result.bytes_saved(), 20);
    }

    #[test]
    fn test_compress_bytes_helper() {
        let output = compress_bytes(CompressAlgo::Lz4, &[0u8; 100]);
        assert!(!output.is_empty());
    }

    #[test]
    fn test_algo_name() {
        assert_eq!(CompressAlgo::Zstd.name(), "zstd");
        assert_eq!(CompressAlgo::None.name(), "none");
    }

    // new() caps out-of-range levels at 9.
    #[test]
    fn test_stage_level_clamped() {
        let stage = PipelineStage::new(CompressAlgo::Deflate, 99);
        assert_eq!(stage.level, 9);
    }
}