oxirs_vec/adaptive_compression.rs

//! Adaptive compression techniques for vector data
//!
//! This module provides intelligent compression strategies that adapt to data characteristics,
//! optimizing compression ratio and decompression speed based on vector patterns and usage.
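//!
//! # Example
//!
//! A minimal usage sketch (illustrative only; it assumes this module is
//! exposed as `oxirs_vec::adaptive_compression`):
//!
//! ```ignore
//! use oxirs_vec::adaptive_compression::AdaptiveCompressor;
//! use oxirs_vec::Vector;
//!
//! let vectors = vec![Vector::new(vec![1.0, 2.0, 3.0, 4.0])];
//! let mut compressor = AdaptiveCompressor::new();
//! // Inspect the data and get a recommended compression method...
//! let method = compressor.analyze_and_recommend(&vectors).unwrap();
//! // ...or let the engine pick (and occasionally explore) a strategy itself.
//! let compressed = compressor.compress_adaptive(&vectors[0]).unwrap();
//! ```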

use crate::{
    compression::{create_compressor, CompressionMethod, VectorCompressor},
    Vector, VectorError,
};
use anyhow::Result;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};

// Random functionality now provided by scirs2-core

/// Context information for compression decisions
#[derive(Debug, Clone)]
pub struct CompressionContext {
    pub domain: VectorDomain,
    pub access_frequency: AccessFrequency,
    pub quality_requirement: QualityRequirement,
    pub resource_constraints: ResourceConstraints,
    pub temporal_patterns: TemporalPatterns,
}

/// Vector domain types for domain-specific optimization
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum VectorDomain {
    TextEmbeddings,
    ImageFeatures,
    AudioFeatures,
    KnowledgeGraph,
    TimeSeriesData,
    Unknown,
}

/// Access frequency patterns
#[derive(Debug, Clone)]
pub enum AccessFrequency {
    VeryHigh, // Accessed multiple times per second
    High,     // Accessed multiple times per minute
    Moderate, // Accessed multiple times per hour
    Low,      // Accessed daily
    Archive,  // Rarely accessed
}

/// Quality requirements for different use cases
#[derive(Debug, Clone)]
pub enum QualityRequirement {
    Lossless,    // No quality loss acceptable
    HighQuality, // Minimal quality loss (>99% accuracy)
    Balanced,    // Moderate quality loss (>95% accuracy)
    Compressed,  // Higher compression priority (>90% accuracy)
    Aggressive,  // Maximum compression (<90% accuracy)
}

/// Resource constraints for compression decisions
#[derive(Debug, Clone)]
pub struct ResourceConstraints {
    pub cpu_usage_limit: f32,    // 0.0 to 1.0
    pub memory_usage_limit: f32, // 0.0 to 1.0
    pub compression_time_limit: Duration,
    pub decompression_time_limit: Duration,
}

/// Temporal patterns for time-aware compression
#[derive(Debug, Clone)]
pub struct TemporalPatterns {
    pub time_of_day_factor: f32, // Compression aggressiveness based on time
    pub load_factor: f32,        // Current system load
    pub seasonal_factor: f32,    // Long-term usage patterns
}

/// Enhanced statistics about vector data characteristics
#[derive(Debug, Clone)]
pub struct VectorStats {
    pub dimensions: usize,
    pub mean: f32,
    pub std_dev: f32,
    pub min_val: f32,
    pub max_val: f32,
    pub entropy: f32,
    pub sparsity: f32,                 // Fraction of near-zero values
    pub correlation: f32,              // Average sequential correlation between dimensions
    pub intrinsic_dimension: f32,      // Estimated intrinsic dimensionality
    pub clustering_tendency: f32,      // Hopkins statistic
    pub temporal_stability: f32,       // Stability over time
    pub domain_affinity: VectorDomain, // Detected domain type
}

impl Default for CompressionContext {
    fn default() -> Self {
        Self {
            domain: VectorDomain::Unknown,
            access_frequency: AccessFrequency::Moderate,
            quality_requirement: QualityRequirement::Balanced,
            resource_constraints: ResourceConstraints {
                cpu_usage_limit: 0.7,
                memory_usage_limit: 0.8,
                compression_time_limit: Duration::from_millis(100),
                decompression_time_limit: Duration::from_millis(50),
            },
            temporal_patterns: TemporalPatterns {
                time_of_day_factor: 1.0,
                load_factor: 1.0,
                seasonal_factor: 1.0,
            },
        }
    }
}

impl VectorStats {
    /// Calculate enhanced statistics for a vector using the default context
    pub fn from_vector(vector: &Vector) -> Result<Self, VectorError> {
        Self::from_vector_with_context(vector, &CompressionContext::default())
    }

    /// Calculate statistics for a vector with compression context
    pub fn from_vector_with_context(
        vector: &Vector,
        context: &CompressionContext,
    ) -> Result<Self, VectorError> {
        let values = vector.as_f32();
        let n = values.len();

        if n == 0 {
            return Err(VectorError::InvalidDimensions("Empty vector".to_string()));
        }

        // Basic statistics
        let sum: f32 = values.iter().sum();
        let mean = sum / n as f32;

        let variance: f32 = values.iter().map(|x| (x - mean).powi(2)).sum::<f32>() / n as f32;
        let std_dev = variance.sqrt();

        let min_val = values.iter().fold(f32::INFINITY, |a, &b| a.min(b));
        let max_val = values.iter().fold(f32::NEG_INFINITY, |a, &b| a.max(b));

        // Enhanced entropy estimation with domain-specific bins
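        // Shannon entropy over the histogram: H = -sum_i p_i * log2(p_i),
        // where p_i is the fraction of values falling in bin i; fewer bins
        // give a coarser estimate suited to lower-entropy domains.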
        let bin_count = match context.domain {
            VectorDomain::TextEmbeddings => 128,
            VectorDomain::ImageFeatures => 256,
            VectorDomain::KnowledgeGraph => 64,
            _ => 256,
        };

        let mut histogram = vec![0u32; bin_count];
        let range = max_val - min_val;
        if range > 0.0 {
            for val in &values {
                let bucket = ((val - min_val) / range * (bin_count - 1) as f32)
                    .clamp(0.0, (bin_count - 1) as f32) as usize;
                histogram[bucket] += 1;
            }
        }

        let entropy = histogram
            .iter()
            .filter(|&&count| count > 0)
            .map(|&count| {
                let p = count as f32 / n as f32;
                -p * p.log2()
            })
            .sum();

        // Sparsity (fraction of values near zero)
        let threshold = std_dev * 0.1;
        let sparse_count = values.iter().filter(|&&x| x.abs() < threshold).count();
        let sparsity = sparse_count as f32 / n as f32;

        // Enhanced correlation analysis
        let correlation = Self::calculate_enhanced_correlation(&values);

        // Intrinsic dimensionality estimation using correlation dimension
        let intrinsic_dimension = Self::estimate_intrinsic_dimension(&values);

        // Clustering tendency using Hopkins statistic
        let clustering_tendency = Self::calculate_hopkins_statistic(&values);

        // Temporal stability (placeholder for now)
        let temporal_stability = 1.0;

        // Domain detection based on statistical patterns
        let domain_affinity = Self::detect_domain(&values, entropy, sparsity, correlation);

        Ok(VectorStats {
            dimensions: n,
            mean,
            std_dev,
            min_val,
            max_val,
            entropy,
            sparsity,
            correlation,
            intrinsic_dimension,
            clustering_tendency,
            temporal_stability,
            domain_affinity,
        })
    }

    /// Enhanced correlation analysis with multiple window sizes
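    ///
    /// Concretely, this is the mean absolute Pearson correlation between each
    /// sliding window and the same window shifted by one position (a windowed
    /// lag-1 autocorrelation), averaged over several window sizes.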
    fn calculate_enhanced_correlation(values: &[f32]) -> f32 {
        let n = values.len();
        if n <= 1 {
            return 0.0;
        }

        let window_sizes = [5, 10, 20].iter().map(|&w| w.min(n / 2).max(2));
        let mut total_corr = 0.0;
        let mut total_count = 0;

        for window_size in window_sizes {
            if window_size >= n {
                continue;
            }

            for i in 0..(n - window_size) {
                let window1 = &values[i..i + window_size];
                let window2 = &values[i + 1..i + window_size + 1];

                let mean1: f32 = window1.iter().sum::<f32>() / window_size as f32;
                let mean2: f32 = window2.iter().sum::<f32>() / window_size as f32;

                let covariance: f32 = window1
                    .iter()
                    .zip(window2)
                    .map(|(a, b)| (a - mean1) * (b - mean2))
                    .sum();
                let var1: f32 = window1.iter().map(|x| (x - mean1).powi(2)).sum();
                let var2: f32 = window2.iter().map(|x| (x - mean2).powi(2)).sum();

                if var1 > 0.0 && var2 > 0.0 {
                    let corr = covariance / (var1.sqrt() * var2.sqrt());
                    total_corr += corr.abs();
                    total_count += 1;
                }
            }
        }

        if total_count > 0 {
            total_corr / total_count as f32
        } else {
            0.0
        }
    }

    /// Estimate intrinsic dimensionality using the correlation dimension method
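    ///
    /// The correlation dimension is the slope of `log C(r)` versus `log r`,
    /// where `C(r)` counts point pairs closer than radius `r`; the slope is
    /// estimated below by least-squares over a few radii.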
    fn estimate_intrinsic_dimension(values: &[f32]) -> f32 {
        let n = values.len();
        if n < 10 {
            return n as f32;
        }

        // Sample points for correlation dimension calculation
        let sample_size = n.min(100);
        let step = n / sample_size;
        let sampled: Vec<f32> = (0..sample_size).map(|i| values[i * step]).collect();

        // Calculate correlation dimension with multiple radii
        let mut log_radii = Vec::new();
        let mut log_counts = Vec::new();

        let max_val = sampled.iter().fold(f32::NEG_INFINITY, |a, &b| a.max(b));
        let min_val = sampled.iter().fold(f32::INFINITY, |a, &b| a.min(b));
        let range = max_val - min_val;

        if range <= 0.0 {
            return 1.0;
        }

        for radius_factor in [0.001, 0.01, 0.1, 0.5] {
            let radius = range * radius_factor;
            let mut count = 0;

            for i in 0..sampled.len() {
                for j in (i + 1)..sampled.len() {
                    if (sampled[i] - sampled[j]).abs() < radius {
                        count += 1;
                    }
                }
            }

            if count > 0 {
                log_radii.push(radius.ln());
                log_counts.push((count as f32).ln());
            }
        }

        // Linear regression to estimate dimension
        if log_radii.len() < 2 {
            return n as f32;
        }

        let mean_log_r: f32 = log_radii.iter().sum::<f32>() / log_radii.len() as f32;
        let mean_log_c: f32 = log_counts.iter().sum::<f32>() / log_counts.len() as f32;

        let numerator: f32 = log_radii
            .iter()
            .zip(&log_counts)
            .map(|(r, c)| (r - mean_log_r) * (c - mean_log_c))
            .sum();
        let denominator: f32 = log_radii.iter().map(|r| (r - mean_log_r).powi(2)).sum();

        if denominator > 0.0 {
            let slope = numerator / denominator;
            slope.abs().clamp(1.0, n as f32)
        } else {
            n as f32
        }
    }

    /// Calculate Hopkins statistic for clustering tendency
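    ///
    /// Values near 0.5 indicate roughly uniform (unclustered) data, while
    /// values approaching 1.0 indicate a strong clustering tendency.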
    fn calculate_hopkins_statistic(values: &[f32]) -> f32 {
        let n = values.len();
        if n < 10 {
            return 0.5; // Neutral value
        }

        let sample_size = (n / 10).clamp(5, 50);
        let min_val = values.iter().fold(f32::INFINITY, |a, &b| a.min(b));
        let max_val = values.iter().fold(f32::NEG_INFINITY, |a, &b| a.max(b));

        if max_val <= min_val {
            return 0.5;
        }

        let mut w_sum = 0.0; // Sum of distances to nearest neighbor in data
        let mut u_sum = 0.0; // Sum of distances to nearest neighbor in uniform random data

        // Sample from actual data
        for i in 0..sample_size {
            let idx = (i * n / sample_size) % n;
            let point = values[idx];

            let mut min_dist = f32::INFINITY;
            for &other in values {
                if other != point {
                    let dist = (point - other).abs();
                    min_dist = min_dist.min(dist);
                }
            }
            w_sum += min_dist;
        }

        // Generate uniform random points and find nearest neighbors in data
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};

        let mut hasher = DefaultHasher::new();
        42u64.hash(&mut hasher);
        let mut rng_state = hasher.finish();

        for _ in 0..sample_size {
            rng_state = rng_state.wrapping_mul(1103515245).wrapping_add(12345);
            let random_point = min_val + (max_val - min_val) * (rng_state as f32 / u64::MAX as f32);

            let mut min_dist = f32::INFINITY;
            for &data_point in values {
                let dist = (random_point - data_point).abs();
                min_dist = min_dist.min(dist);
            }
            u_sum += min_dist;
        }

        if w_sum + u_sum > 0.0 {
            u_sum / (w_sum + u_sum)
        } else {
            0.5
        }
    }

    /// Detect vector domain based on statistical patterns
    fn detect_domain(
        _values: &[f32],
        entropy: f32,
        sparsity: f32,
        correlation: f32,
    ) -> VectorDomain {
        // Text embeddings: moderate entropy, low sparsity, moderate correlation
        if entropy > 6.0
            && entropy < 8.0
            && sparsity < 0.3
            && correlation > 0.2
            && correlation < 0.6
        {
            return VectorDomain::TextEmbeddings;
        }

        // Image features: high entropy, variable sparsity, low correlation
        if entropy > 7.0 && correlation < 0.3 {
            return VectorDomain::ImageFeatures;
        }

        // Knowledge graph: lower entropy, higher sparsity, specific patterns
        if entropy < 6.0 && sparsity > 0.4 {
            return VectorDomain::KnowledgeGraph;
        }

        // Time series: high correlation, moderate entropy
        if correlation > 0.7 && entropy > 5.0 && entropy < 7.0 {
            return VectorDomain::TimeSeriesData;
        }

        VectorDomain::Unknown
    }

    /// Calculate aggregate statistics from multiple vectors
    pub fn from_vectors(vectors: &[Vector]) -> Result<Self, VectorError> {
        Self::from_vectors_with_context(vectors, &CompressionContext::default())
    }

    /// Calculate aggregate statistics from multiple vectors with context
    pub fn from_vectors_with_context(
        vectors: &[Vector],
        context: &CompressionContext,
    ) -> Result<Self, VectorError> {
        if vectors.is_empty() {
            return Err(VectorError::InvalidDimensions(
                "No vectors provided".to_string(),
            ));
        }

        let individual_stats: Result<Vec<_>, _> = vectors
            .iter()
            .map(|v| Self::from_vector_with_context(v, context))
            .collect();
        let stats = individual_stats?;

        let n = stats.len() as f32;

        Ok(VectorStats {
            dimensions: stats[0].dimensions,
            mean: stats.iter().map(|s| s.mean).sum::<f32>() / n,
            std_dev: stats.iter().map(|s| s.std_dev).sum::<f32>() / n,
            min_val: stats
                .iter()
                .map(|s| s.min_val)
                .fold(f32::INFINITY, |a, b| a.min(b)),
            max_val: stats
                .iter()
                .map(|s| s.max_val)
                .fold(f32::NEG_INFINITY, |a, b| a.max(b)),
            entropy: stats.iter().map(|s| s.entropy).sum::<f32>() / n,
            sparsity: stats.iter().map(|s| s.sparsity).sum::<f32>() / n,
            correlation: stats.iter().map(|s| s.correlation).sum::<f32>() / n,
            intrinsic_dimension: stats.iter().map(|s| s.intrinsic_dimension).sum::<f32>() / n,
            clustering_tendency: stats.iter().map(|s| s.clustering_tendency).sum::<f32>() / n,
            temporal_stability: stats.iter().map(|s| s.temporal_stability).sum::<f32>() / n,
            domain_affinity: Self::aggregate_domain_affinity(&stats),
        })
    }

    /// Aggregate domain affinity from multiple statistics
    fn aggregate_domain_affinity(stats: &[VectorStats]) -> VectorDomain {
        let mut domain_counts = HashMap::new();

        for stat in stats {
            *domain_counts
                .entry(stat.domain_affinity.clone())
                .or_insert(0) += 1;
        }

        domain_counts
            .into_iter()
            .max_by_key(|(_, count)| *count)
            .map(|(domain, _)| domain)
            .unwrap_or(VectorDomain::Unknown)
    }
}

/// Performance metrics for compression methods
#[derive(Debug, Clone)]
pub struct CompressionMetrics {
    pub method: CompressionMethod,
    pub compression_ratio: f32,
    pub compression_time: Duration,
    pub decompression_time: Duration,
    pub reconstruction_error: f32,
    pub usage_count: u64,
    pub avg_performance_score: f32,
}

impl CompressionMetrics {
    pub fn new(method: CompressionMethod) -> Self {
        Self {
            method,
            compression_ratio: 1.0,
            compression_time: Duration::ZERO,
            decompression_time: Duration::ZERO,
            reconstruction_error: 0.0,
            usage_count: 0,
            avg_performance_score: 0.0,
        }
    }

    /// Calculate performance score (higher is better)
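    ///
    /// The score is the weighted sum
    /// `w_ratio * ratio_score + w_speed * speed_score + w_accuracy * accuracy_score`,
    /// where `ratio_score` rewards space saved (capped at 0.9),
    /// `speed_score = 1 / (1 + compression_time_in_seconds)`, and
    /// `accuracy_score = 1 / (1 + reconstruction_error)`.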
    pub fn calculate_score(&self, priorities: &CompressionPriorities) -> f32 {
        // `compression_ratio` is compressed size / original size, so smaller
        // is better; score the space saved, capped at 90%.
        let ratio_score = (1.0 - self.compression_ratio).clamp(0.0, 0.9);
        let speed_score = 1.0 / (1.0 + self.compression_time.as_millis() as f32 / 1000.0);
        let accuracy_score = 1.0 / (1.0 + self.reconstruction_error);

        priorities.compression_weight * ratio_score
            + priorities.speed_weight * speed_score
            + priorities.accuracy_weight * accuracy_score
    }

    /// Update metrics with new measurement
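    ///
    /// Each field is smoothed with an exponential moving average,
    /// `new = (1 - alpha) * old + alpha * observed`, with `alpha = 0.1`.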
    pub fn update(
        &mut self,
        compression_ratio: f32,
        comp_time: Duration,
        decomp_time: Duration,
        error: f32,
        priorities: &CompressionPriorities,
    ) {
        let alpha = 0.1; // Learning rate for exponential moving average

        self.compression_ratio = self.compression_ratio * (1.0 - alpha) + compression_ratio * alpha;
        self.compression_time = Duration::from_nanos(
            (self.compression_time.as_nanos() as f32 * (1.0 - alpha)
                + comp_time.as_nanos() as f32 * alpha) as u64,
        );
        self.decompression_time = Duration::from_nanos(
            (self.decompression_time.as_nanos() as f32 * (1.0 - alpha)
                + decomp_time.as_nanos() as f32 * alpha) as u64,
        );
        self.reconstruction_error = self.reconstruction_error * (1.0 - alpha) + error * alpha;
        self.usage_count += 1;

        self.avg_performance_score = self.calculate_score(priorities);
    }
}

/// Priorities for compression strategy selection
#[derive(Debug, Clone)]
pub struct CompressionPriorities {
    pub compression_weight: f32, // Importance of compression ratio
    pub speed_weight: f32,       // Importance of compression/decompression speed
    pub accuracy_weight: f32,    // Importance of reconstruction accuracy
}

impl Default for CompressionPriorities {
    fn default() -> Self {
        Self {
            compression_weight: 0.4,
            speed_weight: 0.3,
            accuracy_weight: 0.3,
        }
    }
}

/// Multi-level compression strategy
#[derive(Debug, Clone)]
pub struct MultiLevelCompression {
    pub levels: Vec<CompressionMethod>,
    pub thresholds: Vec<f32>, // Quality thresholds for each level
}

impl Default for MultiLevelCompression {
    fn default() -> Self {
        Self::new()
    }
}

impl MultiLevelCompression {
    pub fn new() -> Self {
        Self {
            levels: vec![
                CompressionMethod::None,
                CompressionMethod::Quantization { bits: 16 },
                CompressionMethod::Quantization { bits: 8 },
                CompressionMethod::Pca { components: 0 }, // Will be set adaptively
                CompressionMethod::Zstd { level: 3 },
            ],
            thresholds: vec![0.0, 0.1, 0.3, 0.6, 0.8],
        }
    }

    /// Select compression level based on requirements
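    ///
    /// With the default thresholds `[0.0, 0.1, 0.3, 0.6, 0.8]`, for example,
    /// a `required_compression` of 0.25 selects the third level (8-bit
    /// quantization), and anything above 0.8 falls through to the last level.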
    pub fn select_level(&self, required_compression: f32) -> &CompressionMethod {
        for (i, &threshold) in self.thresholds.iter().enumerate() {
            if required_compression <= threshold {
                return &self.levels[i];
            }
        }
        self.levels.last().unwrap()
    }
}

/// Adaptive compression engine that learns optimal strategies
pub struct AdaptiveCompressor {
    /// Current compression priorities
    priorities: CompressionPriorities,
    /// Performance metrics for each compression method
    metrics: Arc<RwLock<HashMap<String, CompressionMetrics>>>,
    /// Multi-level compression strategies
    multi_level: MultiLevelCompression,
    /// Cache of trained compressors
    compressor_cache: Arc<RwLock<HashMap<String, Box<dyn VectorCompressor + Send + Sync>>>>,
    /// Statistics cache for similar vectors
    stats_cache: Arc<RwLock<HashMap<String, (VectorStats, Instant)>>>,
    /// Learning parameters
    exploration_rate: f32,
    cache_ttl: Duration,
}

impl AdaptiveCompressor {
    pub fn new() -> Self {
        Self::new_with_priorities(CompressionPriorities::default())
    }

    pub fn new_with_priorities(priorities: CompressionPriorities) -> Self {
        Self {
            priorities,
            metrics: Arc::new(RwLock::new(HashMap::new())),
            multi_level: MultiLevelCompression::new(),
            compressor_cache: Arc::new(RwLock::new(HashMap::new())),
            stats_cache: Arc::new(RwLock::new(HashMap::new())),
            exploration_rate: 0.1,
            cache_ttl: Duration::from_secs(3600), // 1 hour cache TTL
        }
    }

    /// Analyze vector characteristics and recommend compression strategy
    pub fn analyze_and_recommend(
        &mut self,
        vectors: &[Vector],
    ) -> Result<CompressionMethod, VectorError> {
        let stats = VectorStats::from_vectors(vectors)?;
        let stats_key = self.generate_stats_key(&stats);

        // Check cache first
        {
            let cache = self.stats_cache.read().unwrap();
            if let Some((cached_stats, timestamp)) = cache.get(&stats_key) {
                if timestamp.elapsed() < self.cache_ttl {
                    return Ok(self.recommend_from_stats(cached_stats));
                }
            }
        }

        // Cache the stats
        {
            let mut cache = self.stats_cache.write().unwrap();
            cache.insert(stats_key, (stats.clone(), Instant::now()));
        }

        Ok(self.recommend_from_stats(&stats))
    }

    /// Recommend compression method based on vector statistics
    fn recommend_from_stats(&self, stats: &VectorStats) -> CompressionMethod {
        // High sparsity -> prefer quantization or PCA
        if stats.sparsity > 0.7 {
            return CompressionMethod::Quantization { bits: 4 };
        }

        // High correlation -> PCA works well
        if stats.correlation > 0.6 && stats.dimensions > 20 {
            let components = (stats.dimensions as f32 * 0.7) as usize;
            return CompressionMethod::Pca { components };
        }

        // Low entropy -> Zstd compression is effective
        if stats.entropy < 4.0 {
            return CompressionMethod::Zstd { level: 9 };
        }

        // High variance -> quantization with more bits
        if stats.std_dev > stats.mean.abs() {
            return CompressionMethod::Quantization { bits: 12 };
        }

        // Default: moderate quantization
        CompressionMethod::Quantization { bits: 8 }
    }

    /// Compress vector with adaptive strategy selection
    pub fn compress_adaptive(&mut self, vector: &Vector) -> Result<Vec<u8>, VectorError> {
        let stats = VectorStats::from_vector(vector)?;
        let method = self.recommend_from_stats(&stats);

        // Check if we should explore alternative methods
        if self.should_explore() {
            let alternative = self.get_alternative_method(&method);
            return self.compress_with_method(vector, &alternative);
        }

        self.compress_with_method(vector, &method)
    }

    /// Compress with specific method and update metrics
    pub fn compress_with_method(
        &mut self,
        vector: &Vector,
        method: &CompressionMethod,
    ) -> Result<Vec<u8>, VectorError> {
        let method_key = format!("{method:?}");
        let compressor = self.get_or_create_compressor(method)?;

        let start_time = Instant::now();
        let compressed = compressor.compress(vector)?;
        let compression_time = start_time.elapsed();

        // Measure reconstruction error
        let decompressed = compressor.decompress(&compressed, vector.dimensions)?;
        let error = self.calculate_reconstruction_error(vector, &decompressed)?;

        let compression_ratio = compressed.len() as f32 / (vector.dimensions * 4) as f32; // Assuming f32 vectors

        // Update metrics
        {
            let mut metrics = self.metrics.write().unwrap();
            let metric = metrics
                .entry(method_key)
                .or_insert_with(|| CompressionMetrics::new(method.clone()));
            metric.update(
                compression_ratio,
                compression_time,
                Duration::ZERO,
                error,
                &self.priorities,
            );
        }

        Ok(compressed)
    }

    /// Multi-level compression for extreme compression ratios
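    ///
    /// Applies up to three compression levels, decompressing between steps to
    /// feed the next level, until the accumulated size ratio (compressed
    /// bytes / original bytes) drops to `target_ratio`; e.g.
    /// `compress_multi_level(&v, 0.1)` aims for roughly 10x compression.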
    pub fn compress_multi_level(
        &mut self,
        vector: &Vector,
        target_ratio: f32,
    ) -> Result<Vec<u8>, VectorError> {
        let mut current_vector = vector.clone();
        let mut compression_steps = Vec::new();
        let mut total_ratio = 1.0;

        while total_ratio > target_ratio && compression_steps.len() < 3 {
            let remaining_ratio = target_ratio / total_ratio;
            let method = self.multi_level.select_level(remaining_ratio);

            let compressor = self.get_or_create_compressor(method)?;
            let compressed = compressor.compress(&current_vector)?;

            let step_ratio = compressed.len() as f32 / (current_vector.dimensions * 4) as f32;
            total_ratio *= step_ratio;

            compression_steps.push((method.clone(), compressed.clone()));

            // Prepare for next level if needed
            if total_ratio > target_ratio {
                current_vector = compressor.decompress(&compressed, current_vector.dimensions)?;
            }
        }

        // Serialize the compression steps
        self.serialize_multi_level_result(compression_steps)
    }

    /// Get best performing compression method based on current metrics
    pub fn get_best_method(&self) -> CompressionMethod {
        let metrics = self.metrics.read().unwrap();
        let best = metrics.values().max_by(|a, b| {
            a.avg_performance_score
                .partial_cmp(&b.avg_performance_score)
                .unwrap_or(std::cmp::Ordering::Equal) // Treat NaN scores as equal rather than panicking
        });

        best.map(|m| m.method.clone())
            .unwrap_or(CompressionMethod::Quantization { bits: 8 })
    }

    /// Get compression performance statistics
    pub fn get_performance_stats(&self) -> HashMap<String, CompressionMetrics> {
        self.metrics.read().unwrap().clone()
    }

    /// Update compression priorities
    pub fn update_priorities(&mut self, priorities: CompressionPriorities) {
        self.priorities = priorities;

        // Recalculate scores for all metrics
        let mut metrics = self.metrics.write().unwrap();
        for metric in metrics.values_mut() {
            metric.avg_performance_score = metric.calculate_score(&self.priorities);
        }
    }

    /// Clear caches and reset learning
    pub fn reset(&mut self) {
        self.metrics.write().unwrap().clear();
        self.compressor_cache.write().unwrap().clear();
        self.stats_cache.write().unwrap().clear();
    }

    // Private helper methods

    fn get_or_create_compressor(
        &self,
        method: &CompressionMethod,
    ) -> Result<Box<dyn VectorCompressor>, VectorError> {
        // Note: `compressor_cache` is not consulted yet. Handing out a cached
        // `Box<dyn VectorCompressor>` by value would require cloning or a
        // redesign around shared trait objects, so a fresh compressor is
        // created on every call for now.
        let compressor = create_compressor(method);
        Ok(compressor)
    }

    fn calculate_reconstruction_error(
        &self,
        original: &Vector,
        reconstructed: &Vector,
    ) -> Result<f32, VectorError> {
        let orig_values = original.as_f32();
        let recon_values = reconstructed.as_f32();

        if orig_values.len() != recon_values.len() {
            return Err(VectorError::InvalidDimensions(
                "Dimension mismatch".to_string(),
            ));
        }

        let mse: f32 = orig_values
            .iter()
            .zip(recon_values.iter())
            .map(|(a, b)| (a - b).powi(2))
            .sum::<f32>()
            / orig_values.len() as f32;

        Ok(mse.sqrt()) // RMSE
    }

    fn generate_stats_key(&self, stats: &VectorStats) -> String {
        format!(
            "{}_{:.2}_{:.2}_{:.2}_{:.2}",
            stats.dimensions, stats.entropy, stats.sparsity, stats.correlation, stats.std_dev
        )
    }

    fn should_explore(&self) -> bool {
        #[allow(unused_imports)]
        use scirs2_core::random::{Random, Rng};
        // Seed from the wall clock: a fixed seed would make every call draw
        // the same value, so exploration would never actually vary.
        let seed = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_nanos() as u64)
            .unwrap_or(42);
        let mut rng = Random::seed(seed);
        rng.gen_range(0.0..1.0) < self.exploration_rate
    }

    fn get_alternative_method(&self, current: &CompressionMethod) -> CompressionMethod {
        match current {
            CompressionMethod::None => CompressionMethod::Quantization { bits: 8 },
            CompressionMethod::Quantization { bits } => {
                if *bits > 8 {
                    CompressionMethod::Quantization { bits: bits - 2 }
                } else {
                    CompressionMethod::Pca { components: 16 }
                }
            }
            CompressionMethod::Pca { components: _ } => CompressionMethod::Zstd { level: 6 },
            CompressionMethod::Zstd { level } => {
                if *level < 15 {
                    CompressionMethod::Zstd { level: level + 3 }
                } else {
                    CompressionMethod::Quantization { bits: 4 }
                }
            }
            _ => CompressionMethod::None,
        }
    }

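    /// Serializes multi-level output as a little-endian `u32` step count,
    /// followed, for each step, by a 1-byte method id, a little-endian `u32`
    /// data length, and the compressed bytes themselves.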
    fn serialize_multi_level_result(
        &self,
        steps: Vec<(CompressionMethod, Vec<u8>)>,
    ) -> Result<Vec<u8>, VectorError> {
        use std::io::Write;

        let mut result = Vec::new();

        // Write number of steps
        result.write_all(&(steps.len() as u32).to_le_bytes())?;

        // Write each step
        for (method, data) in steps {
            // Serialize method (simplified)
            let method_id = match method {
                CompressionMethod::None => 0u8,
                CompressionMethod::Zstd { .. } => 1u8,
                CompressionMethod::Quantization { .. } => 2u8,
                CompressionMethod::Pca { .. } => 3u8,
                CompressionMethod::ProductQuantization { .. } => 4u8,
                CompressionMethod::Adaptive { .. } => 5u8,
            };
            result.push(method_id);

            // Write data length and data
            result.write_all(&(data.len() as u32).to_le_bytes())?;
            result.extend_from_slice(&data);
        }

        Ok(result)
    }
}

impl Default for AdaptiveCompressor {
    fn default() -> Self {
        Self::new()
    }
}

/// Domain-specific compression profiles
pub struct CompressionProfiles {
    profiles: HashMap<VectorDomain, CompressionPriorities>,
}

impl Default for CompressionProfiles {
    fn default() -> Self {
        Self::new()
    }
}

impl CompressionProfiles {
    pub fn new() -> Self {
        let mut profiles = HashMap::new();

        // Text embeddings: balance compression and quality
        profiles.insert(
            VectorDomain::TextEmbeddings,
            CompressionPriorities {
                compression_weight: 0.3,
                speed_weight: 0.4,
                accuracy_weight: 0.3,
            },
        );

        // Image features: favor compression due to redundancy
        profiles.insert(
            VectorDomain::ImageFeatures,
            CompressionPriorities {
                compression_weight: 0.5,
                speed_weight: 0.2,
                accuracy_weight: 0.3,
            },
        );

        // Knowledge graph: favor accuracy
        profiles.insert(
            VectorDomain::KnowledgeGraph,
            CompressionPriorities {
                compression_weight: 0.2,
                speed_weight: 0.3,
                accuracy_weight: 0.5,
            },
        );

        // Time series: balance speed and accuracy
        profiles.insert(
            VectorDomain::TimeSeriesData,
            CompressionPriorities {
                compression_weight: 0.3,
                speed_weight: 0.4,
                accuracy_weight: 0.3,
            },
        );

        // Audio features: favor compression
        profiles.insert(
            VectorDomain::AudioFeatures,
            CompressionPriorities {
                compression_weight: 0.4,
                speed_weight: 0.3,
                accuracy_weight: 0.3,
            },
        );

        Self { profiles }
    }

    pub fn get_profile(&self, domain: &VectorDomain) -> CompressionPriorities {
        self.profiles.get(domain).cloned().unwrap_or_default()
    }

    pub fn update_profile(&mut self, domain: VectorDomain, priorities: CompressionPriorities) {
        self.profiles.insert(domain, priorities);
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_vector_stats() {
        let vector = Vector::new(vec![1.0, 2.0, 3.0, 4.0, 5.0]);
        let stats = VectorStats::from_vector(&vector).unwrap();

        assert_eq!(stats.dimensions, 5);
        assert_eq!(stats.mean, 3.0);
        assert!(stats.std_dev > 0.0);
    }

    #[test]
    fn test_adaptive_compression() {
        let vectors = vec![
            Vector::new(vec![1.0, 2.0, 3.0, 4.0]),
            Vector::new(vec![2.0, 3.0, 4.0, 5.0]),
            Vector::new(vec![3.0, 4.0, 5.0, 6.0]),
        ];

        let mut compressor = AdaptiveCompressor::new();
        let recommended = compressor.analyze_and_recommend(&vectors).unwrap();

        // Should recommend some compression method
        assert!(!matches!(recommended, CompressionMethod::None));
    }

    #[test]
    fn test_compression_metrics() {
        let method = CompressionMethod::Quantization { bits: 8 };
        let mut metrics = CompressionMetrics::new(method);
        let priorities = CompressionPriorities::default();

        metrics.update(
            0.5,
            Duration::from_millis(10),
            Duration::from_millis(5),
            0.01,
            &priorities,
        );

        assert!(metrics.avg_performance_score > 0.0);
        assert_eq!(metrics.usage_count, 1);
    }

    #[test]
    fn test_multi_level_compression() {
        let mut compressor = AdaptiveCompressor::new();
        // Use a larger vector with repetitive patterns that compress well
        let values: Vec<f32> = (0..256).map(|i| (i % 16) as f32).collect();
        let vector = Vector::new(values);

        let compressed = compressor.compress_multi_level(&vector, 0.1).unwrap();

        // Should achieve significant compression on this larger, repetitive vector.
        // The original size is 256 * 4 = 1024 bytes; multi-level output includes
        // metadata overhead, so expect reasonable rather than extreme compression.
        println!(
            "Compressed size: {} bytes, original size: {} bytes",
            compressed.len(),
            vector.dimensions * 4
        );
        assert!(compressed.len() < vector.dimensions * 4); // At least some compression
        assert!(compressed.len() < 900); // At least ~12% size reduction
    }

    #[test]
    fn test_stats_aggregation() {
        let vectors = vec![
            Vector::new(vec![1.0, 2.0]),
            Vector::new(vec![3.0, 4.0]),
            Vector::new(vec![5.0, 6.0]),
        ];

        let stats = VectorStats::from_vectors(&vectors).unwrap();
        assert_eq!(stats.dimensions, 2);
        assert!(stats.mean > 0.0);
    }
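
    // Sketch: domains without a registered profile should fall back to the
    // default priorities (0.4 / 0.3 / 0.3, per `CompressionPriorities::default`).
    #[test]
    fn test_profile_fallback() {
        let profiles = CompressionProfiles::new();
        let p = profiles.get_profile(&VectorDomain::Unknown);
        assert!((p.compression_weight - 0.4).abs() < f32::EPSILON);
        assert!((p.speed_weight - 0.3).abs() < f32::EPSILON);
        assert!((p.accuracy_weight - 0.3).abs() < f32::EPSILON);
    }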
}