// torsh_quantization/analysis/size.rs
//! Size analysis for quantized models

use crate::QScheme;
use std::collections::HashMap;
6/// Size analysis for quantized models
7pub struct SizeAnalyzer;
8
9impl SizeAnalyzer {
10    /// Calculate theoretical model size for different quantization schemes
11    pub fn calculate_model_size(num_parameters: usize, scheme: QScheme) -> f32 {
12        let bytes_per_param = match scheme {
13            QScheme::Binary => 0.125,                                // 1 bit
14            QScheme::Ternary => 0.25,                                // 2 bits (with some overhead)
15            QScheme::Int4PerTensor | QScheme::Int4PerChannel => 0.5, // 4 bits
16            QScheme::PerTensorAffine
17            | QScheme::PerChannelAffine
18            | QScheme::PerTensorSymmetric
19            | QScheme::PerChannelSymmetric => 1.0, // 8 bits
20            QScheme::MixedPrecision => 2.0,                          // Assuming average of FP16
21            QScheme::GroupWise => 1.0,                               // 8 bits
22        };
23
24        num_parameters as f32 * bytes_per_param
25    }
26
27    /// Calculate size reduction ratio compared to FP32
28    pub fn calculate_size_reduction_ratio(num_parameters: usize, scheme: QScheme) -> f32 {
29        let fp32_size = num_parameters as f32 * 4.0; // 4 bytes per FP32 parameter
30        let quantized_size = Self::calculate_model_size(num_parameters, scheme);
31
32        if quantized_size == 0.0 {
33            return 1.0;
34        }
35
36        fp32_size / quantized_size
37    }
38
39    /// Calculate memory footprint including activations
40    pub fn calculate_total_memory_footprint(
41        num_parameters: usize,
42        num_activations: usize,
43        param_scheme: QScheme,
44        activation_scheme: QScheme,
45    ) -> f32 {
46        let param_size = Self::calculate_model_size(num_parameters, param_scheme);
47        let activation_size = Self::calculate_model_size(num_activations, activation_scheme);
48
49        param_size + activation_size
50    }
51
52    /// Estimate disk storage requirements with compression
53    pub fn estimate_compressed_size(base_size_mb: f32, scheme: QScheme) -> f32 {
54        let compression_ratio = match scheme {
55            QScheme::Binary => 0.7,   // Binary data compresses well
56            QScheme::Ternary => 0.75, // Some redundancy
57            QScheme::Int4PerTensor | QScheme::Int4PerChannel => 0.8,
58            QScheme::PerTensorAffine | QScheme::PerChannelAffine => 0.85,
59            QScheme::PerTensorSymmetric | QScheme::PerChannelSymmetric => 0.82,
60            QScheme::MixedPrecision => 0.9, // Less compression
61            QScheme::GroupWise => 0.83,
62        };
63
64        base_size_mb * compression_ratio
65    }
66
67    /// Calculate size reduction factor (legacy compatibility)
68    pub fn size_reduction_factor(
69        original_scheme: QScheme,
70        quantized_scheme: QScheme,
71        num_parameters: usize,
72    ) -> f32 {
73        let original_size = Self::calculate_model_size(num_parameters, original_scheme);
74        let quantized_size = Self::calculate_model_size(num_parameters, quantized_scheme);
75
76        if quantized_size == 0.0 {
77            return 1.0;
78        }
79
80        original_size / quantized_size
81    }
82
83    /// Analyze size impact for different quantization schemes
84    pub fn analyze_size_impact(num_parameters: usize) -> HashMap<QScheme, f32> {
85        let mut size_analysis = HashMap::new();
86
87        let schemes = vec![
88            QScheme::Binary,
89            QScheme::Ternary,
90            QScheme::Int4PerTensor,
91            QScheme::PerTensorAffine,
92            QScheme::PerChannelAffine,
93            QScheme::MixedPrecision,
94            QScheme::GroupWise,
95        ];
96
97        for scheme in schemes {
98            let size_mb = Self::model_size_mb(num_parameters, scheme);
99            size_analysis.insert(scheme, size_mb);
100        }
101
102        size_analysis
103    }
104
105    /// Calculate model size in megabytes
106    pub fn model_size_mb(num_parameters: usize, scheme: QScheme) -> f32 {
107        Self::calculate_model_size(num_parameters, scheme) / (1024.0 * 1024.0)
108    }
109
110    /// Generate comprehensive size analysis report
111    pub fn generate_size_report(
112        num_parameters: usize,
113        schemes: &[QScheme],
114    ) -> HashMap<QScheme, SizeReport> {
115        let mut report = HashMap::new();
116        let fp32_size_mb = (num_parameters as f32 * 4.0) / (1024.0 * 1024.0);
117
118        for &scheme in schemes {
119            let quantized_size_mb = Self::model_size_mb(num_parameters, scheme);
120            let reduction_ratio = Self::calculate_size_reduction_ratio(num_parameters, scheme);
121            let compressed_size_mb = Self::estimate_compressed_size(quantized_size_mb, scheme);
122
123            report.insert(
124                scheme,
125                SizeReport {
126                    original_size_mb: fp32_size_mb,
127                    quantized_size_mb,
128                    compressed_size_mb,
129                    reduction_ratio,
130                    space_saved_mb: fp32_size_mb - quantized_size_mb,
131                    compression_efficiency: (fp32_size_mb - compressed_size_mb) / fp32_size_mb,
132                },
133            );
134        }
135
136        report
137    }
138}

/// Detailed size analysis report for one quantization scheme, as produced by
/// `SizeAnalyzer::generate_size_report`.
///
/// All sizes are in megabytes; ratios and efficiencies are dimensionless.
#[derive(Debug, Clone, PartialEq)]
pub struct SizeReport {
    /// Original model size in MB (FP32, 4 bytes per parameter)
    pub original_size_mb: f32,
    /// Quantized model size in MB
    pub quantized_size_mb: f32,
    /// Estimated on-disk size of the compressed quantized model in MB
    pub compressed_size_mb: f32,
    /// Size reduction ratio (original / quantized)
    pub reduction_ratio: f32,
    /// Space saved in MB (original - quantized)
    pub space_saved_mb: f32,
    /// Compression efficiency (0.0 to 1.0): fraction of the original size
    /// eliminated by quantization plus compression
    pub compression_efficiency: f32,
}

impl SizeReport {
    /// Returns `true` when the achieved reduction ratio meets or exceeds
    /// `min_ratio`.
    pub fn meets_reduction_threshold(&self, min_ratio: f32) -> bool {
        self.reduction_ratio >= min_ratio
    }

    /// Space savings as a percentage of the original model size.
    ///
    /// Returns 0.0 for an empty (zero-size) model rather than the NaN a
    /// bare division would produce.
    pub fn space_savings_percentage(&self) -> f32 {
        if self.original_size_mb > 0.0 {
            (self.space_saved_mb / self.original_size_mb) * 100.0
        } else {
            0.0
        }
    }
}
167}