// voirs_evaluation/performance/types.rs
//! Auto-generated module
//!
//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)

use parking_lot::RwLock;
use scirs2_core::parallel_ops::*;
use std::collections::HashMap;
use std::hash::Hash;
use std::path::Path;
use std::sync::Arc;

/// Thread-safe LRU cache for expensive computations
///
/// NOTE(review): despite the name, this cache does not track per-entry
/// recency — when the map reaches `max_size`, `insert` clears the whole
/// map (see the impl below). Callers only observe ordinary hit/miss
/// semantics, so this is safe, but a full cache suffers a cold restart.
pub struct LRUCache<K, V> {
    /// Backing map behind a `parking_lot::RwLock`; the `Arc` lets the
    /// cache be shared cheaply across threads.
    map: Arc<RwLock<HashMap<K, V>>>,
    /// Maximum number of entries held before the map is flushed.
    max_size: usize,
}
17impl<K, V> LRUCache<K, V>
18where
19    K: Eq + Hash + Clone,
20    V: Clone,
21{
22    /// Create a new LRU cache with the specified maximum size
23    #[must_use]
24    pub fn new(max_size: usize) -> Self {
25        Self {
26            map: Arc::new(RwLock::new(HashMap::new())),
27            max_size,
28        }
29    }
30    /// Get a value from the cache
31    pub fn get(&self, key: &K) -> Option<V> {
32        self.map.read().get(key).cloned()
33    }
34    /// Insert a key-value pair into the cache
35    pub fn insert(&self, key: K, value: V) {
36        let mut map = self.map.write();
37        if map.len() >= self.max_size {
38            map.clear();
39        }
40        map.insert(key, value);
41    }
42    /// Clear all entries from the cache
43    pub fn clear(&self) {
44        self.map.write().clear();
45    }
46    /// Get the current number of entries in the cache
47    #[must_use]
48    pub fn len(&self) -> usize {
49        self.map.read().len()
50    }
51    /// Check if the cache is empty
52    #[must_use]
53    pub fn is_empty(&self) -> bool {
54        self.map.read().is_empty()
55    }
56}
/// Memory-efficient sliding window processor
pub struct SlidingWindowProcessor<T> {
    /// Number of elements per window.
    window_size: usize,
    /// Stride between consecutive window start positions.
    hop_size: usize,
    /// Pre-allocated scratch buffer.
    /// NOTE(review): not used by `process_parallel` in this file —
    /// confirm it is needed (streaming API elsewhere?) before removing.
    buffer: Vec<T>,
}
63impl<T> SlidingWindowProcessor<T>
64where
65    T: Clone + Default,
66{
67    /// Create a new sliding window processor with specified window and hop sizes
68    #[must_use]
69    pub fn new(window_size: usize, hop_size: usize) -> Self {
70        Self {
71            window_size,
72            hop_size,
73            buffer: Vec::with_capacity(window_size),
74        }
75    }
76    /// Process data in sliding windows with parallel computation
77    pub fn process_parallel<F, R>(&self, data: &[T], processor: F) -> Vec<R>
78    where
79        T: Send + Sync,
80        F: Fn(&[T]) -> R + Send + Sync,
81        R: Send,
82    {
83        if data.len() < self.window_size {
84            return Vec::new();
85        }
86        let num_windows = (data.len() - self.window_size) / self.hop_size + 1;
87        (0..num_windows)
88            .into_par_iter()
89            .map(|i| {
90                let start = i * self.hop_size;
91                let end = (start + self.window_size).min(data.len());
92                processor(&data[start..end])
93            })
94            .collect()
95    }
96}
/// Performance monitoring utilities
///
/// Records wall-clock durations per named operation; thread-safe via an
/// internally shared lock.
pub struct PerformanceMonitor {
    /// Operation name -> recorded durations, in recording order.
    timings: Arc<RwLock<HashMap<String, Vec<std::time::Duration>>>>,
}
101impl PerformanceMonitor {
102    /// Create a new performance monitor
103    #[must_use]
104    pub fn new() -> Self {
105        Self {
106            timings: Arc::new(RwLock::new(HashMap::new())),
107        }
108    }
109    /// Time an operation and record the duration
110    pub fn time_operation<F, R>(&self, name: &str, operation: F) -> R
111    where
112        F: FnOnce() -> R,
113    {
114        let start = std::time::Instant::now();
115        let result = operation();
116        let duration = start.elapsed();
117        self.timings
118            .write()
119            .entry(name.to_string())
120            .or_default()
121            .push(duration);
122        result
123    }
124    /// Get the average time for a named operation
125    #[must_use]
126    pub fn get_average_time(&self, name: &str) -> Option<std::time::Duration> {
127        let timings = self.timings.read();
128        if let Some(times) = timings.get(name) {
129            if times.is_empty() {
130                None
131            } else {
132                let total: std::time::Duration = times.iter().sum();
133                Some(total / times.len() as u32)
134            }
135        } else {
136            None
137        }
138    }
139    /// Clear all recorded timings
140    pub fn clear(&self) {
141        self.timings.write().clear();
142    }
143}
/// Persistent cache with compression support
///
/// This cache stores data to disk with compression for efficient storage
/// and retrieval across application restarts.
pub struct PersistentCache<K, V> {
    /// In-memory fast path; disk is consulted only on a memory miss.
    memory_cache: LRUCache<K, V>,
    /// Directory holding one compressed file per cached entry.
    cache_dir: std::path::PathBuf,
    /// Gzip level; clamped to 0..=9 by the constructor and setter.
    compression_level: u32,
}
153impl<K, V> PersistentCache<K, V>
154where
155    K: Eq + Hash + Clone + serde::Serialize + serde::de::DeserializeOwned,
156    V: Clone + serde::Serialize + serde::de::DeserializeOwned,
157{
158    /// Create a new persistent cache with the specified directory and settings
159    pub fn new<P: AsRef<Path>>(
160        cache_dir: P,
161        max_memory_size: usize,
162        compression_level: u32,
163    ) -> Result<Self, std::io::Error> {
164        let cache_dir = cache_dir.as_ref().to_path_buf();
165        if !cache_dir.exists() {
166            std::fs::create_dir_all(&cache_dir)?;
167        }
168        Ok(Self {
169            memory_cache: LRUCache::new(max_memory_size),
170            cache_dir,
171            compression_level: compression_level.clamp(0, 9),
172        })
173    }
174    /// Get a value from the cache (checks memory first, then disk)
175    pub fn get(&self, key: &K) -> Option<V> {
176        if let Some(value) = self.memory_cache.get(key) {
177            return Some(value);
178        }
179        if let Ok(value) = self.load_from_disk(key) {
180            self.memory_cache.insert(key.clone(), value.clone());
181            return Some(value);
182        }
183        None
184    }
185    /// Insert a key-value pair into the cache (saves to both memory and disk)
186    pub fn insert(&self, key: K, value: V) -> Result<(), std::io::Error> {
187        self.memory_cache.insert(key.clone(), value.clone());
188        self.save_to_disk(&key, &value)
189    }
190    /// Clear all entries from the cache
191    pub fn clear(&self) -> Result<(), std::io::Error> {
192        self.memory_cache.clear();
193        for entry in std::fs::read_dir(&self.cache_dir)? {
194            let entry = entry?;
195            if entry.path().is_file() {
196                std::fs::remove_file(entry.path())?;
197            }
198        }
199        Ok(())
200    }
201    /// Get the current number of entries in memory cache
202    #[must_use]
203    pub fn memory_len(&self) -> usize {
204        self.memory_cache.len()
205    }
206    /// Get the total number of entries (including disk)
207    pub fn total_len(&self) -> usize {
208        std::fs::read_dir(&self.cache_dir)
209            .map(|entries| entries.filter_map(|e| e.ok()).count())
210            .unwrap_or(0)
211    }
212    /// Check if the cache is empty
213    #[must_use]
214    pub fn is_empty(&self) -> bool {
215        self.memory_cache.is_empty() && self.total_len() == 0
216    }
217    /// Get cache statistics
218    pub fn stats(&self) -> CacheStats {
219        CacheStats {
220            memory_entries: self.memory_cache.len(),
221            disk_entries: self.total_len(),
222            cache_dir_size: self.calculate_cache_dir_size(),
223        }
224    }
225    /// Set compression level (0-9, where 9 is highest compression)
226    pub fn set_compression_level(&mut self, level: u32) {
227        self.compression_level = level.clamp(0, 9);
228    }
229    fn cache_key_to_filename(&self, key: &K) -> Result<String, std::io::Error> {
230        let serialized = oxicode::serde::encode_to_vec(key, oxicode::config::standard())
231            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
232        let hash = {
233            use std::collections::hash_map::DefaultHasher;
234            use std::hash::Hasher;
235            let mut hasher = DefaultHasher::new();
236            hasher.write(&serialized);
237            hasher.finish()
238        };
239        Ok(format!("{:x}.cache", hash))
240    }
241    fn save_to_disk(&self, key: &K, value: &V) -> Result<(), std::io::Error> {
242        let filename = self.cache_key_to_filename(key)?;
243        let filepath = self.cache_dir.join(filename);
244        let serialized = oxicode::serde::encode_to_vec(value, oxicode::config::standard())
245            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
246        let mut encoder =
247            oxiarc_deflate::GzipStreamEncoder::new(Vec::new(), self.compression_level as u8);
248        std::io::Write::write_all(&mut encoder, &serialized)?;
249        let compressed = encoder.finish()?;
250        std::fs::write(filepath, compressed)
251    }
252    fn load_from_disk(&self, key: &K) -> Result<V, std::io::Error> {
253        let filename = self.cache_key_to_filename(key)?;
254        let filepath = self.cache_dir.join(filename);
255        if !filepath.exists() {
256            return Err(std::io::Error::new(
257                std::io::ErrorKind::NotFound,
258                "Cache entry not found",
259            ));
260        }
261        let compressed_data = std::fs::read(filepath)?;
262        let mut decoder = oxiarc_deflate::GzipStreamDecoder::new(&compressed_data[..]);
263        let mut decompressed = Vec::new();
264        std::io::Read::read_to_end(&mut decoder, &mut decompressed)?;
265        oxicode::serde::decode_from_slice(&decompressed, oxicode::config::standard())
266            .map(|(v, _)| v)
267            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))
268    }
269    fn calculate_cache_dir_size(&self) -> u64 {
270        std::fs::read_dir(&self.cache_dir)
271            .map(|entries| {
272                entries
273                    .filter_map(|entry| entry.ok().and_then(|e| e.metadata().ok()).map(|m| m.len()))
274                    .sum()
275            })
276            .unwrap_or(0)
277    }
278}
/// Cache statistics
///
/// Point-in-time snapshot returned by `PersistentCache::stats`.
#[derive(Debug, Clone)]
pub struct CacheStats {
    /// Number of entries in memory cache
    pub memory_entries: usize,
    /// Number of entries on disk
    pub disk_entries: usize,
    /// Total size of cache directory in bytes
    pub cache_dir_size: u64,
}