fact_wasm_core/cache.rs

//! Fast cache implementation for FACT
//!
//! High-performance caching with LRU eviction, TTL support, and optimizations.
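//!
//! # Example
//!
//! A minimal usage sketch (key, value, and TTL are illustrative; assumes a
//! wasm/JS host, since timestamps come from `js_sys::Date::now()`):
//!
//! ```ignore
//! let mut cache = FastCache::with_capacity(1024 * 1024);
//! assert!(cache.set("user:42", "Ada", 60_000)); // 60 s TTL
//! assert_eq!(cache.get("user:42"), Some("Ada".to_string()));
//! ```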

use wasm_bindgen::prelude::*;
use serde::{Serialize, Deserialize};
use rustc_hash::FxHashMap;
use smallvec::SmallVec;
use std::collections::{VecDeque, HashMap};

/// Cache priority levels
///
/// Variants are declared from lowest to highest so the derived `Ord` matches
/// priority (`Disposable < Low < Medium < High < Critical`), which is what the
/// eviction logic compares against.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Eq, Ord)]
pub enum CachePriority {
    Disposable,
    Low,
    Medium,
    High,
    Critical,
}

/// Access pattern tracking
#[derive(Clone, Debug)]
pub struct AccessPattern {
    pub frequency_score: f64,
    pub recency_score: f64,
    pub seasonal_pattern: bool,
    pub burst_access: bool,
    pub access_intervals: SmallVec<[f64; 8]>,
}

/// Access pattern statistics
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct AccessPatternStats {
    pub avg_interval: f64,
    pub peak_hours: Vec<u8>,
    pub pattern_type: String,
}

/// Cache entry with metadata
#[derive(Clone, Debug)]
pub struct CacheEntry {
    pub value: String,
    pub timestamp: f64,
    pub ttl: Option<u64>,
    pub access_count: u32,
    pub size: usize,
    pub priority: CachePriority,
    pub tags: SmallVec<[String; 4]>,
    pub compression_ratio: f32,
    pub last_modified: f64,
    pub access_pattern: AccessPattern,
    pub validation_hash: u64,
}

impl CacheEntry {
    pub fn new(value: String, ttl: Option<u64>) -> Self {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};

        let mut hasher = DefaultHasher::new();
        value.hash(&mut hasher);
        let hash = hasher.finish();

        Self {
            size: value.len(),
            value,
            timestamp: js_sys::Date::now(),
            ttl,
            access_count: 1,
            priority: CachePriority::Medium,
            tags: SmallVec::new(),
            compression_ratio: 1.0,
            last_modified: js_sys::Date::now(),
            access_pattern: AccessPattern {
                frequency_score: 1.0,
                recency_score: 1.0,
                seasonal_pattern: false,
                burst_access: false,
                access_intervals: SmallVec::new(),
            },
            validation_hash: hash,
        }
    }

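    /// Returns `true` once more than `ttl` milliseconds have elapsed since the
    /// entry's `timestamp`; entries without a TTL never expire.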
    pub fn is_expired(&self) -> bool {
        if let Some(ttl) = self.ttl {
            js_sys::Date::now() - self.timestamp > ttl as f64
        } else {
            false
        }
    }

    pub fn access(&mut self) {
        self.access_count += 1;
        self.timestamp = js_sys::Date::now();
    }
}

/// High-performance cache statistics
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct CacheStats {
    pub size: usize,
    pub entries: usize,
    pub capacity: usize,
    pub hit_rate: f64,
    pub miss_rate: f64,
    pub evictions: u64,
    pub expired_entries: u64,
    pub total_requests: u64,
    pub cache_hits: u64,
    pub cache_misses: u64,
    pub compression_savings: u64,
    pub memory_efficiency: f64,
    pub avg_access_time_ms: f64,
    pub hot_entries: u64,
    pub cold_entries: u64,
    pub fragmentation_ratio: f64,
    pub gc_runs: u64,
    pub gc_time_ms: f64,
    pub priority_distribution: HashMap<String, u64>,
    pub access_pattern_stats: AccessPatternStats,
}

impl Default for CacheStats {
    fn default() -> Self {
        Self {
            size: 0,
            entries: 0,
            capacity: 10 * 1024 * 1024, // 10MB default
            hit_rate: 0.0,
            miss_rate: 0.0,
            evictions: 0,
            expired_entries: 0,
            total_requests: 0,
            cache_hits: 0,
            cache_misses: 0,
            compression_savings: 0,
            memory_efficiency: 0.0,
            avg_access_time_ms: 0.0,
            hot_entries: 0,
            cold_entries: 0,
            fragmentation_ratio: 0.0,
            gc_runs: 0,
            gc_time_ms: 0.0,
            priority_distribution: HashMap::new(),
            access_pattern_stats: AccessPatternStats::default(),
        }
    }
}

/// Fast cache with LRU eviction and TTL support
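///
/// Entries live in an `FxHashMap`; recency for LRU eviction is tracked in a
/// `VecDeque` of keys, and keys that cross `hot_threshold` accesses are
/// mirrored into a small `hot_keys` list.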
#[wasm_bindgen]
pub struct FastCache {
    data: FxHashMap<String, CacheEntry>,
    access_order: VecDeque<String>,
    hot_keys: SmallVec<[String; 32]>,
    stats: CacheStats,
    max_size: usize,
    max_entries: usize,
    hot_threshold: u32,
}

#[wasm_bindgen]
impl FastCache {
    /// Create a new cache with default capacity (10MB)
    #[wasm_bindgen(constructor)]
    pub fn new() -> FastCache {
        Self::with_capacity(10 * 1024 * 1024)
    }

    /// Create a new cache with specified capacity in bytes
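    ///
    /// The entry limit is estimated from the byte budget at roughly 100 bytes
    /// per entry, with a floor of 1000 entries.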
    #[wasm_bindgen]
    pub fn with_capacity(max_size: usize) -> FastCache {
        let max_entries = (max_size / 100).max(1000); // Estimate ~100 bytes per entry average

        FastCache {
            data: FxHashMap::default(),
            access_order: VecDeque::with_capacity(max_entries / 4),
            hot_keys: SmallVec::new(),
            stats: CacheStats {
                capacity: max_size,
                ..Default::default()
            },
            max_size,
            max_entries,
            hot_threshold: 5,
        }
    }

    /// Get a value from the cache
    #[wasm_bindgen]
    pub fn get(&mut self, key: &str) -> Option<String> {
        self.stats.total_requests += 1;

        // Check if key exists and is not expired
        let result = if let Some(entry) = self.data.get(key) {
            if entry.is_expired() {
                None // Will be removed below
            } else {
                Some(entry.value.clone())
            }
        } else {
            None
        };

        // Handle the result
        if let Some(value) = result {
            // Update entry access info
            if let Some(entry) = self.data.get_mut(key) {
                entry.access();
            }
            self.promote_hot_key(key);
            self.update_access_order(key);
            self.stats.cache_hits += 1;
            self.update_rates();
            Some(value)
        } else {
            // Check if expired and remove
            if let Some(entry) = self.data.get(key) {
                if entry.is_expired() {
                    self.remove_internal(key);
                    self.stats.expired_entries += 1;
                }
            }
            self.stats.cache_misses += 1;
            self.update_rates();
            None
        }
    }

    /// Set a value in the cache with an optional TTL in milliseconds
    /// (a `ttl_ms` of 0 means the entry never expires)
    #[wasm_bindgen]
    pub fn set(&mut self, key: &str, value: &str, ttl_ms: u64) -> bool {
        let ttl = if ttl_ms > 0 { Some(ttl_ms) } else { None };
        self.put_with_ttl_and_priority(key.to_string(), value.to_string(), ttl, CachePriority::Medium)
    }

    /// Set a value with custom priority
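    ///
    /// `priority` maps 4 => Critical, 3 => High, 2 => Medium, 1 => Low, and any
    /// other value => Disposable; as above, a `ttl_ms` of 0 means no expiry.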
    #[wasm_bindgen]
    pub fn set_with_priority(&mut self, key: &str, value: &str, ttl_ms: u64, priority: u8) -> bool {
        let ttl = if ttl_ms > 0 { Some(ttl_ms) } else { None };
        let cache_priority = match priority {
            4 => CachePriority::Critical,
            3 => CachePriority::High,
            2 => CachePriority::Medium,
            1 => CachePriority::Low,
            _ => CachePriority::Disposable,
        };
        self.put_with_ttl_and_priority(key.to_string(), value.to_string(), ttl, cache_priority)
    }

    /// Put a value in the cache
    #[wasm_bindgen]
    pub fn put(&mut self, key: String, value: String) -> bool {
        self.put_with_ttl(key, value, None)
    }

    /// Remove a value from the cache
    #[wasm_bindgen]
    pub fn remove(&mut self, key: &str) -> bool {
        self.remove_internal(key)
    }

    /// Clear all entries from the cache
    #[wasm_bindgen]
    pub fn clear(&mut self) {
        self.data.clear();
        self.access_order.clear();
        self.hot_keys.clear();
        self.stats.size = 0;
        self.stats.entries = 0;
        self.stats.evictions = 0;
        self.stats.expired_entries = 0;
    }

    /// Get cache statistics as a JS object
    #[wasm_bindgen]
    pub fn get_stats(&self) -> JsValue {
        serde_wasm_bindgen::to_value(&self.stats).unwrap_or(JsValue::NULL)
    }

    /// Check if a key exists (without updating access)
    #[wasm_bindgen]
    pub fn contains(&self, key: &str) -> bool {
        if let Some(entry) = self.data.get(key) {
            !entry.is_expired()
        } else {
            false
        }
    }

    /// Get the number of entries in the cache
    #[wasm_bindgen]
    pub fn size(&self) -> usize {
        self.data.len()
    }

    /// Get memory usage in bytes
    #[wasm_bindgen]
    pub fn memory_usage(&self) -> usize {
        self.stats.size
    }

    /// Optimize cache performance
    #[wasm_bindgen]
    pub fn optimize(&mut self) {
        self.cleanup_expired();
        self.optimize_hot_keys();
    }

    /// Aggressive optimization (more expensive)
    #[wasm_bindgen]
    pub fn optimize_aggressive(&mut self) {
        self.cleanup_expired();
        self.optimize_hot_keys();
        self.defragment();
    }

    /// Memory-focused optimization with garbage collection
    #[wasm_bindgen]
    pub fn optimize_memory(&mut self) {
        let gc_start = js_sys::Date::now();

        self.cleanup_expired();
        self.compress_entries();
        self.defragment_advanced();

        // Shrink collections if they're over-allocated
        if self.data.capacity() > self.data.len() * 2 {
            self.data.shrink_to_fit();
        }

        self.access_order.shrink_to_fit();
        self.hot_keys.shrink_to_fit();

        let gc_time = js_sys::Date::now() - gc_start;
        self.stats.gc_runs += 1;
        self.stats.gc_time_ms += gc_time;

        // Update fragmentation ratio
        self.update_fragmentation_ratio();
    }

    /// Batch-insert multiple entries in a single call
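    ///
    /// Expects a JS array of `[key, value, ttl_ms]` tuples (TTL may be `null`),
    /// e.g. `[["a", "1", 5000], ["b", "2", null]]`, matching the
    /// `Vec<(String, String, Option<u64>)>` this method deserializes into.
    /// Returns the number of entries actually inserted.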
    #[wasm_bindgen]
    pub fn batch_set(&mut self, keys_values: &JsValue) -> u32 {
        let mut inserted = 0;

        if let Ok(batch) = serde_wasm_bindgen::from_value::<Vec<(String, String, Option<u64>)>>(keys_values.clone()) {
            for (key, value, ttl) in batch {
                if self.put_with_ttl_and_priority(key, value, ttl, CachePriority::Medium) {
                    inserted += 1;
                }
            }
        }

        inserted
    }

    /// Batch get operations
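    ///
    /// Takes a JS array of keys and returns an object mapping each key that was
    /// found (and not expired) to its value; missing keys are simply omitted.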
    #[wasm_bindgen]
    pub fn batch_get(&mut self, keys: &JsValue) -> JsValue {
        let mut results = HashMap::new();

        if let Ok(key_list) = serde_wasm_bindgen::from_value::<Vec<String>>(keys.clone()) {
            for key in key_list {
                if let Some(value) = self.get(&key) {
                    results.insert(key, value);
                }
            }
        }

        serde_wasm_bindgen::to_value(&results).unwrap_or(JsValue::NULL)
    }

    /// Get cache health metrics
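    ///
    /// Returns an object with an overall health score (0.0 to 1.0), current
    /// memory pressure, qualitative hit-rate and fragmentation ratings, and a
    /// list of tuning recommendations.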
    #[wasm_bindgen]
    pub fn get_health_metrics(&self) -> JsValue {
        let health = serde_json::json!({
            "overall_health": self.calculate_health_score(),
            "memory_pressure": self.calculate_memory_pressure(),
            "hit_rate_health": if self.stats.hit_rate > 0.8 { "excellent" } else if self.stats.hit_rate > 0.6 { "good" } else { "poor" },
            "fragmentation_health": if self.stats.fragmentation_ratio < 0.2 { "excellent" } else if self.stats.fragmentation_ratio < 0.4 { "good" } else { "poor" },
            "recommendations": self.generate_recommendations()
        });

        serde_wasm_bindgen::to_value(&health).unwrap_or(JsValue::NULL)
    }
}

impl FastCache {
    // Internal helpers (not exported through wasm_bindgen)
    fn put_with_ttl_and_priority(&mut self, key: String, value: String, ttl: Option<u64>, priority: CachePriority) -> bool {
        // Compress value if beneficial
        let (compressed_value, compression_ratio) = Self::maybe_compress(&value);
        let entry = self.create_enhanced_entry(compressed_value, ttl, priority, compression_ratio);
        let entry_size = entry.size + key.len();

        // Check capacity constraints
        if self.data.len() >= self.max_entries ||
           self.stats.size + entry_size > self.max_size {
            if !self.evict_entries_intelligent(entry_size, &priority) {
                return false;
            }
        }

        // Remove existing entry if present
        self.remove_internal(&key);

        // Insert new entry
        self.stats.size += entry_size;
        self.stats.entries += 1;
        self.data.insert(key.clone(), entry);
        self.access_order.push_back(key.clone());

        // Update priority distribution stats
        let priority_key = format!("{:?}", priority);
        *self.stats.priority_distribution.entry(priority_key).or_insert(0) += 1;

        true
    }

    fn create_enhanced_entry(&self, value: String, ttl: Option<u64>, priority: CachePriority, compression_ratio: f32) -> CacheEntry {
        let validation_hash = self.calculate_hash(&value);
        CacheEntry {
            size: value.len(),
            value,
            timestamp: js_sys::Date::now(),
            ttl,
            access_count: 1,
            priority,
            tags: SmallVec::new(),
            compression_ratio,
            last_modified: js_sys::Date::now(),
            access_pattern: AccessPattern {
                frequency_score: 1.0,
                recency_score: 1.0,
                seasonal_pattern: false,
                burst_access: false,
                access_intervals: SmallVec::new(),
            },
            validation_hash,
        }
    }

    fn maybe_compress(value: &str) -> (String, f32) {
        // Simple compression heuristic - in a real implementation, use proper compression
        if value.len() > 1000 {
            // Simulate compression by removing redundant whitespace
            let compressed = value.split_whitespace().collect::<Vec<_>>().join(" ");
            let ratio = compressed.len() as f32 / value.len() as f32;
            (compressed, ratio)
        } else {
            (value.to_string(), 1.0)
        }
    }

    fn calculate_hash(&self, value: &str) -> u64 {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};

        let mut hasher = DefaultHasher::new();
        value.hash(&mut hasher);
        hasher.finish()
    }

    fn evict_entries_intelligent(&mut self, needed_space: usize, incoming_priority: &CachePriority) -> bool {
        let mut freed_space = 0;
        let mut evicted_count = 0;

        // First, remove expired entries
        let expired_keys: Vec<String> = self.data
            .iter()
            .filter(|(_, entry)| entry.is_expired())
            .map(|(key, _)| key.clone())
            .collect();

        for key in expired_keys {
            if let Some(entry) = self.data.get(&key) {
                freed_space += entry.size + key.len();
                self.stats.compression_savings += (entry.size as f32 * (1.0 - entry.compression_ratio)) as u64;
            }
            self.remove_internal(&key);
            self.stats.expired_entries += 1;
        }

        // If not enough space, use intelligent eviction based on priority and access patterns
        let mut eviction_candidates: Vec<(String, f64)> = self.data
            .iter()
            .filter(|(_, entry)| entry.priority < *incoming_priority ||
                   (entry.priority == *incoming_priority && entry.access_count < 2))
            .map(|(key, entry)| {
                let eviction_score = self.calculate_eviction_score(entry);
                (key.clone(), eviction_score)
            })
            .collect();

        // Sort by eviction score (higher score = more likely to evict)
        eviction_candidates.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());

        for (key, _) in eviction_candidates {
            if freed_space >= needed_space {
                break;
            }

            if let Some(entry) = self.data.get(&key) {
                freed_space += entry.size + key.len();
                self.stats.compression_savings += (entry.size as f32 * (1.0 - entry.compression_ratio)) as u64;
            }
            self.remove_internal(&key);
            evicted_count += 1;
        }

        // Fallback to LRU if still not enough space
        while freed_space < needed_space && !self.access_order.is_empty() {
            if let Some(lru_key) = self.access_order.pop_front() {
                if let Some(entry) = self.data.get(&lru_key) {
                    freed_space += entry.size + lru_key.len();
                    self.stats.compression_savings += (entry.size as f32 * (1.0 - entry.compression_ratio)) as u64;
                }
                self.remove_internal(&lru_key);
                evicted_count += 1;
            } else {
                break;
            }
        }

        self.stats.evictions += evicted_count;
        freed_space >= needed_space
    }

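    // Higher score = better eviction candidate. As a rough illustration: an
    // entry idle for 60 s, accessed once, at Medium priority and ~1 KiB in size
    // scores roughly 60.0 * 1.0 * 0.5 + log2(1024)/20 = 30.5, while a freshly
    // accessed Critical entry scores close to its size term alone.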
    fn calculate_eviction_score(&self, entry: &CacheEntry) -> f64 {
        let age_factor = (js_sys::Date::now() - entry.timestamp) / 1000.0; // seconds
        let access_factor = 1.0 / (entry.access_count as f64).max(1.0);
        let priority_factor = match entry.priority {
            CachePriority::Critical => 0.1,
            CachePriority::High => 0.3,
            CachePriority::Medium => 0.5,
            CachePriority::Low => 0.8,
            CachePriority::Disposable => 1.0,
        };
        let size_factor = (entry.size as f64).log2() / 20.0; // Prefer evicting larger items

        age_factor * access_factor * priority_factor + size_factor
    }

    fn compress_entries(&mut self) {
        // Compress entries that haven't been accessed recently
        let threshold_time = js_sys::Date::now() - 300000.0; // 5 minutes

        // Collect keys that need compression to avoid borrow conflicts
        let keys_to_compress: Vec<String> = self.data
            .iter()
            .filter(|(_, entry)| entry.timestamp < threshold_time && entry.compression_ratio > 0.8)
            .map(|(key, _)| key.clone())
            .collect();

        for key in keys_to_compress {
            if let Some(entry) = self.data.get_mut(&key) {
                let value_clone = entry.value.clone();
                let (compressed, ratio) = Self::maybe_compress(&value_clone);
                if ratio < entry.compression_ratio {
                    let old_size = entry.size;
                    entry.value = compressed;
                    entry.size = entry.value.len();
                    entry.compression_ratio = ratio;

                    self.stats.size -= old_size;
                    self.stats.size += entry.size;
                    self.stats.compression_savings += (old_size - entry.size) as u64;
                }
            }
        }
    }

    fn defragment_advanced(&mut self) {
        // Rebuild access order based on actual access patterns and recency
        let mut weighted_keys: Vec<(String, f64)> = self.data
            .iter()
            .map(|(key, entry)| {
                let recency_weight = 1.0 / ((js_sys::Date::now() - entry.timestamp) / 1000.0 + 1.0);
                let frequency_weight = (entry.access_count as f64).log2();
                let priority_weight = match entry.priority {
                    CachePriority::Critical => 10.0,
                    CachePriority::High => 7.0,
                    CachePriority::Medium => 5.0,
                    CachePriority::Low => 3.0,
                    CachePriority::Disposable => 1.0,
                };

                let total_weight = recency_weight + frequency_weight + priority_weight;
                (key.clone(), total_weight)
            })
            .collect();

        weighted_keys.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());

        self.access_order.clear();
        for (key, _) in weighted_keys {
            self.access_order.push_back(key);
        }

        // Update hot/cold entry counts
        self.update_hot_cold_counts();
    }

    fn update_hot_cold_counts(&mut self) {
        let (mut hot, mut cold) = (0, 0);

        for entry in self.data.values() {
            if entry.access_count >= self.hot_threshold {
                hot += 1;
            } else {
                cold += 1;
            }
        }

        self.stats.hot_entries = hot;
        self.stats.cold_entries = cold;
    }

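    // Fragmentation compares actual byte usage against an idealized layout of
    // ~100 bytes per entry; e.g. 1,000 entries occupying 150,000 bytes give
    // |150_000 - 100_000| / 100_000 = 0.5.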
    fn update_fragmentation_ratio(&mut self) {
        // Calculate fragmentation based on memory usage patterns
        let ideal_size = self.stats.entries * 100; // Assume 100 bytes average per entry
        self.stats.fragmentation_ratio = if ideal_size > 0 {
            (self.stats.size as f64 - ideal_size as f64).abs() / ideal_size as f64
        } else {
            0.0
        };
    }

    fn calculate_health_score(&self) -> f64 {
        let hit_rate_score = self.stats.hit_rate * 0.4;
        let memory_efficiency_score = (1.0 - self.stats.fragmentation_ratio).max(0.0) * 0.3;
        let eviction_score = if self.stats.total_requests > 0 {
            (1.0 - (self.stats.evictions as f64 / self.stats.total_requests as f64)).max(0.0) * 0.3
        } else {
            1.0 * 0.3
        };

        (hit_rate_score + memory_efficiency_score + eviction_score).min(1.0)
    }

    fn calculate_memory_pressure(&self) -> f64 {
        self.stats.size as f64 / self.max_size as f64
    }

    fn generate_recommendations(&self) -> Vec<String> {
        let mut recommendations = Vec::new();

        if self.stats.hit_rate < 0.6 {
            recommendations.push("Consider increasing cache size or improving cache key strategies".to_string());
        }

        if self.stats.fragmentation_ratio > 0.4 {
            recommendations.push("Run memory optimization to reduce fragmentation".to_string());
        }

        if self.calculate_memory_pressure() > 0.9 {
            recommendations.push("Cache is near capacity - consider eviction policy tuning".to_string());
        }

        if self.stats.evictions > self.stats.total_requests / 4 {
            recommendations.push("High eviction rate detected - consider larger cache size".to_string());
        }

        recommendations
    }

    fn put_with_ttl(&mut self, key: String, value: String, ttl: Option<u64>) -> bool {
        self.put_with_ttl_and_priority(key, value, ttl, CachePriority::Medium)
    }

    fn remove_internal(&mut self, key: &str) -> bool {
        if let Some(entry) = self.data.remove(key) {
            self.stats.size -= entry.size + key.len();
            self.stats.entries -= 1;

            // Remove from access order
            if let Some(pos) = self.access_order.iter().position(|k| k == key) {
                self.access_order.remove(pos);
            }

            // Remove from hot keys
            if let Some(pos) = self.hot_keys.iter().position(|k| k == key) {
                self.hot_keys.remove(pos);
            }

            true
        } else {
            false
        }
    }

    // Simpler LRU-only eviction path; kept as a fallback but not currently
    // called (put_with_ttl_and_priority uses evict_entries_intelligent).
    #[allow(dead_code)]
    fn evict_entries(&mut self, needed_space: usize) -> bool {
        let mut freed_space = 0;
        let mut evicted_count = 0;

        // First, remove expired entries
        let expired_keys: Vec<String> = self.data
            .iter()
            .filter(|(_, entry)| entry.is_expired())
            .map(|(key, _)| key.clone())
            .collect();

        for key in expired_keys {
            if let Some(entry) = self.data.get(&key) {
                freed_space += entry.size + key.len();
            }
            self.remove_internal(&key);
            self.stats.expired_entries += 1;
        }

        // If not enough space, use LRU eviction
        while freed_space < needed_space && !self.access_order.is_empty() {
            if let Some(lru_key) = self.access_order.pop_front() {
                if let Some(entry) = self.data.get(&lru_key) {
                    freed_space += entry.size + lru_key.len();
                }
                self.remove_internal(&lru_key);
                evicted_count += 1;
            } else {
                break;
            }
        }

        self.stats.evictions += evicted_count;
        freed_space >= needed_space
    }

    fn promote_hot_key(&mut self, key: &str) {
        if let Some(entry) = self.data.get(key) {
            if entry.access_count >= self.hot_threshold &&
               !self.hot_keys.contains(&key.to_string()) &&
               self.hot_keys.len() < self.hot_keys.capacity() {
                self.hot_keys.push(key.to_string());
            }
        }
    }

    fn update_access_order(&mut self, key: &str) {
        // Remove from current position
        if let Some(pos) = self.access_order.iter().position(|k| k == key) {
            self.access_order.remove(pos);
        }
        // Add to end (most recently used)
        self.access_order.push_back(key.to_string());
    }

    fn cleanup_expired(&mut self) {
        let expired_keys: Vec<String> = self.data
            .iter()
            .filter(|(_, entry)| entry.is_expired())
            .map(|(key, _)| key.clone())
            .collect();

        for key in expired_keys {
            self.remove_internal(&key);
            self.stats.expired_entries += 1;
        }
    }

    fn optimize_hot_keys(&mut self) {
        // Remove hot keys that are no longer frequently accessed
        self.hot_keys.retain(|key| {
            if let Some(entry) = self.data.get(key) {
                entry.access_count >= self.hot_threshold
            } else {
                false
            }
        });
    }

    fn defragment(&mut self) {
        // Rebuild access order based on actual access patterns
        let mut ordered_keys: Vec<(String, f64)> = self.data
            .iter()
            .map(|(key, entry)| (key.clone(), entry.timestamp))
            .collect();

        ordered_keys.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());

        self.access_order.clear();
        for (key, _) in ordered_keys {
            self.access_order.push_back(key);
        }
    }

    fn update_rates(&mut self) {
        if self.stats.total_requests > 0 {
            self.stats.hit_rate = self.stats.cache_hits as f64 / self.stats.total_requests as f64;
            self.stats.miss_rate = self.stats.cache_misses as f64 / self.stats.total_requests as f64;
        }
    }
}

impl Default for FastCache {
    fn default() -> Self {
        Self::new()
    }
}

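// Note: these tests assume a JS host for `js_sys::Date::now()` (e.g. run under
// `wasm-pack test --node`); the timestamp calls are unavailable on native targets.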
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_cache_basic_operations() {
        let mut cache = FastCache::new();

        // Test put and get
        assert!(cache.put("key1".to_string(), "value1".to_string()));
        assert_eq!(cache.get("key1"), Some("value1".to_string()));

        // Test non-existent key
        assert_eq!(cache.get("key2"), None);

        // Test remove
        assert!(cache.remove("key1"));
        assert_eq!(cache.get("key1"), None);
    }

    #[test]
    fn test_cache_stats() {
        let mut cache = FastCache::new();
        cache.put("key1".to_string(), "value1".to_string());

        let _ = cache.get("key1"); // Hit
        let _ = cache.get("key2"); // Miss

        assert_eq!(cache.stats.cache_hits, 1);
        assert_eq!(cache.stats.cache_misses, 1);
        assert_eq!(cache.stats.total_requests, 2);
    }

    #[test]
    fn test_cache_capacity() {
        let mut cache = FastCache::with_capacity(100);

        // Fill beyond capacity
        for i in 0..50 {
            cache.put(format!("key{}", i), format!("value{}", i));
        }

        // Should have evicted some entries
        assert!(cache.size() <= cache.max_entries);
        assert!(cache.memory_usage() <= cache.max_size);
    }
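
    // A sketch of a TTL expiry check (illustrative: assumes a JS host and uses
    // a short busy-wait rather than a robust timer).
    #[test]
    fn test_ttl_expiry_sketch() {
        let mut cache = FastCache::new();
        assert!(cache.set("ephemeral", "v", 1)); // 1 ms TTL

        let start = js_sys::Date::now();
        while js_sys::Date::now() - start < 5.0 {} // crude wait for > 1 ms

        assert_eq!(cache.get("ephemeral"), None);
    }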
}