chie_core/tiered_cache.rs

//! Multi-level cache hierarchy balancing performance and storage cost.
//!
//! This module implements a multi-tiered caching system with automatic promotion
//! and demotion between levels based on access patterns:
//! - L1: Hot data in memory (fastest, smallest)
//! - L2: Warm data on SSD (fast, medium)
//! - L3: Cold data on HDD (slow, largest)
//!
//! # Example
//!
//! ```rust
//! use chie_core::tiered_cache::{TieredCache, TieredCacheConfig};
//! use std::path::PathBuf;
//!
//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
//! let config = TieredCacheConfig {
//!     l1_capacity_bytes: 100 * 1024 * 1024,      // 100 MB in memory
//!     l2_capacity_bytes: 1024 * 1024 * 1024,     // 1 GB on SSD
//!     l3_capacity_bytes: 10 * 1024 * 1024 * 1024, // 10 GB on HDD
//!     l2_path: PathBuf::from("/fast-ssd/cache"),
//!     l3_path: PathBuf::from("/slow-hdd/cache"),
//!     promotion_threshold: 3,  // Promote after 3 accesses
//!     ..Default::default()     // Keep the default compression setting
//! };
//!
//! let mut cache = TieredCache::new(config).await?;
//!
//! // Insert data (starts in L1)
//! cache.put("key1".to_string(), b"hot data".to_vec()).await?;
//!
//! // Get data (automatically promotes if accessed frequently)
//! if let Some(data) = cache.get("key1").await? {
//!     println!("Found data: {} bytes", data.len());
//! }
//!
//! // Get cache statistics
//! let stats = cache.stats();
//! println!("L1 hit rate: {:.2}%", stats.l1_hit_rate() * 100.0);
//! # Ok(())
//! # }
//! ```

use crate::compression::{CompressionAlgorithm, Compressor};
use serde::{Deserialize, Serialize};
use std::cell::RefCell;
use std::collections::HashMap;
use std::path::PathBuf;
use thiserror::Error;
use tokio::fs;
use tokio::io::{AsyncReadExt, AsyncWriteExt};

/// Tiered cache error types.
#[derive(Debug, Error)]
pub enum TieredCacheError {
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),

    #[error("Key not found: {0}")]
    KeyNotFound(String),

    #[error("Tier full: {tier}")]
    TierFull { tier: String },

    #[error("Serialization error: {0}")]
    Serialization(String),
}

/// Cache tier levels.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum CacheTier {
    /// L1: In-memory cache (hottest data).
    L1 = 1,
    /// L2: SSD cache (warm data).
    L2 = 2,
    /// L3: HDD cache (cold data).
    L3 = 3,
}

impl CacheTier {
    /// Get tier name.
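    ///
    /// A quick doc-test of the returned labels:
    ///
    /// ```rust
    /// use chie_core::tiered_cache::CacheTier;
    ///
    /// assert_eq!(CacheTier::L1.name(), "L1-Memory");
    /// assert_eq!(CacheTier::L2.name(), "L2-SSD");
    /// ```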
    #[must_use]
    #[inline]
    pub const fn name(&self) -> &'static str {
        match self {
            Self::L1 => "L1-Memory",
            Self::L2 => "L2-SSD",
            Self::L3 => "L3-HDD",
        }
    }
}

/// Metadata for a cached item.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct CacheItemMetadata {
    key: String,
    size_bytes: u64,
    tier: CacheTier,
    access_count: u64,
    last_access_ms: i64,
    created_ms: i64,
}

impl CacheItemMetadata {
    /// Create new metadata.
    fn new(key: String, size_bytes: u64, tier: CacheTier) -> Self {
        let now_ms = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_millis() as i64;

        Self {
            key,
            size_bytes,
            tier,
            access_count: 0,
            last_access_ms: now_ms,
            created_ms: now_ms,
        }
    }

    /// Record an access.
    fn record_access(&mut self) {
        self.access_count += 1;
        self.last_access_ms = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_millis() as i64;
    }

    /// Check if item should be promoted.
    #[must_use]
    #[inline]
    const fn should_promote(&self, threshold: u64) -> bool {
        self.access_count >= threshold
    }
}

/// Configuration for tiered cache.
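///
/// # Example
///
/// A minimal sketch: override only what differs from the defaults and keep
/// the rest via struct update syntax.
///
/// ```rust
/// use chie_core::tiered_cache::TieredCacheConfig;
/// use std::path::PathBuf;
///
/// let config = TieredCacheConfig {
///     l1_capacity_bytes: 50 * 1024 * 1024, // 50 MB
///     l2_path: PathBuf::from("/tmp/cache/l2"),
///     l3_path: PathBuf::from("/tmp/cache/l3"),
///     ..Default::default()
/// };
/// assert_eq!(config.promotion_threshold, 3);
/// ```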
#[derive(Debug, Clone)]
pub struct TieredCacheConfig {
    /// L1 (memory) capacity in bytes.
    pub l1_capacity_bytes: u64,
    /// L2 (SSD) capacity in bytes.
    pub l2_capacity_bytes: u64,
    /// L3 (HDD) capacity in bytes.
    pub l3_capacity_bytes: u64,
    /// Path for L2 cache.
    pub l2_path: PathBuf,
    /// Path for L3 cache.
    pub l3_path: PathBuf,
    /// Number of accesses before promotion.
    pub promotion_threshold: u64,
    /// Compression algorithm for L2/L3 tiers (None = no compression).
    pub compression: CompressionAlgorithm,
}

impl Default for TieredCacheConfig {
    fn default() -> Self {
        Self {
            l1_capacity_bytes: 100 * 1024 * 1024,       // 100 MB
            l2_capacity_bytes: 1024 * 1024 * 1024,      // 1 GB
            l3_capacity_bytes: 10 * 1024 * 1024 * 1024, // 10 GB
            l2_path: PathBuf::from("./cache/l2"),
            l3_path: PathBuf::from("./cache/l3"),
            promotion_threshold: 3,
            compression: CompressionAlgorithm::Balanced, // Default to balanced compression
        }
    }
}

/// Statistics for tiered cache.
#[derive(Debug, Clone, Default)]
pub struct TieredCacheStats {
    /// L1 hits.
    pub l1_hits: u64,
    /// L2 hits.
    pub l2_hits: u64,
    /// L3 hits.
    pub l3_hits: u64,
    /// Cache misses.
    pub misses: u64,
    /// Items promoted from L2 to L1.
    pub promotions_l2_to_l1: u64,
    /// Items promoted from L3 to L2.
    pub promotions_l3_to_l2: u64,
    /// Items demoted from L1 to L2.
    pub demotions_l1_to_l2: u64,
    /// Items demoted from L2 to L3.
    pub demotions_l2_to_l3: u64,
    /// Items evicted from L3.
    pub evictions: u64,
}

impl TieredCacheStats {
    /// Calculate L1 hit rate.
    #[must_use]
    #[inline]
    pub fn l1_hit_rate(&self) -> f64 {
        let total = self.l1_hits + self.l2_hits + self.l3_hits + self.misses;
        if total == 0 {
            0.0
        } else {
            self.l1_hits as f64 / total as f64
        }
    }

    /// Calculate overall hit rate.
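    ///
    /// For example, 6 L1 hits, 3 L2 hits, and 1 miss give a 90% hit rate:
    ///
    /// ```rust
    /// use chie_core::tiered_cache::TieredCacheStats;
    ///
    /// let stats = TieredCacheStats {
    ///     l1_hits: 6,
    ///     l2_hits: 3,
    ///     misses: 1,
    ///     ..Default::default()
    /// };
    /// assert!((stats.overall_hit_rate() - 0.9).abs() < 1e-9);
    /// ```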
    #[must_use]
    #[inline]
    pub fn overall_hit_rate(&self) -> f64 {
        let hits = self.l1_hits + self.l2_hits + self.l3_hits;
        let total = hits + self.misses;
        if total == 0 {
            0.0
        } else {
            hits as f64 / total as f64
        }
    }

    /// Calculate average tier (1.0 = all L1, 3.0 = all L3).
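    ///
    /// For example, one L1 hit and one L3 hit average out to tier 2.0:
    ///
    /// ```rust
    /// use chie_core::tiered_cache::TieredCacheStats;
    ///
    /// let stats = TieredCacheStats {
    ///     l1_hits: 1,
    ///     l3_hits: 1,
    ///     ..Default::default()
    /// };
    /// assert!((stats.average_tier() - 2.0).abs() < 1e-9);
    /// ```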
    #[must_use]
    #[inline]
    pub fn average_tier(&self) -> f64 {
        let hits = self.l1_hits + self.l2_hits + self.l3_hits;
        if hits == 0 {
            0.0
        } else {
            (self.l1_hits as f64 + self.l2_hits as f64 * 2.0 + self.l3_hits as f64 * 3.0)
                / hits as f64
        }
    }
}

/// Multi-level cache with automatic tiering.
pub struct TieredCache {
    config: TieredCacheConfig,
    /// L1 cache (in-memory).
    l1: HashMap<String, Vec<u8>>,
    /// Metadata for all items.
    metadata: HashMap<String, CacheItemMetadata>,
    /// Current usage per tier.
    l1_used: u64,
    l2_used: u64,
    l3_used: u64,
    /// Statistics.
    stats: TieredCacheStats,
    /// Compressor for L2/L3 tiers (RefCell for interior mutability).
    compressor: RefCell<Compressor>,
}

impl TieredCache {
    /// Create a new tiered cache.
    pub async fn new(config: TieredCacheConfig) -> Result<Self, TieredCacheError> {
        // Create directories for L2 and L3
        fs::create_dir_all(&config.l2_path).await?;
        fs::create_dir_all(&config.l3_path).await?;

        let compressor = RefCell::new(Compressor::new(config.compression));

        Ok(Self {
            compressor,
            config,
            l1: HashMap::new(),
            metadata: HashMap::new(),
            l1_used: 0,
            l2_used: 0,
            l3_used: 0,
            stats: TieredCacheStats::default(),
        })
    }

    /// Put data into cache (starts in L1).
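    ///
    /// If the item does not fit in L1 even after demoting the least recently
    /// used L1 entry, it is placed directly in L2, falling back to L3 when
    /// L2 is also full.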
    pub async fn put(&mut self, key: String, data: Vec<u8>) -> Result<(), TieredCacheError> {
        let size = data.len() as u64;

        // Remove old entry if it exists (copy the tier out first so the
        // metadata borrow does not overlap the mutable call below)
        if let Some(old_tier) = self.metadata.get(&key).map(|m| m.tier) {
            self.remove_from_tier(&key, old_tier).await?;
        }

        // Try to place in L1
        if self.l1_used + size <= self.config.l1_capacity_bytes {
            self.l1.insert(key.clone(), data);
            self.l1_used += size;
            self.metadata.insert(
                key.clone(),
                CacheItemMetadata::new(key, size, CacheTier::L1),
            );
            Ok(())
        } else {
            // Evict from L1 to make space or place in L2
            self.evict_from_l1().await?;
            if self.l1_used + size <= self.config.l1_capacity_bytes {
                self.l1.insert(key.clone(), data);
                self.l1_used += size;
                self.metadata.insert(
                    key.clone(),
                    CacheItemMetadata::new(key, size, CacheTier::L1),
                );
                Ok(())
            } else {
                // Place directly in L2
                self.place_in_l2(key, data, size).await
            }
        }
    }

    /// Get data from cache.
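    ///
    /// Reads from L2 or L3 record the access; once an item's access count
    /// reaches `promotion_threshold`, it is promoted one level up the
    /// hierarchy as part of the read.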
    pub async fn get(&mut self, key: &str) -> Result<Option<Vec<u8>>, TieredCacheError> {
        // Record access and get tier info
        let (tier, should_promote) = if let Some(meta) = self.metadata.get_mut(key) {
            meta.record_access();
            let should_promote = meta.should_promote(self.config.promotion_threshold);
            (meta.tier, should_promote)
        } else {
            self.stats.misses += 1;
            return Ok(None);
        };

        match tier {
            CacheTier::L1 => {
                self.stats.l1_hits += 1;
                Ok(self.l1.get(key).cloned())
            }
            CacheTier::L2 => {
                self.stats.l2_hits += 1;
                let data = self.read_from_l2(key).await?;

                // Promote to L1 if accessed frequently
                if should_promote {
                    self.promote_to_l1(key.to_string(), data.clone()).await?;
                }

                Ok(Some(data))
            }
            CacheTier::L3 => {
                self.stats.l3_hits += 1;
                let data = self.read_from_l3(key).await?;

                // Promote to L2 if accessed frequently
                if should_promote {
                    self.promote_to_l2(key.to_string(), data.clone()).await?;
                }

                Ok(Some(data))
            }
        }
    }

    /// Remove item from cache.
    pub async fn remove(&mut self, key: &str) -> Result<(), TieredCacheError> {
        // Clean up the tier's storage *before* dropping the metadata entry,
        // since remove_from_tier looks up the item's size in the metadata map.
        if let Some(tier) = self.metadata.get(key).map(|m| m.tier) {
            self.remove_from_tier(key, tier).await?;
        }
        self.metadata.remove(key);
        Ok(())
    }

    /// Get cache statistics.
    #[must_use]
    #[inline]
    pub const fn stats(&self) -> &TieredCacheStats {
        &self.stats
    }

    /// Get L1 usage as a fraction of capacity (0.0 to 1.0).
    #[must_use]
    #[inline]
    pub fn l1_usage_percent(&self) -> f64 {
        if self.config.l1_capacity_bytes == 0 {
            0.0
        } else {
            self.l1_used as f64 / self.config.l1_capacity_bytes as f64
        }
    }

    /// Warm the cache with a list of key-value pairs.
    ///
    /// This is useful for cold starts where you want to pre-populate
    /// frequently accessed data. Items are placed according to available
    /// capacity, starting from L1.
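    ///
    /// A minimal sketch (with hidden async scaffolding, as in the module
    /// example):
    ///
    /// ```rust
    /// # use chie_core::tiered_cache::TieredCache;
    /// # async fn example(cache: &mut TieredCache) -> Result<(), Box<dyn std::error::Error>> {
    /// let items = vec![
    ///     ("config".to_string(), b"v1".to_vec()),
    ///     ("index".to_string(), b"v2".to_vec()),
    /// ];
    /// let warmed = cache.warm_with_data(items).await?;
    /// println!("warmed {warmed} items");
    /// # Ok(())
    /// # }
    /// ```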
    pub async fn warm_with_data(
        &mut self,
        items: Vec<(String, Vec<u8>)>,
    ) -> Result<usize, TieredCacheError> {
        let mut warmed = 0;

        for (key, data) in items {
            if self.put(key, data).await.is_ok() {
                warmed += 1;
            }
        }

        Ok(warmed)
    }

    /// Warm the cache by loading keys from a list.
    ///
    /// This method attempts to load the given keys from the storage tiers
    /// (L2/L3) and promote them to L1 for faster access on startup.
    pub async fn warm_from_keys(&mut self, keys: &[String]) -> Result<usize, TieredCacheError> {
        let mut warmed = 0;

        for key in keys {
            // Try to load from L2
            if let Ok(data) = self.read_from_l2(key).await {
                if self.put(key.clone(), data).await.is_ok() {
                    warmed += 1;
                    continue;
                }
            }

            // Try to load from L3
            if let Ok(data) = self.read_from_l3(key).await {
                if self.put(key.clone(), data).await.is_ok() {
                    warmed += 1;
                }
            }
        }

        Ok(warmed)
    }

    /// Export hot keys (most frequently accessed) for warming on next startup.
    ///
    /// Returns keys sorted by access count in descending order.
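    ///
    /// A typical pattern (sketch) is to persist these keys on shutdown and
    /// replay them through [`Self::warm_from_keys`] on the next startup:
    ///
    /// ```rust
    /// # use chie_core::tiered_cache::TieredCache;
    /// # async fn example(cache: &mut TieredCache) -> Result<(), Box<dyn std::error::Error>> {
    /// let hot_keys = cache.export_hot_keys(100);
    /// // ...persist hot_keys somewhere, then on the next startup:
    /// let warmed = cache.warm_from_keys(&hot_keys).await?;
    /// println!("warmed {warmed} keys");
    /// # Ok(())
    /// # }
    /// ```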
    #[must_use]
    pub fn export_hot_keys(&self, limit: usize) -> Vec<String> {
        let mut items: Vec<_> = self
            .metadata
            .iter()
            .map(|(key, meta)| (key.clone(), meta.access_count))
            .collect();

        items.sort_by(|a, b| b.1.cmp(&a.1));

        items.into_iter().take(limit).map(|(key, _)| key).collect()
    }

    /// Get the number of cached items.
    #[must_use]
    #[inline]
    pub fn len(&self) -> usize {
        self.metadata.len()
    }

    /// Check if the cache is empty.
    #[must_use]
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.metadata.is_empty()
    }

    // Helper methods

    async fn place_in_l2(
        &mut self,
        key: String,
        data: Vec<u8>,
        size: u64,
    ) -> Result<(), TieredCacheError> {
        if self.l2_used + size > self.config.l2_capacity_bytes {
            self.evict_from_l2().await?;
        }

        if self.l2_used + size <= self.config.l2_capacity_bytes {
            self.write_to_l2(&key, &data).await?;
            self.l2_used += size;
            self.metadata.insert(
                key.clone(),
                CacheItemMetadata::new(key, size, CacheTier::L2),
            );
            Ok(())
        } else {
            self.place_in_l3(key, data, size).await
        }
    }

    async fn place_in_l3(
        &mut self,
        key: String,
        data: Vec<u8>,
        size: u64,
    ) -> Result<(), TieredCacheError> {
        if self.l3_used + size > self.config.l3_capacity_bytes {
            self.evict_from_l3().await?;
        }

        if self.l3_used + size <= self.config.l3_capacity_bytes {
            self.write_to_l3(&key, &data).await?;
            self.l3_used += size;
            self.metadata.insert(
                key.clone(),
                CacheItemMetadata::new(key, size, CacheTier::L3),
            );
            Ok(())
        } else {
            Err(TieredCacheError::TierFull {
                tier: "L3".to_string(),
            })
        }
    }

    async fn evict_from_l1(&mut self) -> Result<(), TieredCacheError> {
        // Find LRU item in L1
        let lru_key = self
            .metadata
            .iter()
            .filter(|(_, meta)| meta.tier == CacheTier::L1)
            .min_by_key(|(_, meta)| meta.last_access_ms)
            .map(|(key, _)| key.clone());

        if let Some(key) = lru_key {
            if let Some(data) = self.l1.remove(&key) {
                // Get size before calling write methods
                let size = self.metadata.get(&key).map(|m| m.size_bytes).unwrap_or(0);

                self.l1_used -= size;

                // Make room in L2 first so the demotion cannot overflow it
                if self.l2_used + size > self.config.l2_capacity_bytes {
                    self.evict_from_l2().await?;
                }

                // Demote to L2
                self.write_to_l2(&key, &data).await?;
                self.l2_used += size;

                // Update metadata tier
                if let Some(meta) = self.metadata.get_mut(&key) {
                    meta.tier = CacheTier::L2;
                }

                self.stats.demotions_l1_to_l2 += 1;
            }
        }

        Ok(())
    }

    async fn evict_from_l2(&mut self) -> Result<(), TieredCacheError> {
        // Find LRU item in L2
        let lru_key = self
            .metadata
            .iter()
            .filter(|(_, meta)| meta.tier == CacheTier::L2)
            .min_by_key(|(_, meta)| meta.last_access_ms)
            .map(|(key, _)| key.clone());

        if let Some(key) = lru_key {
            // Get size before calling methods
            let size = self.metadata.get(&key).map(|m| m.size_bytes).unwrap_or(0);

            let data = self.read_from_l2(&key).await?;

            self.l2_used -= size;

            // Make room in L3 first so the demotion cannot overflow it
            if self.l3_used + size > self.config.l3_capacity_bytes {
                self.evict_from_l3().await?;
            }

            // Demote to L3
            self.write_to_l3(&key, &data).await?;
            self.l3_used += size;

            // Update metadata tier
            if let Some(meta) = self.metadata.get_mut(&key) {
                meta.tier = CacheTier::L3;
            }

            self.stats.demotions_l2_to_l3 += 1;

            // Remove from L2
            let _ = fs::remove_file(self.l2_path(&key)).await;
        }

        Ok(())
    }

    async fn evict_from_l3(&mut self) -> Result<(), TieredCacheError> {
        // Find LRU item in L3
        let lru_key = self
            .metadata
            .iter()
            .filter(|(_, meta)| meta.tier == CacheTier::L3)
            .min_by_key(|(_, meta)| meta.last_access_ms)
            .map(|(key, _)| key.clone());

        if let Some(key) = lru_key {
            if let Some(meta) = self.metadata.remove(&key) {
                self.l3_used -= meta.size_bytes;
                let _ = fs::remove_file(self.l3_path(&key)).await;
                self.stats.evictions += 1;
            }
        }

        Ok(())
    }

    async fn promote_to_l1(&mut self, key: String, data: Vec<u8>) -> Result<(), TieredCacheError> {
        // Extract metadata without holding a mutable borrow
        let (size, current_tier) = if let Some(meta) = self.metadata.get(&key) {
            (meta.size_bytes, meta.tier)
        } else {
            return Ok(());
        };

        // Early return if already in L1
        if current_tier == CacheTier::L1 {
            return Ok(());
        }

        // Give up on items that can never fit in L1; otherwise the eviction
        // loop below would spin forever once L1 is empty
        if size > self.config.l1_capacity_bytes {
            return Ok(());
        }

        // Make space in L1 if needed
        while self.l1_used + size > self.config.l1_capacity_bytes {
            self.evict_from_l1().await?;
        }

        // Remove from current tier
        match current_tier {
            CacheTier::L2 => {
                self.l2_used -= size;
                let _ = fs::remove_file(self.l2_path(&key)).await;
                self.stats.promotions_l2_to_l1 += 1;
            }
            CacheTier::L3 => {
                self.l3_used -= size;
                let _ = fs::remove_file(self.l3_path(&key)).await;
            }
            CacheTier::L1 => return Ok(()), // Already in L1 (unreachable after the check above)
        }

        // Add to L1
        self.l1.insert(key.clone(), data);
        self.l1_used += size;

        // Update metadata tier
        if let Some(meta) = self.metadata.get_mut(&key) {
            meta.tier = CacheTier::L1;
        }

        Ok(())
    }

    async fn promote_to_l2(&mut self, key: String, data: Vec<u8>) -> Result<(), TieredCacheError> {
        // Extract metadata without holding a mutable borrow
        let (size, current_tier) = if let Some(meta) = self.metadata.get(&key) {
            (meta.size_bytes, meta.tier)
        } else {
            return Ok(());
        };

        if current_tier == CacheTier::L3 {
            // Give up on items that can never fit in L2; otherwise the
            // eviction loop below would spin forever once L2 is empty
            if size > self.config.l2_capacity_bytes {
                return Ok(());
            }

            // Make space in L2 if needed
            while self.l2_used + size > self.config.l2_capacity_bytes {
                self.evict_from_l2().await?;
            }

            // Remove from L3
            self.l3_used -= size;
            let _ = fs::remove_file(self.l3_path(&key)).await;

            // Add to L2
            self.write_to_l2(&key, &data).await?;
            self.l2_used += size;

            // Update metadata tier
            if let Some(meta) = self.metadata.get_mut(&key) {
                meta.tier = CacheTier::L2;
            }

            self.stats.promotions_l3_to_l2 += 1;
        }

        Ok(())
    }

    async fn remove_from_tier(
        &mut self,
        key: &str,
        tier: CacheTier,
    ) -> Result<(), TieredCacheError> {
        if let Some(meta) = self.metadata.get(key) {
            match tier {
                CacheTier::L1 => {
                    self.l1.remove(key);
                    self.l1_used -= meta.size_bytes;
                }
                CacheTier::L2 => {
                    let _ = fs::remove_file(self.l2_path(key)).await;
                    self.l2_used -= meta.size_bytes;
                }
                CacheTier::L3 => {
                    let _ = fs::remove_file(self.l3_path(key)).await;
                    self.l3_used -= meta.size_bytes;
                }
            }
        }
        Ok(())
    }

    fn l2_path(&self, key: &str) -> PathBuf {
        self.config.l2_path.join(format!("{}.cache", key))
    }

    fn l3_path(&self, key: &str) -> PathBuf {
        self.config.l3_path.join(format!("{}.cache", key))
    }

    async fn write_to_l2(&self, key: &str, data: &[u8]) -> Result<(), TieredCacheError> {
        let path = self.l2_path(key);

        // Compress data if compression is enabled
        let write_data = if !self.config.compression.is_none() {
            self.compressor
                .borrow_mut()
                .compress(data)
                .map_err(|e| TieredCacheError::Io(std::io::Error::other(e)))?
        } else {
            data.to_vec()
        };

        let mut file = fs::File::create(path).await?;
        file.write_all(&write_data).await?;
        file.sync_all().await?;
        Ok(())
    }

    async fn write_to_l3(&self, key: &str, data: &[u8]) -> Result<(), TieredCacheError> {
        let path = self.l3_path(key);

        // Compress data if compression is enabled
        let write_data = if !self.config.compression.is_none() {
            self.compressor
                .borrow_mut()
                .compress(data)
                .map_err(|e| TieredCacheError::Io(std::io::Error::other(e)))?
        } else {
            data.to_vec()
        };

        let mut file = fs::File::create(path).await?;
        file.write_all(&write_data).await?;
        file.sync_all().await?;
        Ok(())
    }

    async fn read_from_l2(&self, key: &str) -> Result<Vec<u8>, TieredCacheError> {
        let path = self.l2_path(key);
        let mut file = fs::File::open(path).await?;
        let mut compressed_data = Vec::new();
        file.read_to_end(&mut compressed_data).await?;

        // Decompress if compression is enabled
        let data = if !self.config.compression.is_none() {
            self.compressor
                .borrow_mut()
                .decompress(&compressed_data)
                .map_err(|e| TieredCacheError::Io(std::io::Error::other(e)))?
        } else {
            compressed_data
        };

        Ok(data)
    }

    async fn read_from_l3(&self, key: &str) -> Result<Vec<u8>, TieredCacheError> {
        let path = self.l3_path(key);
        let mut file = fs::File::open(path).await?;
        let mut compressed_data = Vec::new();
        file.read_to_end(&mut compressed_data).await?;

        // Decompress if compression is enabled
        let data = if !self.config.compression.is_none() {
            self.compressor
                .borrow_mut()
                .decompress(&compressed_data)
                .map_err(|e| TieredCacheError::Io(std::io::Error::other(e)))?
        } else {
            compressed_data
        };

        Ok(data)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    async fn create_test_cache() -> (TempDir, TieredCache) {
        let temp_dir = TempDir::new().unwrap();
        let config = TieredCacheConfig {
            l1_capacity_bytes: 100,
            l2_capacity_bytes: 200,
            l3_capacity_bytes: 300,
            l2_path: temp_dir.path().join("l2"),
            l3_path: temp_dir.path().join("l3"),
            promotion_threshold: 2,
            compression: CompressionAlgorithm::None, // No compression in tests for predictable sizes
        };
        let cache = TieredCache::new(config).await.unwrap();
        (temp_dir, cache)
    }

    #[tokio::test]
    async fn test_tiered_cache_creation() {
        let (_temp, cache) = create_test_cache().await;
        assert_eq!(cache.l1_used, 0);
        assert_eq!(cache.l2_used, 0);
        assert_eq!(cache.l3_used, 0);
    }

    #[tokio::test]
    async fn test_put_and_get_l1() {
        let (_temp, mut cache) = create_test_cache().await;

        cache
            .put("key1".to_string(), b"small".to_vec())
            .await
            .unwrap();

        let data = cache.get("key1").await.unwrap();
        assert_eq!(data, Some(b"small".to_vec()));
        assert_eq!(cache.stats.l1_hits, 1);
    }

    #[tokio::test]
    async fn test_automatic_demotion() {
        let (_temp, mut cache) = create_test_cache().await;

        // Fill L1 beyond capacity
        cache.put("key1".to_string(), vec![1; 60]).await.unwrap();
        cache.put("key2".to_string(), vec![2; 60]).await.unwrap();

        // This should demote key1 to L2
        assert!(cache.stats.demotions_l1_to_l2 >= 1);
    }

    #[tokio::test]
    async fn test_promotion_on_access() {
        let (_temp, mut cache) = create_test_cache().await;

        // Fill L1 to force item to L2
        cache.put("key1".to_string(), vec![1; 60]).await.unwrap();
        cache.put("key2".to_string(), vec![2; 60]).await.unwrap();

        // Access key1 multiple times to trigger promotion
        let _ = cache.get("key1").await;
        let _ = cache.get("key1").await;
        let _ = cache.get("key1").await;

        // key1 should be promoted back to L1
        if let Some(meta) = cache.metadata.get("key1") {
            assert_eq!(meta.tier, CacheTier::L1);
        }
    }

    #[tokio::test]
    async fn test_hit_rate_calculation() {
        let (_temp, mut cache) = create_test_cache().await;

        cache
            .put("key1".to_string(), b"data".to_vec())
            .await
            .unwrap();

        let _ = cache.get("key1").await;
        let _ = cache.get("key1").await;
        let _ = cache.get("nonexistent").await;

        let hit_rate = cache.stats.overall_hit_rate();
        assert!((hit_rate - 0.666).abs() < 0.01);
    }

    #[tokio::test]
    async fn test_remove() {
        let (_temp, mut cache) = create_test_cache().await;

        cache
            .put("key1".to_string(), b"data".to_vec())
            .await
            .unwrap();
        assert!(cache.get("key1").await.unwrap().is_some());

        cache.remove("key1").await.unwrap();
        assert!(cache.get("key1").await.unwrap().is_none());
    }

    #[tokio::test]
    async fn test_warm_with_data() {
        let (_temp, mut cache) = create_test_cache().await;

        let warm_data = vec![
            ("key1".to_string(), b"data1".to_vec()),
            ("key2".to_string(), b"data2".to_vec()),
            ("key3".to_string(), b"data3".to_vec()),
        ];

        let warmed = cache.warm_with_data(warm_data).await.unwrap();
        assert_eq!(warmed, 3);

        assert!(cache.get("key1").await.unwrap().is_some());
        assert!(cache.get("key2").await.unwrap().is_some());
        assert!(cache.get("key3").await.unwrap().is_some());
    }

    #[tokio::test]
    async fn test_warm_from_keys() {
        let (_temp, mut cache) = create_test_cache().await;

        // Put some data in L2/L3 first
        cache.put("key1".to_string(), vec![0u8; 150]).await.unwrap();
        cache.put("key2".to_string(), vec![0u8; 150]).await.unwrap();

        // These should be in L2 or L3 now
        let _metadata_before = cache.metadata.clone();

        // Create a new cache instance
        let config = TieredCacheConfig {
            l1_capacity_bytes: 100,
            l2_capacity_bytes: 200,
            l3_capacity_bytes: 300,
            l2_path: cache.config.l2_path.clone(),
            l3_path: cache.config.l3_path.clone(),
            promotion_threshold: 2,
            compression: CompressionAlgorithm::None,
        };
        let mut new_cache = TieredCache::new(config).await.unwrap();

        // Warm from keys
        let keys = vec!["key1".to_string(), "key2".to_string()];
        let _warmed = new_cache.warm_from_keys(&keys).await.unwrap();
        // Warmed count may vary depending on file system state
    }

    #[tokio::test]
    async fn test_export_hot_keys() {
        let (_temp, mut cache) = create_test_cache().await;

        // Add some data with different access patterns
        cache
            .put("hot1".to_string(), b"data".to_vec())
            .await
            .unwrap();
        cache
            .put("hot2".to_string(), b"data".to_vec())
            .await
            .unwrap();
        cache
            .put("cold".to_string(), b"data".to_vec())
            .await
            .unwrap();

        // Access hot keys multiple times
        for _ in 0..5 {
            let _ = cache.get("hot1").await;
        }
        for _ in 0..3 {
            let _ = cache.get("hot2").await;
        }
        let _ = cache.get("cold").await;

        // Export top 2 hot keys
        let hot_keys = cache.export_hot_keys(2);
        assert_eq!(hot_keys.len(), 2);
        assert!(hot_keys.contains(&"hot1".to_string()));
        assert!(hot_keys.contains(&"hot2".to_string()));
    }

    #[tokio::test]
    async fn test_len_and_is_empty() {
        let (_temp, mut cache) = create_test_cache().await;

        assert!(cache.is_empty());
        assert_eq!(cache.len(), 0);

        cache
            .put("key1".to_string(), b"data".to_vec())
            .await
            .unwrap();
        assert!(!cache.is_empty());
        assert_eq!(cache.len(), 1);

        cache
            .put("key2".to_string(), b"data".to_vec())
            .await
            .unwrap();
        assert_eq!(cache.len(), 2);

        cache.remove("key1").await.unwrap();
        assert_eq!(cache.len(), 1);
    }
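
    // The two tests below are additional sketches of the placement cascade.
    // The exact byte sizes are illustrative and only assume the small
    // capacities configured in create_test_cache (L1 = 100 B, L2 = 200 B,
    // L3 = 300 B).

    #[tokio::test]
    async fn test_oversized_item_skips_l1() {
        let (_temp, mut cache) = create_test_cache().await;

        // 150 bytes cannot fit in the 100-byte L1, so the item should be
        // placed directly in L2
        cache.put("big".to_string(), vec![0u8; 150]).await.unwrap();

        if let Some(meta) = cache.metadata.get("big") {
            assert_eq!(meta.tier, CacheTier::L2);
        }
        assert_eq!(cache.l1_used, 0);
        assert_eq!(cache.l2_used, 150);
    }

    #[tokio::test]
    async fn test_put_overwrites_existing_key() {
        let (_temp, mut cache) = create_test_cache().await;

        cache
            .put("key1".to_string(), b"first".to_vec())
            .await
            .unwrap();
        cache
            .put("key1".to_string(), b"second!".to_vec())
            .await
            .unwrap();

        // The old entry is removed first, so usage reflects only the new value
        assert_eq!(cache.len(), 1);
        assert_eq!(cache.l1_used, 7);
        assert_eq!(
            cache.get("key1").await.unwrap(),
            Some(b"second!".to_vec())
        );
    }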
}