// things3_core/disk_cache.rs

1//! L2 Disk cache implementation using `SQLite` for persistent caching
2
3use anyhow::Result;
4use chrono::{DateTime, Utc};
5use rusqlite::{params, Connection};
6use serde::{Deserialize, Serialize};
7use std::path::Path;
8use std::sync::Arc;
9use std::time::Duration;
10use tokio::sync::RwLock;
11use tracing::{debug, error, info};
12
/// L2 Disk cache configuration
///
/// Controls where the `SQLite` cache database lives and how entries are
/// sized, compressed and expired.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DiskCacheConfig {
    /// Path of the `SQLite` database file backing the cache
    pub db_path: String,
    /// Maximum total payload size in bytes before LRU eviction trims the cache
    pub max_size: u64,
    /// Time to live applied to each entry at store time
    /// (persisted with whole-second resolution via `Duration::as_secs`,
    /// so sub-second TTLs truncate to 0 and expire immediately)
    pub ttl: Duration,
    /// Whether payloads are zstd-compressed before being written
    pub compression: bool,
    /// How often the background cleanup task runs
    pub cleanup_interval: Duration,
    /// Maximum number of entries
    /// (NOTE(review): declared but never read by the cache implementation
    /// in this file — confirm whether entry-count limiting is intended)
    pub max_entries: usize,
}
29
30impl Default for DiskCacheConfig {
31    fn default() -> Self {
32        Self {
33            db_path: "cache.db".to_string(),
34            max_size: 100 * 1024 * 1024,    // 100MB
35            ttl: Duration::from_secs(3600), // 1 hour
36            compression: true,
37            cleanup_interval: Duration::from_secs(300), // 5 minutes
38            max_entries: 10000,
39        }
40    }
41}
42
/// Disk cache entry
///
/// In-memory representation of one row of the `cache_entries` table.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DiskCacheEntry {
    /// Primary key of the row
    pub key: String,
    /// JSON-serialized payload, zstd-compressed when `compressed` is true
    pub data: Vec<u8>,
    /// When the entry was written
    pub created_at: DateTime<Utc>,
    /// When the entry was last read
    pub last_accessed: DateTime<Utc>,
    /// Number of times the entry has been read back
    pub access_count: u64,
    /// Size of `data` in bytes
    pub size_bytes: usize,
    /// Whether `data` is zstd-compressed
    pub compressed: bool,
    /// Logical grouping of the entry
    pub cache_type: String, // "tasks", "projects", "areas", "search_results"
}
55
/// Disk cache statistics
///
/// Aggregated counters over live (non-expired) entries, plus
/// process-lifetime hit/miss counts maintained by `DiskCache::get`.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct DiskCacheStats {
    /// Number of non-expired entries
    pub total_entries: u64,
    /// Total payload bytes across non-expired entries
    pub total_size_bytes: u64,
    /// Lookups that returned data
    pub hits: u64,
    /// Lookups that found nothing (or only expired data)
    pub misses: u64,
    /// hits / (hits + misses); 0.0 before any lookup
    pub hit_rate: f64,
    /// Non-expired entries stored compressed
    pub compressed_entries: u64,
    /// Non-expired entries stored uncompressed
    pub uncompressed_entries: u64,
}
67
68impl DiskCacheStats {
69    pub fn calculate_hit_rate(&mut self) {
70        let total = self.hits + self.misses;
71        self.hit_rate = if total > 0 {
72            #[allow(clippy::cast_precision_loss)]
73            {
74                self.hits as f64 / total as f64
75            }
76        } else {
77            0.0
78        };
79    }
80}
81
/// L2 Disk cache implementation
///
/// Persists serialized values to a `SQLite` database and tracks hit/miss
/// statistics. A background tokio task periodically evicts expired and
/// oversized entries.
pub struct DiskCache {
    // Static configuration: database path, limits, TTL.
    config: DiskCacheConfig,
    // Hit/miss counters shared with the async accessors.
    stats: Arc<RwLock<DiskCacheStats>>,
    // Handle to the periodic cleanup task; aborted in `Drop`.
    cleanup_task: Option<tokio::task::JoinHandle<()>>,
}
88
89impl DiskCache {
90    /// Create a new disk cache
91    ///
92    /// # Errors
93    ///
94    /// Returns an error if the database connection fails or if the cache cannot be initialized
95    pub async fn new(config: DiskCacheConfig) -> Result<Self> {
96        let db_path = Path::new(&config.db_path);
97
98        // Ensure parent directory exists
99        if let Some(parent) = db_path.parent() {
100            tokio::fs::create_dir_all(parent).await?;
101        }
102
103        // Initialize database
104        Self::init_database(&config.db_path)?;
105
106        let mut cache = Self {
107            config,
108            stats: Arc::new(RwLock::new(DiskCacheStats::default())),
109            cleanup_task: None,
110        };
111
112        // Start cleanup task
113        cache.start_cleanup_task();
114
115        // Load initial statistics
116        cache.update_stats().await?;
117
118        Ok(cache)
119    }
120
121    /// Initialize the cache database
122    fn init_database(db_path: &str) -> Result<()> {
123        let conn = Connection::open(db_path)?;
124
125        // Create cache entries table
126        conn.execute(
127            r"
128            CREATE TABLE IF NOT EXISTS cache_entries (
129                key TEXT PRIMARY KEY,
130                data BLOB NOT NULL,
131                created_at INTEGER NOT NULL,
132                last_accessed INTEGER NOT NULL,
133                access_count INTEGER NOT NULL DEFAULT 0,
134                size_bytes INTEGER NOT NULL,
135                compressed BOOLEAN NOT NULL DEFAULT 0,
136                cache_type TEXT NOT NULL,
137                ttl INTEGER NOT NULL
138            )
139            ",
140            [],
141        )?;
142
143        // Create indexes for better performance
144        conn.execute(
145            "CREATE INDEX IF NOT EXISTS idx_cache_created_at ON cache_entries(created_at)",
146            [],
147        )?;
148        conn.execute(
149            "CREATE INDEX IF NOT EXISTS idx_cache_last_accessed ON cache_entries(last_accessed)",
150            [],
151        )?;
152        conn.execute(
153            "CREATE INDEX IF NOT EXISTS idx_cache_type ON cache_entries(cache_type)",
154            [],
155        )?;
156        conn.execute(
157            "CREATE INDEX IF NOT EXISTS idx_cache_ttl ON cache_entries(ttl)",
158            [],
159        )?;
160
161        info!("Disk cache database initialized at: {}", db_path);
162        Ok(())
163    }
164
165    /// Start the cleanup background task
166    fn start_cleanup_task(&mut self) {
167        let config = self.config.clone();
168
169        let handle = tokio::spawn(async move {
170            let mut interval = tokio::time::interval(config.cleanup_interval);
171            loop {
172                interval.tick().await;
173
174                if let Err(e) = Self::cleanup_expired_entries(&config) {
175                    error!("Failed to cleanup expired cache entries: {}", e);
176                }
177
178                if let Err(e) = Self::cleanup_oversized_entries(&config) {
179                    error!("Failed to cleanup oversized cache entries: {}", e);
180                }
181            }
182        });
183
184        self.cleanup_task = Some(handle);
185    }
186
187    /// Cleanup expired entries
188    fn cleanup_expired_entries(config: &DiskCacheConfig) -> Result<()> {
189        let conn = Connection::open(&config.db_path)?;
190        let now = Utc::now().timestamp();
191        #[allow(clippy::cast_possible_wrap)]
192        let ttl_seconds = config.ttl.as_secs() as i64;
193
194        let deleted = conn.execute(
195            "DELETE FROM cache_entries WHERE created_at + ttl < ?",
196            params![now - ttl_seconds],
197        )?;
198
199        if deleted > 0 {
200            debug!("Cleaned up {} expired cache entries", deleted);
201        }
202
203        Ok(())
204    }
205
206    /// Cleanup oversized entries
207    #[allow(clippy::cast_sign_loss)]
208    fn cleanup_oversized_entries(config: &DiskCacheConfig) -> Result<()> {
209        let conn = Connection::open(&config.db_path)?;
210
211        // Get current total size
212        let total_size: i64 = conn.query_row(
213            "SELECT COALESCE(SUM(size_bytes), 0) FROM cache_entries",
214            [],
215            |row| row.get(0),
216        )?;
217
218        #[allow(clippy::cast_sign_loss)]
219        if total_size as u64 <= config.max_size {
220            return Ok(());
221        }
222
223        // Remove oldest entries until we're under the size limit
224        let mut deleted = 0;
225        #[allow(
226            clippy::cast_possible_truncation,
227            clippy::cast_sign_loss,
228            clippy::cast_precision_loss
229        )]
230        let target_size = (config.max_size as f64 * 0.8) as u64; // Remove to 80% of max size
231
232        #[allow(clippy::cast_sign_loss)]
233        let mut current_size = total_size as u64;
234        while current_size > target_size {
235            let result = conn.execute(
236                "DELETE FROM cache_entries WHERE key IN (
237                    SELECT key FROM cache_entries 
238                    ORDER BY last_accessed ASC 
239                    LIMIT 100
240                )",
241                [],
242            )?;
243
244            if result == 0 {
245                break; // No more entries to delete
246            }
247
248            deleted += result;
249
250            // Check new total size
251            let new_total_size: i64 = conn.query_row(
252                "SELECT COALESCE(SUM(size_bytes), 0) FROM cache_entries",
253                [],
254                |row| row.get(0),
255            )?;
256
257            current_size = new_total_size as u64;
258            if current_size <= target_size {
259                break;
260            }
261        }
262
263        if deleted > 0 {
264            debug!("Cleaned up {} oversized cache entries", deleted);
265        }
266
267        Ok(())
268    }
269
270    /// Store data in the disk cache
271    ///
272    /// # Errors
273    ///
274    /// This function will return an error if:
275    /// - Serialization fails
276    /// - Compression fails (if enabled)
277    /// - Database operations fail
278    /// - File I/O operations fail
279    pub fn store<T>(&self, key: &str, data: &T, cache_type: &str) -> Result<()>
280    where
281        T: Serialize,
282    {
283        let serialized = if self.config.compression {
284            // Compress the data
285            let json_data = serde_json::to_vec(data)?;
286            zstd::encode_all(&json_data[..], 3)?
287        } else {
288            serde_json::to_vec(data)?
289        };
290
291        let size_bytes = serialized.len();
292        let entry = DiskCacheEntry {
293            key: key.to_string(),
294            data: serialized,
295            created_at: Utc::now(),
296            last_accessed: Utc::now(),
297            access_count: 0,
298            size_bytes,
299            compressed: self.config.compression,
300            cache_type: cache_type.to_string(),
301        };
302
303        let conn = Connection::open(&self.config.db_path)?;
304        let _now = Utc::now().timestamp();
305        #[allow(clippy::cast_possible_wrap)]
306        let ttl_seconds = self.config.ttl.as_secs() as i64;
307
308        conn.execute(
309            r"
310            INSERT OR REPLACE INTO cache_entries 
311            (key, data, created_at, last_accessed, access_count, size_bytes, compressed, cache_type, ttl)
312            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
313            ",
314            params![
315                entry.key,
316                entry.data,
317                entry.created_at.timestamp(),
318                entry.last_accessed.timestamp(),
319                entry.access_count,
320                entry.size_bytes,
321                entry.compressed,
322                entry.cache_type,
323                ttl_seconds
324            ],
325        )?;
326
327        debug!(
328            "Stored cache entry: {} ({} bytes, compressed: {})",
329            key, entry.size_bytes, entry.compressed
330        );
331
332        Ok(())
333    }
334
335    /// Retrieve data from the disk cache
336    ///
337    /// # Errors
338    ///
339    /// This function will return an error if:
340    /// - Database operations fail
341    /// - Deserialization fails
342    /// - Decompression fails (if data was compressed)
343    pub async fn get<T>(&self, key: &str) -> Result<Option<T>>
344    where
345        T: for<'de> Deserialize<'de>,
346    {
347        let conn = Connection::open(&self.config.db_path)?;
348        let now = Utc::now().timestamp();
349
350        let mut stmt = conn.prepare(
351            r"
352            SELECT data, compressed, created_at, ttl, access_count
353            FROM cache_entries 
354            WHERE key = ? AND created_at + ttl > ?
355            ",
356        )?;
357
358        let mut rows = stmt.query(params![key, now])?;
359
360        if let Some(row) = rows.next()? {
361            let data: Vec<u8> = row.get(0)?;
362            let compressed: bool = row.get(1)?;
363            let access_count: i64 = row.get(4)?;
364
365            // Update access count and last accessed time
366            conn.execute(
367                "UPDATE cache_entries SET access_count = ?, last_accessed = ? WHERE key = ?",
368                params![access_count + 1, now, key],
369            )?;
370
371            // Deserialize the data
372            let deserialized = if compressed {
373                let decompressed = zstd::decode_all(&data[..])?;
374                serde_json::from_slice(&decompressed)?
375            } else {
376                serde_json::from_slice(&data)?
377            };
378
379            // Update statistics
380            {
381                let mut stats = self.stats.write().await;
382                stats.hits += 1;
383                stats.calculate_hit_rate();
384            }
385
386            debug!("Cache hit for key: {}", key);
387            Ok(Some(deserialized))
388        } else {
389            // Update statistics
390            {
391                let mut stats = self.stats.write().await;
392                stats.misses += 1;
393                stats.calculate_hit_rate();
394            }
395
396            debug!("Cache miss for key: {}", key);
397            Ok(None)
398        }
399    }
400
401    /// Remove an entry from the disk cache
402    ///
403    /// # Errors
404    ///
405    /// This function will return an error if database operations fail
406    pub fn remove(&self, key: &str) -> Result<bool> {
407        let conn = Connection::open(&self.config.db_path)?;
408        let deleted = conn.execute("DELETE FROM cache_entries WHERE key = ?", params![key])?;
409        Ok(deleted > 0)
410    }
411
412    /// Clear all entries from the disk cache
413    ///
414    /// # Errors
415    ///
416    /// This function will return an error if database operations fail
417    pub fn clear(&self) -> Result<()> {
418        let conn = Connection::open(&self.config.db_path)?;
419        conn.execute("DELETE FROM cache_entries", [])?;
420        info!("Cleared all disk cache entries");
421        Ok(())
422    }
423
424    /// Clear entries by cache type
425    ///
426    /// # Errors
427    ///
428    /// This function will return an error if database operations fail
429    pub fn clear_by_type(&self, cache_type: &str) -> Result<()> {
430        let conn = Connection::open(&self.config.db_path)?;
431        let deleted = conn.execute(
432            "DELETE FROM cache_entries WHERE cache_type = ?",
433            params![cache_type],
434        )?;
435        debug!("Cleared {} entries of type: {}", deleted, cache_type);
436        Ok(())
437    }
438
439    /// Get cache statistics
440    pub async fn get_stats(&self) -> DiskCacheStats {
441        self.update_stats().await.ok();
442        self.stats.read().await.clone()
443    }
444
445    /// Update cache statistics
446    async fn update_stats(&self) -> Result<()> {
447        let conn = Connection::open(&self.config.db_path)?;
448        let now = Utc::now().timestamp();
449
450        // Get total entries and size
451        let (total_entries, total_size): (i64, i64) = conn.query_row(
452            "SELECT COUNT(*), COALESCE(SUM(size_bytes), 0) FROM cache_entries WHERE created_at + ttl > ?",
453            params![now],
454            |row| Ok((row.get(0)?, row.get(1)?)),
455        )?;
456
457        // Get compressed/uncompressed counts
458        let compressed_entries: i64 = conn.query_row(
459            "SELECT COUNT(*) FROM cache_entries WHERE compressed = 1 AND created_at + ttl > ?",
460            params![now],
461            |row| row.get(0),
462        )?;
463
464        let uncompressed_entries = total_entries - compressed_entries;
465
466        let mut stats = self.stats.write().await;
467        #[allow(clippy::cast_sign_loss)]
468        {
469            stats.total_entries = total_entries as u64;
470            stats.total_size_bytes = total_size as u64;
471            stats.compressed_entries = compressed_entries as u64;
472            stats.uncompressed_entries = uncompressed_entries as u64;
473        }
474
475        Ok(())
476    }
477
478    /// Get cache size in bytes
479    ///
480    /// # Errors
481    ///
482    /// This function will return an error if database operations fail
483    pub fn get_size(&self) -> Result<u64> {
484        let conn = Connection::open(&self.config.db_path)?;
485        let now = Utc::now().timestamp();
486
487        let size: i64 = conn.query_row(
488            "SELECT COALESCE(SUM(size_bytes), 0) FROM cache_entries WHERE created_at + ttl > ?",
489            params![now],
490            |row| row.get(0),
491        )?;
492
493        #[allow(clippy::cast_sign_loss)]
494        Ok(size as u64)
495    }
496
497    /// Check if cache is full
498    ///
499    /// # Errors
500    ///
501    /// This function will return an error if database operations fail
502    pub fn is_full(&self) -> Result<bool> {
503        let current_size = self.get_size()?;
504        Ok(current_size >= self.config.max_size)
505    }
506
507    /// Get cache utilization percentage
508    ///
509    /// # Errors
510    ///
511    /// This function will return an error if database operations fail
512    pub fn get_utilization(&self) -> Result<f64> {
513        let current_size = self.get_size()?;
514        #[allow(clippy::cast_precision_loss)]
515        Ok((current_size as f64 / self.config.max_size as f64) * 100.0)
516    }
517}
518
519impl Drop for DiskCache {
520    fn drop(&mut self) {
521        if let Some(handle) = self.cleanup_task.take() {
522            handle.abort();
523        }
524    }
525}
526
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    // Round-trip store/get, a miss, and removal against a real temp database.
    #[tokio::test]
    async fn test_disk_cache_basic_operations() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024, // 1MB
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // Test storing and retrieving data
        let test_data = vec!["hello".to_string(), "world".to_string()];
        cache.store("test_key", &test_data, "test").unwrap();

        let retrieved: Option<Vec<String>> = cache.get("test_key").await.unwrap();
        assert_eq!(retrieved, Some(test_data));

        // Test cache miss
        let missing: Option<Vec<String>> = cache.get("missing_key").await.unwrap();
        assert_eq!(missing, None);

        // Test removal
        let removed = cache.remove("test_key").unwrap();
        assert!(removed);

        let after_removal: Option<Vec<String>> = cache.get("test_key").await.unwrap();
        assert_eq!(after_removal, None);
    }

    // Round-trip with compression enabled (zstd encode/decode path).
    #[tokio::test]
    async fn test_disk_cache_compression() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_compressed.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024, // 1MB
            ttl: Duration::from_secs(60),
            compression: true,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // Test storing and retrieving compressed data
        let test_data = vec![
            "hello".to_string(),
            "world".to_string(),
            "this".to_string(),
            "is".to_string(),
            "a".to_string(),
            "test".to_string(),
        ];
        cache.store("compressed_key", &test_data, "test").unwrap();

        let retrieved: Option<Vec<String>> = cache.get("compressed_key").await.unwrap();
        assert_eq!(retrieved, Some(test_data));
    }

    // Hits, misses, and hit-rate bookkeeping via get_stats().
    #[tokio::test]
    async fn test_disk_cache_statistics() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_stats.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024, // 1MB
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // Store some data
        cache.store("key1", &vec!["data1"], "test").unwrap();
        cache.store("key2", &vec!["data2"], "test").unwrap();

        // Retrieve data to generate hits
        let _: Option<Vec<String>> = cache.get("key1").await.unwrap();
        let _: Option<Vec<String>> = cache.get("key2").await.unwrap();

        // Try to get non-existent key for miss
        let _: Option<Vec<String>> = cache.get("missing").await.unwrap();

        let stats = cache.get_stats().await;
        assert_eq!(stats.total_entries, 2);
        assert!(stats.hits >= 2);
        assert!(stats.misses >= 1);
        assert!(stats.hit_rate > 0.0);
    }

    // clear() removes every entry regardless of type.
    #[tokio::test]
    async fn test_disk_cache_clear() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_clear.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024,
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // Store some data
        cache.store("key1", &vec!["data1"], "test").unwrap();
        cache.store("key2", &vec!["data2"], "test").unwrap();

        // Verify data exists
        let stats_before = cache.get_stats().await;
        assert_eq!(stats_before.total_entries, 2);

        // Clear all data
        cache.clear().unwrap();

        // Verify data is gone
        let stats_after = cache.get_stats().await;
        assert_eq!(stats_after.total_entries, 0);

        // Verify individual keys are gone
        let missing: Option<Vec<String>> = cache.get("key1").await.unwrap();
        assert_eq!(missing, None);
    }

    // clear_by_type() removes only entries with a matching cache_type.
    #[tokio::test]
    async fn test_disk_cache_clear_by_type() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_clear_by_type.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024,
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // Store data with different cache types
        cache.store("key1", &vec!["data1"], "type1").unwrap();
        cache.store("key2", &vec!["data2"], "type1").unwrap();
        cache.store("key3", &vec!["data3"], "type2").unwrap();

        // Clear only type1
        cache.clear_by_type("type1").unwrap();

        // Verify type1 keys are gone
        let missing1: Option<Vec<String>> = cache.get("key1").await.unwrap();
        let missing2: Option<Vec<String>> = cache.get("key2").await.unwrap();
        assert_eq!(missing1, None);
        assert_eq!(missing2, None);

        // Verify type2 key still exists
        let existing: Option<Vec<String>> = cache.get("key3").await.unwrap();
        assert_eq!(existing, Some(vec!["data3".to_string()]));
    }

    // get_size() reflects stored payload bytes (0 when empty).
    #[tokio::test]
    async fn test_disk_cache_get_size() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_size.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024,
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // Initially empty
        let initial_size = cache.get_size().unwrap();
        assert_eq!(initial_size, 0);

        // Store some data
        cache.store("key1", &vec!["data1"], "test").unwrap();
        cache.store("key2", &vec!["data2"], "test").unwrap();

        // Size should be greater than 0
        let size_after_store = cache.get_size().unwrap();
        assert!(size_after_store > 0);
    }

    // is_full() flips once stored bytes reach the (tiny) max_size.
    #[tokio::test]
    async fn test_disk_cache_is_full() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_full.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 100, // Very small size
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // Initially not full
        let initially_full = cache.is_full().unwrap();
        assert!(!initially_full);

        // Store data until full
        for i in 0..10 {
            let data = vec![format!("data_{}", i); 100]; // Large data
            cache.store(&format!("key{i}"), &data, "test").unwrap();
        }

        // Should be full now
        let is_full = cache.is_full().unwrap();
        assert!(is_full);
    }

    // get_utilization() is 0% when empty and within (0, 100] after a store.
    #[tokio::test]
    async fn test_disk_cache_get_utilization() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_utilization.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1000, // 1KB
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // Initially 0% utilization
        let initial_utilization = cache.get_utilization().unwrap();
        assert!((initial_utilization - 0.0).abs() < f64::EPSILON);

        // Store some data
        cache.store("key1", &vec!["data1"], "test").unwrap();

        // Utilization should be > 0%
        let utilization = cache.get_utilization().unwrap();
        assert!(utilization > 0.0);
        assert!(utilization <= 100.0);
    }

    // Entries become unreadable after their TTL passes; relies on get()'s
    // own `created_at + ttl > now` filter as well as the manual cleanup.
    #[tokio::test]
    async fn test_disk_cache_ttl_expiration() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_ttl.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024,
            ttl: Duration::from_millis(1000), // 1 second TTL
            compression: false,
            cleanup_interval: Duration::from_millis(50),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // Store data
        cache.store("key1", &vec!["data1"], "test").unwrap();

        // Data should exist initially
        let initial: Option<Vec<String>> = cache.get("key1").await.unwrap();
        assert_eq!(initial, Some(vec!["data1".to_string()]));

        // Wait for TTL to expire
        tokio::time::sleep(Duration::from_millis(1200)).await;

        // Manually trigger cleanup to ensure expired entries are removed
        DiskCache::cleanup_expired_entries(&cache.config).unwrap();

        // Data should be expired
        let expired: Option<Vec<String>> = cache.get("key1").await.unwrap();
        assert_eq!(expired, None);
    }

    // Manual expired-entry cleanup empties the table once TTL has elapsed.
    // NOTE(review): a 100ms TTL truncates to 0 whole seconds when stored,
    // so these entries are expired essentially immediately.
    #[tokio::test]
    async fn test_disk_cache_cleanup_expired_entries() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_cleanup.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024,
            ttl: Duration::from_millis(100),
            compression: false,
            cleanup_interval: Duration::from_millis(50),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // Store data
        cache.store("key1", &vec!["data1"], "test").unwrap();
        cache.store("key2", &vec!["data2"], "test").unwrap();

        // Wait for TTL to expire
        tokio::time::sleep(Duration::from_millis(200)).await;

        // Manually trigger cleanup
        DiskCache::cleanup_expired_entries(&cache.config).unwrap();

        // Data should be cleaned up
        let stats = cache.get_stats().await;
        assert_eq!(stats.total_entries, 0);
    }

    // Oversized cleanup evicts entries once total size exceeds max_size.
    #[tokio::test]
    async fn test_disk_cache_cleanup_oversized_entries() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_oversized.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 100, // Very small size
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // Store oversized data
        let large_data = vec!["data"; 1000]; // Very large data
        cache.store("key1", &large_data, "test").unwrap();

        // Manually trigger cleanup
        DiskCache::cleanup_oversized_entries(&cache.config).unwrap();

        // Data should be cleaned up
        let stats = cache.get_stats().await;
        assert_eq!(stats.total_entries, 0);
    }

    // Missing keys and missing removals are Ok(None)/Ok(false), not errors.
    #[tokio::test]
    async fn test_disk_cache_error_handling() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_errors.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024,
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // Test storing with invalid data (this should work fine)
        let valid_data = vec!["valid".to_string()];
        let result = cache.store("valid_key", &valid_data, "test");
        assert!(result.is_ok());

        // Test getting non-existent key (should return None, not error)
        let missing: Option<Vec<String>> = cache.get("missing_key").await.unwrap();
        assert_eq!(missing, None);

        // Test removing non-existent key (should return false, not error)
        let removed = cache.remove("missing_key").unwrap();
        assert!(!removed);
    }

    // Sequential store/get cycles; despite the name this is not truly
    // concurrent — operations run one after another on a single task.
    #[tokio::test]
    async fn test_disk_cache_concurrent_access() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_concurrent.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024,
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // Test sequential operations
        for i in 0..5 {
            let key = format!("key_{i}");
            let data = vec![format!("data_{}", i)];

            // Store data
            cache.store(&key, &data, "test").unwrap();

            // Retrieve data
            let retrieved: Option<Vec<String>> = cache.get(&key).await.unwrap();
            assert_eq!(retrieved, Some(data));
        }

        // Verify all data is still there
        let stats = cache.get_stats().await;
        assert_eq!(stats.total_entries, 5);
    }
}