use anyhow::Result;
use chrono::{DateTime, Utc};
use rusqlite::{params, Connection};
use serde::{Deserialize, Serialize};
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::RwLock;
use tracing::{debug, error, info};

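/// Configuration for the SQLite-backed disk cache.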
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DiskCacheConfig {
    /// Path to the SQLite database file.
    pub db_path: String,
    /// Maximum total size of cached data, in bytes.
    pub max_size: u64,
    /// Time-to-live for cache entries.
    pub ttl: Duration,
    /// Whether to compress entry payloads with zstd.
    pub compression: bool,
    /// How often the background cleanup task runs.
    pub cleanup_interval: Duration,
    /// Maximum number of entries (not currently enforced in this module).
    pub max_entries: usize,
}

impl Default for DiskCacheConfig {
    fn default() -> Self {
        Self {
            db_path: "cache.db".to_string(),
            max_size: 100 * 1024 * 1024, // 100 MiB
            ttl: Duration::from_secs(3600),
            compression: true,
            cleanup_interval: Duration::from_secs(300),
            max_entries: 10_000,
        }
    }
}

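/// A single cache entry as stored on disk.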
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DiskCacheEntry {
    pub key: String,
    pub data: Vec<u8>,
    pub created_at: DateTime<Utc>,
    pub last_accessed: DateTime<Utc>,
    pub access_count: u64,
    pub size_bytes: usize,
    pub compressed: bool,
    pub cache_type: String,
}

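/// Aggregate hit/miss and size statistics for the cache.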
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct DiskCacheStats {
    pub total_entries: u64,
    pub total_size_bytes: u64,
    pub hits: u64,
    pub misses: u64,
    pub hit_rate: f64,
    pub compressed_entries: u64,
    pub uncompressed_entries: u64,
}

impl DiskCacheStats {
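    /// Recompute `hit_rate` from the current hit and miss counts.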
    pub fn calculate_hit_rate(&mut self) {
        let total = self.hits + self.misses;
        self.hit_rate = if total > 0 {
            #[allow(clippy::cast_precision_loss)]
            {
                self.hits as f64 / total as f64
            }
        } else {
            0.0
        };
    }
}

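/// A persistent, SQLite-backed cache with optional zstd compression and a
/// background task that evicts expired and least-recently-used entries.
///
/// A minimal usage sketch (the import path is illustrative, so the example
/// is not compiled as a doc test):
///
/// ```ignore
/// use crate::disk_cache::{DiskCache, DiskCacheConfig};
///
/// #[tokio::main]
/// async fn main() -> anyhow::Result<()> {
///     let cache = DiskCache::new(DiskCacheConfig::default()).await?;
///     cache.store("greeting", &vec!["hello".to_string()], "demo")?;
///     let hit: Option<Vec<String>> = cache.get("greeting").await?;
///     assert_eq!(hit, Some(vec!["hello".to_string()]));
///     Ok(())
/// }
/// ```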
pub struct DiskCache {
    config: DiskCacheConfig,
    stats: Arc<RwLock<DiskCacheStats>>,
    cleanup_task: Option<tokio::task::JoinHandle<()>>,
}

impl DiskCache {
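    /// Create a new disk cache, initializing the SQLite database and
    /// spawning the background cleanup task.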
    pub async fn new(config: DiskCacheConfig) -> Result<Self> {
        let db_path = Path::new(&config.db_path);

        // Make sure the parent directory for the database file exists.
        if let Some(parent) = db_path.parent() {
            tokio::fs::create_dir_all(parent).await?;
        }

        // Create the schema and indexes on first use.
        Self::init_database(&config.db_path)?;

        let mut cache = Self {
            config,
            stats: Arc::new(RwLock::new(DiskCacheStats::default())),
            cleanup_task: None,
        };

        // Start the periodic cleanup task and seed the statistics.
        cache.start_cleanup_task();
        cache.update_stats().await?;

        Ok(cache)
    }

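    /// Create the cache table and its indexes if they do not already exist.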
    fn init_database(db_path: &str) -> Result<()> {
        let conn = Connection::open(db_path)?;

        conn.execute(
            r"
            CREATE TABLE IF NOT EXISTS cache_entries (
                key TEXT PRIMARY KEY,
                data BLOB NOT NULL,
                created_at INTEGER NOT NULL,
                last_accessed INTEGER NOT NULL,
                access_count INTEGER NOT NULL DEFAULT 0,
                size_bytes INTEGER NOT NULL,
                compressed BOOLEAN NOT NULL DEFAULT 0,
                cache_type TEXT NOT NULL,
                ttl INTEGER NOT NULL
            )
            ",
            [],
        )?;

        // Indexes for expiration checks, LRU eviction, and type-based clears.
        conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_cache_created_at ON cache_entries(created_at)",
            [],
        )?;
        conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_cache_last_accessed ON cache_entries(last_accessed)",
            [],
        )?;
        conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_cache_type ON cache_entries(cache_type)",
            [],
        )?;
        conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_cache_ttl ON cache_entries(ttl)",
            [],
        )?;

        info!("Disk cache database initialized at: {}", db_path);
        Ok(())
    }

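    /// Spawn the background task that periodically evicts expired entries
    /// and shrinks the cache when it grows past `max_size`.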
    fn start_cleanup_task(&mut self) {
        let config = self.config.clone();

        let handle = tokio::spawn(async move {
            let mut interval = tokio::time::interval(config.cleanup_interval);
            loop {
                interval.tick().await;

                if let Err(e) = Self::cleanup_expired_entries(&config) {
                    error!("Failed to cleanup expired cache entries: {}", e);
                }

                if let Err(e) = Self::cleanup_oversized_entries(&config) {
                    error!("Failed to cleanup oversized cache entries: {}", e);
                }
            }
        });

        self.cleanup_task = Some(handle);
    }

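    /// Delete entries whose per-entry TTL has elapsed.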
    fn cleanup_expired_entries(config: &DiskCacheConfig) -> Result<()> {
        let conn = Connection::open(&config.db_path)?;
        let now = Utc::now().timestamp();

        // An entry is expired once `created_at + ttl` (the per-entry TTL
        // recorded at insert time) is in the past, matching the lookup
        // and statistics queries.
        let deleted = conn.execute(
            "DELETE FROM cache_entries WHERE created_at + ttl < ?",
            params![now],
        )?;

        if deleted > 0 {
            debug!("Cleaned up {} expired cache entries", deleted);
        }

        Ok(())
    }

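    /// Evict least-recently-used entries until the total size drops to
    /// roughly 80% of `max_size`.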
    #[allow(
        clippy::cast_sign_loss,
        clippy::cast_precision_loss,
        clippy::cast_possible_truncation
    )]
    fn cleanup_oversized_entries(config: &DiskCacheConfig) -> Result<()> {
        let conn = Connection::open(&config.db_path)?;

        let total_size: i64 = conn.query_row(
            "SELECT COALESCE(SUM(size_bytes), 0) FROM cache_entries",
            [],
            |row| row.get(0),
        )?;

        if total_size as u64 <= config.max_size {
            return Ok(());
        }

        // Evict down to 80% of the limit so cleanup doesn't run on every pass.
        let target_size = (config.max_size as f64 * 0.8) as u64;
        let mut current_size = total_size as u64;
        let mut deleted = 0;

        while current_size > target_size {
            // Drop the 100 least-recently-accessed entries per iteration.
            let result = conn.execute(
                "DELETE FROM cache_entries WHERE key IN (
                    SELECT key FROM cache_entries
                    ORDER BY last_accessed ASC
                    LIMIT 100
                )",
                [],
            )?;

            if result == 0 {
                break;
            }

            deleted += result;

            let new_total_size: i64 = conn.query_row(
                "SELECT COALESCE(SUM(size_bytes), 0) FROM cache_entries",
                [],
                |row| row.get(0),
            )?;

            current_size = new_total_size as u64;
        }

        if deleted > 0 {
            debug!("Cleaned up {} oversized cache entries", deleted);
        }

        Ok(())
    }

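    /// Serialize `data` as JSON (optionally zstd-compressed) and write it
    /// under `key`, replacing any existing entry.
    ///
    /// # Errors
    ///
    /// Returns an error if serialization, compression, or the database
    /// write fails.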
    pub fn store<T>(&self, key: &str, data: &T, cache_type: &str) -> Result<()>
    where
        T: Serialize,
    {
        let serialized = if self.config.compression {
            // Serialize to JSON, then compress with zstd at level 3.
            let json_data = serde_json::to_vec(data)?;
            zstd::encode_all(&json_data[..], 3)?
        } else {
            serde_json::to_vec(data)?
        };

        let size_bytes = serialized.len();
        let entry = DiskCacheEntry {
            key: key.to_string(),
            data: serialized,
            created_at: Utc::now(),
            last_accessed: Utc::now(),
            access_count: 0,
            size_bytes,
            compressed: self.config.compression,
            cache_type: cache_type.to_string(),
        };

        let conn = Connection::open(&self.config.db_path)?;
        #[allow(clippy::cast_possible_wrap)]
        let ttl_seconds = self.config.ttl.as_secs() as i64;

        conn.execute(
            r"
            INSERT OR REPLACE INTO cache_entries
            (key, data, created_at, last_accessed, access_count, size_bytes, compressed, cache_type, ttl)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
            ",
            params![
                entry.key,
                entry.data,
                entry.created_at.timestamp(),
                entry.last_accessed.timestamp(),
                entry.access_count,
                entry.size_bytes,
                entry.compressed,
                entry.cache_type,
                ttl_seconds
            ],
        )?;

        debug!(
            "Stored cache entry: {} ({} bytes, compressed: {})",
            key, entry.size_bytes, entry.compressed
        );

        Ok(())
    }

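    /// Look up `key` and return the deserialized value if it exists and has
    /// not expired, updating hit/miss statistics either way.
    ///
    /// # Errors
    ///
    /// Returns an error if the database read, decompression, or
    /// deserialization fails.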
    pub async fn get<T>(&self, key: &str) -> Result<Option<T>>
    where
        T: for<'de> Deserialize<'de>,
    {
        let conn = Connection::open(&self.config.db_path)?;
        let now = Utc::now().timestamp();

        let mut stmt = conn.prepare(
            r"
            SELECT data, compressed, created_at, ttl, access_count
            FROM cache_entries
            WHERE key = ? AND created_at + ttl > ?
            ",
        )?;

        let mut rows = stmt.query(params![key, now])?;

        if let Some(row) = rows.next()? {
            let data: Vec<u8> = row.get(0)?;
            let compressed: bool = row.get(1)?;
            let access_count: i64 = row.get(4)?;

            // Record the access so LRU eviction keeps hot entries.
            conn.execute(
                "UPDATE cache_entries SET access_count = ?, last_accessed = ? WHERE key = ?",
                params![access_count + 1, now, key],
            )?;

            // Decompress only if the entry was stored compressed.
            let deserialized = if compressed {
                let decompressed = zstd::decode_all(&data[..])?;
                serde_json::from_slice(&decompressed)?
            } else {
                serde_json::from_slice(&data)?
            };

            {
                let mut stats = self.stats.write().await;
                stats.hits += 1;
                stats.calculate_hit_rate();
            }

            debug!("Cache hit for key: {}", key);
            Ok(Some(deserialized))
        } else {
            {
                let mut stats = self.stats.write().await;
                stats.misses += 1;
                stats.calculate_hit_rate();
            }

            debug!("Cache miss for key: {}", key);
            Ok(None)
        }
    }

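    /// Remove `key` from the cache, returning `true` if an entry was deleted.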
    pub fn remove(&self, key: &str) -> Result<bool> {
        let conn = Connection::open(&self.config.db_path)?;
        let deleted = conn.execute("DELETE FROM cache_entries WHERE key = ?", params![key])?;
        Ok(deleted > 0)
    }

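    /// Delete every entry in the cache.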
    pub fn clear(&self) -> Result<()> {
        let conn = Connection::open(&self.config.db_path)?;
        conn.execute("DELETE FROM cache_entries", [])?;
        info!("Cleared all disk cache entries");
        Ok(())
    }

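    /// Delete every entry tagged with the given `cache_type`.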
    pub fn clear_by_type(&self, cache_type: &str) -> Result<()> {
        let conn = Connection::open(&self.config.db_path)?;
        let deleted = conn.execute(
            "DELETE FROM cache_entries WHERE cache_type = ?",
            params![cache_type],
        )?;
        debug!("Cleared {} entries of type: {}", deleted, cache_type);
        Ok(())
    }

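    /// Return a snapshot of the cache statistics, refreshing entry and size
    /// counts from the database first (refresh errors are ignored).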
    pub async fn get_stats(&self) -> DiskCacheStats {
        self.update_stats().await.ok();
        self.stats.read().await.clone()
    }

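    /// Refresh entry counts and total size from the database, counting only
    /// entries that have not yet expired.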
    async fn update_stats(&self) -> Result<()> {
        let conn = Connection::open(&self.config.db_path)?;
        let now = Utc::now().timestamp();

        let (total_entries, total_size): (i64, i64) = conn.query_row(
            "SELECT COUNT(*), COALESCE(SUM(size_bytes), 0) FROM cache_entries WHERE created_at + ttl > ?",
            params![now],
            |row| Ok((row.get(0)?, row.get(1)?)),
        )?;

        let compressed_entries: i64 = conn.query_row(
            "SELECT COUNT(*) FROM cache_entries WHERE compressed = 1 AND created_at + ttl > ?",
            params![now],
            |row| row.get(0),
        )?;

        let uncompressed_entries = total_entries - compressed_entries;

        let mut stats = self.stats.write().await;
        #[allow(clippy::cast_sign_loss)]
        {
            stats.total_entries = total_entries as u64;
            stats.total_size_bytes = total_size as u64;
            stats.compressed_entries = compressed_entries as u64;
            stats.uncompressed_entries = uncompressed_entries as u64;
        }

        Ok(())
    }

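    /// Total size in bytes of all non-expired entries.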
    pub fn get_size(&self) -> Result<u64> {
        let conn = Connection::open(&self.config.db_path)?;
        let now = Utc::now().timestamp();

        let size: i64 = conn.query_row(
            "SELECT COALESCE(SUM(size_bytes), 0) FROM cache_entries WHERE created_at + ttl > ?",
            params![now],
            |row| row.get(0),
        )?;

        #[allow(clippy::cast_sign_loss)]
        Ok(size as u64)
    }

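    /// Whether the cache has reached its configured `max_size`.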
    pub fn is_full(&self) -> Result<bool> {
        let current_size = self.get_size()?;
        Ok(current_size >= self.config.max_size)
    }

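    /// Current size as a percentage of `max_size`.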
    pub fn get_utilization(&self) -> Result<f64> {
        let current_size = self.get_size()?;
        #[allow(clippy::cast_precision_loss)]
        Ok((current_size as f64 / self.config.max_size as f64) * 100.0)
    }
}

impl Drop for DiskCache {
    fn drop(&mut self) {
        // Stop the background cleanup task when the cache is dropped.
        if let Some(handle) = self.cleanup_task.take() {
            handle.abort();
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    #[tokio::test]
    async fn test_disk_cache_basic_operations() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024,
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // Store and retrieve a value.
        let test_data = vec!["hello".to_string(), "world".to_string()];
        cache.store("test_key", &test_data, "test").unwrap();

        let retrieved: Option<Vec<String>> = cache.get("test_key").await.unwrap();
        assert_eq!(retrieved, Some(test_data));

        // A missing key returns None.
        let missing: Option<Vec<String>> = cache.get("missing_key").await.unwrap();
        assert_eq!(missing, None);

        // Removal reports whether an entry was actually deleted.
        let removed = cache.remove("test_key").unwrap();
        assert!(removed);

        let after_removal: Option<Vec<String>> = cache.get("test_key").await.unwrap();
        assert_eq!(after_removal, None);
    }

    #[tokio::test]
    async fn test_disk_cache_compression() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_compressed.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024,
            ttl: Duration::from_secs(60),
            compression: true,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // Round-trip a value through the compressed path.
        let test_data = vec![
            "hello".to_string(),
            "world".to_string(),
            "this".to_string(),
            "is".to_string(),
            "a".to_string(),
            "test".to_string(),
        ];
        cache.store("compressed_key", &test_data, "test").unwrap();

        let retrieved: Option<Vec<String>> = cache.get("compressed_key").await.unwrap();
        assert_eq!(retrieved, Some(test_data));
    }

    #[tokio::test]
    async fn test_disk_cache_statistics() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_stats.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024,
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        cache.store("key1", &vec!["data1"], "test").unwrap();
        cache.store("key2", &vec!["data2"], "test").unwrap();

        // Two hits...
        let _: Option<Vec<String>> = cache.get("key1").await.unwrap();
        let _: Option<Vec<String>> = cache.get("key2").await.unwrap();

        // ...and one miss.
        let _: Option<Vec<String>> = cache.get("missing").await.unwrap();

        let stats = cache.get_stats().await;
        assert_eq!(stats.total_entries, 2);
        assert!(stats.hits >= 2);
        assert!(stats.misses >= 1);
        assert!(stats.hit_rate > 0.0);
    }

    #[tokio::test]
    async fn test_disk_cache_clear() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_clear.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024,
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        cache.store("key1", &vec!["data1"], "test").unwrap();
        cache.store("key2", &vec!["data2"], "test").unwrap();

        let stats_before = cache.get_stats().await;
        assert_eq!(stats_before.total_entries, 2);

        cache.clear().unwrap();

        let stats_after = cache.get_stats().await;
        assert_eq!(stats_after.total_entries, 0);

        let missing: Option<Vec<String>> = cache.get("key1").await.unwrap();
        assert_eq!(missing, None);
    }

    #[tokio::test]
    async fn test_disk_cache_clear_by_type() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_clear_by_type.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024,
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        cache.store("key1", &vec!["data1"], "type1").unwrap();
        cache.store("key2", &vec!["data2"], "type1").unwrap();
        cache.store("key3", &vec!["data3"], "type2").unwrap();

        cache.clear_by_type("type1").unwrap();

        // Only the "type1" entries are gone.
        let missing1: Option<Vec<String>> = cache.get("key1").await.unwrap();
        let missing2: Option<Vec<String>> = cache.get("key2").await.unwrap();
        assert_eq!(missing1, None);
        assert_eq!(missing2, None);

        let existing: Option<Vec<String>> = cache.get("key3").await.unwrap();
        assert_eq!(existing, Some(vec!["data3".to_string()]));
    }

    #[tokio::test]
    async fn test_disk_cache_get_size() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_size.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024,
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        let initial_size = cache.get_size().unwrap();
        assert_eq!(initial_size, 0);

        cache.store("key1", &vec!["data1"], "test").unwrap();
        cache.store("key2", &vec!["data2"], "test").unwrap();

        let size_after_store = cache.get_size().unwrap();
        assert!(size_after_store > 0);
    }

    #[tokio::test]
    async fn test_disk_cache_is_full() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_full.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 100, // tiny limit so the cache fills quickly
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        let initially_full = cache.is_full().unwrap();
        assert!(!initially_full);

        // Store well over 100 bytes of data.
        for i in 0..10 {
            let data = vec![format!("data_{}", i); 100];
            cache.store(&format!("key{i}"), &data, "test").unwrap();
        }

        let is_full = cache.is_full().unwrap();
        assert!(is_full);
    }

    #[tokio::test]
    async fn test_disk_cache_get_utilization() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_utilization.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1000,
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        let initial_utilization = cache.get_utilization().unwrap();
        assert!((initial_utilization - 0.0).abs() < f64::EPSILON);

        cache.store("key1", &vec!["data1"], "test").unwrap();

        let utilization = cache.get_utilization().unwrap();
        assert!(utilization > 0.0);
        assert!(utilization <= 100.0);
    }

    #[tokio::test]
    async fn test_disk_cache_ttl_expiration() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_ttl.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024,
            ttl: Duration::from_millis(1000), // stored as whole seconds
            compression: false,
            cleanup_interval: Duration::from_millis(50),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        cache.store("key1", &vec!["data1"], "test").unwrap();

        // The entry is visible before the TTL elapses.
        let initial: Option<Vec<String>> = cache.get("key1").await.unwrap();
        assert_eq!(initial, Some(vec!["data1".to_string()]));

        // Wait past the TTL, then force a cleanup pass.
        tokio::time::sleep(Duration::from_millis(1200)).await;
        DiskCache::cleanup_expired_entries(&cache.config).unwrap();

        let expired: Option<Vec<String>> = cache.get("key1").await.unwrap();
        assert_eq!(expired, None);
    }

    #[tokio::test]
    async fn test_disk_cache_cleanup_expired_entries() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_cleanup.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024,
            ttl: Duration::from_millis(100),
            compression: false,
            cleanup_interval: Duration::from_millis(50),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        cache.store("key1", &vec!["data1"], "test").unwrap();
        cache.store("key2", &vec!["data2"], "test").unwrap();

        // Let both entries expire, then run the cleanup directly.
        tokio::time::sleep(Duration::from_millis(200)).await;
        DiskCache::cleanup_expired_entries(&cache.config).unwrap();

        let stats = cache.get_stats().await;
        assert_eq!(stats.total_entries, 0);
    }

    #[tokio::test]
    async fn test_disk_cache_cleanup_oversized_entries() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_oversized.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 100, // tiny limit so a single entry exceeds it
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // A single entry much larger than max_size.
        let large_data = vec!["data"; 1000];
        cache.store("key1", &large_data, "test").unwrap();

        DiskCache::cleanup_oversized_entries(&cache.config).unwrap();

        let stats = cache.get_stats().await;
        assert_eq!(stats.total_entries, 0);
    }

    #[tokio::test]
    async fn test_disk_cache_error_handling() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_errors.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024,
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // Storing valid data succeeds.
        let valid_data = vec!["valid".to_string()];
        let result = cache.store("valid_key", &valid_data, "test");
        assert!(result.is_ok());

        // Getting a missing key is Ok(None), not an error.
        let missing: Option<Vec<String>> = cache.get("missing_key").await.unwrap();
        assert_eq!(missing, None);

        // Removing a missing key reports false rather than failing.
        let removed = cache.remove("missing_key").unwrap();
        assert!(!removed);
    }

    #[tokio::test]
    async fn test_disk_cache_concurrent_access() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test_cache_concurrent.db");

        let config = DiskCacheConfig {
            db_path: db_path.to_string_lossy().to_string(),
            max_size: 1024 * 1024,
            ttl: Duration::from_secs(60),
            compression: false,
            cleanup_interval: Duration::from_secs(10),
            max_entries: 100,
        };

        let cache = DiskCache::new(config).await.unwrap();

        // Interleave stores and gets across several keys.
        for i in 0..5 {
            let key = format!("key_{i}");
            let data = vec![format!("data_{}", i)];

            cache.store(&key, &data, "test").unwrap();

            let retrieved: Option<Vec<String>> = cache.get(&key).await.unwrap();
            assert_eq!(retrieved, Some(data));
        }

        let stats = cache.get_stats().await;
        assert_eq!(stats.total_entries, 5);
    }
}