// nntp_proxy/cache/article.rs

//! Article caching implementation using LRU cache with TTL

use moka::future::Cache;
use std::sync::Arc;
use std::time::Duration;

/// A cached article: the complete response bytes ready to send back to a client.
#[derive(Clone, Debug)]
pub struct CachedArticle {
    /// Complete response, including the status line and the article data.
    ///
    /// Kept behind an `Arc` so that a cache hit clones a pointer rather
    /// than copying the entire byte buffer.
    pub response: Arc<Vec<u8>>,
}
14
15/// Article cache using LRU eviction with TTL
16#[derive(Clone)]
17pub struct ArticleCache {
18    cache: Arc<Cache<String, CachedArticle>>,
19}
20
21impl ArticleCache {
22    /// Create a new article cache
23    ///
24    /// # Arguments
25    /// * `max_capacity` - Maximum number of articles to cache
26    /// * `ttl` - Time-to-live for cached articles
27    pub fn new(max_capacity: u64, ttl: Duration) -> Self {
28        let cache = Cache::builder()
29            .max_capacity(max_capacity)
30            .time_to_live(ttl)
31            .build();
32
33        Self {
34            cache: Arc::new(cache),
35        }
36    }
37
38    /// Get an article from the cache
39    pub async fn get(&self, message_id: &str) -> Option<CachedArticle> {
40        self.cache.get(message_id).await
41    }
42
43    /// Store an article in the cache
44    pub async fn insert(&self, message_id: String, article: CachedArticle) {
45        self.cache.insert(message_id, article).await;
46    }
47
48    /// Get cache statistics
49    pub async fn stats(&self) -> CacheStats {
50        CacheStats {
51            entry_count: self.cache.entry_count(),
52            weighted_size: self.cache.weighted_size(),
53        }
54    }
55}
56
/// Point-in-time cache statistics.
#[derive(Debug, Clone)]
pub struct CacheStats {
    /// Number of entries currently held, as reported by the underlying cache.
    pub entry_count: u64,
    /// Total weighted size of all entries, as reported by the underlying cache.
    pub weighted_size: u64,
}