aptu_core/cache.rs

// SPDX-License-Identifier: Apache-2.0

//! TTL-based file caching for GitHub API responses.
//!
//! Stores issue and repository data as JSON files with embedded metadata
//! (timestamp, optional etag). Cache entries are validated against TTL settings
//! from configuration.
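//!
//! # Example
//!
//! A minimal usage sketch, assuming this module is exposed as `aptu_core::cache`;
//! the 15-minute TTL is illustrative rather than a project default.
//!
//! ```no_run
//! use aptu_core::cache::{cache_key_issues, read_cache, write_cache, CacheEntry};
//! use chrono::Duration;
//!
//! # fn main() -> anyhow::Result<()> {
//! let key = cache_key_issues("rust-lang", "rust");
//!
//! // Serve the cached value only while it is within the TTL.
//! let fresh_needed = match read_cache::<String>(&key)? {
//!     Some(entry) if entry.is_valid(Duration::minutes(15)) => {
//!         println!("cache hit: {}", entry.data);
//!         false
//!     }
//!     _ => true,
//! };
//!
//! if fresh_needed {
//!     // Cache miss or stale entry: fetch and store a fresh value.
//!     write_cache(&key, &CacheEntry::new("fresh data".to_string()))?;
//! }
//! # Ok(())
//! # }
//! ```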

use std::fs;
use std::path::PathBuf;

use anyhow::{Context, Result};
use chrono::{DateTime, Duration, Utc};
use serde::{Deserialize, Serialize};

/// A cached entry with metadata.
///
/// Wraps cached data with timestamp and optional etag for validation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheEntry<T> {
    /// The cached data.
    pub data: T,
    /// When the entry was cached.
    pub cached_at: DateTime<Utc>,
    /// Optional `ETag` for future conditional requests.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}

impl<T> CacheEntry<T> {
    /// Create a new cache entry.
    pub fn new(data: T) -> Self {
        Self {
            data,
            cached_at: Utc::now(),
            etag: None,
        }
    }

    /// Create a new cache entry with an etag.
    pub fn with_etag(data: T, etag: String) -> Self {
        Self {
            data,
            cached_at: Utc::now(),
            etag: Some(etag),
        }
    }

    /// Check if this entry is still valid based on TTL.
    ///
    /// # Arguments
    ///
    /// * `ttl` - Time-to-live duration
    ///
    /// # Returns
    ///
    /// `true` if the entry is within its TTL, `false` if expired.
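    ///
    /// # Examples
    ///
    /// A small sketch (module path `aptu_core::cache` assumed); a freshly
    /// created entry is valid for any reasonable TTL:
    ///
    /// ```no_run
    /// use aptu_core::cache::CacheEntry;
    /// use chrono::Duration;
    ///
    /// let entry = CacheEntry::new(42_u32);
    /// assert!(entry.is_valid(Duration::hours(1)));
    /// ```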
    pub fn is_valid(&self, ttl: Duration) -> bool {
        let now = Utc::now();
        now.signed_duration_since(self.cached_at) < ttl
    }
}

/// Returns the cache directory.
///
/// - Linux: `~/.cache/aptu`
/// - macOS: `~/Library/Caches/aptu`
/// - Windows: `C:\Users\<User>\AppData\Local\aptu`
///
/// # Panics
///
/// Panics if the platform cache directory cannot be determined.
#[must_use]
pub fn cache_dir() -> PathBuf {
    dirs::cache_dir()
        .expect("Failed to determine cache directory")
        .join("aptu")
}

/// Generate a cache key for repository metadata (labels and milestones).
///
/// # Arguments
///
/// * `owner` - Repository owner
/// * `repo` - Repository name
///
/// # Returns
///
/// A cache key string in the format `repo_metadata/{owner}_{repo}.json`
#[must_use]
pub fn cache_key_repo_metadata(owner: &str, repo: &str) -> String {
    format!("repo_metadata/{owner}_{repo}.json")
}

/// Generate a cache key for an issue list.
///
/// # Arguments
///
/// * `owner` - Repository owner
/// * `repo` - Repository name
///
/// # Returns
///
/// A cache key string in the format `issues/{owner}_{repo}.json`
#[must_use]
pub fn cache_key_issues(owner: &str, repo: &str) -> String {
    format!("issues/{owner}_{repo}.json")
}

/// Generate a cache key for model lists.
///
/// # Arguments
///
/// * `provider` - Provider name (e.g., "openrouter", "gemini")
///
/// # Returns
///
/// A cache key string in the format `models/{provider}.json`
#[must_use]
pub fn cache_key_models(provider: &str) -> String {
    format!("models/{provider}.json")
}

/// Read a cache entry from disk.
///
/// # Arguments
///
/// * `key` - Cache key (relative path within cache directory)
///
/// # Returns
///
/// The deserialized cache entry, or `None` if the file doesn't exist.
///
/// # Errors
///
/// Returns an error if the file exists but cannot be read or parsed.
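///
/// # Examples
///
/// The target type must be nameable at the call site, e.g. via an annotation
/// or turbofish (sketch only; module path `aptu_core::cache` assumed):
///
/// ```no_run
/// use aptu_core::cache::{read_cache, CacheEntry};
///
/// # fn main() -> anyhow::Result<()> {
/// let cached: Option<CacheEntry<Vec<String>>> = read_cache("issues/owner_repo.json")?;
/// if cached.is_none() {
///     // Nothing cached yet; the caller falls back to fetching from the API.
/// }
/// # Ok(())
/// # }
/// ```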
pub fn read_cache<T: for<'de> Deserialize<'de>>(key: &str) -> Result<Option<CacheEntry<T>>> {
    let path = cache_dir().join(key);

    if !path.exists() {
        return Ok(None);
    }

    let contents = fs::read_to_string(&path)
        .with_context(|| format!("Failed to read cache file: {}", path.display()))?;

    let entry: CacheEntry<T> = serde_json::from_str(&contents)
        .with_context(|| format!("Failed to parse cache file: {}", path.display()))?;

    Ok(Some(entry))
}

/// Write a cache entry to disk.
///
/// Creates parent directories if they don't exist.
/// Uses an atomic write pattern (write to a temp file, then rename) to prevent corruption.
///
/// # Arguments
///
/// * `key` - Cache key (relative path within cache directory)
/// * `entry` - Cache entry to write
///
/// # Errors
///
/// Returns an error if the file cannot be written.
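///
/// # Examples
///
/// A sketch storing a model list together with its `ETag` (module path
/// `aptu_core::cache` assumed; the model names and etag are illustrative):
///
/// ```no_run
/// use aptu_core::cache::{cache_key_models, write_cache, CacheEntry};
///
/// # fn main() -> anyhow::Result<()> {
/// let models = vec!["model-a".to_string(), "model-b".to_string()];
/// let entry = CacheEntry::with_etag(models, "\"abc123\"".to_string());
/// write_cache(&cache_key_models("openrouter"), &entry)?;
/// # Ok(())
/// # }
/// ```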
pub fn write_cache<T: Serialize>(key: &str, entry: &CacheEntry<T>) -> Result<()> {
    let path = cache_dir().join(key);

    // Create parent directories if needed
    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent)
            .with_context(|| format!("Failed to create cache directory: {}", parent.display()))?;
    }

    let contents =
        serde_json::to_string_pretty(entry).context("Failed to serialize cache entry")?;

    // Atomic write: write to temp file, then rename
    let temp_path = path.with_extension("tmp");
    fs::write(&temp_path, contents)
        .with_context(|| format!("Failed to write cache temp file: {}", temp_path.display()))?;

    fs::rename(&temp_path, &path)
        .with_context(|| format!("Failed to rename cache file: {}", path.display()))?;

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
    struct TestData {
        value: String,
        count: u32,
    }

    #[test]
    fn test_cache_entry_new() {
        let data = TestData {
            value: "test".to_string(),
            count: 42,
        };
        let entry = CacheEntry::new(data.clone());

        assert_eq!(entry.data, data);
        assert!(entry.etag.is_none());
    }

    #[test]
    fn test_cache_entry_with_etag() {
        let data = TestData {
            value: "test".to_string(),
            count: 42,
        };
        let etag = "abc123".to_string();
        let entry = CacheEntry::with_etag(data.clone(), etag.clone());

        assert_eq!(entry.data, data);
        assert_eq!(entry.etag, Some(etag));
    }

    #[test]
    fn test_cache_entry_is_valid_within_ttl() {
        let data = TestData {
            value: "test".to_string(),
            count: 42,
        };
        let entry = CacheEntry::new(data);
        let ttl = Duration::hours(1);

        assert!(entry.is_valid(ttl));
    }

    #[test]
    fn test_cache_entry_is_valid_expired() {
        let data = TestData {
            value: "test".to_string(),
            count: 42,
        };
        let mut entry = CacheEntry::new(data);
        // Manually set cached_at to 2 hours ago
        entry.cached_at = Utc::now() - Duration::hours(2);
        let ttl = Duration::hours(1);

        assert!(!entry.is_valid(ttl));
    }

    #[test]
    fn test_cache_key_issues() {
        let key = cache_key_issues("owner", "repo");
        assert_eq!(key, "issues/owner_repo.json");
    }

    #[test]
    fn test_cache_dir_path() {
        let dir = cache_dir();
        assert!(dir.ends_with("aptu"));
    }

    #[test]
    fn test_cache_serialization_with_etag() {
        let data = TestData {
            value: "test".to_string(),
            count: 42,
        };
        let etag = "xyz789".to_string();
        let entry = CacheEntry::with_etag(data.clone(), etag.clone());

        let json = serde_json::to_string(&entry).expect("serialize");
        let parsed: CacheEntry<TestData> = serde_json::from_str(&json).expect("deserialize");

        assert_eq!(parsed.data, data);
        assert_eq!(parsed.etag, Some(etag));
    }

    #[test]
    fn test_read_cache_nonexistent() {
        let result: Result<Option<CacheEntry<TestData>>> = read_cache("nonexistent/file.json");
        assert!(result.is_ok());
        assert!(result.unwrap().is_none());
    }

    #[test]
    fn test_write_and_read_cache() {
        let data = TestData {
            value: "cached".to_string(),
            count: 99,
        };
        let entry = CacheEntry::new(data.clone());
        let key = "test/data.json";

        // Write cache
        write_cache(key, &entry).expect("write cache");

        // Read cache
        let read_entry: CacheEntry<TestData> =
            read_cache(key).expect("read cache").expect("cache exists");

        assert_eq!(read_entry.data, data);
        assert_eq!(read_entry.etag, entry.etag);

        // Cleanup
        let path = cache_dir().join(key);
        if path.exists() {
            fs::remove_file(path).ok();
        }
    }
}