1use crate::commands::commit::CommitPlan;
2use anyhow::{Context, Result};
3use serde::{Deserialize, Serialize};
4use std::collections::BTreeMap;
5use std::fs;
6use std::path::{Path, PathBuf};
7use std::time::{SystemTime, UNIX_EPOCH};
8
9use super::fingerprint::sha256_hex;
10
/// Maximum number of cache entries kept per repository; older ones are evicted.
const MAX_ENTRIES: usize = 20;
/// Entry time-to-live in seconds (24 hours); entries older than this are evicted.
const TTL_SECS: u64 = 24 * 60 * 60;

/// One cached commit plan, persisted as a single JSON file named after `state_key`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheEntry {
    // Key identifying the repo state this plan was generated for; also the file stem.
    pub state_key: String,
    // Map of path -> fingerprint captured at generation time.
    // NOTE(review): presumably sha-256 hex from the fingerprint module — confirm.
    pub fingerprints: BTreeMap<String, String>,
    // The cached plan itself.
    pub plan: CommitPlan,
    // Creation time, seconds since the Unix epoch (see `now_secs`).
    pub created_at: u64,
    // AI backend that produced the plan.
    pub backend: String,
    // Model name used by that backend.
    pub model: String,
}
23
24pub fn cache_dir(repo_root: &Path) -> Option<PathBuf> {
27 let base = dirs::cache_dir()?;
28 let repo_id = &sha256_hex(repo_root.to_string_lossy().as_bytes())[..16];
29 Some(base.join("sr").join("ai").join(repo_id).join("entries"))
30}
31
/// Build the on-disk path for the entry with the given `state_key`:
/// `<dir>/<state_key>.json`.
pub fn entry_path(dir: &Path, state_key: &str) -> PathBuf {
    let file_name = format!("{state_key}.json");
    dir.join(file_name)
}
35
36pub fn read_entry(path: &Path) -> Result<CacheEntry> {
37 let data = fs::read_to_string(path).context("reading cache entry")?;
38 serde_json::from_str(&data).context("parsing cache entry")
39}
40
41pub fn write_entry(dir: &Path, entry: &CacheEntry) -> Result<()> {
42 fs::create_dir_all(dir).context("creating cache directory")?;
43 let path = entry_path(dir, &entry.state_key);
44 let data = serde_json::to_string_pretty(entry).context("serializing cache entry")?;
45 fs::write(&path, data).context("writing cache entry")?;
46 evict(dir)?;
47 Ok(())
48}
49
50pub fn list_entries(dir: &Path) -> Result<Vec<CacheEntry>> {
52 if !dir.exists() {
53 return Ok(Vec::new());
54 }
55
56 let mut entries = Vec::new();
57 for de in fs::read_dir(dir).context("reading cache directory")? {
58 let de = de?;
59 let path = de.path();
60 if path.extension().is_some_and(|e| e == "json")
61 && let Ok(entry) = read_entry(&path)
62 {
63 entries.push(entry);
64 }
65 }
66 entries.sort_by(|a, b| b.created_at.cmp(&a.created_at));
67 Ok(entries)
68}
69
70fn evict(dir: &Path) -> Result<()> {
72 let now = SystemTime::now()
73 .duration_since(UNIX_EPOCH)
74 .unwrap_or_default()
75 .as_secs();
76
77 let mut entries: Vec<(PathBuf, u64)> = Vec::new();
78
79 if let Ok(rd) = fs::read_dir(dir) {
80 for de in rd.flatten() {
81 let path = de.path();
82 if path.extension().is_some_and(|e| e == "json")
83 && let Ok(entry) = read_entry(&path)
84 {
85 if now.saturating_sub(entry.created_at) > TTL_SECS {
86 let _ = fs::remove_file(&path);
87 } else {
88 entries.push((path, entry.created_at));
89 }
90 }
91 }
92 }
93
94 entries.sort_by(|a, b| b.1.cmp(&a.1));
96 for (path, _) in entries.iter().skip(MAX_ENTRIES) {
97 let _ = fs::remove_file(path);
98 }
99
100 Ok(())
101}
102
103pub fn clear(dir: &Path) -> Result<usize> {
105 if !dir.exists() {
106 return Ok(0);
107 }
108
109 let mut count = 0;
110 for de in fs::read_dir(dir)?.flatten() {
111 let path = de.path();
112 if path.extension().is_some_and(|e| e == "json") {
113 let _ = fs::remove_file(&path);
114 count += 1;
115 }
116 }
117 Ok(count)
118}
119
120pub fn clear_all() -> Result<usize> {
122 let base = dirs::cache_dir()
123 .map(|d| d.join("sr").join("ai"))
124 .filter(|d| d.exists());
125
126 let Some(base) = base else {
127 return Ok(0);
128 };
129
130 let mut count = 0;
131 for repo_dir in fs::read_dir(&base)?.flatten() {
132 let entries_dir = repo_dir.path().join("entries");
133 if entries_dir.is_dir() {
134 count += clear(&entries_dir)?;
135 }
136 }
137
138 Ok(count)
139}
140
/// Current time as whole seconds since the Unix epoch; returns 0 if the
/// system clock reads before the epoch (instead of panicking).
pub fn now_secs() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}