//! oxigaf_cli — `cache.rs`
//!
//! Cache management for downloaded assets: metadata persistence
//! (`cache.json`), listing, age-based cleaning, and integrity verification.
1use anyhow::{Context, Result};
2use serde::{Deserialize, Serialize};
3use std::path::{Path, PathBuf};
4use std::time::{SystemTime, UNIX_EPOCH};
5
/// On-disk index of everything in the cache, serialized to `cache.json`.
#[derive(Debug, Serialize, Deserialize)]
pub struct CacheMetadata {
    /// Crate version that created the file (`CARGO_PKG_VERSION` at build time).
    pub version: String,
    /// One record per cached asset.
    pub entries: Vec<CacheEntry>,
}
11
/// Metadata for a single cached asset.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct CacheEntry {
    /// Unique name used to look the entry up in `CacheMetadata::entries`.
    pub name: String,
    /// Location of the cached file on disk.
    pub path: PathBuf,
    /// Recorded file size in bytes (checked against disk by `verify_cache`).
    pub size_bytes: u64,
    /// Unix timestamp (seconds) of the download — set by the caller; not visible here.
    pub downloaded_at: u64,
    /// Unix timestamp (seconds) of the most recent access.
    pub last_accessed: u64,
    /// Number of times the asset has been accessed.
    pub access_count: u64,
    /// Optional SHA-256 checksum (lowercase hex); verified when present.
    pub checksum: Option<String>,
}
22
23impl CacheMetadata {
24    /// Load cache metadata from cache directory
25    pub fn load(cache_dir: &Path) -> Result<Self> {
26        let metadata_path = cache_dir.join("cache.json");
27
28        if !metadata_path.exists() {
29            return Ok(Self {
30                version: env!("CARGO_PKG_VERSION").to_string(),
31                entries: Vec::new(),
32            });
33        }
34
35        let data = std::fs::read_to_string(&metadata_path).with_context(|| {
36            format!("Failed to read cache metadata: {}", metadata_path.display())
37        })?;
38
39        serde_json::from_str(&data).with_context(|| "Failed to parse cache metadata")
40    }
41
42    /// Save cache metadata to cache directory
43    pub fn save(&self, cache_dir: &Path) -> Result<()> {
44        // Ensure cache directory exists
45        std::fs::create_dir_all(cache_dir).with_context(|| {
46            format!("Failed to create cache directory: {}", cache_dir.display())
47        })?;
48
49        let metadata_path = cache_dir.join("cache.json");
50        let data = serde_json::to_string_pretty(self)
51            .with_context(|| "Failed to serialize cache metadata")?;
52
53        std::fs::write(&metadata_path, data).with_context(|| {
54            format!(
55                "Failed to write cache metadata: {}",
56                metadata_path.display()
57            )
58        })?;
59
60        Ok(())
61    }
62
63    /// Update access timestamp and count for a cache entry
64    #[allow(dead_code)]
65    pub fn update_access(&mut self, name: &str) {
66        if let Some(entry) = self.entries.iter_mut().find(|e| e.name == name) {
67            entry.last_accessed = current_timestamp();
68            entry.access_count += 1;
69        }
70    }
71
72    /// Calculate total size of all cached assets
73    pub fn total_size(&self) -> u64 {
74        self.entries.iter().map(|e| e.size_bytes).sum()
75    }
76}
77
/// Get the current Unix timestamp in seconds.
///
/// Falls back to 0 if the system clock reports a time before the Unix epoch.
fn current_timestamp() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
85
86/// List all cached assets with details
87pub fn list_cache(cache_dir: &Path) -> Result<()> {
88    let metadata = CacheMetadata::load(cache_dir)?;
89
90    if metadata.entries.is_empty() {
91        println!("Cache is empty");
92        return Ok(());
93    }
94
95    println!(
96        "\nšŸ“¦ Cached Assets ({} items, {} MB total)\n",
97        metadata.entries.len(),
98        metadata.total_size() / 1_000_000
99    );
100
101    println!(
102        "{:<40} {:>12} {:>15} {:>10}",
103        "Name", "Size", "Last Accessed", "Count"
104    );
105    println!("{}", "─".repeat(80));
106
107    for entry in &metadata.entries {
108        let size_mb = entry.size_bytes as f64 / 1_000_000.0;
109        let days_ago = (current_timestamp().saturating_sub(entry.last_accessed)) / 86400;
110
111        println!(
112            "{:<40} {:>10.1} MB {:>12} days {:>10}",
113            entry.name, size_mb, days_ago, entry.access_count
114        );
115    }
116
117    println!();
118    Ok(())
119}
120
121/// Clean old cached assets
122pub fn clean_cache(cache_dir: &Path, max_age_days: u64, dry_run: bool) -> Result<()> {
123    let mut metadata = CacheMetadata::load(cache_dir)?;
124    let cutoff = current_timestamp().saturating_sub(max_age_days * 86400);
125
126    let to_remove: Vec<_> = metadata
127        .entries
128        .iter()
129        .filter(|e| e.last_accessed < cutoff)
130        .cloned()
131        .collect();
132
133    if to_remove.is_empty() {
134        println!(
135            "āœ… No assets to clean (all accessed within {} days)",
136            max_age_days
137        );
138        return Ok(());
139    }
140
141    println!("šŸ—‘ļø  Will remove {} assets:", to_remove.len());
142
143    let mut total_freed = 0u64;
144    for entry in &to_remove {
145        println!(
146            "  - {} ({:.1} MB)",
147            entry.name,
148            entry.size_bytes as f64 / 1_000_000.0
149        );
150        total_freed += entry.size_bytes;
151    }
152
153    println!(
154        "\nTotal space to free: {:.1} MB",
155        total_freed as f64 / 1_000_000.0
156    );
157
158    if dry_run {
159        println!("\n(Dry run - no files deleted)");
160        return Ok(());
161    }
162
163    for entry in &to_remove {
164        if entry.path.exists() {
165            std::fs::remove_file(&entry.path)
166                .with_context(|| format!("Failed to remove file: {}", entry.path.display()))?;
167        }
168        metadata.entries.retain(|e| e.name != entry.name);
169    }
170
171    metadata.save(cache_dir)?;
172    println!("\nāœ… Cleaned {} assets", to_remove.len());
173
174    Ok(())
175}
176
177/// Verify cache integrity
178pub fn verify_cache(cache_dir: &Path) -> Result<()> {
179    let metadata = CacheMetadata::load(cache_dir)?;
180
181    println!("šŸ” Verifying cache integrity...\n");
182
183    let mut issues = 0;
184    for entry in &metadata.entries {
185        print!("  {} ... ", entry.name);
186
187        if !entry.path.exists() {
188            println!("āŒ MISSING");
189            issues += 1;
190            continue;
191        }
192
193        let actual_size = std::fs::metadata(&entry.path)
194            .with_context(|| format!("Failed to read metadata for {}", entry.path.display()))?
195            .len();
196        if actual_size != entry.size_bytes {
197            println!(
198                "āŒ SIZE MISMATCH (expected {}, got {})",
199                entry.size_bytes, actual_size
200            );
201            issues += 1;
202            continue;
203        }
204
205        if let Some(ref expected_checksum) = entry.checksum {
206            let actual_checksum = compute_sha256(&entry.path)?;
207            if &actual_checksum != expected_checksum {
208                println!("āŒ CHECKSUM MISMATCH");
209                issues += 1;
210                continue;
211            }
212        }
213
214        println!("āœ… OK");
215    }
216
217    println!();
218    if issues == 0 {
219        println!(
220            "āœ… All {} assets verified successfully",
221            metadata.entries.len()
222        );
223    } else {
224        println!("āš ļø  Found {} issues", issues);
225    }
226
227    Ok(())
228}
229
230/// Compute SHA-256 checksum of a file
231fn compute_sha256(path: &Path) -> Result<String> {
232    use sha2::{Digest, Sha256};
233
234    let data = std::fs::read(path)
235        .with_context(|| format!("Failed to read file for checksum: {}", path.display()))?;
236    let hash = Sha256::digest(&data);
237    Ok(format!("{:x}", hash))
238}