//! mermaid_cli/cache/file_cache.rs

1use anyhow::Result;
2use sha2::{Digest, Sha256};
3use std::fs;
4use std::path::Path;
5use std::time::SystemTime;
6
7use super::types::{CacheEntry, CacheKey, CacheMetadata};
8
/// File-level cache operations.
///
/// Stores bincode-serialized, LZ4-compressed cache entries on disk under
/// `cache_dir`, sharded into subdirectories by the first two characters of
/// each entry's content hash (see `cache_path`).
#[derive(Debug)]
pub struct FileCache {
    // Root directory under which all cache entries are written.
    cache_dir: std::path::PathBuf,
}
14
15impl FileCache {
16    /// Create a new file cache
17    pub fn new(cache_dir: std::path::PathBuf) -> Result<Self> {
18        // Ensure cache directory exists
19        fs::create_dir_all(&cache_dir)?;
20        Ok(Self { cache_dir })
21    }
22
23    /// Compute SHA256 hash of a file
24    pub fn hash_file(path: &Path) -> Result<String> {
25        let content = fs::read(path)?;
26        let mut hasher = Sha256::new();
27        hasher.update(&content);
28        let result = hasher.finalize();
29        Ok(format!("{:x}", result))
30    }
31
32    /// Generate cache key for a file
33    pub fn generate_key(path: &Path) -> Result<CacheKey> {
34        let file_hash = Self::hash_file(path)?;
35        Ok(CacheKey {
36            file_path: path.to_path_buf(),
37            file_hash,
38        })
39    }
40
41    /// Save data to cache with compression
42    pub fn save<T>(&self, key: &CacheKey, data: &T) -> Result<()>
43    where
44        T: serde::Serialize,
45    {
46        // Serialize data
47        let serialized = bincode::serialize(data)?;
48        let original_size = serialized.len();
49
50        // Compress data
51        let compressed = lz4::block::compress(&serialized, None, true)?;
52        let compressed_size = compressed.len();
53
54        // Create metadata
55        let metadata = CacheMetadata {
56            created_at: SystemTime::now(),
57            last_accessed: SystemTime::now(),
58            file_size: original_size as u64,
59            compressed_size,
60            compression_ratio: original_size as f32 / compressed_size as f32,
61        };
62
63        // Create cache entry
64        let entry = CacheEntry {
65            key: key.clone(),
66            data: compressed,
67            metadata,
68        };
69
70        // Generate cache file path
71        let cache_path = self.cache_path(key);
72
73        // Ensure parent directory exists
74        if let Some(parent) = cache_path.parent() {
75            fs::create_dir_all(parent)?;
76        }
77
78        // Write to file
79        let entry_data = bincode::serialize(&entry)?;
80        fs::write(cache_path, entry_data)?;
81
82        Ok(())
83    }
84
85    /// Load data from cache
86    pub fn load<T>(&self, key: &CacheKey) -> Result<Option<T>>
87    where
88        T: serde::de::DeserializeOwned,
89    {
90        let cache_path = self.cache_path(key);
91
92        // Check if cache file exists
93        if !cache_path.exists() {
94            return Ok(None);
95        }
96
97        // Read cache entry
98        let entry_data = fs::read(&cache_path)?;
99        let mut entry: CacheEntry<Vec<u8>> = bincode::deserialize(&entry_data)?;
100
101        // Update last accessed time
102        entry.metadata.last_accessed = SystemTime::now();
103
104        // Decompress data
105        let decompressed =
106            lz4::block::decompress(&entry.data, Some(entry.metadata.file_size as i32))?;
107
108        // Deserialize data
109        let data: T = bincode::deserialize(&decompressed)?;
110
111        Ok(Some(data))
112    }
113
114    /// Check if cache entry is valid (file hasn't changed)
115    pub fn is_valid(&self, key: &CacheKey) -> Result<bool> {
116        // Check if file still exists
117        if !key.file_path.exists() {
118            return Ok(false);
119        }
120
121        // Check if hash matches
122        let current_hash = Self::hash_file(&key.file_path)?;
123        Ok(current_hash == key.file_hash)
124    }
125
126    /// Remove cache entry
127    pub fn remove(&self, key: &CacheKey) -> Result<()> {
128        let cache_path = self.cache_path(key);
129        if cache_path.exists() {
130            fs::remove_file(cache_path)?;
131        }
132        Ok(())
133    }
134
135    /// Generate cache file path for a key
136    fn cache_path(&self, key: &CacheKey) -> std::path::PathBuf {
137        // Use first 2 chars of hash for directory sharding
138        let hash_prefix = &key.file_hash[..2];
139        let cache_name = format!(
140            "{}_{}.cache",
141            key.file_path
142                .file_name()
143                .and_then(|n| n.to_str())
144                .unwrap_or("unknown"),
145            &key.file_hash[..8]
146        );
147
148        self.cache_dir.join(hash_prefix).join(cache_name)
149    }
150
151    /// Get cache statistics
152    pub fn get_stats(&self) -> Result<CacheStats> {
153        let mut total_entries = 0;
154        let mut total_size = 0;
155        let mut total_compressed_size = 0;
156
157        // Walk cache directory
158        for entry in fs::read_dir(&self.cache_dir)? {
159            let entry = entry?;
160            if entry.path().is_dir() {
161                for cache_file in fs::read_dir(entry.path())? {
162                    let cache_file = cache_file?;
163                    let metadata = cache_file.metadata()?;
164                    total_entries += 1;
165                    total_compressed_size += metadata.len() as usize;
166                    // Estimate original size (we'd need to read entries for exact)
167                    total_size += (metadata.len() as f32 * 3.0) as usize;
168                }
169            }
170        }
171
172        Ok(CacheStats {
173            total_entries,
174            total_size,
175            total_compressed_size,
176            compression_ratio: if total_compressed_size > 0 {
177                total_size as f32 / total_compressed_size as f32
178            } else {
179                1.0
180            },
181            cache_dir: self.cache_dir.clone(),
182        })
183    }
184}
185
/// Aggregate cache statistics, as produced by `FileCache::get_stats`.
#[derive(Debug, Clone)]
pub struct CacheStats {
    /// Number of cache files found on disk.
    pub total_entries: usize,
    /// Estimated total uncompressed size in bytes (get_stats uses a 3x
    /// heuristic over on-disk size, not exact values).
    pub total_size: usize,
    /// Total on-disk (compressed) size in bytes.
    pub total_compressed_size: usize,
    /// `total_size / total_compressed_size`, or 1.0 when the cache is empty.
    pub compression_ratio: f32,
    /// Root directory the statistics were gathered from.
    pub cache_dir: std::path::PathBuf,
}
195
#[cfg(test)]
mod tests {
    use super::*;

    // Phase 4 Test Suite: FileCache - core file cache operations

    #[test]
    fn test_cache_key_structure() {
        // Test CacheKey components
        let path = Path::new("src/main.rs");
        let file_hash = "abc123def456".to_string();

        // `file_hash` is not used again, so it can be moved (the old
        // `.clone()` was unnecessary).
        let key = CacheKey {
            file_path: path.to_path_buf(),
            file_hash,
        };

        assert_eq!(key.file_hash, "abc123def456");
        assert_eq!(key.file_path.file_name().unwrap(), "main.rs");
    }

    #[test]
    fn test_cache_metadata_structure() {
        // Test CacheMetadata creation and structure
        let now = SystemTime::now();

        let metadata = CacheMetadata {
            created_at: now,
            last_accessed: now,
            file_size: 1024,
            compressed_size: 512,
            compression_ratio: 2.0,
        };

        assert_eq!(metadata.file_size, 1024);
        assert_eq!(metadata.compressed_size, 512);
        assert_eq!(metadata.compression_ratio, 2.0);
    }

    #[test]
    fn test_cache_entry_structure() {
        // Test CacheEntry structure
        let key = CacheKey {
            file_path: Path::new("test.rs").to_path_buf(),
            file_hash: "test_hash".to_string(),
        };

        let metadata = CacheMetadata {
            created_at: SystemTime::now(),
            last_accessed: SystemTime::now(),
            file_size: 100,
            compressed_size: 50,
            compression_ratio: 2.0,
        };

        // `key` is only read through `entry.key` afterwards, so it can be
        // moved into the entry (the old `.clone()` was unnecessary).
        let entry = CacheEntry {
            key,
            data: vec![1, 2, 3, 4, 5],
            metadata,
        };

        assert_eq!(entry.data.len(), 5);
        assert_eq!(entry.key.file_hash, "test_hash");
    }

    #[test]
    fn test_cache_stats_structure() {
        // Test CacheStats structure and values
        let stats = CacheStats {
            total_entries: 100,
            total_size: 1_000_000,
            total_compressed_size: 500_000,
            compression_ratio: 2.0,
            cache_dir: Path::new("/cache").to_path_buf(),
        };

        assert_eq!(stats.total_entries, 100);
        assert_eq!(stats.total_size, 1_000_000);
        assert_eq!(stats.compression_ratio, 2.0);
    }

    #[test]
    fn test_compression_ratio_calculation() {
        // Test compression ratio calculation logic
        let test_cases = vec![
            (1000, 500, 2.0),
            (2000, 1000, 2.0),
            (3000, 1000, 3.0),
            (1000, 250, 4.0),
        ];

        for (original, compressed, expected) in test_cases {
            let ratio = original as f32 / compressed as f32;
            assert!((ratio - expected).abs() < 0.01);
        }
    }

    #[test]
    fn test_cache_path_construction() {
        // Test cache path construction with hash prefixing
        let hash = "abc123def456";
        let prefix = &hash[..2]; // "ab"

        assert_eq!(prefix, "ab", "Prefix should be first 2 chars of hash");
    }

    #[test]
    fn test_cache_file_naming() {
        // Test cache file naming convention.
        // (Removed the unused `hash_prefix` local that triggered an
        // unused-variable warning; sharding is covered by
        // `test_cache_path_construction`.)
        let file_name = "main.rs";
        let hash_short = "abc12345";

        let cache_name = format!("{}_{}.cache", file_name, hash_short);

        assert!(
            cache_name.contains("main.rs"),
            "Should include original filename"
        );
        assert!(cache_name.contains("abc12345"), "Should include hash short");
        assert!(cache_name.ends_with(".cache"), "Should end with .cache");
    }

    #[test]
    fn test_cache_stats_compression_ratio_zero_handling() {
        // Test compression ratio when compressed size is zero
        let total_size = 1000;
        let compressed_size = 0;

        let ratio = if compressed_size > 0 {
            total_size as f32 / compressed_size as f32
        } else {
            1.0 // Default when no compression
        };

        assert_eq!(
            ratio, 1.0,
            "Should default to 1.0 when compressed size is 0"
        );
    }
}