1use std::collections::HashMap;
12use std::io::{BufReader, BufWriter, Read, Write};
13use std::path::Path;
14use std::time::SystemTime;
15
/// Magic bytes at the start of every cache file ("OXCA").
pub const CACHE_MAGIC: &[u8; 4] = b"OXCA";
/// On-disk format version; readers reject any other value.
pub const CACHE_VERSION: u32 = 1;
20
/// Errors produced when saving, loading, or validating a disk cache.
#[derive(Debug, thiserror::Error)]
pub enum DiskCacheError {
    /// Underlying filesystem or stream failure (including short reads).
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),
    /// The file does not begin with the [`CACHE_MAGIC`] bytes.
    #[error("invalid cache magic")]
    InvalidMagic,
    /// Header version differs from [`CACHE_VERSION`]; payload is the version found.
    #[error("unsupported cache version: {0}")]
    UnsupportedVersion(u32),
    /// Metadata JSON or a string field could not be parsed/decoded as UTF-8.
    #[error("metadata parse error: {0}")]
    MetadataParse(String),
    /// Cache predates its source.
    /// NOTE(review): never constructed in this file (`is_fresh` returns a
    /// bool instead) — presumably used by external callers; confirm.
    #[error("cache is stale")]
    StaleCache,
}
44
/// One named binary payload stored in a [`DiskCache`].
#[derive(Debug, Clone)]
pub struct CacheEntry {
    /// Entry name; lookups scan linearly for the first match.
    pub name: String,
    /// Raw payload bytes.
    pub data: Vec<u8>,
    /// Quantization label stored alongside the payload (free-form string).
    pub quant_type: String,
}
59
60impl CacheEntry {
61 pub fn new(name: impl Into<String>, data: Vec<u8>, quant_type: impl Into<String>) -> Self {
63 Self {
64 name: name.into(),
65 data,
66 quant_type: quant_type.into(),
67 }
68 }
69
70 pub fn size_bytes(&self) -> usize {
72 self.data.len()
73 }
74}
75
/// In-memory representation of an on-disk cache: named binary entries plus
/// string key/value metadata, serializable to a compact binary format.
#[derive(Debug)]
pub struct DiskCache {
    /// Cached payloads, kept in insertion order.
    entries: Vec<CacheEntry>,
    /// Free-form string metadata, serialized as a flat JSON object.
    metadata: HashMap<String, String>,
}
86
impl Default for DiskCache {
    /// Equivalent to [`DiskCache::new`]: an empty cache.
    fn default() -> Self {
        Self::new()
    }
}
92
93impl DiskCache {
94 pub fn new() -> Self {
96 Self {
97 entries: Vec::new(),
98 metadata: HashMap::new(),
99 }
100 }
101
102 pub fn add_entry(&mut self, entry: CacheEntry) {
104 self.entries.push(entry);
105 }
106
107 pub fn set_metadata(&mut self, key: impl Into<String>, value: impl Into<String>) {
109 self.metadata.insert(key.into(), value.into());
110 }
111
112 pub fn get_metadata(&self, key: &str) -> Option<&str> {
114 self.metadata.get(key).map(|s| s.as_str())
115 }
116
117 pub fn get_entry(&self, name: &str) -> Option<&CacheEntry> {
119 self.entries.iter().find(|e| e.name == name)
120 }
121
122 pub fn num_entries(&self) -> usize {
124 self.entries.len()
125 }
126
127 pub fn total_data_bytes(&self) -> usize {
129 self.entries.iter().map(|e| e.data.len()).sum()
130 }
131
132 pub fn save(&self, path: &Path) -> Result<(), DiskCacheError> {
136 let file = std::fs::File::create(path)?;
137 let mut writer = BufWriter::new(file);
138 self.write_to(&mut writer)
139 }
140
141 pub fn load(path: &Path) -> Result<Self, DiskCacheError> {
143 let file = std::fs::File::open(path)?;
144 let mut reader = BufReader::new(file);
145 Self::read_from(&mut reader)
146 }
147
148 pub fn write_to<W: Write>(&self, writer: &mut W) -> Result<(), DiskCacheError> {
150 writer.write_all(CACHE_MAGIC)?;
152
153 writer.write_all(&CACHE_VERSION.to_le_bytes())?;
155
156 writer.write_all(&(self.entries.len() as u64).to_le_bytes())?;
158
159 let meta_json = metadata_to_json(&self.metadata);
161 let meta_bytes = meta_json.as_bytes();
162 writer.write_all(&(meta_bytes.len() as u32).to_le_bytes())?;
163 writer.write_all(meta_bytes)?;
164
165 for entry in &self.entries {
167 let name_bytes = entry.name.as_bytes();
169 writer.write_all(&(name_bytes.len() as u32).to_le_bytes())?;
170 writer.write_all(name_bytes)?;
171
172 let qt_bytes = entry.quant_type.as_bytes();
174 writer.write_all(&(qt_bytes.len() as u32).to_le_bytes())?;
175 writer.write_all(qt_bytes)?;
176
177 writer.write_all(&(entry.data.len() as u64).to_le_bytes())?;
179 writer.write_all(&entry.data)?;
180 }
181
182 writer.flush()?;
183 Ok(())
184 }
185
186 pub fn read_from<R: Read>(reader: &mut R) -> Result<Self, DiskCacheError> {
188 let mut magic = [0u8; 4];
190 reader.read_exact(&mut magic)?;
191 if &magic != CACHE_MAGIC {
192 return Err(DiskCacheError::InvalidMagic);
193 }
194
195 let mut buf4 = [0u8; 4];
197 reader.read_exact(&mut buf4)?;
198 let version = u32::from_le_bytes(buf4);
199 if version != CACHE_VERSION {
200 return Err(DiskCacheError::UnsupportedVersion(version));
201 }
202
203 let mut buf8 = [0u8; 8];
205 reader.read_exact(&mut buf8)?;
206 let num_entries = u64::from_le_bytes(buf8) as usize;
207
208 reader.read_exact(&mut buf4)?;
210 let meta_len = u32::from_le_bytes(buf4) as usize;
211 let mut meta_buf = vec![0u8; meta_len];
212 reader.read_exact(&mut meta_buf)?;
213 let meta_str = String::from_utf8(meta_buf)
214 .map_err(|e| DiskCacheError::MetadataParse(e.to_string()))?;
215 let metadata = metadata_from_json(&meta_str)?;
216
217 let mut entries = Vec::with_capacity(num_entries);
219 for _ in 0..num_entries {
220 reader.read_exact(&mut buf4)?;
222 let name_len = u32::from_le_bytes(buf4) as usize;
223 let mut name_buf = vec![0u8; name_len];
224 reader.read_exact(&mut name_buf)?;
225 let name = String::from_utf8(name_buf)
226 .map_err(|e| DiskCacheError::MetadataParse(e.to_string()))?;
227
228 reader.read_exact(&mut buf4)?;
230 let qt_len = u32::from_le_bytes(buf4) as usize;
231 let mut qt_buf = vec![0u8; qt_len];
232 reader.read_exact(&mut qt_buf)?;
233 let quant_type = String::from_utf8(qt_buf)
234 .map_err(|e| DiskCacheError::MetadataParse(e.to_string()))?;
235
236 reader.read_exact(&mut buf8)?;
238 let data_len = u64::from_le_bytes(buf8) as usize;
239 let mut data = vec![0u8; data_len];
240 reader.read_exact(&mut data)?;
241
242 entries.push(CacheEntry {
243 name,
244 data,
245 quant_type,
246 });
247 }
248
249 Ok(Self { entries, metadata })
250 }
251
252 pub fn is_valid_cache(path: &Path) -> bool {
254 let file = match std::fs::File::open(path) {
255 Ok(f) => f,
256 Err(_) => return false,
257 };
258 let mut reader = BufReader::new(file);
259
260 let mut magic = [0u8; 4];
261 if reader.read_exact(&mut magic).is_err() {
262 return false;
263 }
264 if &magic != CACHE_MAGIC {
265 return false;
266 }
267
268 let mut buf4 = [0u8; 4];
269 if reader.read_exact(&mut buf4).is_err() {
270 return false;
271 }
272 let version = u32::from_le_bytes(buf4);
273 version == CACHE_VERSION
274 }
275
276 pub fn is_fresh(cache_path: &Path, source_path: &Path) -> Result<bool, DiskCacheError> {
278 let cache_meta = std::fs::metadata(cache_path)?;
279 let source_meta = std::fs::metadata(source_path)?;
280
281 let cache_time = cache_meta.modified().map_err(DiskCacheError::Io)?;
282 let source_time = source_meta.modified().map_err(DiskCacheError::Io)?;
283
284 Ok(cache_time >= source_time)
285 }
286}
287
/// Tracks cache files under a directory against a byte-size budget and
/// provides least-recently-used eviction ordering.
#[derive(Debug)]
pub struct CacheManager {
    /// Directory the managed cache files live in (used for display here).
    cache_dir: String,
    /// Size budget in bytes; usage above this means eviction is needed.
    max_cache_size_bytes: usize,
    /// Registered cache files, in registration order.
    entries: Vec<CacheFileInfo>,
}
299
/// Bookkeeping record for one cache file on disk.
#[derive(Debug, Clone)]
pub struct CacheFileInfo {
    /// Filesystem path of the cache file.
    pub path: String,
    /// Size of the file in bytes.
    pub size_bytes: usize,
    /// Last access time; oldest entries are evicted first.
    pub last_accessed: SystemTime,
    /// Name of the model this cache file belongs to.
    pub model_name: String,
}
312
313impl CacheManager {
314 pub fn new(cache_dir: impl Into<String>, max_size_bytes: usize) -> Self {
316 Self {
317 cache_dir: cache_dir.into(),
318 max_cache_size_bytes: max_size_bytes,
319 entries: Vec::new(),
320 }
321 }
322
323 pub fn register(&mut self, info: CacheFileInfo) {
325 self.entries.push(info);
326 }
327
328 pub fn total_used_bytes(&self) -> usize {
330 self.entries.iter().map(|e| e.size_bytes).sum()
331 }
332
333 pub fn should_evict(&self) -> bool {
335 self.total_used_bytes() > self.max_cache_size_bytes
336 }
337
338 pub fn eviction_candidates(&self) -> Vec<&CacheFileInfo> {
340 let mut sorted: Vec<&CacheFileInfo> = self.entries.iter().collect();
341 sorted.sort_by_key(|e| e.last_accessed);
342 sorted
343 }
344
345 pub fn utilization(&self) -> f32 {
347 if self.max_cache_size_bytes == 0 {
348 return 0.0;
349 }
350 self.total_used_bytes() as f32 / self.max_cache_size_bytes as f32
351 }
352
353 pub fn summary(&self) -> String {
355 let used_mb = self.total_used_bytes() as f64 / (1024.0 * 1024.0);
356 let max_mb = self.max_cache_size_bytes as f64 / (1024.0 * 1024.0);
357 let pct = self.utilization() * 100.0;
358 format!(
359 "Cache dir: {dir}, {n} models, {used:.1}/{max:.1} MB ({pct:.1}%)",
360 dir = self.cache_dir,
361 n = self.entries.len(),
362 used = used_mb,
363 max = max_mb,
364 )
365 }
366}
367
368fn metadata_to_json(map: &HashMap<String, String>) -> String {
374 let mut out = String::from("{");
375 let mut first = true;
376 let mut keys: Vec<&String> = map.keys().collect();
378 keys.sort();
379 for key in keys {
380 let value = &map[key];
381 if !first {
382 out.push(',');
383 }
384 first = false;
385 out.push('"');
386 json_escape_into(&mut out, key);
387 out.push_str("\":\"");
388 json_escape_into(&mut out, value);
389 out.push('"');
390 }
391 out.push('}');
392 out
393}
394
/// Parses the flat JSON object produced by `metadata_to_json` back into a
/// string map. Only string-to-string pairs are supported; nested objects,
/// arrays, numbers, and booleans are rejected. The parser is lenient about
/// separators: any mix of commas/whitespace between pairs is accepted.
fn metadata_from_json(s: &str) -> Result<HashMap<String, String>, DiskCacheError> {
    let s = s.trim();
    // Fast path for the empty object (and the degenerate empty string).
    if s == "{}" || s.is_empty() {
        return Ok(HashMap::new());
    }
    let bytes = s.as_bytes();
    // The input must be a braced object; anything else is malformed.
    if bytes.first() != Some(&b'{') || bytes.last() != Some(&b'}') {
        return Err(DiskCacheError::MetadataParse(format!(
            "expected JSON object, got: {s}"
        )));
    }
    // Strip the outer braces; byte-slicing is safe here because '{'/'}'
    // are single-byte ASCII, so the boundaries are valid UTF-8 positions.
    let inner = &s[1..s.len() - 1];
    let mut map = HashMap::new();
    if inner.trim().is_empty() {
        return Ok(map);
    }

    // Work over a char vector so `pos` indexes characters, not bytes.
    let chars: Vec<char> = inner.chars().collect();
    let mut pos = 0usize;

    // Each iteration consumes one "key":"value" pair.
    loop {
        // Skip separators: commas and whitespace (also tolerates a
        // leading/trailing comma and missing commas between pairs).
        while pos < chars.len() && (chars[pos] == ',' || chars[pos].is_whitespace()) {
            pos += 1;
        }
        if pos >= chars.len() {
            break;
        }
        // A pair must start with a quoted key.
        if chars[pos] != '"' {
            return Err(DiskCacheError::MetadataParse(format!(
                "expected '\"' at position {pos}, got '{}'",
                chars[pos]
            )));
        }
        pos += 1;
        // parse_json_string starts just past the opening quote and
        // returns the index just past the closing quote.
        let (key, new_pos) = parse_json_string(&chars, pos)?;
        pos = new_pos;

        skip_ws(&chars, &mut pos);
        // Key and value are separated by a colon.
        if pos >= chars.len() || chars[pos] != ':' {
            return Err(DiskCacheError::MetadataParse(format!(
                "expected ':' after key '{key}'"
            )));
        }
        pos += 1;
        skip_ws(&chars, &mut pos);

        // The value must also be a quoted string.
        if pos >= chars.len() || chars[pos] != '"' {
            return Err(DiskCacheError::MetadataParse(format!(
                "expected '\"' for value of key '{key}'"
            )));
        }
        pos += 1;
        let (value, new_pos) = parse_json_string(&chars, pos)?;
        pos = new_pos;

        // Duplicate keys: last occurrence wins (HashMap insert semantics).
        map.insert(key, value);
    }

    Ok(map)
}
458
459fn parse_json_string(chars: &[char], mut pos: usize) -> Result<(String, usize), DiskCacheError> {
460 let mut s = String::new();
461 while pos < chars.len() {
462 match chars[pos] {
463 '"' => {
464 pos += 1;
465 return Ok((s, pos));
466 }
467 '\\' => {
468 pos += 1;
469 if pos >= chars.len() {
470 return Err(DiskCacheError::MetadataParse(
471 "unexpected end after backslash".into(),
472 ));
473 }
474 match chars[pos] {
475 '"' => s.push('"'),
476 '\\' => s.push('\\'),
477 'n' => s.push('\n'),
478 'r' => s.push('\r'),
479 't' => s.push('\t'),
480 other => {
481 return Err(DiskCacheError::MetadataParse(format!(
482 "unknown escape '\\{other}'"
483 )));
484 }
485 }
486 pos += 1;
487 }
488 ch => {
489 s.push(ch);
490 pos += 1;
491 }
492 }
493 }
494 Err(DiskCacheError::MetadataParse("unterminated string".into()))
495}
496
/// Advances `*pos` past any run of whitespace characters; stops at the
/// first non-whitespace character or the end of the slice.
fn skip_ws(chars: &[char], pos: &mut usize) {
    while chars.get(*pos).map_or(false, |c| c.is_whitespace()) {
        *pos += 1;
    }
}
502
/// Appends `s` to `out`, escaping double quotes, backslashes, and the
/// newline/carriage-return/tab control characters so the result can be
/// embedded in a JSON string literal. All other characters pass through.
fn json_escape_into(out: &mut String, s: &str) {
    for ch in s.chars() {
        let replacement = match ch {
            '"' => r#"\""#,
            '\\' => r"\\",
            '\n' => r"\n",
            '\r' => r"\r",
            '\t' => r"\t",
            _ => {
                out.push(ch);
                continue;
            }
        };
        out.push_str(replacement);
    }
}