use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, Ordering};
use parking_lot::RwLock;
use rkyv::{
Archive, Deserialize, Serialize,
rancor::Error as RkyvError,
to_bytes, access,
};
/// Default capacity of [`FileEntryCache`] before eviction kicks in.
const DEFAULT_MAX_ENTRIES: usize = 10_000;
/// Default capacity of [`DirListingCache`] before eviction kicks in.
const DEFAULT_MAX_DIR_ENTRIES: usize = 1_000;
/// Metadata for a single file or directory, stored in the cache as
/// rkyv-serialized bytes and deserialized on lookup.
#[derive(Archive, Deserialize, Serialize, Debug, Clone)]
pub struct CachedFileEntry {
    // Entry identifier; presumably a database row id — confirm against the producer.
    pub id: i64,
    // Id of the containing directory's entry; `None` for a root entry.
    pub parent_id: Option<i64>,
    // Base name of the entry (not the full path; paths are the cache keys).
    pub name: String,
    pub is_dir: bool,
    // Size in bytes.
    pub size: u64,
    // Content hash (e.g. for change detection); `None` when not computed. Format unknown from here.
    pub content_hash: Option<String>,
    // Timestamps; unit/epoch not visible from this file — presumably Unix seconds, confirm.
    pub created_at: i64,
    pub modified_at: i64,
}
/// A cached snapshot of one directory's children.
#[derive(Archive, Deserialize, Serialize, Debug, Clone)]
pub struct CachedDirListing {
    // Path of the directory this listing describes.
    pub path: String,
    // Immediate children of the directory.
    pub entries: Vec<CachedDirEntry>,
    // Time the snapshot was taken; unit not visible here — presumably Unix seconds, confirm.
    pub cached_at: i64,
}
/// One child row inside a [`CachedDirListing`] — a slimmer view than
/// [`CachedFileEntry`] (no ids or content hash).
#[derive(Archive, Deserialize, Serialize, Debug, Clone)]
pub struct CachedDirEntry {
    pub name: String,
    pub is_dir: bool,
    // Size in bytes.
    pub size: u64,
    // Modification time; unit not visible here — presumably Unix seconds, confirm.
    pub modified_at: i64,
}
/// Lock-free hit/miss/size counters for a cache. All updates use
/// `Ordering::Relaxed`: the numbers are advisory, not synchronization points.
#[derive(Debug, Default)]
pub struct CacheStats {
    // Count of successful lookups.
    pub hits: AtomicU64,
    // Count of failed lookups.
    pub misses: AtomicU64,
    // Current entry count (a gauge, overwritten after each mutation — not a counter).
    pub entries: AtomicU64,
}
impl CacheStats {
    /// Record one successful lookup.
    pub fn hit(&self) {
        self.hits.fetch_add(1, Ordering::Relaxed);
    }

    /// Record one failed lookup.
    pub fn miss(&self) {
        self.misses.fetch_add(1, Ordering::Relaxed);
    }

    /// Hit rate as a percentage in `[0.0, 100.0]`.
    ///
    /// Returns `0.0` when no lookups have been recorded yet. Reads are
    /// relaxed, so the value is a best-effort snapshot under concurrency.
    pub fn hit_rate(&self) -> f64 {
        let hits = self.hits.load(Ordering::Relaxed);
        let total = hits + self.misses.load(Ordering::Relaxed);
        match total {
            0 => 0.0,
            _ => (hits as f64 / total as f64) * 100.0,
        }
    }
}
/// Path-keyed cache of [`CachedFileEntry`] values, each stored as
/// rkyv-serialized bytes and deserialized on read.
pub struct FileEntryCache {
    // path -> rkyv bytes of a `CachedFileEntry`.
    entries: RwLock<HashMap<String, Vec<u8>>>,
    // Soft capacity: once reached, `insert` evicts a batch of entries.
    max_entries: usize,
    // Publicly readable hit/miss/size counters.
    pub stats: CacheStats,
}
impl FileEntryCache {
    /// Create a cache with the default capacity ([`DEFAULT_MAX_ENTRIES`]).
    pub fn new() -> Self {
        Self::with_capacity(DEFAULT_MAX_ENTRIES)
    }

    /// Create a cache that starts evicting once it holds `max_entries` items.
    pub fn with_capacity(max_entries: usize) -> Self {
        Self {
            // Pre-allocate a quarter of the budget; the map grows on demand.
            entries: RwLock::new(HashMap::with_capacity(max_entries / 4)),
            max_entries,
            stats: CacheStats::default(),
        }
    }

    /// Look up `path` and deserialize the stored bytes back into a
    /// [`CachedFileEntry`].
    ///
    /// A hit is counted only when a fully valid entry is returned; a missing
    /// key, a buffer that fails rkyv validation, or a failed deserialization
    /// all count as a miss. (Fix: the original counted a hit as soon as
    /// `access` succeeded, even if deserialization then failed and `None`
    /// was returned — inflating the hit rate.)
    pub fn get(&self, path: &str) -> Option<CachedFileEntry> {
        let entries = self.entries.read();
        let result = entries.get(path).and_then(|bytes| {
            // NOTE(review): `access` validates the buffer, which includes alignment;
            // bytes are stored in a plain `Vec<u8>` whose allocation alignment is not
            // guaranteed to satisfy rkyv — confirm for the rkyv version in use.
            let archived = access::<ArchivedCachedFileEntry, RkyvError>(bytes).ok()?;
            rkyv::deserialize::<CachedFileEntry, RkyvError>(archived).ok()
        });
        if result.is_some() {
            self.stats.hit();
        } else {
            self.stats.miss();
        }
        result
    }

    /// Serialize `entry` with rkyv and store it under `path`.
    ///
    /// Best-effort: a value that fails to serialize is silently not cached.
    /// When the cache is at capacity, roughly half of the entries are evicted
    /// (arbitrary order — `HashMap` iteration order). At least one entry is
    /// always evicted so the cache cannot stall. (Fix: with `max_entries < 2`
    /// the original evicted `max_entries / 2 == 0` keys and grew unbounded.)
    pub fn insert(&self, path: String, entry: CachedFileEntry) {
        let Ok(bytes) = to_bytes::<RkyvError>(&entry) else {
            return; // deliberate best-effort: caching failure is not an error
        };
        let mut entries = self.entries.write();
        if entries.len() >= self.max_entries {
            let evict = (self.max_entries / 2).max(1);
            let doomed: Vec<String> = entries.keys().take(evict).cloned().collect();
            for key in doomed {
                entries.remove(&key);
            }
        }
        entries.insert(path, bytes.to_vec());
        self.stats.entries.store(entries.len() as u64, Ordering::Relaxed);
    }

    /// Remove the entry for `path`, if present.
    pub fn invalidate(&self, path: &str) {
        let mut entries = self.entries.write();
        entries.remove(path);
        self.stats.entries.store(entries.len() as u64, Ordering::Relaxed);
    }

    /// Remove every entry whose key starts with `prefix` (e.g. a whole subtree).
    pub fn invalidate_prefix(&self, prefix: &str) {
        let mut entries = self.entries.write();
        entries.retain(|k, _| !k.starts_with(prefix));
        self.stats.entries.store(entries.len() as u64, Ordering::Relaxed);
    }

    /// Drop all entries.
    pub fn clear(&self) {
        let mut entries = self.entries.write();
        entries.clear();
        self.stats.entries.store(0, Ordering::Relaxed);
    }

    /// Current number of cached entries.
    pub fn len(&self) -> usize {
        self.entries.read().len()
    }

    /// True when the cache holds no entries.
    pub fn is_empty(&self) -> bool {
        self.entries.read().is_empty()
    }
}
impl Default for FileEntryCache {
fn default() -> Self {
Self::new()
}
}
/// Path-keyed cache of [`CachedDirListing`] values, each stored as
/// rkyv-serialized bytes and deserialized on read.
pub struct DirListingCache {
    // directory path -> rkyv bytes of a `CachedDirListing`.
    listings: RwLock<HashMap<String, Vec<u8>>>,
    // Soft capacity: once reached, `insert` evicts a batch of listings.
    max_entries: usize,
    // Publicly readable hit/miss/size counters.
    pub stats: CacheStats,
}
impl DirListingCache {
    /// Create a cache with the default capacity ([`DEFAULT_MAX_DIR_ENTRIES`]).
    pub fn new() -> Self {
        Self::with_capacity(DEFAULT_MAX_DIR_ENTRIES)
    }

    /// Create a cache that starts evicting once it holds `max_entries` listings.
    pub fn with_capacity(max_entries: usize) -> Self {
        Self {
            // Pre-allocate a quarter of the budget; the map grows on demand.
            listings: RwLock::new(HashMap::with_capacity(max_entries / 4)),
            max_entries,
            stats: CacheStats::default(),
        }
    }

    /// Look up the listing for `path`, deserializing the stored bytes.
    ///
    /// A hit is counted only when a fully valid listing is returned; a
    /// missing key, failed rkyv validation, or failed deserialization all
    /// count as a miss. (Fix: the original counted a hit as soon as `access`
    /// succeeded, even if deserialization then failed and `None` was
    /// returned — inflating the hit rate.)
    pub fn get(&self, path: &str) -> Option<CachedDirListing> {
        let listings = self.listings.read();
        let result = listings.get(path).and_then(|bytes| {
            // NOTE(review): `access` validates the buffer, which includes alignment;
            // bytes are stored in a plain `Vec<u8>` whose allocation alignment is not
            // guaranteed to satisfy rkyv — confirm for the rkyv version in use.
            let archived = access::<ArchivedCachedDirListing, RkyvError>(bytes).ok()?;
            rkyv::deserialize::<CachedDirListing, RkyvError>(archived).ok()
        });
        if result.is_some() {
            self.stats.hit();
        } else {
            self.stats.miss();
        }
        result
    }

    /// Serialize `listing` with rkyv and store it under `path`.
    ///
    /// Best-effort: a listing that fails to serialize is silently not cached.
    /// When at capacity, roughly half of the listings are evicted (arbitrary
    /// order); at least one is always evicted so the cache cannot stall.
    /// (Fix: with `max_entries < 2` the original evicted `max_entries / 2
    /// == 0` keys and grew unbounded.)
    pub fn insert(&self, path: String, listing: CachedDirListing) {
        let Ok(bytes) = to_bytes::<RkyvError>(&listing) else {
            return; // deliberate best-effort: caching failure is not an error
        };
        let mut listings = self.listings.write();
        if listings.len() >= self.max_entries {
            let evict = (self.max_entries / 2).max(1);
            let doomed: Vec<String> = listings.keys().take(evict).cloned().collect();
            for key in doomed {
                listings.remove(&key);
            }
        }
        listings.insert(path, bytes.to_vec());
        self.stats.entries.store(listings.len() as u64, Ordering::Relaxed);
    }

    /// Remove the listing for `path`, and also the listing of its parent
    /// directory (the parent's snapshot names this child, so it is stale too).
    /// For a top-level path like "/docs" the parent resolves to "/".
    pub fn invalidate(&self, path: &str) {
        let mut listings = self.listings.write();
        listings.remove(path);
        if let Some(parent) = path.rsplit_once('/').map(|(p, _)| p) {
            let parent_path = if parent.is_empty() { "/" } else { parent };
            listings.remove(parent_path);
        }
        self.stats.entries.store(listings.len() as u64, Ordering::Relaxed);
    }

    /// Drop all listings.
    pub fn clear(&self) {
        let mut listings = self.listings.write();
        listings.clear();
        self.stats.entries.store(0, Ordering::Relaxed);
    }
}
impl Default for DirListingCache {
fn default() -> Self {
Self::new()
}
}
/// Facade bundling the file-entry and directory-listing caches so callers
/// can invalidate or clear both with one call.
pub struct Cache {
    // Per-file metadata cache.
    pub files: FileEntryCache,
    // Per-directory listing cache.
    pub dirs: DirListingCache,
}
impl Cache {
    /// Build a combined cache from default-capacity sub-caches.
    pub fn new() -> Self {
        let files = FileEntryCache::new();
        let dirs = DirListingCache::new();
        Self { files, dirs }
    }

    /// Drop cached data for `path` from both sub-caches.
    pub fn invalidate(&self, path: &str) {
        self.files.invalidate(path);
        self.dirs.invalidate(path);
    }

    /// Empty both sub-caches.
    pub fn clear(&self) {
        self.files.clear();
        self.dirs.clear();
    }
}
impl Default for Cache {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Round-trip a file entry through the cache, then invalidate it.
    #[test]
    fn test_file_entry_cache() {
        let cache = FileEntryCache::new();
        let entry = CachedFileEntry {
            id: 1,
            parent_id: Some(0),
            name: String::from("test.txt"),
            is_dir: false,
            size: 1024,
            content_hash: Some(String::from("abc123")),
            created_at: 1234567890,
            modified_at: 1234567890,
        };
        cache.insert(String::from("/test.txt"), entry.clone());

        let fetched = cache.get("/test.txt").expect("entry should be cached");
        assert_eq!(fetched.name, "test.txt");
        assert_eq!(fetched.size, 1024);

        cache.invalidate("/test.txt");
        assert!(cache.get("/test.txt").is_none());
    }

    /// Round-trip a directory listing through the cache.
    #[test]
    fn test_dir_listing_cache() {
        let cache = DirListingCache::new();
        let child = CachedDirEntry {
            name: String::from("readme.txt"),
            is_dir: false,
            size: 512,
            modified_at: 1234567890,
        };
        let listing = CachedDirListing {
            path: String::from("/docs"),
            entries: vec![child],
            cached_at: 1234567890,
        };
        cache.insert(String::from("/docs"), listing);

        let fetched = cache.get("/docs").expect("listing should be cached");
        assert_eq!(fetched.entries.len(), 1);
        assert_eq!(fetched.entries[0].name, "readme.txt");
    }
}