velesdb_core/cache/lru.rs
//! LRU Cache implementation for VelesDB.
//!
//! Thread-safe LRU cache built on `IndexMap`, which preserves insertion
//! order (front = least recently used, back = most recently used).
//! Based on arXiv:2310.11703v2 recommendations.
//!
//! # Performance (US-CORE-003-14)
//!
//! | Operation | Complexity | Notes |
//! |-----------|------------|-------|
//! | insert | O(1) amortized | O(n) when the key already exists or an eviction is needed (`shift_remove`) |
//! | get | O(1) lookup | Recency update uses `shift_remove` + re-insert, O(n) |
//! | remove | O(1) | `swap_remove` |
//! | eviction | O(n) | `shift_remove_index(0)` shifts the remaining entries |

#![allow(clippy::cast_precision_loss)] // Precision loss acceptable for hit rate calculation

use indexmap::IndexMap;
use parking_lot::RwLock;
use std::hash::Hash;
use std::sync::atomic::{AtomicU64, Ordering};

/// Cache statistics for monitoring.
#[derive(Debug, Clone, Default)]
pub struct CacheStats {
    /// Number of cache hits.
    pub hits: u64,
    /// Number of cache misses.
    pub misses: u64,
    /// Number of evictions.
    pub evictions: u64,
}

impl CacheStats {
    /// Calculate hit rate (0.0 to 1.0).
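    ///
    /// A minimal sketch of the calculation (doc-test ignored, since the
    /// import path for `CacheStats` is assumed rather than confirmed here):
    ///
    /// ```ignore
    /// // Hypothetical import path; adjust to the crate's actual re-export.
    /// use velesdb_core::cache::lru::CacheStats;
    ///
    /// let stats = CacheStats { hits: 3, misses: 1, evictions: 0 };
    /// // 3 hits out of 4 lookups => 0.75.
    /// assert!((stats.hit_rate() - 0.75).abs() < f64::EPSILON);
    /// ```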
    #[must_use]
    pub fn hit_rate(&self) -> f64 {
        let total = self.hits + self.misses;
        if total == 0 {
            0.0
        } else {
            self.hits as f64 / total as f64
        }
    }
}

/// Thread-safe LRU cache built on `IndexMap`.
///
/// `IndexMap` preserves insertion order (front = LRU, back = MRU) and
/// provides O(1) hashed lookups. Recency updates move an entry to the back
/// via `shift_remove` + re-insert, which is O(n) in the number of entries.
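///
/// # Examples
///
/// A minimal usage sketch (doc-test ignored because the import path is
/// assumed, not confirmed by this file alone):
///
/// ```ignore
/// // Hypothetical import path; adjust to the crate's actual re-export.
/// use velesdb_core::cache::lru::LruCache;
///
/// let cache: LruCache<String, u64> = LruCache::new(2);
/// cache.insert("a".to_string(), 1);
/// cache.insert("b".to_string(), 2);
/// assert_eq!(cache.get(&"a".to_string()), Some(1)); // "a" becomes MRU
/// cache.insert("c".to_string(), 3);                 // evicts "b" (LRU)
/// assert_eq!(cache.peek(&"b".to_string()), None);
/// assert_eq!(cache.stats().evictions, 1);
/// ```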
pub struct LruCache<K, V>
where
    K: Hash + Eq + Clone,
    V: Clone,
{
    /// Maximum capacity.
    capacity: usize,
    /// Internal data protected by `RwLock`.
    /// `IndexMap` preserves insertion order (front = LRU, back = MRU).
    inner: RwLock<IndexMap<K, V>>,
    /// Statistics (atomic for lock-free reads).
    hits: AtomicU64,
    misses: AtomicU64,
    evictions: AtomicU64,
}

impl<K, V> LruCache<K, V>
where
    K: Hash + Eq + Clone,
    V: Clone,
{
    /// Create a new LRU cache with the given capacity.
    #[must_use]
    pub fn new(capacity: usize) -> Self {
        Self {
            capacity,
            inner: RwLock::new(IndexMap::with_capacity(capacity)),
            hits: AtomicU64::new(0),
            misses: AtomicU64::new(0),
            evictions: AtomicU64::new(0),
        }
    }

    /// Get the capacity of the cache.
    #[must_use]
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Get the current number of entries.
    #[must_use]
    pub fn len(&self) -> usize {
        self.inner.read().len()
    }

    /// Check if the cache is empty.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.inner.read().is_empty()
    }

    /// Insert a key-value pair, evicting the LRU entry if at capacity.
    ///
    /// O(1) amortized for new keys; updating an existing key or evicting
    /// uses `shift_remove`, which is O(n).
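    ///
    /// Re-inserting an existing key replaces its value and refreshes its
    /// recency. A minimal sketch (doc-test ignored; import path assumed):
    ///
    /// ```ignore
    /// // Import path assumed; adjust to the crate's actual re-export.
    /// use velesdb_core::cache::lru::LruCache;
    ///
    /// let cache = LruCache::new(2);
    /// cache.insert("a", 1);
    /// cache.insert("b", 2);
    /// cache.insert("a", 10); // updates "a" and makes it most recently used
    /// cache.insert("c", 3);  // evicts "b", now the least recently used
    /// assert_eq!(cache.peek(&"a"), Some(10));
    /// assert_eq!(cache.peek(&"b"), None);
    /// ```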
    pub fn insert(&self, key: K, value: V) {
        let mut inner = self.inner.write();

        // If the key already exists, remove it and re-insert at the back
        // so the entry becomes most recently used with the new value.
        if inner.shift_remove(&key).is_some() {
            inner.insert(key, value);
            return;
        }

        // Evict the LRU entry (front) if at capacity.
        if inner.len() >= self.capacity {
            // shift_remove_index(0) removes the first (least recently used) entry.
            if inner.shift_remove_index(0).is_some() {
                self.evictions.fetch_add(1, Ordering::Relaxed);
            }
        }

        // Insert the new entry at the back (MRU).
        inner.insert(key, value);
    }

    /// Get a value by key, updating recency.
    ///
    /// Lookup is O(1) under a read lock; on a hit, the recency update takes
    /// a write lock and re-inserts the entry via `shift_remove`, which is O(n).
    #[must_use]
    pub fn get(&self, key: &K) -> Option<V> {
        // Fast path: read lock to check existence and clone the value.
        let value = {
            let inner = self.inner.read();
            inner.get(key).cloned()
        };

        match value {
            Some(v) => {
                self.hits.fetch_add(1, Ordering::Relaxed);
                // Update recency: move the entry to the back (MRU position).
                self.move_to_back(key);
                Some(v)
            }
            None => {
                self.misses.fetch_add(1, Ordering::Relaxed);
                None
            }
        }
    }

    /// Get a value without updating recency (peek).
    ///
    /// O(1) complexity with only a read lock.
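    ///
    /// Unlike [`get`](Self::get), `peek` counts neither a hit nor a miss and
    /// leaves the eviction order untouched. A minimal sketch (doc-test
    /// ignored; import path assumed):
    ///
    /// ```ignore
    /// // Import path assumed; adjust to the crate's actual re-export.
    /// use velesdb_core::cache::lru::LruCache;
    ///
    /// let cache = LruCache::new(2);
    /// cache.insert("a", 1);
    /// assert_eq!(cache.peek(&"a"), Some(1));
    /// assert_eq!(cache.stats().hits, 0); // statistics are untouched
    /// ```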
    #[must_use]
    pub fn peek(&self, key: &K) -> Option<V> {
        let inner = self.inner.read();
        inner.get(key).cloned()
    }

    /// Remove a key from the cache.
    ///
    /// O(1) complexity using `swap_remove`. The last (most recently used)
    /// entry is swapped into the removed slot, so its recency is demoted.
    pub fn remove(&self, key: &K) {
        let mut inner = self.inner.write();
        inner.swap_remove(key);
    }

    /// Clear all entries (statistics are not reset).
    pub fn clear(&self) {
        let mut inner = self.inner.write();
        inner.clear();
    }

    /// Get cache statistics.
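    ///
    /// A minimal sketch of how hits and misses are counted (doc-test
    /// ignored; import path assumed):
    ///
    /// ```ignore
    /// // Import path assumed; adjust to the crate's actual re-export.
    /// use velesdb_core::cache::lru::LruCache;
    ///
    /// let cache = LruCache::new(4);
    /// cache.insert("a", 1);
    /// assert_eq!(cache.get(&"a"), Some(1)); // hit
    /// assert_eq!(cache.get(&"x"), None);    // miss
    /// let stats = cache.stats();
    /// assert_eq!((stats.hits, stats.misses), (1, 1));
    /// ```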
    #[must_use]
    pub fn stats(&self) -> CacheStats {
        CacheStats {
            hits: self.hits.load(Ordering::Relaxed),
            misses: self.misses.load(Ordering::Relaxed),
            evictions: self.evictions.load(Ordering::Relaxed),
        }
    }

    /// Move a key to the back (most recently used position).
    ///
    /// Re-inserts the value currently stored in the map, so a concurrent
    /// update between the caller's read and this write lock is not clobbered,
    /// and a concurrently removed key is not resurrected. O(n): `shift_remove`
    /// shifts the entries that followed the key.
    fn move_to_back(&self, key: &K) {
        let mut inner = self.inner.write();
        if let Some(value) = inner.shift_remove(key) {
            inner.insert(key.clone(), value);
        }
    }
}

impl<K, V> Default for LruCache<K, V>
where
    K: Hash + Eq + Clone,
    V: Clone,
{
    fn default() -> Self {
        Self::new(10_000) // Default 10K entries
    }
}
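
// Minimal illustrative tests for the behaviour documented above; a sketch of
// the expected semantics rather than an exhaustive suite.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn evicts_least_recently_used_entry() {
        let cache: LruCache<&str, u32> = LruCache::new(2);
        cache.insert("a", 1);
        cache.insert("b", 2);
        // Touch "a" so that "b" becomes the least recently used entry.
        assert_eq!(cache.get(&"a"), Some(1));
        cache.insert("c", 3);
        assert_eq!(cache.peek(&"b"), None);
        assert_eq!(cache.peek(&"a"), Some(1));
        assert_eq!(cache.stats().evictions, 1);
    }

    #[test]
    fn reinserting_updates_value_and_recency() {
        let cache = LruCache::new(2);
        cache.insert("a", 1);
        cache.insert("b", 2);
        cache.insert("a", 10); // "a" is now MRU with the new value
        cache.insert("c", 3); // evicts "b"
        assert_eq!(cache.peek(&"a"), Some(10));
        assert_eq!(cache.peek(&"b"), None);
    }

    #[test]
    fn hit_rate_reflects_hits_and_misses() {
        let cache = LruCache::new(4);
        cache.insert("a", 1);
        assert_eq!(cache.get(&"a"), Some(1)); // hit
        assert_eq!(cache.get(&"x"), None); // miss
        let stats = cache.stats();
        assert_eq!((stats.hits, stats.misses), (1, 1));
        assert!((stats.hit_rate() - 0.5).abs() < f64::EPSILON);
    }

    #[test]
    fn remove_and_clear_empty_the_cache() {
        let cache = LruCache::new(2);
        cache.insert("a", 1);
        cache.insert("b", 2);
        cache.remove(&"a");
        assert_eq!(cache.len(), 1);
        cache.clear();
        assert!(cache.is_empty());
    }
}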