1use lru::LruCache;
25use moka::sync::Cache as MokaCache;
26use std::num::NonZeroUsize;
27use std::sync::atomic::{AtomicU64, Ordering};
28use std::sync::{Arc, RwLock};
29
/// Selects which backing implementation `create_cache` instantiates.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum CacheStrategy {
    /// Classic least-recently-used cache (`LruCacheWrapper`): an `lru::LruCache`
    /// guarded by a single `RwLock`.
    Lru,

    /// Concurrent cache from the `moka` crate (`MokaCacheWrapper`), which
    /// handles its own internal synchronization. This is the default.
    #[default]
    Moka,
}
43
/// Point-in-time snapshot of a cache's hit/miss counters and sizing.
#[derive(Debug, Clone)]
pub struct CacheStats {
    /// Lookups that found an entry.
    pub hits: u64,
    /// Lookups that found nothing.
    pub misses: u64,
    /// Entries currently stored (may be approximate for concurrent backends).
    pub entry_count: u64,
    /// Maximum number of entries the cache was configured to hold.
    pub capacity: u64,
}

impl CacheStats {
    /// Percentage of recorded lookups that were hits, in `[0.0, 100.0]`.
    ///
    /// Returns `0.0` when no lookups have been recorded at all, avoiding a
    /// division by zero.
    pub fn hit_rate(&self) -> f64 {
        match self.hits + self.misses {
            0 => 0.0,
            total => (self.hits as f64 / total as f64) * 100.0,
        }
    }
}
68
/// Minimal string-keyed `u64` cache interface shared by the LRU and Moka
/// backends. Implementations must be usable across threads (`Send + Sync`)
/// and are expected to track their own hit/miss counters for `stats`.
pub(crate) trait Cache: Send + Sync {
    /// Looks up `key`, returning the cached value if present.
    /// Implementations also record the lookup as a hit or a miss.
    fn get(&self, key: &str) -> Option<u64>;

    /// Inserts or replaces the value stored under `key`.
    fn put(&self, key: String, value: u64);

    /// Returns a snapshot of the cache's counters and sizing information.
    fn stats(&self) -> CacheStats;
}
80
/// [`Cache`] backend: an `lru::LruCache` behind a single `RwLock`.
pub(crate) struct LruCacheWrapper {
    /// The LRU map. `LruCache::get` takes `&mut self` (a lookup reorders the
    /// recency list), so even reads go through the write lock — see `get`.
    cache: Arc<RwLock<LruCache<String, u64>>>,
    /// Configured maximum entry count, kept only for `stats()` reporting.
    capacity: usize,
    /// Lookups that found an entry.
    hits: Arc<AtomicU64>,
    /// Lookups that found nothing.
    misses: Arc<AtomicU64>,
}
88
89impl LruCacheWrapper {
90 pub fn new(capacity: usize) -> Self {
91 let capacity_nz = NonZeroUsize::new(capacity).expect("Cache capacity must be non-zero");
92 Self {
93 cache: Arc::new(RwLock::new(LruCache::new(capacity_nz))),
94 capacity,
95 hits: Arc::new(AtomicU64::new(0)),
96 misses: Arc::new(AtomicU64::new(0)),
97 }
98 }
99}
100
101impl Cache for LruCacheWrapper {
102 fn get(&self, key: &str) -> Option<u64> {
103 if let Ok(mut cache) = self.cache.write() {
104 let result = cache.get(key).copied();
105 if result.is_some() {
106 self.hits.fetch_add(1, Ordering::Relaxed);
107 } else {
108 self.misses.fetch_add(1, Ordering::Relaxed);
109 }
110 result
111 } else {
112 None
113 }
114 }
115
116 fn put(&self, key: String, value: u64) {
117 if let Ok(mut cache) = self.cache.write() {
118 cache.put(key, value);
119 }
120 }
121
122 fn stats(&self) -> CacheStats {
123 let entry_count = if let Ok(cache) = self.cache.read() {
124 cache.len() as u64
125 } else {
126 0
127 };
128
129 CacheStats {
130 hits: self.hits.load(Ordering::Relaxed),
131 misses: self.misses.load(Ordering::Relaxed),
132 entry_count,
133 capacity: self.capacity as u64,
134 }
135 }
136}
137
/// [`Cache`] backend: moka's internally-synchronized concurrent cache.
pub(crate) struct MokaCacheWrapper {
    /// The moka cache; it manages its own concurrency, so no lock is needed.
    cache: MokaCache<String, u64>,
    /// Configured maximum capacity, kept only for `stats()` reporting.
    capacity: usize,
    /// Lookups that found an entry (tracked manually in `get`).
    hits: Arc<AtomicU64>,
    /// Lookups that found nothing (tracked manually in `get`).
    misses: Arc<AtomicU64>,
    // NOTE(review): the Arc around the counters looks redundant — the wrapper
    // itself is already shared as Arc<dyn Cache> — but removing it would also
    // require changing new(), so it is only flagged here.
}
145
146impl MokaCacheWrapper {
147 pub fn new(capacity: usize) -> Self {
148 Self {
149 cache: MokaCache::builder().max_capacity(capacity as u64).build(),
150 capacity,
151 hits: Arc::new(AtomicU64::new(0)),
152 misses: Arc::new(AtomicU64::new(0)),
153 }
154 }
155}
156
157impl Cache for MokaCacheWrapper {
158 fn get(&self, key: &str) -> Option<u64> {
159 let result = self.cache.get(key);
160 if result.is_some() {
161 self.hits.fetch_add(1, Ordering::Relaxed);
162 } else {
163 self.misses.fetch_add(1, Ordering::Relaxed);
164 }
165 result
166 }
167
168 fn put(&self, key: String, value: u64) {
169 self.cache.insert(key, value);
170 }
171
172 fn stats(&self) -> CacheStats {
173 let hits = self.hits.load(Ordering::Relaxed);
174 let misses = self.misses.load(Ordering::Relaxed);
175 let entry_count = self.cache.entry_count();
176
177 CacheStats {
178 hits,
179 misses,
180 entry_count,
181 capacity: self.capacity as u64,
182 }
183 }
184}
185
186pub(crate) fn create_cache(strategy: CacheStrategy, capacity: usize) -> Arc<dyn Cache> {
188 match strategy {
189 CacheStrategy::Lru => Arc::new(LruCacheWrapper::new(capacity)),
190 CacheStrategy::Moka => Arc::new(MokaCacheWrapper::new(capacity)),
191 }
192}