cachelito_core/global_cache.rs
1use crate::{CacheEntry, EvictionPolicy};
2use once_cell::sync::Lazy;
3use parking_lot::lock_api::MutexGuard;
4use parking_lot::{Mutex, RawMutex, RwLock};
5use std::collections::{HashMap, VecDeque};
6use std::fmt::Debug;
7
8use crate::utils::{
9 find_arc_eviction_key, find_min_frequency_key, find_tlru_eviction_key, move_key_to_end,
10 remove_key_from_global_cache,
11};
12#[cfg(feature = "stats")]
13use crate::CacheStats;
14
15/// A thread-safe global cache that can be shared across multiple threads.
16///
17/// Unlike `ThreadLocalCache` which uses thread-local storage, `GlobalCache` stores
18/// cached values in global static variables protected by locks, allowing cache
19/// sharing across all threads in the application.
20///
21/// # Type Parameters
22///
23/// * `R` - The return type to be cached. Must be `'static` to be stored in global state.
24///
25/// # Features
26///
27/// - **Thread-safe sharing**: Multiple threads access the same cache through RwLock/Mutex
/// - **Eviction policies**: FIFO, LRU, LFU, ARC, Random, TLRU, and W-TinyLFU
29/// - **FIFO**: First In, First Out - simple and predictable
30/// - **LRU**: Least Recently Used - evicts least recently accessed entries
31/// - **LFU**: Least Frequently Used - evicts least frequently accessed entries
32/// - **ARC**: Adaptive Replacement Cache - hybrid policy combining recency and frequency
33/// - **Random**: Random replacement - O(1) eviction with minimal overhead
34/// - **TLRU**: Time-aware LRU - combines recency, frequency, and age factors
35/// - Customizable with `frequency_weight` parameter
36/// - Formula: `score = frequency^weight × position × age_factor`
37/// - `frequency_weight < 1.0`: Emphasize recency (time-sensitive data)
38/// - `frequency_weight > 1.0`: Emphasize frequency (popular content)
39/// - **Cache limits**: Entry count limits (`limit`) and memory-based limits (`max_memory`)
40/// - **TTL support**: Automatic expiration of entries based on age
41/// - **Statistics**: Optional cache hit/miss tracking (with `stats` feature)
42/// - **Frequency tracking**: For LFU, ARC, and TLRU policies
43/// - **Memory estimation**: Support for memory-based eviction (requires `MemoryEstimator`)
44///
45/// # Cache Entry Structure
46///
47/// Cache entries are stored as `CacheEntry<R>` which contains:
48/// - `value`: The cached value of type R
49/// - `timestamp`: Unix timestamp when the entry was created (for TTL and TLRU age factor)
50/// - `frequency`: Access counter for LFU, ARC, and TLRU policies
51///
52/// # Eviction Behavior
53///
54/// When the cache reaches its limit (entry count or memory), entries are evicted according
55/// to the configured policy:
56///
/// - **FIFO**: Oldest entry (first in order queue) is evicted
/// - **LRU**: Least recently accessed entry (first in order queue) is evicted
/// - **LFU**: Entry with lowest frequency counter is evicted
/// - **ARC**: Entry with lowest score (frequency × position_weight) is evicted
/// - **Random**: Randomly selected entry is evicted
/// - **TLRU**: Entry with lowest score (frequency^weight × position × age_factor) is evicted
/// - **W-TinyLFU**: Simplified window/protected-segment eviction (FIFO within the
///   window segment, LFU within the protected segment)
63///
64/// # Thread Safety
65///
66/// This cache uses `parking_lot::RwLock` for the cache map and `parking_lot::Mutex` for the order queue.
67/// The `parking_lot` implementation provides:
68/// - **RwLock for reads**: Multiple threads can read concurrently without blocking
69/// - **No lock poisoning** (simpler API, no `Result` wrapping)
70/// - **Better performance** under contention (30-50% faster than std::sync)
71/// - **Smaller memory footprint** (~40x smaller than std::sync)
72/// - **Fair locking algorithm** prevents thread starvation
73///
74/// **Read-heavy workloads** (typical for caches) see 4-5x performance improvement with RwLock
75/// compared to Mutex, as multiple threads can read the cache simultaneously.
76///
77/// # Performance Characteristics
78///
79/// - **Get**: O(1) for cache lookup, O(n) for LRU/ARC/TLRU reordering
80/// - **Insert**: O(1) for FIFO/Random, O(n) for LRU/LFU/ARC/TLRU eviction
81/// - **Memory**: O(n) where n is the number of cached entries
82/// - **Synchronization**: Lock acquisition overhead on every operation
83///
84/// # Performance Considerations
85///
86/// - **Synchronization overhead**: Each cache operation requires acquiring locks
87/// - **Lock contention**: High concurrent access may cause threads to wait
88/// - **Read optimization**: RwLock allows concurrent reads (no blocking for cache hits)
89/// - **Write bottleneck**: Only one thread can modify cache at a time
90/// - **Shared benefits**: All threads benefit from cached results
91/// - **Best for**: Expensive computations where sharing outweighs synchronization cost
92///
93/// # Examples
94///
95/// ## Basic Usage
96///
97/// ```ignore
98/// use cachelito_core::{GlobalCache, EvictionPolicy, CacheEntry};
99/// use once_cell::sync::Lazy;
100/// use parking_lot::{Mutex, RwLock};
101/// use std::collections::{HashMap, VecDeque};
102///
103/// static CACHE_MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
104/// Lazy::new(|| RwLock::new(HashMap::new()));
105/// static CACHE_ORDER: Lazy<Mutex<VecDeque<String>>> =
106/// Lazy::new(|| Mutex::new(VecDeque::new()));
107///
108/// let cache = GlobalCache::new(
109/// &CACHE_MAP,
110/// &CACHE_ORDER,
111/// Some(100), // Max 100 entries
112/// None, // No memory limit
113/// EvictionPolicy::LRU,
114/// Some(60), // 60 second TTL
115/// None, // Default frequency_weight
116/// );
117///
118/// // All threads can access the same cache
119/// cache.insert("key1", 42);
120/// assert_eq!(cache.get("key1"), Some(42));
121/// ```
122///
123/// ## TLRU with Custom Frequency Weight
124///
125/// ```ignore
126/// use cachelito_core::{GlobalCache, EvictionPolicy};
127///
128/// // Emphasize frequency over recency (good for popular content)
129/// let cache = GlobalCache::new(
130/// &CACHE_MAP,
131/// &CACHE_ORDER,
132/// Some(100),
133/// None,
134/// EvictionPolicy::TLRU,
135/// Some(300),
136/// Some(1.5), // frequency_weight > 1.0
137/// );
138///
139/// // Emphasize recency over frequency (good for time-sensitive data)
140/// let cache = GlobalCache::new(
141/// &CACHE_MAP,
142/// &CACHE_ORDER,
143/// Some(100),
144/// None,
145/// EvictionPolicy::TLRU,
146/// Some(300),
147/// Some(0.3), // frequency_weight < 1.0
148/// );
149/// ```
150///
151/// ## With Memory Limits
152///
153/// ```ignore
154/// use cachelito_core::{GlobalCache, EvictionPolicy, MemoryEstimator};
155///
156/// let cache = GlobalCache::new(
157/// &CACHE_MAP,
158/// &CACHE_ORDER,
159/// Some(1000),
160/// Some(100 * 1024 * 1024), // 100MB max
161/// EvictionPolicy::LRU,
162/// Some(300),
163/// None,
164/// );
165///
166/// // Insert with memory tracking (requires MemoryEstimator implementation)
167/// cache.insert_with_memory("key", value);
168/// ```
pub struct GlobalCache<R: 'static> {
    /// Backing store mapping cache keys to entries (value + timestamp + frequency).
    pub map: &'static Lazy<RwLock<HashMap<String, CacheEntry<R>>>>,
    /// Eviction-order queue: front = oldest / least recently used,
    /// back = newest / most recently used.
    pub order: &'static Lazy<Mutex<VecDeque<String>>>,
    /// Maximum number of entries; `None` means unlimited.
    pub limit: Option<usize>,
    /// Maximum total memory in bytes (enforced only by `insert_with_memory`);
    /// `None` means unlimited.
    pub max_memory: Option<usize>,
    /// Eviction policy applied when a limit is exceeded.
    pub policy: EvictionPolicy,
    /// Time-to-live in seconds for entries; `None` disables expiration.
    pub ttl: Option<u64>,
    /// Weight of the frequency term in the TLRU score; ignored by other policies.
    pub frequency_weight: Option<f64>,
    /// Fraction of `limit` treated as the W-TinyLFU admission window
    /// (defaults to 0.20 when `None`).
    pub window_ratio: Option<f64>,
    /// Count-Min Sketch width for W-TinyLFU.
    /// NOTE(review): not read by any method in this file — presumably consumed
    /// elsewhere in the crate; confirm before relying on it.
    pub sketch_width: Option<usize>,
    /// Count-Min Sketch depth for W-TinyLFU (see note on `sketch_width`).
    pub sketch_depth: Option<usize>,
    /// Frequency-decay interval for W-TinyLFU (see note on `sketch_width`).
    pub decay_interval: Option<u64>,
    /// Shared hit/miss counters for this cache (only with the `stats` feature).
    #[cfg(feature = "stats")]
    pub stats: &'static Lazy<CacheStats>,
}
184
185impl<R: Clone + 'static> GlobalCache<R> {
186 /// Creates a new global cache instance.
187 ///
188 /// # Parameters
189 ///
190 /// * `map` - Static reference to a RwLock-protected HashMap for storing cache entries
191 /// * `order` - Static reference to a Mutex-protected VecDeque for tracking entry order
192 /// * `limit` - Optional maximum number of entries (None for unlimited)
193 /// * `max_memory` - Optional maximum memory size in bytes (None for unlimited)
194 /// * `policy` - Eviction policy (FIFO, LRU, LFU, ARC, Random, or TLRU)
195 /// * `ttl` - Optional time-to-live in seconds for cache entries (None for no expiration)
196 /// * `frequency_weight` - Optional weight factor for frequency in TLRU policy
197 /// - Values < 1.0: Emphasize recency and age
198 /// - Values > 1.0: Emphasize frequency
199 /// - None or 1.0: Balanced approach (default)
200 /// - Only used when policy is TLRU, ignored otherwise
201 /// * `window_ratio` - Optional window ratio for W-TinyLFU policy (between 0.0 and 1.0)
202 /// * `sketch_width` - Optional sketch width for W-TinyLFU policy
203 /// * `sketch_depth` - Optional sketch depth for W-TinyLFU policy
204 /// * `decay_interval` - Optional decay interval for W-TinyLFU policy
205 /// * `stats` - Static reference to CacheStats for tracking hit/miss statistics (stats feature only)
206 ///
207 /// # Returns
208 ///
209 /// A new `GlobalCache` instance configured with the provided parameters.
210 ///
211 /// # Examples
212 ///
213 /// ## Basic LRU cache with TTL
214 ///
215 /// ```ignore
216 /// let cache = GlobalCache::new(
217 /// &CACHE_MAP,
218 /// &CACHE_ORDER,
219 /// Some(1000), // Max 1000 entries
220 /// None, // No memory limit
221 /// EvictionPolicy::LRU, // LRU eviction
222 /// Some(300), // 5 minute TTL
223 /// None, // No frequency_weight (not needed for LRU)
224 /// None, // No window_ratio
225 /// None, // No sketch_width
226 /// None, // No sketch_depth
227 /// None, // No decay_interval
228 /// #[cfg(feature = "stats")]
229 /// &CACHE_STATS,
230 /// );
231 /// ```
232 ///
233 /// ## TLRU with memory limit and custom frequency weight
234 ///
235 /// ```ignore
236 /// let cache = GlobalCache::new(
237 /// &CACHE_MAP,
238 /// &CACHE_ORDER,
239 /// Some(1000),
240 /// Some(100 * 1024 * 1024), // 100MB max
241 /// EvictionPolicy::TLRU, // TLRU eviction
242 /// Some(300), // 5 minute TTL
243 /// Some(1.5), // Emphasize frequency (popular content)
244 /// None, // No window_ratio
245 /// None, // No sketch_width
246 /// None, // No sketch_depth
247 /// None, // No decay_interval
248 /// #[cfg(feature = "stats")]
249 /// &CACHE_STATS,
250 /// );
251 /// ```
252 #[cfg(feature = "stats")]
253 pub fn new(
254 map: &'static Lazy<RwLock<HashMap<String, CacheEntry<R>>>>,
255 order: &'static Lazy<Mutex<VecDeque<String>>>,
256 limit: Option<usize>,
257 max_memory: Option<usize>,
258 policy: EvictionPolicy,
259 ttl: Option<u64>,
260 frequency_weight: Option<f64>,
261 window_ratio: Option<f64>,
262 sketch_width: Option<usize>,
263 sketch_depth: Option<usize>,
264 decay_interval: Option<u64>,
265 stats: &'static Lazy<CacheStats>,
266 ) -> Self {
267 Self {
268 map,
269 order,
270 limit,
271 max_memory,
272 policy,
273 ttl,
274 frequency_weight,
275 window_ratio,
276 sketch_width,
277 sketch_depth,
278 decay_interval,
279 stats,
280 }
281 }
282
    /// Creates a new global cache instance (variant compiled **without** the
    /// `stats` feature).
    ///
    /// Parameters mirror the `stats`-enabled constructor: static references to
    /// the shared map and order queue, the entry-count (`limit`) and memory
    /// (`max_memory`) limits, the eviction `policy`, the `ttl` in seconds, the
    /// TLRU `frequency_weight`, and the W-TinyLFU tuning knobs
    /// (`window_ratio`, `sketch_width`, `sketch_depth`, `decay_interval`).
    /// See the struct-level documentation for the meaning of each field.
    #[cfg(not(feature = "stats"))]
    pub fn new(
        map: &'static Lazy<RwLock<HashMap<String, CacheEntry<R>>>>,
        order: &'static Lazy<Mutex<VecDeque<String>>>,
        limit: Option<usize>,
        max_memory: Option<usize>,
        policy: EvictionPolicy,
        ttl: Option<u64>,
        frequency_weight: Option<f64>,
        window_ratio: Option<f64>,
        sketch_width: Option<usize>,
        sketch_depth: Option<usize>,
        decay_interval: Option<u64>,
    ) -> Self {
        // Pure field wiring — no validation, allocation, or locking.
        Self {
            map,
            order,
            limit,
            max_memory,
            policy,
            ttl,
            frequency_weight,
            window_ratio,
            sketch_width,
            sketch_depth,
            decay_interval,
        }
    }
311
312 /// Retrieves a cached value by key.
313 ///
314 /// This method attempts to retrieve a cached value, checking for expiration
315 /// and updating access patterns based on the eviction policy.
316 ///
317 /// # Parameters
318 ///
319 /// * `key` - The cache key to retrieve
320 ///
321 /// # Returns
322 ///
323 /// * `Some(R)` - The cached value if found and not expired
324 /// * `None` - If the key is not in cache or the entry has expired
325 ///
326 /// # Behavior by Policy
327 ///
328 /// - **FIFO**: No updates on cache hit (order remains unchanged)
329 /// - **LRU**: Moves the key to the end of the order queue (most recently used)
330 /// - **LFU**: Increments the frequency counter for the entry
331 /// - **ARC**: Increments frequency counter and updates position in order queue
332 /// - **Random**: No updates on cache hit
333 /// - **TLRU**: Increments frequency counter and updates position in order queue
334 ///
335 /// # TTL Expiration
336 ///
337 /// If a TTL is configured and the entry has expired:
338 /// - The entry is removed from both the cache map and order queue
339 /// - A cache miss is recorded (if stats feature is enabled)
340 /// - `None` is returned
341 ///
342 /// # Statistics
343 ///
344 /// When the `stats` feature is enabled:
345 /// - Cache hits are recorded when a valid entry is found
346 /// - Cache misses are recorded when the key doesn't exist or has expired
347 ///
348 /// # Thread Safety
349 ///
350 /// This method is thread-safe and uses a multi-phase locking strategy:
351 /// 1. **Read lock** for initial lookup (allows concurrent reads)
352 /// 2. **Mutex + Write lock** for expired entry removal (if needed)
353 /// 3. **Mutex lock** for order queue updates (for LRU/ARC/TLRU)
354 ///
355 /// Multiple threads can safely call this method concurrently. Read-heavy
356 /// workloads benefit from RwLock's concurrent read capability.
357 ///
358 /// # Performance
359 ///
360 /// - **FIFO, Random**: O(1) - no reordering needed
361 /// - **LRU, ARC, TLRU**: O(n) - requires finding and moving key in order queue
362 /// - **LFU**: O(1) - only increments counter
363 /// - **Lock overhead**: Read lock for lookup + potential write lock for updates
364 ///
365 /// # Examples
366 ///
367 /// ```ignore
368 /// // Insert and retrieve
369 /// cache.insert("user:123", user_data);
370 /// assert_eq!(cache.get("user:123"), Some(user_data));
371 ///
372 /// // Non-existent key
373 /// assert_eq!(cache.get("user:999"), None);
374 ///
375 /// // Expired entry (with TTL)
376 /// cache.insert("temp", data);
377 /// std::thread::sleep(Duration::from_secs(61)); // Wait for TTL expiration
378 /// assert_eq!(cache.get("temp"), None);
379 /// ```
380 pub fn get(&self, key: &str) -> Option<R> {
381 let mut result = None;
382 let mut expired = false;
383
384 // Acquire read lock - allows concurrent reads
385 {
386 let m = self.map.read();
387 if let Some(entry) = m.get(key) {
388 if entry.is_expired(self.ttl) {
389 expired = true;
390 } else {
391 result = Some(entry.value.clone());
392 }
393 }
394 } // Read lock released here
395
396 if expired {
397 // Acquiring order lock to modify order queue
398 let mut o = self.order.lock();
399 // Acquire write lock to modify the map
400 let mut map_write = self.map.write();
401 remove_key_from_global_cache(&mut map_write, &mut o, key);
402 #[cfg(feature = "stats")]
403 self.stats.record_miss();
404 return None;
405 }
406
407 // Record stats
408 #[cfg(feature = "stats")]
409 {
410 if result.is_some() {
411 self.stats.record_hit();
412 } else {
413 self.stats.record_miss();
414 }
415 }
416
417 // Update access patterns based on policy
418 if result.is_some() {
419 match self.policy {
420 EvictionPolicy::LRU => {
421 // Move key to end of order queue (most recently used)
422 move_key_to_end(&mut self.order.lock(), key);
423 }
424 EvictionPolicy::LFU => {
425 // Increment frequency counter
426 self.increment_frequency(key);
427 }
428 EvictionPolicy::ARC => {
429 // Adaptive Replacement: Update both recency (LRU) and frequency (LFU)
430 // Move key to end (recency) - lock is automatically released after this call
431 move_key_to_end(&mut self.order.lock(), key);
432 // Increment frequency counter
433 self.increment_frequency(key);
434 }
435 EvictionPolicy::TLRU => {
436 // Time-aware LRU: Update both recency and frequency
437 // Similar to ARC but considers age in eviction
438 move_key_to_end(&mut self.order.lock(), key);
439 self.increment_frequency(key);
440 }
441 EvictionPolicy::WTinyLFU => {
442 // Simplified W-TinyLFU: Behaves like a hybrid of LRU and LFU
443 // Full implementation with Count-Min Sketch would require additional state
444 // For now, update both position (LRU) and frequency (LFU)
445 move_key_to_end(&mut self.order.lock(), key);
446 self.increment_frequency(key);
447 }
448 EvictionPolicy::FIFO | EvictionPolicy::Random => {
449 // No update needed for FIFO or Random
450 }
451 }
452 }
453
454 result
455 }
456
457 /// Increments the frequency counter for the specified key.
458 fn increment_frequency(&self, key: &str) {
459 let mut m = self.map.write();
460 if let Some(entry) = m.get_mut(key) {
461 entry.increment_frequency();
462 }
463 }
464
465 /// Inserts or updates a value in the cache.
466 ///
467 /// This method stores a new value in the cache or updates an existing one.
468 /// It handles cache limit enforcement and eviction according to the configured policy.
469 ///
470 /// # Parameters
471 ///
472 /// * `key` - The cache key
473 /// * `value` - The value to cache
474 ///
475 /// # Behavior
476 ///
477 /// 1. Creates a new `CacheEntry` with the current timestamp
478 /// 2. Inserts/updates the entry in the map
479 /// 3. Updates the order queue:
480 /// - If key already exists in queue, removes old position
481 /// - Adds key to the end of the queue
482 /// 4. Enforces cache limit:
483 /// - If limit is set and exceeded, evicts the oldest entry (front of queue)
484 /// - Removes evicted entry from both map and order queue
485 ///
486 /// # Eviction Policies
487 ///
488 /// When the cache limit is reached, entries are evicted according to the policy:
489 /// - **FIFO**: Evicts oldest inserted entry (front of queue)
490 /// - **LRU**: Evicts least recently used entry (front of queue, updated by `get()`)
491 /// - **LFU**: Evicts entry with lowest frequency counter
492 /// - **ARC**: Evicts based on hybrid score (frequency × position_weight)
493 /// - **Random**: Evicts randomly selected entry
494 /// - **TLRU**: Evicts based on TLRU score (frequency^weight × position × age_factor)
495 ///
496 /// # Thread Safety
497 ///
498 /// This method is thread-safe and uses mutex locks to ensure consistency
499 /// between the map and order queue.
500 ///
501 /// # Example
502 ///
503 /// ```ignore
504 /// // Insert a value
505 /// cache.insert("user:123", user_data);
506 ///
507 /// // Update existing value
508 /// cache.insert("user:123", updated_user_data);
509 ///
510 /// // With limit=2, this will evict the oldest entry
511 /// cache.insert("user:456", another_user);
512 /// cache.insert("user:789", yet_another_user); // Evicts first entry
513 /// ```
514 ///
515 /// # Note
516 ///
517 /// This method does NOT require `MemoryEstimator` trait. It only handles entry-count limits.
518 /// If `max_memory` is configured, use `insert_with_memory()` instead, which requires
519 /// the type to implement `MemoryEstimator`.
520 pub fn insert(&self, key: &str, value: R) {
521 let key_s = key.to_string();
522 let entry = CacheEntry::new(value);
523
524 // Acquire write lock for modification
525 self.map.write().insert(key_s.clone(), entry);
526
527 let mut o = self.order.lock();
528 if let Some(pos) = o.iter().position(|k| *k == key_s) {
529 o.remove(pos);
530 }
531 o.push_back(key_s.clone());
532
533 // Always handle entry-count limits, regardless of memory limits
534 self.handle_entry_limit_eviction(&mut o);
535 }
536
537 /// Handles the eviction of entries from a global cache when the number of entries exceeds the limit.
538 ///
539 /// The eviction behavior depends on the specified eviction policy. The function ensures that the cache
540 /// adheres to the defined entry limit by evicting entries based on the configured policy:
541 ///
542 /// - **LFU (Least Frequently Used):** Finds and evicts the entry with the minimum frequency of usage.
543 /// - **ARC (Adaptive Replacement Cache):** Leverages the ARC eviction strategy to find and evict a specific entry.
544 /// - **FIFO (First In First Out):** Evicts the oldest entry in the queue to ensure the limit is maintained.
545 /// - **LRU (Least Recently Used):** Evicts the least recently accessed entry from the queue.
546 ///
547 /// # Parameters
548 ///
549 /// - `o`: A mutable reference to a `MutexGuard` that holds a `VecDeque<String>`.
550 /// This represents the global cache where entries are stored.
551 ///
552 /// # Behavior
553 ///
554 /// 1. **Check Limit:** The function first checks if the `limit` is defined and if the length of the
555 /// cache (`o`) exceeds the defined `limit`.
556 ///
557 /// 2. **Eviction By Policy:** Based on the configured `EvictionPolicy`, different eviction strategies
558 /// are employed:
559 ///
560 /// - **LFU:** The method identifies the key with the minimum frequency count by inspecting the
561 /// associated frequency map and removes it from the cache.
562 /// - **ARC:** Uses an ARC strategy to determine which key should be evicted and removes it from the cache.
563 /// - **FIFO or LRU:** Dequeues entries in sequence (from the front of the queue) and checks if the
564 /// entry still exists in the global cache. If found, the entry is removed from both the queue and cache.
565 ///
566 /// 3. **Thread-Safe Access:** The function ensures thread-safe read/write access to the cache and
567 /// associated data structures using mutexes.
    fn handle_entry_limit_eviction(&self, mut o: &mut MutexGuard<RawMutex, VecDeque<String>>) {
        // Precondition: the caller already holds the order-queue lock (`o`).
        // At most ONE entry is evicted per call; since callers insert one
        // entry at a time, a single eviction restores `len <= limit`.
        // Lock order inside every arm is `order` (held by caller) -> `map`.
        if let Some(limit) = self.limit {
            if o.len() > limit {
                match self.policy {
                    EvictionPolicy::LFU => {
                        // Find and evict the entry with the minimum frequency
                        let mut map_write = self.map.write();
                        let min_freq_key = find_min_frequency_key(&map_write, &o);

                        if let Some(evict_key) = min_freq_key {
                            remove_key_from_global_cache(&mut map_write, &mut o, &evict_key);
                        }
                    }
                    EvictionPolicy::ARC => {
                        // Hybrid recency/frequency score; queue positions
                        // (front = oldest) feed the score computation.
                        let mut map_write = self.map.write();
                        if let Some(evict_key) =
                            find_arc_eviction_key(&map_write, o.iter().enumerate())
                        {
                            remove_key_from_global_cache(&mut map_write, &mut o, &evict_key);
                        }
                    }
                    EvictionPolicy::TLRU => {
                        // Score combines frequency, queue position, and entry
                        // age (via `ttl`), weighted by `frequency_weight`.
                        let mut map_write = self.map.write();
                        if let Some(evict_key) = find_tlru_eviction_key(
                            &map_write,
                            o.iter().enumerate(),
                            self.ttl,
                            self.frequency_weight,
                        ) {
                            remove_key_from_global_cache(&mut map_write, &mut o, &evict_key);
                        }
                    }
                    EvictionPolicy::WTinyLFU => {
                        // W-TinyLFU: Window segment (first entries) + Protected segment (rest)
                        let window_ratio = self.window_ratio.unwrap_or(0.20); // Default 20%
                        let window_size = crate::utils::calculate_window_size(limit, window_ratio);

                        let mut map_write = self.map.write();

                        if o.len() <= window_size {
                            // Everything is in window segment - evict FIFO
                            // (orphan keys no longer in the map are skipped
                            // and dropped from the queue).
                            while let Some(evict_key) = o.pop_front() {
                                if map_write.contains_key(&evict_key) {
                                    map_write.remove(&evict_key);
                                    break;
                                }
                            }
                        } else {
                            // We have both window and protected segments
                            let mut evicted = false;

                            // Try to evict from window first (first window_size entries)
                            for i in 0..window_size.min(o.len()) {
                                if let Some(evict_key) = o.get(i) {
                                    if map_write.contains_key(evict_key) {
                                        // Clone before mutating `o`: the borrow
                                        // of `evict_key` must end before
                                        // `o.remove(i)` below.
                                        let key_to_remove = evict_key.clone();
                                        map_write.remove(&key_to_remove);
                                        o.remove(i);
                                        evicted = true;
                                        break;
                                    }
                                }
                            }

                            // If window eviction failed, evict from protected (LFU)
                            if !evicted {
                                // Protected segment is from window_size to end
                                let protected_keys: VecDeque<String> =
                                    o.iter().skip(window_size).cloned().collect();

                                if let Some(evict_key) =
                                    find_min_frequency_key(&map_write, &protected_keys)
                                {
                                    remove_key_from_global_cache(
                                        &mut map_write,
                                        &mut o,
                                        &evict_key,
                                    );
                                }
                            }
                        }
                    }
                    EvictionPolicy::Random => {
                        // O(1) random eviction: select random position and remove directly
                        if !o.is_empty() {
                            let pos = fastrand::usize(..o.len());
                            if let Some(evict_key) = o.remove(pos) {
                                let mut map_write = self.map.write();
                                map_write.remove(&evict_key);
                            }
                        }
                    }
                    EvictionPolicy::FIFO | EvictionPolicy::LRU => {
                        // Keep trying to evict until we find a valid entry or queue is empty
                        let mut map_write = self.map.write();
                        while let Some(evict_key) = o.pop_front() {
                            // Check if the key still exists in the cache before removing
                            if map_write.contains_key(&evict_key) {
                                map_write.remove(&evict_key);
                                break;
                            }
                        }
                    }
                }
            }
        }
    }
675}
676
677// Separate implementation for types that implement MemoryEstimator
678// This allows memory-based eviction
679impl<R: Clone + 'static + crate::MemoryEstimator> GlobalCache<R> {
680 /// Insert with memory limit support.
681 ///
682 /// This method requires `R` to implement `MemoryEstimator` and handles both
683 /// memory-based and entry-count-based eviction.
684 ///
685 /// Use this method when `max_memory` is configured in the cache.
686 ///
687 /// # Arguments
688 ///
689 /// * `key` - The cache key
690 /// * `value` - The value to cache (must implement `MemoryEstimator`)
691 ///
692 /// # Memory Management
693 ///
694 /// The method calculates the memory footprint of all cached entries and evicts
695 /// entries as needed to stay within the `max_memory` limit. Eviction follows
696 /// the configured policy.
697 ///
698 /// # Safety Check
699 ///
700 /// If the value to be inserted is larger than `max_memory`, the insertion is
701 /// skipped entirely to avoid infinite eviction loops. This ensures the cache
702 /// respects the memory limit even if individual values are very large.
703 ///
704 /// # Eviction Behavior by Policy
705 ///
706 /// When memory limit is exceeded:
707 /// - **FIFO/LRU**: Evicts from front of order queue
708 /// - **LFU**: Evicts entry with lowest frequency
709 /// - **ARC**: Evicts based on hybrid score (frequency × position_weight)
710 /// - **TLRU**: Evicts based on TLRU score (frequency^weight × position × age_factor)
711 /// - **Random**: Evicts randomly selected entry
712 ///
713 /// The eviction loop continues until there's enough memory for the new value.
714 ///
715 /// # Entry Count Limit
716 ///
717 /// After satisfying memory constraints, this method also checks the entry count
718 /// limit (if configured) and evicts additional entries if needed.
719 ///
720 /// # Thread Safety
721 ///
722 /// This method uses write locks to ensure consistency between the map and
723 /// order queue during eviction and insertion.
724 ///
725 /// # Examples
726 ///
727 /// ```ignore
728 /// use cachelito_core::MemoryEstimator;
729 ///
730 /// // Type must implement MemoryEstimator
731 /// impl MemoryEstimator for MyLargeStruct {
732 /// fn estimate_memory(&self) -> usize {
733 /// std::mem::size_of::<Self>() + self.data.capacity()
734 /// }
735 /// }
736 ///
737 /// // Insert with automatic memory-based eviction
738 /// cache.insert_with_memory("large_data", expensive_value);
739 /// ```
740 ///
741 /// # Performance
742 ///
743 /// - **Memory calculation**: O(n) - iterates all entries to sum memory
744 /// - **Eviction**: Varies by policy (see individual policy documentation)
745 /// - May evict multiple entries in one call if memory limit is tight
    pub fn insert_with_memory(&self, key: &str, value: R) {
        let key_s = key.to_string();
        let entry = CacheEntry::new(value);

        // Acquire write lock for modification
        // NOTE(review): this write-lock guard is a temporary released right
        // after the insert, before the order lock below is taken — there is a
        // brief window where the entry is in the map but not in the queue.
        self.map.write().insert(key_s.clone(), entry);

        let mut o = self.order.lock();
        // De-duplicate: updating an existing key moves it to the back.
        if let Some(pos) = o.iter().position(|k| *k == key_s) {
            o.remove(pos);
        }
        // `key_s` is still needed below (size check / rollback), hence clone.
        o.push_back(key_s.clone());

        // Check memory limit first (if specified)
        if let Some(max_mem) = self.max_memory {
            // First, check if the new value by itself exceeds max_mem
            // This is a safety check to prevent infinite eviction loop
            let new_value_size = {
                let map_read = self.map.read();
                map_read
                    .get(&key_s)
                    .map(|e| e.value.estimate_memory())
                    .unwrap_or(0)
            };

            if new_value_size > max_mem {
                // The value itself is too large for the cache
                // Remove it and return early to respect memory limit
                self.map.write().remove(&key_s);
                o.pop_back(); // Remove from order queue as well
                return;
            }

            loop {
                // Recompute the total footprint each round — O(n) per
                // iteration because every cached value is re-measured.
                let current_mem = {
                    let map_read = self.map.read();
                    map_read
                        .values()
                        .map(|e| e.value.estimate_memory())
                        .sum::<usize>()
                };

                if current_mem <= max_mem {
                    break;
                }

                // Need to evict based on policy. Each arm reports whether an
                // entry was actually removed; `false` stops the loop so an
                // un-evictable state cannot spin forever.
                let evicted = match self.policy {
                    EvictionPolicy::LFU => {
                        let mut map_write = self.map.write();
                        let min_freq_key = find_min_frequency_key(&map_write, &o);
                        if let Some(evict_key) = min_freq_key {
                            remove_key_from_global_cache(&mut map_write, &mut o, &evict_key);
                            true
                        } else {
                            false
                        }
                    }
                    EvictionPolicy::ARC => {
                        let mut map_write = self.map.write();
                        if let Some(evict_key) =
                            find_arc_eviction_key(&map_write, o.iter().enumerate())
                        {
                            remove_key_from_global_cache(&mut map_write, &mut o, &evict_key);
                            true
                        } else {
                            false
                        }
                    }
                    EvictionPolicy::TLRU => {
                        let mut map_write = self.map.write();
                        if let Some(evict_key) = find_tlru_eviction_key(
                            &map_write,
                            o.iter().enumerate(),
                            self.ttl,
                            self.frequency_weight,
                        ) {
                            remove_key_from_global_cache(&mut map_write, &mut o, &evict_key);
                            true
                        } else {
                            false
                        }
                    }
                    EvictionPolicy::WTinyLFU => {
                        // Simplified W-TinyLFU: Use LFU-like eviction
                        // Full implementation would use window segment + Count-Min Sketch
                        let mut map_write = self.map.write();
                        if let Some(evict_key) = find_min_frequency_key(&map_write, &o) {
                            remove_key_from_global_cache(&mut map_write, &mut o, &evict_key);
                            true
                        } else {
                            false
                        }
                    }
                    EvictionPolicy::Random => {
                        // O(1) random eviction: select random position and remove directly
                        if !o.is_empty() {
                            let pos = fastrand::usize(..o.len());
                            if let Some(evict_key) = o.remove(pos) {
                                let mut map_write = self.map.write();
                                map_write.remove(&evict_key);
                                true
                            } else {
                                false
                            }
                        } else {
                            false
                        }
                    }
                    EvictionPolicy::FIFO | EvictionPolicy::LRU => {
                        // Ensure we only count as evicted if we actually remove from the map
                        let mut successfully_evicted = false;
                        let mut map_write = self.map.write();
                        while let Some(evict_key) = o.pop_front() {
                            if map_write.contains_key(&evict_key) {
                                map_write.remove(&evict_key);
                                successfully_evicted = true;
                                break;
                            }
                            // If key wasn't in map (orphan), continue popping until we remove a real one
                        }
                        successfully_evicted
                    }
                };

                if !evicted {
                    break; // Nothing left to evict
                }
            }
        }

        // Handle entry-count limits
        self.handle_entry_limit_eviction(&mut o);
    }
880
881 /// Returns a reference to the cache statistics.
882 ///
883 /// This method is only available when the `stats` feature is enabled.
884 ///
885 /// # Available Metrics
886 ///
887 /// The returned CacheStats provides:
888 /// - **hits()**: Number of successful cache lookups
889 /// - **misses()**: Number of cache misses (key not found or expired)
890 /// - **hit_rate()**: Ratio of hits to total accesses (0.0 to 1.0)
891 /// - **total_accesses()**: Total number of get operations
892 ///
893 /// # Thread Safety
894 ///
895 /// Statistics use atomic counters (`AtomicU64`) and can be safely accessed
896 /// from multiple threads without additional synchronization.
897 ///
898 /// # Examples
899 ///
900 /// ```ignore
901 /// // Get basic statistics
902 /// let stats = cache.stats();
903 /// println!("Hits: {}", stats.hits());
904 /// println!("Misses: {}", stats.misses());
905 /// println!("Hit rate: {:.2}%", stats.hit_rate() * 100.0);
906 /// println!("Total accesses: {}", stats.total_accesses());
907 ///
908 /// // Monitor cache performance
909 /// let total = stats.total_accesses();
910 /// if total > 1000 && stats.hit_rate() < 0.5 {
911 /// println!("Warning: Low cache hit rate");
912 /// }
913 /// ```
914 ///
915 /// # See Also
916 ///
917 /// - [`CacheStats`] - The statistics structure
918 /// - [`crate::stats_registry::get()`] - Access stats by cache name
919 #[cfg(feature = "stats")]
920 pub fn stats(&self) -> &CacheStats {
921 self.stats
922 }
923
924 /// Clears all entries from the cache.
925 ///
926 /// This method removes all entries from both the cache map and the order queue.
927 /// It's useful for testing or when you need to completely reset the cache state.
928 ///
929 /// # Thread Safety
930 ///
931 /// This method is thread-safe and can be safely called from multiple threads.
932 ///
933 /// # Example
934 ///
935 /// ```ignore
936 /// cache.insert("key1", 42);
937 /// cache.insert("key2", 84);
938 ///
939 /// cache.clear();
940 ///
941 /// assert_eq!(cache.get("key1"), None);
942 /// assert_eq!(cache.get("key2"), None);
943 /// ```
944 pub fn clear(&self) {
945 self.map.write().clear();
946 self.order.lock().clear();
947 }
948}
949
950/// Implementation of `GlobalCache` for `Result` types.
951///
952/// This specialized implementation provides a `insert_result` method that only
953/// caches successful (`Ok`) results, preventing error values from being cached.
954///
955/// # Type Parameters
956///
957/// * `T` - The success type, must be `Clone` and `Debug`
958/// * `E` - The error type, must be `Clone` and `Debug`
959///
960/// # Rationale
961///
962/// Errors are typically transient (network failures, temporary resource unavailability)
963/// and should not be cached. Only successful results should be memoized to avoid
964/// repeatedly returning stale errors.
965///
966/// # Example
967///
968/// ```ignore
969/// let cache: GlobalCache<Result<String, Error>> = GlobalCache::new(...);
970///
971/// // Only Ok values are cached
972/// let result = fetch_data();
973/// cache.insert_result("key1", &result);
974///
975/// // If result was Err, nothing is cached
976/// // If result was Ok, the value is cached
977/// ```
978impl<T: Clone + Debug + 'static, E: Clone + Debug + 'static> GlobalCache<Result<T, E>> {
979 /// Inserts a Result into the cache, but only if it's an `Ok` variant.
980 ///
981 /// This method intelligently caches only successful results, preventing
982 /// error values from polluting the cache.
983 ///
984 /// This version does NOT require MemoryEstimator. Use `insert_result_with_memory()`
985 /// when max_memory is configured.
986 ///
987 /// # Parameters
988 ///
989 /// * `key` - The cache key
990 /// * `value` - The Result to potentially cache
991 ///
992 /// # Behavior
993 ///
994 /// - If `value` is `Ok(v)`: Caches `Ok(v.clone())` under the given key
995 /// - If `value` is `Err(_)`: Does nothing, no cache entry is created
996 ///
997 /// # Thread Safety
998 ///
999 /// This method is thread-safe and can be called concurrently from multiple threads.
1000 ///
1001 /// # Example
1002 ///
1003 /// ```ignore
1004 /// fn fetch_user(id: u64) -> Result<User, DbError> {
1005 /// // ... database query ...
1006 /// }
1007 ///
1008 /// let result = fetch_user(123);
1009 /// cache.insert_result("user:123", &result);
1010 ///
1011 /// // Success: cached
1012 /// // Ok(user) -> cache contains Ok(user)
1013 ///
1014 /// // Failure: not cached (will retry next time)
1015 /// // Err(db_error) -> cache remains empty for this key
1016 /// ```
1017 pub fn insert_result(&self, key: &str, value: &Result<T, E>) {
1018 if let Ok(v) = value {
1019 self.insert(key, Ok(v.clone()));
1020 }
1021 }
1022}
1023
1024/// Implementation of `GlobalCache` for `Result` types WITH MemoryEstimator support.
1025///
1026/// This specialized implementation provides memory-aware caching for Result types.
1027///
1028/// # Type Parameters
1029///
1030/// * `T` - The success type, must be `Clone`, `Debug`, and implement `MemoryEstimator`
1031/// * `E` - The error type, must be `Clone`, `Debug`, and implement `MemoryEstimator`
1032impl<
1033 T: Clone + Debug + 'static + crate::MemoryEstimator,
1034 E: Clone + Debug + 'static + crate::MemoryEstimator,
1035 > GlobalCache<Result<T, E>>
1036{
1037 /// Inserts a Result into the cache with memory limit support.
1038 ///
1039 /// This method requires both T and E to implement MemoryEstimator.
1040 /// Use this when max_memory is configured.
1041 ///
1042 /// # Parameters
1043 ///
1044 /// * `key` - The cache key
1045 /// * `value` - The Result to potentially cache
1046 ///
1047 /// # Behavior
1048 ///
1049 /// - If `value` is `Ok(v)`: Caches `Ok(v.clone())` under the given key
1050 /// - If `value` is `Err(_)`: Does nothing, no cache entry is created
1051 pub fn insert_result_with_memory(&self, key: &str, value: &Result<T, E>) {
1052 if let Ok(v) = value {
1053 self.insert_with_memory(key, Ok(v.clone()));
1054 }
1055 }
1056}
1057
1058#[cfg(test)]
1059mod tests {
1060 use super::*;
1061 use std::thread;
1062 use std::time::Duration;
1063
    /// A value inserted into the global cache can be read back unchanged.
    #[test]
    fn test_global_basic_insert_get() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );
        cache.insert("key1", 100);
        assert_eq!(cache.get("key1"), Some(100));
    }
1090
    /// Looking up a key that was never inserted returns `None`.
    #[test]
    fn test_global_missing_key() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );
        assert_eq!(cache.get("nonexistent"), None);
    }
1117
    /// Re-inserting under an existing key overwrites the previous value.
    #[test]
    fn test_global_update_existing() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );
        cache.insert("key", 1);
        cache.insert("key", 2);
        assert_eq!(cache.get("key"), Some(2));
    }
1146
    /// With FIFO and a limit of 2, the oldest insertion (k1) is evicted
    /// when a third entry arrives.
    #[test]
    fn test_global_fifo_eviction() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            Some(2),
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );
        cache.insert("k1", 1);
        cache.insert("k2", 2);
        cache.insert("k3", 3);

        assert_eq!(cache.get("k1"), None);
        assert_eq!(cache.get("k2"), Some(2));
        assert_eq!(cache.get("k3"), Some(3));
    }
1179
    /// With LRU and a limit of 2, reading k1 makes k2 the least recently
    /// used entry, so inserting k3 evicts k2 rather than k1.
    #[test]
    fn test_global_lru_eviction() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            Some(2),
            None,
            EvictionPolicy::LRU,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );
        cache.insert("k1", 1);
        cache.insert("k2", 2);
        let _ = cache.get("k1");
        cache.insert("k3", 3);

        assert_eq!(cache.get("k1"), Some(1));
        assert_eq!(cache.get("k2"), None);
        assert_eq!(cache.get("k3"), Some(3));
    }
1213
    /// Repeated reads of k1 keep it recent, so the untouched k2 is the LRU
    /// victim when capacity (3) is exceeded.
    #[test]
    fn test_global_lru_multiple_accesses() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            Some(3),
            None,
            EvictionPolicy::LRU,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );
        cache.insert("k1", 1);
        cache.insert("k2", 2);
        cache.insert("k3", 3);

        // Access k1 to make it most recent
        let _ = cache.get("k1");
        let _ = cache.get("k1");

        // k2 should be evicted (least recently used)
        cache.insert("k4", 4);

        assert_eq!(cache.get("k1"), Some(1));
        assert_eq!(cache.get("k2"), None);
        assert_eq!(cache.get("k3"), Some(3));
        assert_eq!(cache.get("k4"), Some(4));
    }
1254
    /// Ten threads each insert and read their own key through handles backed
    /// by the same statics; every thread must see its own value.
    #[test]
    fn test_global_thread_safety() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let handles: Vec<_> = (0..10)
            .map(|i| {
                thread::spawn(move || {
                    let cache = GlobalCache::new(
                        &MAP,
                        &ORDER,
                        None,
                        None,
                        EvictionPolicy::FIFO,
                        None,
                        None,
                        None,
                        None,
                        None,
                        None,
                        #[cfg(feature = "stats")]
                        &STATS,
                    );
                    cache.insert(&format!("key{}", i), i);
                    thread::sleep(Duration::from_millis(10));
                    cache.get(&format!("key{}", i))
                })
            })
            .collect();

        for (i, handle) in handles.into_iter().enumerate() {
            let result = handle.join().unwrap();
            assert_eq!(result, Some(i as i32));
        }
    }
1294
    /// An entry with a 1-second TTL is readable immediately but gone after
    /// sleeping past its expiry.
    #[test]
    fn test_global_ttl_expiration() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            Some(1),
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );
        cache.insert("expires", 999);

        // Should be valid immediately
        assert_eq!(cache.get("expires"), Some(999));

        thread::sleep(Duration::from_secs(2));

        // Should be expired now
        assert_eq!(cache.get("expires"), None);
    }
1329
    /// `insert_result` caches an `Ok` value so a later get returns it.
    #[test]
    fn test_global_result_ok() {
        static RES_MAP: Lazy<RwLock<HashMap<String, CacheEntry<Result<i32, String>>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static RES_ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &RES_MAP,
            &RES_ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );
        let ok_result = Ok(42);
        cache.insert_result("success", &ok_result);
        assert_eq!(cache.get("success"), Some(Ok(42)));
    }
1357
    /// `insert_result` silently skips `Err` values; the key stays uncached.
    #[test]
    fn test_global_result_err() {
        static RES_MAP: Lazy<RwLock<HashMap<String, CacheEntry<Result<i32, String>>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static RES_ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &RES_MAP,
            &RES_ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );
        let err_result: Result<i32, String> = Err("error".to_string());
        cache.insert_result("failure", &err_result);
        assert_eq!(cache.get("failure"), None); // Errors not cached
    }
1385
    /// Many threads hammering get("k0") under LRU must not corrupt the order
    /// queue; the hot key stays resident afterwards.
    #[test]
    fn test_global_concurrent_lru_access() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            Some(5),
            None,
            EvictionPolicy::LRU,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );
        // Pre-populate cache
        for i in 0..5 {
            cache.insert(&format!("k{}", i), i);
        }

        // Concurrent access to k0
        let handles: Vec<_> = (0..5)
            .map(|_| {
                thread::spawn(|| {
                    let cache = GlobalCache::new(
                        &MAP,
                        &ORDER,
                        Some(5),
                        None,
                        EvictionPolicy::LRU,
                        None,
                        None,
                        None,
                        None,
                        None,
                        None,
                        #[cfg(feature = "stats")]
                        &STATS,
                    );
                    for _ in 0..10 {
                        let _ = cache.get("k0");
                    }
                })
            })
            .collect();

        for handle in handles {
            handle.join().unwrap();
        }

        // k0 should still be in cache (frequently accessed)
        assert_eq!(cache.get("k0"), Some(0));
    }
1448
    /// With no entry limit configured, nothing is ever evicted: all 100
    /// inserted entries remain readable.
    #[test]
    fn test_global_no_limit() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );

        for i in 0..100 {
            cache.insert(&format!("k{}", i), i);
        }

        // All should still be present
        for i in 0..100 {
            assert_eq!(cache.get(&format!("k{}", i)), Some(i));
        }
    }
1483
    /// Memory-based eviction must skip "orphan" keys (present in the order
    /// queue but absent from the map) and still evict a real entry, also
    /// purging the orphan from the queue.
    #[test]
    fn test_memory_eviction_skips_orphan_and_removes_real_entry() {
        // Shared structures
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        // max_memory allows only a single i32 (size 4)
        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            Some(std::mem::size_of::<i32>()),
            EvictionPolicy::FIFO,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );

        // Insert first real entry
        cache.insert_with_memory("k1", 1i32);

        // Introduce an orphan key at the front of the order queue
        {
            let mut o = ORDER.lock();
            o.push_front("orphan".to_string());
        }

        // Insert second entry which forces memory eviction
        cache.insert_with_memory("k2", 2i32);

        // The orphan should be ignored for memory purposes and a real key should be evicted.
        // Expect k1 to be evicted and k2 to remain.
        assert_eq!(cache.get("k1"), None);
        assert_eq!(cache.get("k2"), Some(2));

        // Ensure the orphan key is gone from the order
        let order_contents: Vec<String> = {
            let o = ORDER.lock();
            o.iter().cloned().collect()
        };
        assert!(order_contents.iter().all(|k| k != "orphan"));
    }
1535
    /// Test RwLock allows concurrent reads (no blocking): 20 reader threads
    /// all complete and observe the pre-populated values.
    #[test]
    fn test_rwlock_concurrent_reads() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );

        // Populate cache
        for i in 0..10 {
            cache.insert(&format!("key{}", i), i);
        }

        // Spawn many threads reading concurrently
        let handles: Vec<_> = (0..20)
            .map(|_thread_id| {
                thread::spawn(move || {
                    let cache = GlobalCache::new(
                        &MAP,
                        &ORDER,
                        None,
                        None,
                        EvictionPolicy::FIFO,
                        None,
                        None,
                        None,
                        None,
                        None,
                        None,
                        #[cfg(feature = "stats")]
                        &STATS,
                    );
                    let mut results = Vec::new();
                    for i in 0..10 {
                        results.push(cache.get(&format!("key{}", i)));
                    }
                    results
                })
            })
            .collect();

        // All threads should complete without blocking
        for handle in handles {
            let results = handle.join().unwrap();
            for (i, result) in results.iter().enumerate() {
                assert_eq!(*result, Some(i as i32));
            }
        }
    }
1603
    /// Test RwLock write blocks reads temporarily: interleaved writer and
    /// reader threads must all finish without deadlocking.
    #[test]
    fn test_rwlock_write_excludes_reads() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );

        cache.insert("key1", 100);

        // Write and read interleaved - should not deadlock
        let write_handle = thread::spawn(|| {
            let cache = GlobalCache::new(
                &MAP,
                &ORDER,
                None,
                None,
                EvictionPolicy::FIFO,
                None,
                None,
                None,
                None,
                None,
                None,
                #[cfg(feature = "stats")]
                &STATS,
            );
            for i in 0..50 {
                cache.insert(&format!("key{}", i), i);
                thread::sleep(Duration::from_micros(100));
            }
        });

        let read_handles: Vec<_> = (0..5)
            .map(|_| {
                thread::spawn(|| {
                    let cache = GlobalCache::new(
                        &MAP,
                        &ORDER,
                        None,
                        None,
                        EvictionPolicy::FIFO,
                        None,
                        None,
                        None,
                        None,
                        None,
                        None,
                        #[cfg(feature = "stats")]
                        &STATS,
                    );
                    for i in 0..50 {
                        let _ = cache.get(&format!("key{}", i));
                        thread::sleep(Duration::from_micros(100));
                    }
                })
            })
            .collect();

        write_handle.join().unwrap();
        for handle in read_handles {
            handle.join().unwrap();
        }
    }
1686
    /// Two hits and one miss produce the expected counters and a ~2/3
    /// hit rate.
    #[test]
    #[cfg(feature = "stats")]
    fn test_global_stats_basic() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );
        cache.insert("k1", 1);
        cache.insert("k2", 2);

        let _ = cache.get("k1"); // Hit
        let _ = cache.get("k2"); // Hit
        let _ = cache.get("k3"); // Miss

        let stats = cache.stats();
        assert_eq!(stats.hits(), 2);
        assert_eq!(stats.misses(), 1);
        assert_eq!(stats.total_accesses(), 3);
        assert!((stats.hit_rate() - 0.6666).abs() < 0.001);
    }
1725
    /// A lookup on a TTL-expired entry is counted as a miss, not a hit.
    #[test]
    #[cfg(feature = "stats")]
    fn test_global_stats_expired_counts_as_miss() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            Some(1),
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );
        cache.insert("expires", 999);

        // Immediate access - should be a hit
        let _ = cache.get("expires");
        assert_eq!(cache.stats().hits(), 1);
        assert_eq!(cache.stats().misses(), 0);

        // Wait for expiration
        thread::sleep(Duration::from_secs(2));

        // Access after expiration - should be a miss
        let _ = cache.get("expires");
        assert_eq!(cache.stats().hits(), 1);
        assert_eq!(cache.stats().misses(), 1);
    }
1766
    /// `CacheStats::reset` zeroes the hit and miss counters.
    #[test]
    #[cfg(feature = "stats")]
    fn test_global_stats_reset() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );
        cache.insert("k1", 1);
        let _ = cache.get("k1");
        let _ = cache.get("k2");

        let stats = cache.stats();
        assert_eq!(stats.hits(), 1);
        assert_eq!(stats.misses(), 1);

        stats.reset();
        assert_eq!(stats.hits(), 0);
        assert_eq!(stats.misses(), 0);
    }
1804
    /// Atomic stats counters accumulate exactly under concurrency:
    /// 10 threads x 10 iterations x (2 hits + 1 miss).
    #[test]
    #[cfg(feature = "stats")]
    fn test_global_stats_concurrent_access() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );
        cache.insert("k1", 1);
        cache.insert("k2", 2);

        let handles: Vec<_> = (0..10)
            .map(|_| {
                thread::spawn(|| {
                    let cache = GlobalCache::new(
                        &MAP,
                        &ORDER,
                        None,
                        None,
                        EvictionPolicy::FIFO,
                        None,
                        None,
                        None,
                        None,
                        None,
                        None,
                        #[cfg(feature = "stats")]
                        &STATS,
                    );
                    for _ in 0..10 {
                        let _ = cache.get("k1"); // Hit
                        let _ = cache.get("k2"); // Hit
                        let _ = cache.get("k3"); // Miss
                    }
                })
            })
            .collect();

        for handle in handles {
            handle.join().unwrap();
        }

        let stats = cache.stats();
        // 10 threads * 10 iterations * 2 hits = 200 hits
        // 10 threads * 10 iterations * 1 miss = 100 misses
        assert_eq!(stats.hits(), 200);
        assert_eq!(stats.misses(), 100);
        assert_eq!(stats.total_accesses(), 300);
    }
1871
    /// With only successful lookups, hit_rate is exactly 1.0 and
    /// miss_rate exactly 0.0.
    #[test]
    #[cfg(feature = "stats")]
    fn test_global_stats_all_hits() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );
        cache.insert("k1", 1);
        cache.insert("k2", 2);

        for _ in 0..10 {
            let _ = cache.get("k1");
            let _ = cache.get("k2");
        }

        let stats = cache.stats();
        assert_eq!(stats.hits(), 20);
        assert_eq!(stats.misses(), 0);
        assert_eq!(stats.hit_rate(), 1.0);
        assert_eq!(stats.miss_rate(), 0.0);
    }
1911
    /// With only failed lookups, hit_rate is exactly 0.0 and
    /// miss_rate exactly 1.0.
    #[test]
    #[cfg(feature = "stats")]
    fn test_global_stats_all_misses() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );

        for i in 0..10 {
            let _ = cache.get(&format!("k{}", i));
        }

        let stats = cache.stats();
        assert_eq!(stats.hits(), 0);
        assert_eq!(stats.misses(), 10);
        assert_eq!(stats.hit_rate(), 0.0);
        assert_eq!(stats.miss_rate(), 1.0);
    }
1948
1949 // ========== TLRU with frequency_weight tests ==========
1950
    /// TLRU with a low frequency weight (0.3) favors recency: the new entry
    /// must be admitted even though an older entry is very frequent.
    #[test]
    fn test_tlru_with_low_frequency_weight() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        // Low frequency_weight (0.3) - emphasizes recency over frequency
        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            Some(3),
            None,
            EvictionPolicy::TLRU,
            Some(10),
            Some(0.3), // Low weight
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );

        // Fill cache
        cache.insert("k1", 1);
        cache.insert("k2", 2);
        cache.insert("k3", 3);

        // Make k1 very frequent
        for _ in 0..10 {
            let _ = cache.get("k1");
        }

        // Wait a bit to age k1
        thread::sleep(Duration::from_millis(100));

        // Add new entry (cache is full)
        cache.insert("k4", 4);

        // With low frequency_weight, even frequent entries can be evicted
        // if they're older (recency and age matter more)
        assert_eq!(cache.get("k4"), Some(4));
    }
1997
    /// TLRU with a high frequency weight (1.5) protects frequently used
    /// entries: the hot k1 survives eviction when k4 is inserted.
    #[test]
    fn test_tlru_with_high_frequency_weight() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        // High frequency_weight (1.5) - emphasizes frequency over recency
        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            Some(3),
            None,
            EvictionPolicy::TLRU,
            Some(10),
            Some(1.5), // High weight
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );

        // Fill cache
        cache.insert("k1", 1);
        cache.insert("k2", 2);
        cache.insert("k3", 3);

        // Make k1 very frequent
        for _ in 0..10 {
            let _ = cache.get("k1");
        }

        // Wait a bit to age k1
        thread::sleep(Duration::from_millis(100));

        // Add new entry (cache is full)
        cache.insert("k4", 4);

        // With high frequency_weight, frequent entries are protected
        // k1 should remain cached despite being older
        assert_eq!(cache.get("k1"), Some(1));
        assert_eq!(cache.get("k4"), Some(4));
    }
2045
    /// TLRU with the default frequency weight (None => 1.0) balances
    /// frequency and recency; the more-frequent k1 survives.
    #[test]
    fn test_tlru_default_frequency_weight() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        // Default frequency_weight (None = 1.0) - balanced approach
        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            Some(2),
            None,
            EvictionPolicy::TLRU,
            Some(5),
            None, // Default weight
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );

        cache.insert("k1", 1);
        cache.insert("k2", 2);

        // Access k1 a few times
        for _ in 0..3 {
            let _ = cache.get("k1");
        }

        // Add third entry
        cache.insert("k3", 3);

        // With balanced weight, both frequency and recency matter
        // k1 has higher frequency, so it should remain
        assert_eq!(cache.get("k1"), Some(1));
        assert_eq!(cache.get("k3"), Some(3));
    }
2088
    /// Two independent TLRU caches with different frequency weights both
    /// stay functional under the same workload.
    #[test]
    fn test_tlru_frequency_weight_comparison() {
        // Test that different weights produce different behavior
        static MAP_LOW: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER_LOW: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        static MAP_HIGH: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER_HIGH: Lazy<Mutex<VecDeque<String>>> =
            Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS_LOW: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
        #[cfg(feature = "stats")]
        static STATS_HIGH: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache_low = GlobalCache::new(
            &MAP_LOW,
            &ORDER_LOW,
            Some(2),
            None,
            EvictionPolicy::TLRU,
            Some(10),
            Some(0.3), // Low weight
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS_LOW,
        );

        let cache_high = GlobalCache::new(
            &MAP_HIGH,
            &ORDER_HIGH,
            Some(2),
            None,
            EvictionPolicy::TLRU,
            Some(10),
            Some(2.0), // High weight
            None,
            None,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS_HIGH,
        );

        // Same operations on both caches
        cache_low.insert("k1", 1);
        cache_low.insert("k2", 2);
        cache_high.insert("k1", 1);
        cache_high.insert("k2", 2);

        // Make k1 frequent in both
        for _ in 0..5 {
            let _ = cache_low.get("k1");
            let _ = cache_high.get("k1");
        }

        thread::sleep(Duration::from_millis(50));

        // Add new entry to both
        cache_low.insert("k3", 3);
        cache_high.insert("k3", 3);

        // Both should work correctly with their respective weights
        assert_eq!(cache_low.get("k3"), Some(3));
        assert_eq!(cache_high.get("k3"), Some(3));
    }
2160
2161 #[test]
2162 fn test_tlru_no_ttl_with_frequency_weight() {
2163 static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
2164 Lazy::new(|| RwLock::new(HashMap::new()));
2165 static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
2166
2167 #[cfg(feature = "stats")]
2168 static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
2169
2170 // TLRU without TTL (behaves like ARC but with frequency_weight)
2171 let cache = GlobalCache::new(
2172 &MAP,
2173 &ORDER,
2174 Some(3),
2175 None,
2176 EvictionPolicy::TLRU,
2177 None, // No TTL - age_factor will be 1.0
2178 Some(1.5),
2179 None,
2180 None,
2181 None,
2182 None,
2183 #[cfg(feature = "stats")]
2184 &STATS,
2185 );
2186
2187 cache.insert("k1", 1);
2188 cache.insert("k2", 2);
2189 cache.insert("k3", 3);
2190
2191 // Make k1 very frequent
2192 for _ in 0..10 {
2193 let _ = cache.get("k1");
2194 }
2195
2196 // Add new entry
2197 cache.insert("k4", 4);
2198
2199 // Without TTL, TLRU focuses on frequency and position
2200 // k1 should remain due to high frequency
2201 assert_eq!(cache.get("k1"), Some(1));
2202 }
2203
2204 #[test]
2205 fn test_tlru_concurrent_with_frequency_weight() {
2206 static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
2207 Lazy::new(|| RwLock::new(HashMap::new()));
2208 static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
2209
2210 #[cfg(feature = "stats")]
2211 static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
2212
2213 let cache = GlobalCache::new(
2214 &MAP,
2215 &ORDER,
2216 Some(5),
2217 None,
2218 EvictionPolicy::TLRU,
2219 Some(10),
2220 Some(1.2), // Slightly emphasize frequency
2221 None,
2222 None,
2223 None,
2224 None,
2225 #[cfg(feature = "stats")]
2226 &STATS,
2227 );
2228
2229 // Insert initial entries
2230 cache.insert("k1", 1);
2231 cache.insert("k2", 2);
2232
2233 // Spawn multiple threads accessing the cache
2234 let handles: Vec<_> = (0..5)
2235 .map(|i| {
2236 thread::spawn(move || {
2237 let cache = GlobalCache::new(
2238 &MAP,
2239 &ORDER,
2240 Some(5),
2241 None,
2242 EvictionPolicy::TLRU,
2243 Some(10),
2244 Some(1.2),
2245 None,
2246 None,
2247 None,
2248 None,
2249 #[cfg(feature = "stats")]
2250 &STATS,
2251 );
2252
2253 // Access k1 frequently
2254 for _ in 0..3 {
2255 let _ = cache.get("k1");
2256 }
2257
2258 // Insert new entry
2259 cache.insert(&format!("k{}", i + 3), i + 3);
2260 })
2261 })
2262 .collect();
2263
2264 for handle in handles {
2265 handle.join().unwrap();
2266 }
2267
2268 // k1 should remain cached due to high frequency and frequency_weight > 1.0
2269 assert_eq!(cache.get("k1"), Some(1));
2270 }
2271
2272 #[test]
2273 fn test_tlru_frequency_weight_edge_cases() {
2274 static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
2275 Lazy::new(|| RwLock::new(HashMap::new()));
2276 static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
2277
2278 #[cfg(feature = "stats")]
2279 static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
2280
2281 // Test with very low weight (close to 0)
2282 let cache = GlobalCache::new(
2283 &MAP,
2284 &ORDER,
2285 Some(2),
2286 None,
2287 EvictionPolicy::TLRU,
2288 Some(5),
2289 Some(0.1), // Very low weight
2290 None,
2291 None,
2292 None,
2293 None,
2294 #[cfg(feature = "stats")]
2295 &STATS,
2296 );
2297
2298 cache.insert("k1", 1);
2299 cache.insert("k2", 2);
2300
2301 // Make k1 extremely frequent
2302 for _ in 0..100 {
2303 let _ = cache.get("k1");
2304 }
2305
2306 thread::sleep(Duration::from_millis(50));
2307
2308 // Even with very high frequency, k1 might be evicted with very low weight
2309 cache.insert("k3", 3);
2310
2311 // The cache should still work correctly
2312 assert!(cache.get("k3").is_some());
2313 }
2314}