// cachelito_core/global_cache.rs

1use crate::{CacheEntry, EvictionPolicy};
2use once_cell::sync::Lazy;
3use parking_lot::lock_api::MutexGuard;
4use parking_lot::{Mutex, RawMutex, RwLock};
5use std::collections::{HashMap, VecDeque};
6use std::fmt::Debug;
7
8use crate::utils::{
9    find_arc_eviction_key, find_min_frequency_key, find_tlru_eviction_key, move_key_to_end,
10    remove_key_from_global_cache,
11};
12#[cfg(feature = "stats")]
13use crate::CacheStats;
14
15/// A thread-safe global cache that can be shared across multiple threads.
16///
17/// Unlike `ThreadLocalCache` which uses thread-local storage, `GlobalCache` stores
18/// cached values in global static variables protected by locks, allowing cache
19/// sharing across all threads in the application.
20///
21/// # Type Parameters
22///
23/// * `R` - The return type to be cached. Must be `'static` to be stored in global state.
24///
25/// # Features
26///
27/// - **Thread-safe sharing**: Multiple threads access the same cache through RwLock/Mutex
28/// - **Eviction policies**: FIFO, LRU, LFU, ARC, Random, and TLRU
29///   - **FIFO**: First In, First Out - simple and predictable
30///   - **LRU**: Least Recently Used - evicts least recently accessed entries
31///   - **LFU**: Least Frequently Used - evicts least frequently accessed entries
32///   - **ARC**: Adaptive Replacement Cache - hybrid policy combining recency and frequency
33///   - **Random**: Random replacement - O(1) eviction with minimal overhead
34///   - **TLRU**: Time-aware LRU - combines recency, frequency, and age factors
35///     - Customizable with `frequency_weight` parameter
36///     - Formula: `score = frequency^weight × position × age_factor`
37///     - `frequency_weight < 1.0`: Emphasize recency (time-sensitive data)
38///     - `frequency_weight > 1.0`: Emphasize frequency (popular content)
39/// - **Cache limits**: Entry count limits (`limit`) and memory-based limits (`max_memory`)
40/// - **TTL support**: Automatic expiration of entries based on age
41/// - **Statistics**: Optional cache hit/miss tracking (with `stats` feature)
42/// - **Frequency tracking**: For LFU, ARC, and TLRU policies
43/// - **Memory estimation**: Support for memory-based eviction (requires `MemoryEstimator`)
44///
45/// # Cache Entry Structure
46///
47/// Cache entries are stored as `CacheEntry<R>` which contains:
48/// - `value`: The cached value of type R
49/// - `timestamp`: Unix timestamp when the entry was created (for TTL and TLRU age factor)
50/// - `frequency`: Access counter for LFU, ARC, and TLRU policies
51///
52/// # Eviction Behavior
53///
54/// When the cache reaches its limit (entry count or memory), entries are evicted according
55/// to the configured policy:
56///
57/// - **FIFO**: Oldest entry (first in order queue) is evicted
58/// - **LRU**: Least recently accessed entry (first in order queue) is evicted
59/// - **LFU**: Entry with lowest frequency counter is evicted
60/// - **ARC**: Entry with lowest score (frequency × position_weight) is evicted
61/// - **Random**: Randomly selected entry is evicted
62/// - **TLRU**: Entry with lowest score (frequency^weight × position × age_factor) is evicted
63///
64/// # Thread Safety
65///
66/// This cache uses `parking_lot::RwLock` for the cache map and `parking_lot::Mutex` for the order queue.
67/// The `parking_lot` implementation provides:
68/// - **RwLock for reads**: Multiple threads can read concurrently without blocking
69/// - **No lock poisoning** (simpler API, no `Result` wrapping)
70/// - **Better performance** under contention (30-50% faster than std::sync)
71/// - **Smaller memory footprint** (~40x smaller than std::sync)
72/// - **Fair locking algorithm** prevents thread starvation
73///
74/// **Read-heavy workloads** (typical for caches) see 4-5x performance improvement with RwLock
75/// compared to Mutex, as multiple threads can read the cache simultaneously.
76///
77/// # Performance Characteristics
78///
79/// - **Get**: O(1) for cache lookup, O(n) for LRU/ARC/TLRU reordering
80/// - **Insert**: O(1) for FIFO/Random, O(n) for LRU/LFU/ARC/TLRU eviction
81/// - **Memory**: O(n) where n is the number of cached entries
82/// - **Synchronization**: Lock acquisition overhead on every operation
83///
84/// # Performance Considerations
85///
86/// - **Synchronization overhead**: Each cache operation requires acquiring locks
87/// - **Lock contention**: High concurrent access may cause threads to wait
88/// - **Read optimization**: RwLock allows concurrent reads (no blocking for cache hits)
89/// - **Write bottleneck**: Only one thread can modify cache at a time
90/// - **Shared benefits**: All threads benefit from cached results
91/// - **Best for**: Expensive computations where sharing outweighs synchronization cost
92///
93/// # Examples
94///
95/// ## Basic Usage
96///
97/// ```ignore
98/// use cachelito_core::{GlobalCache, EvictionPolicy, CacheEntry};
99/// use once_cell::sync::Lazy;
100/// use parking_lot::{Mutex, RwLock};
101/// use std::collections::{HashMap, VecDeque};
102///
103/// static CACHE_MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
104///     Lazy::new(|| RwLock::new(HashMap::new()));
105/// static CACHE_ORDER: Lazy<Mutex<VecDeque<String>>> =
106///     Lazy::new(|| Mutex::new(VecDeque::new()));
107///
108/// let cache = GlobalCache::new(
109///     &CACHE_MAP,
110///     &CACHE_ORDER,
111///     Some(100),         // Max 100 entries
112///     None,              // No memory limit
113///     EvictionPolicy::LRU,
114///     Some(60),          // 60 second TTL
115///     None,              // Default frequency_weight
116/// );
117///
118/// // All threads can access the same cache
119/// cache.insert("key1", 42);
120/// assert_eq!(cache.get("key1"), Some(42));
121/// ```
122///
123/// ## TLRU with Custom Frequency Weight
124///
125/// ```ignore
126/// use cachelito_core::{GlobalCache, EvictionPolicy};
127///
128/// // Emphasize frequency over recency (good for popular content)
129/// let cache = GlobalCache::new(
130///     &CACHE_MAP,
131///     &CACHE_ORDER,
132///     Some(100),
133///     None,
134///     EvictionPolicy::TLRU,
135///     Some(300),
136///     Some(1.5),         // frequency_weight > 1.0
137/// );
138///
139/// // Emphasize recency over frequency (good for time-sensitive data)
140/// let cache = GlobalCache::new(
141///     &CACHE_MAP,
142///     &CACHE_ORDER,
143///     Some(100),
144///     None,
145///     EvictionPolicy::TLRU,
146///     Some(300),
147///     Some(0.3),         // frequency_weight < 1.0
148/// );
149/// ```
150///
151/// ## With Memory Limits
152///
153/// ```ignore
154/// use cachelito_core::{GlobalCache, EvictionPolicy, MemoryEstimator};
155///
156/// let cache = GlobalCache::new(
157///     &CACHE_MAP,
158///     &CACHE_ORDER,
159///     Some(1000),
160///     Some(100 * 1024 * 1024), // 100MB max
161///     EvictionPolicy::LRU,
162///     Some(300),
163///     None,
164/// );
165///
166/// // Insert with memory tracking (requires MemoryEstimator implementation)
167/// cache.insert_with_memory("key", value);
168/// ```
pub struct GlobalCache<R: 'static> {
    /// Static, lazily-initialized map from cache key to entry.
    /// Guarded by an `RwLock` so concurrent readers never block each other.
    pub map: &'static Lazy<RwLock<HashMap<String, CacheEntry<R>>>>,
    /// Static order queue used by the eviction policies
    /// (front = oldest / least recently used, back = newest).
    pub order: &'static Lazy<Mutex<VecDeque<String>>>,
    /// Maximum number of entries; `None` means unlimited.
    pub limit: Option<usize>,
    /// Maximum total memory in bytes, enforced only by `insert_with_memory`;
    /// `None` means unlimited.
    pub max_memory: Option<usize>,
    /// Eviction policy applied when a limit is exceeded.
    pub policy: EvictionPolicy,
    /// Time-to-live in seconds for entries; `None` disables expiration.
    pub ttl: Option<u64>,
    /// Weight factor for the frequency term of the TLRU score;
    /// ignored by every other policy.
    pub frequency_weight: Option<f64>,
    /// Hit/miss counters, only present with the `stats` feature.
    #[cfg(feature = "stats")]
    pub stats: &'static Lazy<CacheStats>,
}
180
181impl<R: Clone + 'static> GlobalCache<R> {
182    /// Creates a new global cache instance.
183    ///
184    /// # Parameters
185    ///
186    /// * `map` - Static reference to a RwLock-protected HashMap for storing cache entries
187    /// * `order` - Static reference to a Mutex-protected VecDeque for tracking entry order
188    /// * `limit` - Optional maximum number of entries (None for unlimited)
189    /// * `max_memory` - Optional maximum memory size in bytes (None for unlimited)
190    /// * `policy` - Eviction policy (FIFO, LRU, LFU, ARC, Random, or TLRU)
191    /// * `ttl` - Optional time-to-live in seconds for cache entries (None for no expiration)
192    /// * `frequency_weight` - Optional weight factor for frequency in TLRU policy
193    ///   - Values < 1.0: Emphasize recency and age
194    ///   - Values > 1.0: Emphasize frequency
195    ///   - None or 1.0: Balanced approach (default)
196    ///   - Only used when policy is TLRU, ignored otherwise
197    /// * `stats` - Static reference to CacheStats for tracking hit/miss statistics (stats feature only)
198    ///
199    /// # Returns
200    ///
201    /// A new `GlobalCache` instance configured with the provided parameters.
202    ///
203    /// # Examples
204    ///
205    /// ## Basic LRU cache with TTL
206    ///
207    /// ```ignore
208    /// let cache = GlobalCache::new(
209    ///     &CACHE_MAP,
210    ///     &CACHE_ORDER,
211    ///     Some(1000),              // Max 1000 entries
212    ///     None,                    // No memory limit
213    ///     EvictionPolicy::LRU,     // LRU eviction
214    ///     Some(300),               // 5 minute TTL
215    ///     None,                    // No frequency_weight (not needed for LRU)
216    ///     #[cfg(feature = "stats")]
217    ///     &CACHE_STATS,
218    /// );
219    /// ```
220    ///
221    /// ## TLRU with memory limit and custom frequency weight
222    ///
223    /// ```ignore
224    /// let cache = GlobalCache::new(
225    ///     &CACHE_MAP,
226    ///     &CACHE_ORDER,
227    ///     Some(1000),
228    ///     Some(100 * 1024 * 1024), // 100MB max
229    ///     EvictionPolicy::TLRU,    // TLRU eviction
230    ///     Some(300),               // 5 minute TTL
231    ///     Some(1.5),               // Emphasize frequency (popular content)
232    ///     #[cfg(feature = "stats")]
233    ///     &CACHE_STATS,
234    /// );
235    /// ```
236    #[cfg(feature = "stats")]
237    pub fn new(
238        map: &'static Lazy<RwLock<HashMap<String, CacheEntry<R>>>>,
239        order: &'static Lazy<Mutex<VecDeque<String>>>,
240        limit: Option<usize>,
241        max_memory: Option<usize>,
242        policy: EvictionPolicy,
243        ttl: Option<u64>,
244        frequency_weight: Option<f64>,
245        stats: &'static Lazy<CacheStats>,
246    ) -> Self {
247        Self {
248            map,
249            order,
250            limit,
251            max_memory,
252            policy,
253            ttl,
254            frequency_weight,
255            stats,
256        }
257    }
258
    /// Creates a new global cache instance (statistics tracking disabled).
    ///
    /// This is the `stats`-feature-off counterpart of the constructor above:
    /// it takes the same parameters except for the `stats` reference. See the
    /// `stats`-enabled variant for the full description of each parameter.
    #[cfg(not(feature = "stats"))]
    pub fn new(
        map: &'static Lazy<RwLock<HashMap<String, CacheEntry<R>>>>,
        order: &'static Lazy<Mutex<VecDeque<String>>>,
        limit: Option<usize>,
        max_memory: Option<usize>,
        policy: EvictionPolicy,
        ttl: Option<u64>,
        frequency_weight: Option<f64>,
    ) -> Self {
        Self {
            map,
            order,
            limit,
            max_memory,
            policy,
            ttl,
            frequency_weight,
        }
    }
279
280    /// Retrieves a cached value by key.
281    ///
282    /// This method attempts to retrieve a cached value, checking for expiration
283    /// and updating access patterns based on the eviction policy.
284    ///
285    /// # Parameters
286    ///
287    /// * `key` - The cache key to retrieve
288    ///
289    /// # Returns
290    ///
291    /// * `Some(R)` - The cached value if found and not expired
292    /// * `None` - If the key is not in cache or the entry has expired
293    ///
294    /// # Behavior by Policy
295    ///
296    /// - **FIFO**: No updates on cache hit (order remains unchanged)
297    /// - **LRU**: Moves the key to the end of the order queue (most recently used)
298    /// - **LFU**: Increments the frequency counter for the entry
299    /// - **ARC**: Increments frequency counter and updates position in order queue
300    /// - **Random**: No updates on cache hit
301    /// - **TLRU**: Increments frequency counter and updates position in order queue
302    ///
303    /// # TTL Expiration
304    ///
305    /// If a TTL is configured and the entry has expired:
306    /// - The entry is removed from both the cache map and order queue
307    /// - A cache miss is recorded (if stats feature is enabled)
308    /// - `None` is returned
309    ///
310    /// # Statistics
311    ///
312    /// When the `stats` feature is enabled:
313    /// - Cache hits are recorded when a valid entry is found
314    /// - Cache misses are recorded when the key doesn't exist or has expired
315    ///
316    /// # Thread Safety
317    ///
318    /// This method is thread-safe and uses a multi-phase locking strategy:
319    /// 1. **Read lock** for initial lookup (allows concurrent reads)
320    /// 2. **Mutex + Write lock** for expired entry removal (if needed)
321    /// 3. **Mutex lock** for order queue updates (for LRU/ARC/TLRU)
322    ///
323    /// Multiple threads can safely call this method concurrently. Read-heavy
324    /// workloads benefit from RwLock's concurrent read capability.
325    ///
326    /// # Performance
327    ///
328    /// - **FIFO, Random**: O(1) - no reordering needed
329    /// - **LRU, ARC, TLRU**: O(n) - requires finding and moving key in order queue
330    /// - **LFU**: O(1) - only increments counter
331    /// - **Lock overhead**: Read lock for lookup + potential write lock for updates
332    ///
333    /// # Examples
334    ///
335    /// ```ignore
336    /// // Insert and retrieve
337    /// cache.insert("user:123", user_data);
338    /// assert_eq!(cache.get("user:123"), Some(user_data));
339    ///
340    /// // Non-existent key
341    /// assert_eq!(cache.get("user:999"), None);
342    ///
343    /// // Expired entry (with TTL)
344    /// cache.insert("temp", data);
345    /// std::thread::sleep(Duration::from_secs(61)); // Wait for TTL expiration
346    /// assert_eq!(cache.get("temp"), None);
347    /// ```
    pub fn get(&self, key: &str) -> Option<R> {
        let mut result = None;
        let mut expired = false;

        // Phase 1: look up under the read lock only - allows concurrent reads.
        // The value is cloned out so the lock can be dropped immediately.
        {
            let m = self.map.read();
            if let Some(entry) = m.get(key) {
                if entry.is_expired(self.ttl) {
                    expired = true;
                } else {
                    result = Some(entry.value.clone());
                }
            }
        } // Read lock released here

        if expired {
            // Phase 2: remove the expired entry. Lock order (order mutex
            // first, then map write lock) matches the eviction paths in
            // `insert`/`handle_entry_limit_eviction`, so there is no
            // lock-order inversion between the two locks.
            let mut o = self.order.lock();
            // Acquire write lock to modify the map
            let mut map_write = self.map.write();
            remove_key_from_global_cache(&mut map_write, &mut o, key);
            #[cfg(feature = "stats")]
            self.stats.record_miss();
            return None;
        }

        // Record stats (expired entries were already counted as misses above).
        #[cfg(feature = "stats")]
        {
            if result.is_some() {
                self.stats.record_hit();
            } else {
                self.stats.record_miss();
            }
        }

        // Phase 3: update access patterns based on policy.
        // NOTE(review): the entry may have been evicted by another thread
        // since the read lock was dropped; `increment_frequency` visibly
        // ignores a missing key, and `move_key_to_end` is assumed to do the
        // same - confirm against utils.
        if result.is_some() {
            match self.policy {
                EvictionPolicy::LRU => {
                    // Move key to end of order queue (most recently used)
                    move_key_to_end(&mut self.order.lock(), key);
                }
                EvictionPolicy::LFU => {
                    // Increment frequency counter
                    self.increment_frequency(key);
                }
                EvictionPolicy::ARC => {
                    // Adaptive Replacement: Update both recency (LRU) and frequency (LFU)
                    // Move key to end (recency) - lock is automatically released after this call
                    move_key_to_end(&mut self.order.lock(), key);
                    // Increment frequency counter
                    self.increment_frequency(key);
                }
                EvictionPolicy::TLRU => {
                    // Time-aware LRU: Update both recency and frequency
                    // Similar to ARC but considers age in eviction
                    move_key_to_end(&mut self.order.lock(), key);
                    self.increment_frequency(key);
                }
                EvictionPolicy::FIFO | EvictionPolicy::Random => {
                    // No update needed for FIFO or Random
                }
            }
        }

        result
    }
417
418    /// Increments the frequency counter for the specified key.
419    fn increment_frequency(&self, key: &str) {
420        let mut m = self.map.write();
421        if let Some(entry) = m.get_mut(key) {
422            entry.increment_frequency();
423        }
424    }
425
426    /// Inserts or updates a value in the cache.
427    ///
428    /// This method stores a new value in the cache or updates an existing one.
429    /// It handles cache limit enforcement and eviction according to the configured policy.
430    ///
431    /// # Parameters
432    ///
433    /// * `key` - The cache key
434    /// * `value` - The value to cache
435    ///
436    /// # Behavior
437    ///
438    /// 1. Creates a new `CacheEntry` with the current timestamp
439    /// 2. Inserts/updates the entry in the map
440    /// 3. Updates the order queue:
441    ///    - If key already exists in queue, removes old position
442    ///    - Adds key to the end of the queue
443    /// 4. Enforces cache limit:
444    ///    - If limit is set and exceeded, evicts the oldest entry (front of queue)
445    ///    - Removes evicted entry from both map and order queue
446    ///
447    /// # Eviction Policies
448    ///
449    /// When the cache limit is reached, entries are evicted according to the policy:
450    /// - **FIFO**: Evicts oldest inserted entry (front of queue)
451    /// - **LRU**: Evicts least recently used entry (front of queue, updated by `get()`)
452    /// - **LFU**: Evicts entry with lowest frequency counter
453    /// - **ARC**: Evicts based on hybrid score (frequency × position_weight)
454    /// - **Random**: Evicts randomly selected entry
455    /// - **TLRU**: Evicts based on TLRU score (frequency^weight × position × age_factor)
456    ///
457    /// # Thread Safety
458    ///
459    /// This method is thread-safe and uses mutex locks to ensure consistency
460    /// between the map and order queue.
461    ///
462    /// # Example
463    ///
464    /// ```ignore
465    /// // Insert a value
466    /// cache.insert("user:123", user_data);
467    ///
468    /// // Update existing value
469    /// cache.insert("user:123", updated_user_data);
470    ///
471    /// // With limit=2, this will evict the oldest entry
472    /// cache.insert("user:456", another_user);
473    /// cache.insert("user:789", yet_another_user); // Evicts first entry
474    /// ```
475    ///
476    /// # Note
477    ///
478    /// This method does NOT require `MemoryEstimator` trait. It only handles entry-count limits.
479    /// If `max_memory` is configured, use `insert_with_memory()` instead, which requires
480    /// the type to implement `MemoryEstimator`.
481    pub fn insert(&self, key: &str, value: R) {
482        let key_s = key.to_string();
483        let entry = CacheEntry::new(value);
484
485        // Acquire write lock for modification
486        self.map.write().insert(key_s.clone(), entry);
487
488        let mut o = self.order.lock();
489        if let Some(pos) = o.iter().position(|k| *k == key_s) {
490            o.remove(pos);
491        }
492        o.push_back(key_s.clone());
493
494        // Always handle entry-count limits, regardless of memory limits
495        self.handle_entry_limit_eviction(&mut o);
496    }
497
498    /// Handles the eviction of entries from a global cache when the number of entries exceeds the limit.
499    ///
500    /// The eviction behavior depends on the specified eviction policy. The function ensures that the cache
501    /// adheres to the defined entry limit by evicting entries based on the configured policy:
502    ///
503    /// - **LFU (Least Frequently Used):** Finds and evicts the entry with the minimum frequency of usage.
504    /// - **ARC (Adaptive Replacement Cache):** Leverages the ARC eviction strategy to find and evict a specific entry.
505    /// - **FIFO (First In First Out):** Evicts the oldest entry in the queue to ensure the limit is maintained.
506    /// - **LRU (Least Recently Used):** Evicts the least recently accessed entry from the queue.
507    ///
508    /// # Parameters
509    ///
510    /// - `o`: A mutable reference to a `MutexGuard` that holds a `VecDeque<String>`.
511    ///   This represents the global cache where entries are stored.
512    ///
513    /// # Behavior
514    ///
515    /// 1. **Check Limit:** The function first checks if the `limit` is defined and if the length of the
516    ///    cache (`o`) exceeds the defined `limit`.
517    ///
518    /// 2. **Eviction By Policy:** Based on the configured `EvictionPolicy`, different eviction strategies
519    ///    are employed:
520    ///
521    ///   - **LFU:** The method identifies the key with the minimum frequency count by inspecting the
522    ///     associated frequency map and removes it from the cache.
523    ///   - **ARC:** Uses an ARC strategy to determine which key should be evicted and removes it from the cache.
524    ///   - **FIFO or LRU:** Dequeues entries in sequence (from the front of the queue) and checks if the
525    ///     entry still exists in the global cache. If found, the entry is removed from both the queue and cache.
526    ///
527    /// 3. **Thread-Safe Access:** The function ensures thread-safe read/write access to the cache and
528    ///    associated data structures using mutexes.
    fn handle_entry_limit_eviction(&self, mut o: &mut MutexGuard<RawMutex, VecDeque<String>>) {
        // Precondition: the caller already holds the order-queue lock (`o`),
        // so the map write locks taken below follow the same
        // order-queue-then-map acquisition sequence as `get`'s expiration
        // path, keeping lock ordering consistent across the cache.
        if let Some(limit) = self.limit {
            if o.len() > limit {
                // At most one entry is evicted per call; callers insert one
                // key at a time, so the queue exceeds the limit by at most one.
                match self.policy {
                    EvictionPolicy::LFU => {
                        // Find and evict the entry with the minimum frequency
                        let mut map_write = self.map.write();
                        let min_freq_key = find_min_frequency_key(&map_write, &o);

                        if let Some(evict_key) = min_freq_key {
                            remove_key_from_global_cache(&mut map_write, &mut o, &evict_key);
                        }
                    }
                    EvictionPolicy::ARC => {
                        // Score each key by frequency and queue position.
                        let mut map_write = self.map.write();
                        if let Some(evict_key) =
                            find_arc_eviction_key(&map_write, o.iter().enumerate())
                        {
                            remove_key_from_global_cache(&mut map_write, &mut o, &evict_key);
                        }
                    }
                    EvictionPolicy::TLRU => {
                        // Score each key by frequency^weight, position, and age.
                        let mut map_write = self.map.write();
                        if let Some(evict_key) = find_tlru_eviction_key(
                            &map_write,
                            o.iter().enumerate(),
                            self.ttl,
                            self.frequency_weight,
                        ) {
                            remove_key_from_global_cache(&mut map_write, &mut o, &evict_key);
                        }
                    }
                    EvictionPolicy::Random => {
                        // O(1) random eviction: select random position and remove directly
                        if !o.is_empty() {
                            let pos = fastrand::usize(..o.len());
                            if let Some(evict_key) = o.remove(pos) {
                                let mut map_write = self.map.write();
                                map_write.remove(&evict_key);
                            }
                        }
                    }
                    EvictionPolicy::FIFO | EvictionPolicy::LRU => {
                        // Keep trying to evict until we find a valid entry or queue is empty
                        // (the queue may contain keys no longer present in the map).
                        let mut map_write = self.map.write();
                        while let Some(evict_key) = o.pop_front() {
                            // Check if the key still exists in the cache before removing
                            if map_write.contains_key(&evict_key) {
                                map_write.remove(&evict_key);
                                break;
                            }
                        }
                    }
                }
            }
        }
    }
586}
587
588// Separate implementation for types that implement MemoryEstimator
589// This allows memory-based eviction
590impl<R: Clone + 'static + crate::MemoryEstimator> GlobalCache<R> {
591    /// Insert with memory limit support.
592    ///
593    /// This method requires `R` to implement `MemoryEstimator` and handles both
594    /// memory-based and entry-count-based eviction.
595    ///
596    /// Use this method when `max_memory` is configured in the cache.
597    ///
598    /// # Arguments
599    ///
600    /// * `key` - The cache key
601    /// * `value` - The value to cache (must implement `MemoryEstimator`)
602    ///
603    /// # Memory Management
604    ///
605    /// The method calculates the memory footprint of all cached entries and evicts
606    /// entries as needed to stay within the `max_memory` limit. Eviction follows
607    /// the configured policy.
608    ///
609    /// # Safety Check
610    ///
611    /// If the value to be inserted is larger than `max_memory`, the insertion is
612    /// skipped entirely to avoid infinite eviction loops. This ensures the cache
613    /// respects the memory limit even if individual values are very large.
614    ///
615    /// # Eviction Behavior by Policy
616    ///
617    /// When memory limit is exceeded:
618    /// - **FIFO/LRU**: Evicts from front of order queue
619    /// - **LFU**: Evicts entry with lowest frequency
620    /// - **ARC**: Evicts based on hybrid score (frequency × position_weight)
621    /// - **TLRU**: Evicts based on TLRU score (frequency^weight × position × age_factor)
622    /// - **Random**: Evicts randomly selected entry
623    ///
624    /// The eviction loop continues until there's enough memory for the new value.
625    ///
626    /// # Entry Count Limit
627    ///
628    /// After satisfying memory constraints, this method also checks the entry count
629    /// limit (if configured) and evicts additional entries if needed.
630    ///
631    /// # Thread Safety
632    ///
633    /// This method uses write locks to ensure consistency between the map and
634    /// order queue during eviction and insertion.
635    ///
636    /// # Examples
637    ///
638    /// ```ignore
639    /// use cachelito_core::MemoryEstimator;
640    ///
641    /// // Type must implement MemoryEstimator
642    /// impl MemoryEstimator for MyLargeStruct {
643    ///     fn estimate_memory(&self) -> usize {
644    ///         std::mem::size_of::<Self>() + self.data.capacity()
645    ///     }
646    /// }
647    ///
648    /// // Insert with automatic memory-based eviction
649    /// cache.insert_with_memory("large_data", expensive_value);
650    /// ```
651    ///
652    /// # Performance
653    ///
654    /// - **Memory calculation**: O(n) - iterates all entries to sum memory
655    /// - **Eviction**: Varies by policy (see individual policy documentation)
656    /// - May evict multiple entries in one call if memory limit is tight
    pub fn insert_with_memory(&self, key: &str, value: R) {
        let key_s = key.to_string();
        let entry = CacheEntry::new(value);

        // Acquire write lock for modification (released at end of statement).
        self.map.write().insert(key_s.clone(), entry);

        // The order lock is held from here until the end of the method, so the
        // queue cannot change underneath the eviction loop below.
        let mut o = self.order.lock();
        if let Some(pos) = o.iter().position(|k| *k == key_s) {
            o.remove(pos);
        }
        // `key_s` is still needed below, so the clone is required here.
        o.push_back(key_s.clone());

        // Check memory limit first (if specified)
        if let Some(max_mem) = self.max_memory {
            // First, check if the new value by itself exceeds max_mem
            // This is a safety check to prevent infinite eviction loop
            // NOTE(review): if another thread removed `key_s` between the
            // insert above and this read, the size reads as 0 and the
            // oversize check is skipped - benign, since the entry is gone.
            let new_value_size = {
                let map_read = self.map.read();
                map_read
                    .get(&key_s)
                    .map(|e| e.value.estimate_memory())
                    .unwrap_or(0)
            };

            if new_value_size > max_mem {
                // The value itself is too large for the cache
                // Remove it and return early to respect memory limit
                self.map.write().remove(&key_s);
                // `key_s` was pushed last and the order lock has been held
                // continuously, so pop_back removes exactly that key.
                o.pop_back(); // Remove from order queue as well
                return;
            }

            loop {
                // O(n) per iteration: the total footprint is recomputed by
                // summing every entry's estimate under a fresh read lock.
                let current_mem = {
                    let map_read = self.map.read();
                    map_read
                        .values()
                        .map(|e| e.value.estimate_memory())
                        .sum::<usize>()
                };

                if current_mem <= max_mem {
                    break;
                }

                // Need to evict based on policy
                let evicted = match self.policy {
                    EvictionPolicy::LFU => {
                        let mut map_write = self.map.write();
                        let min_freq_key = find_min_frequency_key(&map_write, &o);
                        if let Some(evict_key) = min_freq_key {
                            remove_key_from_global_cache(&mut map_write, &mut o, &evict_key);
                            true
                        } else {
                            false
                        }
                    }
                    EvictionPolicy::ARC => {
                        let mut map_write = self.map.write();
                        if let Some(evict_key) =
                            find_arc_eviction_key(&map_write, o.iter().enumerate())
                        {
                            remove_key_from_global_cache(&mut map_write, &mut o, &evict_key);
                            true
                        } else {
                            false
                        }
                    }
                    EvictionPolicy::TLRU => {
                        let mut map_write = self.map.write();
                        if let Some(evict_key) = find_tlru_eviction_key(
                            &map_write,
                            o.iter().enumerate(),
                            self.ttl,
                            self.frequency_weight,
                        ) {
                            remove_key_from_global_cache(&mut map_write, &mut o, &evict_key);
                            true
                        } else {
                            false
                        }
                    }
                    EvictionPolicy::Random => {
                        // O(1) random eviction: select random position and remove directly
                        if !o.is_empty() {
                            let pos = fastrand::usize(..o.len());
                            if let Some(evict_key) = o.remove(pos) {
                                let mut map_write = self.map.write();
                                map_write.remove(&evict_key);
                                true
                            } else {
                                false
                            }
                        } else {
                            false
                        }
                    }
                    EvictionPolicy::FIFO | EvictionPolicy::LRU => {
                        // Ensure we only count as evicted if we actually remove from the map
                        let mut successfully_evicted = false;
                        let mut map_write = self.map.write();
                        while let Some(evict_key) = o.pop_front() {
                            if map_write.contains_key(&evict_key) {
                                map_write.remove(&evict_key);
                                successfully_evicted = true;
                                break;
                            }
                            // If key wasn't in map (orphan), continue popping until we remove a real one
                        }
                        successfully_evicted
                    }
                };

                if !evicted {
                    break; // Nothing left to evict
                }
            }
        }

        // Handle entry-count limits
        self.handle_entry_limit_eviction(&mut o);
    }
780
    /// Returns a reference to the cache statistics.
    ///
    /// This method is only available when the `stats` feature is enabled.
    ///
    /// # Available Metrics
    ///
    /// The returned CacheStats provides:
    /// - **hits()**: Number of successful cache lookups
    /// - **misses()**: Number of cache misses (key not found or expired)
    /// - **hit_rate()**: Ratio of hits to total accesses (0.0 to 1.0)
    /// - **total_accesses()**: Total number of get operations
    ///
    /// # Thread Safety
    ///
    /// Statistics use atomic counters (`AtomicU64`) and can be safely accessed
    /// from multiple threads without additional synchronization.
    ///
    /// # Examples
    ///
    /// ```ignore
    /// // Get basic statistics
    /// let stats = cache.stats();
    /// println!("Hits: {}", stats.hits());
    /// println!("Misses: {}", stats.misses());
    /// println!("Hit rate: {:.2}%", stats.hit_rate() * 100.0);
    /// println!("Total accesses: {}", stats.total_accesses());
    ///
    /// // Monitor cache performance
    /// let total = stats.total_accesses();
    /// if total > 1000 && stats.hit_rate() < 0.5 {
    ///     println!("Warning: Low cache hit rate");
    /// }
    /// ```
    ///
    /// # See Also
    ///
    /// - [`CacheStats`] - The statistics structure
    /// - [`crate::stats_registry::get()`] - Access stats by cache name
    #[cfg(feature = "stats")]
    pub fn stats(&self) -> &CacheStats {
        // `self.stats` already holds a shared reference (note: returned without
        // a `&` here), so this is a zero-cost accessor - no clone, no lock.
        self.stats
    }
823
824    /// Clears all entries from the cache.
825    ///
826    /// This method removes all entries from both the cache map and the order queue.
827    /// It's useful for testing or when you need to completely reset the cache state.
828    ///
829    /// # Thread Safety
830    ///
831    /// This method is thread-safe and can be safely called from multiple threads.
832    ///
833    /// # Example
834    ///
835    /// ```ignore
836    /// cache.insert("key1", 42);
837    /// cache.insert("key2", 84);
838    ///
839    /// cache.clear();
840    ///
841    /// assert_eq!(cache.get("key1"), None);
842    /// assert_eq!(cache.get("key2"), None);
843    /// ```
844    pub fn clear(&self) {
845        self.map.write().clear();
846        self.order.lock().clear();
847    }
848}
849
850/// Implementation of `GlobalCache` for `Result` types.
851///
852/// This specialized implementation provides a `insert_result` method that only
853/// caches successful (`Ok`) results, preventing error values from being cached.
854///
855/// # Type Parameters
856///
857/// * `T` - The success type, must be `Clone` and `Debug`
858/// * `E` - The error type, must be `Clone` and `Debug`
859///
860/// # Rationale
861///
862/// Errors are typically transient (network failures, temporary resource unavailability)
863/// and should not be cached. Only successful results should be memoized to avoid
864/// repeatedly returning stale errors.
865///
866/// # Example
867///
868/// ```ignore
869/// let cache: GlobalCache<Result<String, Error>> = GlobalCache::new(...);
870///
871/// // Only Ok values are cached
872/// let result = fetch_data();
873/// cache.insert_result("key1", &result);
874///
875/// // If result was Err, nothing is cached
876/// // If result was Ok, the value is cached
877/// ```
878impl<T: Clone + Debug + 'static, E: Clone + Debug + 'static> GlobalCache<Result<T, E>> {
879    /// Inserts a Result into the cache, but only if it's an `Ok` variant.
880    ///
881    /// This method intelligently caches only successful results, preventing
882    /// error values from polluting the cache.
883    ///
884    /// This version does NOT require MemoryEstimator. Use `insert_result_with_memory()`
885    /// when max_memory is configured.
886    ///
887    /// # Parameters
888    ///
889    /// * `key` - The cache key
890    /// * `value` - The Result to potentially cache
891    ///
892    /// # Behavior
893    ///
894    /// - If `value` is `Ok(v)`: Caches `Ok(v.clone())` under the given key
895    /// - If `value` is `Err(_)`: Does nothing, no cache entry is created
896    ///
897    /// # Thread Safety
898    ///
899    /// This method is thread-safe and can be called concurrently from multiple threads.
900    ///
901    /// # Example
902    ///
903    /// ```ignore
904    /// fn fetch_user(id: u64) -> Result<User, DbError> {
905    ///     // ... database query ...
906    /// }
907    ///
908    /// let result = fetch_user(123);
909    /// cache.insert_result("user:123", &result);
910    ///
911    /// // Success: cached
912    /// // Ok(user) -> cache contains Ok(user)
913    ///
914    /// // Failure: not cached (will retry next time)
915    /// // Err(db_error) -> cache remains empty for this key
916    /// ```
917    pub fn insert_result(&self, key: &str, value: &Result<T, E>) {
918        if let Ok(v) = value {
919            self.insert(key, Ok(v.clone()));
920        }
921    }
922}
923
924/// Implementation of `GlobalCache` for `Result` types WITH MemoryEstimator support.
925///
926/// This specialized implementation provides memory-aware caching for Result types.
927///
928/// # Type Parameters
929///
930/// * `T` - The success type, must be `Clone`, `Debug`, and implement `MemoryEstimator`
931/// * `E` - The error type, must be `Clone`, `Debug`, and implement `MemoryEstimator`
932impl<
933        T: Clone + Debug + 'static + crate::MemoryEstimator,
934        E: Clone + Debug + 'static + crate::MemoryEstimator,
935    > GlobalCache<Result<T, E>>
936{
937    /// Inserts a Result into the cache with memory limit support.
938    ///
939    /// This method requires both T and E to implement MemoryEstimator.
940    /// Use this when max_memory is configured.
941    ///
942    /// # Parameters
943    ///
944    /// * `key` - The cache key
945    /// * `value` - The Result to potentially cache
946    ///
947    /// # Behavior
948    ///
949    /// - If `value` is `Ok(v)`: Caches `Ok(v.clone())` under the given key
950    /// - If `value` is `Err(_)`: Does nothing, no cache entry is created
951    pub fn insert_result_with_memory(&self, key: &str, value: &Result<T, E>) {
952        if let Ok(v) = value {
953            self.insert_with_memory(key, Ok(v.clone()));
954        }
955    }
956}
957
958#[cfg(test)]
959mod tests {
960    use super::*;
961    use std::thread;
962    use std::time::Duration;
963
964    #[test]
965    fn test_global_basic_insert_get() {
966        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
967            Lazy::new(|| RwLock::new(HashMap::new()));
968        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
969        #[cfg(feature = "stats")]
970        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
971
972        let cache = GlobalCache::new(
973            &MAP,
974            &ORDER,
975            None,
976            None,
977            EvictionPolicy::FIFO,
978            None,
979            None,
980            #[cfg(feature = "stats")]
981            &STATS,
982        );
983        cache.insert("key1", 100);
984        assert_eq!(cache.get("key1"), Some(100));
985    }
986
987    #[test]
988    fn test_global_missing_key() {
989        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
990            Lazy::new(|| RwLock::new(HashMap::new()));
991        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
992
993        #[cfg(feature = "stats")]
994        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
995
996        let cache = GlobalCache::new(
997            &MAP,
998            &ORDER,
999            None,
1000            None,
1001            EvictionPolicy::FIFO,
1002            None,
1003            None,
1004            #[cfg(feature = "stats")]
1005            &STATS,
1006        );
1007        assert_eq!(cache.get("nonexistent"), None);
1008    }
1009
1010    #[test]
1011    fn test_global_update_existing() {
1012        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1013            Lazy::new(|| RwLock::new(HashMap::new()));
1014        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1015
1016        #[cfg(feature = "stats")]
1017        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1018
1019        let cache = GlobalCache::new(
1020            &MAP,
1021            &ORDER,
1022            None,
1023            None,
1024            EvictionPolicy::FIFO,
1025            None,
1026            None,
1027            #[cfg(feature = "stats")]
1028            &STATS,
1029        );
1030        cache.insert("key", 1);
1031        cache.insert("key", 2);
1032        assert_eq!(cache.get("key"), Some(2));
1033    }
1034
1035    #[test]
1036    fn test_global_fifo_eviction() {
1037        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1038            Lazy::new(|| RwLock::new(HashMap::new()));
1039        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1040
1041        #[cfg(feature = "stats")]
1042        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1043
1044        let cache = GlobalCache::new(
1045            &MAP,
1046            &ORDER,
1047            Some(2),
1048            None,
1049            EvictionPolicy::FIFO,
1050            None,
1051            None,
1052            #[cfg(feature = "stats")]
1053            &STATS,
1054        );
1055        cache.insert("k1", 1);
1056        cache.insert("k2", 2);
1057        cache.insert("k3", 3);
1058
1059        assert_eq!(cache.get("k1"), None);
1060        assert_eq!(cache.get("k2"), Some(2));
1061        assert_eq!(cache.get("k3"), Some(3));
1062    }
1063
1064    #[test]
1065    fn test_global_lru_eviction() {
1066        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1067            Lazy::new(|| RwLock::new(HashMap::new()));
1068        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1069
1070        #[cfg(feature = "stats")]
1071        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1072
1073        let cache = GlobalCache::new(
1074            &MAP,
1075            &ORDER,
1076            Some(2),
1077            None,
1078            EvictionPolicy::LRU,
1079            None,
1080            None,
1081            #[cfg(feature = "stats")]
1082            &STATS,
1083        );
1084        cache.insert("k1", 1);
1085        cache.insert("k2", 2);
1086        let _ = cache.get("k1");
1087        cache.insert("k3", 3);
1088
1089        assert_eq!(cache.get("k1"), Some(1));
1090        assert_eq!(cache.get("k2"), None);
1091        assert_eq!(cache.get("k3"), Some(3));
1092    }
1093
1094    #[test]
1095    fn test_global_lru_multiple_accesses() {
1096        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1097            Lazy::new(|| RwLock::new(HashMap::new()));
1098        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1099
1100        #[cfg(feature = "stats")]
1101        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1102
1103        let cache = GlobalCache::new(
1104            &MAP,
1105            &ORDER,
1106            Some(3),
1107            None,
1108            EvictionPolicy::LRU,
1109            None,
1110            None,
1111            #[cfg(feature = "stats")]
1112            &STATS,
1113        );
1114        cache.insert("k1", 1);
1115        cache.insert("k2", 2);
1116        cache.insert("k3", 3);
1117
1118        // Access k1 to make it most recent
1119        let _ = cache.get("k1");
1120        let _ = cache.get("k1");
1121
1122        // k2 should be evicted (least recently used)
1123        cache.insert("k4", 4);
1124
1125        assert_eq!(cache.get("k1"), Some(1));
1126        assert_eq!(cache.get("k2"), None);
1127        assert_eq!(cache.get("k3"), Some(3));
1128        assert_eq!(cache.get("k4"), Some(4));
1129    }
1130
    /// Ten threads each build their own `GlobalCache` handle over the same
    /// shared statics, insert a distinct key, and read it back after a short
    /// pause; every thread must observe exactly the value it wrote.
    #[test]
    fn test_global_thread_safety() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let handles: Vec<_> = (0..10)
            .map(|i| {
                thread::spawn(move || {
                    // Each thread constructs its own handle; all handles share
                    // the same static MAP/ORDER storage.
                    let cache = GlobalCache::new(
                        &MAP,
                        &ORDER,
                        None,
                        None,
                        EvictionPolicy::FIFO,
                        None,
                        None,
                        #[cfg(feature = "stats")]
                        &STATS,
                    );
                    cache.insert(&format!("key{}", i), i);
                    // Sleep so inserts and reads from different threads overlap.
                    thread::sleep(Duration::from_millis(10));
                    cache.get(&format!("key{}", i))
                })
            })
            .collect();

        for (i, handle) in handles.into_iter().enumerate() {
            let result = handle.join().unwrap();
            assert_eq!(result, Some(i as i32));
        }
    }
1166
1167    #[test]
1168    fn test_global_ttl_expiration() {
1169        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1170            Lazy::new(|| RwLock::new(HashMap::new()));
1171        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1172
1173        #[cfg(feature = "stats")]
1174        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1175
1176        let cache = GlobalCache::new(
1177            &MAP,
1178            &ORDER,
1179            None,
1180            None,
1181            EvictionPolicy::FIFO,
1182            Some(1),
1183            None,
1184            #[cfg(feature = "stats")]
1185            &STATS,
1186        );
1187        cache.insert("expires", 999);
1188
1189        // Should be valid immediately
1190        assert_eq!(cache.get("expires"), Some(999));
1191
1192        thread::sleep(Duration::from_secs(2));
1193
1194        // Should be expired now
1195        assert_eq!(cache.get("expires"), None);
1196    }
1197
1198    #[test]
1199    fn test_global_result_ok() {
1200        static RES_MAP: Lazy<RwLock<HashMap<String, CacheEntry<Result<i32, String>>>>> =
1201            Lazy::new(|| RwLock::new(HashMap::new()));
1202        static RES_ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1203        #[cfg(feature = "stats")]
1204        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1205
1206        let cache = GlobalCache::new(
1207            &RES_MAP,
1208            &RES_ORDER,
1209            None,
1210            None,
1211            EvictionPolicy::FIFO,
1212            None,
1213            None,
1214            #[cfg(feature = "stats")]
1215            &STATS,
1216        );
1217        let ok_result = Ok(42);
1218        cache.insert_result("success", &ok_result);
1219        assert_eq!(cache.get("success"), Some(Ok(42)));
1220    }
1221
1222    #[test]
1223    fn test_global_result_err() {
1224        static RES_MAP: Lazy<RwLock<HashMap<String, CacheEntry<Result<i32, String>>>>> =
1225            Lazy::new(|| RwLock::new(HashMap::new()));
1226        static RES_ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1227        #[cfg(feature = "stats")]
1228        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1229
1230        let cache = GlobalCache::new(
1231            &RES_MAP,
1232            &RES_ORDER,
1233            None,
1234            None,
1235            EvictionPolicy::FIFO,
1236            None,
1237            None,
1238            #[cfg(feature = "stats")]
1239            &STATS,
1240        );
1241        let err_result: Result<i32, String> = Err("error".to_string());
1242        cache.insert_result("failure", &err_result);
1243        assert_eq!(cache.get("failure"), None); // Errors not cached
1244    }
1245
    /// Five threads hammer `get("k0")` on a shared LRU cache; the concurrent
    /// recency reordering must not corrupt the map or the order queue.
    #[test]
    fn test_global_concurrent_lru_access() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            Some(5),
            None,
            EvictionPolicy::LRU,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );
        // Pre-populate cache to its full capacity of 5 entries.
        for i in 0..5 {
            cache.insert(&format!("k{}", i), i);
        }

        // Concurrent access to k0: each LRU `get` rewrites the order queue.
        let handles: Vec<_> = (0..5)
            .map(|_| {
                thread::spawn(|| {
                    let cache = GlobalCache::new(
                        &MAP,
                        &ORDER,
                        Some(5),
                        None,
                        EvictionPolicy::LRU,
                        None,
                        None,
                        #[cfg(feature = "stats")]
                        &STATS,
                    );
                    for _ in 0..10 {
                        let _ = cache.get("k0");
                    }
                })
            })
            .collect();

        for handle in handles {
            handle.join().unwrap();
        }

        // k0 must still be present: nothing was inserted beyond the limit, so
        // no eviction occurred, and the concurrent reads must not have lost it.
        assert_eq!(cache.get("k0"), Some(0));
    }
1300
1301    #[test]
1302    fn test_global_no_limit() {
1303        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1304            Lazy::new(|| RwLock::new(HashMap::new()));
1305        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1306
1307        #[cfg(feature = "stats")]
1308        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1309
1310        let cache = GlobalCache::new(
1311            &MAP,
1312            &ORDER,
1313            None,
1314            None,
1315            EvictionPolicy::FIFO,
1316            None,
1317            None,
1318            #[cfg(feature = "stats")]
1319            &STATS,
1320        );
1321
1322        for i in 0..100 {
1323            cache.insert(&format!("k{}", i), i);
1324        }
1325
1326        // All should still be present
1327        for i in 0..100 {
1328            assert_eq!(cache.get(&format!("k{}", i)), Some(i));
1329        }
1330    }
1331
    /// Memory-limit eviction must skip "orphan" keys (present in the order
    /// queue but absent from the map) and still evict a real entry, removing
    /// the orphan from the queue as a side effect.
    #[test]
    fn test_memory_eviction_skips_orphan_and_removes_real_entry() {
        // Shared structures
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        // max_memory allows only a single i32 (size 4)
        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            Some(std::mem::size_of::<i32>()),
            EvictionPolicy::FIFO,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );

        // Insert first real entry
        cache.insert_with_memory("k1", 1i32);

        // Introduce an orphan key at the front of the order queue
        // (it has no corresponding map entry, simulating a stale queue slot).
        {
            let mut o = ORDER.lock();
            o.push_front("orphan".to_string());
        }

        // Insert second entry which forces memory eviction
        cache.insert_with_memory("k2", 2i32);

        // The orphan should be ignored for memory purposes and a real key should be evicted.
        // Expect k1 to be evicted and k2 to remain.
        assert_eq!(cache.get("k1"), None);
        assert_eq!(cache.get("k2"), Some(2));

        // Ensure the orphan key is gone from the order
        let order_contents: Vec<String> = {
            let o = ORDER.lock();
            o.iter().cloned().collect()
        };
        assert!(order_contents.iter().all(|k| k != "orphan"));
    }
1379
    /// Test RwLock allows concurrent reads (no blocking): 20 reader threads
    /// each fetch all 10 pre-populated keys and must all complete with the
    /// expected values.
    #[test]
    fn test_rwlock_concurrent_reads() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );

        // Populate cache
        for i in 0..10 {
            cache.insert(&format!("key{}", i), i);
        }

        // Spawn many threads reading concurrently
        let handles: Vec<_> = (0..20)
            .map(|_thread_id| {
                thread::spawn(move || {
                    let cache = GlobalCache::new(
                        &MAP,
                        &ORDER,
                        None,
                        None,
                        EvictionPolicy::FIFO,
                        None,
                        None,
                        #[cfg(feature = "stats")]
                        &STATS,
                    );
                    let mut results = Vec::new();
                    for i in 0..10 {
                        results.push(cache.get(&format!("key{}", i)));
                    }
                    results
                })
            })
            .collect();

        // All threads should complete without blocking
        for handle in handles {
            let results = handle.join().unwrap();
            // Each thread must have seen every pre-populated value.
            for (i, result) in results.iter().enumerate() {
                assert_eq!(*result, Some(i as i32));
            }
        }
    }
1439
    /// Test RwLock write blocks reads temporarily: one writer and five readers
    /// interleave for 50 iterations each; the test passes if everything joins
    /// cleanly (no deadlock, no panic). Values read are intentionally ignored.
    #[test]
    fn test_rwlock_write_excludes_reads() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );

        cache.insert("key1", 100);

        // Write and read interleaved - should not deadlock
        let write_handle = thread::spawn(|| {
            let cache = GlobalCache::new(
                &MAP,
                &ORDER,
                None,
                None,
                EvictionPolicy::FIFO,
                None,
                None,
                #[cfg(feature = "stats")]
                &STATS,
            );
            for i in 0..50 {
                cache.insert(&format!("key{}", i), i);
                // Brief pause to give readers a chance to interleave.
                thread::sleep(Duration::from_micros(100));
            }
        });

        let read_handles: Vec<_> = (0..5)
            .map(|_| {
                thread::spawn(|| {
                    let cache = GlobalCache::new(
                        &MAP,
                        &ORDER,
                        None,
                        None,
                        EvictionPolicy::FIFO,
                        None,
                        None,
                        #[cfg(feature = "stats")]
                        &STATS,
                    );
                    for i in 0..50 {
                        // The key may or may not be written yet; only liveness
                        // matters here, so the result is discarded.
                        let _ = cache.get(&format!("key{}", i));
                        thread::sleep(Duration::from_micros(100));
                    }
                })
            })
            .collect();

        write_handle.join().unwrap();
        for handle in read_handles {
            handle.join().unwrap();
        }
    }
1510
1511    #[test]
1512    #[cfg(feature = "stats")]
1513    fn test_global_stats_basic() {
1514        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1515            Lazy::new(|| RwLock::new(HashMap::new()));
1516        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1517
1518        #[cfg(feature = "stats")]
1519        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1520
1521        let cache = GlobalCache::new(
1522            &MAP,
1523            &ORDER,
1524            None,
1525            None,
1526            EvictionPolicy::FIFO,
1527            None,
1528            None,
1529            #[cfg(feature = "stats")]
1530            &STATS,
1531        );
1532        cache.insert("k1", 1);
1533        cache.insert("k2", 2);
1534
1535        let _ = cache.get("k1"); // Hit
1536        let _ = cache.get("k2"); // Hit
1537        let _ = cache.get("k3"); // Miss
1538
1539        let stats = cache.stats();
1540        assert_eq!(stats.hits(), 2);
1541        assert_eq!(stats.misses(), 1);
1542        assert_eq!(stats.total_accesses(), 3);
1543        assert!((stats.hit_rate() - 0.6666).abs() < 0.001);
1544    }
1545
1546    #[test]
1547    #[cfg(feature = "stats")]
1548    fn test_global_stats_expired_counts_as_miss() {
1549        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1550            Lazy::new(|| RwLock::new(HashMap::new()));
1551        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1552
1553        #[cfg(feature = "stats")]
1554        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1555
1556        let cache = GlobalCache::new(
1557            &MAP,
1558            &ORDER,
1559            None,
1560            None,
1561            EvictionPolicy::FIFO,
1562            Some(1),
1563            None,
1564            #[cfg(feature = "stats")]
1565            &STATS,
1566        );
1567        cache.insert("expires", 999);
1568
1569        // Immediate access - should be a hit
1570        let _ = cache.get("expires");
1571        assert_eq!(cache.stats().hits(), 1);
1572        assert_eq!(cache.stats().misses(), 0);
1573
1574        // Wait for expiration
1575        thread::sleep(Duration::from_secs(2));
1576
1577        // Access after expiration - should be a miss
1578        let _ = cache.get("expires");
1579        assert_eq!(cache.stats().hits(), 1);
1580        assert_eq!(cache.stats().misses(), 1);
1581    }
1582
1583    #[test]
1584    #[cfg(feature = "stats")]
1585    fn test_global_stats_reset() {
1586        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1587            Lazy::new(|| RwLock::new(HashMap::new()));
1588        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1589
1590        #[cfg(feature = "stats")]
1591        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1592
1593        let cache = GlobalCache::new(
1594            &MAP,
1595            &ORDER,
1596            None,
1597            None,
1598            EvictionPolicy::FIFO,
1599            None,
1600            None,
1601            #[cfg(feature = "stats")]
1602            &STATS,
1603        );
1604        cache.insert("k1", 1);
1605        let _ = cache.get("k1");
1606        let _ = cache.get("k2");
1607
1608        let stats = cache.stats();
1609        assert_eq!(stats.hits(), 1);
1610        assert_eq!(stats.misses(), 1);
1611
1612        stats.reset();
1613        assert_eq!(stats.hits(), 0);
1614        assert_eq!(stats.misses(), 0);
1615    }
1616
    /// Ten threads each perform 10 rounds of two hits and one miss against
    /// the shared stats; atomic counters must add up exactly (no lost updates).
    #[test]
    #[cfg(feature = "stats")]
    fn test_global_stats_concurrent_access() {
        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
            Lazy::new(|| RwLock::new(HashMap::new()));
        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));

        #[cfg(feature = "stats")]
        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());

        let cache = GlobalCache::new(
            &MAP,
            &ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            #[cfg(feature = "stats")]
            &STATS,
        );
        cache.insert("k1", 1);
        cache.insert("k2", 2);

        let handles: Vec<_> = (0..10)
            .map(|_| {
                thread::spawn(|| {
                    // Every thread shares the same STATS static via its handle.
                    let cache = GlobalCache::new(
                        &MAP,
                        &ORDER,
                        None,
                        None,
                        EvictionPolicy::FIFO,
                        None,
                        None,
                        #[cfg(feature = "stats")]
                        &STATS,
                    );
                    for _ in 0..10 {
                        let _ = cache.get("k1"); // Hit
                        let _ = cache.get("k2"); // Hit
                        let _ = cache.get("k3"); // Miss
                    }
                })
            })
            .collect();

        for handle in handles {
            handle.join().unwrap();
        }

        let stats = cache.stats();
        // 10 threads * 10 iterations * 2 hits = 200 hits
        // 10 threads * 10 iterations * 1 miss = 100 misses
        assert_eq!(stats.hits(), 200);
        assert_eq!(stats.misses(), 100);
        assert_eq!(stats.total_accesses(), 300);
    }
1675
1676    #[test]
1677    #[cfg(feature = "stats")]
1678    fn test_global_stats_all_hits() {
1679        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1680            Lazy::new(|| RwLock::new(HashMap::new()));
1681        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1682
1683        #[cfg(feature = "stats")]
1684        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1685
1686        let cache = GlobalCache::new(
1687            &MAP,
1688            &ORDER,
1689            None,
1690            None,
1691            EvictionPolicy::FIFO,
1692            None,
1693            None,
1694            #[cfg(feature = "stats")]
1695            &STATS,
1696        );
1697        cache.insert("k1", 1);
1698        cache.insert("k2", 2);
1699
1700        for _ in 0..10 {
1701            let _ = cache.get("k1");
1702            let _ = cache.get("k2");
1703        }
1704
1705        let stats = cache.stats();
1706        assert_eq!(stats.hits(), 20);
1707        assert_eq!(stats.misses(), 0);
1708        assert_eq!(stats.hit_rate(), 1.0);
1709        assert_eq!(stats.miss_rate(), 0.0);
1710    }
1711
1712    #[test]
1713    #[cfg(feature = "stats")]
1714    fn test_global_stats_all_misses() {
1715        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1716            Lazy::new(|| RwLock::new(HashMap::new()));
1717        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1718
1719        #[cfg(feature = "stats")]
1720        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1721
1722        let cache = GlobalCache::new(
1723            &MAP,
1724            &ORDER,
1725            None,
1726            None,
1727            EvictionPolicy::FIFO,
1728            None,
1729            None,
1730            #[cfg(feature = "stats")]
1731            &STATS,
1732        );
1733
1734        for i in 0..10 {
1735            let _ = cache.get(&format!("k{}", i));
1736        }
1737
1738        let stats = cache.stats();
1739        assert_eq!(stats.hits(), 0);
1740        assert_eq!(stats.misses(), 10);
1741        assert_eq!(stats.hit_rate(), 0.0);
1742        assert_eq!(stats.miss_rate(), 1.0);
1743    }
1744
1745    // ========== TLRU with frequency_weight tests ==========
1746
1747    #[test]
1748    fn test_tlru_with_low_frequency_weight() {
1749        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1750            Lazy::new(|| RwLock::new(HashMap::new()));
1751        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1752
1753        #[cfg(feature = "stats")]
1754        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1755
1756        // Low frequency_weight (0.3) - emphasizes recency over frequency
1757        let cache = GlobalCache::new(
1758            &MAP,
1759            &ORDER,
1760            Some(3),
1761            None,
1762            EvictionPolicy::TLRU,
1763            Some(10),
1764            Some(0.3), // Low weight
1765            #[cfg(feature = "stats")]
1766            &STATS,
1767        );
1768
1769        // Fill cache
1770        cache.insert("k1", 1);
1771        cache.insert("k2", 2);
1772        cache.insert("k3", 3);
1773
1774        // Make k1 very frequent
1775        for _ in 0..10 {
1776            let _ = cache.get("k1");
1777        }
1778
1779        // Wait a bit to age k1
1780        thread::sleep(Duration::from_millis(100));
1781
1782        // Add new entry (cache is full)
1783        cache.insert("k4", 4);
1784
1785        // With low frequency_weight, even frequent entries can be evicted
1786        // if they're older (recency and age matter more)
1787        assert_eq!(cache.get("k4"), Some(4));
1788    }
1789
1790    #[test]
1791    fn test_tlru_with_high_frequency_weight() {
1792        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1793            Lazy::new(|| RwLock::new(HashMap::new()));
1794        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1795
1796        #[cfg(feature = "stats")]
1797        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1798
1799        // High frequency_weight (1.5) - emphasizes frequency over recency
1800        let cache = GlobalCache::new(
1801            &MAP,
1802            &ORDER,
1803            Some(3),
1804            None,
1805            EvictionPolicy::TLRU,
1806            Some(10),
1807            Some(1.5), // High weight
1808            #[cfg(feature = "stats")]
1809            &STATS,
1810        );
1811
1812        // Fill cache
1813        cache.insert("k1", 1);
1814        cache.insert("k2", 2);
1815        cache.insert("k3", 3);
1816
1817        // Make k1 very frequent
1818        for _ in 0..10 {
1819            let _ = cache.get("k1");
1820        }
1821
1822        // Wait a bit to age k1
1823        thread::sleep(Duration::from_millis(100));
1824
1825        // Add new entry (cache is full)
1826        cache.insert("k4", 4);
1827
1828        // With high frequency_weight, frequent entries are protected
1829        // k1 should remain cached despite being older
1830        assert_eq!(cache.get("k1"), Some(1));
1831        assert_eq!(cache.get("k4"), Some(4));
1832    }
1833
1834    #[test]
1835    fn test_tlru_default_frequency_weight() {
1836        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1837            Lazy::new(|| RwLock::new(HashMap::new()));
1838        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1839
1840        #[cfg(feature = "stats")]
1841        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1842
1843        // Default frequency_weight (None = 1.0) - balanced approach
1844        let cache = GlobalCache::new(
1845            &MAP,
1846            &ORDER,
1847            Some(2),
1848            None,
1849            EvictionPolicy::TLRU,
1850            Some(5),
1851            None, // Default weight
1852            #[cfg(feature = "stats")]
1853            &STATS,
1854        );
1855
1856        cache.insert("k1", 1);
1857        cache.insert("k2", 2);
1858
1859        // Access k1 a few times
1860        for _ in 0..3 {
1861            let _ = cache.get("k1");
1862        }
1863
1864        // Add third entry
1865        cache.insert("k3", 3);
1866
1867        // With balanced weight, both frequency and recency matter
1868        // k1 has higher frequency, so it should remain
1869        assert_eq!(cache.get("k1"), Some(1));
1870        assert_eq!(cache.get("k3"), Some(3));
1871    }
1872
1873    #[test]
1874    fn test_tlru_frequency_weight_comparison() {
1875        // Test that different weights produce different behavior
1876        static MAP_LOW: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1877            Lazy::new(|| RwLock::new(HashMap::new()));
1878        static ORDER_LOW: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1879
1880        static MAP_HIGH: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1881            Lazy::new(|| RwLock::new(HashMap::new()));
1882        static ORDER_HIGH: Lazy<Mutex<VecDeque<String>>> =
1883            Lazy::new(|| Mutex::new(VecDeque::new()));
1884
1885        #[cfg(feature = "stats")]
1886        static STATS_LOW: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1887        #[cfg(feature = "stats")]
1888        static STATS_HIGH: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1889
1890        let cache_low = GlobalCache::new(
1891            &MAP_LOW,
1892            &ORDER_LOW,
1893            Some(2),
1894            None,
1895            EvictionPolicy::TLRU,
1896            Some(10),
1897            Some(0.3), // Low weight
1898            #[cfg(feature = "stats")]
1899            &STATS_LOW,
1900        );
1901
1902        let cache_high = GlobalCache::new(
1903            &MAP_HIGH,
1904            &ORDER_HIGH,
1905            Some(2),
1906            None,
1907            EvictionPolicy::TLRU,
1908            Some(10),
1909            Some(2.0), // High weight
1910            #[cfg(feature = "stats")]
1911            &STATS_HIGH,
1912        );
1913
1914        // Same operations on both caches
1915        cache_low.insert("k1", 1);
1916        cache_low.insert("k2", 2);
1917        cache_high.insert("k1", 1);
1918        cache_high.insert("k2", 2);
1919
1920        // Make k1 frequent in both
1921        for _ in 0..5 {
1922            let _ = cache_low.get("k1");
1923            let _ = cache_high.get("k1");
1924        }
1925
1926        thread::sleep(Duration::from_millis(50));
1927
1928        // Add new entry to both
1929        cache_low.insert("k3", 3);
1930        cache_high.insert("k3", 3);
1931
1932        // Both should work correctly with their respective weights
1933        assert_eq!(cache_low.get("k3"), Some(3));
1934        assert_eq!(cache_high.get("k3"), Some(3));
1935    }
1936
1937    #[test]
1938    fn test_tlru_no_ttl_with_frequency_weight() {
1939        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1940            Lazy::new(|| RwLock::new(HashMap::new()));
1941        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1942
1943        #[cfg(feature = "stats")]
1944        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1945
1946        // TLRU without TTL (behaves like ARC but with frequency_weight)
1947        let cache = GlobalCache::new(
1948            &MAP,
1949            &ORDER,
1950            Some(3),
1951            None,
1952            EvictionPolicy::TLRU,
1953            None, // No TTL - age_factor will be 1.0
1954            Some(1.5),
1955            #[cfg(feature = "stats")]
1956            &STATS,
1957        );
1958
1959        cache.insert("k1", 1);
1960        cache.insert("k2", 2);
1961        cache.insert("k3", 3);
1962
1963        // Make k1 very frequent
1964        for _ in 0..10 {
1965            let _ = cache.get("k1");
1966        }
1967
1968        // Add new entry
1969        cache.insert("k4", 4);
1970
1971        // Without TTL, TLRU focuses on frequency and position
1972        // k1 should remain due to high frequency
1973        assert_eq!(cache.get("k1"), Some(1));
1974    }
1975
1976    #[test]
1977    fn test_tlru_concurrent_with_frequency_weight() {
1978        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
1979            Lazy::new(|| RwLock::new(HashMap::new()));
1980        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
1981
1982        #[cfg(feature = "stats")]
1983        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
1984
1985        let cache = GlobalCache::new(
1986            &MAP,
1987            &ORDER,
1988            Some(5),
1989            None,
1990            EvictionPolicy::TLRU,
1991            Some(10),
1992            Some(1.2), // Slightly emphasize frequency
1993            #[cfg(feature = "stats")]
1994            &STATS,
1995        );
1996
1997        // Insert initial entries
1998        cache.insert("k1", 1);
1999        cache.insert("k2", 2);
2000
2001        // Spawn multiple threads accessing the cache
2002        let handles: Vec<_> = (0..5)
2003            .map(|i| {
2004                thread::spawn(move || {
2005                    let cache = GlobalCache::new(
2006                        &MAP,
2007                        &ORDER,
2008                        Some(5),
2009                        None,
2010                        EvictionPolicy::TLRU,
2011                        Some(10),
2012                        Some(1.2),
2013                        #[cfg(feature = "stats")]
2014                        &STATS,
2015                    );
2016
2017                    // Access k1 frequently
2018                    for _ in 0..3 {
2019                        let _ = cache.get("k1");
2020                    }
2021
2022                    // Insert new entry
2023                    cache.insert(&format!("k{}", i + 3), i + 3);
2024                })
2025            })
2026            .collect();
2027
2028        for handle in handles {
2029            handle.join().unwrap();
2030        }
2031
2032        // k1 should remain cached due to high frequency and frequency_weight > 1.0
2033        assert_eq!(cache.get("k1"), Some(1));
2034    }
2035
2036    #[test]
2037    fn test_tlru_frequency_weight_edge_cases() {
2038        static MAP: Lazy<RwLock<HashMap<String, CacheEntry<i32>>>> =
2039            Lazy::new(|| RwLock::new(HashMap::new()));
2040        static ORDER: Lazy<Mutex<VecDeque<String>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
2041
2042        #[cfg(feature = "stats")]
2043        static STATS: Lazy<CacheStats> = Lazy::new(|| CacheStats::new());
2044
2045        // Test with very low weight (close to 0)
2046        let cache = GlobalCache::new(
2047            &MAP,
2048            &ORDER,
2049            Some(2),
2050            None,
2051            EvictionPolicy::TLRU,
2052            Some(5),
2053            Some(0.1), // Very low weight
2054            #[cfg(feature = "stats")]
2055            &STATS,
2056        );
2057
2058        cache.insert("k1", 1);
2059        cache.insert("k2", 2);
2060
2061        // Make k1 extremely frequent
2062        for _ in 0..100 {
2063            let _ = cache.get("k1");
2064        }
2065
2066        thread::sleep(Duration::from_millis(50));
2067
2068        // Even with very high frequency, k1 might be evicted with very low weight
2069        cache.insert("k3", 3);
2070
2071        // The cache should still work correctly
2072        assert!(cache.get("k3").is_some());
2073    }
2074}