cachelito_core/
thread_local_cache.rs

use std::cell::RefCell;
use std::collections::{HashMap, VecDeque};
use std::fmt::Debug;
use std::thread::LocalKey;

use crate::{CacheEntry, EvictionPolicy};

#[cfg(feature = "stats")]
use crate::CacheStats;

use crate::utils::{
    find_arc_eviction_key, find_min_frequency_key, find_tlru_eviction_key, move_key_to_end,
    remove_key_from_cache_local,
};

/// Core cache abstraction that stores values in a thread-local HashMap with configurable limits.
///
/// This cache is designed to work with static thread-local maps declared using
/// the `thread_local!` macro. Each thread maintains its own independent cache,
/// ensuring thread safety without the need for locks.
///
/// # Type Parameters
///
/// * `R` - The type of values stored in the cache. Must be `'static` to satisfy
///   thread-local storage requirements and `Clone` for retrieval.
///
/// # Features
///
/// - **Thread-local storage**: Each thread has its own cache instance
/// - **Configurable limits**: Optional entry count limit and memory limit
/// - **Eviction policies**: FIFO, LRU (default), LFU, ARC, Random, TLRU, and a simplified W-TinyLFU
///   - **FIFO**: First In, First Out - simple and predictable
///   - **LRU**: Least Recently Used - evicts the least recently accessed entries
///   - **LFU**: Least Frequently Used - evicts the least frequently accessed entries
///   - **ARC**: Adaptive Replacement Cache - hybrid policy combining recency and frequency
///   - **Random**: Random replacement - low-overhead eviction with no access tracking
///   - **TLRU**: Time-aware LRU - combines recency, frequency, and age factors
///     - Customizable with the `frequency_weight` parameter
///     - Formula: `score = frequency^weight × position × age_factor` (a worked example follows this list)
///     - `frequency_weight < 1.0`: Emphasize recency (time-sensitive data)
///     - `frequency_weight > 1.0`: Emphasize frequency (popular content)
///   - **W-TinyLFU** (simplified): an admission window evicted FIFO plus a protected segment evicted LFU-style
/// - **TTL support**: Optional time-to-live for automatic expiration
/// - **Result-aware**: Special handling for `Result<T, E>` types
/// - **Memory-based limits**: Optional maximum memory usage (requires `MemoryEstimator`)
/// - **Statistics tracking**: Optional hit/miss monitoring (requires the `stats` feature)
///
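/// As a rough illustration of the TLRU scoring formula above (a schematic, hedged
/// example - the actual scoring lives in `find_tlru_eviction_key` and may differ in
/// detail), assuming entries with lower scores are evicted first:
///
/// ```text
/// frequency = 4, frequency_weight = 1.5, position = 2 (0 = oldest), age_factor = 0.5
/// score = 4^1.5 * 2 * 0.5 = 8.0
/// ```
///
/// Raising `frequency_weight` inflates the scores of frequently accessed entries,
/// keeping popular content cached longer; lowering it lets recency and age dominate.
///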
/// # Thread Safety
///
/// The cache is thread-safe by design - each thread has its own independent copy
/// of the cache data. This means:
/// - No locks or synchronization needed
/// - No contention between threads
/// - Cache entries are not shared across threads
///
/// # Examples
///
/// ## Basic Usage
///
/// ```
/// use std::cell::RefCell;
/// use std::collections::{HashMap, VecDeque};
/// use cachelito_core::{ThreadLocalCache, EvictionPolicy, CacheEntry};
///
/// thread_local! {
///     static MY_CACHE: RefCell<HashMap<String, CacheEntry<i32>>> = RefCell::new(HashMap::new());
///     static MY_ORDER: RefCell<VecDeque<String>> = RefCell::new(VecDeque::new());
/// }
///
/// let cache = ThreadLocalCache::new(&MY_CACHE, &MY_ORDER, None, None, EvictionPolicy::FIFO, None, None, None, None, None, None);
/// cache.insert("answer", 42);
/// assert_eq!(cache.get("answer"), Some(42));
/// ```
///
/// ## With Cache Limit and LRU Policy
///
/// ```
/// use std::cell::RefCell;
/// use std::collections::{HashMap, VecDeque};
/// use cachelito_core::{ThreadLocalCache, EvictionPolicy, CacheEntry};
///
/// thread_local! {
///     static CACHE: RefCell<HashMap<String, CacheEntry<String>>> = RefCell::new(HashMap::new());
///     static ORDER: RefCell<VecDeque<String>> = RefCell::new(VecDeque::new());
/// }
///
/// // Cache with limit of 100 entries using LRU eviction
/// let cache = ThreadLocalCache::new(&CACHE, &ORDER, Some(100), None, EvictionPolicy::LRU, None, None, None, None, None, None);
/// cache.insert("key1", "value1".to_string());
/// cache.insert("key2", "value2".to_string());
///
/// // Accessing key1 moves it to the end (most recently used)
/// let _ = cache.get("key1");
/// ```
///
/// ## With TTL (Time To Live)
///
/// ```
/// use std::cell::RefCell;
/// use std::collections::{HashMap, VecDeque};
/// use cachelito_core::{ThreadLocalCache, EvictionPolicy, CacheEntry};
///
/// thread_local! {
///     static CACHE: RefCell<HashMap<String, CacheEntry<String>>> = RefCell::new(HashMap::new());
///     static ORDER: RefCell<VecDeque<String>> = RefCell::new(VecDeque::new());
/// }
///
/// // Cache with 60 second TTL
/// let cache = ThreadLocalCache::new(&CACHE, &ORDER, None, None, EvictionPolicy::FIFO, Some(60), None, None, None, None, None);
/// cache.insert("key", "value".to_string());
///
/// // Entry will expire after 60 seconds
/// // get() returns None for expired entries
/// ```
///
/// ## TLRU with Custom Frequency Weight
///
/// ```
/// use std::cell::RefCell;
/// use std::collections::{HashMap, VecDeque};
/// use cachelito_core::{ThreadLocalCache, EvictionPolicy, CacheEntry};
///
/// thread_local! {
///     static CACHE: RefCell<HashMap<String, CacheEntry<String>>> = RefCell::new(HashMap::new());
///     static ORDER: RefCell<VecDeque<String>> = RefCell::new(VecDeque::new());
/// }
///
/// // Low frequency_weight (0.3) - emphasizes recency over frequency
/// // Good for time-sensitive data where freshness matters more than popularity
/// let cache = ThreadLocalCache::new(&CACHE, &ORDER, Some(100), None, EvictionPolicy::TLRU, Some(300), Some(0.3), None, None, None, None);
///
/// // High frequency_weight (1.5) - emphasizes frequency over recency
/// // Good for popular content that should stay cached despite age
/// let cache_popular = ThreadLocalCache::new(&CACHE, &ORDER, Some(100), None, EvictionPolicy::TLRU, Some(300), Some(1.5), None, None, None, None);
///
/// // Default (omit frequency_weight) - balanced approach
/// let cache_balanced = ThreadLocalCache::new(&CACHE, &ORDER, Some(100), None, EvictionPolicy::TLRU, Some(300), None, None, None, None, None);
/// ```
pub struct ThreadLocalCache<R: 'static> {
    /// Reference to the thread-local storage key for the cache HashMap
    pub cache: &'static LocalKey<RefCell<HashMap<String, CacheEntry<R>>>>,
    /// Reference to the thread-local storage key for the cache order queue
    pub order: &'static LocalKey<RefCell<VecDeque<String>>>,
    /// Maximum number of items to store in the cache
    pub limit: Option<usize>,
    /// Maximum memory size in bytes
    pub max_memory: Option<usize>,
    /// Eviction policy to use for the cache
    pub policy: EvictionPolicy,
    /// Optional TTL (in seconds) for cache entries
    pub ttl: Option<u64>,
    /// Frequency weight for the TLRU policy (non-negative). Only used when policy is TLRU.
    pub frequency_weight: Option<f64>,
    /// Window ratio for W-TinyLFU policy (between 0.0 and 1.0). Only used when policy is WTinyLFU.
    pub window_ratio: Option<f64>,
    /// Sketch width for W-TinyLFU policy. Only used when policy is WTinyLFU.
    pub sketch_width: Option<usize>,
    /// Sketch depth for W-TinyLFU policy. Only used when policy is WTinyLFU.
    pub sketch_depth: Option<usize>,
    /// Decay interval for W-TinyLFU policy. Only used when policy is WTinyLFU.
    pub decay_interval: Option<u64>,
    /// Cache statistics (when stats feature is enabled)
    #[cfg(feature = "stats")]
    pub stats: CacheStats,
}

impl<R: Clone + 'static> ThreadLocalCache<R> {
    /// Creates a new `ThreadLocalCache` wrapper around thread-local storage keys.
    ///
    /// # Arguments
    ///
    /// * `cache` - A static reference to a `LocalKey` that stores the cache HashMap
    /// * `order` - A static reference to a `LocalKey` that stores the eviction order queue
    /// * `limit` - Optional maximum number of entries (None for unlimited)
    /// * `max_memory` - Optional maximum memory size in bytes (None for unlimited)
    /// * `policy` - Eviction policy to use when limit is reached
    /// * `ttl` - Optional time-to-live in seconds (None for no expiration)
    /// * `frequency_weight` - Optional frequency weight for the TLRU policy (non-negative; values above 1.0 emphasize frequency)
    /// * `window_ratio` - Optional window ratio for W-TinyLFU policy (between 0.0 and 1.0)
    /// * `sketch_width` - Optional sketch width for W-TinyLFU policy
    /// * `sketch_depth` - Optional sketch depth for W-TinyLFU policy
    /// * `decay_interval` - Optional decay interval for W-TinyLFU policy
    ///
    /// # Examples
    ///
    /// ```
    /// use std::cell::RefCell;
    /// use std::collections::{HashMap, VecDeque};
    /// use cachelito_core::{ThreadLocalCache, EvictionPolicy, CacheEntry};
    ///
    /// thread_local! {
    ///     static CACHE: RefCell<HashMap<String, CacheEntry<String>>> = RefCell::new(HashMap::new());
    ///     static ORDER: RefCell<VecDeque<String>> = RefCell::new(VecDeque::new());
    /// }
    ///
    /// let cache = ThreadLocalCache::new(&CACHE, &ORDER, Some(100), None, EvictionPolicy::LRU, Some(60), None, None, None, None, None);
    /// ```
    pub fn new(
        cache: &'static LocalKey<RefCell<HashMap<String, CacheEntry<R>>>>,
        order: &'static LocalKey<RefCell<VecDeque<String>>>,
        limit: Option<usize>,
        max_memory: Option<usize>,
        policy: EvictionPolicy,
        ttl: Option<u64>,
        frequency_weight: Option<f64>,
        window_ratio: Option<f64>,
        sketch_width: Option<usize>,
        sketch_depth: Option<usize>,
        decay_interval: Option<u64>,
    ) -> Self {
        Self {
            cache,
            order,
            limit,
            max_memory,
            policy,
            ttl,
            frequency_weight,
            window_ratio,
            sketch_width,
            sketch_depth,
            decay_interval,
            #[cfg(feature = "stats")]
            stats: CacheStats::new(),
        }
    }

    /// Retrieves a value from the cache by key.
    ///
    /// # Arguments
    ///
    /// * `key` - The cache key to look up
    ///
    /// # Returns
    ///
    /// * `Some(value)` if the key exists in the cache and is not expired
    /// * `None` if the key is not found or has expired
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::cell::RefCell;
    /// # use std::collections::{HashMap, VecDeque};
    /// # use cachelito_core::{ThreadLocalCache, EvictionPolicy, CacheEntry};
    /// # thread_local! {
    /// #     static CACHE: RefCell<HashMap<String, CacheEntry<i32>>> = RefCell::new(HashMap::new());
    /// #     static ORDER: RefCell<VecDeque<String>> = RefCell::new(VecDeque::new());
    /// # }
    /// let cache = ThreadLocalCache::new(&CACHE, &ORDER, None, None, EvictionPolicy::FIFO, None, None, None, None, None, None);
    /// cache.insert("key", 100);
    /// assert_eq!(cache.get("key"), Some(100));
    /// assert_eq!(cache.get("missing"), None);
    /// ```
    pub fn get(&self, key: &str) -> Option<R> {
        let mut expired = false;

        let val = self.cache.with(|c| {
            let c = c.borrow();
            if let Some(entry) = c.get(key) {
                if entry.is_expired(self.ttl) {
                    expired = true;
                    return None;
                }
                Some(entry.value.clone())
            } else {
                None
            }
        });

        // If expired, remove key from cache and return None
        if expired {
            self.remove_key(key);
            #[cfg(feature = "stats")]
            self.stats.record_miss();
            return None;
        }

        // Record stats
        #[cfg(feature = "stats")]
        {
            if val.is_some() {
                self.stats.record_hit();
            } else {
                self.stats.record_miss();
            }
        }

        // Update access patterns based on policy
        if val.is_some() {
            match self.policy {
                EvictionPolicy::LRU => {
                    // Move key to end of order queue (most recently used)
                    self.move_to_end(key);
                }
                EvictionPolicy::LFU => {
                    // Increment frequency counter
                    self.increment_frequency(key);
                }
                EvictionPolicy::ARC => {
                    // Adaptive Replacement: Update both recency and frequency
                    // Update order (recency)
                    self.move_to_end(key);
                    // Increment frequency counter
                    self.increment_frequency(key);
                }
                EvictionPolicy::TLRU => {
                    // Time-aware LRU: Update both recency and frequency
                    // Similar to ARC but considers age in eviction
                    self.move_to_end(key);
                    self.increment_frequency(key);
                }
                EvictionPolicy::WTinyLFU => {
                    // Simplified W-TinyLFU: Behaves like a hybrid of LRU and LFU
                    // Full implementation with Count-Min Sketch would require additional state
                    // For now, update both position (LRU) and frequency (LFU)
                    self.move_to_end(key);
                    self.increment_frequency(key);
                }
                EvictionPolicy::FIFO | EvictionPolicy::Random => {
                    // No update needed for FIFO or Random
                }
            }
        }

        val
    }

    /// Moves a key to the end of the order queue (marks as most recently used)
    fn move_to_end(&self, key: &str) {
        self.order.with(|o| {
            let mut o = o.borrow_mut();
            move_key_to_end(&mut o, key);
        });
    }

    /// Increments the frequency counter for the specified key.
    fn increment_frequency(&self, key: &str) {
        self.cache.with(|c| {
            let mut c = c.borrow_mut();
            if let Some(entry) = c.get_mut(key) {
                entry.increment_frequency();
            }
        });
    }

    /// Inserts a value into the cache with the specified key.
    ///
    /// If a value already exists for this key, it will be replaced.
    ///
    /// # Arguments
    ///
    /// * `key` - The cache key
    /// * `value` - The value to store
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::cell::RefCell;
    /// # use std::collections::{HashMap, VecDeque};
    /// # use cachelito_core::{ThreadLocalCache, EvictionPolicy, CacheEntry};
    /// # thread_local! {
    /// #     static CACHE: RefCell<HashMap<String, CacheEntry<i32>>> = RefCell::new(HashMap::new());
    /// #     static ORDER: RefCell<VecDeque<String>> = RefCell::new(VecDeque::new());
    /// # }
    /// let cache = ThreadLocalCache::new(&CACHE, &ORDER, None, None, EvictionPolicy::FIFO, None, None, None, None, None, None);
    /// cache.insert("first", 1);
    /// cache.insert("first", 2); // Replaces previous value
    /// assert_eq!(cache.get("first"), Some(2));
    /// ```
    ///
    /// # Note
    ///
    /// This method does NOT require the `MemoryEstimator` trait; it only enforces entry-count limits.
    /// If `max_memory` is configured, use `insert_with_memory()` instead, which requires
    /// the type to implement `MemoryEstimator`.
    pub fn insert(&self, key: &str, value: R) {
        let key = key.to_string();
        let entry = CacheEntry::new(value);

        self.cache.with(|c| {
            c.borrow_mut().insert(key.clone(), entry);
        });

        self.order.with(|o| {
            let mut order = o.borrow_mut();
            if let Some(pos) = order.iter().position(|k| *k == key) {
                order.remove(pos);
            }
            order.push_back(key.clone());

            // Only handle entry-count limits (not memory limits)
            self.handle_entry_limit_eviction(&mut order);
        });
    }

    /// Returns a reference to the cache statistics.
    ///
    /// This method is only available when the `stats` feature is enabled.
    ///
    /// # Examples
    ///
    /// ```
    /// # #[cfg(feature = "stats")]
    /// # {
    /// # use std::cell::RefCell;
    /// # use std::collections::{HashMap, VecDeque};
    /// # use cachelito_core::{ThreadLocalCache, EvictionPolicy, CacheEntry};
    /// # thread_local! {
    /// #     static CACHE: RefCell<HashMap<String, CacheEntry<i32>>> = RefCell::new(HashMap::new());
    /// #     static ORDER: RefCell<VecDeque<String>> = RefCell::new(VecDeque::new());
    /// # }
    /// let cache = ThreadLocalCache::new(&CACHE, &ORDER, None, None, EvictionPolicy::FIFO, None, None, None, None, None, None);
    /// cache.insert("key1", 100);
    /// let _ = cache.get("key1");
    /// let _ = cache.get("key2");
    ///
    /// let stats = cache.stats();
    /// assert_eq!(stats.hits(), 1);
    /// assert_eq!(stats.misses(), 1);
    /// # }
    /// ```
    #[cfg(feature = "stats")]
    pub fn stats(&self) -> &CacheStats {
        &self.stats
    }

    /// Removes a key from the cache and its associated ordering.
    ///
    /// Note: this re-borrows both thread-local `RefCell`s, so it must not be called
    /// while either one is already mutably borrowed (for example, from inside an
    /// `order.with` closure); the eviction paths therefore manipulate the borrowed
    /// collections directly instead of calling this method.
    fn remove_key(&self, key: &str) {
        self.cache.with(|c| {
            self.order.with(|o| {
                remove_key_from_cache_local(&mut c.borrow_mut(), &mut o.borrow_mut(), key);
            });
        });
    }

    /// Handles eviction of entries to enforce the entry-count limit based on the configured eviction policy.
    ///
    /// This method ensures that the number of entries in the cache does not exceed the configured limit by
    /// removing entries according to the active policy: LFU, ARC, TLRU, W-TinyLFU, Random, FIFO, or LRU.
    ///
    /// # Parameters
    /// - `order`: A mutable reference to the `VecDeque<String>` holding the key order. The order is interpreted
    ///   differently depending on the eviction policy, e.g., for determining the oldest or least recently used key.
    ///
    /// # Behavior
    /// If the cache's entry limit (`self.limit`) is exceeded:
    /// - `EvictionPolicy::LFU`: the key with the lowest access frequency is evicted (via `find_min_frequency_key`).
    /// - `EvictionPolicy::ARC`: the key to evict is chosen adaptively from both recency and frequency
    ///   (via `find_arc_eviction_key`).
    /// - `EvictionPolicy::TLRU`: the key with the lowest time-aware score is evicted (via `find_tlru_eviction_key`),
    ///   taking recency, frequency, TTL age, and `frequency_weight` into account.
    /// - `EvictionPolicy::WTinyLFU` (simplified): the admission window (the first entries, sized by `window_ratio`)
    ///   is evicted FIFO; if nothing can be evicted there, the protected segment is evicted LFU-style.
    /// - `EvictionPolicy::Random`: a randomly selected entry is evicted.
    /// - `EvictionPolicy::FIFO`: the earliest inserted key (front of the `order` queue) is removed.
    /// - `EvictionPolicy::LRU`: the least recently used key (also the front of the `order` queue) is removed.
    ///
    /// The eviction process:
    /// 1. Identify the key to evict based on the eviction policy.
    /// 2. Remove the key from both the `order` queue and the underlying cache storage (`self.cache`),
    ///    keeping the two consistent.
    /// 3. For FIFO/LRU, stop as soon as one entry has actually been removed.
    ///
    /// # Notes
    /// - This method assumes that the order of keys in the cache is maintained in the `order` deque.
    /// - Because the caller already holds a mutable borrow of the `order` RefCell, eviction manipulates the
    ///   passed-in `order` directly instead of calling `remove_key` (which would re-borrow the thread-local state).
    ///
    /// # Edge Cases
    /// - If the cache has no limit (`self.limit == None`), this method performs no action.
    /// - If the `order` deque is empty when attempting to evict an entry, no action is taken.
    /// - For FIFO and LRU policies, eviction iterates until a key that is actually present in the cache is removed.
    fn handle_entry_limit_eviction(&self, order: &mut VecDeque<String>) {
        if let Some(limit) = self.limit {
            if order.len() > limit {
                match self.policy {
                    EvictionPolicy::LFU => {
                        let min_freq_key = self
                            .cache
                            .with(|c| find_min_frequency_key(&c.borrow(), order));

                        if let Some(evict_key) = min_freq_key {
                            // Remove directly from the already-borrowed `order` and the cache map;
                            // calling `remove_key` here would re-borrow thread-local RefCells that
                            // the caller has already borrowed mutably.
                            if let Some(pos) = order.iter().position(|k| *k == evict_key) {
                                order.remove(pos);
                            }
                            self.cache.with(|c| {
                                c.borrow_mut().remove(&evict_key);
                            });
                        }
                    }
                    EvictionPolicy::ARC => {
                        let evict_key = self
                            .cache
                            .with(|c| find_arc_eviction_key(&c.borrow(), order.iter().enumerate()));

                        if let Some(key) = evict_key {
                            if let Some(pos) = order.iter().position(|k| *k == key) {
                                order.remove(pos);
                            }
                            self.cache.with(|c| {
                                c.borrow_mut().remove(&key);
                            });
                        }
                    }
                    EvictionPolicy::TLRU => {
                        let evict_key = self.cache.with(|c| {
                            find_tlru_eviction_key(
                                &c.borrow(),
                                order.iter().enumerate(),
                                self.ttl,
                                self.frequency_weight,
                            )
                        });

                        if let Some(key) = evict_key {
                            if let Some(pos) = order.iter().position(|k| *k == key) {
                                order.remove(pos);
                            }
                            self.cache.with(|c| {
                                c.borrow_mut().remove(&key);
                            });
                        }
                    }
                    EvictionPolicy::WTinyLFU => {
                        // W-TinyLFU: Window segment (first entries) + Protected segment (rest)
                        // The window ratio determines the split point
                        let window_ratio = self.window_ratio.unwrap_or(0.20); // Default 20%
                        let window_size = crate::utils::calculate_window_size(limit, window_ratio);

                        if order.len() <= window_size {
                            // Everything is in the window segment - evict FIFO
                            while let Some(evict_key) = order.pop_front() {
                                let mut removed = false;
                                self.cache.with(|c| {
                                    let mut cache = c.borrow_mut();
                                    if cache.contains_key(&evict_key) {
                                        cache.remove(&evict_key);
                                        removed = true;
                                    }
                                });
                                if removed {
                                    break;
                                }
                            }
                        } else {
                            // We have both window and protected segments
                            // Try to evict from the window first (FIFO)
                            let mut evicted = false;

                            // Evict from the window (first window_size entries)
                            for i in 0..window_size.min(order.len()) {
                                if let Some(evict_key) = order.get(i) {
                                    let mut removed = false;
                                    self.cache.with(|c| {
                                        let mut cache = c.borrow_mut();
                                        if cache.contains_key(evict_key) {
                                            cache.remove(evict_key);
                                            removed = true;
                                        }
                                    });

                                    if removed {
                                        order.remove(i);
                                        evicted = true;
                                        break;
                                    }
                                }
                            }

                            // If window eviction failed, evict from the protected segment (LFU)
                            if !evicted {
                                // Protected segment is from window_size to the end
                                let protected_keys: VecDeque<String> =
                                    order.iter().skip(window_size).cloned().collect();

                                let evict_key = self
                                    .cache
                                    .with(|c| find_min_frequency_key(&c.borrow(), &protected_keys));

                                if let Some(key) = evict_key {
                                    if let Some(pos) = order.iter().position(|k| *k == key) {
                                        order.remove(pos);
                                    }
                                    self.cache.with(|c| {
                                        c.borrow_mut().remove(&key);
                                    });
                                }
                            }
                        }
                    }
                    EvictionPolicy::Random => {
                        // Random eviction: select a random position and remove it directly;
                        // no access-pattern bookkeeping is needed.
                        if !order.is_empty() {
                            let pos = fastrand::usize(..order.len());
                            if let Some(evict_key) = order.remove(pos) {
                                // Remove from cache
                                self.cache.with(|c| {
                                    c.borrow_mut().remove(&evict_key);
                                });
                            }
                        }
                    }
                    EvictionPolicy::FIFO | EvictionPolicy::LRU => {
                        while let Some(evict_key) = order.pop_front() {
                            let mut removed = false;
                            self.cache.with(|c| {
                                let mut cache = c.borrow_mut();
                                if cache.contains_key(&evict_key) {
                                    cache.remove(&evict_key);
                                    removed = true;
                                }
                            });
                            if removed {
                                break;
                            }
                        }
                    }
                }
            }
        }
    }
}

// Separate implementation for types that implement MemoryEstimator
// This allows memory-based eviction
impl<R: Clone + 'static + crate::MemoryEstimator> ThreadLocalCache<R> {
    /// Insert with memory limit support.
    ///
    /// This method requires `R` to implement `MemoryEstimator` and handles both
    /// memory-based and entry-count-based eviction.
    ///
    /// Use this method when `max_memory` is configured in the cache.
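    ///
    /// # Example
    ///
    /// A minimal, illustrative sketch (not compiled): it assumes `MemoryEstimator` can be
    /// implemented by providing an `estimate_memory(&self) -> usize` method, and uses a
    /// hypothetical `Blob` type plus `CACHE`/`ORDER` thread-locals declared as in the
    /// examples above.
    ///
    /// ```ignore
    /// use cachelito_core::{ThreadLocalCache, EvictionPolicy, MemoryEstimator};
    ///
    /// #[derive(Clone)]
    /// struct Blob(Vec<u8>);
    ///
    /// impl MemoryEstimator for Blob {
    ///     fn estimate_memory(&self) -> usize {
    ///         self.0.len()
    ///     }
    /// }
    ///
    /// // Cap the cache at roughly 1 MiB of estimated payload.
    /// let cache = ThreadLocalCache::new(&CACHE, &ORDER, None, Some(1024 * 1024), EvictionPolicy::LRU, None, None, None, None, None, None);
    /// cache.insert_with_memory("blob", Blob(vec![0u8; 4096]));
    /// ```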
    pub fn insert_with_memory(&self, key: &str, value: R) {
        let key = key.to_string();
        let entry = CacheEntry::new(value);

        self.cache.with(|c| {
            c.borrow_mut().insert(key.clone(), entry);
        });

        self.order.with(|o| {
            let mut order = o.borrow_mut();
            if let Some(pos) = order.iter().position(|k| *k == key) {
                order.remove(pos);
            }
            order.push_back(key.clone());

            // Check memory limit first (if specified)
            if let Some(max_mem) = self.max_memory {
                // First, check if the new value by itself exceeds max_mem.
                // This is a safety check to prevent an infinite eviction loop.
                let new_value_size = self.cache.with(|c| {
                    c.borrow()
                        .get(&key)
                        .map(|e| e.value.estimate_memory())
                        .unwrap_or(0)
                });

                if new_value_size > max_mem {
                    // The value itself is too large for the cache.
                    // Remove it and return early to respect the memory limit.
                    self.cache.with(|c| {
                        c.borrow_mut().remove(&key);
                    });
                    order.pop_back(); // Remove from the order queue as well
                    return;
                }

                loop {
                    let current_mem = self.cache.with(|c| {
                        let cache = c.borrow();
                        cache
                            .values()
                            .map(|e| e.value.estimate_memory())
                            .sum::<usize>()
                    });

                    if current_mem <= max_mem {
                        break;
                    }

                    // Need to evict based on policy. As in `handle_entry_limit_eviction`,
                    // eviction manipulates the already-borrowed `order` directly; calling
                    // `remove_key` here would re-borrow the thread-local RefCells.
                    let evicted = match self.policy {
                        EvictionPolicy::LFU => {
                            let min_freq_key = self
                                .cache
                                .with(|c| find_min_frequency_key(&c.borrow(), &order));
                            if let Some(evict_key) = min_freq_key {
                                if let Some(pos) = order.iter().position(|k| *k == evict_key) {
                                    order.remove(pos);
                                }
                                self.cache.with(|c| {
                                    c.borrow_mut().remove(&evict_key);
                                });
                                true
                            } else {
                                false
                            }
                        }
                        EvictionPolicy::ARC => {
                            let evict_key = self.cache.with(|c| {
                                find_arc_eviction_key(&c.borrow(), order.iter().enumerate())
                            });
                            if let Some(key) = evict_key {
                                if let Some(pos) = order.iter().position(|k| *k == key) {
                                    order.remove(pos);
                                }
                                self.cache.with(|c| {
                                    c.borrow_mut().remove(&key);
                                });
                                true
                            } else {
                                false
                            }
                        }
                        EvictionPolicy::TLRU => {
                            let evict_key = self.cache.with(|c| {
                                find_tlru_eviction_key(
                                    &c.borrow(),
                                    order.iter().enumerate(),
                                    self.ttl,
                                    self.frequency_weight,
                                )
                            });
                            if let Some(key) = evict_key {
                                if let Some(pos) = order.iter().position(|k| *k == key) {
                                    order.remove(pos);
                                }
                                self.cache.with(|c| {
                                    c.borrow_mut().remove(&key);
                                });
                                true
                            } else {
                                false
                            }
                        }
                        EvictionPolicy::WTinyLFU => {
                            // Simplified W-TinyLFU: Use LFU-like eviction.
                            // A full implementation would use a window segment + Count-Min Sketch.
                            let evict_key = self
                                .cache
                                .with(|c| find_min_frequency_key(&c.borrow(), &order));
                            if let Some(key) = evict_key {
                                if let Some(pos) = order.iter().position(|k| *k == key) {
                                    order.remove(pos);
                                }
                                self.cache.with(|c| {
                                    c.borrow_mut().remove(&key);
                                });
                                true
                            } else {
                                false
                            }
                        }
                        EvictionPolicy::Random => {
                            // Random eviction: select a random position and remove it directly
                            if !order.is_empty() {
                                let pos = fastrand::usize(..order.len());
                                if let Some(evict_key) = order.remove(pos) {
                                    // Remove from cache
                                    self.cache.with(|c| {
                                        c.borrow_mut().remove(&evict_key);
                                    });
                                    true
                                } else {
                                    false
                                }
                            } else {
                                false
                            }
                        }
                        EvictionPolicy::FIFO | EvictionPolicy::LRU => {
                            if let Some(evict_key) = order.pop_front() {
                                self.cache.with(|c| {
                                    c.borrow_mut().remove(&evict_key);
                                });
                                true
                            } else {
                                false
                            }
                        }
                    };

                    if !evicted {
                        break; // Nothing left to evict
                    }
                }
            }

            // Handle entry-count limits
            self.handle_entry_limit_eviction(&mut order);
        });
    }
}

/// Specialized implementation for caching `Result<T, E>` return types.
///
/// This implementation provides a method to cache only successful (`Ok`) results,
/// which is useful for functions that may fail - you typically don't want to cache
/// errors, as retrying the operation might succeed later.
///
/// # Type Parameters
///
/// * `T` - The success type (inner type of `Ok`)
/// * `E` - The error type (inner type of `Err`)
///
/// # Examples
///
/// ```
/// # use std::cell::RefCell;
/// # use std::collections::{HashMap, VecDeque};
/// # use cachelito_core::{ThreadLocalCache, EvictionPolicy, CacheEntry};
/// # thread_local! {
/// #     static CACHE: RefCell<HashMap<String, CacheEntry<Result<i32, String>>>> = RefCell::new(HashMap::new());
/// #     static ORDER: RefCell<VecDeque<String>> = RefCell::new(VecDeque::new());
/// # }
/// let cache = ThreadLocalCache::new(&CACHE, &ORDER, None, None, EvictionPolicy::FIFO, None, None, None, None, None, None);
///
/// // Ok values are cached
/// cache.insert_result("success", &Ok(42));
/// assert_eq!(cache.get("success"), Some(Ok(42)));
///
/// // Err values are NOT cached
/// cache.insert_result("failure", &Err("error".to_string()));
/// assert_eq!(cache.get("failure"), None);
/// ```
impl<T: Clone + Debug + 'static, E: Clone + Debug + 'static> ThreadLocalCache<Result<T, E>> {
    /// Inserts a `Result` into the cache, but only if it's an `Ok` value.
    ///
    /// This method is specifically designed for caching functions that return
    /// `Result<T, E>`. It intelligently ignores `Err` values, as errors typically
    /// should not be cached (the operation might succeed on retry).
    ///
    /// This version does NOT require MemoryEstimator. Use `insert_result_with_memory()`
    /// when max_memory is configured.
    ///
    /// # Arguments
    ///
    /// * `key` - The cache key
    /// * `value` - The `Result` to potentially cache
    ///
    /// # Behavior
    ///
    /// * If `value` is `Ok(v)`, stores `Ok(v.clone())` in the cache
    /// * If `value` is `Err(_)`, does nothing (error is not cached)
    pub fn insert_result(&self, key: &str, value: &Result<T, E>) {
        if let Ok(val) = value {
            self.insert(key, Ok(val.clone()));
        }
    }
}

/// Implementation for Result types WITH MemoryEstimator support.
impl<
        T: Clone + Debug + 'static + crate::MemoryEstimator,
        E: Clone + Debug + 'static + crate::MemoryEstimator,
    > ThreadLocalCache<Result<T, E>>
{
    /// Inserts a Result into the cache with memory limit support.
    ///
    /// This method requires both T and E to implement MemoryEstimator.
    /// Use this when max_memory is configured.
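    ///
    /// # Example
    ///
    /// A hedged, illustrative sketch (not compiled); `Payload`, `FetchError`, and
    /// `fetch_payload()` are hypothetical and assumed to satisfy the `Clone`, `Debug`,
    /// and `MemoryEstimator` bounds:
    ///
    /// ```ignore
    /// // Only Ok values are stored; the memory limit is enforced on insert.
    /// let result: Result<Payload, FetchError> = fetch_payload();
    /// cache.insert_result_with_memory("payload", &result);
    /// ```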
    pub fn insert_result_with_memory(&self, key: &str, value: &Result<T, E>) {
        if let Ok(val) = value {
            self.insert_with_memory(key, Ok(val.clone()));
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    thread_local! {
        static TEST_CACHE: RefCell<HashMap<String, CacheEntry<i32>>> = RefCell::new(HashMap::new());
        static TEST_ORDER: RefCell<VecDeque<String>> = RefCell::new(VecDeque::new());
    }

    fn setup_cache(
        limit: Option<usize>,
        policy: EvictionPolicy,
        ttl: Option<u64>,
    ) -> ThreadLocalCache<i32> {
        TEST_CACHE.with(|c| c.borrow_mut().clear());
        TEST_ORDER.with(|o| o.borrow_mut().clear());
        ThreadLocalCache::new(
            &TEST_CACHE,
            &TEST_ORDER,
            limit,
            None,
            policy,
            ttl,
            None,
            None,
            None,
            None,
            None,
        )
    }

    fn setup_cache_with_weight(
        limit: Option<usize>,
        policy: EvictionPolicy,
        ttl: Option<u64>,
        frequency_weight: Option<f64>,
    ) -> ThreadLocalCache<i32> {
        TEST_CACHE.with(|c| c.borrow_mut().clear());
        TEST_ORDER.with(|o| o.borrow_mut().clear());
        ThreadLocalCache::new(
            &TEST_CACHE,
            &TEST_ORDER,
            limit,
            None,
            policy,
            ttl,
            frequency_weight,
            None,
            None,
            None,
            None,
        )
    }

    #[test]
    fn test_basic_insert_get() {
        let cache = setup_cache(None, EvictionPolicy::FIFO, None);
        cache.insert("key1", 42);
        assert_eq!(cache.get("key1"), Some(42));
    }

    #[test]
    fn test_missing_key() {
        let cache = setup_cache(None, EvictionPolicy::FIFO, None);
        assert_eq!(cache.get("missing"), None);
    }

    #[test]
    fn test_update_existing_key() {
        let cache = setup_cache(None, EvictionPolicy::FIFO, None);
        cache.insert("key", 1);
        cache.insert("key", 2);
        assert_eq!(cache.get("key"), Some(2));
    }

    #[test]
    fn test_fifo_eviction() {
        let cache = setup_cache(Some(2), EvictionPolicy::FIFO, None);
        cache.insert("k1", 1);
        cache.insert("k2", 2);
        cache.insert("k3", 3); // Evicts k1

        assert_eq!(cache.get("k1"), None);
        assert_eq!(cache.get("k2"), Some(2));
        assert_eq!(cache.get("k3"), Some(3));
    }

    #[test]
    fn test_lru_eviction() {
        let cache = setup_cache(Some(2), EvictionPolicy::LRU, None);
        cache.insert("k1", 1);
        cache.insert("k2", 2);
        let _ = cache.get("k1"); // Access k1, making it recently used
        cache.insert("k3", 3); // Should evict k2 (least recently used)

        assert_eq!(cache.get("k1"), Some(1));
        assert_eq!(cache.get("k2"), None);
        assert_eq!(cache.get("k3"), Some(3));
    }

    #[test]
    fn test_lru_access_updates_order() {
        let cache = setup_cache(Some(3), EvictionPolicy::LRU, None);
        cache.insert("k1", 1);
        cache.insert("k2", 2);
        cache.insert("k3", 3);

        // Access k1 multiple times
        let _ = cache.get("k1");
        let _ = cache.get("k1");

        // k2 is now LRU, should be evicted
        cache.insert("k4", 4);

        assert_eq!(cache.get("k1"), Some(1));
        assert_eq!(cache.get("k2"), None);
        assert_eq!(cache.get("k3"), Some(3));
        assert_eq!(cache.get("k4"), Some(4));
    }

    #[test]
    fn test_result_caching_ok() {
        thread_local! {
            static RES_CACHE: RefCell<HashMap<String, CacheEntry<Result<i32, String>>>> = RefCell::new(HashMap::new());
            static RES_ORDER: RefCell<VecDeque<String>> = RefCell::new(VecDeque::new());
        }

        let cache = ThreadLocalCache::new(
            &RES_CACHE,
            &RES_ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            None,
            None,
            None,
            None,
        );
        let ok_result = Ok(100);
        cache.insert_result("success", &ok_result);
        assert_eq!(cache.get("success"), Some(Ok(100)));
    }

    #[test]
    fn test_result_caching_err() {
        thread_local! {
            static RES_CACHE: RefCell<HashMap<String, CacheEntry<Result<i32, String>>>> = RefCell::new(HashMap::new());
            static RES_ORDER: RefCell<VecDeque<String>> = RefCell::new(VecDeque::new());
        }

        let cache = ThreadLocalCache::new(
            &RES_CACHE,
            &RES_ORDER,
            None,
            None,
            EvictionPolicy::FIFO,
            None,
            None,
            None,
            None,
            None,
            None,
        );
        let err_result: Result<i32, String> = Err("error".to_string());
        cache.insert_result("failure", &err_result);
        assert_eq!(cache.get("failure"), None); // Errors not cached
    }

    #[test]
    fn test_ttl_expiration() {
        use std::thread;
        use std::time::Duration;

        let cache = setup_cache(None, EvictionPolicy::FIFO, Some(1));
        cache.insert("expires", 999);

        // Should still be valid immediately
        assert_eq!(cache.get("expires"), Some(999));

        // Wait for expiration
        thread::sleep(Duration::from_secs(2));

        // Should be expired now
        assert_eq!(cache.get("expires"), None);
    }

    #[test]
    fn test_no_limit() {
        let cache = setup_cache(None, EvictionPolicy::FIFO, None);
        for i in 0..1000 {
            cache.insert(&format!("key{}", i), i);
        }

        // All entries should still be present
        for i in 0..1000 {
            assert_eq!(cache.get(&format!("key{}", i)), Some(i));
        }
    }

    #[test]
    #[cfg(feature = "stats")]
    fn test_stats_basic() {
        let cache = setup_cache(None, EvictionPolicy::FIFO, None);
        cache.insert("k1", 1);
        cache.insert("k2", 2);

        let _ = cache.get("k1"); // Hit
        let _ = cache.get("k2"); // Hit
        let _ = cache.get("k3"); // Miss

        let stats = cache.stats();
        assert_eq!(stats.hits(), 2);
        assert_eq!(stats.misses(), 1);
        assert_eq!(stats.total_accesses(), 3);
        assert!((stats.hit_rate() - 0.6666).abs() < 0.001);
    }

    #[test]
    #[cfg(feature = "stats")]
    fn test_stats_expired_counts_as_miss() {
        use std::thread;
        use std::time::Duration;

        let cache = setup_cache(None, EvictionPolicy::FIFO, Some(1));
        cache.insert("expires", 999);

        // Immediate access - should be a hit
        let _ = cache.get("expires");
        assert_eq!(cache.stats().hits(), 1);
        assert_eq!(cache.stats().misses(), 0);

        // Wait for expiration
        thread::sleep(Duration::from_secs(2));

        // Access after expiration - should be a miss
        let _ = cache.get("expires");
        assert_eq!(cache.stats().hits(), 1);
        assert_eq!(cache.stats().misses(), 1);
    }

    #[test]
    #[cfg(feature = "stats")]
    fn test_stats_reset() {
        let cache = setup_cache(None, EvictionPolicy::FIFO, None);
        cache.insert("k1", 1);
        let _ = cache.get("k1");
        let _ = cache.get("k2");

        let stats = cache.stats();
        assert_eq!(stats.hits(), 1);
        assert_eq!(stats.misses(), 1);

        stats.reset();
        assert_eq!(stats.hits(), 0);
        assert_eq!(stats.misses(), 0);
    }

    #[test]
    #[cfg(feature = "stats")]
    fn test_stats_all_hits() {
        let cache = setup_cache(None, EvictionPolicy::FIFO, None);
        cache.insert("k1", 1);
        cache.insert("k2", 2);

        for _ in 0..10 {
            let _ = cache.get("k1");
            let _ = cache.get("k2");
        }

        let stats = cache.stats();
        assert_eq!(stats.hits(), 20);
        assert_eq!(stats.misses(), 0);
        assert_eq!(stats.hit_rate(), 1.0);
        assert_eq!(stats.miss_rate(), 0.0);
    }

    #[test]
    #[cfg(feature = "stats")]
    fn test_stats_all_misses() {
        let cache = setup_cache(None, EvictionPolicy::FIFO, None);

        for i in 0..10 {
            let _ = cache.get(&format!("k{}", i));
        }

        let stats = cache.stats();
        assert_eq!(stats.hits(), 0);
        assert_eq!(stats.misses(), 10);
        assert_eq!(stats.hit_rate(), 0.0);
        assert_eq!(stats.miss_rate(), 1.0);
    }

    // ========== TLRU with frequency_weight tests ==========
    // Note: These tests exercise TLRU access tracking and the frequency_weight
    // parameter; they stay below the entry limit and do not assert a specific
    // eviction outcome.

    #[test]
    fn test_tlru_with_frequency_weight_basic() {
        // Test basic TLRU behavior with frequency_weight without hitting limit
        let cache = setup_cache_with_weight(Some(10), EvictionPolicy::TLRU, Some(10), Some(1.5));

        cache.insert("k1", 1);
        cache.insert("k2", 2);
        cache.insert("k3", 3);

        // Access k1 multiple times to increase frequency
        for _ in 0..5 {
            assert_eq!(cache.get("k1"), Some(1));
        }

        // All entries should still be cached (no eviction yet)
        assert_eq!(cache.get("k1"), Some(1));
        assert_eq!(cache.get("k2"), Some(2));
        assert_eq!(cache.get("k3"), Some(3));
    }

    #[test]
    fn test_tlru_default_frequency_weight_basic() {
        // Test TLRU with default frequency_weight (None = 1.0)
        let cache = setup_cache_with_weight(Some(10), EvictionPolicy::TLRU, Some(5), None);

        cache.insert("k1", 1);
        cache.insert("k2", 2);

        // Access k1 a few times
        for _ in 0..3 {
            let _ = cache.get("k1");
        }

        // Both should be cached
        assert_eq!(cache.get("k1"), Some(1));
        assert_eq!(cache.get("k2"), Some(2));
    }

    #[test]
    fn test_tlru_no_ttl_with_frequency_weight() {
        // TLRU without TTL (age_factor = 1.0) but with frequency_weight
        let cache = setup_cache_with_weight(Some(10), EvictionPolicy::TLRU, None, Some(1.5));

        cache.insert("k1", 1);
        cache.insert("k2", 2);
        cache.insert("k3", 3);

        // Make k1 very frequent
        for _ in 0..10 {
            let _ = cache.get("k1");
        }

        // All should be cached (no limit reached)
        assert_eq!(cache.get("k1"), Some(1));
        assert_eq!(cache.get("k2"), Some(2));
        assert_eq!(cache.get("k3"), Some(3));
    }

    #[test]
    fn test_tlru_frequency_tracking() {
        // Verify that TLRU tracks frequency correctly
        let cache = setup_cache_with_weight(Some(10), EvictionPolicy::TLRU, Some(10), Some(1.0));

        cache.insert("k1", 1);
        cache.insert("k2", 2);

        // Access k1 multiple times
        for _ in 0..5 {
            assert_eq!(cache.get("k1"), Some(1));
        }

        // Access k2 once
        assert_eq!(cache.get("k2"), Some(2));

        // Both should still be present
        assert_eq!(cache.get("k1"), Some(1));
        assert_eq!(cache.get("k2"), Some(2));
    }

    #[test]
    fn test_tlru_with_different_weights() {
        // Test that different frequency_weight values are accepted
        let cache_low =
            setup_cache_with_weight(Some(10), EvictionPolicy::TLRU, Some(10), Some(0.3));
        let cache_high =
            setup_cache_with_weight(Some(10), EvictionPolicy::TLRU, Some(10), Some(2.0));

        cache_low.insert("k1", 1);
        cache_high.insert("k1", 1);

        assert_eq!(cache_low.get("k1"), Some(1));
        assert_eq!(cache_high.get("k1"), Some(1));
    }
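
    // Hedged sketches of eviction behavior for the Random and LFU policies.
    // They avoid asserting which key is evicted (random selection and frequency
    // tie-breaking are implementation details) and assume only that exceeding the
    // limit evicts exactly one entry per insert.

    #[test]
    fn test_random_eviction_keeps_limit_entries() {
        let cache = setup_cache(Some(2), EvictionPolicy::Random, None);
        cache.insert("k1", 1);
        cache.insert("k2", 2);
        cache.insert("k3", 3); // Exceeds the limit; one key is evicted at random

        let mut survivors = 0;
        for k in ["k1", "k2", "k3"] {
            if cache.get(k).is_some() {
                survivors += 1;
            }
        }
        assert_eq!(survivors, 2);
    }

    #[test]
    fn test_lfu_keeps_frequently_used_key() {
        let cache = setup_cache(Some(2), EvictionPolicy::LFU, None);
        cache.insert("k1", 1);
        cache.insert("k2", 2);

        // Raise k1's access frequency well above k2's
        for _ in 0..5 {
            assert_eq!(cache.get("k1"), Some(1));
        }

        cache.insert("k3", 3); // Exceeds the limit; the least frequently used entry is evicted

        // k1 must survive; exactly one of k2/k3 should have been evicted
        assert_eq!(cache.get("k1"), Some(1));
        let mut survivors = 0;
        for k in ["k2", "k3"] {
            if cache.get(k).is_some() {
                survivors += 1;
            }
        }
        assert_eq!(survivors, 1);
    }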
}