// cache_rs/concurrent/lru.rs
//! Concurrent LRU Cache Implementation
//!
//! A thread-safe LRU cache using lock striping (segmented storage) for high-performance
//! concurrent access. This is the multi-threaded counterpart to [`LruCache`](crate::LruCache).
//!
//! # How It Works
//!
//! The cache partitions keys across multiple independent segments, each with its own lock.
//! This allows concurrent operations on different segments without contention.
//!
//! ```text
//! ┌──────────────────────────────────────────────────────────────────────┐
//! │                      ConcurrentLruCache                              │
//! │                                                                      │
//! │  hash(key) % N  ──▶  Segment Selection                               │
//! │                                                                      │
//! │  ┌──────────────┐ ┌──────────────┐     ┌──────────────┐              │
//! │  │  Segment 0   │ │  Segment 1   │ ... │  Segment N-1 │              │
//! │  │  ┌────────┐  │ │  ┌────────┐  │     │  ┌────────┐  │              │
//! │  │  │ Mutex  │  │ │  │ Mutex  │  │     │  │ Mutex  │  │              │
//! │  │  └────┬───┘  │ │  └────┬───┘  │     │  └────┬───┘  │              │
//! │  │       │      │ │       │      │     │       │      │              │
//! │  │  ┌────▼───┐  │ │  ┌────▼───┐  │     │  ┌────▼───┐  │              │
//! │  │  │LruCache│  │ │  │LruCache│  │     │  │LruCache│  │              │
//! │  │  └────────┘  │ │  └────────┘  │     │  └────────┘  │              │
//! │  └──────────────┘ └──────────────┘     └──────────────┘              │
//! └──────────────────────────────────────────────────────────────────────┘
//! ```
//!
//! ## Segment Count
//!
//! The default segment count is based on available CPU cores (typically 16).
//! More segments = less contention but more memory overhead.
//!
//! ## Trade-offs
//!
//! - **Pros**: Near-linear scaling with thread count, no global lock
//! - **Cons**: LRU ordering is per-segment, not global. An item might be evicted
//!   from one segment while another segment has older items.
//!
//! # Performance Characteristics
//!
//! | Metric | Value |
//! |--------|-------|
//! | Get/Put/Remove | O(1) average |
//! | Concurrency | Near-linear scaling up to segment count |
//! | Memory overhead | ~150 bytes per entry + one Mutex per segment |
//!
//! # When to Use
//!
//! **Use ConcurrentLruCache when:**
//! - Multiple threads need cache access
//! - You need better throughput than `Mutex<LruCache>`
//! - Keys distribute evenly (hot keys in one segment will still contend)
//!
//! **Consider alternatives when:**
//! - Single-threaded access only → use `LruCache`
//! - Need strict global LRU ordering → use `Mutex<LruCache>`
//! - Very hot keys → consider per-key caching or request coalescing
//!
//! # Thread Safety
//!
//! `ConcurrentLruCache` is `Send + Sync` and can be shared via `Arc`.
//!
//! # Example
//!
//! ```rust,ignore
//! use cache_rs::concurrent::ConcurrentLruCache;
//! use cache_rs::config::{ConcurrentLruCacheConfig, ConcurrentCacheConfig, LruCacheConfig};
//! use std::num::NonZeroUsize;
//! use std::sync::Arc;
//! use std::thread;
//!
//! let config: ConcurrentLruCacheConfig = ConcurrentCacheConfig {
//!     base: LruCacheConfig {
//!         capacity: NonZeroUsize::new(10_000).unwrap(),
//!         max_size: u64::MAX,
//!     },
//!     segments: 16,
//! };
//! let cache = Arc::new(ConcurrentLruCache::init(config, None));
//!
//! let handles: Vec<_> = (0..4).map(|i| {
//!     let cache = Arc::clone(&cache);
//!     thread::spawn(move || {
//!         for j in 0..1000 {
//!             cache.put(format!("key-{}-{}", i, j), j);
//!         }
//!     })
//! }).collect();
//!
//! for h in handles {
//!     h.join().unwrap();
//! }
//!
//! println!("Total entries: {}", cache.len());
//! ```

extern crate alloc;

use alloc::boxed::Box;
use alloc::collections::BTreeMap;
use alloc::string::String;
use alloc::vec::Vec;

use core::borrow::Borrow;
use core::hash::{BuildHasher, Hash};
use core::num::NonZeroUsize;

use parking_lot::Mutex;

use crate::lru::LruSegment;
use crate::metrics::CacheMetrics;

#[cfg(feature = "hashbrown")]
use hashbrown::DefaultHashBuilder;

#[cfg(not(feature = "hashbrown"))]
use std::collections::hash_map::RandomState as DefaultHashBuilder;

118/// A thread-safe LRU cache with segmented storage for high concurrency.
119///
120/// Keys are partitioned across multiple segments using hash-based sharding.
121/// Each segment has its own lock, allowing concurrent access to different
122/// segments without blocking.
123///
124/// # Type Parameters
125///
126/// - `K`: Key type. Must implement `Hash + Eq + Clone + Send`.
127/// - `V`: Value type. Must implement `Clone + Send`.
128/// - `S`: Hash builder type. Defaults to `DefaultHashBuilder`.
129///
130/// # Note on LRU Semantics
131///
132/// LRU ordering is maintained **per-segment**, not globally. This means an item
133/// in segment A might be evicted while segment B has items that were accessed
134/// less recently in wall-clock time. For most workloads with good key distribution,
135/// this approximation works well.
136///
137/// # Example
138///
139/// ```rust,ignore
140/// use cache_rs::concurrent::ConcurrentLruCache;
141/// use std::sync::Arc;
142///
143/// let cache = Arc::new(ConcurrentLruCache::new(1000));
144///
145/// // Safe to use from multiple threads
146/// cache.put("key".to_string(), 42, 1);
147/// assert_eq!(cache.get(&"key".to_string()), Some(42));
148/// ```
149pub struct ConcurrentLruCache<K, V, S = DefaultHashBuilder> {
150    segments: Box<[Mutex<LruSegment<K, V, S>>]>,
151    hash_builder: S,
152}
153
154impl<K, V> ConcurrentLruCache<K, V, DefaultHashBuilder>
155where
156    K: Hash + Eq + Clone + Send,
157    V: Clone + Send,
158{
159    /// Creates a new concurrent LRU cache from a configuration with an optional hasher.
160    ///
161    /// This is the **recommended** way to create a concurrent LRU cache.
162    ///
163    /// # Arguments
164    ///
165    /// * `config` - Configuration specifying capacity, segments, and optional size limit
166    /// * `hasher` - Optional custom hash builder. If `None`, uses `DefaultHashBuilder`
167    ///
168    /// # Example
169    ///
170    /// ```rust,ignore
171    /// use cache_rs::concurrent::ConcurrentLruCache;
172    /// use cache_rs::config::{ConcurrentLruCacheConfig, ConcurrentCacheConfig, LruCacheConfig};
173    /// use core::num::NonZeroUsize;
174    ///
175    /// // Simple capacity-only cache with default segments
176    /// let config: ConcurrentLruCacheConfig = ConcurrentCacheConfig {
177    ///     base: LruCacheConfig {
178    ///         capacity: NonZeroUsize::new(10000).unwrap(),
179    ///         max_size: u64::MAX,
180    ///     },
181    ///     segments: 16,
182    /// };
183    /// let cache: ConcurrentLruCache<String, i32> = ConcurrentLruCache::init(config, None);
184    ///
185    /// // With custom segments and size limit
186    /// let config: ConcurrentLruCacheConfig = ConcurrentCacheConfig {
187    ///     base: LruCacheConfig {
188    ///         capacity: NonZeroUsize::new(10000).unwrap(),
189    ///         max_size: 100 * 1024 * 1024,  // 100MB
190    ///     },
191    ///     segments: 32,
192    /// };
193    /// let cache: ConcurrentLruCache<String, Vec<u8>> = ConcurrentLruCache::init(config, None);
194    /// ```
195    pub fn init(
196        config: crate::config::ConcurrentLruCacheConfig,
197        hasher: Option<DefaultHashBuilder>,
198    ) -> Self {
199        let segment_count = config.segments;
200        let capacity = config.base.capacity;
201        let max_size = config.base.max_size;
202
203        let segment_capacity = capacity.get() / segment_count;
204        let segment_cap = NonZeroUsize::new(segment_capacity.max(1)).unwrap();
205        let segment_max_size = max_size / segment_count as u64;
206
207        let hash_builder = hasher.unwrap_or_default();
208        let segments: Vec<_> = (0..segment_count)
209            .map(|_| {
210                let segment_config = crate::config::LruCacheConfig {
211                    capacity: segment_cap,
212                    max_size: segment_max_size,
213                };
214                Mutex::new(crate::lru::LruSegment::init(
215                    segment_config,
216                    hash_builder.clone(),
217                ))
218            })
219            .collect();
220
221        Self {
222            segments: segments.into_boxed_slice(),
223            hash_builder,
224        }
225    }
226}
227
228impl<K, V, S> ConcurrentLruCache<K, V, S>
229where
230    K: Hash + Eq + Clone + Send,
231    V: Clone + Send,
232    S: BuildHasher + Clone + Send,
233{
234    /// Returns the segment index for the given key.
235    ///
236    /// This is used internally for routing operations to the correct segment.
237    #[inline]
238    fn segment_index<Q>(&self, key: &Q) -> usize
239    where
240        K: Borrow<Q>,
241        Q: ?Sized + Hash,
242    {
243        (self.hash_builder.hash_one(key) as usize) % self.segments.len()
244    }
245
246    /// Returns the total capacity across all segments.
247    pub fn capacity(&self) -> usize {
248        self.segments.iter().map(|s| s.lock().cap().get()).sum()
249    }
250
251    /// Returns the number of segments in the cache.
252    pub fn segment_count(&self) -> usize {
253        self.segments.len()
254    }
255
256    /// Returns the total number of entries across all segments.
257    ///
258    /// Note: This acquires a lock on each segment sequentially, so the
259    /// returned value may be slightly stale in high-concurrency scenarios.
260    pub fn len(&self) -> usize {
261        self.segments.iter().map(|s| s.lock().len()).sum()
262    }
263
264    /// Returns `true` if the cache contains no entries.
265    pub fn is_empty(&self) -> bool {
266        self.segments.iter().all(|s| s.lock().is_empty())
267    }
268
269    /// Retrieves a value from the cache.
270    ///
271    /// Returns a **clone** of the value to avoid holding the lock. For operations
272    /// that don't need ownership, use [`get_with()`](Self::get_with) instead.
273    ///
274    /// If the key exists, it is moved to the MRU position within its segment.
275    ///
276    /// # Example
277    ///
278    /// ```rust,ignore
279    /// let value = cache.get(&"key".to_string());
280    /// ```
281    pub fn get<Q>(&self, key: &Q) -> Option<V>
282    where
283        K: Borrow<Q>,
284        Q: ?Sized + Hash + Eq,
285    {
286        let idx = self.segment_index(key);
287        let mut segment = self.segments[idx].lock();
288        segment.get(key).cloned()
289    }
290
291    /// Retrieves a value and applies a function to it while holding the lock.
292    ///
293    /// More efficient than `get()` when you only need to read from the value,
294    /// as it avoids cloning. The lock is released after `f` returns.
295    ///
296    /// # Type Parameters
297    ///
298    /// - `F`: Function that takes `&V` and returns `R`
299    /// - `R`: Return type of the function
300    ///
301    /// # Example
302    ///
303    /// ```rust,ignore
304    /// // Get length without cloning the whole string
305    /// let len = cache.get_with(&key, |value| value.len());
306    /// ```
307    pub fn get_with<Q, F, R>(&self, key: &Q, f: F) -> Option<R>
308    where
309        K: Borrow<Q>,
310        Q: ?Sized + Hash + Eq,
311        F: FnOnce(&V) -> R,
312    {
313        let idx = self.segment_index(key);
314        let mut segment = self.segments[idx].lock();
315        segment.get(key).map(f)
316    }
317
318    /// Retrieves a mutable reference and applies a function to it.
319    ///
320    /// Allows in-place modification of cached values without removing them.
321    ///
322    /// # Example
323    ///
324    /// ```rust,ignore
325    /// // Increment a counter in-place
326    /// cache.get_mut_with(&"counter".to_string(), |value| *value += 1);
327    /// ```
328    pub fn get_mut_with<Q, F, R>(&self, key: &Q, f: F) -> Option<R>
329    where
330        K: Borrow<Q>,
331        Q: ?Sized + Hash + Eq,
332        F: FnOnce(&mut V) -> R,
333    {
334        let idx = self.segment_index(key);
335        let mut segment = self.segments[idx].lock();
336        segment.get_mut(key).map(f)
337    }
338
339    /// Inserts a key-value pair into the cache.
340    ///
341    /// If the key exists, the value is updated and moved to MRU position.
342    /// If at capacity, the LRU entry in the target segment is evicted.
343    /// Use `SIZE_UNIT` (1) for count-based caching.
344    ///
345    /// # Returns
346    ///
347    /// - `Some((old_key, old_value))` if key existed or entry was evicted
348    /// - `None` if inserted with available capacity
349    ///
350    /// # Example
351    ///
352    /// ```rust,ignore
353    /// cache.put("key".to_string(), 42, 1);
354    /// ```
355    pub fn put(&self, key: K, value: V, size: u64) -> Option<Vec<(K, V)>> {
356        let idx = self.segment_index(&key);
357        let mut segment = self.segments[idx].lock();
358        segment.put(key, value, size)
359    }
360
361    /// Removes a key from the cache.
362    ///
363    /// # Returns
364    ///
365    /// - `Some(value)` if the key existed
366    /// - `None` if the key was not found
367    pub fn remove<Q>(&self, key: &Q) -> Option<V>
368    where
369        K: Borrow<Q>,
370        Q: ?Sized + Hash + Eq,
371    {
372        let idx = self.segment_index(key);
373        let mut segment = self.segments[idx].lock();
374        segment.remove(key)
375    }
376
377    /// Removes all entries from all segments.
378    ///
379    /// Acquires locks on each segment sequentially.
380    pub fn clear(&self) {
381        for segment in self.segments.iter() {
382            segment.lock().clear();
383        }
384    }
385
386    /// Returns the current total size across all segments.
387    ///
388    /// This is the sum of all `size` values from `put()` calls.
389    pub fn current_size(&self) -> u64 {
390        self.segments.iter().map(|s| s.lock().current_size()).sum()
391    }
392
393    /// Returns the maximum total content size across all segments.
394    pub fn max_size(&self) -> u64 {
395        self.segments.iter().map(|s| s.lock().max_size()).sum()
396    }
397
398    /// Records a cache miss for metrics tracking.
399    ///
400    /// Call this after a failed `get()` when you fetch from the origin.
401    pub fn record_miss(&self, object_size: u64) {
402        // Record on the first segment (metrics are aggregated anyway)
403        if let Some(segment) = self.segments.first() {
404            segment.lock().record_miss(object_size);
405        }
406    }
407
408    /// Checks if the cache contains a key without promoting it.
409    ///
410    /// This is a pure existence check that does **not** update the entry's recency.
411    ///
412    /// # Example
413    ///
414    /// ```rust,ignore
415    /// if cache.contains(&"key".to_string()) {
416    ///     println!("Key exists!");
417    /// }
418    /// ```
419    pub fn contains<Q>(&self, key: &Q) -> bool
420    where
421        K: Borrow<Q>,
422        Q: ?Sized + Hash + Eq,
423    {
424        let idx = self.segment_index(key);
425        let segment = self.segments[idx].lock();
426        segment.contains(key)
427    }
428
429    /// Returns a clone of the value without updating the LRU order.
430    ///
431    /// Unlike [`get()`](Self::get), this does NOT move the entry to the front
432    /// or update any access metadata. Returns a cloned value because the
433    /// internal lock cannot be held across the return boundary.
434    ///
435    /// # Example
436    ///
437    /// ```rust,ignore
438    /// let value = cache.peek(&"key".to_string());
439    /// ```
440    pub fn peek<Q>(&self, key: &Q) -> Option<V>
441    where
442        K: Borrow<Q>,
443        Q: ?Sized + Hash + Eq,
444        V: Clone,
445    {
446        let idx = self.segment_index(key);
447        let segment = self.segments[idx].lock();
448        segment.peek(key).cloned()
449    }
450}
451
452impl<K, V, S> CacheMetrics for ConcurrentLruCache<K, V, S>
453where
454    K: Hash + Eq + Clone + Send,
455    V: Clone + Send,
456    S: BuildHasher + Clone + Send,
457{
458    fn metrics(&self) -> BTreeMap<String, f64> {
459        // Aggregate metrics from all segments
460        let mut aggregated = BTreeMap::new();
461
462        for segment in self.segments.iter() {
463            let segment_metrics = segment.lock().metrics().metrics();
464            for (key, value) in segment_metrics {
465                *aggregated.entry(key).or_insert(0.0) += value;
466            }
467        }
468
469        aggregated
470    }
471
472    fn algorithm_name(&self) -> &'static str {
473        "ConcurrentLRU"
474    }
475}
476
477// SAFETY: ConcurrentLruCache uses Mutex for synchronization, making it safe to
478// send and share across threads when K and V are Send.
479unsafe impl<K: Send, V: Send, S: Send> Send for ConcurrentLruCache<K, V, S> {}
480unsafe impl<K: Send, V: Send, S: Send + Sync> Sync for ConcurrentLruCache<K, V, S> {}
481
482impl<K, V, S> core::fmt::Debug for ConcurrentLruCache<K, V, S>
483where
484    K: Hash + Eq + Clone + Send,
485    V: Clone + Send,
486    S: BuildHasher + Clone + Send,
487{
488    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
489        f.debug_struct("ConcurrentLruCache")
490            .field("segment_count", &self.segments.len())
491            .field("total_len", &self.len())
492            .finish()
493    }
494}
495
#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::{ConcurrentCacheConfig, ConcurrentLruCacheConfig, LruCacheConfig};

    extern crate std;
    use std::string::ToString;
    use std::sync::Arc;
    use std::thread;
    use std::vec::Vec;

    /// Builds a count-based (max_size = u64::MAX) config for the given
    /// total capacity and segment count.
    fn make_config(capacity: usize, segments: usize) -> ConcurrentLruCacheConfig {
        ConcurrentCacheConfig {
            base: LruCacheConfig {
                capacity: NonZeroUsize::new(capacity).unwrap(),
                max_size: u64::MAX,
            },
            segments,
        }
    }

    #[test]
    fn test_basic_operations() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        assert!(cache.is_empty());
        assert_eq!(cache.len(), 0);

        cache.put("a".to_string(), 1, 1);
        cache.put("b".to_string(), 2, 1);
        cache.put("c".to_string(), 3, 1);

        assert_eq!(cache.len(), 3);
        assert!(!cache.is_empty());

        assert_eq!(cache.get(&"a".to_string()), Some(1));
        assert_eq!(cache.get(&"b".to_string()), Some(2));
        assert_eq!(cache.get(&"c".to_string()), Some(3));
        assert_eq!(cache.get(&"d".to_string()), None);
    }

    #[test]
    fn test_get_with() {
        let cache: ConcurrentLruCache<String, String> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.put("key".to_string(), "hello world".to_string(), 1);

        let len = cache.get_with(&"key".to_string(), |v: &String| v.len());
        assert_eq!(len, Some(11));

        let missing = cache.get_with(&"missing".to_string(), |v: &String| v.len());
        assert_eq!(missing, None);
    }

    #[test]
    fn test_get_mut_with() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.put("counter".to_string(), 0, 1);

        cache.get_mut_with(&"counter".to_string(), |v: &mut i32| *v += 1);
        cache.get_mut_with(&"counter".to_string(), |v: &mut i32| *v += 1);

        assert_eq!(cache.get(&"counter".to_string()), Some(2));
    }

    #[test]
    fn test_remove() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.put("a".to_string(), 1, 1);
        cache.put("b".to_string(), 2, 1);

        assert_eq!(cache.remove(&"a".to_string()), Some(1));
        assert_eq!(cache.get(&"a".to_string()), None);
        assert_eq!(cache.len(), 1);

        assert_eq!(cache.remove(&"nonexistent".to_string()), None);
    }

    #[test]
    fn test_clear() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.put("a".to_string(), 1, 1);
        cache.put("b".to_string(), 2, 1);
        cache.put("c".to_string(), 3, 1);

        assert_eq!(cache.len(), 3);
        cache.clear();
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());
    }

    #[test]
    fn test_contains_key() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.put("exists".to_string(), 1, 1);

        assert!(cache.contains(&"exists".to_string()));
        assert!(!cache.contains(&"missing".to_string()));
    }

    #[test]
    fn test_concurrent_access() {
        let cache: Arc<ConcurrentLruCache<String, i32>> =
            Arc::new(ConcurrentLruCache::init(make_config(1000, 16), None));
        let num_threads = 8;
        let ops_per_thread = 1000;

        let mut handles: Vec<std::thread::JoinHandle<()>> = Vec::new();

        for t in 0..num_threads {
            let cache = Arc::clone(&cache);
            handles.push(thread::spawn(move || {
                for i in 0..ops_per_thread {
                    let key = std::format!("thread_{}_key_{}", t, i);
                    cache.put(key.clone(), t * 1000 + i, 1);
                    let _ = cache.get(&key);
                }
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }

        assert!(!cache.is_empty());
    }

    #[test]
    fn test_concurrent_mixed_operations() {
        let cache: Arc<ConcurrentLruCache<String, i32>> =
            Arc::new(ConcurrentLruCache::init(make_config(100, 16), None));
        let num_threads = 8;
        let ops_per_thread = 500;

        let mut handles: Vec<std::thread::JoinHandle<()>> = Vec::new();

        for t in 0..num_threads {
            let cache = Arc::clone(&cache);
            handles.push(thread::spawn(move || {
                for i in 0..ops_per_thread {
                    let key = std::format!("key_{}", i % 200);

                    match i % 4 {
                        0 => {
                            cache.put(key, i, 1);
                        }
                        1 => {
                            let _ = cache.get(&key);
                        }
                        2 => {
                            cache.get_mut_with(&key, |v: &mut i32| *v += 1);
                        }
                        3 => {
                            let _ = cache.remove(&key);
                        }
                        _ => unreachable!(),
                    }

                    if i == 250 && t == 0 {
                        cache.clear();
                    }
                }
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }

        // Cache should be in valid state
        assert!(cache.len() <= 100);
    }

    #[test]
    fn test_segment_count() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 8), None);

        assert_eq!(cache.segment_count(), 8);
    }

    #[test]
    fn test_capacity() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        // Capacity is distributed across segments, so it may not be exactly 100
        // It should be close to the requested capacity
        let capacity = cache.capacity();
        assert!(capacity >= 16); // At least 1 per segment
        assert!(capacity <= 100); // Not more than requested
    }

    #[test]
    fn test_eviction_on_capacity() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(48, 16), None);

        cache.put("a".to_string(), 1, 1);
        cache.put("b".to_string(), 2, 1);
        cache.put("c".to_string(), 3, 1);

        assert_eq!(cache.len(), 3);

        // This should evict the LRU item
        cache.put("d".to_string(), 4, 1);

        assert!(cache.len() <= 48);
        assert!(cache.contains(&"d".to_string()));
    }

    #[test]
    fn test_update_existing_key() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.put("key".to_string(), 1, 1);
        assert_eq!(cache.get(&"key".to_string()), Some(1));

        cache.put("key".to_string(), 2, 1);
        assert_eq!(cache.get(&"key".to_string()), Some(2));
        assert_eq!(cache.len(), 1);
    }

    #[test]
    fn test_lru_ordering() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(48, 16), None);

        cache.put("a".to_string(), 1, 1);
        cache.put("b".to_string(), 2, 1);
        cache.put("c".to_string(), 3, 1);

        // Access "a" to make it recently used
        let _ = cache.get(&"a".to_string());

        // Add a new item
        cache.put("d".to_string(), 4, 1);

        assert!(cache.contains(&"a".to_string()));
        assert!(cache.contains(&"d".to_string()));
    }

    #[test]
    fn test_metrics() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.put("a".to_string(), 1, 1);
        cache.put("b".to_string(), 2, 1);

        let metrics = cache.metrics();
        // Metrics aggregation across segments
        assert!(!metrics.is_empty());
    }

    #[test]
    fn test_record_miss() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.record_miss(100);
        cache.record_miss(200);

        let metrics = cache.metrics();
        // Metrics are aggregated across segments
        assert!(!metrics.is_empty());
    }

    #[test]
    fn test_empty_cache_operations() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        assert!(cache.is_empty());
        assert_eq!(cache.len(), 0);
        assert_eq!(cache.get(&"missing".to_string()), None);
        assert_eq!(cache.remove(&"missing".to_string()), None);
        assert!(!cache.contains(&"missing".to_string()));

        let result = cache.get_with(&"missing".to_string(), |v: &i32| *v);
        assert_eq!(result, None);
    }

    #[test]
    fn test_single_item_cache() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(16, 16), None);

        cache.put("a".to_string(), 1, 1);
        assert!(!cache.is_empty());

        cache.put("b".to_string(), 2, 1);
        assert!(cache.len() <= 16);
    }

    #[test]
    fn test_borrowed_key_lookup() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.put("test_key".to_string(), 42, 1);

        // Test with borrowed key (&str against String keys via Borrow)
        let key_str = "test_key";
        assert_eq!(cache.get(key_str), Some(42));
        assert!(cache.contains(key_str));
        assert_eq!(cache.remove(key_str), Some(42));
    }

    #[test]
    fn test_algorithm_name() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        assert_eq!(cache.algorithm_name(), "ConcurrentLRU");
    }

    #[test]
    fn test_contains_non_promoting() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.put("a".to_string(), 1, 1);
        cache.put("b".to_string(), 2, 1);

        // contains() should check without promoting
        assert!(cache.contains(&"a".to_string()));
        assert!(cache.contains(&"b".to_string()));
        assert!(!cache.contains(&"c".to_string()));
    }
}