cache_rs/concurrent/lru.rs
//! Concurrent LRU Cache Implementation
//!
//! A thread-safe LRU cache using lock striping (segmented storage) for high-performance
//! concurrent access. This is the multi-threaded counterpart to [`LruCache`](crate::LruCache).
//!
//! # How It Works
//!
//! The cache partitions keys across multiple independent segments, each with its own lock.
//! This allows concurrent operations on different segments without contention.
//!
//! ```text
//! ┌────────────────────────────────────────────────────────────────┐
//! │                       ConcurrentLruCache                        │
//! │                                                                  │
//! │              hash(key) % N ──▶ Segment Selection                 │
//! │                                                                  │
//! │  ┌──────────────┐   ┌──────────────┐         ┌──────────────┐  │
//! │  │  Segment 0   │   │  Segment 1   │   ...   │ Segment N-1  │  │
//! │  │  ┌────────┐  │   │  ┌────────┐  │         │  ┌────────┐  │  │
//! │  │  │ Mutex  │  │   │  │ Mutex  │  │         │  │ Mutex  │  │  │
//! │  │  └────┬───┘  │   │  └────┬───┘  │         │  └────┬───┘  │  │
//! │  │       │      │   │       │      │         │       │      │  │
//! │  │  ┌────▼───┐  │   │  ┌────▼───┐  │         │  ┌────▼───┐  │  │
//! │  │  │LruCache│  │   │  │LruCache│  │         │  │LruCache│  │  │
//! │  │  └────────┘  │   │  └────────┘  │         │  └────────┘  │  │
//! │  └──────────────┘   └──────────────┘         └──────────────┘  │
//! └────────────────────────────────────────────────────────────────┘
//! ```
//!
//! ## Segment Count
//!
//! The default segment count is based on available CPU cores (typically 16).
//! More segments reduce contention but add per-segment memory overhead. The
//! configured capacity is split evenly across the segments, as sketched below.
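//!
//! A minimal sketch of the capacity split, mirroring the integer division used
//! by `init` (illustrative numbers only):
//!
//! ```rust,ignore
//! // 10_000 total entries over 16 segments → 625 entries per segment.
//! // Any remainder is dropped, so the effective total capacity can be
//! // slightly smaller than the configured value.
//! let per_segment = 10_000 / 16;
//! assert_eq!(per_segment, 625);
//! ```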
//!
//! ## Trade-offs
//!
//! - **Pros**: Near-linear scaling with thread count, no global lock
//! - **Cons**: LRU ordering is per-segment, not global. An item might be evicted
//!   from one segment while another segment has older items.
//!
//! # Performance Characteristics
//!
//! | Metric | Value |
//! |--------|-------|
//! | Get/Put/Remove | O(1) average |
//! | Concurrency | Near-linear scaling up to segment count |
//! | Memory overhead | ~150 bytes per entry + one Mutex per segment |
//!
//! # When to Use
//!
//! **Use ConcurrentLruCache when:**
//! - Multiple threads need cache access
//! - You need better throughput than `Mutex<LruCache>`
//! - Keys distribute evenly (hot keys in one segment will still contend)
//!
//! **Consider alternatives when:**
//! - Single-threaded access only → use `LruCache`
//! - Need strict global LRU ordering → use `Mutex<LruCache>`
//! - Very hot keys → consider per-key caching or request coalescing
//!
//! # Thread Safety
//!
//! `ConcurrentLruCache` is `Send + Sync` and can be shared via `Arc`.
//!
//! # Example
//!
//! ```rust,ignore
//! use cache_rs::concurrent::ConcurrentLruCache;
//! use cache_rs::config::{ConcurrentLruCacheConfig, ConcurrentCacheConfig, LruCacheConfig};
//! use std::num::NonZeroUsize;
//! use std::sync::Arc;
//! use std::thread;
//!
//! let config: ConcurrentLruCacheConfig = ConcurrentCacheConfig {
//!     base: LruCacheConfig {
//!         capacity: NonZeroUsize::new(10_000).unwrap(),
//!         max_size: u64::MAX,
//!     },
//!     segments: 16,
//! };
//! let cache = Arc::new(ConcurrentLruCache::init(config, None));
//!
//! let handles: Vec<_> = (0..4).map(|i| {
//!     let cache = Arc::clone(&cache);
//!     thread::spawn(move || {
//!         for j in 0..1000 {
//!             cache.put(format!("key-{}-{}", i, j), j);
//!         }
//!     })
//! }).collect();
//!
//! for h in handles {
//!     h.join().unwrap();
//! }
//!
//! println!("Total entries: {}", cache.len());
//! ```

extern crate alloc;

use crate::lru::LruSegment;
use crate::metrics::CacheMetrics;
use alloc::boxed::Box;
use alloc::collections::BTreeMap;
use alloc::string::String;
use alloc::vec::Vec;
use core::borrow::Borrow;
use core::hash::{BuildHasher, Hash};
use core::num::NonZeroUsize;
use parking_lot::Mutex;

#[cfg(feature = "hashbrown")]
use hashbrown::DefaultHashBuilder;

#[cfg(not(feature = "hashbrown"))]
use std::collections::hash_map::RandomState as DefaultHashBuilder;

/// A thread-safe LRU cache with segmented storage for high concurrency.
///
/// Keys are partitioned across multiple segments using hash-based sharding.
/// Each segment has its own lock, allowing concurrent access to different
/// segments without blocking.
///
/// # Type Parameters
///
/// - `K`: Key type. Must implement `Hash + Eq + Clone + Send`.
/// - `V`: Value type. Must implement `Clone + Send`.
/// - `S`: Hash builder type. Defaults to `DefaultHashBuilder`.
///
/// # Note on LRU Semantics
///
/// LRU ordering is maintained **per-segment**, not globally. This means an item
/// in segment A might be evicted while segment B has items that were accessed
/// less recently in wall-clock time. For most workloads with good key distribution,
/// this approximation works well.
///
/// # Example
///
/// ```rust,ignore
/// use cache_rs::concurrent::ConcurrentLruCache;
/// use cache_rs::config::{ConcurrentLruCacheConfig, ConcurrentCacheConfig, LruCacheConfig};
/// use std::num::NonZeroUsize;
/// use std::sync::Arc;
///
/// let config: ConcurrentLruCacheConfig = ConcurrentCacheConfig {
///     base: LruCacheConfig {
///         capacity: NonZeroUsize::new(1000).unwrap(),
///         max_size: u64::MAX,
///     },
///     segments: 16,
/// };
/// let cache = Arc::new(ConcurrentLruCache::init(config, None));
///
/// // Safe to use from multiple threads
/// cache.put("key".to_string(), 42);
/// assert_eq!(cache.get(&"key".to_string()), Some(42));
/// ```
pub struct ConcurrentLruCache<K, V, S = DefaultHashBuilder> {
    segments: Box<[Mutex<LruSegment<K, V, S>>]>,
    hash_builder: S,
}

impl<K, V> ConcurrentLruCache<K, V, DefaultHashBuilder>
where
    K: Hash + Eq + Clone + Send,
    V: Clone + Send,
{
    /// Creates a new concurrent LRU cache from a configuration with an optional hasher.
    ///
    /// This is the **recommended** way to create a concurrent LRU cache.
    ///
    /// # Arguments
    ///
    /// * `config` - Configuration specifying capacity, segments, and optional size limit
    /// * `hasher` - Optional custom hash builder. If `None`, uses `DefaultHashBuilder`
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use cache_rs::concurrent::ConcurrentLruCache;
    /// use cache_rs::config::{ConcurrentLruCacheConfig, ConcurrentCacheConfig, LruCacheConfig};
    /// use core::num::NonZeroUsize;
    ///
    /// // Capacity-only cache (no size limit) with 16 segments
    /// let config: ConcurrentLruCacheConfig = ConcurrentCacheConfig {
    ///     base: LruCacheConfig {
    ///         capacity: NonZeroUsize::new(10000).unwrap(),
    ///         max_size: u64::MAX,
    ///     },
    ///     segments: 16,
    /// };
    /// let cache: ConcurrentLruCache<String, i32> = ConcurrentLruCache::init(config, None);
    ///
    /// // With more segments and a size limit
    /// let config: ConcurrentLruCacheConfig = ConcurrentCacheConfig {
    ///     base: LruCacheConfig {
    ///         capacity: NonZeroUsize::new(10000).unwrap(),
    ///         max_size: 100 * 1024 * 1024, // 100MB
    ///     },
    ///     segments: 32,
    /// };
    /// let cache: ConcurrentLruCache<String, Vec<u8>> = ConcurrentLruCache::init(config, None);
    /// ```
    pub fn init(
        config: crate::config::ConcurrentLruCacheConfig,
        hasher: Option<DefaultHashBuilder>,
    ) -> Self {
        let segment_count = config.segments;
        let capacity = config.base.capacity;
        let max_size = config.base.max_size;

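        // Split the configured capacity and max_size evenly across segments;
        // integer division drops any remainder, and each segment keeps at least one slot.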
        let segment_capacity = capacity.get() / segment_count;
        let segment_cap = NonZeroUsize::new(segment_capacity.max(1)).unwrap();
        let segment_max_size = max_size / segment_count as u64;

        let hash_builder = hasher.unwrap_or_default();
        let segments: Vec<_> = (0..segment_count)
            .map(|_| {
                let segment_config = crate::config::LruCacheConfig {
                    capacity: segment_cap,
                    max_size: segment_max_size,
                };
                Mutex::new(crate::lru::LruSegment::init(
                    segment_config,
                    hash_builder.clone(),
                ))
            })
            .collect();

        Self {
            segments: segments.into_boxed_slice(),
            hash_builder,
        }
    }
}

impl<K, V, S> ConcurrentLruCache<K, V, S>
where
    K: Hash + Eq + Clone + Send,
    V: Clone + Send,
    S: BuildHasher + Clone + Send,
{
    /// Returns the segment index for the given key.
    ///
    /// This is used internally for routing operations to the correct segment.
    #[inline]
    fn segment_index<Q>(&self, key: &Q) -> usize
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash,
    {
        (self.hash_builder.hash_one(key) as usize) % self.segments.len()
    }

    /// Returns the total capacity across all segments.
    ///
    /// Because the configured capacity is divided evenly across segments (with
    /// any remainder dropped), this can be slightly less than the capacity
    /// requested at construction.
    pub fn capacity(&self) -> usize {
        self.segments.iter().map(|s| s.lock().cap().get()).sum()
    }

    /// Returns the number of segments in the cache.
    pub fn segment_count(&self) -> usize {
        self.segments.len()
    }

    /// Returns the total number of entries across all segments.
    ///
    /// Note: this locks each segment in turn rather than taking a global
    /// snapshot, so the result may already be stale by the time it is
    /// returned in high-concurrency scenarios.
    pub fn len(&self) -> usize {
        self.segments.iter().map(|s| s.lock().len()).sum()
    }

    /// Returns `true` if the cache contains no entries.
    pub fn is_empty(&self) -> bool {
        self.segments.iter().all(|s| s.lock().is_empty())
    }

    /// Retrieves a value from the cache.
    ///
    /// Returns a **clone** of the value to avoid holding the lock. For operations
    /// that don't need ownership, use [`get_with()`](Self::get_with) instead.
    ///
    /// If the key exists, it is moved to the MRU position within its segment.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// let value = cache.get(&"key".to_string());
    /// ```
    pub fn get<Q>(&self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        let idx = self.segment_index(key);
        let mut segment = self.segments[idx].lock();
        segment.get(key).cloned()
    }

    /// Retrieves a value and applies a function to it while holding the lock.
    ///
    /// More efficient than `get()` when you only need to read from the value,
    /// as it avoids cloning. The lock is released after `f` returns.
    ///
    /// # Type Parameters
    ///
    /// - `F`: Function that takes `&V` and returns `R`
    /// - `R`: Return type of the function
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// // Get length without cloning the whole string
    /// let len = cache.get_with(&key, |value| value.len());
    /// ```
    pub fn get_with<Q, F, R>(&self, key: &Q, f: F) -> Option<R>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
        F: FnOnce(&V) -> R,
    {
        let idx = self.segment_index(key);
        let mut segment = self.segments[idx].lock();
        segment.get(key).map(f)
    }

    /// Retrieves a mutable reference and applies a function to it.
    ///
    /// Allows in-place modification of cached values without removing them.
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// // Increment a counter in-place
    /// cache.get_mut_with(&"counter".to_string(), |value| *value += 1);
    /// ```
    pub fn get_mut_with<Q, F, R>(&self, key: &Q, f: F) -> Option<R>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
        F: FnOnce(&mut V) -> R,
    {
        let idx = self.segment_index(key);
        let mut segment = self.segments[idx].lock();
        segment.get_mut(key).map(f)
    }

    /// Inserts a key-value pair into the cache.
    ///
    /// If the key exists, the value is updated and moved to MRU position.
    /// If at capacity, the LRU entry in the target segment is evicted.
    ///
    /// # Returns
    ///
    /// - `Some((old_key, old_value))` if key existed or entry was evicted
    /// - `None` if inserted with available capacity
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// cache.put("key".to_string(), 42);
    /// ```
    pub fn put(&self, key: K, value: V) -> Option<(K, V)> {
        let idx = self.segment_index(&key);
        let mut segment = self.segments[idx].lock();
        segment.put(key, value)
    }

    /// Inserts a key-value pair with explicit size tracking.
    ///
    /// Use this for size-aware caching. The size is used for `max_size` tracking
    /// and eviction decisions.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to insert
    /// * `value` - The value to cache
    /// * `size` - Size of this entry (in your chosen unit)
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// let data = vec![0u8; 1024];
    /// cache.put_with_size("file".to_string(), data, 1024);
    /// ```
    pub fn put_with_size(&self, key: K, value: V, size: u64) -> Option<(K, V)> {
        let idx = self.segment_index(&key);
        let mut segment = self.segments[idx].lock();
        segment.put_with_size(key, value, size)
    }

    /// Removes a key from the cache.
    ///
    /// # Returns
    ///
    /// - `Some(value)` if the key existed
    /// - `None` if the key was not found
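    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// // Illustrative: assuming "key" was previously inserted with 42.
    /// assert_eq!(cache.remove(&"key".to_string()), Some(42));
    /// assert_eq!(cache.remove(&"key".to_string()), None);
    /// ```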
    pub fn remove<Q>(&self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        let idx = self.segment_index(key);
        let mut segment = self.segments[idx].lock();
        segment.remove(key)
    }

    /// Checks if the cache contains a key.
    ///
    /// Note: this **does** update the entry's recency (it moves the entry to the
    /// MRU position), so it is not a side-effect-free existence check.
    pub fn contains_key<Q>(&self, key: &Q) -> bool
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        let idx = self.segment_index(key);
        let mut segment = self.segments[idx].lock();
        segment.get(key).is_some()
    }

    /// Removes all entries from all segments.
    ///
    /// Acquires locks on each segment sequentially.
    pub fn clear(&self) {
        for segment in self.segments.iter() {
            segment.lock().clear();
        }
    }

    /// Returns the current total size across all segments.
    ///
    /// This is the sum of all `size` values from `put_with_size()` calls.
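    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// // Illustrative sketch: sizes accumulate across inserts (assuming no eviction occurs).
    /// cache.put_with_size("a".to_string(), vec![0u8; 512], 512);
    /// cache.put_with_size("b".to_string(), vec![0u8; 256], 256);
    /// assert_eq!(cache.current_size(), 768);
    /// ```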
    pub fn current_size(&self) -> u64 {
        self.segments.iter().map(|s| s.lock().current_size()).sum()
    }

    /// Returns the maximum total content size across all segments.
    pub fn max_size(&self) -> u64 {
        self.segments.iter().map(|s| s.lock().max_size()).sum()
    }

    /// Records a cache miss for metrics tracking.
    ///
    /// Call this after a failed `get()` when you fetch from the origin.
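    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// // Illustrative get-or-fetch sketch; `fetch_from_origin` is a hypothetical loader.
    /// let value = match cache.get(&key) {
    ///     Some(v) => v,
    ///     None => {
    ///         let v = fetch_from_origin(&key);
    ///         cache.record_miss(v.len() as u64);
    ///         cache.put(key.clone(), v.clone());
    ///         v
    ///     }
    /// };
    /// ```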
    pub fn record_miss(&self, object_size: u64) {
        // Record on the first segment (metrics are aggregated anyway)
        if let Some(segment) = self.segments.first() {
            segment.lock().record_miss(object_size);
        }
    }
}

impl<K, V, S> CacheMetrics for ConcurrentLruCache<K, V, S>
where
    K: Hash + Eq + Clone + Send,
    V: Clone + Send,
    S: BuildHasher + Clone + Send,
{
    fn metrics(&self) -> BTreeMap<String, f64> {
        // Aggregate metrics from all segments
        let mut aggregated = BTreeMap::new();

        for segment in self.segments.iter() {
            let segment_metrics = segment.lock().metrics().metrics();
            for (key, value) in segment_metrics {
                *aggregated.entry(key).or_insert(0.0) += value;
            }
        }

        aggregated
    }

    fn algorithm_name(&self) -> &'static str {
        "ConcurrentLRU"
    }
}

// SAFETY: ConcurrentLruCache uses Mutex for synchronization, so it is safe to
// send and share across threads as long as K, V, and S satisfy the bounds below.
unsafe impl<K: Send, V: Send, S: Send> Send for ConcurrentLruCache<K, V, S> {}
unsafe impl<K: Send, V: Send, S: Send + Sync> Sync for ConcurrentLruCache<K, V, S> {}

impl<K, V, S> core::fmt::Debug for ConcurrentLruCache<K, V, S>
where
    K: Hash + Eq + Clone + Send,
    V: Clone + Send,
    S: BuildHasher + Clone + Send,
{
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("ConcurrentLruCache")
            .field("segment_count", &self.segments.len())
            .field("total_len", &self.len())
            .finish()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::{ConcurrentCacheConfig, ConcurrentLruCacheConfig, LruCacheConfig};

    extern crate std;
    use std::string::ToString;
    use std::sync::Arc;
    use std::thread;
    use std::vec::Vec;

    fn make_config(capacity: usize, segments: usize) -> ConcurrentLruCacheConfig {
        ConcurrentCacheConfig {
            base: LruCacheConfig {
                capacity: NonZeroUsize::new(capacity).unwrap(),
                max_size: u64::MAX,
            },
            segments,
        }
    }

    #[test]
    fn test_basic_operations() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        assert!(cache.is_empty());
        assert_eq!(cache.len(), 0);

        cache.put("a".to_string(), 1);
        cache.put("b".to_string(), 2);
        cache.put("c".to_string(), 3);

        assert_eq!(cache.len(), 3);
        assert!(!cache.is_empty());

        assert_eq!(cache.get(&"a".to_string()), Some(1));
        assert_eq!(cache.get(&"b".to_string()), Some(2));
        assert_eq!(cache.get(&"c".to_string()), Some(3));
        assert_eq!(cache.get(&"d".to_string()), None);
    }

    #[test]
    fn test_get_with() {
        let cache: ConcurrentLruCache<String, String> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.put("key".to_string(), "hello world".to_string());

        let len = cache.get_with(&"key".to_string(), |v: &String| v.len());
        assert_eq!(len, Some(11));

        let missing = cache.get_with(&"missing".to_string(), |v: &String| v.len());
        assert_eq!(missing, None);
    }

    #[test]
    fn test_get_mut_with() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.put("counter".to_string(), 0);

        cache.get_mut_with(&"counter".to_string(), |v: &mut i32| *v += 1);
        cache.get_mut_with(&"counter".to_string(), |v: &mut i32| *v += 1);

        assert_eq!(cache.get(&"counter".to_string()), Some(2));
    }

    #[test]
    fn test_remove() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.put("a".to_string(), 1);
        cache.put("b".to_string(), 2);

        assert_eq!(cache.remove(&"a".to_string()), Some(1));
        assert_eq!(cache.get(&"a".to_string()), None);
        assert_eq!(cache.len(), 1);

        assert_eq!(cache.remove(&"nonexistent".to_string()), None);
    }

    #[test]
    fn test_clear() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.put("a".to_string(), 1);
        cache.put("b".to_string(), 2);
        cache.put("c".to_string(), 3);

        assert_eq!(cache.len(), 3);
        cache.clear();
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());
    }

    #[test]
    fn test_contains_key() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.put("exists".to_string(), 1);

        assert!(cache.contains_key(&"exists".to_string()));
        assert!(!cache.contains_key(&"missing".to_string()));
    }

    #[test]
    fn test_concurrent_access() {
        let cache: Arc<ConcurrentLruCache<String, i32>> =
            Arc::new(ConcurrentLruCache::init(make_config(1000, 16), None));
        let num_threads = 8;
        let ops_per_thread = 1000;

        let mut handles: Vec<std::thread::JoinHandle<()>> = Vec::new();

        for t in 0..num_threads {
            let cache = Arc::clone(&cache);
            handles.push(thread::spawn(move || {
                for i in 0..ops_per_thread {
                    let key = std::format!("thread_{}_key_{}", t, i);
                    cache.put(key.clone(), t * 1000 + i);
                    let _ = cache.get(&key);
                }
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }

        assert!(!cache.is_empty());
    }

    #[test]
    fn test_concurrent_mixed_operations() {
        let cache: Arc<ConcurrentLruCache<String, i32>> =
            Arc::new(ConcurrentLruCache::init(make_config(100, 16), None));
        let num_threads = 8;
        let ops_per_thread = 500;

        let mut handles: Vec<std::thread::JoinHandle<()>> = Vec::new();

        for t in 0..num_threads {
            let cache = Arc::clone(&cache);
            handles.push(thread::spawn(move || {
                for i in 0..ops_per_thread {
                    let key = std::format!("key_{}", i % 200);

                    match i % 4 {
                        0 => {
                            cache.put(key, i);
                        }
                        1 => {
                            let _ = cache.get(&key);
                        }
                        2 => {
                            cache.get_mut_with(&key, |v: &mut i32| *v += 1);
                        }
                        3 => {
                            let _ = cache.remove(&key);
                        }
                        _ => unreachable!(),
                    }

                    if i == 250 && t == 0 {
                        cache.clear();
                    }
                }
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }

        // Cache should be in valid state
        assert!(cache.len() <= 100);
    }

    #[test]
    fn test_segment_count() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 8), None);

        assert_eq!(cache.segment_count(), 8);
    }

    #[test]
    fn test_capacity() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        // Capacity is distributed across segments, so it may not be exactly 100
        // It should be close to the requested capacity
        let capacity = cache.capacity();
        assert!(capacity >= 16); // At least 1 per segment
        assert!(capacity <= 100); // Not more than requested
    }

    #[test]
    fn test_eviction_on_capacity() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(48, 16), None);

        cache.put("a".to_string(), 1);
        cache.put("b".to_string(), 2);
        cache.put("c".to_string(), 3);

        assert_eq!(cache.len(), 3);

        // If "d" lands in a segment that is already full, that segment's LRU entry is evicted
        cache.put("d".to_string(), 4);

        assert!(cache.len() <= 48);
        assert!(cache.contains_key(&"d".to_string()));
    }

    #[test]
    fn test_update_existing_key() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.put("key".to_string(), 1);
        assert_eq!(cache.get(&"key".to_string()), Some(1));

        cache.put("key".to_string(), 2);
        assert_eq!(cache.get(&"key".to_string()), Some(2));
        assert_eq!(cache.len(), 1);
    }

    #[test]
    fn test_lru_ordering() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(48, 16), None);

        cache.put("a".to_string(), 1);
        cache.put("b".to_string(), 2);
        cache.put("c".to_string(), 3);

        // Access "a" to make it recently used
        let _ = cache.get(&"a".to_string());

        // Add a new item
        cache.put("d".to_string(), 4);

        assert!(cache.contains_key(&"a".to_string()));
        assert!(cache.contains_key(&"d".to_string()));
    }

    #[test]
    fn test_metrics() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.put("a".to_string(), 1);
        cache.put("b".to_string(), 2);

        let metrics = cache.metrics();
        // Metrics aggregation across segments
        assert!(!metrics.is_empty());
    }

    #[test]
    fn test_record_miss() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.record_miss(100);
        cache.record_miss(200);

        let metrics = cache.metrics();
        // Metrics are aggregated across segments
        assert!(!metrics.is_empty());
    }

    #[test]
    fn test_empty_cache_operations() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        assert!(cache.is_empty());
        assert_eq!(cache.len(), 0);
        assert_eq!(cache.get(&"missing".to_string()), None);
        assert_eq!(cache.remove(&"missing".to_string()), None);
        assert!(!cache.contains_key(&"missing".to_string()));

        let result = cache.get_with(&"missing".to_string(), |v: &i32| *v);
        assert_eq!(result, None);
    }

    #[test]
    fn test_single_item_cache() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(16, 16), None);

        cache.put("a".to_string(), 1);
        assert!(!cache.is_empty());

        cache.put("b".to_string(), 2);
        assert!(cache.len() <= 16);
    }

    #[test]
    fn test_borrowed_key_lookup() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        cache.put("test_key".to_string(), 42);

        // Test with borrowed key
        let key_str = "test_key";
        assert_eq!(cache.get(key_str), Some(42));
        assert!(cache.contains_key(key_str));
        assert_eq!(cache.remove(key_str), Some(42));
    }

    #[test]
    fn test_algorithm_name() {
        let cache: ConcurrentLruCache<String, i32> =
            ConcurrentLruCache::init(make_config(100, 16), None);

        assert_eq!(cache.algorithm_name(), "ConcurrentLRU");
    }
}