// cache_rs/concurrent/gdsf.rs
1//! Concurrent GDSF Cache Implementation
2//!
3//! A thread-safe GDSF cache using lock striping (segmented storage) for high-performance
4//! concurrent access. This is the multi-threaded counterpart to [`GdsfCache`](crate::GdsfCache).
5//!
6//! # How It Works
7//!
8//! GDSF (Greedy Dual-Size Frequency) is designed for caching variable-size objects.
9//! Priority is calculated as: `(Frequency / Size) + GlobalAge`
10//!
11//! This formula favors:
12//! - **Smaller objects**: More items fit in cache
13//! - **Frequently accessed objects**: Higher hit rates
14//! - **Newer objects**: Via aging mechanism
15//!
16//! The cache partitions keys across multiple independent segments using hash-based sharding.
17//!
18//! ```text
19//! ┌──────────────────────────────────────────────────────────────────────────────┐
20//! │ ConcurrentGdsfCache │
21//! │ │
22//! │ hash(key) % N ──▶ Segment Selection │
23//! │ │
24//! │ ┌────────────────────┐ ┌────────────────────┐ ┌────────────────────┐ │
25//! │ │ Segment 0 │ │ Segment 1 │ ... │ Segment N-1 │ │
26//! │ │ max_size=625MB │ │ max_size=625MB │ │ max_size=625MB │ │
27//! │ │ age=1.5 │ │ age=2.3 │ │ age=1.8 │ │
28//! │ │ ┌──────────────┐ │ │ ┌──────────────┐ │ │ ┌──────────────┐ │ │
29//! │ │ │ Mutex │ │ │ │ Mutex │ │ │ │ Mutex │ │ │
30//! │ │ └──────┬───────┘ │ │ └──────┬───────┘ │ │ └──────┬───────┘ │ │
31//! │ │ │ │ │ │ │ │ │ │ │
32//! │ │ ┌──────▼───────┐ │ │ ┌──────▼───────┐ │ │ ┌──────▼───────┐ │ │
33//! │ │ │ GdsfSegment │ │ │ │ GdsfSegment │ │ │ │ GdsfSegment │ │ │
34//! │ │ │ (priority │ │ │ │ (priority │ │ │ │ (priority │ │ │
35//! │ │ │ lists) │ │ │ │ lists) │ │ │ │ lists) │ │ │
36//! │ │ └──────────────┘ │ │ └──────────────┘ │ │ └──────────────┘ │ │
37//! │ └────────────────────┘ └────────────────────┘ └────────────────────┘ │
38//! └──────────────────────────────────────────────────────────────────────────────┘
39//! ```
40//!
41//! ## Per-Segment Size Tracking
42//!
43//! Each segment has its own:
44//! - **max_size**: Total size = cache_max_size / segment_count
45//! - **current_size**: Sum of item sizes in this segment
46//! - **global_age**: Aging factor (incremented on evictions)
47//!
48//! ## Trade-offs
49//!
50//! - **Pros**: Near-linear scaling, size-aware eviction per segment
51//! - **Cons**: Size distribution depends on key hashing. Uneven key distribution
52//! can cause some segments to be fuller than others.
53//!
54//! # API Note: Size Parameter Required
55//!
56//! Unlike other caches, GDSF's `put()` requires a size parameter:
57//!
58//! ```rust,ignore
59//! // Standard caches
60//! lru_cache.put(key, value, 1);
61//!
62//! // GDSF requires size
63//! gdsf_cache.put(key, value, 2048); // size in bytes
64//! ```
65//!
66//! # Performance Characteristics
67//!
68//! | Metric | Value |
69//! |--------|-------|
70//! | Get/Put/Remove | O(log P) per segment |
71//! | Concurrency | Near-linear scaling up to segment count |
72//! | Memory overhead | ~170 bytes per entry + one Mutex per segment |
73//! | Size-awareness | Excellent (per-segment size tracking) |
74//!
75//! Where P = distinct priority buckets per segment. Priority = (frequency/size) + age.
76//!
77//! # When to Use
78//!
79//! **Use ConcurrentGdsfCache when:**
80//! - Multiple threads need cache access
81//! - Caching variable-size objects (images, files, API responses)
82//! - Total size budget is more important than item count
83//! - CDN-like workloads with diverse object sizes
84//!
85//! **Consider alternatives when:**
86//! - Single-threaded access only → use `GdsfCache`
87//! - Uniform-size objects → simpler caches work equally well
88//! - Need global size coordination → use `Mutex<GdsfCache>`
89//! - Entry-count is the primary constraint → use `ConcurrentLruCache`
90//!
91//! # Thread Safety
92//!
93//! `ConcurrentGdsfCache` is `Send + Sync` and can be shared via `Arc`.
94//!
95//! # Example
96//!
97//! ```rust,ignore
98//! use cache_rs::concurrent::ConcurrentGdsfCache;
99//! use cache_rs::config::{ConcurrentCacheConfig, ConcurrentGdsfCacheConfig, GdsfCacheConfig};
100//! use std::num::NonZeroUsize;
101//! use std::sync::Arc;
102//! use std::thread;
103//!
104//! // 10GB cache distributed across segments
105//! let config: ConcurrentGdsfCacheConfig = ConcurrentCacheConfig {
106//! base: GdsfCacheConfig {
107//! capacity: NonZeroUsize::new(100_000).unwrap(),
108//! initial_age: 0.0,
109//! max_size: 10 * 1024 * 1024 * 1024,
110//! },
111//! segments: 16,
112//! };
113//! let cache = Arc::new(ConcurrentGdsfCache::init(config, None));
114//!
115//! // Simulate CDN edge cache with multiple worker threads
116//! let handles: Vec<_> = (0..4).map(|worker| {
117//! let cache = Arc::clone(&cache);
118//! thread::spawn(move || {
119//! for i in 0..1000 {
120//! // Simulate varying object sizes
121//! let size = if i % 10 == 0 {
122//! 1024 * 1024 // 1MB (large objects)
123//! } else {
124//! 4096 // 4KB (small objects)
125//! };
126//!
127//! let key = format!("asset-{}-{}", worker, i);
128//! let data = vec![0u8; size as usize];
129//! cache.put(key.clone(), data, size);
130//!
131//! // Simulate cache reads
132//! let _ = cache.get(&key);
133//! }
134//! })
135//! }).collect();
136//!
137//! for h in handles {
138//! h.join().unwrap();
139//! }
140//!
141//! println!("Cache size: {} bytes", cache.current_size());
142//! ```
143//!
144//! # Size-Based Eviction Example
145//!
146//! ```rust,ignore
147//! use cache_rs::concurrent::ConcurrentGdsfCache;
148//! use cache_rs::config::{ConcurrentCacheConfig, ConcurrentGdsfCacheConfig, GdsfCacheConfig};
149//! use std::num::NonZeroUsize;
150//!
151//! // 10MB cache
152//! let config: ConcurrentGdsfCacheConfig = ConcurrentCacheConfig {
153//! base: GdsfCacheConfig {
154//! capacity: NonZeroUsize::new(10_000).unwrap(),
155//! initial_age: 0.0,
156//! max_size: 10 * 1024 * 1024,
157//! },
158//! segments: 16,
159//! };
160//! let cache = ConcurrentGdsfCache::init(config, None);
161//!
162//! // Insert small frequently-accessed items
163//! for i in 0..100 {
164//! cache.put(format!("small-{}", i), vec![0u8; 1024], 1024);
165//! // Access multiple times to increase frequency
166//! for _ in 0..5 {
167//! let _ = cache.get(&format!("small-{}", i));
168//! }
169//! }
170//!
171//! // Insert large item - may evict multiple small items based on priority
//! cache.put("large".to_string(), vec![0u8; 5 * 1024 * 1024], 5 * 1024 * 1024);
173//!
174//! // GDSF may choose to keep small popular items over one large item
175//! ```
176
177extern crate alloc;
178
179use crate::gdsf::GdsfSegment;
180use crate::metrics::CacheMetrics;
181use alloc::boxed::Box;
182use alloc::collections::BTreeMap;
183use alloc::string::String;
184use alloc::vec::Vec;
185use core::borrow::Borrow;
186use core::hash::{BuildHasher, Hash};
187use core::num::NonZeroUsize;
188use parking_lot::Mutex;
189
190#[cfg(feature = "hashbrown")]
191use hashbrown::DefaultHashBuilder;
192
193#[cfg(not(feature = "hashbrown"))]
194use std::collections::hash_map::RandomState as DefaultHashBuilder;
195
/// A thread-safe GDSF cache with segmented storage for high concurrency.
///
/// GDSF (Greedy Dual-Size Frequency) is designed for caching variable-size objects.
/// The `put` method requires specifying the object size in addition to key and value.
pub struct ConcurrentGdsfCache<K, V, S = DefaultHashBuilder> {
    // Fixed array of independently locked segments. A key's owning segment is
    // chosen by hashing it with `hash_builder` modulo the segment count.
    segments: Box<[Mutex<GdsfSegment<K, V, S>>]>,
    // Top-level hasher used for segment selection; `init` also hands each
    // segment its own clone for segment-local hashing.
    hash_builder: S,
}
204
205impl<K, V> ConcurrentGdsfCache<K, V, DefaultHashBuilder>
206where
207 K: Hash + Eq + Clone + Send,
208 V: Clone + Send,
209{
210 /// Creates a new concurrent GDSF cache from a configuration.
211 ///
212 /// This is the **recommended** way to create a concurrent GDSF cache.
213 ///
214 /// # Arguments
215 ///
216 /// * `config` - The cache configuration specifying capacity, max size, and segments
217 /// * `hasher` - Optional custom hash builder. If `None`, uses the default hash builder.
218 pub fn init(
219 config: crate::config::ConcurrentGdsfCacheConfig,
220 hasher: Option<DefaultHashBuilder>,
221 ) -> Self {
222 let segment_count = config.segments;
223 let capacity = config.base.capacity;
224 let max_size = config.base.max_size;
225 let initial_age = config.base.initial_age;
226
227 let segment_capacity = capacity.get() / segment_count;
228 let segment_cap = NonZeroUsize::new(segment_capacity.max(1)).unwrap();
229 let segment_max_size = max_size / segment_count as u64;
230
231 let hash_builder = hasher.unwrap_or_default();
232
233 let segments: Vec<_> = (0..segment_count)
234 .map(|_| {
235 let segment_config = crate::config::GdsfCacheConfig {
236 capacity: segment_cap,
237 initial_age,
238 max_size: segment_max_size,
239 };
240 Mutex::new(GdsfSegment::init(segment_config, hash_builder.clone()))
241 })
242 .collect();
243
244 Self {
245 segments: segments.into_boxed_slice(),
246 hash_builder,
247 }
248 }
249}
250
251impl<K, V, S> ConcurrentGdsfCache<K, V, S>
252where
253 K: Hash + Eq + Clone + Send,
254 V: Clone + Send,
255 S: BuildHasher + Clone + Send,
256{
257 #[inline]
258 fn segment_index<Q>(&self, key: &Q) -> usize
259 where
260 K: Borrow<Q>,
261 Q: ?Sized + Hash,
262 {
263 (self.hash_builder.hash_one(key) as usize) % self.segments.len()
264 }
265
266 /// Returns the total capacity across all segments (in size units).
267 pub fn capacity(&self) -> usize {
268 let mut total = 0usize;
269 for segment in self.segments.iter() {
270 total += segment.lock().cap().get();
271 }
272 total
273 }
274
275 /// Returns the number of segments in the cache.
276 pub fn segment_count(&self) -> usize {
277 self.segments.len()
278 }
279
280 /// Returns the total number of entries across all segments.
281 pub fn len(&self) -> usize {
282 let mut total = 0usize;
283 for segment in self.segments.iter() {
284 total += segment.lock().len();
285 }
286 total
287 }
288
289 /// Returns `true` if the cache contains no entries.
290 pub fn is_empty(&self) -> bool {
291 for segment in self.segments.iter() {
292 if !segment.lock().is_empty() {
293 return false;
294 }
295 }
296 true
297 }
298
299 /// Gets a value from the cache.
300 ///
301 /// This clones the value to avoid holding the lock. For zero-copy access,
302 /// use `get_with()` instead.
303 pub fn get<Q>(&self, key: &Q) -> Option<V>
304 where
305 K: Borrow<Q> + Clone,
306 Q: ?Sized + Hash + Eq,
307 {
308 let idx = self.segment_index(key);
309 let mut segment = self.segments[idx].lock();
310 segment.get(key)
311 }
312
313 /// Gets a value and applies a function to it while holding the lock.
314 ///
315 /// This is more efficient than `get()` when you only need to read from the value,
316 /// as it avoids cloning.
317 pub fn get_with<Q, F, R>(&self, key: &Q, f: F) -> Option<R>
318 where
319 K: Borrow<Q> + Clone,
320 Q: ?Sized + Hash + Eq,
321 F: FnOnce(&V) -> R,
322 {
323 let idx = self.segment_index(key);
324 let mut segment = self.segments[idx].lock();
325 segment.get(key).as_ref().map(f)
326 }
327
328 /// Inserts a key-value pair with its size into the cache.
329 ///
330 /// GDSF uses size for priority calculation. Use `SIZE_UNIT` (1) for count-based caching.
331 /// Returns `None` if no entries were evicted. Returns `Some(vec)` containing all
332 /// `(key, value)` pairs evicted from the cache as part of this operation to make
333 /// room for the new entry.
334 ///
335 /// Overwriting an existing key without triggering eviction returns `None`.
336 pub fn put(&self, key: K, value: V, size: u64) -> Option<Vec<(K, V)>>
337 where
338 K: Clone,
339 {
340 let idx = self.segment_index(&key);
341 let mut segment = self.segments[idx].lock();
342 segment.put(key, value, size)
343 }
344
345 /// Removes a key from the cache, returning the value if it existed.
346 pub fn remove<Q>(&self, key: &Q) -> Option<V>
347 where
348 K: Borrow<Q>,
349 Q: ?Sized + Hash + Eq,
350 {
351 let idx = self.segment_index(key);
352 let mut segment = self.segments[idx].lock();
353 segment.remove(key)
354 }
355
356 /// Checks if the cache contains a key without updating priority.
357 ///
358 /// This is a pure existence check that does **not** update the entry's
359 /// priority or frequency.
360 ///
361 /// # Example
362 ///
363 /// ```rust,ignore
364 /// if cache.contains(&"key".to_string()) {
365 /// println!("Key exists!");
366 /// }
367 /// ```
368 pub fn contains<Q>(&self, key: &Q) -> bool
369 where
370 K: Borrow<Q>,
371 Q: ?Sized + Hash + Eq,
372 {
373 let idx = self.segment_index(key);
374 let segment = self.segments[idx].lock();
375 segment.contains(key)
376 }
377
378 /// Returns a clone of the value without updating priority or access metadata.
379 ///
380 /// Unlike [`get()`](Self::get), this does NOT recalculate the entry's GDSF
381 /// priority or change its position. Returns a cloned value because the
382 /// internal lock cannot be held across the return boundary.
383 ///
384 /// # Example
385 ///
386 /// ```rust,ignore
387 /// let value = cache.peek(&"key".to_string());
388 /// ```
389 pub fn peek<Q>(&self, key: &Q) -> Option<V>
390 where
391 K: Borrow<Q>,
392 Q: ?Sized + Hash + Eq,
393 V: Clone,
394 {
395 let idx = self.segment_index(key);
396 let segment = self.segments[idx].lock();
397 segment.peek(key).cloned()
398 }
399
400 /// Clears all entries from the cache.
401 pub fn clear(&self) {
402 for segment in self.segments.iter() {
403 segment.lock().clear();
404 }
405 }
406
407 /// Returns the current total size of cached content across all segments.
408 pub fn current_size(&self) -> u64 {
409 self.segments.iter().map(|s| s.lock().current_size()).sum()
410 }
411
412 /// Returns the maximum content size the cache can hold across all segments.
413 pub fn max_size(&self) -> u64 {
414 self.segments.iter().map(|s| s.lock().max_size()).sum()
415 }
416}
417
418impl<K, V, S> CacheMetrics for ConcurrentGdsfCache<K, V, S>
419where
420 K: Hash + Eq + Clone + Send,
421 V: Clone + Send,
422 S: BuildHasher + Clone + Send,
423{
424 fn metrics(&self) -> BTreeMap<String, f64> {
425 let mut aggregated = BTreeMap::new();
426 for segment in self.segments.iter() {
427 let segment_metrics = segment.lock().metrics().metrics();
428 for (key, value) in segment_metrics {
429 *aggregated.entry(key).or_insert(0.0) += value;
430 }
431 }
432 aggregated
433 }
434
435 fn algorithm_name(&self) -> &'static str {
436 "ConcurrentGDSF"
437 }
438}
439
// SAFETY: the cache owns its data behind `parking_lot::Mutex` guards plus an
// owned hasher, so moving it between threads is sound when `K`, `V`, and `S`
// are `Send`. NOTE(review): assumes `GdsfSegment` holds no thread-affine state
// (e.g. raw pointers with stronger invariants) — confirm against its definition.
unsafe impl<K: Send, V: Send, S: Send> Send for ConcurrentGdsfCache<K, V, S> {}
// SAFETY: all shared access to segment contents is serialized through the
// per-segment `Mutex`es; the hasher is only read via `&self`, hence the extra
// `S: Sync` bound. Same caveat as above regarding `GdsfSegment` internals.
unsafe impl<K: Send, V: Send, S: Send + Sync> Sync for ConcurrentGdsfCache<K, V, S> {}
442
443impl<K, V, S> core::fmt::Debug for ConcurrentGdsfCache<K, V, S>
444where
445 K: Hash + Eq + Clone + Send,
446 V: Clone + Send,
447 S: BuildHasher + Clone + Send,
448{
449 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
450 f.debug_struct("ConcurrentGdsfCache")
451 .field("segment_count", &self.segments.len())
452 .field("total_len", &self.len())
453 .finish()
454 }
455}
456
#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::{ConcurrentCacheConfig, ConcurrentGdsfCacheConfig, GdsfCacheConfig};

    extern crate std;
    use std::string::ToString;
    use std::sync::Arc;
    use std::thread;
    use std::vec::Vec;

    /// Builds a test config with the given total capacity and segment count.
    /// `max_size` is unbounded so these tests exercise entry-count behavior
    /// rather than byte-budget eviction.
    fn make_config(capacity: usize, segments: usize) -> ConcurrentGdsfCacheConfig {
        ConcurrentCacheConfig {
            base: GdsfCacheConfig {
                capacity: NonZeroUsize::new(capacity).unwrap(),
                initial_age: 0.0,
                max_size: u64::MAX,
            },
            segments,
        }
    }

    // Smoke test: put/get round-trips through hashed segment selection.
    #[test]
    fn test_basic_operations() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        cache.put("a".to_string(), 1, 100u64);
        cache.put("b".to_string(), 2, 200u64);

        assert_eq!(cache.get(&"a".to_string()), Some(1));
        assert_eq!(cache.get(&"b".to_string()), Some(2));
    }

    // Hammers the cache from several threads; the shared Arc plus per-segment
    // locking must keep this free of panics and data loss.
    #[test]
    fn test_concurrent_access() {
        let cache: Arc<ConcurrentGdsfCache<String, i32>> =
            Arc::new(ConcurrentGdsfCache::init(make_config(100000, 16), None));
        let num_threads = 8;
        let ops_per_thread = 500;

        let mut handles: Vec<std::thread::JoinHandle<()>> = Vec::new();

        for t in 0..num_threads {
            let cache = Arc::clone(&cache);
            handles.push(thread::spawn(move || {
                for i in 0..ops_per_thread {
                    let key = std::format!("key_{}_{}", t, i);
                    let size = (10 + (i % 100)) as u64;
                    cache.put(key.clone(), i, size);
                    let _ = cache.get(&key);
                }
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }

        assert!(!cache.is_empty());
    }

    // Total capacity may be rounded down by integer division across segments,
    // hence the range assertion rather than an exact value.
    #[test]
    fn test_capacity() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        // Capacity is distributed across segments
        let capacity = cache.capacity();
        assert!(capacity >= 16);
        assert!(capacity <= 10000);
    }

    #[test]
    fn test_segment_count() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 8), None);

        assert_eq!(cache.segment_count(), 8);
    }

    // len() and is_empty() aggregate across all segments.
    #[test]
    fn test_len_and_is_empty() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        assert!(cache.is_empty());
        assert_eq!(cache.len(), 0);

        cache.put("key1".to_string(), 1, 100);
        assert_eq!(cache.len(), 1);
        assert!(!cache.is_empty());

        cache.put("key2".to_string(), 2, 200);
        assert_eq!(cache.len(), 2);
    }

    // remove() returns the old value, and removing a missing key is a no-op.
    #[test]
    fn test_remove() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        cache.put("key1".to_string(), 1, 100);
        cache.put("key2".to_string(), 2, 200);

        assert_eq!(cache.remove(&"key1".to_string()), Some(1));
        assert_eq!(cache.len(), 1);
        assert_eq!(cache.get(&"key1".to_string()), None);

        assert_eq!(cache.remove(&"nonexistent".to_string()), None);
    }

    // clear() must empty every segment, not just the first.
    #[test]
    fn test_clear() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        cache.put("key1".to_string(), 1, 100);
        cache.put("key2".to_string(), 2, 200);
        cache.put("key3".to_string(), 3, 300);

        assert_eq!(cache.len(), 3);

        cache.clear();

        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());
        assert_eq!(cache.get(&"key1".to_string()), None);
    }

    #[test]
    fn test_contains_key() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        cache.put("exists".to_string(), 1, 100);

        assert!(cache.contains(&"exists".to_string()));
        assert!(!cache.contains(&"missing".to_string()));
    }

    // get_with applies the closure under the lock and returns its result.
    #[test]
    fn test_get_with() {
        let cache: ConcurrentGdsfCache<String, String> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        cache.put("key".to_string(), "hello world".to_string(), 100);

        let len = cache.get_with(&"key".to_string(), |v: &String| v.len());
        assert_eq!(len, Some(11));

        let missing = cache.get_with(&"missing".to_string(), |v: &String| v.len());
        assert_eq!(missing, None);
    }

    // Exercises the GDSF (frequency/size) priority path; only asserts the
    // cache survives, since eviction outcomes depend on key-to-segment hashing.
    #[test]
    fn test_size_aware_eviction() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(1000, 16), None);

        // Add items with different sizes
        cache.put("small".to_string(), 1, 100);
        cache.put("medium".to_string(), 2, 500);
        cache.put("large".to_string(), 3, 800);

        // Access small item multiple times
        for _ in 0..10 {
            let _ = cache.get(&"small".to_string());
        }

        // Add more items to trigger eviction
        cache.put("another".to_string(), 4, 200);

        // Small item with high frequency should be retained
        assert!(!cache.is_empty());
    }

    #[test]
    fn test_eviction_on_capacity() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(5000, 16), None);

        // Fill the cache with various sizes
        for i in 0..20 {
            let size = (100 + i * 50) as u64;
            cache.put(std::format!("key{}", i), i, size);
        }

        // Cache size should be managed
        assert!(!cache.is_empty());
    }

    // Metrics must aggregate (sum) across all segments into one map.
    #[test]
    fn test_metrics() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        cache.put("a".to_string(), 1, 100);
        cache.put("b".to_string(), 2, 200);

        let metrics = cache.metrics();
        // Metrics aggregation across segments
        assert!(!metrics.is_empty());
    }

    #[test]
    fn test_algorithm_name() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        assert_eq!(cache.algorithm_name(), "ConcurrentGDSF");
    }

    // All read/remove operations on an empty cache must be safe no-ops.
    #[test]
    fn test_empty_cache_operations() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        assert!(cache.is_empty());
        assert_eq!(cache.len(), 0);
        assert_eq!(cache.get(&"missing".to_string()), None);
        assert_eq!(cache.remove(&"missing".to_string()), None);
        assert!(!cache.contains(&"missing".to_string()));
    }

    // &str lookups must hash to the same segment as the owned String key
    // (the Borrow<Q> bound on get/contains/remove).
    #[test]
    fn test_borrowed_key_lookup() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        cache.put("test_key".to_string(), 42, 100);

        // Test with borrowed key
        let key_str = "test_key";
        assert_eq!(cache.get(key_str), Some(42));
        assert!(cache.contains(key_str));
        assert_eq!(cache.remove(key_str), Some(42));
    }

    #[test]
    fn test_variable_sizes() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        // Add items with various sizes
        cache.put("tiny".to_string(), 1, 10);
        cache.put("small".to_string(), 2, 100);
        cache.put("medium".to_string(), 3, 500);
        cache.put("large".to_string(), 4, 1000);

        // All items should be present
        assert_eq!(cache.len(), 4);
        assert_eq!(cache.get(&"tiny".to_string()), Some(1));
        assert_eq!(cache.get(&"small".to_string()), Some(2));
        assert_eq!(cache.get(&"medium".to_string()), Some(3));
        assert_eq!(cache.get(&"large".to_string()), Some(4));
    }

    // Interaction of size and frequency in the priority formula; asserts only
    // survival because exact eviction order depends on segment hashing.
    #[test]
    fn test_frequency_and_size_interaction() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(5000, 16), None);

        // Add large item
        cache.put("large".to_string(), 1, 3000);

        // Add and frequently access small items
        for i in 0..10 {
            let key = std::format!("small{}", i);
            cache.put(key.clone(), i, 100);
            for _ in 0..5 {
                let _ = cache.get(&key);
            }
        }

        // Frequently accessed small items should have good priority
        assert!(!cache.is_empty());
    }

    #[test]
    fn test_contains_non_promoting() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        cache.put("a".to_string(), 1, 100);
        cache.put("b".to_string(), 2, 100);

        // contains() should check without updating priority
        assert!(cache.contains(&"a".to_string()));
        assert!(cache.contains(&"b".to_string()));
        assert!(!cache.contains(&"c".to_string()));
    }
}
749}