Skip to main content

cache_rs/concurrent/
gdsf.rs

1//! Concurrent GDSF Cache Implementation
2//!
3//! A thread-safe GDSF cache using lock striping (segmented storage) for high-performance
4//! concurrent access. This is the multi-threaded counterpart to [`GdsfCache`](crate::GdsfCache).
5//!
6//! # How It Works
7//!
8//! GDSF (Greedy Dual-Size Frequency) is designed for caching variable-size objects.
9//! Priority is calculated as: `(Frequency / Size) + GlobalAge`
10//!
11//! This formula favors:
12//! - **Smaller objects**: More items fit in cache
13//! - **Frequently accessed objects**: Higher hit rates
14//! - **Newer objects**: Via aging mechanism
15//!
16//! The cache partitions keys across multiple independent segments using hash-based sharding.
17//!
18//! ```text
19//! ┌──────────────────────────────────────────────────────────────────────────────┐
20//! │                        ConcurrentGdsfCache                                   │
21//! │                                                                              │
22//! │  hash(key) % N  ──▶  Segment Selection                                       │
23//! │                                                                              │
24//! │  ┌────────────────────┐ ┌────────────────────┐     ┌────────────────────┐    │
25//! │  │    Segment 0       │ │    Segment 1       │ ... │   Segment N-1      │    │
26//! │  │  max_size=625MB    │ │  max_size=625MB    │     │  max_size=625MB    │    │
27//! │  │  age=1.5           │ │  age=2.3           │     │  age=1.8           │    │
28//! │  │  ┌──────────────┐  │ │  ┌──────────────┐  │     │  ┌──────────────┐  │    │
29//! │  │  │    Mutex     │  │ │  │    Mutex     │  │     │  │    Mutex     │  │    │
30//! │  │  └──────┬───────┘  │ │  └──────┬───────┘  │     │  └──────┬───────┘  │    │
31//! │  │         │          │ │         │          │     │         │          │    │
32//! │  │  ┌──────▼───────┐  │ │  ┌──────▼───────┐  │     │  ┌──────▼───────┐  │    │
33//! │  │  │  GdsfSegment │  │ │  │  GdsfSegment │  │     │  │  GdsfSegment │  │    │
34//! │  │  │  (priority   │  │ │  │  (priority   │  │     │  │  (priority   │  │    │
35//! │  │  │   lists)     │  │ │  │   lists)     │  │     │  │   lists)     │  │    │
36//! │  │  └──────────────┘  │ │  └──────────────┘  │     │  └──────────────┘  │    │
37//! │  └────────────────────┘ └────────────────────┘     └────────────────────┘    │
38//! └──────────────────────────────────────────────────────────────────────────────┘
39//! ```
40//!
41//! ## Per-Segment Size Tracking
42//!
43//! Each segment has its own:
//! - **max_size**: Per-segment size budget = cache max_size / segment_count
45//! - **current_size**: Sum of item sizes in this segment
46//! - **global_age**: Aging factor (incremented on evictions)
47//!
48//! ## Trade-offs
49//!
50//! - **Pros**: Near-linear scaling, size-aware eviction per segment
51//! - **Cons**: Size distribution depends on key hashing. Uneven key distribution
52//!   can cause some segments to be fuller than others.
53//!
54//! # API Note: Size Parameter Required
55//!
56//! Unlike other caches, GDSF's `put()` requires a size parameter:
57//!
58//! ```rust,ignore
59//! // Standard caches
60//! lru_cache.put(key, value);
61//!
62//! // GDSF requires size
63//! gdsf_cache.put(key, value, 2048);  // size in bytes
64//! ```
65//!
66//! # Performance Characteristics
67//!
68//! | Metric | Value |
69//! |--------|-------|
70//! | Get/Put/Remove | O(log P) per segment |
71//! | Concurrency | Near-linear scaling up to segment count |
72//! | Memory overhead | ~170 bytes per entry + one Mutex per segment |
73//! | Size-awareness | Excellent (per-segment size tracking) |
74//!
75//! Where P = distinct priority buckets per segment. Priority = (frequency/size) + age.
76//!
77//! # When to Use
78//!
79//! **Use ConcurrentGdsfCache when:**
80//! - Multiple threads need cache access
81//! - Caching variable-size objects (images, files, API responses)
82//! - Total size budget is more important than item count
83//! - CDN-like workloads with diverse object sizes
84//!
85//! **Consider alternatives when:**
86//! - Single-threaded access only → use `GdsfCache`
87//! - Uniform-size objects → simpler caches work equally well
88//! - Need global size coordination → use `Mutex<GdsfCache>`
89//! - Entry-count is the primary constraint → use `ConcurrentLruCache`
90//!
91//! # Thread Safety
92//!
93//! `ConcurrentGdsfCache` is `Send + Sync` and can be shared via `Arc`.
94//!
95//! # Example
96//!
97//! ```rust,ignore
98//! use cache_rs::concurrent::ConcurrentGdsfCache;
99//! use cache_rs::config::{ConcurrentCacheConfig, ConcurrentGdsfCacheConfig, GdsfCacheConfig};
100//! use std::num::NonZeroUsize;
101//! use std::sync::Arc;
102//! use std::thread;
103//!
104//! // 10GB cache distributed across segments
105//! let config: ConcurrentGdsfCacheConfig = ConcurrentCacheConfig {
106//!     base: GdsfCacheConfig {
107//!         capacity: NonZeroUsize::new(100_000).unwrap(),
108//!         initial_age: 0.0,
109//!         max_size: 10 * 1024 * 1024 * 1024,
110//!     },
111//!     segments: 16,
112//! };
113//! let cache = Arc::new(ConcurrentGdsfCache::init(config, None));
114//!
115//! // Simulate CDN edge cache with multiple worker threads
116//! let handles: Vec<_> = (0..4).map(|worker| {
117//!     let cache = Arc::clone(&cache);
118//!     thread::spawn(move || {
119//!         for i in 0..1000 {
120//!             // Simulate varying object sizes
121//!             let size = if i % 10 == 0 {
122//!                 1024 * 1024  // 1MB (large objects)
123//!             } else {
124//!                 4096  // 4KB (small objects)
125//!             };
126//!             
127//!             let key = format!("asset-{}-{}", worker, i);
128//!             let data = vec![0u8; size as usize];
129//!             cache.put(key.clone(), data, size);
130//!             
131//!             // Simulate cache reads
132//!             let _ = cache.get(&key);
133//!         }
134//!     })
135//! }).collect();
136//!
137//! for h in handles {
138//!     h.join().unwrap();
139//! }
140//!
141//! println!("Cache size: {} bytes", cache.current_size());
142//! ```
143//!
144//! # Size-Based Eviction Example
145//!
146//! ```rust,ignore
147//! use cache_rs::concurrent::ConcurrentGdsfCache;
148//! use cache_rs::config::{ConcurrentCacheConfig, ConcurrentGdsfCacheConfig, GdsfCacheConfig};
149//! use std::num::NonZeroUsize;
150//!
151//! // 10MB cache
152//! let config: ConcurrentGdsfCacheConfig = ConcurrentCacheConfig {
153//!     base: GdsfCacheConfig {
154//!         capacity: NonZeroUsize::new(10_000).unwrap(),
155//!         initial_age: 0.0,
156//!         max_size: 10 * 1024 * 1024,
157//!     },
158//!     segments: 16,
159//! };
160//! let cache = ConcurrentGdsfCache::init(config, None);
161//!
162//! // Insert small frequently-accessed items
163//! for i in 0..100 {
164//!     cache.put(format!("small-{}", i), vec![0u8; 1024], 1024);
165//!     // Access multiple times to increase frequency
166//!     for _ in 0..5 {
167//!         let _ = cache.get(&format!("small-{}", i));
168//!     }
169//! }
170//!
171//! // Insert large item - may evict multiple small items based on priority
172//! cache.put("large".to_string(), vec![0u8; 5 * 1024 * 1024], 5 * 1024 * 1024);
173//!
174//! // GDSF may choose to keep small popular items over one large item
175//! ```
176
177extern crate alloc;
178
179use crate::gdsf::GdsfSegment;
180use crate::metrics::CacheMetrics;
181use alloc::boxed::Box;
182use alloc::collections::BTreeMap;
183use alloc::string::String;
184use alloc::vec::Vec;
185use core::borrow::Borrow;
186use core::hash::{BuildHasher, Hash};
187use core::num::NonZeroUsize;
188use parking_lot::Mutex;
189
190#[cfg(feature = "hashbrown")]
191use hashbrown::DefaultHashBuilder;
192
193#[cfg(not(feature = "hashbrown"))]
194use std::collections::hash_map::RandomState as DefaultHashBuilder;
195
/// A thread-safe GDSF cache with segmented storage for high concurrency.
///
/// GDSF (Greedy Dual-Size Frequency) is designed for caching variable-size objects.
/// The `put` method requires specifying the object size in addition to key and value.
pub struct ConcurrentGdsfCache<K, V, S = DefaultHashBuilder> {
    // Lock-striped storage: each key is owned by exactly one segment,
    // selected by hashing the key with `hash_builder`.
    segments: Box<[Mutex<GdsfSegment<K, V, S>>]>,
    // Cache-level hasher used only for segment selection; a clone of it is
    // handed to each segment at construction time.
    hash_builder: S,
}
204
205impl<K, V> ConcurrentGdsfCache<K, V, DefaultHashBuilder>
206where
207    K: Hash + Eq + Clone + Send,
208    V: Clone + Send,
209{
210    /// Creates a new concurrent GDSF cache from a configuration.
211    ///
212    /// This is the **recommended** way to create a concurrent GDSF cache.
213    ///
214    /// # Arguments
215    ///
216    /// * `config` - The cache configuration specifying capacity, max size, and segments
217    /// * `hasher` - Optional custom hash builder. If `None`, uses the default hash builder.
218    pub fn init(
219        config: crate::config::ConcurrentGdsfCacheConfig,
220        hasher: Option<DefaultHashBuilder>,
221    ) -> Self {
222        let segment_count = config.segments;
223        let capacity = config.base.capacity;
224        let max_size = config.base.max_size;
225        let initial_age = config.base.initial_age;
226
227        let segment_capacity = capacity.get() / segment_count;
228        let segment_cap = NonZeroUsize::new(segment_capacity.max(1)).unwrap();
229        let segment_max_size = max_size / segment_count as u64;
230
231        let hash_builder = hasher.unwrap_or_default();
232
233        let segments: Vec<_> = (0..segment_count)
234            .map(|_| {
235                let segment_config = crate::config::GdsfCacheConfig {
236                    capacity: segment_cap,
237                    initial_age,
238                    max_size: segment_max_size,
239                };
240                Mutex::new(GdsfSegment::init(segment_config, hash_builder.clone()))
241            })
242            .collect();
243
244        Self {
245            segments: segments.into_boxed_slice(),
246            hash_builder,
247        }
248    }
249}
250
251impl<K, V, S> ConcurrentGdsfCache<K, V, S>
252where
253    K: Hash + Eq + Clone + Send,
254    V: Clone + Send,
255    S: BuildHasher + Clone + Send,
256{
257    #[inline]
258    fn segment_index<Q>(&self, key: &Q) -> usize
259    where
260        K: Borrow<Q>,
261        Q: ?Sized + Hash,
262    {
263        (self.hash_builder.hash_one(key) as usize) % self.segments.len()
264    }
265
266    /// Returns the total capacity across all segments (in size units).
267    pub fn capacity(&self) -> usize {
268        let mut total = 0usize;
269        for segment in self.segments.iter() {
270            total += segment.lock().cap().get();
271        }
272        total
273    }
274
275    /// Returns the number of segments in the cache.
276    pub fn segment_count(&self) -> usize {
277        self.segments.len()
278    }
279
280    /// Returns the total number of entries across all segments.
281    pub fn len(&self) -> usize {
282        let mut total = 0usize;
283        for segment in self.segments.iter() {
284            total += segment.lock().len();
285        }
286        total
287    }
288
289    /// Returns `true` if the cache contains no entries.
290    pub fn is_empty(&self) -> bool {
291        for segment in self.segments.iter() {
292            if !segment.lock().is_empty() {
293                return false;
294            }
295        }
296        true
297    }
298
299    /// Gets a value from the cache.
300    ///
301    /// This clones the value to avoid holding the lock. For zero-copy access,
302    /// use `get_with()` instead.
303    pub fn get<Q>(&self, key: &Q) -> Option<V>
304    where
305        K: Borrow<Q> + Clone,
306        Q: ?Sized + Hash + Eq,
307    {
308        let idx = self.segment_index(key);
309        let mut segment = self.segments[idx].lock();
310        segment.get(key)
311    }
312
313    /// Gets a value and applies a function to it while holding the lock.
314    ///
315    /// This is more efficient than `get()` when you only need to read from the value,
316    /// as it avoids cloning.
317    pub fn get_with<Q, F, R>(&self, key: &Q, f: F) -> Option<R>
318    where
319        K: Borrow<Q> + Clone,
320        Q: ?Sized + Hash + Eq,
321        F: FnOnce(&V) -> R,
322    {
323        let idx = self.segment_index(key);
324        let mut segment = self.segments[idx].lock();
325        segment.get(key).as_ref().map(f)
326    }
327
328    /// Inserts a key-value pair with its size into the cache.
329    ///
330    /// Unlike other caches, GDSF requires the size of the object for priority calculation.
331    /// Returns the old value if the key was already present.
332    pub fn put(&self, key: K, value: V, size: u64) -> Option<V>
333    where
334        K: Clone,
335    {
336        let idx = self.segment_index(&key);
337        let mut segment = self.segments[idx].lock();
338        segment.put(key, value, size)
339    }
340
341    /// Removes a key from the cache, returning the value if it existed.
342    pub fn remove<Q>(&self, key: &Q) -> Option<V>
343    where
344        K: Borrow<Q>,
345        Q: ?Sized + Hash + Eq,
346    {
347        let idx = self.segment_index(key);
348        let mut segment = self.segments[idx].lock();
349        segment.pop(key)
350    }
351
352    /// Returns `true` if the cache contains the specified key.
353    pub fn contains_key<Q>(&self, key: &Q) -> bool
354    where
355        K: Borrow<Q> + Clone,
356        Q: ?Sized + Hash + Eq,
357    {
358        let idx = self.segment_index(key);
359        let segment = self.segments[idx].lock();
360        segment.contains_key(key)
361    }
362
363    /// Clears all entries from the cache.
364    pub fn clear(&self) {
365        for segment in self.segments.iter() {
366            segment.lock().clear();
367        }
368    }
369
370    /// Returns the current total size of cached content across all segments.
371    pub fn current_size(&self) -> u64 {
372        self.segments.iter().map(|s| s.lock().current_size()).sum()
373    }
374
375    /// Returns the maximum content size the cache can hold across all segments.
376    pub fn max_size(&self) -> u64 {
377        self.segments.iter().map(|s| s.lock().max_size()).sum()
378    }
379}
380
381impl<K, V, S> CacheMetrics for ConcurrentGdsfCache<K, V, S>
382where
383    K: Hash + Eq + Clone + Send,
384    V: Clone + Send,
385    S: BuildHasher + Clone + Send,
386{
387    fn metrics(&self) -> BTreeMap<String, f64> {
388        let mut aggregated = BTreeMap::new();
389        for segment in self.segments.iter() {
390            let segment_metrics = segment.lock().metrics().metrics();
391            for (key, value) in segment_metrics {
392                *aggregated.entry(key).or_insert(0.0) += value;
393            }
394        }
395        aggregated
396    }
397
398    fn algorithm_name(&self) -> &'static str {
399        "ConcurrentGDSF"
400    }
401}
402
// SAFETY: all shared mutable state is behind `parking_lot::Mutex`es and the
// stored `K`, `V`, `S` are bounded by `Send`. NOTE(review): these manual
// impls bypass auto-derivation — presumably `GdsfSegment` contains raw
// pointers (priority lists) that make it non-auto-Send; confirm its
// internals uphold this invariant.
unsafe impl<K: Send, V: Send, S: Send> Send for ConcurrentGdsfCache<K, V, S> {}
// SAFETY: as above; every `&self` method accesses segment state only through
// its Mutex, and `hash_builder` is only read (`S: Sync` required here).
unsafe impl<K: Send, V: Send, S: Send + Sync> Sync for ConcurrentGdsfCache<K, V, S> {}
405
406impl<K, V, S> core::fmt::Debug for ConcurrentGdsfCache<K, V, S>
407where
408    K: Hash + Eq + Clone + Send,
409    V: Clone + Send,
410    S: BuildHasher + Clone + Send,
411{
412    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
413        f.debug_struct("ConcurrentGdsfCache")
414            .field("segment_count", &self.segments.len())
415            .field("total_len", &self.len())
416            .finish()
417    }
418}
419
#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::{ConcurrentCacheConfig, ConcurrentGdsfCacheConfig, GdsfCacheConfig};

    extern crate std;
    use std::string::ToString;
    use std::sync::Arc;
    use std::thread;
    use std::vec::Vec;

    /// Builds a test config with the given total entry capacity and segment
    /// count; `max_size` is unbounded so only entry capacity constrains the
    /// cache in these tests.
    fn make_config(capacity: usize, segments: usize) -> ConcurrentGdsfCacheConfig {
        ConcurrentCacheConfig {
            base: GdsfCacheConfig {
                capacity: NonZeroUsize::new(capacity).unwrap(),
                initial_age: 0.0,
                max_size: u64::MAX,
            },
            segments,
        }
    }

    // Round-trip: values inserted with sizes can be read back.
    #[test]
    fn test_basic_operations() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        cache.put("a".to_string(), 1, 100u64);
        cache.put("b".to_string(), 2, 200u64);

        assert_eq!(cache.get(&"a".to_string()), Some(1));
        assert_eq!(cache.get(&"b".to_string()), Some(2));
    }

    // Smoke test: 8 threads doing disjoint-key put/get must not panic,
    // deadlock, or lose all data.
    #[test]
    fn test_concurrent_access() {
        let cache: Arc<ConcurrentGdsfCache<String, i32>> =
            Arc::new(ConcurrentGdsfCache::init(make_config(100000, 16), None));
        let num_threads = 8;
        let ops_per_thread = 500;

        let mut handles: Vec<std::thread::JoinHandle<()>> = Vec::new();

        for t in 0..num_threads {
            let cache = Arc::clone(&cache);
            handles.push(thread::spawn(move || {
                for i in 0..ops_per_thread {
                    // Keys are namespaced per thread, so threads never collide.
                    let key = std::format!("key_{}_{}", t, i);
                    let size = (10 + (i % 100)) as u64;
                    cache.put(key.clone(), i, size);
                    let _ = cache.get(&key);
                }
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }

        assert!(!cache.is_empty());
    }

    #[test]
    fn test_capacity() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        // Capacity is distributed across segments
        // (integer division may make the sum slightly less than requested,
        // but each of the 16 segments holds at least one entry).
        let capacity = cache.capacity();
        assert!(capacity >= 16);
        assert!(capacity <= 10000);
    }

    #[test]
    fn test_segment_count() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 8), None);

        assert_eq!(cache.segment_count(), 8);
    }

    // len()/is_empty() must track inserts across segment boundaries.
    #[test]
    fn test_len_and_is_empty() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        assert!(cache.is_empty());
        assert_eq!(cache.len(), 0);

        cache.put("key1".to_string(), 1, 100);
        assert_eq!(cache.len(), 1);
        assert!(!cache.is_empty());

        cache.put("key2".to_string(), 2, 200);
        assert_eq!(cache.len(), 2);
    }

    // remove() returns the stored value, deletes the entry, and returns
    // None for unknown keys.
    #[test]
    fn test_remove() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        cache.put("key1".to_string(), 1, 100);
        cache.put("key2".to_string(), 2, 200);

        assert_eq!(cache.remove(&"key1".to_string()), Some(1));
        assert_eq!(cache.len(), 1);
        assert_eq!(cache.get(&"key1".to_string()), None);

        assert_eq!(cache.remove(&"nonexistent".to_string()), None);
    }

    // clear() empties every segment, not just the first.
    #[test]
    fn test_clear() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        cache.put("key1".to_string(), 1, 100);
        cache.put("key2".to_string(), 2, 200);
        cache.put("key3".to_string(), 3, 300);

        assert_eq!(cache.len(), 3);

        cache.clear();

        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());
        assert_eq!(cache.get(&"key1".to_string()), None);
    }

    #[test]
    fn test_contains_key() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        cache.put("exists".to_string(), 1, 100);

        assert!(cache.contains_key(&"exists".to_string()));
        assert!(!cache.contains_key(&"missing".to_string()));
    }

    // get_with() applies the closure to present values and skips it for
    // missing keys.
    #[test]
    fn test_get_with() {
        let cache: ConcurrentGdsfCache<String, String> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        cache.put("key".to_string(), "hello world".to_string(), 100);

        let len = cache.get_with(&"key".to_string(), |v: &String| v.len());
        assert_eq!(len, Some(11));

        let missing = cache.get_with(&"missing".to_string(), |v: &String| v.len());
        assert_eq!(missing, None);
    }

    // Exercises size-weighted priorities; the final assertion is
    // intentionally weak because exact eviction depends on GdsfSegment
    // internals and key-to-segment hashing.
    #[test]
    fn test_size_aware_eviction() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(1000, 16), None);

        // Add items with different sizes
        cache.put("small".to_string(), 1, 100);
        cache.put("medium".to_string(), 2, 500);
        cache.put("large".to_string(), 3, 800);

        // Access small item multiple times
        for _ in 0..10 {
            let _ = cache.get(&"small".to_string());
        }

        // Add more items to trigger eviction
        cache.put("another".to_string(), 4, 200);

        // Small item with high frequency should be retained
        assert!(!cache.is_empty());
    }

    // Inserting many variably-sized items must not panic or empty the cache.
    #[test]
    fn test_eviction_on_capacity() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(5000, 16), None);

        // Fill the cache with various sizes
        for i in 0..20 {
            let size = (100 + i * 50) as u64;
            cache.put(std::format!("key{}", i), i, size);
        }

        // Cache size should be managed
        assert!(!cache.is_empty());
    }

    // Aggregated metrics should be non-empty once segments have activity.
    #[test]
    fn test_metrics() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        cache.put("a".to_string(), 1, 100);
        cache.put("b".to_string(), 2, 200);

        let metrics = cache.metrics();
        // Metrics aggregation across segments
        assert!(!metrics.is_empty());
    }

    #[test]
    fn test_algorithm_name() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        assert_eq!(cache.algorithm_name(), "ConcurrentGDSF");
    }

    // All read paths must be safe on a cache that has never seen an insert.
    #[test]
    fn test_empty_cache_operations() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        assert!(cache.is_empty());
        assert_eq!(cache.len(), 0);
        assert_eq!(cache.get(&"missing".to_string()), None);
        assert_eq!(cache.remove(&"missing".to_string()), None);
        assert!(!cache.contains_key(&"missing".to_string()));
    }

    // A `&str` must hash to the same segment as the `String` key it borrows
    // from (Borrow<str> contract), otherwise lookups would miss.
    #[test]
    fn test_borrowed_key_lookup() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        cache.put("test_key".to_string(), 42, 100);

        // Test with borrowed key
        let key_str = "test_key";
        assert_eq!(cache.get(key_str), Some(42));
        assert!(cache.contains_key(key_str));
        assert_eq!(cache.remove(key_str), Some(42));
    }

    // With an ample budget, items of widely varying sizes all coexist.
    #[test]
    fn test_variable_sizes() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(10000, 16), None);

        // Add items with various sizes
        cache.put("tiny".to_string(), 1, 10);
        cache.put("small".to_string(), 2, 100);
        cache.put("medium".to_string(), 3, 500);
        cache.put("large".to_string(), 4, 1000);

        // All items should be present
        assert_eq!(cache.len(), 4);
        assert_eq!(cache.get(&"tiny".to_string()), Some(1));
        assert_eq!(cache.get(&"small".to_string()), Some(2));
        assert_eq!(cache.get(&"medium".to_string()), Some(3));
        assert_eq!(cache.get(&"large".to_string()), Some(4));
    }

    // Frequency boosts via repeated get() should favor small hot items;
    // assertion is weak for the same reason as test_size_aware_eviction.
    #[test]
    fn test_frequency_and_size_interaction() {
        let cache: ConcurrentGdsfCache<String, i32> =
            ConcurrentGdsfCache::init(make_config(5000, 16), None);

        // Add large item
        cache.put("large".to_string(), 1, 3000);

        // Add and frequently access small items
        for i in 0..10 {
            let key = std::format!("small{}", i);
            cache.put(key.clone(), i, 100);
            for _ in 0..5 {
                let _ = cache.get(&key);
            }
        }

        // Frequently accessed small items should have good priority
        assert!(!cache.is_empty());
    }
}