// cache_rs/gdsf.rs
1//! Greedy Dual-Size Frequency (GDSF) cache implementation.
2//!
3//! GDSF is a sophisticated cache replacement algorithm that combines frequency, size,
4//! and aging to optimize cache performance for variable-sized objects. It's particularly
5//! well-suited for CDNs and web caches where objects have significantly different sizes.
6//!
7//! # Algorithm
8//!
9//! GDSF assigns a priority value to each cached item using the formula:
10//!
11//! ```text
12//! Priority = (Frequency * Cost_Factor / Size) + Global_Age
13//! ```
14//!
15//! Where:
16//! - `Frequency`: Number of times the item has been accessed
17//! - `Cost_Factor`: Optional weighting factor (defaults to 1.0)
18//! - `Size`: Size of the item in bytes (or other cost units)
19//! - `Global_Age`: A global aging factor that increases when items are evicted
20//!
21//! When the cache is full, the item with the lowest priority is evicted.
22//! After eviction, the global age is set to the priority of the evicted item.
23//!
24//! # Performance Characteristics
25//!
//! - **Time Complexity**:
//!   - Get: O(log P) where P is the number of distinct (quantized) priority
//!     buckets, due to the `BTreeMap` bucket lookups
//!   - Put: O(log P), plus eviction work when the cache is full
//!   - Remove: O(log P)
//!
//! - **Space Complexity**:
//!   - O(n) where n is the number of items
//!   - Higher overhead than LRU due to priority calculations
34//!
35//! # Size Considerations
36//!
37//! Unlike basic LRU or LFU caches, GDSF is size-aware and aims to maximize the hit ratio
38//! per byte of cache used. This makes it particularly effective when:
39//!
40//! - Items vary significantly in size
41//! - Storage space is at a premium
42//! - You want to optimize for hit rate relative to storage cost
43//!
44//! # CDN Use Cases
45//!
46//! GDSF is ideal for Content Delivery Networks because it:
47//! - Favors keeping small, frequently accessed items (e.g., thumbnails, icons)
48//! - Can intelligently evict large, infrequently accessed items (e.g., large videos)
49//! - Adapts to changing access patterns through the aging mechanism
50//!
51//! # Thread Safety
52//!
53//! This implementation is not thread-safe. For concurrent access, wrap the cache
54//! with a synchronization primitive such as `Mutex` or `RwLock`.
55
56extern crate alloc;
57
58use crate::config::GdsfCacheConfig;
59use crate::list::{Entry, List};
60use crate::metrics::{CacheMetrics, GdsfCacheMetrics};
61use alloc::boxed::Box;
62use alloc::collections::BTreeMap;
63use alloc::string::String;
64use core::borrow::Borrow;
65use core::hash::{BuildHasher, Hash};
66use core::mem;
67use core::num::NonZeroUsize;
68
69#[cfg(feature = "hashbrown")]
70use hashbrown::hash_map::DefaultHashBuilder;
71#[cfg(feature = "hashbrown")]
72use hashbrown::HashMap;
73
74#[cfg(not(feature = "hashbrown"))]
75use std::collections::hash_map::RandomState as DefaultHashBuilder;
76#[cfg(not(feature = "hashbrown"))]
77use std::collections::HashMap;
78
79/// Metadata for each cache entry in GDSF
80#[derive(Debug, Clone, Copy)]
81struct EntryMetadata<K, V> {
82 /// Frequency of access for this item
83 frequency: u64,
84 /// Size of this item
85 size: u64,
86 /// Calculated priority (frequency/size + global_age)
87 priority: f64,
88 /// Pointer to the entry in the priority list
89 node: *mut Entry<(K, V)>,
90}
91
/// An implementation of a Greedy Dual-Size Frequency (GDSF) cache.
///
/// GDSF combines frequency, size, and aging to make eviction decisions.
/// Items with higher frequency-to-size ratios and recent access patterns
/// are prioritized for retention in the cache.
///
/// # Examples
///
/// ```
/// use cache_rs::gdsf::GdsfCache;
/// use core::num::NonZeroUsize;
///
/// // Create a GDSF cache with capacity 3
/// let mut cache = GdsfCache::new(NonZeroUsize::new(3).unwrap());
///
/// // Add items with different sizes
/// cache.put("small", 1, 1); // key="small", value=1, size=1
/// cache.put("large", 2, 5); // key="large", value=2, size=5
/// cache.put("medium", 3, 3); // key="medium", value=3, size=3
///
/// // Access items to increase their frequency
/// assert_eq!(cache.get(&"small"), Some(1));
/// assert_eq!(cache.get(&"small"), Some(1)); // Higher frequency
/// ```
#[derive(Debug)]
pub struct GdsfCache<K, V, S = DefaultHashBuilder> {
    /// Configuration for the GDSF cache (capacity and initial age)
    config: GdsfCacheConfig,

    /// Global age value that increases when items are evicted; it is raised
    /// to the evicted item's priority so new items can compete with
    /// long-resident ones
    global_age: f64,

    /// Current minimum priority in the cache.
    /// NOTE(review): only maintained on insertion in `put`, not on eviction
    /// or priority updates, so it can be stale — confirm before relying on it
    min_priority: f64,

    /// Map from keys to their metadata (frequency, size, priority, node ptr)
    map: HashMap<K, EntryMetadata<K, V>, S>,

    /// Map from quantized priority (priority * 1000, truncated to u64) to the
    /// list of items with that priority.
    /// Items within each priority list are ordered by recency (LRU within priority)
    priority_lists: BTreeMap<u64, List<(K, V)>>,

    /// Metrics tracking for this cache instance
    metrics: GdsfCacheMetrics,
}
137
138impl<K: Hash + Eq, V: Clone, S: BuildHasher> GdsfCache<K, V, S> {
139 /// Creates a new GDSF cache with the specified capacity and hash builder.
140 ///
141 /// # Examples
142 ///
143 /// ```
144 /// use cache_rs::gdsf::GdsfCache;
145 /// use core::num::NonZeroUsize;
146 /// use std::collections::hash_map::RandomState;
147 ///
148 /// let cache: GdsfCache<&str, u32, _> = GdsfCache::with_hasher(
149 /// NonZeroUsize::new(10).unwrap(),
150 /// RandomState::new()
151 /// );
152 /// ```
153 pub fn with_hasher(cap: NonZeroUsize, hash_builder: S) -> Self {
154 let config = GdsfCacheConfig::new(cap);
155 let map_capacity = config.capacity().get().next_power_of_two();
156 let max_cache_size_bytes = config.capacity().get() as u64 * 128; // Estimate based on capacity
157
158 GdsfCache {
159 config,
160 global_age: config.initial_age(),
161 min_priority: 0.0,
162 map: HashMap::with_capacity_and_hasher(map_capacity, hash_builder),
163 priority_lists: BTreeMap::new(),
164 metrics: GdsfCacheMetrics::new(max_cache_size_bytes),
165 }
166 }
167
    /// Returns the maximum number of key-value pairs the cache can hold.
    pub fn cap(&self) -> NonZeroUsize {
        self.config.capacity()
    }

    /// Returns the current number of key-value pairs in the cache.
    pub fn len(&self) -> usize {
        self.map.len()
    }

    /// Returns `true` if the cache contains no key-value pairs.
    pub fn is_empty(&self) -> bool {
        self.map.is_empty()
    }

    /// Returns the current global age value.
    ///
    /// The global age starts at the configured initial age and is raised to
    /// the priority of each evicted item (see `evict_one`), which is how GDSF
    /// lets new items compete with long-resident popular ones.
    pub fn global_age(&self) -> f64 {
        self.global_age
    }
187
188 /// Estimates the size of a key-value pair in bytes for metrics tracking
189 fn estimate_object_size(&self, _key: &K, _value: &V) -> u64 {
190 // Simple estimation: key size + value size + overhead for pointers and metadata
191 mem::size_of::<K>() as u64 + mem::size_of::<V>() as u64 + 64
192 }
193
    /// Records a cache miss for metrics tracking (to be called by simulation system)
    ///
    /// `get`/`get_mut` do not record misses themselves; an external driver is
    /// expected to call this with the size of the object that missed.
    pub fn record_miss(&mut self, object_size: u64) {
        self.metrics.core.record_miss(object_size);
    }
198
199 /// Calculates the priority for an item based on GDSF formula.
200 fn calculate_priority(&self, frequency: u64, size: u64) -> f64 {
201 if size == 0 {
202 return f64::INFINITY; // Protect against division by zero
203 }
204 (frequency as f64 / size as f64) + self.global_age
205 }
206
    /// Updates the priority of an item and moves it to the appropriate priority list.
    ///
    /// Increments the item's access frequency, recomputes its GDSF priority,
    /// and relocates its list node into the bucket for the new priority
    /// (buckets are keyed by `priority * 1000` truncated to `u64`). Returns
    /// the item's (possibly new) node pointer.
    ///
    /// # Safety
    ///
    /// `key` must be present in `self.map`, and the stored `node` pointer must
    /// point to a live entry in the priority list for the item's current
    /// quantized priority.
    unsafe fn update_priority(&mut self, key: &K) -> *mut Entry<(K, V)>
    where
        K: Clone,
    {
        let metadata = self.map.get_mut(key).unwrap();
        let old_priority = metadata.priority;
        let size = metadata.size;

        // Increment frequency
        metadata.frequency += 1;

        // Calculate new priority outside of borrowing context
        // (inlined rather than calling calculate_priority, which would need
        // a second borrow of self while `metadata` is alive)
        let global_age = self.global_age;
        let new_priority = if size == 0 {
            f64::INFINITY
        } else {
            (metadata.frequency as f64 / size as f64) + global_age
        };
        metadata.priority = new_priority;

        // Convert priority to integer key for BTreeMap (multiply by 1000 for precision)
        let old_priority_key = (old_priority * 1000.0) as u64;
        let new_priority_key = (new_priority * 1000.0) as u64;

        // If the quantized priority hasn't changed, just move the node to the
        // front of the same list (most recently used within that priority)
        if old_priority_key == new_priority_key {
            let node = metadata.node;
            self.priority_lists
                .get_mut(&new_priority_key)
                .unwrap()
                .move_to_front(node);
            return node;
        }

        // Remove from old priority list
        let node = metadata.node;
        let boxed_entry = self
            .priority_lists
            .get_mut(&old_priority_key)
            .unwrap()
            .remove(node)
            .unwrap();

        // Clean up empty list so the smallest BTreeMap key always refers to a
        // non-empty bucket (evict_one relies on this invariant)
        if self
            .priority_lists
            .get(&old_priority_key)
            .unwrap()
            .is_empty()
        {
            self.priority_lists.remove(&old_priority_key);
        }

        // Add to new priority list
        let entry_ptr = Box::into_raw(boxed_entry);

        // Ensure the new priority list exists
        let capacity = self.config.capacity();
        self.priority_lists
            .entry(new_priority_key)
            .or_insert_with(|| List::new(capacity));

        // Add to the front of the new priority list (most recently used within that priority)
        self.priority_lists
            .get_mut(&new_priority_key)
            .unwrap()
            .attach_from_other_list(entry_ptr);

        // Update the metadata so it points at the node's new location
        metadata.node = entry_ptr;

        entry_ptr
    }
281
    /// Returns a clone of the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the cache's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// Accessing an item increases its frequency count, recomputes its GDSF
    /// priority, and records a hit in the cache metrics.
    pub fn get<Q>(&mut self, key: &Q) -> Option<V>
    where
        K: Borrow<Q> + Clone,
        Q: ?Sized + Hash + Eq,
    {
        if let Some(metadata) = self.map.get(key) {
            let node = metadata.node;
            unsafe {
                // SAFETY: node comes from our map, so it's a valid pointer to an entry in our priority list
                // Get the key from the node to pass to update_priority
                let (key_ref, value) = (*node).get_value();

                // Record hit with object size estimation
                let object_size = self.estimate_object_size(key_ref, value);
                self.metrics.core.record_hit(object_size);

                // Record GDSF-specific item access metrics
                // (values from before the frequency/priority bump below)
                self.metrics.record_item_access(
                    metadata.frequency,
                    metadata.size,
                    metadata.priority,
                );

                // update_priority may relocate the node to another bucket, so
                // re-read the value through the returned pointer, not `node`
                let new_node = self.update_priority(key_ref);
                let (_, value) = (*new_node).get_value();
                Some(value.clone())
            }
        } else {
            None
        }
    }
320
    /// Returns a mutable reference to the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the cache's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// Accessing an item increases its frequency count, recomputes its GDSF
    /// priority, and records a hit in the cache metrics.
    pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
    where
        K: Borrow<Q> + Clone,
        Q: ?Sized + Hash + Eq,
    {
        if let Some(metadata) = self.map.get(key) {
            let node = metadata.node;
            unsafe {
                // SAFETY: node comes from our map, so it's a valid pointer to an entry in our priority list
                let (key_ref, value) = (*node).get_value();

                // Record hit with object size estimation
                let object_size = self.estimate_object_size(key_ref, value);
                self.metrics.core.record_hit(object_size);

                // Record GDSF-specific item access metrics
                // (values from before the frequency/priority bump below)
                self.metrics.record_item_access(
                    metadata.frequency,
                    metadata.size,
                    metadata.priority,
                );

                // update_priority may relocate the node; borrow the value
                // mutably through the returned pointer, not the stale `node`
                let new_node = self.update_priority(key_ref);
                let (_, value) = (*new_node).get_value_mut();
                Some(value)
            }
        } else {
            None
        }
    }
358
    /// Returns `true` if the cache contains a value for the specified key.
    ///
    /// The key may be any borrowed form of the cache's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// This does not modify the cache or update access frequency, and it
    /// records no metrics.
    pub fn contains_key<Q>(&self, key: &Q) -> bool
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.map.contains_key(key)
    }
373
    /// Inserts a key-value pair with the specified size into the cache.
    ///
    /// If the key already exists, the value is updated and the old value is returned.
    /// If the cache is full, items are evicted based on GDSF priority until there's space.
    ///
    /// # Arguments
    ///
    /// * `key` - The key to insert
    /// * `val` - The value to associate with the key
    /// * `size` - The size/cost of storing this item (must be > 0)
    ///
    /// # Returns
    ///
    /// The previous value if `key` was already present. `None` means the pair
    /// was newly inserted — or that it was rejected because `size == 0`
    /// (zero-sized items would get infinite priority and never be evicted).
    ///
    /// # Examples
    ///
    /// ```
    /// use cache_rs::gdsf::GdsfCache;
    /// use core::num::NonZeroUsize;
    ///
    /// let mut cache = GdsfCache::new(NonZeroUsize::new(2).unwrap());
    ///
    /// cache.put("key1", "value1", 1);
    /// cache.put("key2", "value2", 2);
    /// assert_eq!(cache.len(), 2);
    /// ```
    pub fn put(&mut self, key: K, val: V, size: u64) -> Option<V>
    where
        K: Clone,
    {
        if size == 0 {
            // Don't allow zero-sized items as they would have infinite priority
            return None;
        }

        let object_size = self.estimate_object_size(&key, &val);

        // Check if key already exists: update in place. The entry keeps its
        // accumulated frequency but gets the new value, size, and priority.
        if let Some(mut metadata) = self.map.remove(&key) {
            // Update existing entry
            unsafe {
                // SAFETY: metadata.node comes from our map and was just removed, so priority_lists.remove is safe
                let old_priority_key = (metadata.priority * 1000.0) as u64;

                // Remove from current list
                let list = self.priority_lists.get_mut(&old_priority_key).unwrap();
                let entry = list.remove(metadata.node).unwrap();

                // Clean up empty list
                if list.is_empty() {
                    self.priority_lists.remove(&old_priority_key);
                }

                // SAFETY: entry is a valid Box we own, converting to raw pointer for temporary access
                // Extract old value
                let entry_ptr = Box::into_raw(entry);
                let (_, old_value) = (*entry_ptr).get_value().clone();

                // SAFETY: Reconstructing Box from the same raw pointer to properly free memory
                // Free the old entry
                let _ = Box::from_raw(entry_ptr);

                // Update metadata with new size but same frequency
                metadata.size = size;
                metadata.priority = self.calculate_priority(metadata.frequency, size);

                // Create new entry and add to appropriate list
                let new_priority_key = (metadata.priority * 1000.0) as u64;
                let capacity = self.cap();
                let list = self
                    .priority_lists
                    .entry(new_priority_key)
                    .or_insert_with(|| List::new(capacity));

                if let Some(new_node) = list.add((key.clone(), val)) {
                    metadata.node = new_node;
                    self.map.insert(key, metadata);

                    // Record insertion (update)
                    self.metrics.core.record_insertion(object_size);

                    return Some(old_value);
                } else {
                    // This shouldn't happen since we just made space
                    // NOTE(review): if it ever does, the old entry has already
                    // been freed, so the key silently disappears from the cache
                    return None;
                }
            }
        }

        // Make space if needed (evict lowest-priority items until under capacity)
        while self.len() >= self.config.capacity().get() {
            self.evict_one();
        }

        // Calculate priority for new item (frequency starts at 1)
        let priority = self.calculate_priority(1, size);
        let priority_key = (priority * 1000.0) as u64;

        // Add to appropriate priority list
        let capacity = self.config.capacity();
        let list = self
            .priority_lists
            .entry(priority_key)
            .or_insert_with(|| List::new(capacity));

        if let Some(entry) = list.add((key.clone(), val)) {
            // Update metadata
            let metadata = EntryMetadata {
                frequency: 1,
                size,
                priority,
                node: entry,
            };

            self.map.insert(key, metadata);

            // Update min_priority if this is the first item or lower
            if self.len() == 1 || priority < self.min_priority {
                self.min_priority = priority;
            }

            // Record insertion and GDSF-specific metrics
            self.metrics.core.record_insertion(object_size);
            self.metrics
                .record_item_cached(size, self.metrics.average_item_size());
            self.metrics.record_item_access(1, size, priority);

            None
        } else {
            // List is full, shouldn't happen since we made space
            None
        }
    }
504
    /// Evicts one item from the cache based on GDSF priority.
    /// Items with lower priority are evicted first; within the lowest
    /// priority bucket the least recently used item is chosen. The global
    /// age is then raised to the evicted item's priority, which is the GDSF
    /// aging mechanism.
    fn evict_one(&mut self) {
        if self.is_empty() {
            return;
        }

        // Find the list with minimum priority. BTreeMap iterates keys in
        // ascending order, so the first key is the smallest quantized priority.
        // NOTE(review): this assumes every bucket in the map is non-empty;
        // the insert/update paths remove emptied buckets to keep that true.
        let min_priority_key = *self.priority_lists.keys().next().unwrap();

        // Remove the least recently used item from the minimum priority list
        let list = self.priority_lists.get_mut(&min_priority_key).unwrap();
        if let Some(entry) = list.remove_last() {
            unsafe {
                // SAFETY: entry comes from list.remove_last(), so it's a valid Box
                // that we own. Converting to raw pointer is safe for temporary access.
                let entry_ptr = Box::into_raw(entry);
                let (entry_key, _entry_value) = (*entry_ptr).get_value();

                // Get the priority before removing from map for metrics
                let priority_to_update = if let Some(metadata) = self.map.get(entry_key) {
                    metadata.priority
                } else {
                    self.global_age // fallback
                };

                // Estimate object size before moving values
                // (same static formula as estimate_object_size)
                let estimated_size = mem::size_of::<K>() as u64 + mem::size_of::<V>() as u64 + 64;

                // Record eviction with estimated object size and GDSF-specific metrics
                self.metrics.core.record_eviction(estimated_size);
                self.metrics.record_size_based_eviction();
                self.metrics.record_aging_event(priority_to_update);

                // Update global age to the evicted item's priority
                self.global_age = priority_to_update;

                // Remove from map using the key borrowed from the entry
                // (must happen before the entry is freed below)
                self.map.remove(entry_key);

                // SAFETY: Reconstructing Box from the same raw pointer to properly free memory
                // Free the entry
                let _ = Box::from_raw(entry_ptr);
            }
        }

        // Clean up empty list so the minimum BTreeMap key keeps referring to
        // a non-empty bucket on the next eviction
        if list.is_empty() {
            self.priority_lists.remove(&min_priority_key);
        }
    }
556
    /// Removes a key from the cache, returning the value if the key was in the cache.
    ///
    /// The key may be any borrowed form of the cache's key type, but
    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
    /// the key type.
    ///
    /// Removal does not touch the global age and records no metrics.
    pub fn pop<Q>(&mut self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        if let Some(metadata) = self.map.remove(key) {
            unsafe {
                // SAFETY: metadata.node comes from our map and was just removed, so priority_lists.remove is safe
                let priority_key = (metadata.priority * 1000.0) as u64;
                let list = self.priority_lists.get_mut(&priority_key).unwrap();
                let entry = list.remove(metadata.node).unwrap();

                // Clean up empty list
                if list.is_empty() {
                    self.priority_lists.remove(&priority_key);
                }

                // SAFETY: entry is a valid Box we own, converting to raw pointer for temporary access
                let entry_ptr = Box::into_raw(entry);
                let (_, value) = (*entry_ptr).get_value();
                // Clone the value out before the entry's memory is released
                let result = value.clone();

                // SAFETY: Reconstructing Box from the same raw pointer to properly free memory
                // Free the entry
                let _ = Box::from_raw(entry_ptr);

                Some(result)
            }
        } else {
            None
        }
    }
594
595 /// Clears the cache, removing all key-value pairs.
596 pub fn clear(&mut self) {
597 self.map.clear();
598 self.priority_lists.clear();
599 self.global_age = 0.0;
600 self.min_priority = 0.0;
601 }
602}
603
impl<K, V, S> CacheMetrics for GdsfCache<K, V, S> {
    /// Returns all GDSF cache metrics as key-value pairs in deterministic order
    ///
    /// Delegates to the inner [`GdsfCacheMetrics`] instance.
    ///
    /// # Returns
    /// A BTreeMap containing all metrics tracked by this GDSF cache instance
    fn metrics(&self) -> BTreeMap<String, f64> {
        self.metrics.metrics()
    }

    /// Returns the algorithm name for this cache implementation
    ///
    /// # Returns
    /// "GDSF" - identifying this as a Greedy Dual-Size Frequency cache
    fn algorithm_name(&self) -> &'static str {
        self.metrics.algorithm_name()
    }
}
621
impl<K: Hash + Eq, V: Clone> GdsfCache<K, V, DefaultHashBuilder> {
    /// Creates a new GDSF cache with the specified capacity.
    ///
    /// Uses the default hasher; see [`GdsfCache::with_hasher`] to supply a
    /// custom one.
    ///
    /// # Examples
    ///
    /// ```
    /// use cache_rs::gdsf::GdsfCache;
    /// use core::num::NonZeroUsize;
    ///
    /// let cache: GdsfCache<&str, u32> = GdsfCache::new(NonZeroUsize::new(10).unwrap());
    /// ```
    pub fn new(cap: NonZeroUsize) -> Self {
        // NOTE(review): a config is built here only to normalize the capacity;
        // `with_hasher` constructs its own config from the same value.
        let config = GdsfCacheConfig::new(cap);
        Self::with_hasher(config.capacity(), DefaultHashBuilder::default())
    }
}
638
#[cfg(test)]
mod tests {
    use super::GdsfCache;
    use core::num::NonZeroUsize;

    /// Insert/get/contains round-trip on a capacity-3 cache.
    #[test]
    fn test_gdsf_basic_operations() {
        let mut cache = GdsfCache::new(NonZeroUsize::new(3).unwrap());

        // Test insertion
        assert_eq!(cache.put("a", 1, 1), None);
        assert_eq!(cache.put("b", 2, 2), None);
        assert_eq!(cache.put("c", 3, 1), None);
        assert_eq!(cache.len(), 3);

        // Test retrieval
        assert_eq!(cache.get(&"a"), Some(1));
        assert_eq!(cache.get(&"b"), Some(2));
        assert_eq!(cache.get(&"c"), Some(3));

        // Test contains_key
        assert!(cache.contains_key(&"a"));
        assert!(!cache.contains_key(&"d"));
    }

    /// With equal sizes, the less frequently accessed item is evicted.
    #[test]
    fn test_gdsf_frequency_priority() {
        let mut cache = GdsfCache::new(NonZeroUsize::new(2).unwrap());

        // Insert items with same size
        cache.put("a", 1, 1);
        cache.put("b", 2, 1);

        // Access "a" more frequently: priority("a") = 3/1, priority("b") = 1/1
        cache.get(&"a");
        cache.get(&"a");

        // Add new item, "b" should be evicted due to lower frequency
        cache.put("c", 3, 1);

        assert!(cache.contains_key(&"a"));
        assert!(!cache.contains_key(&"b"));
        assert!(cache.contains_key(&"c"));
    }

    /// With equal frequencies, the larger (lower priority) item is evicted.
    #[test]
    fn test_gdsf_size_consideration() {
        let mut cache = GdsfCache::new(NonZeroUsize::new(2).unwrap());

        // Insert items with different sizes but same access patterns
        cache.put("small", 1, 1); // frequency/size = 1/1 = 1.0
        cache.put("large", 2, 10); // frequency/size = 1/10 = 0.1

        // Add new item, "large" should be evicted due to lower priority
        cache.put("medium", 3, 5);

        assert!(cache.contains_key(&"small"));
        assert!(!cache.contains_key(&"large"));
        assert!(cache.contains_key(&"medium"));
    }

    /// Re-inserting an existing key updates the value and returns the old one.
    #[test]
    fn test_gdsf_update_existing() {
        let mut cache = GdsfCache::new(NonZeroUsize::new(2).unwrap());

        cache.put("key", 1, 1);
        assert_eq!(cache.get(&"key"), Some(1));

        // Update with new value and size
        assert_eq!(cache.put("key", 2, 2), Some(1));
        assert_eq!(cache.get(&"key"), Some(2));
        assert_eq!(cache.len(), 1);
    }

    /// Zero-sized items are rejected outright (they would never be evictable).
    #[test]
    fn test_gdsf_zero_size_rejection() {
        let mut cache = GdsfCache::new(NonZeroUsize::new(2).unwrap());

        // Zero-sized items should be rejected
        assert_eq!(cache.put("key", 1, 0), None);
        assert_eq!(cache.len(), 0);
        assert!(!cache.contains_key(&"key"));
    }

    /// pop removes and returns an item; missing keys yield None.
    #[test]
    fn test_gdsf_pop() {
        let mut cache = GdsfCache::new(NonZeroUsize::new(2).unwrap());

        cache.put("a", 1, 1);
        cache.put("b", 2, 1);

        assert_eq!(cache.pop(&"a"), Some(1));
        assert_eq!(cache.len(), 1);
        assert!(!cache.contains_key(&"a"));
        assert!(cache.contains_key(&"b"));

        assert_eq!(cache.pop(&"nonexistent"), None);
    }

    /// clear empties the cache completely.
    #[test]
    fn test_gdsf_clear() {
        let mut cache = GdsfCache::new(NonZeroUsize::new(2).unwrap());

        cache.put("a", 1, 1);
        cache.put("b", 2, 1);
        assert_eq!(cache.len(), 2);

        cache.clear();
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());
        assert!(!cache.contains_key(&"a"));
        assert!(!cache.contains_key(&"b"));
    }

    /// Evicting an item raises the global age to the evicted item's priority.
    #[test]
    fn test_gdsf_global_aging() {
        let mut cache = GdsfCache::new(NonZeroUsize::new(2).unwrap());

        cache.put("a", 1, 1);
        cache.put("b", 2, 1);

        let initial_age = cache.global_age();

        // Force eviction: the evicted item has priority 1/1 + initial_age
        cache.put("c", 3, 1);

        // Global age should have increased
        assert!(cache.global_age() > initial_age);
    }
}
768}