// cachekit/policy/arc.rs

1//! Adaptive Replacement Cache (ARC) replacement policy.
2//!
3//! Implements the ARC algorithm, which automatically adapts between recency and
4//! frequency preferences by maintaining four lists and adjusting a dynamic target
5//! parameter based on access patterns.
6//!
7//! ## Architecture
8//!
9//! ```text
10//! ┌─────────────────────────────────────────────────────────────────────────────┐
11//! │                           ArcCore<K, V> Layout                              │
12//! │                                                                             │
13//! │   ┌─────────────────────────────────────────────────────────────────────┐   │
14//! │   │  index: HashMap<K, NonNull<Node>>    nodes: Allocated on heap       │   │
15//! │   │                                                                     │   │
16//! │   │  ┌──────────┬───────────┐           ┌────────┬──────────────────┐   │   │
17//! │   │  │   Key    │  NodePtr  │           │ Node   │ key,val,list     │   │   │
18//! │   │  ├──────────┼───────────┤           ├────────┼──────────────────┤   │   │
19//! │   │  │  "page1" │   ptr_0   │──────────►│ Node0  │ k,v,T1           │   │   │
20//! │   │  │  "page2" │   ptr_1   │──────────►│ Node1  │ k,v,T2           │   │   │
21//! │   │  │  "page3" │   ptr_2   │──────────►│ Node2  │ k,v,T1           │   │   │
22//! │   │  └──────────┴───────────┘           └────────┴──────────────────┘   │   │
23//! │   └─────────────────────────────────────────────────────────────────────┘   │
24//! │                                                                             │
25//! │   ┌─────────────────────────────────────────────────────────────────────┐   │
26//! │   │                        List Organization                            │   │
27//! │   │                                                                     │   │
28//! │   │   T1 (Recency - Recent Once)          T2 (Frequency - Repeated)     │   │
29//! │   │   ┌─────────────────────────┐          ┌─────────────────────────┐  │   │
30//! │   │   │ MRU               LRU   │          │ MRU               LRU   │  │   │
31//! │   │   │  ▼                  ▼   │          │  ▼                  ▼   │  │   │
32//! │   │   │ [ptr_2] ◄──► [ptr_0] ◄┤ │          │ [ptr_1] ◄──► [...] ◄┤   │  │   │
33//! │   │   │  new      older   evict │          │ hot          cold evict │  │   │
34//! │   │   └─────────────────────────┘          └─────────────────────────┘  │   │
35//! │   │                                                                     │   │
36//! │   │   B1 (Ghost - evicted from T1)       B2 (Ghost - evicted from T2)   │   │
37//! │   │   ┌─────────────────────────┐          ┌─────────────────────────┐  │   │
38//! │   │   │ Keys only (no values)   │          │ Keys only (no values)   │  │   │
39//! │   │   └─────────────────────────┘          └─────────────────────────┘  │   │
40//! │   │                                                                     │   │
41//! │   │   Adaptation Parameter: p (target size for T1)                      │   │
42//! │   │   • Hit in B1 → increase p (favor recency)                          │   │
43//! │   │   • Hit in B2 → decrease p (favor frequency)                        │   │
44//! │   └─────────────────────────────────────────────────────────────────────┘   │
45//! │                                                                             │
46//! └─────────────────────────────────────────────────────────────────────────────┘
47//!
48//! Insert Flow (new key, not in any list)
49//! ────────────────────────────────────────
50//!
51//!   insert("new_key", value):
52//!     1. Check index - not found
53//!     2. Check ghost lists (B1/B2) for adaptation
54//!     3. Create Node with ListKind::T1
55//!     4. Allocate on heap → get NonNull<Node>
56//!     5. Insert key→ptr into index
57//!     6. Attach ptr to T1 MRU
58//!     7. Evict if over capacity (replace algorithm)
59//!
60//! Access Flow (existing key in T1/T2)
61//! ────────────────────────────────────
62//!
63//!   get("existing_key"):
64//!     1. Lookup ptr in index
65//!     2. Check node's list:
66//!        - If T1: promote to T2 (move to T2 MRU)
67//!        - If T2: move to MRU position within T2
68//!     3. Return &value
69//!
70//! Ghost Hit Flow (key in B1/B2)
71//! ──────────────────────────────
72//!
73//!   get("ghost_key"):
74//!     1. Found in B1: increase p (favor recency)
75//!     2. Found in B2: decrease p (favor frequency)
76//!     3. Perform replacement to make space
77//!     4. Insert into T2 (proven reuse)
78//!     5. Remove from ghost list
79//!
80//! Eviction Flow (Replace Algorithm)
81//! ──────────────────────────────────
82//!
//!   replace(hit_in_b2):
//!     if |T1| > 0 and (|T1| > p or (|T1| == p and hit_in_b2)):
//!       evict from T1 LRU → move key to B1
//!     else:
//!       evict from T2 LRU → move key to B2
88//! ```
89//!
90//! ## Key Components
91//!
92//! - [`ArcCore`]: Main ARC cache implementation
93//! - Four lists: T1 (recent once), T2 (frequent), B1 (ghost for T1), B2 (ghost for T2)
94//! - Adaptation parameter `p`: target size for T1 vs T2
95//!
96//! ## Operations
97//!
98//! | Operation   | Time   | Notes                                      |
99//! |-------------|--------|--------------------------------------------|
100//! | `get`       | O(1)   | May promote T1→T2 or adapt via ghost hit   |
101//! | `insert`    | O(1)*  | *Amortized, may trigger evictions          |
102//! | `contains`  | O(1)   | Index lookup only                          |
103//! | `len`       | O(1)   | Returns T1 + T2 entries                    |
104//! | `clear`     | O(n)   | Clears all structures                      |
105//!
106//! ## Algorithm Properties
107//!
108//! - **Adaptive**: Automatically balances recency vs frequency based on workload
109//! - **Scan Resistant**: Ghost lists prevent one-time scans from polluting cache
110//! - **Self-Tuning**: No manual parameter tuning required
111//! - **Competitive**: O(1) operations, proven optimal in certain workload classes
112//!
113//! ## Use Cases
114//!
115//! - Database buffer pools with mixed access patterns
116//! - File system caches
117//! - Web caches with varying temporal/frequency characteristics
118//! - Workloads where optimal recency/frequency balance is unknown
119//!
120//! ## Example Usage
121//!
122//! ```
123//! use cachekit::policy::arc::ArcCore;
124//! use cachekit::traits::Cache;
125//!
126//! // Create ARC cache with 100 entry capacity
127//! let mut cache = ArcCore::new(100);
128//!
129//! // Insert items (go to T1 - recent list)
130//! cache.insert("page1", "content1");
131//! cache.insert("page2", "content2");
132//!
133//! // First access promotes to T2 (frequent list)
134//! assert_eq!(cache.get(&"page1"), Some(&"content1"));
135//!
136//! // Second access keeps in T2 (MRU position)
137//! assert_eq!(cache.get(&"page1"), Some(&"content1"));
138//!
139//! assert_eq!(cache.len(), 2);
140//! ```
141//!
142//! ## Thread Safety
143//!
144//! - [`ArcCore`]: Not thread-safe, designed for single-threaded use
145//! - For concurrent access, wrap in external synchronization
146//!
147//! ## Implementation Notes
148//!
149//! - T1 uses LRU ordering (recent once entries)
150//! - T2 uses LRU ordering (frequent entries)
151//! - B1/B2 are ghost lists (keys only, no values)
152//! - Default initial `p` is `capacity / 2`
153//! - Ghost list sizes are each up to `capacity`
154//! - Promotion from T1 to T2 happens on re-access
155//!
156//! ## References
157//!
158//! - Megiddo & Modha, "ARC: A Self-Tuning, Low Overhead Replacement Cache",
159//!   FAST 2003
160//! - Wikipedia: Cache replacement policies (ARC section)
161//!
162//! ## Performance Trade-offs
163//!
164//! - **When to Use**: Unknown or shifting workload patterns; need adaptive behavior
165//! - **Memory Overhead**: 4 lists + ghost entries (up to 2× capacity in keys)
166//! - **vs LRU**: Better on mixed workloads, slightly higher metadata overhead
167//! - **vs 2Q/SLRU**: More adaptive, no manual tuning needed
168
169use crate::ds::GhostList;
170#[cfg(feature = "metrics")]
171use crate::metrics::metrics_impl::ArcMetrics;
172#[cfg(feature = "metrics")]
173use crate::metrics::snapshot::ArcMetricsSnapshot;
174#[cfg(feature = "metrics")]
175use crate::metrics::traits::{ArcMetricsRecorder, CoreMetricsRecorder, MetricsSnapshotProvider};
176use crate::traits::Cache;
177use rustc_hash::FxHashMap;
178use std::hash::Hash;
179use std::iter::FusedIterator;
180use std::marker::PhantomData;
181use std::ptr::NonNull;
182
/// Which resident list an entry currently belongs to.
///
/// ARC keeps every resident entry in exactly one of two LRU lists:
/// T1 holds keys seen once recently, T2 holds keys re-accessed at least once.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ListKind {
    /// Recency list: inserted once, not yet re-accessed.
    T1,
    /// Frequency list: re-accessed at least once.
    T2,
}
191
/// Node in the ARC linked list.
///
/// Cache-line optimized layout with pointers first.
#[repr(C)]
struct Node<K, V> {
    /// Previous node, toward the MRU (head) end; `None` when this is the head.
    prev: Option<NonNull<Node<K, V>>>,
    /// Next node, toward the LRU (tail) end; `None` when this is the tail.
    next: Option<NonNull<Node<K, V>>>,
    /// Which resident list (T1 or T2) currently links this node.
    list: ListKind,
    /// Cached key; a copy also serves as the index-map key.
    key: K,
    /// Cached value.
    value: V,
}
203
/// Core Adaptive Replacement Cache (ARC) implementation.
///
/// Implements the ARC replacement algorithm with automatic adaptation between
/// recency and frequency preferences:
/// - **T1**: Recently accessed once (recency list)
/// - **T2**: Accessed multiple times (frequency list)
/// - **B1**: Ghost list for items evicted from T1
/// - **B2**: Ghost list for items evicted from T2
///
/// The cache maintains an adaptation parameter `p` that controls the target
/// size of T1 vs T2. Ghost hits in B1/B2 adjust `p` to favor recency or frequency.
///
/// # Type Parameters
///
/// - `K`: Key type, must be `Clone + Eq + Hash`
/// - `V`: Value type
///
/// # Example
///
/// ```
/// use cachekit::policy::arc::ArcCore;
/// use cachekit::traits::Cache;
///
/// // 100 capacity ARC cache
/// let mut cache = ArcCore::new(100);
///
/// // Insert goes to T1 (recent list)
/// cache.insert("key1", "value1");
/// assert!(cache.contains(&"key1"));
///
/// // First get promotes to T2 (frequent list)
/// cache.get(&"key1");
///
/// // Update existing key
/// cache.insert("key1", "new_value");
/// assert_eq!(cache.get(&"key1"), Some(&"new_value"));
/// ```
///
/// # Eviction Behavior
///
/// The replacement algorithm selects victims based on the adaptation parameter `p`:
/// - If T1 is over its target `p` (or exactly at target on a B2 ghost hit):
///   evict from T1 LRU, move key to B1
/// - Otherwise: evict from T2 LRU, move key to B2
///
/// # Implementation
///
/// Uses raw pointer linked lists for O(1) operations with minimal overhead.
/// Ghost lists track recently evicted keys to enable adaptation.
pub struct ArcCore<K, V> {
    /// Direct key -> node pointer mapping for O(1) lookup of resident entries.
    map: FxHashMap<K, NonNull<Node<K, V>>>,

    /// T1 list (recent once): head=MRU, tail=LRU
    t1_head: Option<NonNull<Node<K, V>>>,
    /// LRU end of T1; eviction candidates are taken from here.
    t1_tail: Option<NonNull<Node<K, V>>>,
    /// Number of nodes currently linked into T1.
    t1_len: usize,

    /// T2 list (frequent): head=MRU, tail=LRU
    t2_head: Option<NonNull<Node<K, V>>>,
    /// LRU end of T2; eviction candidates are taken from here.
    t2_tail: Option<NonNull<Node<K, V>>>,
    /// Number of nodes currently linked into T2.
    t2_len: usize,

    /// B1 ghost list (keys recently evicted from T1; no values retained)
    b1: GhostList<K>,

    /// B2 ghost list (keys recently evicted from T2; no values retained)
    b2: GhostList<K>,

    /// Adaptation parameter: target size for T1, always in `0..=capacity`
    p: usize,

    /// Maximum total cache capacity (resident entries, |T1| + |T2|)
    capacity: usize,

    /// Operation counters; compiled in only with the `metrics` feature.
    #[cfg(feature = "metrics")]
    metrics: ArcMetrics,
}
281
// SAFETY: The `NonNull<Node>` pointers are exclusively owned by this `ArcCore`
// instance. They are never aliased or exposed externally, and all mutation is
// gated behind `&mut self`, so transferring ownership between threads is safe
// when K and V are Send. (`K: Send`/`V: Send` are required because the cache
// owns its keys and values and drops them on whichever thread it ends up on.)
unsafe impl<K, V> Send for ArcCore<K, V>
where
    K: Send,
    V: Send,
{
}
292
// SAFETY: Shared references (`&ArcCore`) only expose read-only operations
// (`contains`, `len`, `capacity`, `peek`, `iter`). Some of these (e.g. `peek`)
// do dereference the internal `NonNull` pointers, but strictly for shared
// reads: the structure contains no interior mutability, so concurrent shared
// access cannot race. Sharing `&ArcCore` across threads is therefore safe
// when K and V are Sync.
unsafe impl<K, V> Sync for ArcCore<K, V>
where
    K: Sync,
    V: Sync,
{
}
303
304impl<K, V> ArcCore<K, V> {
305    /// Drops all heap-allocated nodes reachable from the T1 and T2 lists.
306    fn drop_all_nodes(&mut self) {
307        let mut current = self.t1_head;
308        while let Some(node_ptr) = current {
309            unsafe {
310                current = node_ptr.as_ref().next;
311                let _ = Box::from_raw(node_ptr.as_ptr());
312            }
313        }
314        let mut current = self.t2_head;
315        while let Some(node_ptr) = current {
316            unsafe {
317                current = node_ptr.as_ref().next;
318                let _ = Box::from_raw(node_ptr.as_ptr());
319            }
320        }
321    }
322
323    /// Returns an iterator over all cached key-value pairs.
324    ///
325    /// Iterates over T1 entries first, then T2 entries, from MRU to LRU within
326    /// each list. Does not modify access state.
327    ///
328    /// # Example
329    ///
330    /// ```
331    /// use cachekit::policy::arc::ArcCore;
332    /// use cachekit::traits::Cache;
333    ///
334    /// let mut cache = ArcCore::new(10);
335    /// cache.insert("a", 1);
336    /// cache.insert("b", 2);
337    ///
338    /// let entries: Vec<_> = cache.iter().collect();
339    /// assert_eq!(entries.len(), 2);
340    /// ```
341    pub fn iter(&self) -> Iter<'_, K, V> {
342        Iter {
343            current: self.t1_head,
344            t2_head: self.t2_head,
345            in_t2: false,
346            remaining: self.t1_len + self.t2_len,
347            _marker: PhantomData,
348        }
349    }
350
351    /// Returns a mutable iterator over all cached key-value pairs.
352    ///
353    /// Keys are yielded as shared references; values as mutable references.
354    /// Modifying values does not affect eviction ordering.
355    ///
356    /// # Example
357    ///
358    /// ```
359    /// use cachekit::policy::arc::ArcCore;
360    /// use cachekit::traits::Cache;
361    ///
362    /// let mut cache = ArcCore::new(10);
363    /// cache.insert("a", 1);
364    /// cache.insert("b", 2);
365    ///
366    /// for (_key, value) in cache.iter_mut() {
367    ///     *value += 10;
368    /// }
369    /// assert_eq!(cache.get(&"a"), Some(&11));
370    /// ```
371    pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
372        IterMut {
373            current: self.t1_head,
374            t2_head: self.t2_head,
375            in_t2: false,
376            remaining: self.t1_len + self.t2_len,
377            _marker: PhantomData,
378        }
379    }
380}
381
382impl<K, V> ArcCore<K, V>
383where
384    K: Clone + Eq + Hash,
385{
386    /// Creates a new ARC cache with the specified capacity.
387    ///
388    /// # Arguments
389    ///
390    /// - `capacity`: Maximum number of entries in T1 + T2
391    ///
392    /// Ghost lists (B1/B2) can each hold up to `capacity` keys.
393    /// Initial adaptation parameter `p` is set to `capacity / 2`.
394    ///
395    /// # Example
396    ///
397    /// ```
398    /// use cachekit::policy::arc::ArcCore;
399    /// use cachekit::traits::Cache;
400    ///
401    /// // 100 capacity ARC cache
402    /// let cache: ArcCore<String, i32> = ArcCore::new(100);
403    /// assert_eq!(cache.capacity(), 100);
404    /// assert!(cache.is_empty());
405    /// ```
406    #[inline]
407    pub fn new(capacity: usize) -> Self {
408        Self {
409            map: FxHashMap::with_capacity_and_hasher(capacity, Default::default()),
410            t1_head: None,
411            t1_tail: None,
412            t1_len: 0,
413            t2_head: None,
414            t2_tail: None,
415            t2_len: 0,
416            b1: GhostList::new(capacity),
417            b2: GhostList::new(capacity),
418            p: capacity / 2,
419            capacity,
420            #[cfg(feature = "metrics")]
421            metrics: ArcMetrics::default(),
422        }
423    }
424
    /// Detach a node from its current list (T1 or T2).
    ///
    /// Unlinks `node_ptr` from its doubly-linked list, patching the
    /// neighbours (or the list head/tail when the node sits at an end) and
    /// decrementing that list's length. The node itself is left alive and
    /// still indexed in `map`; the caller decides what happens to it next.
    #[inline(always)]
    fn detach(&mut self, node_ptr: NonNull<Node<K, V>>) {
        // SAFETY: `node_ptr` references a live node owned by this cache, and
        // `&mut self` guarantees exclusive access while the links are rewired.
        unsafe {
            let node = node_ptr.as_ref();
            let prev = node.prev;
            let next = node.next;
            let list = node.list;

            // Select the head/tail/len triple of whichever list holds the node.
            let (head, tail, len) = match list {
                ListKind::T1 => (&mut self.t1_head, &mut self.t1_tail, &mut self.t1_len),
                ListKind::T2 => (&mut self.t2_head, &mut self.t2_tail, &mut self.t2_len),
            };

            // Patch the prev side; detaching the head moves the head pointer.
            match prev {
                Some(mut p) => p.as_mut().next = next,
                None => *head = next,
            }

            // Patch the next side; detaching the tail moves the tail pointer.
            match next {
                Some(mut n) => n.as_mut().prev = prev,
                None => *tail = prev,
            }

            *len -= 1;
        }
    }
452
453    /// Attach a node at the head of T1 list (MRU position).
454    #[inline(always)]
455    fn attach_t1_head(&mut self, mut node_ptr: NonNull<Node<K, V>>) {
456        unsafe {
457            let node = node_ptr.as_mut();
458            node.prev = None;
459            node.next = self.t1_head;
460            node.list = ListKind::T1;
461
462            match self.t1_head {
463                Some(mut h) => h.as_mut().prev = Some(node_ptr),
464                None => self.t1_tail = Some(node_ptr),
465            }
466
467            self.t1_head = Some(node_ptr);
468            self.t1_len += 1;
469        }
470    }
471
472    /// Attach a node at the head of T2 list (MRU position).
473    #[inline(always)]
474    fn attach_t2_head(&mut self, mut node_ptr: NonNull<Node<K, V>>) {
475        unsafe {
476            let node = node_ptr.as_mut();
477            node.prev = None;
478            node.next = self.t2_head;
479            node.list = ListKind::T2;
480
481            match self.t2_head {
482                Some(mut h) => h.as_mut().prev = Some(node_ptr),
483                None => self.t2_tail = Some(node_ptr),
484            }
485
486            self.t2_head = Some(node_ptr);
487            self.t2_len += 1;
488        }
489    }
490
491    /// Replace: select a victim from T1 or T2 based on adaptation parameter p.
492    ///
493    /// This is the core ARC replacement algorithm.
494    fn replace(&mut self, in_b2: bool) {
495        #[cfg(feature = "metrics")]
496        self.metrics.record_evict_call();
497
498        // Decide whether to evict from T1 or T2
499        let evict_from_t1 =
500            if self.t1_len > 0 && (self.t1_len > self.p || (self.t1_len == self.p && in_b2)) {
501                true
502            } else if self.t2_len > 0 {
503                false
504            } else {
505                self.t1_len > 0
506            };
507
508        if evict_from_t1 {
509            if let Some(victim_ptr) = self.t1_tail {
510                unsafe {
511                    let victim = victim_ptr.as_ref();
512                    let key = victim.key.clone();
513
514                    self.detach(victim_ptr);
515                    self.map.remove(&key);
516                    self.b1.record(key.clone());
517                    let _ = Box::from_raw(victim_ptr.as_ptr());
518
519                    #[cfg(feature = "metrics")]
520                    {
521                        self.metrics.record_evicted_entry();
522                        self.metrics.record_t1_eviction();
523                    }
524                }
525            }
526        } else if let Some(victim_ptr) = self.t2_tail {
527            unsafe {
528                let victim = victim_ptr.as_ref();
529                let key = victim.key.clone();
530
531                self.detach(victim_ptr);
532                self.map.remove(&key);
533                self.b2.record(key.clone());
534                let _ = Box::from_raw(victim_ptr.as_ptr());
535
536                #[cfg(feature = "metrics")]
537                {
538                    self.metrics.record_evicted_entry();
539                    self.metrics.record_t2_eviction();
540                }
541            }
542        }
543    }
544
545    /// Adapt parameter p based on ghost hit location.
546    fn adapt(&mut self, in_b1: bool, in_b2: bool) {
547        if in_b1 {
548            let delta = if self.b2.len() >= self.b1.len() {
549                1
550            } else if !self.b1.is_empty() {
551                ((self.b2.len() as f64 / self.b1.len() as f64).ceil() as usize).max(1)
552            } else {
553                1
554            };
555            self.p = (self.p + delta).min(self.capacity);
556
557            #[cfg(feature = "metrics")]
558            self.metrics.record_p_increase();
559        } else if in_b2 {
560            let delta = if self.b1.len() >= self.b2.len() {
561                1
562            } else if !self.b2.is_empty() {
563                ((self.b1.len() as f64 / self.b2.len() as f64).ceil() as usize).max(1)
564            } else {
565                1
566            };
567            self.p = self.p.saturating_sub(delta);
568
569            #[cfg(feature = "metrics")]
570            self.metrics.record_p_decrease();
571        }
572    }
573
    /// Returns the current value of the adaptation parameter p.
    ///
    /// This represents the target size for T1. Higher values favor recency,
    /// lower values favor frequency. Ghost hits adjust it automatically and
    /// it always lies in `0..=capacity`.
    ///
    /// # Example
    ///
    /// ```
    /// use cachekit::policy::arc::ArcCore;
    ///
    /// let cache: ArcCore<String, i32> = ArcCore::new(100);
    /// // Initial p is capacity / 2
    /// assert_eq!(cache.p_value(), 50);
    /// ```
    pub fn p_value(&self) -> usize {
        self.p
    }

    /// Returns the number of entries in T1 (recent once list).
    ///
    /// # Example
    ///
    /// ```
    /// use cachekit::policy::arc::ArcCore;
    /// use cachekit::traits::Cache;
    ///
    /// let mut cache = ArcCore::new(100);
    /// cache.insert("key", "value");
    /// assert_eq!(cache.t1_len(), 1);  // New entries go to T1
    /// ```
    pub fn t1_len(&self) -> usize {
        self.t1_len
    }

    /// Returns the number of entries in T2 (frequent list).
    ///
    /// # Example
    ///
    /// ```
    /// use cachekit::policy::arc::ArcCore;
    /// use cachekit::traits::Cache;
    ///
    /// let mut cache = ArcCore::new(100);
    /// cache.insert("key", "value");
    /// cache.get(&"key");  // Promotes to T2
    /// assert_eq!(cache.t2_len(), 1);
    /// ```
    pub fn t2_len(&self) -> usize {
        self.t2_len
    }

    /// Returns the number of keys in B1 (ghost list for T1 evictions).
    ///
    /// Ghost entries hold keys only; the evicted values are gone.
    ///
    /// # Example
    ///
    /// ```
    /// use cachekit::policy::arc::ArcCore;
    /// use cachekit::traits::Cache;
    ///
    /// let mut cache = ArcCore::new(2);
    /// cache.insert("a", 1);
    /// cache.insert("b", 2);
    /// cache.insert("c", 3); // evicts "a" to B1
    /// assert_eq!(cache.b1_len(), 1);
    /// ```
    pub fn b1_len(&self) -> usize {
        self.b1.len()
    }

    /// Returns the number of keys in B2 (ghost list for T2 evictions).
    ///
    /// Ghost entries hold keys only; the evicted values are gone.
    ///
    /// # Example
    ///
    /// ```
    /// use cachekit::policy::arc::ArcCore;
    /// use cachekit::traits::Cache;
    ///
    /// let mut cache = ArcCore::new(2);
    /// cache.insert("a", 1);
    /// cache.insert("b", 2);
    /// cache.get(&"a"); // promote to T2
    /// cache.get(&"b"); // promote to T2
    /// cache.insert("c", 3); // evicts T2 LRU to B2
    /// assert!(cache.b2_len() <= 1);
    /// ```
    pub fn b2_len(&self) -> usize {
        self.b2.len()
    }
662
    /// Validates internal invariants of the ARC cache.
    ///
    /// Intended for tests and debug builds only; walks both resident lists,
    /// so it runs in O(n + ghost sizes).
    ///
    /// # Panics
    ///
    /// Panics if any of the following invariants are violated:
    /// - `len() == t1_len + t2_len`
    /// - `map.len() == t1_len + t2_len`
    /// - total entries do not exceed capacity
    /// - `p <= capacity`
    /// - ghost list sizes do not exceed capacity
    /// - linked-list counts match stored lengths
    /// - no node appears in both T1 and T2
    /// - no key appears in both the cache and a ghost list
    #[cfg(any(test, debug_assertions))]
    pub fn debug_validate_invariants(&self)
    where
        K: std::fmt::Debug,
    {
        // 1. Total length matches sum of T1 and T2
        assert_eq!(
            self.len(),
            self.t1_len + self.t2_len,
            "len() should equal t1_len + t2_len"
        );

        // 2. Map size equals total entries
        assert_eq!(
            self.map.len(),
            self.t1_len + self.t2_len,
            "map.len() should equal total entries"
        );

        // 3. Total entries don't exceed capacity
        assert!(
            self.t1_len + self.t2_len <= self.capacity,
            "total entries ({}) exceed capacity ({})",
            self.t1_len + self.t2_len,
            self.capacity
        );

        // 4. p is within valid range
        assert!(
            self.p <= self.capacity,
            "p ({}) exceeds capacity ({})",
            self.p,
            self.capacity
        );

        // 5. Ghost lists don't exceed capacity
        assert!(
            self.b1.len() <= self.capacity,
            "B1 length ({}) exceeds capacity ({})",
            self.b1.len(),
            self.capacity
        );
        assert!(
            self.b2.len() <= self.capacity,
            "B2 length ({}) exceeds capacity ({})",
            self.b2.len(),
            self.capacity
        );

        // 6. Count actual T1 entries by walking the list
        let mut t1_count = 0;
        let mut current = self.t1_head;
        let mut visited_t1 = std::collections::HashSet::new();
        while let Some(node_ptr) = current {
            // SAFETY: read-only traversal of live nodes owned by `self`.
            unsafe {
                let node = node_ptr.as_ref();

                // Check for cycles
                assert!(visited_t1.insert(node_ptr), "Cycle detected in T1 list");

                // Verify list kind
                assert_eq!(node.list, ListKind::T1, "Node in T1 has wrong list kind");

                // Verify node is in map
                assert!(self.map.contains_key(&node.key), "T1 node key not in map");

                t1_count += 1;
                current = node.next;
            }
        }
        assert_eq!(
            t1_count, self.t1_len,
            "T1 actual count doesn't match t1_len"
        );

        // 7. Count actual T2 entries by walking the list
        let mut t2_count = 0;
        let mut current = self.t2_head;
        let mut visited_t2 = std::collections::HashSet::new();
        while let Some(node_ptr) = current {
            // SAFETY: read-only traversal of live nodes owned by `self`.
            unsafe {
                let node = node_ptr.as_ref();

                // Check for cycles
                assert!(visited_t2.insert(node_ptr), "Cycle detected in T2 list");

                // Verify list kind
                assert_eq!(node.list, ListKind::T2, "Node in T2 has wrong list kind");

                // Verify node is in map
                assert!(self.map.contains_key(&node.key), "T2 node key not in map");

                t2_count += 1;
                current = node.next;
            }
        }
        assert_eq!(
            t2_count, self.t2_len,
            "T2 actual count doesn't match t2_len"
        );

        // 8. Verify no overlap between T1 and T2
        for t1_ptr in &visited_t1 {
            assert!(
                !visited_t2.contains(t1_ptr),
                "Node appears in both T1 and T2"
            );
        }

        // 9. Verify all map entries are accounted for in T1 or T2
        assert_eq!(
            visited_t1.len() + visited_t2.len(),
            self.map.len(),
            "Map contains entries not in T1 or T2"
        );

        // 10. Ghost lists don't contain keys that are in the cache
        for key in self.map.keys() {
            assert!(
                !self.b1.contains(key),
                "Key {:?} is in both cache and B1",
                key
            );
            assert!(
                !self.b2.contains(key),
                "Key {:?} is in both cache and B2",
                key
            );
        }
    }
806}
807
808impl<K, V> std::fmt::Debug for ArcCore<K, V>
809where
810    K: Clone + Eq + Hash + std::fmt::Debug,
811    V: std::fmt::Debug,
812{
813    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
814        f.debug_struct("ArcCore")
815            .field("capacity", &self.capacity)
816            .field("t1_len", &self.t1_len)
817            .field("t2_len", &self.t2_len)
818            .field("b1_len", &self.b1.len())
819            .field("b2_len", &self.b2.len())
820            .field("p", &self.p)
821            .field("total_len", &(self.t1_len + self.t2_len))
822            .finish()
823    }
824}
825
826impl<K, V> Cache<K, V> for ArcCore<K, V>
827where
828    K: Clone + Eq + Hash,
829{
    fn contains(&self, key: &K) -> bool {
        // Pure index lookup; recency/frequency state is not touched.
        self.map.contains_key(key)
    }

    fn len(&self) -> usize {
        // Resident entries only (T1 + T2); ghost keys are not counted.
        self.t1_len + self.t2_len
    }

    fn capacity(&self) -> usize {
        self.capacity
    }

    fn peek(&self, key: &K) -> Option<&V> {
        // SAFETY: map values always point at live nodes owned by this cache,
        // and `&self` restricts this to a shared read of the pointee.
        self.map
            .get(key)
            .map(|&node_ptr| unsafe { &(*node_ptr.as_ptr()).value })
    }
847
848    fn get(&mut self, key: &K) -> Option<&V> {
849        let node_ptr = match self.map.get(key) {
850            Some(&ptr) => ptr,
851            None => {
852                #[cfg(feature = "metrics")]
853                self.metrics.record_get_miss();
854                return None;
855            },
856        };
857
858        #[cfg(feature = "metrics")]
859        self.metrics.record_get_hit();
860
861        unsafe {
862            let node = node_ptr.as_ref();
863
864            match node.list {
865                ListKind::T1 => {
866                    #[cfg(feature = "metrics")]
867                    self.metrics.record_t1_to_t2_promotion();
868
869                    self.detach(node_ptr);
870                    self.attach_t2_head(node_ptr);
871                },
872                ListKind::T2 => {
873                    self.detach(node_ptr);
874                    self.attach_t2_head(node_ptr);
875                },
876            }
877
878            Some(&node_ptr.as_ref().value)
879        }
880    }
881
882    fn insert(&mut self, key: K, value: V) -> Option<V> {
883        #[cfg(feature = "metrics")]
884        self.metrics.record_insert_call();
885
886        if self.capacity == 0 {
887            return None;
888        }
889
890        // Case 1: Key already in cache (T1 or T2)
891        if let Some(&node_ptr) = self.map.get(&key) {
892            #[cfg(feature = "metrics")]
893            self.metrics.record_insert_update();
894
895            unsafe {
896                let mut node_ptr = node_ptr;
897                let node = node_ptr.as_mut();
898                let old_value = std::mem::replace(&mut node.value, value);
899
900                match node.list {
901                    ListKind::T1 => {
902                        self.detach(node_ptr);
903                        self.attach_t2_head(node_ptr);
904                    },
905                    ListKind::T2 => {
906                        self.detach(node_ptr);
907                        self.attach_t2_head(node_ptr);
908                    },
909                }
910
911                return Some(old_value);
912            }
913        }
914
915        let in_b1 = self.b1.contains(&key);
916        let in_b2 = self.b2.contains(&key);
917
918        // Case 2: Ghost hit in B1
919        if in_b1 {
920            #[cfg(feature = "metrics")]
921            {
922                self.metrics.record_b1_ghost_hit();
923                self.metrics.record_insert_new();
924            }
925
926            self.adapt(true, false);
927            self.b1.remove(&key);
928
929            if self.t1_len + self.t2_len >= self.capacity {
930                self.replace(false);
931            }
932
933            let node = Box::new(Node {
934                prev: None,
935                next: None,
936                list: ListKind::T2,
937                key: key.clone(),
938                value,
939            });
940            let node_ptr = NonNull::new(Box::into_raw(node)).unwrap();
941            self.map.insert(key, node_ptr);
942            self.attach_t2_head(node_ptr);
943
944            return None;
945        }
946
947        // Case 3: Ghost hit in B2
948        if in_b2 {
949            #[cfg(feature = "metrics")]
950            {
951                self.metrics.record_b2_ghost_hit();
952                self.metrics.record_insert_new();
953            }
954
955            self.adapt(false, true);
956            self.b2.remove(&key);
957
958            if self.t1_len + self.t2_len >= self.capacity {
959                self.replace(true);
960            }
961
962            let node = Box::new(Node {
963                prev: None,
964                next: None,
965                list: ListKind::T2,
966                key: key.clone(),
967                value,
968            });
969            let node_ptr = NonNull::new(Box::into_raw(node)).unwrap();
970            self.map.insert(key, node_ptr);
971            self.attach_t2_head(node_ptr);
972
973            return None;
974        }
975
976        // Case 4: Complete miss
977        #[cfg(feature = "metrics")]
978        self.metrics.record_insert_new();
979
980        let l1_len = self.t1_len + self.b1.len();
981        if l1_len >= self.capacity {
982            if !self.b1.is_empty() {
983                self.b1.evict_lru();
984            }
985            if self.t1_len + self.t2_len >= self.capacity {
986                self.replace(false);
987            }
988        } else {
989            let total = self.t1_len + self.t2_len + self.b1.len() + self.b2.len();
990            if total >= 2 * self.capacity {
991                self.b2.evict_lru();
992            }
993            if self.t1_len + self.t2_len >= self.capacity {
994                self.replace(false);
995            }
996        }
997
998        let node = Box::new(Node {
999            prev: None,
1000            next: None,
1001            list: ListKind::T1,
1002            key: key.clone(),
1003            value,
1004        });
1005        let node_ptr = NonNull::new(Box::into_raw(node)).unwrap();
1006        self.map.insert(key, node_ptr);
1007        self.attach_t1_head(node_ptr);
1008
1009        None
1010    }
1011
1012    fn remove(&mut self, key: &K) -> Option<V> {
1013        let node_ptr = self.map.remove(key)?;
1014
1015        self.detach(node_ptr);
1016
1017        unsafe {
1018            let node = Box::from_raw(node_ptr.as_ptr());
1019            Some(node.value)
1020        }
1021    }
1022
1023    fn clear(&mut self) {
1024        #[cfg(feature = "metrics")]
1025        self.metrics.record_clear();
1026
1027        self.drop_all_nodes();
1028        self.map.clear();
1029        self.t1_head = None;
1030        self.t1_tail = None;
1031        self.t1_len = 0;
1032        self.t2_head = None;
1033        self.t2_tail = None;
1034        self.t2_len = 0;
1035        self.b1.clear();
1036        self.b2.clear();
1037        self.p = self.capacity / 2;
1038    }
1039}
1040
impl<K, V> Drop for ArcCore<K, V> {
    /// Frees every heap-allocated node still linked into T1 or T2.
    fn drop(&mut self) {
        self.drop_all_nodes();
    }
}
1046
impl<K, V> Clone for ArcCore<K, V>
where
    K: Clone + Eq + Hash,
    V: Clone,
{
    /// Deep-copies the cache: resident entries, ghost lists, and the
    /// adaptive parameter `p`, preserving the recency ordering of both
    /// resident lists.
    fn clone(&self) -> Self {
        let mut new_cache = ArcCore::new(self.capacity);
        new_cache.p = self.p;
        new_cache.b1 = self.b1.clone();
        new_cache.b2 = self.b2.clone();

        // Rebuild T1 from tail (LRU) to head (MRU) so attach_t1_head
        // reproduces the original ordering.
        let mut t1_entries = Vec::with_capacity(self.t1_len);
        let mut current = self.t1_tail;
        while let Some(ptr) = current {
            // SAFETY: list pointers reference live nodes owned by `self`,
            // which is borrowed for the duration of this loop.
            unsafe {
                let node = ptr.as_ref();
                t1_entries.push((node.key.clone(), node.value.clone()));
                current = node.prev;
            }
        }
        for (key, value) in t1_entries {
            let node = Box::new(Node {
                prev: None,
                next: None,
                list: ListKind::T1,
                key: key.clone(),
                value,
            });
            let ptr = NonNull::new(Box::into_raw(node)).unwrap();
            new_cache.map.insert(key, ptr);
            new_cache.attach_t1_head(ptr);
        }

        // Rebuild T2 the same way.
        let mut t2_entries = Vec::with_capacity(self.t2_len);
        let mut current = self.t2_tail;
        while let Some(ptr) = current {
            // SAFETY: same invariant as the T1 walk above.
            unsafe {
                let node = ptr.as_ref();
                t2_entries.push((node.key.clone(), node.value.clone()));
                current = node.prev;
            }
        }
        for (key, value) in t2_entries {
            let node = Box::new(Node {
                prev: None,
                next: None,
                list: ListKind::T2,
                key: key.clone(),
                value,
            });
            let ptr = NonNull::new(Box::into_raw(node)).unwrap();
            new_cache.map.insert(key, ptr);
            new_cache.attach_t2_head(ptr);
        }

        // Carry the counters over so the clone's metrics match the original.
        #[cfg(feature = "metrics")]
        {
            new_cache.metrics = self.metrics;
        }

        new_cache
    }
}
1113
impl<K, V> Default for ArcCore<K, V>
where
    K: Clone + Eq + Hash,
{
    /// Returns an empty `ArcCore` with capacity 0.
    ///
    /// A zero-capacity cache rejects all inserts; use [`ArcCore::new`] to
    /// specify a capacity.
    fn default() -> Self {
        Self::new(0)
    }
}
1125
#[cfg(feature = "metrics")]
impl<K, V> ArcCore<K, V>
where
    K: Clone + Eq + Hash,
{
    /// Returns a snapshot of cache metrics.
    ///
    /// Copies every counter plus the current length and capacity into a
    /// plain-data struct, so the snapshot remains stable while further
    /// cache operations mutate the live counters.
    pub fn metrics_snapshot(&self) -> ArcMetricsSnapshot {
        ArcMetricsSnapshot {
            get_calls: self.metrics.get_calls,
            get_hits: self.metrics.get_hits,
            get_misses: self.metrics.get_misses,
            insert_calls: self.metrics.insert_calls,
            insert_updates: self.metrics.insert_updates,
            insert_new: self.metrics.insert_new,
            evict_calls: self.metrics.evict_calls,
            evicted_entries: self.metrics.evicted_entries,
            t1_to_t2_promotions: self.metrics.t1_to_t2_promotions,
            b1_ghost_hits: self.metrics.b1_ghost_hits,
            b2_ghost_hits: self.metrics.b2_ghost_hits,
            p_increases: self.metrics.p_increases,
            p_decreases: self.metrics.p_decreases,
            t1_evictions: self.metrics.t1_evictions,
            t2_evictions: self.metrics.t2_evictions,
            cache_len: self.len(),
            capacity: self.capacity,
        }
    }
}
1154
#[cfg(feature = "metrics")]
impl<K, V> MetricsSnapshotProvider<ArcMetricsSnapshot> for ArcCore<K, V>
where
    K: Clone + Eq + Hash,
{
    /// Delegates to [`ArcCore::metrics_snapshot`].
    fn snapshot(&self) -> ArcMetricsSnapshot {
        self.metrics_snapshot()
    }
}
1164
1165// ---------------------------------------------------------------------------
1166// Iterators
1167// ---------------------------------------------------------------------------
1168
/// Iterator over shared references to cached key-value pairs.
///
/// Created by [`ArcCore::iter`]. Visits T1 entries (MRU to LRU) then T2.
pub struct Iter<'a, K, V> {
    // Next node to yield in whichever list is currently being walked.
    current: Option<NonNull<Node<K, V>>>,
    // Saved entry point of T2, consumed once T1 is exhausted.
    t2_head: Option<NonNull<Node<K, V>>>,
    // True once iteration has switched from T1 to T2.
    in_t2: bool,
    // Exact count of entries left; backs `size_hint`/`ExactSizeIterator`.
    remaining: usize,
    // Ties the iterator's lifetime to the cache borrow it came from.
    _marker: PhantomData<&'a (K, V)>,
}
1179
1180impl<'a, K, V> Iterator for Iter<'a, K, V> {
1181    type Item = (&'a K, &'a V);
1182
1183    fn next(&mut self) -> Option<Self::Item> {
1184        loop {
1185            if let Some(node_ptr) = self.current {
1186                unsafe {
1187                    let node = node_ptr.as_ref();
1188                    self.current = node.next;
1189                    self.remaining -= 1;
1190                    return Some((&node.key, &node.value));
1191                }
1192            } else if !self.in_t2 {
1193                self.in_t2 = true;
1194                self.current = self.t2_head;
1195            } else {
1196                return None;
1197            }
1198        }
1199    }
1200
1201    fn size_hint(&self) -> (usize, Option<usize>) {
1202        (self.remaining, Some(self.remaining))
1203    }
1204}
1205
// `remaining` is tracked exactly, so the ExactSizeIterator contract holds.
impl<K, V> ExactSizeIterator for Iter<'_, K, V> {}
// `next` keeps returning `None` once both lists are exhausted.
impl<K, V> FusedIterator for Iter<'_, K, V> {}
1208
/// Iterator over mutable references to cached values.
///
/// Created by [`ArcCore::iter_mut`]. Keys are shared; values are mutable.
pub struct IterMut<'a, K, V> {
    // Next node to yield in whichever list is currently being walked.
    current: Option<NonNull<Node<K, V>>>,
    // Saved entry point of T2, consumed once T1 is exhausted.
    t2_head: Option<NonNull<Node<K, V>>>,
    // True once iteration has switched from T1 to T2.
    in_t2: bool,
    // Exact count of entries left; backs `size_hint`/`ExactSizeIterator`.
    remaining: usize,
    // Ties the iterator to the mutable cache borrow it came from.
    _marker: PhantomData<&'a mut (K, V)>,
}
1219
1220impl<'a, K, V> Iterator for IterMut<'a, K, V> {
1221    type Item = (&'a K, &'a mut V);
1222
1223    fn next(&mut self) -> Option<Self::Item> {
1224        loop {
1225            if let Some(mut node_ptr) = self.current {
1226                unsafe {
1227                    let node = node_ptr.as_mut();
1228                    self.current = node.next;
1229                    self.remaining -= 1;
1230                    return Some((&node.key, &mut node.value));
1231                }
1232            } else if !self.in_t2 {
1233                self.in_t2 = true;
1234                self.current = self.t2_head;
1235            } else {
1236                return None;
1237            }
1238        }
1239    }
1240
1241    fn size_hint(&self) -> (usize, Option<usize>) {
1242        (self.remaining, Some(self.remaining))
1243    }
1244}
1245
// `remaining` is tracked exactly, so the ExactSizeIterator contract holds.
impl<K, V> ExactSizeIterator for IterMut<'_, K, V> {}
// `next` keeps returning `None` once both lists are exhausted.
impl<K, V> FusedIterator for IterMut<'_, K, V> {}
1248
/// Owning iterator over cached key-value pairs.
///
/// Created by calling [`IntoIterator`] on an `ArcCore`.
pub struct IntoIter<K, V> {
    // Next owned node to consume in the list currently being walked.
    current: Option<NonNull<Node<K, V>>>,
    // Saved entry point of T2, consumed once T1 is exhausted.
    t2_head: Option<NonNull<Node<K, V>>>,
    // True once iteration has switched from T1 to T2.
    in_t2: bool,
    // Exact count of entries left; backs `size_hint`/`ExactSizeIterator`.
    remaining: usize,
}
1258
1259impl<K, V> Iterator for IntoIter<K, V> {
1260    type Item = (K, V);
1261
1262    fn next(&mut self) -> Option<Self::Item> {
1263        loop {
1264            if let Some(node_ptr) = self.current {
1265                unsafe {
1266                    let node = Box::from_raw(node_ptr.as_ptr());
1267                    self.current = node.next;
1268                    self.remaining -= 1;
1269                    return Some((node.key, node.value));
1270                }
1271            } else if !self.in_t2 {
1272                self.in_t2 = true;
1273                self.current = self.t2_head;
1274            } else {
1275                return None;
1276            }
1277        }
1278    }
1279
1280    fn size_hint(&self) -> (usize, Option<usize>) {
1281        (self.remaining, Some(self.remaining))
1282    }
1283}
1284
// `remaining` is tracked exactly, so the ExactSizeIterator contract holds.
impl<K, V> ExactSizeIterator for IntoIter<K, V> {}
// `next` keeps returning `None` once both lists are exhausted.
impl<K, V> FusedIterator for IntoIter<K, V> {}
1287
1288impl<K, V> Drop for IntoIter<K, V> {
1289    fn drop(&mut self) {
1290        while self.next().is_some() {}
1291    }
1292}
1293
// SAFETY: IntoIter exclusively owns its nodes (moved out of ArcCore), so
// sending it across threads moves sole ownership, and sharing it only exposes
// &IntoIter, which hands out nothing without &mut access.
unsafe impl<K: Send, V: Send> Send for IntoIter<K, V> {}
unsafe impl<K: Sync, V: Sync> Sync for IntoIter<K, V> {}
1297
1298impl<K, V> IntoIterator for ArcCore<K, V> {
1299    type Item = (K, V);
1300    type IntoIter = IntoIter<K, V>;
1301
1302    fn into_iter(mut self) -> IntoIter<K, V> {
1303        let iter = IntoIter {
1304            current: self.t1_head,
1305            t2_head: self.t2_head,
1306            in_t2: false,
1307            remaining: self.t1_len + self.t2_len,
1308        };
1309        // Prevent Drop from double-freeing nodes the iterator now owns.
1310        self.t1_head = None;
1311        self.t1_tail = None;
1312        self.t1_len = 0;
1313        self.t2_head = None;
1314        self.t2_tail = None;
1315        self.t2_len = 0;
1316        iter
1317    }
1318}
1319
impl<'a, K, V> IntoIterator for &'a ArcCore<K, V> {
    type Item = (&'a K, &'a V);
    type IntoIter = Iter<'a, K, V>;

    /// Enables `for (k, v) in &cache`; delegates to [`ArcCore::iter`].
    fn into_iter(self) -> Iter<'a, K, V> {
        self.iter()
    }
}
1328
impl<'a, K, V> IntoIterator for &'a mut ArcCore<K, V> {
    type Item = (&'a K, &'a mut V);
    type IntoIter = IterMut<'a, K, V>;

    /// Enables `for (k, v) in &mut cache`; delegates to [`ArcCore::iter_mut`].
    fn into_iter(self) -> IterMut<'a, K, V> {
        self.iter_mut()
    }
}
1337
1338impl<K, V> FromIterator<(K, V)> for ArcCore<K, V>
1339where
1340    K: Clone + Eq + Hash,
1341{
1342    fn from_iter<I: IntoIterator<Item = (K, V)>>(iter: I) -> Self {
1343        let iter = iter.into_iter();
1344        let (lower, _) = iter.size_hint();
1345        let mut cache = ArcCore::new(lower);
1346        cache.extend(iter);
1347        cache
1348    }
1349}
1350
1351impl<K, V> Extend<(K, V)> for ArcCore<K, V>
1352where
1353    K: Clone + Eq + Hash,
1354{
1355    fn extend<I: IntoIterator<Item = (K, V)>>(&mut self, iter: I) {
1356        for (key, value) in iter {
1357            self.insert(key, value);
1358        }
1359    }
1360}
1361
#[cfg(test)]
mod tests {
    use super::*;

    // Constructor should start with empty lists and p at the midpoint.
    #[test]
    fn arc_new_cache() {
        let cache: ArcCore<String, i32> = ArcCore::new(100);
        assert_eq!(cache.capacity(), 100);
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());
        assert_eq!(cache.t1_len(), 0);
        assert_eq!(cache.t2_len(), 0);
        assert_eq!(cache.b1_len(), 0);
        assert_eq!(cache.b2_len(), 0);
        assert_eq!(cache.p_value(), 50); // Initial p = capacity / 2
    }

    #[test]
    fn arc_insert_and_get() {
        let mut cache = ArcCore::new(10);

        // First insert goes to T1
        cache.insert("key1", "value1");
        assert_eq!(cache.len(), 1);
        assert_eq!(cache.t1_len(), 1);
        assert_eq!(cache.t2_len(), 0);

        // Get promotes to T2
        assert_eq!(cache.get(&"key1"), Some(&"value1"));
        assert_eq!(cache.t1_len(), 0);
        assert_eq!(cache.t2_len(), 1);

        // Second get keeps in T2
        assert_eq!(cache.get(&"key1"), Some(&"value1"));
        assert_eq!(cache.t1_len(), 0);
        assert_eq!(cache.t2_len(), 1);
    }

    #[test]
    fn arc_update_existing() {
        let mut cache = ArcCore::new(10);

        cache.insert("key1", "value1");
        assert_eq!(cache.t1_len(), 1);

        // Update in T1 promotes to T2
        let old = cache.insert("key1", "new_value");
        assert_eq!(old, Some("value1"));
        assert_eq!(cache.len(), 1);
        assert_eq!(cache.t1_len(), 0);
        assert_eq!(cache.t2_len(), 1);

        assert_eq!(cache.get(&"key1"), Some(&"new_value"));
    }

    #[test]
    fn arc_eviction_fills_ghost_lists() {
        let mut cache = ArcCore::new(2);

        // Fill cache
        cache.insert("a", 1);
        cache.insert("b", 2);
        assert_eq!(cache.len(), 2);
        assert_eq!(cache.t1_len(), 2);

        // Insert third item triggers eviction
        cache.insert("c", 3);
        assert_eq!(cache.len(), 2);
        assert_eq!(cache.t1_len(), 2); // c and b (a evicted)
        assert_eq!(cache.b1_len(), 1); // a moved to B1
        assert!(!cache.contains(&"a"));
    }

    #[test]
    fn arc_ghost_hit_promotes_to_t2() {
        let mut cache = ArcCore::new(2);

        // Fill and evict
        cache.insert("a", 1);
        cache.insert("b", 2);
        cache.insert("c", 3); // Evicts "a" to B1

        cache.debug_validate_invariants();

        assert!(!cache.contains(&"a"));
        assert_eq!(cache.b1_len(), 1);
        assert!(cache.b1.contains(&"a"));

        // Ghost hit on "a"
        // This should: remove "a" from B1, make space (evicting something else), insert "a" to T2
        cache.insert("a", 10);
        cache.debug_validate_invariants();

        println!(
            "After ghost hit: len={}, t1={}, t2={}, b1={}, b2={}",
            cache.len(),
            cache.t1_len(),
            cache.t2_len(),
            cache.b1_len(),
            cache.b2_len()
        );

        assert_eq!(
            cache.len(),
            2,
            "Cache length should be 2, got {}",
            cache.len()
        );
        assert_eq!(cache.t2_len(), 1); // "a" goes to T2 (ghost hit)
        assert!(!cache.b1.contains(&"a")); // "a" removed from B1
        // Note: B1 may still have entries from the eviction that happened to make space for "a"
    }

    // A B1 ghost hit should grow the recency target p.
    #[test]
    fn arc_adaptation_increases_p() {
        let mut cache = ArcCore::new(4);
        let initial_p = cache.p_value();

        // Create scenario with B1 ghost hit
        cache.insert("a", 1);
        cache.insert("b", 2);
        cache.insert("c", 3);
        cache.insert("d", 4);
        cache.insert("e", 5); // Evicts "a" to B1

        println!(
            "Before ghost hit: p={}, t1={}, t2={}, b1={}, b2={}",
            cache.p_value(),
            cache.t1_len(),
            cache.t2_len(),
            cache.b1_len(),
            cache.b2_len()
        );
        println!("B1 contains a={}", cache.b1.contains(&"a"));

        // Ghost hit in B1 should increase p
        cache.insert("a", 10);

        println!(
            "After ghost hit: p={}, t1={}, t2={}, b1={}, b2={}",
            cache.p_value(),
            cache.t1_len(),
            cache.t2_len(),
            cache.b1_len(),
            cache.b2_len()
        );

        // Note: If b1.len() == b2.len() initially, delta would be 1
        // p should increase by at least 1
        assert!(
            cache.p_value() > initial_p,
            "Expected p to increase from {} but got {}",
            initial_p,
            cache.p_value()
        );
    }

    #[test]
    fn arc_remove() {
        let mut cache = ArcCore::new(10);

        cache.insert("key1", "value1");
        cache.insert("key2", "value2");
        assert_eq!(cache.len(), 2);

        let removed = cache.remove(&"key1");
        assert_eq!(removed, Some("value1"));
        assert_eq!(cache.len(), 1);
        assert!(!cache.contains(&"key1"));
        assert!(cache.contains(&"key2"));
    }

    #[test]
    fn arc_clear() {
        let mut cache = ArcCore::new(10);

        cache.insert("key1", "value1");
        cache.insert("key2", "value2");
        cache.get(&"key1"); // Promote to T2

        cache.clear();

        assert_eq!(cache.len(), 0);
        assert_eq!(cache.t1_len(), 0);
        assert_eq!(cache.t2_len(), 0);
        assert_eq!(cache.b1_len(), 0);
        assert_eq!(cache.b2_len(), 0);
        assert!(cache.is_empty());
    }

    #[test]
    fn arc_contains() {
        let mut cache = ArcCore::new(10);

        assert!(!cache.contains(&"key1"));

        cache.insert("key1", "value1");
        assert!(cache.contains(&"key1"));

        cache.remove(&"key1");
        assert!(!cache.contains(&"key1"));
    }

    #[test]
    fn arc_promotion_t1_to_t2() {
        let mut cache = ArcCore::new(10);

        // Insert into T1
        cache.insert("key", "value");
        assert_eq!(cache.t1_len(), 1);
        assert_eq!(cache.t2_len(), 0);

        // First access promotes to T2
        cache.get(&"key");
        assert_eq!(cache.t1_len(), 0);
        assert_eq!(cache.t2_len(), 1);

        // Second access stays in T2
        cache.get(&"key");
        assert_eq!(cache.t1_len(), 0);
        assert_eq!(cache.t2_len(), 1);
    }

    #[test]
    fn arc_multiple_entries() {
        let mut cache = ArcCore::new(5);

        for i in 0..5 {
            cache.insert(i, i * 10);
        }

        assert_eq!(cache.len(), 5);

        for i in 0..5 {
            assert_eq!(cache.get(&i), Some(&(i * 10)));
        }

        // All should be promoted to T2 after access
        assert_eq!(cache.t2_len(), 5);
        assert_eq!(cache.t1_len(), 0);
    }

    #[test]
    fn arc_eviction_and_ghost_tracking() {
        let mut cache = ArcCore::new(3);

        // Fill cache
        cache.insert(1, 100);
        cache.insert(2, 200);
        cache.insert(3, 300);

        // Access 1 and 2 to promote them to T2
        cache.get(&1);
        cache.get(&2);

        assert_eq!(cache.t1_len(), 1); // 3 remains in T1
        assert_eq!(cache.t2_len(), 2); // 1, 2 promoted to T2

        // Insert 4. With p=1, t1_len=1, t2_len=2:
        // Condition: t1_len > p → 1 > 1 is false
        // So we evict from T2 (not T1)
        // T2 has [2 (MRU), 1 (LRU)], so 1 gets evicted to B2
        cache.insert(4, 400);

        assert_eq!(cache.len(), 3);
        assert!(!cache.contains(&1)); // 1 was evicted (LRU of T2)
        assert!(cache.contains(&2)); // 2 remains (MRU of T2)
        assert!(cache.contains(&3)); // 3 remains (in T1)
        assert!(cache.contains(&4)); // 4 just inserted
        assert_eq!(cache.b2_len(), 1); // 1 moved to B2 (evicted from T2)
    }

    // Capacity 0 must silently reject every insert.
    #[test]
    fn arc_zero_capacity() {
        let mut cache = ArcCore::new(0);
        assert_eq!(cache.capacity(), 0);
        assert_eq!(cache.len(), 0);

        cache.insert("key", "value");
        assert_eq!(cache.len(), 0);
        assert!(!cache.contains(&"key"));
    }

    // ==============================================
    // Regression Tests
    // ==============================================

    // The ARC paper bounds the full directory (T1+T2+B1+B2) by 2c.
    #[test]
    fn ghost_directory_size_within_two_times_capacity() {
        let c = 5usize;
        let mut cache: ArcCore<u64, u64> = ArcCore::new(c);

        // Fill the cache, then promote everything to T2.
        for i in 0..c as u64 {
            cache.insert(i, i);
        }
        for i in 0..c as u64 {
            cache.get(&i);
        }
        // Successive insert waves push older entries into the ghost lists.
        for i in c as u64..2 * c as u64 {
            cache.insert(i, i);
        }
        for i in 2 * c as u64..3 * c as u64 {
            cache.insert(i, i);
        }
        for i in 2 * c as u64..3 * c as u64 {
            cache.get(&i);
        }
        for i in 3 * c as u64..8 * c as u64 {
            cache.insert(i, i);
        }

        let t = cache.t1_len() + cache.t2_len();
        let b = cache.b1_len() + cache.b2_len();
        let total = t + b;

        assert!(
            total <= 2 * c,
            "ARC directory size ({}) exceeds 2*capacity ({}). \
             B1={}, B2={}, T1={}, T2={}",
            total,
            2 * c,
            cache.b1_len(),
            cache.b2_len(),
            cache.t1_len(),
            cache.t2_len(),
        );
    }

    // With the cache full, the ghost lists together may hold at most c keys.
    #[test]
    fn ghost_lists_bounded_when_cache_full() {
        let c = 10usize;
        let mut cache: ArcCore<u64, u64> = ArcCore::new(c);

        for i in 0..500u64 {
            cache.insert(i, i);
        }

        let t = cache.t1_len() + cache.t2_len();
        let b1 = cache.b1_len();
        let b2 = cache.b2_len();

        assert!(
            b1 + b2 <= c,
            "Ghost lists hold {} entries (B1={}, B2={}) while cache holds {} (T1+T2). \
             Paper requires B1+B2 <= {}",
            b1 + b2,
            b1,
            b2,
            t,
            c,
        );
    }
}