use std::fmt;
use std::hash::Hash;
use std::sync::Arc;
use crate::ds::{SlotArena, SlotId};
#[cfg(feature = "concurrency")]
use parking_lot::RwLock;
use rustc_hash::FxHashMap;
#[cfg(feature = "metrics")]
use crate::metrics::metrics_impl::LruMetrics;
#[cfg(feature = "metrics")]
use crate::metrics::snapshot::LruMetricsSnapshot;
#[cfg(feature = "metrics")]
use crate::metrics::traits::{
CoreMetricsRecorder, LruMetricsReadRecorder, LruMetricsRecorder, MetricsSnapshotProvider,
};
use crate::traits::{Cache, EvictingCache, RecencyTracking, VictimInspectable};
/// Intrusive doubly-linked-list node stored in the slot arena.
///
/// Links are arena `SlotId`s rather than pointers: `prev` walks toward the
/// MRU head, `next` walks toward the LRU tail (see `attach_front`/`pop_tail`).
struct Node<K, V> {
    prev: Option<SlotId>,
    next: Option<SlotId>,
    key: K,
    value: Arc<V>,
}
impl<K: Clone, V> Clone for Node<K, V> {
fn clone(&self) -> Self {
Node {
prev: self.prev,
next: self.next,
key: self.key.clone(),
value: Arc::clone(&self.value),
}
}
}
/// Single-threaded LRU cache core.
///
/// A hash map resolves keys to arena slots; the nodes in the arena form an
/// intrusive doubly-linked list ordered by recency (`head` = most recently
/// used, `tail` = least recently used / next eviction candidate).
pub struct LruCore<K, V> {
    // Slot storage for list nodes; `SlotId`s are stable handles into it.
    arena: SlotArena<Node<K, V>>,
    // Key -> slot lookup (FxHashMap: fast, non-DoS-resistant hasher).
    map: FxHashMap<K, SlotId>,
    // Most recently used node, if any.
    head: Option<SlotId>,
    // Least recently used node, if any.
    tail: Option<SlotId>,
    // Maximum entry count; 0 disables insertion entirely.
    capacity: usize,
    #[cfg(feature = "metrics")]
    metrics: LruMetrics,
}
impl<K, V> LruCore<K, V>
where
    K: Copy + Eq + Hash,
{
    /// Creates an empty cache holding at most `capacity` entries.
    ///
    /// Both the arena and the map are pre-sized so filling the cache to
    /// capacity never reallocates.
    #[inline]
    pub fn new(capacity: usize) -> Self {
        LruCore {
            arena: SlotArena::with_capacity(capacity),
            map: FxHashMap::with_capacity_and_hasher(capacity, Default::default()),
            head: None,
            tail: None,
            capacity,
            #[cfg(feature = "metrics")]
            metrics: LruMetrics::default(),
        }
    }

    /// Unlinks node `id` from the recency list without touching its arena
    /// slot. `head`/`tail` are patched when the node was at either end.
    #[inline(always)]
    fn detach(&mut self, id: SlotId) {
        let node = self.arena.get(id).expect("detach: stale SlotId");
        let prev = node.prev;
        let next = node.next;
        match prev {
            Some(prev_id) => {
                self.arena.get_mut(prev_id).expect("detach: stale prev").next = next;
            },
            None => self.head = next,
        }
        match next {
            Some(next_id) => {
                self.arena.get_mut(next_id).expect("detach: stale next").prev = prev;
            },
            None => self.tail = prev,
        }
    }

    /// Links a currently-unlinked node `id` in as the new head (MRU slot).
    #[inline(always)]
    fn attach_front(&mut self, id: SlotId) {
        {
            let node = self.arena.get_mut(id).expect("attach_front: stale SlotId");
            node.prev = None;
            node.next = self.head;
        }
        match self.head {
            Some(old_head) => {
                self.arena
                    .get_mut(old_head)
                    .expect("attach_front: stale head")
                    .prev = Some(id);
            },
            None => self.tail = Some(id),
        }
        self.head = Some(id);
    }

    /// Removes and returns the least recently used entry (the list tail),
    /// or `None` when the cache is empty.
    ///
    /// The caller is responsible for removing the key from `map`.
    #[inline(always)]
    fn pop_tail(&mut self) -> Option<(K, Arc<V>)> {
        let tail_id = self.tail?;
        // Remove the slot once and read `prev` from the returned node.
        // (Previously this did a separate `get` lookup first, and a stale
        // tail id silently returned `None` instead of flagging the broken
        // invariant like every other helper does.)
        let node = self.arena.remove(tail_id).expect("pop_tail: stale tail");
        self.tail = node.prev;
        match self.tail {
            Some(t) => {
                self.arena.get_mut(t).expect("pop_tail: stale new tail").next = None;
            },
            None => self.head = None,
        }
        Some((node.key, node.value))
    }

    /// Debug-only consistency check: walks the list from `head`, verifying
    /// every node's key is in `map`, that there is no cycle, and that the
    /// list length equals `map.len()`. Compiles to a no-op in release.
    fn validate_invariants(&self) {
        #[cfg(debug_assertions)]
        {
            if self.map.is_empty() {
                debug_assert!(self.head.is_none());
                debug_assert!(self.tail.is_none());
                return;
            }
            let mut count = 0usize;
            let mut current = self.head;
            while let Some(id) = current {
                count += 1;
                let node = self.arena.get(id).expect("validate: stale SlotId");
                debug_assert!(self.map.contains_key(&node.key));
                current = node.next;
                // A walk longer than the map means a cycle in the links.
                if count > self.map.len() {
                    panic!("Cycle detected in list");
                }
            }
            debug_assert_eq!(count, self.map.len());
        }
    }
}
impl<K, V> Cache<K, Arc<V>> for LruCore<K, V>
where
    K: Copy + Eq + Hash,
{
    /// Returns `true` if `key` is cached; does not update recency.
    #[inline]
    fn contains(&self, key: &K) -> bool {
        self.map.contains_key(key)
    }
    /// Number of entries currently stored.
    #[inline]
    fn len(&self) -> usize {
        self.map.len()
    }
    /// Maximum number of entries before inserts start evicting.
    #[inline]
    fn capacity(&self) -> usize {
        self.capacity
    }
    /// Borrows the value for `key` without promoting it to MRU.
    #[inline]
    fn peek(&self, key: &K) -> Option<&Arc<V>> {
        let &id = self.map.get(key)?;
        self.arena.get(id).map(|node| &node.value)
    }
    /// Looks up `key`; on a hit the entry is promoted to the front
    /// (most-recently-used position) of the list.
    #[inline]
    fn get(&mut self, key: &K) -> Option<&Arc<V>> {
        let &id = match self.map.get(key) {
            Some(id) => id,
            None => {
                #[cfg(feature = "metrics")]
                self.metrics.record_get_miss();
                return None;
            },
        };
        #[cfg(feature = "metrics")]
        self.metrics.record_get_hit();
        // Promote: unlink then relink at the head.
        self.detach(id);
        self.attach_front(id);
        #[cfg(debug_assertions)]
        self.validate_invariants();
        self.arena.get(id).map(|node| &node.value)
    }
    /// Inserts `key -> value`.
    ///
    /// An existing key is updated in place (old value returned) and
    /// promoted to MRU. A new key may evict the LRU entry once the cache is
    /// full. A zero-capacity cache silently rejects inserts (returns `None`
    /// and drops `value`).
    #[inline]
    fn insert(&mut self, key: K, value: Arc<V>) -> Option<Arc<V>> {
        #[cfg(feature = "metrics")]
        self.metrics.record_insert_call();
        if let Some(&id) = self.map.get(&key) {
            #[cfg(feature = "metrics")]
            self.metrics.record_insert_update();
            let node = self.arena.get_mut(id).expect("insert: stale SlotId");
            // Swap the value in place; the slot and map entry are reused.
            let previous = std::mem::replace(&mut node.value, value);
            self.detach(id);
            self.attach_front(id);
            #[cfg(debug_assertions)]
            self.validate_invariants();
            return Some(previous);
        }
        if self.capacity == 0 {
            return None;
        }
        #[cfg(feature = "metrics")]
        self.metrics.record_insert_new();
        // Evict the tail first so the arena never exceeds its capacity.
        if self.map.len() >= self.capacity {
            #[cfg(feature = "metrics")]
            self.metrics.record_evict_call();
            if let Some((evicted_key, _)) = self.pop_tail() {
                self.map.remove(&evicted_key);
                #[cfg(feature = "metrics")]
                self.metrics.record_evicted_entry();
            }
        }
        let id = self.arena.insert(Node {
            prev: None,
            next: None,
            key,
            value,
        });
        self.map.insert(key, id);
        self.attach_front(id);
        #[cfg(debug_assertions)]
        self.validate_invariants();
        None
    }
    /// Removes `key`, returning its value if present.
    #[inline]
    fn remove(&mut self, key: &K) -> Option<Arc<V>> {
        let id = self.map.remove(key)?;
        self.detach(id);
        let node = self.arena.remove(id).expect("remove: stale SlotId");
        #[cfg(debug_assertions)]
        self.validate_invariants();
        Some(node.value)
    }
    /// Drops every entry. The arena is rebuilt at the original capacity so
    /// backing storage stays pre-sized.
    fn clear(&mut self) {
        #[cfg(feature = "metrics")]
        self.metrics.record_clear();
        self.map.clear();
        self.head = None;
        self.tail = None;
        self.arena = SlotArena::with_capacity(self.capacity);
        // Unconditional call (unlike the cfg-gated call sites elsewhere);
        // the body is itself debug-gated so this is a release no-op, and it
        // presumably keeps the function referenced in release builds.
        self.validate_invariants();
    }
}
impl<K, V> LruCore<K, V>
where
    K: Copy + Eq + Hash,
{
    /// Returns a clone of the `Arc` for `key` without promoting the entry.
    ///
    /// Unlike [`Cache::peek`], this returns an owned `Arc` rather than a
    /// borrow. NOTE(review): this records the *peek_lru* counters — there is
    /// no dedicated `peek` metric in the snapshot — confirm that sharing the
    /// counters is intended.
    #[inline]
    pub fn peek(&self, key: &K) -> Option<Arc<V>> {
        #[cfg(feature = "metrics")]
        self.metrics.record_peek_lru_call();
        if let Some(&id) = self.map.get(key) {
            #[cfg(feature = "metrics")]
            self.metrics.record_peek_lru_found();
            let node = self.arena.get(id).expect("peek: stale SlotId");
            return Some(Arc::clone(&node.value));
        }
        None
    }
    /// Removes and returns the least recently used entry.
    #[inline]
    pub fn pop_lru(&mut self) -> Option<(K, Arc<V>)> {
        #[cfg(feature = "metrics")]
        self.metrics.record_pop_lru_call();
        let (key, value) = self.pop_tail()?;
        self.map.remove(&key);
        #[cfg(debug_assertions)]
        self.validate_invariants();
        #[cfg(feature = "metrics")]
        self.metrics.record_pop_lru_found();
        Some((key, value))
    }
    /// Borrows the least recently used entry without removing or promoting it.
    #[inline]
    pub fn peek_lru(&self) -> Option<(&K, &Arc<V>)> {
        #[cfg(feature = "metrics")]
        self.metrics.record_peek_lru_call();
        let tail_id = self.tail?;
        let node = self.arena.get(tail_id)?;
        #[cfg(feature = "metrics")]
        self.metrics.record_peek_lru_found();
        Some((&node.key, &node.value))
    }
    /// Promotes `key` to most recently used; returns whether it was present.
    #[inline]
    pub fn touch(&mut self, key: &K) -> bool {
        #[cfg(feature = "metrics")]
        self.metrics.record_touch_call();
        if let Some(&id) = self.map.get(key) {
            self.detach(id);
            self.attach_front(id);
            #[cfg(debug_assertions)]
            self.validate_invariants();
            #[cfg(feature = "metrics")]
            self.metrics.record_touch_found();
            true
        } else {
            false
        }
    }
    /// Returns `key`'s zero-based distance from the MRU end (0 = most
    /// recently used). Linear scan over the list — O(len).
    pub fn recency_rank(&self, key: &K) -> Option<usize> {
        // Consistency fix: this impl used the verbose `(&self.metrics).record_…`
        // receiver form; `self.metrics.record_…` auto-refs to the same call
        // and matches the rest of the file.
        #[cfg(feature = "metrics")]
        self.metrics.record_recency_rank_call();
        let &target_id = self.map.get(key)?;
        let mut rank = 0usize;
        let mut current = self.head;
        while let Some(id) = current {
            #[cfg(feature = "metrics")]
            self.metrics.record_recency_rank_scan_step();
            if id == target_id {
                #[cfg(feature = "metrics")]
                self.metrics.record_recency_rank_found();
                return Some(rank);
            }
            rank += 1;
            current = self.arena.get(id).expect("recency_rank: stale SlotId").next;
        }
        None
    }
}
impl<K, V> EvictingCache<K, Arc<V>> for LruCore<K, V>
where
    K: Copy + Eq + Hash,
{
    /// Evicts the least recently used entry (delegates to `pop_lru`).
    #[inline]
    fn evict_one(&mut self) -> Option<(K, Arc<V>)> {
        self.pop_lru()
    }
}
impl<K, V> VictimInspectable<K, Arc<V>> for LruCore<K, V>
where
    K: Copy + Eq + Hash,
{
    /// The eviction victim is the LRU entry (delegates to `peek_lru`).
    #[inline]
    fn peek_victim(&self) -> Option<(&K, &Arc<V>)> {
        self.peek_lru()
    }
}
impl<K, V> RecencyTracking<K, Arc<V>> for LruCore<K, V>
where
    K: Copy + Eq + Hash,
{
    /// Trait form of the inherent `touch`; the fully-qualified call avoids
    /// resolving back into this trait method.
    #[inline]
    fn touch(&mut self, key: &K) -> bool {
        LruCore::touch(self, key)
    }
    /// Trait form of the inherent `recency_rank`.
    fn recency_rank(&self, key: &K) -> Option<usize> {
        LruCore::recency_rank(self, key)
    }
}
#[cfg(feature = "metrics")]
impl<K, V> LruCore<K, V>
where
    K: Copy + Eq + Hash,
{
    /// Copies every counter plus the current len/capacity into an owned
    /// snapshot.
    ///
    /// Counters recorded on `&self` read paths (peek/recency-rank) are read
    /// via `.get()` — presumably interior-mutability cells; the rest are
    /// plain fields updated through `&mut self`.
    pub fn metrics_snapshot(&self) -> LruMetricsSnapshot {
        LruMetricsSnapshot {
            get_calls: self.metrics.get_calls,
            get_hits: self.metrics.get_hits,
            get_misses: self.metrics.get_misses,
            insert_calls: self.metrics.insert_calls,
            insert_updates: self.metrics.insert_updates,
            insert_new: self.metrics.insert_new,
            evict_calls: self.metrics.evict_calls,
            evicted_entries: self.metrics.evicted_entries,
            pop_lru_calls: self.metrics.pop_lru_calls,
            pop_lru_found: self.metrics.pop_lru_found,
            peek_lru_calls: self.metrics.peek_lru_calls.get(),
            peek_lru_found: self.metrics.peek_lru_found.get(),
            touch_calls: self.metrics.touch_calls,
            touch_found: self.metrics.touch_found,
            recency_rank_calls: self.metrics.recency_rank_calls.get(),
            recency_rank_found: self.metrics.recency_rank_found.get(),
            recency_rank_scan_steps: self.metrics.recency_rank_scan_steps.get(),
            cache_len: self.map.len(),
            capacity: self.capacity,
        }
    }
}
impl<K, V> fmt::Debug for LruCore<K, V>
where
    K: Copy + Eq + Hash + fmt::Debug,
{
    /// Summarizes the cache as `LruCore { len, capacity, .. }` without
    /// formatting any keys or values.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_struct("LruCore");
        builder.field("len", &self.len());
        builder.field("capacity", &self.capacity());
        builder.finish_non_exhaustive()
    }
}
impl<K, V> Default for LruCore<K, V>
where
    K: Copy + Eq + Hash,
{
    /// Default cache with room for 16 entries (matches the concurrent
    /// wrapper's default).
    fn default() -> Self {
        Self::new(16)
    }
}
impl<K, V> Extend<(K, Arc<V>)> for LruCore<K, V>
where
    K: Copy + Eq + Hash,
{
    /// Inserts each pair in iteration order; existing keys are updated and
    /// normal LRU eviction applies, so later items may evict earlier ones.
    fn extend<T: IntoIterator<Item = (K, Arc<V>)>>(&mut self, iter: T) {
        iter.into_iter().for_each(|(key, value)| {
            self.insert(key, value);
        });
    }
}
impl<K, V> FromIterator<(K, Arc<V>)> for LruCore<K, V>
where
    K: Copy + Eq + Hash,
{
    /// Builds a cache sized to hold every item from the iterator.
    ///
    /// The items are collected first so the capacity reflects the actual
    /// item count. The previous streaming version sized the cache from
    /// `size_hint` via `upper.unwrap_or(lower)`, which is 0 for iterators
    /// reporting `(0, None)` — a zero-capacity cache rejects all inserts,
    /// silently dropping every element. Collecting also mirrors the
    /// `FromIterator` impl on `ConcurrentLruCache`.
    fn from_iter<T: IntoIterator<Item = (K, Arc<V>)>>(iter: T) -> Self {
        let items: Vec<_> = iter.into_iter().collect();
        let mut cache = Self::new(items.len());
        cache.extend(items);
        cache
    }
}
impl<K, V> Clone for LruCore<K, V>
where
    K: Copy + Eq + Hash,
{
    /// Deep-copies the cache structure (arena, map, list endpoints).
    ///
    /// NOTE(review): metrics restart from zero on the clone instead of
    /// copying the source's counters — confirm this is intended.
    fn clone(&self) -> Self {
        LruCore {
            arena: self.arena.clone(),
            map: self.map.clone(),
            head: self.head,
            tail: self.tail,
            capacity: self.capacity,
            #[cfg(feature = "metrics")]
            metrics: LruMetrics::default(),
        }
    }
}
/// Borrowing iterator over cache entries in MRU -> LRU order.
pub struct Iter<'a, K, V> {
    // Arena the node links live in.
    arena: &'a SlotArena<Node<K, V>>,
    // Next node to yield; starts at the cache head.
    current: Option<SlotId>,
    // Entries left to yield; backs `size_hint`.
    remaining: usize,
}
impl<'a, K, V> Iterator for Iter<'a, K, V> {
    type Item = (&'a K, &'a Arc<V>);
    fn next(&mut self) -> Option<Self::Item> {
        let id = self.current?;
        let node = self.arena.get(id)?;
        // Follow the link toward the LRU tail.
        self.current = node.next;
        self.remaining -= 1;
        Some((&node.key, &node.value))
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.remaining, Some(self.remaining))
    }
}
impl<K, V> ExactSizeIterator for Iter<'_, K, V> {}
impl<K, V> fmt::Debug for Iter<'_, K, V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Iter")
            .field("remaining", &self.remaining)
            .finish()
    }
}
/// Owning iterator over a cache's entries in MRU -> LRU order; entries are
/// removed from the cache as they are yielded.
pub struct IntoIter<K, V> {
    cache: LruCore<K, V>,
}
impl<K, V> Iterator for IntoIter<K, V>
where
    K: Copy + Eq + Hash,
{
    type Item = (K, Arc<V>);
    fn next(&mut self) -> Option<Self::Item> {
        let head_id = self.cache.head?;
        let next = self
            .cache
            .arena
            .get(head_id)
            .expect("IntoIter: stale head")
            .next;
        let node = self
            .cache
            .arena
            .remove(head_id)
            .expect("IntoIter: stale head");
        // Fix: drop the key from the map as the entry is yielded. The map
        // previously kept all keys for the whole iteration, so `size_hint`
        // (and thus `ExactSizeIterator::len`) reported the original cache
        // size at every step, violating the ExactSizeIterator contract.
        self.cache.map.remove(&node.key);
        self.cache.head = next;
        match next {
            Some(next_id) => {
                self.cache
                    .arena
                    .get_mut(next_id)
                    .expect("IntoIter: stale next")
                    .prev = None;
            },
            None => self.cache.tail = None,
        }
        Some((node.key, node.value))
    }
    /// Exact: the map now shrinks in lockstep with the list.
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.cache.map.len();
        (len, Some(len))
    }
}
impl<K, V> ExactSizeIterator for IntoIter<K, V> where K: Copy + Eq + Hash {}
impl<K, V> fmt::Debug for IntoIter<K, V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("IntoIter").finish_non_exhaustive()
    }
}
impl<K, V> LruCore<K, V>
where
    K: Copy + Eq + Hash,
{
    /// Borrowing iterator over entries in MRU -> LRU order; does not change
    /// recency.
    pub fn iter(&self) -> Iter<'_, K, V> {
        Iter {
            arena: &self.arena,
            current: self.head,
            remaining: self.map.len(),
        }
    }
}
impl<K, V> IntoIterator for LruCore<K, V>
where
    K: Copy + Eq + Hash,
{
    type Item = (K, Arc<V>);
    type IntoIter = IntoIter<K, V>;
    /// Consumes the cache, yielding owned entries in MRU -> LRU order.
    fn into_iter(self) -> Self::IntoIter {
        IntoIter { cache: self }
    }
}
impl<'a, K, V> IntoIterator for &'a LruCore<K, V>
where
    K: Copy + Eq + Hash,
{
    type Item = (&'a K, &'a Arc<V>);
    type IntoIter = Iter<'a, K, V>;
    /// Enables `for (k, v) in &cache` (borrowing, MRU -> LRU order).
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
/// Thread-safe LRU cache: a `LruCore` behind `Arc<RwLock<_>>`. Cloning the
/// handle shares the same underlying cache.
#[cfg(feature = "concurrency")]
pub struct ConcurrentLruCache<K, V> {
    inner: Arc<RwLock<LruCore<K, V>>>,
}
#[cfg(feature = "concurrency")]
impl<K, V> Clone for ConcurrentLruCache<K, V> {
    /// Cheap handle clone (`Arc` bump); both handles see the same cache.
    fn clone(&self) -> Self {
        ConcurrentLruCache {
            inner: Arc::clone(&self.inner),
        }
    }
}
#[cfg(feature = "concurrency")]
impl<K, V> fmt::Debug for ConcurrentLruCache<K, V>
where
    K: Copy + Eq + Hash + fmt::Debug,
{
    /// Takes the read lock to report len/capacity; may block behind writers.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let cache = self.inner.read();
        f.debug_struct("ConcurrentLruCache")
            .field("len", &cache.len())
            .field("capacity", &cache.capacity())
            .finish_non_exhaustive()
    }
}
#[cfg(feature = "concurrency")]
impl<K, V> Default for ConcurrentLruCache<K, V>
where
    K: Copy + Eq + Hash + Send + Sync,
    V: Send + Sync,
{
    /// Default capacity of 16, matching `LruCore::default`.
    fn default() -> Self {
        Self::new(16)
    }
}
#[cfg(feature = "concurrency")]
impl<K, V> ConcurrentLruCache<K, V>
where
    K: Copy + Eq + Hash + Send + Sync,
    V: Send + Sync,
{
    /// Creates a shared, lock-protected LRU cache with the given capacity.
    pub fn new(capacity: usize) -> Self {
        ConcurrentLruCache {
            inner: Arc::new(RwLock::new(LruCore::new(capacity))),
        }
    }
    /// Wraps `value` in an `Arc` and inserts it; returns any displaced value.
    pub fn insert(&self, key: K, value: V) -> Option<Arc<V>> {
        self.insert_arc(key, Arc::new(value))
    }
    /// Inserts an already-shared value; returns any displaced value.
    pub fn insert_arc(&self, key: K, value: Arc<V>) -> Option<Arc<V>> {
        self.inner.write().insert(key, value)
    }
    /// Looks up `key` and promotes it to MRU. Needs the write lock because a
    /// hit mutates the recency list.
    pub fn get(&self, key: &K) -> Option<Arc<V>> {
        self.inner.write().get(key).cloned()
    }
    /// Reads `key` without promoting it (read lock only).
    pub fn peek(&self, key: &K) -> Option<Arc<V>> {
        self.inner.read().peek(key)
    }
    /// Removes `key`, returning its value if present.
    pub fn remove(&self, key: &K) -> Option<Arc<V>> {
        self.inner.write().remove(key)
    }
    /// Promotes `key` to most recently used; `true` if it was present.
    pub fn touch(&self, key: &K) -> bool {
        self.inner.write().touch(key)
    }
    /// Current number of cached entries.
    pub fn len(&self) -> usize {
        self.inner.read().len()
    }
    /// `true` when the cache holds no entries.
    pub fn is_empty(&self) -> bool {
        self.inner.read().len() == 0
    }
    /// Configured maximum number of entries.
    pub fn capacity(&self) -> usize {
        self.inner.read().capacity()
    }
    /// `true` if `key` is cached; no recency update.
    pub fn contains(&self, key: &K) -> bool {
        self.inner.read().contains(key)
    }
    /// Removes every entry.
    pub fn clear(&self) {
        self.inner.write().clear()
    }
    /// Removes and returns the least recently used entry.
    pub fn pop_lru(&self) -> Option<(K, Arc<V>)> {
        self.inner.write().pop_lru()
    }
    /// Copies out the least recently used entry without removing it.
    pub fn peek_lru(&self) -> Option<(K, Arc<V>)> {
        self.inner
            .read()
            .peek_lru()
            .map(|(k, v)| (*k, Arc::clone(v)))
    }
}
#[cfg(all(feature = "metrics", feature = "concurrency"))]
impl<K, V> ConcurrentLruCache<K, V>
where
    K: Copy + Eq + Hash + Send + Sync,
    V: Send + Sync,
{
    /// Snapshots the inner cache's metrics under the read lock.
    pub fn metrics_snapshot(&self) -> LruMetricsSnapshot {
        let cache = self.inner.read();
        cache.metrics_snapshot()
    }
}
#[cfg(feature = "concurrency")]
impl<K, V> Extend<(K, V)> for ConcurrentLruCache<K, V>
where
    K: Copy + Eq + Hash + Send + Sync,
    V: Send + Sync,
{
    /// Inserts each pair (Arc-wrapping the values); the write lock is taken
    /// and released once per item, so other threads may interleave.
    fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
        for (key, value) in iter {
            self.insert(key, value);
        }
    }
}
#[cfg(feature = "concurrency")]
impl<K, V> FromIterator<(K, V)> for ConcurrentLruCache<K, V>
where
    K: Copy + Eq + Hash + Send + Sync,
    V: Send + Sync,
{
    /// Collects first so the capacity equals the item count; duplicate keys
    /// update in place, so the final length may be smaller.
    fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
        let items: Vec<_> = iter.into_iter().collect();
        let cache = Self::new(items.len());
        for (key, value) in items {
            cache.insert(key, value);
        }
        cache
    }
}
#[cfg(feature = "metrics")]
impl<K, V> MetricsSnapshotProvider<LruMetricsSnapshot> for LruCore<K, V>
where
    K: Copy + Eq + Hash,
{
    /// Trait adapter over the inherent `metrics_snapshot`.
    fn snapshot(&self) -> LruMetricsSnapshot {
        self.metrics_snapshot()
    }
}
#[cfg(all(feature = "metrics", feature = "concurrency"))]
impl<K, V> MetricsSnapshotProvider<LruMetricsSnapshot> for ConcurrentLruCache<K, V>
where
    K: Copy + Eq + Hash + Send + Sync,
    V: Send + Sync,
{
    /// Trait adapter over the inherent (locking) `metrics_snapshot`.
    fn snapshot(&self) -> LruMetricsSnapshot {
        self.metrics_snapshot()
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::traits::Cache;
mod correctness {
use super::*;
// Core behavior tests: construction, map operations, LRU ordering, and
// pass-through behavior of the concurrent wrapper.
mod basic_behavior {
    use super::*;
    #[test]
    fn test_new_cache_creation() {
        let cache1: LruCore<i32, i32> = LruCore::new(0);
        assert_eq!(cache1.capacity(), 0);
        assert_eq!(cache1.len(), 0);
        let cache2: LruCore<i32, i32> = LruCore::new(10);
        assert_eq!(cache2.capacity(), 10);
        assert_eq!(cache2.len(), 0);
        let cache3: LruCore<i32, i32> = LruCore::new(1000);
        assert_eq!(cache3.capacity(), 1000);
        assert_eq!(cache3.len(), 0);
    }
    #[test]
    fn test_insert_single_item() {
        let mut cache = LruCore::new(5);
        let result = cache.insert(1, Arc::new(100));
        // First insert of a key returns no displaced value.
        assert!(result.is_none());
        assert_eq!(cache.len(), 1);
        assert!(cache.contains(&1));
    }
    #[test]
    fn test_insert_multiple_items() {
        let mut cache = LruCore::new(5);
        for i in 1..=3 {
            let result = cache.insert(i, Arc::new(i * 10));
            assert!(result.is_none());
        }
        assert_eq!(cache.len(), 3);
        for i in 1..=3 {
            assert!(cache.contains(&i));
        }
    }
    #[test]
    fn test_get_existing_item() {
        let mut cache = LruCore::new(5);
        cache.insert(1, Arc::new(100));
        let value = cache.get(&1);
        assert!(value.is_some());
        assert_eq!(**value.unwrap(), 100);
    }
    #[test]
    fn test_get_nonexistent_item() {
        let mut cache = LruCore::new(5);
        cache.insert(1, Arc::new(100));
        let value = cache.get(&2);
        assert!(value.is_none());
    }
    #[test]
    fn test_peek_existing_item() {
        let mut cache = LruCore::new(5);
        cache.insert(1, Arc::new(100));
        // Resolves to the inherent `peek`, which returns an owned Arc.
        let value = cache.peek(&1);
        assert!(value.is_some());
        assert_eq!(*value.unwrap(), 100);
    }
    #[test]
    fn test_peek_nonexistent_item() {
        let cache: LruCore<i32, i32> = LruCore::new(5);
        let value = cache.peek(&1);
        assert!(value.is_none());
    }
    #[test]
    fn test_contains_existing_item() {
        let mut cache = LruCore::new(5);
        cache.insert(1, Arc::new(100));
        assert!(cache.contains(&1));
    }
    #[test]
    fn test_contains_nonexistent_item() {
        let cache: LruCore<i32, i32> = LruCore::new(5);
        assert!(!cache.contains(&1));
    }
    #[test]
    fn test_remove_existing_item() {
        let mut cache = LruCore::new(5);
        cache.insert(1, Arc::new(100));
        let removed = cache.remove(&1);
        assert!(removed.is_some());
        assert_eq!(*removed.unwrap(), 100);
        assert_eq!(cache.len(), 0);
        assert!(!cache.contains(&1));
    }
    #[test]
    fn test_remove_nonexistent_item() {
        let mut cache = LruCore::new(5);
        cache.insert(1, Arc::new(100));
        let removed = cache.remove(&2);
        assert!(removed.is_none());
        assert_eq!(cache.len(), 1);
    }
    #[test]
    fn test_insert_duplicate_key() {
        let mut cache = LruCore::new(5);
        let old_value = cache.insert(1, Arc::new(100));
        assert!(old_value.is_none());
        // Re-inserting an existing key returns the previous value.
        let old_value = cache.insert(1, Arc::new(200));
        assert!(old_value.is_some());
        assert_eq!(*old_value.unwrap(), 100);
        assert_eq!(cache.len(), 1);
        let current_value = cache.get(&1);
        assert_eq!(**current_value.unwrap(), 200);
    }
    #[test]
    fn test_cache_length_updates() {
        let mut cache = LruCore::new(3);
        assert_eq!(cache.len(), 0);
        cache.insert(1, Arc::new(10));
        assert_eq!(cache.len(), 1);
        cache.insert(2, Arc::new(20));
        assert_eq!(cache.len(), 2);
        cache.remove(&1);
        assert_eq!(cache.len(), 1);
        cache.clear();
        assert_eq!(cache.len(), 0);
    }
    #[test]
    fn test_cache_capacity() {
        let cache1: LruCore<i32, i32> = LruCore::new(0);
        assert_eq!(cache1.capacity(), 0);
        let cache2: LruCore<i32, i32> = LruCore::new(10);
        assert_eq!(cache2.capacity(), 10);
        let cache3: LruCore<i32, i32> = LruCore::new(1000);
        assert_eq!(cache3.capacity(), 1000);
    }
    #[test]
    fn test_cache_clear() {
        let mut cache = LruCore::new(5);
        for i in 1..=3 {
            cache.insert(i, Arc::new(i * 10));
        }
        assert_eq!(cache.len(), 3);
        cache.clear();
        assert_eq!(cache.len(), 0);
        for i in 1..=3 {
            assert!(!cache.contains(&i));
        }
    }
    #[test]
    fn test_empty_cache_behavior() {
        // Every read/mutate operation on an empty cache is a no-op.
        let mut cache: LruCore<i32, i32> = LruCore::new(5);
        assert_eq!(cache.len(), 0);
        assert!(cache.get(&1).is_none());
        assert!(cache.peek(&1).is_none());
        assert!(!cache.contains(&1));
        assert!(cache.remove(&1).is_none());
        assert!(cache.pop_lru().is_none());
        assert!(cache.peek_lru().is_none());
        assert!(!cache.touch(&1));
        assert!(cache.recency_rank(&1).is_none());
    }
    #[test]
    fn test_single_item_cache() {
        let mut cache = LruCore::new(1);
        cache.insert(1, Arc::new(100));
        assert_eq!(cache.len(), 1);
        assert!(cache.contains(&1));
        // Inserting a second key into a capacity-1 cache evicts the first.
        cache.insert(2, Arc::new(200));
        assert_eq!(cache.len(), 1);
        assert!(!cache.contains(&1));
        assert!(cache.contains(&2));
    }
    #[test]
    fn test_zero_capacity_cache() {
        let mut cache = LruCore::new(0);
        let result = cache.insert(1, Arc::new(100));
        assert!(result.is_none());
        assert_eq!(cache.len(), 0);
        assert!(!cache.contains(&1));
    }
    #[test]
    fn test_is_empty() {
        let mut cache = LruCore::new(5);
        assert_eq!(cache.len(), 0);
        cache.insert(1, Arc::new(100));
        assert_ne!(cache.len(), 0);
        cache.remove(&1);
        assert_eq!(cache.len(), 0);
        cache.insert(1, Arc::new(100));
        cache.clear();
        assert_eq!(cache.len(), 0);
    }
    #[test]
    fn test_lru_eviction_basic() {
        let mut cache = LruCore::new(2);
        cache.insert(1, Arc::new(100));
        cache.insert(2, Arc::new(200));
        assert_eq!(cache.len(), 2);
        cache.insert(3, Arc::new(300));
        assert_eq!(cache.len(), 2);
        // Key 1 was least recently used, so it is the one evicted.
        assert!(!cache.contains(&1));
        assert!(cache.contains(&2));
        assert!(cache.contains(&3));
    }
    #[test]
    fn test_lru_order_preservation() {
        let mut cache = LruCore::new(3);
        cache.insert(1, Arc::new(100));
        cache.insert(2, Arc::new(200));
        cache.insert(3, Arc::new(300));
        assert!(cache.contains(&1));
        assert!(cache.contains(&2));
        assert!(cache.contains(&3));
        cache.insert(4, Arc::new(400));
        assert!(!cache.contains(&1));
        assert!(cache.contains(&2));
        assert!(cache.contains(&3));
        assert!(cache.contains(&4));
    }
    #[test]
    fn test_access_updates_lru_order() {
        let mut cache = LruCore::new(3);
        cache.insert(1, Arc::new(100));
        cache.insert(2, Arc::new(200));
        cache.insert(3, Arc::new(300));
        // get() promotes key 1, so key 2 becomes the eviction candidate.
        cache.get(&1);
        cache.insert(4, Arc::new(400));
        assert!(cache.contains(&1));
        assert!(!cache.contains(&2));
        assert!(cache.contains(&3));
        assert!(cache.contains(&4));
    }
    #[test]
    fn test_peek_does_not_update_lru() {
        let mut cache = LruCore::new(3);
        cache.insert(1, Arc::new(100));
        cache.insert(2, Arc::new(200));
        cache.insert(3, Arc::new(300));
        // peek() must not promote, so key 1 stays the eviction candidate.
        cache.peek(&1);
        cache.insert(4, Arc::new(400));
        assert!(!cache.contains(&1));
        assert!(cache.contains(&2));
        assert!(cache.contains(&3));
        assert!(cache.contains(&4));
    }
    #[test]
    fn test_touch_updates_lru_order() {
        let mut cache = LruCore::new(3);
        cache.insert(1, Arc::new(100));
        cache.insert(2, Arc::new(200));
        cache.insert(3, Arc::new(300));
        let touched = cache.touch(&1);
        assert!(touched);
        cache.insert(4, Arc::new(400));
        assert!(cache.contains(&1));
        assert!(!cache.contains(&2));
        assert!(cache.contains(&3));
        assert!(cache.contains(&4));
    }
    #[test]
    fn test_touch_nonexistent_item() {
        let mut cache = LruCore::new(3);
        cache.insert(1, Arc::new(100));
        let touched = cache.touch(&2);
        assert!(!touched);
    }
    #[test]
    fn test_pop_lru_basic() {
        let mut cache = LruCore::new(3);
        cache.insert(1, Arc::new(100));
        cache.insert(2, Arc::new(200));
        cache.insert(3, Arc::new(300));
        let popped = cache.pop_lru();
        assert!(popped.is_some());
        let (key, value) = popped.unwrap();
        assert_eq!(key, 1);
        assert_eq!(*value, 100);
        assert_eq!(cache.len(), 2);
        assert!(!cache.contains(&1));
    }
    #[test]
    fn test_pop_lru_empty_cache() {
        let mut cache: LruCore<i32, i32> = LruCore::new(3);
        let popped = cache.pop_lru();
        assert!(popped.is_none());
    }
    #[test]
    fn test_peek_lru_basic() {
        let cache = LruCore::new(3);
        let mut cache = cache;
        cache.insert(1, Arc::new(100));
        cache.insert(2, Arc::new(200));
        cache.insert(3, Arc::new(300));
        let peeked = cache.peek_lru();
        assert!(peeked.is_some());
        let (key, value) = peeked.unwrap();
        assert_eq!(*key, 1);
        assert_eq!(**value, 100);
        // peek_lru must not remove the entry.
        assert_eq!(cache.len(), 3);
        assert!(cache.contains(&1));
    }
    #[test]
    fn test_peek_lru_empty_cache() {
        let cache: LruCore<i32, i32> = LruCore::new(3);
        let peeked = cache.peek_lru();
        assert!(peeked.is_none());
    }
    #[test]
    fn test_recency_rank_basic() {
        let mut cache = LruCore::new(3);
        cache.insert(1, Arc::new(100));
        cache.insert(2, Arc::new(200));
        cache.insert(3, Arc::new(300));
        // Rank 0 is most recently used (the last insert).
        assert_eq!(cache.recency_rank(&3), Some(0));
        assert_eq!(cache.recency_rank(&2), Some(1));
        assert_eq!(cache.recency_rank(&1), Some(2));
    }
    #[test]
    fn test_recency_rank_nonexistent() {
        let mut cache = LruCore::new(3);
        cache.insert(1, Arc::new(100));
        assert!(cache.recency_rank(&2).is_none());
    }
    #[cfg(feature = "concurrency")]
    #[test]
    fn test_concurrent_cache_basic() {
        let cache = ConcurrentLruCache::new(5);
        assert_eq!(cache.capacity(), 5);
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());
        let old_value = cache.insert(1, 100);
        assert!(old_value.is_none());
        assert_eq!(cache.len(), 1);
        assert!(!cache.is_empty());
        assert!(cache.contains(&1));
        let value = cache.get(&1);
        assert!(value.is_some());
        assert_eq!(*value.unwrap(), 100);
        let peeked = cache.peek(&1);
        assert!(peeked.is_some());
        assert_eq!(*peeked.unwrap(), 100);
        let removed = cache.remove(&1);
        assert!(removed.is_some());
        assert_eq!(*removed.unwrap(), 100);
        assert_eq!(cache.len(), 0);
    }
    #[cfg(feature = "concurrency")]
    #[test]
    fn test_concurrent_insert_arc() {
        let cache = ConcurrentLruCache::new(5);
        let value = Arc::new(100);
        let value_clone = Arc::clone(&value);
        let old_value = cache.insert_arc(1, value);
        assert!(old_value.is_none());
        let retrieved = cache.get(&1);
        assert!(retrieved.is_some());
        let retrieved_val = retrieved.unwrap();
        assert_eq!(*retrieved_val, 100);
        // insert_arc stores the caller's Arc without re-wrapping it.
        assert!(Arc::ptr_eq(&retrieved_val, &value_clone));
    }
    #[cfg(feature = "concurrency")]
    #[test]
    fn test_arc_value_sharing() {
        let cache = ConcurrentLruCache::new(5);
        cache.insert(1, 100);
        let value1 = cache.get(&1);
        let value2 = cache.get(&1);
        let value3 = cache.peek(&1);
        assert!(value1.is_some());
        assert!(value2.is_some());
        assert!(value3.is_some());
        let v1 = value1.unwrap();
        let v2 = value2.unwrap();
        let v3 = value3.unwrap();
        // All accessors hand out clones of the same Arc allocation.
        assert!(Arc::ptr_eq(&v1, &v2));
        assert!(Arc::ptr_eq(&v2, &v3));
    }
    #[cfg(feature = "concurrency")]
    #[test]
    fn test_key_copy_semantics() {
        let cache = ConcurrentLruCache::new(5);
        let key1 = 42u32;
        let key2 = key1;
        cache.insert(key1, 100);
        assert!(cache.contains(&key1));
        assert!(cache.contains(&key2));
        let value1 = cache.get(&key1);
        let value2 = cache.get(&key2);
        assert!(value1.is_some());
        assert!(value2.is_some());
        assert_eq!(*value1.unwrap(), *value2.unwrap());
    }
}
mod edge_cases {
use super::*;
// Boundary capacities and insert/remove churn.
#[test]
fn test_maximum_capacity_cache() {
    let large_capacity = 1_000_000_usize;
    let cache: LruCore<i32, i32> = LruCore::new(large_capacity);
    assert_eq!(cache.capacity(), large_capacity);
    assert_eq!(cache.len(), 0);
    let mut cache = cache;
    cache.insert(1, Arc::new(100));
    assert_eq!(cache.len(), 1);
    assert!(cache.contains(&1));
}
#[test]
fn test_zero_capacity_operations() {
    // A zero-capacity cache rejects inserts and no-ops everything else.
    let mut cache: LruCore<i32, i32> = LruCore::new(0);
    let result = cache.insert(1, Arc::new(100));
    assert!(result.is_none());
    assert_eq!(cache.len(), 0);
    assert!(!cache.contains(&1));
    assert!(cache.get(&1).is_none());
    assert!(cache.peek(&1).is_none());
    assert!(cache.remove(&1).is_none());
    assert!(cache.pop_lru().is_none());
    assert!(cache.peek_lru().is_none());
    assert!(!cache.touch(&1));
    assert!(cache.recency_rank(&1).is_none());
    cache.clear();
    assert_eq!(cache.len(), 0);
}
#[test]
fn test_single_capacity_eviction_patterns() {
    let mut cache: LruCore<i32, i32> = LruCore::new(1);
    cache.insert(1, Arc::new(100));
    assert_eq!(cache.len(), 1);
    assert!(cache.contains(&1));
    cache.insert(2, Arc::new(200));
    assert_eq!(cache.len(), 1);
    assert!(!cache.contains(&1));
    assert!(cache.contains(&2));
    cache.insert(3, Arc::new(300));
    assert_eq!(cache.len(), 1);
    assert!(!cache.contains(&1));
    assert!(!cache.contains(&2));
    assert!(cache.contains(&3));
    cache.get(&3);
    assert_eq!(cache.len(), 1);
    assert!(cache.contains(&3));
}
#[test]
fn test_repeated_insert_same_key() {
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    for i in 1..=10 {
        let old_value = cache.insert(1, Arc::new(i * 100));
        if i == 1 {
            assert!(old_value.is_none());
        } else {
            // Each update returns the previous iteration's value.
            assert!(old_value.is_some());
            assert_eq!(*old_value.unwrap(), (i - 1) * 100);
        }
    }
    assert_eq!(cache.len(), 1);
    assert!(cache.contains(&1));
    let value = cache.get(&1);
    assert!(value.is_some());
    assert_eq!(**value.unwrap(), 1000);
}
#[test]
fn test_alternating_access_pattern() {
    let mut cache: LruCore<i32, i32> = LruCore::new(2);
    cache.insert(1, Arc::new(100));
    cache.insert(2, Arc::new(200));
    for _ in 0..10 {
        cache.get(&1);
        cache.get(&2);
    }
    assert!(cache.contains(&1));
    assert!(cache.contains(&2));
    assert_eq!(cache.len(), 2);
    // The loop ends with key 2 most recent, so key 1 is evicted.
    cache.insert(3, Arc::new(300));
    assert!(!cache.contains(&1));
    assert!(cache.contains(&2));
    assert!(cache.contains(&3));
}
#[test]
fn test_insert_then_immediate_remove() {
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    for i in 1..=10 {
        cache.insert(i, Arc::new(i * 100));
        let removed = cache.remove(&i);
        assert!(removed.is_some());
        assert_eq!(*removed.unwrap(), i * 100);
        assert!(!cache.contains(&i));
        assert_eq!(cache.len(), 0);
    }
}
// Interaction of remove/clear/pop with eviction.
#[test]
fn test_remove_during_eviction() {
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    cache.insert(1, Arc::new(100));
    cache.insert(2, Arc::new(200));
    cache.insert(3, Arc::new(300));
    let removed = cache.remove(&2);
    assert!(removed.is_some());
    assert_eq!(*removed.unwrap(), 200);
    assert_eq!(cache.len(), 2);
    // The removal freed a slot, so this insert does not evict anything.
    cache.insert(4, Arc::new(400));
    assert_eq!(cache.len(), 3);
    assert!(cache.contains(&1));
    assert!(!cache.contains(&2));
    assert!(cache.contains(&3));
    assert!(cache.contains(&4));
    // Now the cache is full again; key 1 is the LRU victim.
    cache.insert(5, Arc::new(500));
    assert_eq!(cache.len(), 3);
    assert!(!cache.contains(&1));
}
#[test]
fn test_clear_on_empty_cache() {
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    assert_eq!(cache.len(), 0);
    cache.clear();
    assert_eq!(cache.len(), 0);
    cache.insert(1, Arc::new(100));
    assert_eq!(cache.len(), 1);
    assert!(cache.contains(&1));
}
#[test]
fn test_clear_then_operations() {
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    for i in 1..=3 {
        cache.insert(i, Arc::new(i * 100));
    }
    assert_eq!(cache.len(), 3);
    cache.clear();
    assert_eq!(cache.len(), 0);
    for i in 1..=3 {
        assert!(!cache.contains(&i));
        assert!(cache.get(&i).is_none());
    }
    // The cache remains fully usable after a clear.
    cache.insert(10, Arc::new(1000));
    assert_eq!(cache.len(), 1);
    assert!(cache.contains(&10));
    let value = cache.get(&10);
    assert!(value.is_some());
    assert_eq!(**value.unwrap(), 1000);
}
#[test]
fn test_multiple_clear_operations() {
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    cache.insert(1, Arc::new(100));
    assert_eq!(cache.len(), 1);
    for _ in 0..5 {
        cache.clear();
        assert_eq!(cache.len(), 0);
    }
    cache.insert(2, Arc::new(200));
    assert_eq!(cache.len(), 1);
    assert!(cache.contains(&2));
}
#[test]
fn test_pop_lru_until_empty() {
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    for i in 1..=5 {
        cache.insert(i, Arc::new(i * 100));
    }
    assert_eq!(cache.len(), 5);
    let mut popped_keys = Vec::new();
    while let Some((key, value)) = cache.pop_lru() {
        popped_keys.push(key);
        assert_eq!(*value, key * 100);
    }
    // pop_lru drains in oldest-first (insertion) order here.
    assert_eq!(popped_keys, vec![1, 2, 3, 4, 5]);
    assert_eq!(cache.len(), 0);
    assert!(cache.pop_lru().is_none());
}
#[test]
fn test_peek_after_eviction() {
    let mut cache: LruCore<i32, i32> = LruCore::new(2);
    cache.insert(1, Arc::new(100));
    cache.insert(2, Arc::new(200));
    assert!(cache.peek(&1).is_some());
    assert!(cache.peek(&2).is_some());
    // peek() does not promote, so key 1 is still the eviction victim.
    cache.insert(3, Arc::new(300));
    assert!(cache.peek(&1).is_none());
    assert!(cache.peek(&2).is_some());
    assert!(cache.peek(&3).is_some());
}
#[test]
fn test_touch_evicted_items() {
let mut cache: LruCore<i32, i32> = LruCore::new(2);
cache.insert(1, Arc::new(100));
cache.insert(2, Arc::new(200));
assert!(cache.touch(&1));
cache.insert(3, Arc::new(300));
assert!(!cache.touch(&2));
assert!(cache.touch(&1));
assert!(cache.touch(&3));
}
#[test]
fn test_recency_rank_after_operations() {
let mut cache: LruCore<i32, i32> = LruCore::new(4);
cache.insert(1, Arc::new(100));
cache.insert(2, Arc::new(200));
cache.insert(3, Arc::new(300));
cache.insert(4, Arc::new(400));
assert_eq!(cache.recency_rank(&4), Some(0));
assert_eq!(cache.recency_rank(&3), Some(1));
assert_eq!(cache.recency_rank(&2), Some(2));
assert_eq!(cache.recency_rank(&1), Some(3));
cache.get(&1);
assert_eq!(cache.recency_rank(&1), Some(0));
assert_eq!(cache.recency_rank(&4), Some(1));
assert_eq!(cache.recency_rank(&3), Some(2));
assert_eq!(cache.recency_rank(&2), Some(3));
cache.touch(&3);
assert_eq!(cache.recency_rank(&3), Some(0));
assert_eq!(cache.recency_rank(&1), Some(1));
assert_eq!(cache.recency_rank(&4), Some(2));
assert_eq!(cache.recency_rank(&2), Some(3));
}
#[test]
fn test_cache_with_identical_values() {
    // Several keys may share one Arc; get() must return that same allocation.
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    let shared = Arc::new(999);
    for k in 1..=3 {
        cache.insert(k, Arc::clone(&shared));
    }
    for k in 1..=3 {
        assert!(cache.contains(&k));
        let fetched = cache.get(&k).expect("key just inserted");
        assert!(Arc::ptr_eq(fetched, &shared));
    }
}
#[test]
// Mixed insert/get/touch/peek/remove sequence: verifies the survivor set
// after one capacity-3 eviction and one explicit removal.
fn test_interleaved_operations() {
let mut cache: LruCore<i32, i32> = LruCore::new(3);
cache.insert(1, Arc::new(100));
assert!(cache.get(&1).is_some());
cache.insert(2, Arc::new(200));
assert!(cache.touch(&1));
cache.insert(3, Arc::new(300));
// Cache is full; order is 3, 1, 2 (most- to least-recent). peek must not promote.
assert!(cache.peek(&2).is_some());
// Overflow: key 2 (still the LRU) is evicted.
cache.insert(4, Arc::new(400));
assert!(cache.contains(&1));
assert!(!cache.contains(&2)); assert!(cache.contains(&3));
assert!(cache.contains(&4));
let removed = cache.remove(&1);
assert!(removed.is_some());
// remove(&1) freed a slot, so this insert evicts nothing.
cache.insert(5, Arc::new(500));
assert!(cache.touch(&3));
assert!(!cache.contains(&1));
assert!(!cache.contains(&2));
assert!(cache.contains(&3));
assert!(cache.contains(&4));
assert!(cache.contains(&5));
}
#[test]
fn test_capacity_reduction_simulation() {
    // Manually shrinking via pop_lru drops the two oldest entries; refilling
    // brings the cache back to capacity.
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    for k in 1..=5 {
        cache.insert(k, Arc::new(k * 100));
    }
    assert_eq!(cache.len(), 5);
    for _ in 0..2 {
        let _ = cache.pop_lru();
    }
    assert_eq!(cache.len(), 3);
    assert!(!cache.contains(&1));
    assert!(!cache.contains(&2));
    for k in 3..=5 {
        assert!(cache.contains(&k));
    }
    cache.insert(6, Arc::new(600));
    cache.insert(7, Arc::new(700));
    assert_eq!(cache.len(), 5);
}
#[test]
fn test_duplicate_key_with_same_value() {
    // Re-inserting a key returns the previous Arc, even when the new value
    // is the very same allocation.
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    let payload = Arc::new(100);
    assert!(cache.insert(1, Arc::clone(&payload)).is_none());
    let displaced = cache.insert(1, Arc::clone(&payload));
    assert!(displaced.is_some());
    assert!(Arc::ptr_eq(displaced.as_ref().unwrap(), &payload));
    assert_eq!(cache.len(), 1);
    assert!(cache.contains(&1));
    assert!(Arc::ptr_eq(cache.get(&1).unwrap(), &payload));
}
#[test]
fn test_lru_order_with_duplicate_inserts() {
    // Overwriting an existing key promotes it, so a later eviction removes
    // key 2 instead of key 1.
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    for k in 1..=3 {
        cache.insert(k, Arc::new(k * 100));
    }
    for (rank, &k) in [3, 2, 1].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    cache.insert(1, Arc::new(999));
    for (rank, &k) in [1, 3, 2].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    cache.insert(4, Arc::new(400));
    assert!(!cache.contains(&2));
    for k in [1, 3, 4] {
        assert!(cache.contains(&k));
    }
}
#[test]
fn test_peek_vs_get_ordering_difference() {
    // peek() must not disturb recency; get() must promote.
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    for k in 1..=3 {
        cache.insert(k, Arc::new(k * 100));
    }
    for (rank, &k) in [3, 2, 1].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    // A peek leaves the order untouched.
    cache.peek(&1);
    for (rank, &k) in [3, 2, 1].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    // A get moves key 1 to the front.
    cache.get(&1);
    for (rank, &k) in [1, 3, 2].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    // Key 2 is now LRU and is evicted by the next insert.
    cache.insert(4, Arc::new(400));
    assert!(!cache.contains(&2));
    for k in [1, 3, 4] {
        assert!(cache.contains(&k));
    }
}
#[cfg(feature = "concurrency")]
#[test]
// Smoke test of the concurrent wrapper: empty-cache reads, fill to
// capacity 2, one eviction, then clear().
fn test_concurrent_cache_edge_cases() {
let cache = ConcurrentLruCache::new(2);
assert!(cache.is_empty());
assert_eq!(cache.len(), 0);
// All lookups/removals of a missing key return None/false.
assert!(cache.get(&1).is_none());
assert!(cache.peek(&1).is_none());
assert!(!cache.contains(&1));
assert!(cache.remove(&1).is_none());
cache.insert(1, 100);
assert!(!cache.is_empty());
assert_eq!(cache.len(), 1);
cache.insert(2, 200);
assert_eq!(cache.len(), 2);
// Third insert overflows capacity 2 and evicts the LRU entry (key 1).
cache.insert(3, 300); assert_eq!(cache.len(), 2);
assert!(!cache.contains(&1));
assert!(cache.contains(&2));
assert!(cache.contains(&3));
cache.clear();
assert!(cache.is_empty());
assert_eq!(cache.len(), 0);
}
#[cfg(feature = "concurrency")]
#[test]
// Tracks Arc::strong_count across insert_arc/get/remove/drop to show the
// cache neither leaks nor drops references.
fn test_arc_reference_counting_edge_cases() {
let cache = ConcurrentLruCache::new(3);
let value = Arc::new(vec![1, 2, 3, 4, 5]);
assert_eq!(Arc::strong_count(&value), 1);
// insert_arc clones the Arc: local handle + cached copy = 2.
let old_value = cache.insert_arc(1, Arc::clone(&value));
assert!(old_value.is_none());
assert_eq!(Arc::strong_count(&value), 2);
// get() returns an owned Arc clone (count rises to 3).
let retrieved = cache.get(&1);
assert!(retrieved.is_some());
assert_eq!(Arc::strong_count(&value), 3);
drop(retrieved);
assert_eq!(Arc::strong_count(&value), 2);
// remove() transfers the cache's reference to the caller: count stays 2.
let removed = cache.remove(&1);
assert!(removed.is_some());
assert_eq!(Arc::strong_count(&value), 2);
drop(removed);
assert_eq!(Arc::strong_count(&value), 1); }
#[cfg(feature = "concurrency")]
#[test]
// insert_arc stores the caller's Arc (pointer-equal on read), while
// insert wraps a plain value in a fresh allocation.
fn test_insert_arc_vs_insert_value() {
let cache = ConcurrentLruCache::new(3);
let value = Arc::new(100);
let old1 = cache.insert_arc(1, Arc::clone(&value));
assert!(old1.is_none());
// The same allocation comes back out.
let retrieved1 = cache.get(&1);
assert!(Arc::ptr_eq(retrieved1.as_ref().unwrap(), &value));
let old2 = cache.insert(2, 200);
assert!(old2.is_none());
let retrieved2 = cache.get(&2);
assert!(retrieved2.is_some());
// A by-value insert allocates its own Arc: equal value, distinct pointer.
let retrieved2_arc = retrieved2.unwrap();
assert!(!Arc::ptr_eq(&retrieved2_arc, &value));
assert_eq!(*retrieved2_arc, 200);
assert!(cache.contains(&1));
assert!(cache.contains(&2));
assert_eq!(cache.len(), 2);
}
#[test]
fn test_large_key_values() {
    // Extreme i64 keys behave just like ordinary ones.
    let mut cache: LruCore<i64, i32> = LruCore::new(3);
    let entries = [(i64::MAX, 1), (i64::MIN, 2), (0i64, 3)];
    for &(key, val) in &entries {
        cache.insert(key, Arc::new(val));
    }
    for &(key, val) in &entries {
        assert!(cache.contains(&key));
        assert_eq!(**cache.get(&key).unwrap(), val);
    }
}
#[test]
fn test_key_collision_scenarios() {
    // Ten distinct keys coexist at capacity 10; remove + reinsert works.
    let mut cache: LruCore<i32, i32> = LruCore::new(10);
    for key in 0..10 {
        cache.insert(key, Arc::new(key * 100));
    }
    for key in 0..10 {
        assert!(cache.contains(&key));
        assert_eq!(**cache.get(&key).expect("inserted above"), key * 100);
    }
    cache.remove(&5);
    assert!(!cache.contains(&5));
    cache.insert(5, Arc::new(999));
    assert!(cache.contains(&5));
    assert_eq!(**cache.get(&5).unwrap(), 999);
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_memory_pressure_simulation() {
    // Fill with 50 large values, keep the first 25 hot, then overflow the
    // 75-slot cache with 50 more inserts: exactly the 25 cold originals
    // (keys 25..50) must be evicted.
    let mut cache: LruCore<i32, String> = LruCore::new(75);
    for i in 0..50 {
        cache.insert(i, Arc::new("x".repeat(1000)));
    }
    assert_eq!(cache.len(), 50);
    // Keep keys 0..25 recent.
    for _ in 0..10 {
        for i in 0..25 {
            cache.get(&i);
        }
    }
    for i in 50..100 {
        cache.insert(i, Arc::new("y".repeat(1000)));
    }
    assert_eq!(cache.len(), 75);
    let evicted = (0..50).filter(|i| !cache.contains(i)).count();
    assert_eq!(evicted, 25);
}
#[test]
fn test_rapid_capacity_fill_and_drain() {
    // Fill to capacity, drain half via pop_lru, refill past capacity, and
    // confirm the cold half (keys 25..50) was evicted.
    let mut cache: LruCore<i32, i32> = LruCore::new(50);
    for k in 0..50 {
        cache.insert(k, Arc::new(k * 100));
    }
    assert_eq!(cache.len(), 50);
    for expected in 0..25 {
        let (k, v) = cache.pop_lru().expect("cache still has entries");
        assert_eq!(k, expected);
        assert_eq!(*v, expected * 100);
    }
    assert_eq!(cache.len(), 25);
    for k in 50..100 {
        cache.insert(k, Arc::new(k * 100));
    }
    assert_eq!(cache.len(), 50);
    for k in 25..50 {
        assert!(!cache.contains(&k));
    }
    for k in 75..100 {
        assert!(cache.contains(&k));
    }
}
#[test]
// Exercises insert→remove→reinsert, clear-then-reuse, and touch/peek/get
// interleavings on the same key.
fn test_operation_sequence_corner_cases() {
let mut cache: LruCore<i32, i32> = LruCore::new(3);
cache.insert(1, Arc::new(100));
let removed = cache.remove(&1);
assert!(removed.is_some());
// Reinsert after removal stores the new value.
cache.insert(1, Arc::new(200));
assert_eq!(**cache.get(&1).unwrap(), 200);
cache.insert(2, Arc::new(300));
cache.insert(3, Arc::new(400));
cache.clear();
assert_eq!(cache.len(), 0);
// The cache remains usable after clear().
cache.insert(4, Arc::new(500));
cache.insert(5, Arc::new(600));
assert_eq!(cache.len(), 2);
// touch() of a missing key is false; after insertion it succeeds.
assert!(!cache.touch(&6));
cache.insert(6, Arc::new(700));
assert!(cache.touch(&6));
cache.peek(&6);
cache.get(&6);
cache.peek(&6);
assert!(cache.contains(&6));
}
#[test]
fn test_boundary_value_keys() {
    // Boundary i32 keys hash and compare correctly; with 7 inserts into a
    // 5-slot cache exactly `capacity` of them survive.
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    let extremes = [i32::MIN, i32::MIN + 1, -1, 0, 1, i32::MAX - 1, i32::MAX];
    for (i, &key) in extremes.iter().enumerate() {
        cache.insert(key, Arc::new(i as i32));
    }
    let mut survivors = 0;
    for &key in &extremes {
        if cache.contains(&key) {
            survivors += 1;
            assert!(cache.get(&key).is_some());
        }
    }
    assert_eq!(survivors, cache.capacity().min(extremes.len()));
}
#[test]
fn test_remove_head_and_tail_items() {
    // remove() must unlink cleanly at the tail (LRU), head (MRU) and middle.
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    for k in 1..=5 {
        cache.insert(k, Arc::new(k * 100));
    }
    // Key 1 is the LRU tail, key 5 the MRU head, key 3 a middle node.
    for k in [1, 5, 3] {
        let gone = cache.remove(&k).expect("key was present");
        assert_eq!(*gone, k * 100);
    }
    for k in [1, 3, 5] {
        assert!(!cache.contains(&k));
    }
    for k in [2, 4] {
        assert!(cache.contains(&k));
    }
    assert_eq!(cache.len(), 2);
}
#[test]
fn test_get_after_remove() {
    // Every read-style API must agree that a removed key is gone.
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    cache.insert(1, Arc::new(100));
    cache.insert(2, Arc::new(200));
    let taken = cache.remove(&1).expect("key 1 was present");
    assert_eq!(*taken, 100);
    assert!(cache.get(&1).is_none());
    assert!(!cache.contains(&1));
    assert!(cache.peek(&1).is_none());
    assert!(!cache.touch(&1));
    assert!(cache.recency_rank(&1).is_none());
}
#[test]
fn test_contains_after_eviction() {
    // After an eviction, every API treats the evicted key as absent.
    let mut cache: LruCore<i32, i32> = LruCore::new(2);
    cache.insert(1, Arc::new(100));
    cache.insert(2, Arc::new(200));
    assert!(cache.contains(&1));
    assert!(cache.contains(&2));
    cache.insert(3, Arc::new(300));
    assert!(!cache.contains(&1));
    for k in [2, 3] {
        assert!(cache.contains(&k));
    }
    assert!(cache.get(&1).is_none());
    assert!(cache.peek(&1).is_none());
    assert!(!cache.touch(&1));
    assert!(cache.remove(&1).is_none());
}
#[test]
fn test_empty_cache_all_operations() {
    // Every operation on an empty cache is a harmless None/false no-op.
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    assert_eq!(cache.len(), 0);
    assert_eq!(cache.capacity(), 5);
    assert!(cache.get(&1).is_none());
    assert!(cache.peek(&1).is_none());
    assert!(!cache.contains(&1));
    assert!(cache.remove(&1).is_none());
    assert!(cache.pop_lru().is_none());
    assert!(cache.peek_lru().is_none());
    assert!(!cache.touch(&1));
    assert!(cache.recency_rank(&1).is_none());
    // clear() on an empty cache is fine and the cache stays usable.
    cache.clear();
    assert_eq!(cache.len(), 0);
    cache.insert(1, Arc::new(100));
    assert_eq!(cache.len(), 1);
    assert!(cache.contains(&1));
}
#[test]
// Runs the full read API against a one-entry cache; a singleton is both
// the MRU (rank 0) and the LRU at the same time.
fn test_single_item_all_operations() {
let mut cache: LruCore<i32, i32> = LruCore::new(5);
cache.insert(1, Arc::new(100));
assert_eq!(cache.len(), 1);
assert!(cache.contains(&1));
let value = cache.get(&1);
assert!(value.is_some());
assert_eq!(**value.unwrap(), 100);
let peeked = cache.peek(&1);
assert!(peeked.is_some());
assert_eq!(*peeked.unwrap(), 100);
assert!(cache.touch(&1));
assert_eq!(cache.recency_rank(&1), Some(0));
// The single entry is also the LRU candidate.
let (lru_key, lru_value) = cache.peek_lru().unwrap();
assert_eq!(*lru_key, 1);
assert_eq!(**lru_value, 100);
// Lookups for an absent key are unaffected by the resident entry.
assert!(cache.get(&2).is_none());
assert!(!cache.contains(&2));
assert!(!cache.touch(&2));
}
#[test]
fn test_full_cache_all_operations() {
    // Reads on a full cache see every entry; one more insert evicts the LRU.
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    for k in 1..=3 {
        cache.insert(k, Arc::new(k * 100));
    }
    assert_eq!(cache.len(), 3);
    for k in 1..=3 {
        assert!(cache.contains(&k));
        assert!(cache.peek(&k).is_some());
        assert!(cache.recency_rank(&k).is_some());
    }
    cache.insert(4, Arc::new(400));
    assert_eq!(cache.len(), 3);
    assert!(!cache.contains(&1));
    for k in 2..=4 {
        assert!(cache.contains(&k));
    }
}
#[test]
fn test_lru_rank_boundary_conditions() {
    // Ranks span 0..len; a key that was never inserted has no rank.
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    for k in 1..=3 {
        cache.insert(k, Arc::new(k * 100));
    }
    assert_eq!(cache.recency_rank(&3), Some(0));
    assert_eq!(cache.recency_rank(&1), Some(2));
    cache.get(&1);
    assert_eq!(cache.recency_rank(&1), Some(0));
    assert_eq!(cache.recency_rank(&2), Some(2));
    assert!(cache.recency_rank(&4).is_none());
}
#[test]
fn test_peek_lru_on_single_item() {
    // peek_lru() is non-destructive: the single entry stays put.
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    cache.insert(1, Arc::new(100));
    let (key, value) = cache.peek_lru().expect("one entry present");
    assert_eq!((*key, **value), (1, 100));
    assert_eq!(cache.len(), 1);
    assert!(cache.contains(&1));
}
#[test]
fn test_touch_only_item() {
    // Touching the sole entry keeps it at rank 0 and readable.
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    cache.insert(1, Arc::new(100));
    assert!(cache.touch(&1));
    assert!(cache.contains(&1));
    assert_eq!(cache.recency_rank(&1), Some(0));
    assert_eq!(**cache.get(&1).expect("entry present"), 100);
}
#[cfg(feature = "concurrency")]
#[test]
// Sequential smoke test of the concurrent API: insert/get/remove, an
// eviction at capacity 2, and a final clear().
fn test_concurrent_read_write_edge_cases() {
let cache = ConcurrentLruCache::new(2);
cache.insert(1, 100);
let value = cache.get(&1);
assert!(value.is_some());
assert_eq!(*value.unwrap(), 100);
cache.insert(2, 200);
let removed = cache.remove(&2);
assert!(removed.is_some());
assert_eq!(*removed.unwrap(), 200);
assert!(!cache.contains(&2));
// Cache now holds only key 1; insert(3) fills it, insert(4) evicts key 1.
cache.insert(3, 300);
cache.insert(4, 400);
assert!(cache.contains(&3));
assert!(cache.contains(&4));
cache.clear();
assert!(cache.is_empty());
}
#[test]
// Caches in three states (empty, one entry, full) are dropped at scope
// end; the test passes if none of the drops panics.
fn test_drop_behavior_edge_cases() {
// Empty cache dropped without ever inserting.
{
let cache: LruCore<i32, i32> = LruCore::new(5);
assert_eq!(cache.len(), 0);
}
// Cache dropped while holding one Arc value.
{
let mut cache: LruCore<i32, i32> = LruCore::new(5);
cache.insert(1, Arc::new(100));
assert_eq!(cache.len(), 1);
}
// Cache dropped while completely full.
{
let mut cache: LruCore<i32, i32> = LruCore::new(3);
for i in 1..=3 {
cache.insert(i, Arc::new(i * 100));
}
assert_eq!(cache.len(), 3);
}
}
}
mod lru_operations {
use super::*;
#[test]
fn test_lru_insertion_order_tracking() {
    // Pure inserts order entries newest-first; the first insert is the LRU.
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    for k in 1..=5 {
        cache.insert(k, Arc::new(k * 100));
    }
    for (rank, &k) in [5, 4, 3, 2, 1].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    let (lru_key, _) = cache.peek_lru().expect("cache is non-empty");
    assert_eq!(*lru_key, 1);
}
#[test]
fn test_lru_access_order_updates() {
    // Each get() promotes its key to rank 0 and shifts the rest down.
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    for k in 1..=3 {
        cache.insert(k, Arc::new(k * 100));
    }
    for (rank, &k) in [3, 2, 1].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    cache.get(&1);
    for (rank, &k) in [1, 3, 2].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    cache.get(&2);
    for (rank, &k) in [2, 1, 3].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
}
#[test]
fn test_lru_eviction_policy() {
    // With keys 1 and 3 recently read, key 2 is the eviction victim.
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    for k in 1..=3 {
        cache.insert(k, Arc::new(k * 100));
    }
    cache.get(&1);
    cache.get(&3);
    cache.insert(4, Arc::new(400));
    assert!(!cache.contains(&2));
    for k in [1, 3, 4] {
        assert!(cache.contains(&k));
    }
}
#[test]
fn test_lru_head_tail_positioning() {
    // The newest insert is the head; a get() of the tail entry exposes the
    // next-oldest key as the new LRU.
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    for k in 1..=4 {
        cache.insert(k, Arc::new(k * 100));
    }
    assert_eq!(cache.recency_rank(&4), Some(0));
    let (tail_key, tail_value) = cache.peek_lru().expect("non-empty");
    assert_eq!(*tail_key, 1);
    assert_eq!(**tail_value, 100);
    cache.get(&1);
    assert_eq!(cache.recency_rank(&1), Some(0));
    let (tail_key, _) = cache.peek_lru().expect("non-empty");
    assert_eq!(*tail_key, 2);
}
#[test]
fn test_move_to_head_operation() {
    // get() and touch() both hoist a middle/tail node to the head.
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    for k in 1..=5 {
        cache.insert(k, Arc::new(k * 100));
    }
    cache.get(&3);
    for (rank, &k) in [3, 5, 4, 2, 1].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    cache.touch(&1);
    assert_eq!(cache.recency_rank(&1), Some(0));
    assert_eq!(cache.recency_rank(&3), Some(1));
}
#[test]
// After a mix of get/remove/insert the rank assignment must stay a
// bijection: every resident key has a unique rank strictly below len().
fn test_lru_chain_integrity() {
let mut cache: LruCore<i32, i32> = LruCore::new(4);
for i in 1..=4 {
cache.insert(i, Arc::new(i * 100));
// Every key inserted so far must already have a rank.
for j in 1..=i {
assert!(cache.recency_rank(&j).is_some());
}
}
cache.get(&2); cache.remove(&3); cache.insert(5, Arc::new(500));
assert!(cache.recency_rank(&5).is_some());
assert!(cache.recency_rank(&2).is_some());
assert!(cache.recency_rank(&4).is_some());
assert!(cache.recency_rank(&1).is_some());
assert!(cache.recency_rank(&3).is_none());
// Collected ranks must be pairwise distinct and in-bounds.
let mut ranks = vec![];
for &key in &[5, 2, 4, 1] {
if cache.contains(&key) {
let rank = cache.recency_rank(&key).unwrap();
assert!(!ranks.contains(&rank));
assert!(rank < cache.len());
ranks.push(rank);
}
}
}
#[test]
fn test_lru_ordering_after_get() {
    // Successive gets reorder only the accessed keys.
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    for k in 1..=4 {
        cache.insert(k, Arc::new(k * 100));
    }
    let fetched = cache.get(&1).expect("key 1 present");
    assert_eq!(**fetched, 100);
    for (rank, &k) in [1, 4, 3, 2].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    cache.get(&3);
    for (rank, &k) in [3, 1, 4, 2].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
}
#[test]
fn test_lru_ordering_after_touch() {
    // touch() promotes existing keys; touching a missing key is a no-op.
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    for k in 1..=3 {
        cache.insert(k, Arc::new(k * 100));
    }
    assert!(cache.touch(&1));
    for (rank, &k) in [1, 3, 2].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    assert!(cache.touch(&3));
    for (rank, &k) in [3, 1, 2].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    // Unknown key: returns false and leaves the order untouched.
    assert!(!cache.touch(&99));
    for (rank, &k) in [3, 1, 2].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
}
#[test]
fn test_lru_ordering_preservation_on_peek() {
    // peek() and peek_lru() never disturb recency order.
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    for k in 1..=4 {
        cache.insert(k, Arc::new(k * 100));
    }
    let before: Vec<_> = (1..=4)
        .map(|k| (k, cache.recency_rank(&k).unwrap()))
        .collect();
    // Peek every key in a scrambled order.
    for k in [1, 4, 2, 3] {
        assert_eq!(*cache.peek(&k).unwrap(), k * 100);
    }
    for (k, rank) in before {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    let (lru_key, lru_value) = cache.peek_lru().expect("non-empty");
    assert_eq!(*lru_key, 1);
    assert_eq!(**lru_value, 100);
    assert_eq!(cache.recency_rank(&1), Some(3));
}
#[test]
fn test_pop_lru_removes_tail() {
    // pop_lru() removes exactly the current tail, repeatedly.
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    for k in 1..=5 {
        cache.insert(k, Arc::new(k * 100));
    }
    // Promote 3 then 1; key 2 becomes the LRU.
    cache.get(&3);
    cache.get(&1);
    let (k, v) = cache.pop_lru().expect("non-empty");
    assert_eq!((k, *v), (2, 200));
    assert!(!cache.contains(&2));
    assert_eq!(cache.len(), 4);
    let (next_lru, _) = cache.peek_lru().expect("non-empty");
    assert_eq!(*next_lru, 4);
    let (k, v) = cache.pop_lru().expect("non-empty");
    assert_eq!((k, *v), (4, 400));
    assert_eq!(cache.len(), 3);
}
#[test]
// Each pop_lru() must advance the tail to the next-oldest entry.
fn test_pop_lru_updates_tail_slot() {
let mut cache: LruCore<i32, i32> = LruCore::new(3);
cache.insert(1, Arc::new(100));
cache.insert(2, Arc::new(200));
cache.insert(3, Arc::new(300));
let (initial_tail_key, _) = cache.peek_lru().unwrap();
assert_eq!(*initial_tail_key, 1);
let (popped_key, _) = cache.pop_lru().unwrap();
assert_eq!(popped_key, 1);
// Tail advanced: key 2 is now the oldest entry.
let (new_tail_key, _) = cache.peek_lru().unwrap();
assert_eq!(*new_tail_key, 2);
let _ = cache.pop_lru();
let (final_tail_key, _) = cache.peek_lru().unwrap();
assert_eq!(*final_tail_key, 3);
// Popping the last entry leaves no tail at all.
let _ = cache.pop_lru();
assert!(cache.peek_lru().is_none());
assert_eq!(cache.len(), 0);
}
#[test]
fn test_peek_lru_returns_tail() {
    // peek_lru() reports the tail without removing or promoting it.
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    for k in 1..=4 {
        cache.insert(k, Arc::new(k * 100));
    }
    cache.get(&3);
    cache.get(&1);
    let (k, v) = cache.peek_lru().expect("non-empty");
    assert_eq!((*k, **v), (2, 200));
    assert!(cache.contains(&2));
    assert_eq!(cache.len(), 4);
    // Repeated peeks are idempotent.
    for _ in 0..5 {
        let (k, v) = cache.peek_lru().expect("non-empty");
        assert_eq!((*k, **v), (2, 200));
    }
    assert_eq!(cache.recency_rank(&2), Some(3));
}
#[test]
fn test_lru_recency_rank_calculation() {
    // Ranks always form the permutation 0..len and update after a get.
    let mut cache: LruCore<i32, i32> = LruCore::new(6);
    for k in 1..=6 {
        cache.insert(k, Arc::new(k * 100));
    }
    for k in 1..=6 {
        assert_eq!(cache.recency_rank(&k), Some((6 - k) as usize));
    }
    cache.get(&3);
    for (rank, &k) in [3, 6, 5, 4, 2, 1].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    let mut all_ranks: Vec<usize> =
        (1..=6).map(|k| cache.recency_rank(&k).unwrap()).collect();
    all_ranks.sort_unstable();
    assert_eq!(all_ranks, vec![0, 1, 2, 3, 4, 5]);
}
#[test]
fn test_lru_rank_after_reordering() {
    // touch() and get() both rewrite the full rank ordering.
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    for k in 1..=4 {
        cache.insert(k, Arc::new(k * 100));
    }
    cache.touch(&1);
    for (rank, &k) in [1, 4, 3, 2].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    cache.get(&2);
    for (rank, &k) in [2, 1, 4, 3].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    cache.touch(&4);
    for (rank, &k) in [4, 2, 1, 3].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
}
#[test]
fn test_multiple_access_lru_stability() {
    // Repeatedly promoting the same key is stable: ranks do not drift.
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    for k in 1..=3 {
        cache.insert(k, Arc::new(k * 100));
    }
    for _ in 0..10 {
        cache.get(&2);
        assert_eq!(cache.recency_rank(&2), Some(0));
    }
    assert_eq!(cache.recency_rank(&3), Some(1));
    assert_eq!(cache.recency_rank(&1), Some(2));
    for _ in 0..5 {
        cache.touch(&1);
        assert_eq!(cache.recency_rank(&1), Some(0));
    }
    for (rank, &k) in [1, 2, 3].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
}
#[test]
fn test_lru_eviction_sequence() {
    // Three overflow inserts evict 1, then 3, then 2 — key 2 outlives the
    // first two rounds because it was read.
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    for k in 1..=3 {
        cache.insert(k, Arc::new(k * 100));
    }
    cache.get(&2);
    cache.insert(4, Arc::new(400));
    assert!(!cache.contains(&1));
    for k in [2, 3, 4] {
        assert!(cache.contains(&k));
    }
    cache.insert(5, Arc::new(500));
    assert!(!cache.contains(&3));
    for k in [2, 4, 5] {
        assert!(cache.contains(&k));
    }
    cache.insert(6, Arc::new(600));
    assert!(!cache.contains(&2));
    for k in [4, 5, 6] {
        assert!(cache.contains(&k));
    }
}
#[test]
// After each insert: len grows until capacity, the new key takes rank 0,
// and the oldest key remains the peek_lru() answer until it is evicted.
fn test_lru_invariants_after_insert() {
let mut cache: LruCore<i32, i32> = LruCore::new(3);
cache.insert(1, Arc::new(100));
assert_eq!(cache.len(), 1);
assert_eq!(cache.recency_rank(&1), Some(0));
let (lru_key, _) = cache.peek_lru().unwrap();
assert_eq!(*lru_key, 1);
cache.insert(2, Arc::new(200));
assert_eq!(cache.len(), 2);
assert_eq!(cache.recency_rank(&2), Some(0)); assert_eq!(cache.recency_rank(&1), Some(1)); let (lru_key, _) = cache.peek_lru().unwrap();
assert_eq!(*lru_key, 1);
cache.insert(3, Arc::new(300));
assert_eq!(cache.len(), 3);
assert_eq!(cache.recency_rank(&3), Some(0)); assert_eq!(cache.recency_rank(&2), Some(1));
assert_eq!(cache.recency_rank(&1), Some(2));
// Fourth insert exceeds capacity 3: key 1 (rank 2, the LRU) is evicted.
cache.insert(4, Arc::new(400));
assert_eq!(cache.len(), 3);
assert!(!cache.contains(&1)); assert_eq!(cache.recency_rank(&4), Some(0)); assert_eq!(cache.recency_rank(&3), Some(1));
assert_eq!(cache.recency_rank(&2), Some(2)); }
#[test]
// Removing from head, tail, and the last remaining node must each leave
// contiguous ranks starting at 0 and a correct peek_lru() tail.
fn test_lru_invariants_after_remove() {
let mut cache: LruCore<i32, i32> = LruCore::new(4);
for i in 1..=4 {
cache.insert(i, Arc::new(i * 100));
}
// Remove the MRU head (key 4); remaining ranks close up.
cache.remove(&4);
assert_eq!(cache.len(), 3);
assert_eq!(cache.recency_rank(&3), Some(0)); assert_eq!(cache.recency_rank(&2), Some(1));
assert_eq!(cache.recency_rank(&1), Some(2));
// Remove the LRU tail (key 1); key 2 becomes the new tail.
cache.remove(&1);
assert_eq!(cache.len(), 2);
assert_eq!(cache.recency_rank(&3), Some(0));
assert_eq!(cache.recency_rank(&2), Some(1)); let (lru_key, _) = cache.peek_lru().unwrap();
assert_eq!(*lru_key, 2);
cache.remove(&2);
assert_eq!(cache.len(), 1);
assert_eq!(cache.recency_rank(&3), Some(0)); let (lru_key, _) = cache.peek_lru().unwrap();
assert_eq!(*lru_key, 3);
// Removing the final entry empties the chain completely.
cache.remove(&3);
assert_eq!(cache.len(), 0);
assert!(cache.peek_lru().is_none());
}
#[test]
fn test_lru_invariants_after_clear() {
    // clear() empties everything but keeps capacity; the cache remains
    // fully reusable afterwards.
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    for k in 1..=5 {
        cache.insert(k, Arc::new(k * 100));
    }
    assert_eq!(cache.len(), 5);
    cache.clear();
    assert_eq!(cache.len(), 0);
    assert_eq!(cache.capacity(), 5);
    assert!(cache.peek_lru().is_none());
    for k in 1..=5 {
        assert!(cache.recency_rank(&k).is_none());
        assert!(!cache.contains(&k));
    }
    cache.insert(10, Arc::new(1000));
    assert_eq!(cache.len(), 1);
    assert_eq!(cache.recency_rank(&10), Some(0));
    let (lru_key, _) = cache.peek_lru().expect("one entry");
    assert_eq!(*lru_key, 10);
    for k in 11..=14 {
        cache.insert(k, Arc::new(k * 100));
    }
    assert_eq!(cache.len(), 5);
    assert_eq!(cache.recency_rank(&14), Some(0));
    assert_eq!(cache.recency_rank(&10), Some(4));
}
#[test]
fn test_lru_order_with_duplicate_keys() {
    // Re-insertion both replaces the value and promotes the key.
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    for k in 1..=3 {
        cache.insert(k, Arc::new(k * 100));
    }
    cache.insert(1, Arc::new(999));
    assert_eq!(cache.recency_rank(&1), Some(0));
    assert_eq!(**cache.get(&1).unwrap(), 999);
    assert_eq!(cache.recency_rank(&3), Some(1));
    assert_eq!(cache.recency_rank(&2), Some(2));
    for _ in 0..3 {
        cache.get(&2);
    }
    for (rank, &k) in [2, 1, 3].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    cache.insert(2, Arc::new(777));
    assert_eq!(cache.recency_rank(&2), Some(0));
    assert_eq!(**cache.get(&2).unwrap(), 777);
}
#[test]
fn test_lru_traversal_forward() {
    // Walk the expected MRU-to-LRU order before and after a promoting get.
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    for k in 1..=5 {
        cache.insert(k, Arc::new(k * 100));
    }
    for (rank, &k) in [5, 4, 3, 2, 1].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    cache.get(&3);
    for (rank, &k) in [3, 5, 4, 2, 1].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    let (lru_key, _) = cache.peek_lru().expect("non-empty");
    assert_eq!(*lru_key, 1);
}
#[test]
fn test_lru_traversal_backward() {
    // Draining via pop_lru yields exactly the reverse of the recency order.
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    for k in 1..=4 {
        cache.insert(k, Arc::new(k * 100));
    }
    cache.get(&2);
    cache.get(&4);
    let (tail_key, _) = cache.peek_lru().expect("non-empty");
    assert_eq!(*tail_key, 1);
    for (rank, &k) in [4, 2, 3, 1].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    let mut drained = vec![];
    while let Some((k, _)) = cache.pop_lru() {
        drained.push(k);
    }
    assert_eq!(drained, vec![1, 3, 2, 4]);
}
#[test]
fn test_lru_middle_node_removal() {
    // Removing interior nodes keeps the remaining chain's ranks contiguous.
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    for k in 1..=5 {
        cache.insert(k, Arc::new(k * 100));
    }
    let gone = cache.remove(&3).expect("key 3 present");
    assert_eq!(*gone, 300);
    assert_eq!(cache.len(), 4);
    for (rank, &k) in [5, 4, 2, 1].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    assert!(cache.recency_rank(&3).is_none());
    cache.remove(&2);
    assert_eq!(cache.len(), 3);
    for (rank, &k) in [5, 4, 1].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    let (lru_key, _) = cache.peek_lru().expect("non-empty");
    assert_eq!(*lru_key, 1);
}
#[test]
fn test_lru_head_node_removal() {
    // Repeatedly removing the MRU head leaves a consistent, re-ranked chain.
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    for k in 1..=4 {
        cache.insert(k, Arc::new(k * 100));
    }
    let gone = cache.remove(&4).expect("key 4 present");
    assert_eq!(*gone, 400);
    assert_eq!(cache.len(), 3);
    for (rank, &k) in [3, 2, 1].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    cache.remove(&3);
    assert_eq!(cache.len(), 2);
    for (rank, &k) in [2, 1].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    cache.remove(&2);
    assert_eq!(cache.len(), 1);
    assert_eq!(cache.recency_rank(&1), Some(0));
    let (lru_key, _) = cache.peek_lru().expect("one entry");
    assert_eq!(*lru_key, 1);
}
#[test]
fn test_lru_tail_node_removal() {
    // Removing the tail repeatedly promotes the next-oldest entry to LRU.
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    for k in 1..=4 {
        cache.insert(k, Arc::new(k * 100));
    }
    let gone = cache.remove(&1).expect("key 1 present");
    assert_eq!(*gone, 100);
    assert_eq!(cache.len(), 3);
    let (tail, _) = cache.peek_lru().expect("non-empty");
    assert_eq!(*tail, 2);
    for (rank, &k) in [4, 3, 2].iter().enumerate() {
        assert_eq!(cache.recency_rank(&k), Some(rank));
    }
    cache.remove(&2);
    assert_eq!(cache.len(), 2);
    let (tail, _) = cache.peek_lru().expect("non-empty");
    assert_eq!(*tail, 3);
    cache.remove(&3);
    assert_eq!(cache.len(), 1);
    let (tail, _) = cache.peek_lru().expect("one entry");
    assert_eq!(*tail, 4);
}
#[test]
// Every operation on a one-node chain (get/touch/peek/peek_lru/overwrite/
// pop_lru) must keep the node at rank 0 until it is popped.
fn test_lru_single_node_operations() {
let mut cache: LruCore<i32, i32> = LruCore::new(3);
cache.insert(1, Arc::new(100));
assert_eq!(cache.len(), 1);
assert_eq!(cache.recency_rank(&1), Some(0));
let (lru_key, lru_value) = cache.peek_lru().unwrap();
assert_eq!(*lru_key, 1);
assert_eq!(**lru_value, 100);
// get/touch on the only entry leave it at rank 0.
let value = cache.get(&1);
assert!(value.is_some());
assert_eq!(**value.unwrap(), 100);
assert_eq!(cache.recency_rank(&1), Some(0));
assert!(cache.touch(&1));
assert_eq!(cache.recency_rank(&1), Some(0));
assert_eq!(*cache.peek(&1).unwrap(), 100);
let (peek_key, peek_value) = cache.peek_lru().unwrap();
assert_eq!(*peek_key, 1);
assert_eq!(**peek_value, 100);
// Overwriting the sole key replaces the value without changing len/rank.
cache.insert(1, Arc::new(999));
assert_eq!(cache.len(), 1);
assert_eq!(cache.recency_rank(&1), Some(0));
assert_eq!(**cache.get(&1).unwrap(), 999);
// Popping the sole entry returns the updated value and empties the cache.
let (popped_key, popped_value) = cache.pop_lru().unwrap();
assert_eq!(popped_key, 1);
assert_eq!(*popped_value, 999);
assert_eq!(cache.len(), 0);
assert!(cache.peek_lru().is_none());
}
#[test]
// Exercises head/tail swaps on a two-node chain, then grows to capacity 3
// and verifies the first eviction.
fn test_lru_two_node_operations() {
let mut cache: LruCore<i32, i32> = LruCore::new(3);
cache.insert(1, Arc::new(100));
cache.insert(2, Arc::new(200));
assert_eq!(cache.len(), 2);
assert_eq!(cache.recency_rank(&2), Some(0)); assert_eq!(cache.recency_rank(&1), Some(1));
let (tail_key, _) = cache.peek_lru().unwrap();
assert_eq!(*tail_key, 1);
// get(&1) swaps head and tail.
cache.get(&1);
assert_eq!(cache.recency_rank(&1), Some(0)); assert_eq!(cache.recency_rank(&2), Some(1));
let (new_tail_key, _) = cache.peek_lru().unwrap();
assert_eq!(*new_tail_key, 2);
// touch(&2) swaps them back.
assert!(cache.touch(&2));
assert_eq!(cache.recency_rank(&2), Some(0)); assert_eq!(cache.recency_rank(&1), Some(1));
cache.remove(&2);
assert_eq!(cache.len(), 1);
assert_eq!(cache.recency_rank(&1), Some(0));
cache.insert(3, Arc::new(300));
assert_eq!(cache.len(), 2);
assert_eq!(cache.recency_rank(&3), Some(0)); assert_eq!(cache.recency_rank(&1), Some(1));
// Third insert fills capacity 3; nothing is evicted yet.
cache.insert(4, Arc::new(400));
assert_eq!(cache.len(), 3); assert!(cache.contains(&1)); assert!(cache.contains(&3));
assert!(cache.contains(&4));
assert_eq!(cache.recency_rank(&4), Some(0)); assert_eq!(cache.recency_rank(&3), Some(1)); assert_eq!(cache.recency_rank(&1), Some(2));
// Fourth insert overflows: key 1 (the LRU) is evicted.
cache.insert(5, Arc::new(500));
assert_eq!(cache.len(), 3); assert!(!cache.contains(&1)); assert!(cache.contains(&3));
assert!(cache.contains(&4));
assert!(cache.contains(&5));
assert_eq!(cache.recency_rank(&5), Some(0)); assert_eq!(cache.recency_rank(&4), Some(1));
assert_eq!(cache.recency_rank(&3), Some(2)); }
#[test]
// After each insert, every older key's rank grows by exactly one.
fn test_lru_aging_pattern() {
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    for newest in 1..=4 {
        cache.insert(newest, Arc::new(newest * 100));
        for key in 1..=newest {
            assert_eq!(cache.recency_rank(&key), Some((newest - key) as usize));
        }
    }
    // Exceeding capacity evicts the fully aged key 1.
    cache.insert(5, Arc::new(500));
    assert!(!cache.contains(&1));
    for key in 2..=5 {
        assert_eq!(cache.recency_rank(&key), Some((5 - key) as usize));
    }
}
#[test]
// get() and touch() must both move the accessed key to rank 0.
fn test_lru_promotion_to_head() {
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    for key in 1..=5 {
        cache.insert(key, Arc::new(key * 100));
    }
    cache.get(&1);
    assert_eq!(cache.recency_rank(&1), Some(0));
    cache.touch(&3);
    assert_eq!(cache.recency_rank(&3), Some(0));
    cache.get(&5);
    assert_eq!(cache.recency_rank(&5), Some(0));
    // Touching the current head is a no-op for ordering.
    cache.touch(&5);
    assert_eq!(cache.recency_rank(&5), Some(0));
    // Final order reflects the access history: 5, 3, 1, then untouched 4, 2.
    for (key, rank) in [(5, 0), (3, 1), (1, 2), (4, 3), (2, 4)] {
        assert_eq!(cache.recency_rank(&key), Some(rank));
    }
}
#[test]
// Each promotion of another key demotes an untouched key by exactly one rank.
fn test_lru_demotion_patterns() {
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    for key in 1..=4 {
        cache.insert(key, Arc::new(key * 100));
    }
    assert_eq!(cache.recency_rank(&4), Some(0));
    cache.get(&3);
    assert_eq!(cache.recency_rank(&4), Some(1));
    assert_eq!(cache.recency_rank(&3), Some(0));
    cache.get(&2);
    assert_eq!(cache.recency_rank(&4), Some(2));
    assert_eq!(cache.recency_rank(&2), Some(0));
    assert_eq!(cache.recency_rank(&3), Some(1));
    cache.get(&1);
    assert_eq!(cache.recency_rank(&4), Some(3));
    assert_eq!(cache.recency_rank(&1), Some(0));
    assert_eq!(cache.recency_rank(&2), Some(1));
    assert_eq!(cache.recency_rank(&3), Some(2));
    // Fully demoted key 4 is now the eviction victim.
    assert_eq!(*cache.peek_lru().unwrap().0, 4);
    cache.insert(5, Arc::new(500));
    assert!(!cache.contains(&4));
    for key in [1, 2, 3, 5] {
        assert!(cache.contains(&key));
    }
}
#[test]
// Repeated cyclic access never evicts anything and leaves a stable ordering.
fn test_lru_circular_access_pattern() {
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    for key in 1..=3 {
        cache.insert(key, Arc::new(key * 100));
    }
    for _ in 0..5 {
        for key in [1, 2, 3] {
            cache.get(&key);
            // The key just accessed is always the MRU.
            assert_eq!(cache.recency_rank(&key), Some(0));
        }
        for key in [1, 2, 3] {
            assert!(cache.contains(&key));
        }
    }
    // The last cycle ended 1, 2, 3 — so recency order is 3, 2, 1.
    assert_eq!(cache.recency_rank(&3), Some(0));
    assert_eq!(cache.recency_rank(&2), Some(1));
    assert_eq!(cache.recency_rank(&1), Some(2));
    cache.insert(4, Arc::new(400));
    assert!(!cache.contains(&1));
    assert!(cache.contains(&2));
    assert!(cache.contains(&3));
    assert!(cache.contains(&4));
}
#[test]
// A working set larger than capacity: only the most recent `capacity`
// members survive a get-or-insert sweep.
fn test_lru_working_set_behavior() {
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    let working_set = [1, 2, 3, 4, 5];
    for &key in &working_set[0..3] {
        cache.insert(key, Arc::new(key * 100));
    }
    for _ in 0..3 {
        for &key in &working_set {
            // Touch if resident, otherwise (re-)insert — a classic
            // read-through access pattern.
            if cache.contains(&key) {
                cache.get(&key);
            } else {
                cache.insert(key, Arc::new(key * 100));
            }
        }
    }
    assert_eq!(cache.len(), 3);
    for (key, rank) in [(5, 0), (4, 1), (3, 2)] {
        assert!(cache.contains(&key));
        assert_eq!(cache.recency_rank(&key), Some(rank));
    }
}
#[test]
// Hot keys accessed repeatedly stay near the head and outlive evictions.
fn test_lru_temporal_locality() {
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    for key in 1..=4 {
        cache.insert(key, Arc::new(key * 100));
    }
    for _ in 0..10 {
        for hot in [2, 3] {
            cache.get(&hot);
        }
    }
    assert_eq!(cache.recency_rank(&3), Some(0));
    assert_eq!(cache.recency_rank(&2), Some(1));
    assert_eq!(cache.recency_rank(&4), Some(2));
    assert_eq!(cache.recency_rank(&1), Some(3));
    // Cold keys 1 and 4 are evicted first; hot keys survive.
    cache.insert(5, Arc::new(500));
    assert!(!cache.contains(&1));
    cache.insert(6, Arc::new(600));
    assert!(!cache.contains(&4));
    assert!(cache.contains(&2));
    assert!(cache.contains(&3));
}
#[test]
// A pure insertion stream with no re-reads: survivors are exactly the last
// `capacity` keys inserted, ordered by insertion recency.
fn test_lru_no_temporal_locality() {
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    let mut latest = 1;
    for key in 1..=3 {
        cache.insert(key, Arc::new(key * 100));
    }
    for _ in 0..10 {
        latest += 1;
        cache.insert(latest, Arc::new(latest * 100));
    }
    // Only the three newest keys remain, ranks 0, 1, 2 newest-first.
    for offset in 0..3 {
        let key = latest - offset;
        assert!(cache.contains(&key));
        assert_eq!(cache.recency_rank(&key), Some(offset as usize));
    }
    for old in 1..=(latest - 3) {
        assert!(!cache.contains(&old));
    }
}
#[test]
// Mixed reads and writes: after fresh inserts flush old keys, ranks still
// form a dense permutation and the LRU carries the maximum rank.
fn test_lru_mixed_access_patterns() {
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    for key in 1..=4 {
        cache.insert(key, Arc::new(key * 100));
    }
    for key in [2, 4, 1, 3, 2, 1] {
        cache.get(&key);
    }
    for key in 5..=7 {
        cache.insert(key, Arc::new(key * 100));
    }
    assert_eq!(cache.len(), 4);
    assert!(cache.contains(&7));
    assert!(cache.contains(&6));
    assert!(cache.contains(&5));
    // Whatever key is the LRU must carry rank len - 1 == 3.
    let (lru, _) = cache.peek_lru().unwrap();
    assert_eq!(cache.recency_rank(lru), Some(3));
    cache.get(&7);
    cache.insert(8, Arc::new(800));
    cache.get(&6);
    assert_eq!(cache.recency_rank(&6), Some(0));
    assert_eq!(cache.recency_rank(&8), Some(1));
    assert_eq!(cache.recency_rank(&7), Some(2));
}
#[test]
// Heavily accessed hotspots stay near the head and survive evictions while
// rarely touched cold keys fall off.
fn test_lru_hotspot_behavior() {
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    for key in 1..=5 {
        cache.insert(key, Arc::new(key * 100));
    }
    let hotspots = [2, 4];
    let cold_items = [1, 3, 5];
    for round in 0..20 {
        // Hammer the hotspots...
        for _ in 0..5 {
            for &hot in &hotspots {
                cache.get(&hot);
            }
        }
        // ...and only occasionally revisit one cold key.
        if round % 4 == 0 && !cold_items.is_empty() {
            let idx = round / 4 % cold_items.len();
            if cache.contains(&cold_items[idx]) {
                cache.get(&cold_items[idx]);
            }
        }
    }
    // The two hotspots occupy the top two ranks in some order.
    assert!(cache.recency_rank(&2).unwrap() <= 1);
    assert!(cache.recency_rank(&4).unwrap() <= 1);
    cache.insert(6, Arc::new(600));
    cache.insert(7, Arc::new(700));
    assert!(cache.contains(&2));
    assert!(cache.contains(&4));
    // At least one cold key was displaced by the two fresh inserts.
    let evicted_cold = cold_items
        .iter()
        .filter(|&&key| !cache.contains(&key))
        .count();
    assert!(evicted_cold > 0);
}
#[test]
// A key that stops being accessed migrates to the tail and is evicted next.
fn test_lru_coldspot_eviction() {
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    for key in 1..=4 {
        cache.insert(key, Arc::new(key * 100));
    }
    // Keep 2, 3, 4 warm; key 1 cools down.
    for _ in 0..5 {
        cache.get(&2);
        cache.get(&3);
        cache.get(&4);
    }
    assert_eq!(*cache.peek_lru().unwrap().0, 1);
    assert_eq!(cache.recency_rank(&1), Some(3));
    cache.insert(5, Arc::new(500));
    assert!(!cache.contains(&1));
    for key in 2..=5 {
        assert!(cache.contains(&key));
    }
    // Now key 2 cools down and becomes the next victim.
    for _ in 0..3 {
        cache.get(&3);
        cache.get(&4);
        cache.get(&5);
    }
    assert_eq!(*cache.peek_lru().unwrap().0, 2);
    cache.insert(6, Arc::new(600));
    assert!(!cache.contains(&2));
    for key in 3..=6 {
        assert!(cache.contains(&key));
    }
}
#[test]
// After every mutation, ranks must form a dense, duplicate-free permutation
// of 0..len, with the LRU entry holding rank len - 1.
fn test_lru_rank_consistency() {
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    for key in 1..=5 {
        cache.insert(key, Arc::new(key * 100));
    }
    let assert_dense_ranks = |cache: &LruCore<i32, i32>| {
        // Collect (key, rank) for every resident key.
        let mut present = vec![];
        for key in 1..=10 {
            if let Some(rank) = cache.recency_rank(&key) {
                present.push((key, rank));
            }
        }
        assert_eq!(present.len(), cache.len());
        // Ranks must be unique...
        let mut rank_values: Vec<_> = present.iter().map(|&(_, r)| r).collect();
        rank_values.sort();
        rank_values.dedup();
        assert_eq!(rank_values.len(), present.len());
        // ...and contiguous from zero.
        for (idx, &rank) in rank_values.iter().enumerate() {
            assert_eq!(
                rank, idx,
                "Rank at index {} should be {}, but was {}. Current items: {:?}",
                idx, idx, rank, present
            );
        }
        // The LRU entry must carry the highest rank.
        if let Some((lru, _)) = cache.peek_lru() {
            assert_eq!(cache.recency_rank(lru).unwrap(), cache.len() - 1);
        }
    };
    assert_dense_ranks(&cache);
    cache.get(&3);
    assert_dense_ranks(&cache);
    cache.touch(&1);
    assert_dense_ranks(&cache);
    cache.remove(&4);
    assert_dense_ranks(&cache);
    cache.insert(6, Arc::new(600));
    assert_dense_ranks(&cache);
    let _ = cache.pop_lru();
    assert_dense_ranks(&cache);
}
#[test]
// Promoting one key shifts every key that was ahead of it down by one rank.
fn test_lru_rank_updates_after_access() {
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    for key in 1..=4 {
        cache.insert(key, Arc::new(key * 100));
    }
    let before: Vec<_> = (1..=4)
        .map(|key| (key, cache.recency_rank(&key).unwrap()))
        .collect();
    let rank_2_before = cache.recency_rank(&2).unwrap();
    cache.get(&2);
    let rank_2_after = cache.recency_rank(&2).unwrap();
    assert_eq!(rank_2_after, 0);
    assert_ne!(rank_2_before, rank_2_after);
    // Keys that were more recent than 2 each slide down one slot.
    for key in [1, 3, 4] {
        let old = before.iter().find(|&&(k, _)| k == key).unwrap().1;
        if let Some(new) = cache.recency_rank(&key) {
            if old < rank_2_before {
                assert_eq!(new, old + 1);
            }
        }
    }
    // touch() behaves like get() for ordering purposes.
    cache.touch(&1);
    assert_eq!(cache.recency_rank(&1).unwrap(), 0);
    assert_eq!(cache.recency_rank(&2).unwrap(), 1);
}
#[test]
// Batched inserts, reads and removals keep length and ordering coherent.
fn test_lru_batch_operations() {
    let mut cache: LruCore<i32, i32> = LruCore::new(6);
    for key in [1, 2, 3, 4] {
        cache.insert(key, Arc::new(key * 100));
    }
    assert_eq!(cache.len(), 4);
    for key in [2, 4] {
        cache.get(&key);
    }
    // Accessed keys now outrank their untouched peers.
    assert!(cache.recency_rank(&4).unwrap() < cache.recency_rank(&1).unwrap());
    assert!(cache.recency_rank(&2).unwrap() < cache.recency_rank(&3).unwrap());
    for key in [5, 6] {
        cache.insert(key, Arc::new(key * 100));
    }
    assert_eq!(cache.len(), 6);
    let removed = [1, 3];
    for key in &removed {
        cache.remove(key);
    }
    assert_eq!(cache.len(), 4);
    for key in &removed {
        assert!(!cache.contains(key));
    }
    // Interleave one more read/insert/touch/insert burst.
    cache.get(&2);
    cache.insert(7, Arc::new(700));
    cache.touch(&6);
    cache.insert(8, Arc::new(800));
    assert_eq!(cache.len(), 6);
    for (key, rank) in [(8, 0), (6, 1), (7, 2), (2, 3)] {
        assert_eq!(cache.recency_rank(&key), Some(rank));
    }
    for key in [2, 4, 5, 6, 7, 8] {
        assert!(cache.contains(&key));
        assert!(cache.recency_rank(&key).is_some());
    }
}
#[test]
// Alternating inserts and reads produce the expected interleaved ordering.
fn test_lru_interleaved_insert_access() {
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    cache.insert(1, Arc::new(100));
    cache.insert(2, Arc::new(200));
    cache.get(&1);
    cache.insert(3, Arc::new(300));
    cache.get(&2);
    cache.insert(4, Arc::new(400));
    cache.get(&3);
    for (key, rank) in [(3, 0), (4, 1), (2, 2), (1, 3)] {
        assert_eq!(cache.recency_rank(&key), Some(rank));
    }
    // Key 1 is the oldest and gets evicted by the next insert.
    cache.insert(5, Arc::new(500));
    assert!(!cache.contains(&1));
    for (key, rank) in [(5, 0), (3, 1), (4, 2), (2, 3)] {
        assert_eq!(cache.recency_rank(&key), Some(rank));
    }
}
#[test]
// LRU ignores access frequency: a key read 100 times is still evicted if
// other keys were read more recently.
fn test_lru_frequency_vs_recency() {
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    for key in 1..=3 {
        cache.insert(key, Arc::new(key * 100));
    }
    for _ in 0..100 {
        cache.get(&1);
    }
    cache.get(&2);
    cache.get(&3);
    assert_eq!(cache.recency_rank(&3), Some(0));
    assert_eq!(cache.recency_rank(&2), Some(1));
    assert_eq!(cache.recency_rank(&1), Some(2));
    cache.insert(4, Arc::new(400));
    assert!(!cache.contains(&1));
    assert!(cache.contains(&2));
    assert!(cache.contains(&3));
    assert!(cache.contains(&4));
}
#[test]
// Fill an empty cache step by step up to capacity, then overflow it once.
fn test_lru_cache_warming() {
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    assert_eq!(cache.len(), 0);
    assert!(cache.peek_lru().is_none());
    cache.insert(1, Arc::new(100));
    cache.insert(2, Arc::new(200));
    assert_eq!(cache.len(), 2);
    assert_eq!(cache.recency_rank(&2), Some(0));
    assert_eq!(cache.recency_rank(&1), Some(1));
    cache.get(&1);
    assert_eq!(cache.recency_rank(&1), Some(0));
    cache.insert(3, Arc::new(300));
    cache.insert(4, Arc::new(400));
    assert_eq!(cache.len(), 4);
    for (key, rank) in [(4, 0), (3, 1), (1, 2), (2, 3)] {
        assert_eq!(cache.recency_rank(&key), Some(rank));
    }
    cache.insert(5, Arc::new(500));
    assert_eq!(cache.len(), 5);
    // First insert past capacity evicts the LRU entry (key 2).
    cache.insert(6, Arc::new(600));
    assert_eq!(cache.len(), 5);
    assert!(!cache.contains(&2));
}
#[test]
// Once-uniformly-hot entries cool down as the active set shrinks.
fn test_lru_cache_cooling() {
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    for key in 1..=4 {
        cache.insert(key, Arc::new(key * 100));
    }
    // Phase 1: everything is hot.
    for _ in 0..20 {
        for key in 1..=4 {
            cache.get(&key);
        }
    }
    for key in 1..=4 {
        assert!(cache.contains(&key));
    }
    // Phase 2: only 2 and 4 stay active.
    for _ in 0..5 {
        for key in [2, 4] {
            cache.get(&key);
        }
    }
    assert!(cache.recency_rank(&2).unwrap() < cache.recency_rank(&1).unwrap());
    assert!(cache.recency_rank(&4).unwrap() < cache.recency_rank(&3).unwrap());
    // Phase 3: only 4 stays active and survives two evictions.
    for _ in 0..3 {
        cache.get(&4);
    }
    assert_eq!(cache.recency_rank(&4), Some(0));
    cache.insert(5, Arc::new(500));
    cache.insert(6, Arc::new(600));
    assert!(cache.contains(&4));
    assert!(cache.contains(&5));
    assert!(cache.contains(&6));
}
#[test]
// At capacity, each insert evicts exactly the current LRU victim and the
// new key enters at rank 0.
fn test_lru_steady_state_behavior() {
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    for key in 1..=4 {
        cache.insert(key, Arc::new(key * 100));
    }
    assert_eq!(cache.len(), 4);
    for key in [5, 6, 7, 8, 9, 10] {
        // Predict the victim before inserting, then confirm it is gone.
        let victim = *cache.peek_lru().unwrap().0;
        cache.insert(key, Arc::new(key * 100));
        assert_eq!(cache.len(), 4);
        assert!(!cache.contains(&victim));
        assert_eq!(cache.recency_rank(&key), Some(0));
    }
    cache.get(&8);
    assert_eq!(cache.recency_rank(&8), Some(0));
    cache.insert(11, Arc::new(1100));
    assert_eq!(cache.recency_rank(&11), Some(0));
    assert_eq!(cache.recency_rank(&8), Some(1));
    // Every rank 0..len must be claimed by exactly one resident key.
    for rank in 0..cache.len() {
        let claimed = (7..=11)
            .any(|key| cache.contains(&key) && cache.recency_rank(&key) == Some(rank));
        assert!(claimed, "Missing rank {} in steady state", rank);
    }
}
#[test]
// Walk the cache through empty -> partial -> full -> eviction -> empty.
fn test_lru_transition_states() {
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    cache.insert(1, Arc::new(100));
    assert_eq!(cache.len(), 1);
    assert_eq!(cache.recency_rank(&1), Some(0));
    cache.insert(2, Arc::new(200));
    assert_eq!(cache.len(), 2);
    assert_eq!(cache.recency_rank(&2), Some(0));
    assert_eq!(cache.recency_rank(&1), Some(1));
    cache.insert(3, Arc::new(300));
    assert_eq!(cache.len(), 3);
    cache.insert(4, Arc::new(400));
    assert_eq!(cache.len(), 4);
    for key in 1..=4 {
        assert!(cache.contains(&key));
    }
    // Overflow: key 1 is evicted.
    cache.insert(5, Arc::new(500));
    assert_eq!(cache.len(), 4);
    assert!(!cache.contains(&1));
    // Drain back down, one removal at a time.
    for (key, remaining) in [(2, 3), (3, 2), (4, 1)] {
        cache.remove(&key);
        assert_eq!(cache.len(), remaining);
    }
    assert!(cache.contains(&5));
    assert_eq!(cache.recency_rank(&5), Some(0));
    cache.remove(&5);
    assert_eq!(cache.len(), 0);
    assert!(cache.peek_lru().is_none());
}
#[test]
// Removing head, tail, and middle nodes must leave a dense rank permutation
// over the surviving keys.
fn test_lru_list_integrity() {
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    for key in 1..=5 {
        cache.insert(key, Arc::new(key * 100));
        for earlier in 1..=key {
            assert!(cache.recency_rank(&earlier).is_some());
        }
        assert!(cache.peek_lru().is_some());
    }
    cache.get(&1);
    assert_eq!(cache.recency_rank(&1), Some(0));
    cache.get(&3);
    assert_eq!(cache.recency_rank(&3), Some(0));
    // Remove the current head (key 3).
    cache.remove(&3);
    assert_eq!(cache.len(), 4);
    // Remove the current tail.
    let tail = *cache.peek_lru().unwrap().0;
    cache.remove(&tail);
    assert_eq!(cache.len(), 3);
    // Remove a middle node: key 1 if it sits in the middle, otherwise the
    // first surviving key that is neither head nor tail.
    if cache.recency_rank(&1).unwrap() == 1 {
        cache.remove(&1);
    } else {
        for key in [2, 4, 5] {
            if cache.contains(&key) {
                let rank = cache.recency_rank(&key).unwrap();
                if rank != 0 && rank != cache.len() - 1 {
                    cache.remove(&key);
                    break;
                }
            }
        }
    }
    // Ranks of survivors must still be 0..len with no gaps.
    for rank in 0..cache.len() {
        let found =
            (1..=5).any(|key| cache.contains(&key) && cache.recency_rank(&key) == Some(rank));
        assert!(found, "Rank {} not found after list operations", rank);
    }
}
#[test]
// len() must always agree with the number of keys that report a rank.
fn test_lru_list_node_count() {
    let mut cache: LruCore<i32, i32> = LruCore::new(4);
    assert_eq!(cache.len(), 0);
    for key in 1..=4 {
        cache.insert(key, Arc::new(key * 100));
        assert_eq!(cache.len(), key as usize);
        let ranked = (1..=key).filter(|k| cache.recency_rank(k).is_some()).count();
        assert_eq!(ranked, key as usize);
    }
    cache.remove(&2);
    assert_eq!(cache.len(), 3);
    let ranked = (1..=4).filter(|k| cache.recency_rank(k).is_some()).count();
    assert_eq!(ranked, 3);
    cache.insert(5, Arc::new(500));
    assert_eq!(cache.len(), 4);
    // clear() must forget every key.
    cache.clear();
    assert_eq!(cache.len(), 0);
    for key in 1..=5 {
        assert!(cache.recency_rank(&key).is_none());
    }
}
#[test]
// Verifies that the rank-ordered (MRU -> LRU) view and the reversed
// recency-sorted view describe the same key sequence, and that the last
// item in MRU -> LRU order is the victim reported by peek_lru().
fn test_lru_bidirectional_consistency() {
    let mut cache: LruCore<i32, i32> = LruCore::new(5);
    for i in 1..=5 {
        cache.insert(i, Arc::new(i * 100));
    }
    let verify_bidirectional = |cache: &LruCore<i32, i32>| {
        // Forward walk: collect keys by ascending recency rank (MRU first).
        // Scans 1..=6 (not 1..=5 as before) so key 6, inserted below, is
        // actually covered by the check.
        let mut forward_items = vec![];
        for rank in 0..cache.len() {
            for i in 1..=6 {
                if cache.contains(&i) && cache.recency_rank(&i) == Some(rank) {
                    forward_items.push(i);
                    break;
                }
            }
        }
        // Backward walk: sort resident keys by rank, then reverse (LRU first).
        let mut backward_items = vec![];
        let mut current_items: Vec<_> = (1..=6).filter(|i| cache.contains(i)).collect();
        current_items.sort_by_key(|i| cache.recency_rank(i).unwrap());
        current_items.reverse();
        // BUG FIX: this loop header previously read `for &item in ¤t_items`
        // — mojibake where the `&curren` prefix of `&current_items` had been
        // collapsed into the `¤` HTML entity — which does not compile.
        for &item in &current_items {
            backward_items.push(item);
        }
        forward_items.reverse();
        assert_eq!(forward_items, backward_items);
        if let Some((lru_key, _)) = cache.peek_lru() {
            let lru_key = *lru_key;
            // Undo the reverse: in MRU -> LRU order the LRU key comes last.
            forward_items.reverse();
            assert_eq!(forward_items.last(), Some(&lru_key));
        }
    };
    verify_bidirectional(&cache);
    cache.get(&3);
    verify_bidirectional(&cache);
    cache.touch(&1);
    verify_bidirectional(&cache);
    cache.remove(&4);
    verify_bidirectional(&cache);
    cache.insert(6, Arc::new(600));
    verify_bidirectional(&cache);
}
#[test]
// Evictions happen in strict staleness order, and a get() changes who is
// evicted next.
fn test_lru_eviction_callback_order() {
    let mut cache: LruCore<i32, i32> = LruCore::new(3);
    cache.insert(1, Arc::new(100));
    cache.insert(2, Arc::new(200));
    cache.insert(3, Arc::new(300));
    // Record each predicted victim right before the insert that evicts it.
    let mut eviction_order = vec![];
    for key in 4..=8 {
        eviction_order.push(*cache.peek_lru().unwrap().0);
        cache.insert(key, Arc::new(key * 100));
    }
    assert_eq!(eviction_order, vec![1, 2, 3, 4, 5]);
    assert!(cache.contains(&6));
    assert!(cache.contains(&7));
    assert!(cache.contains(&8));
    assert_eq!(cache.len(), 3);
    cache.clear();
    cache.insert(10, Arc::new(1000));
    cache.insert(11, Arc::new(1100));
    cache.insert(12, Arc::new(1200));
    // Reading 11 protects it; 10 becomes the victim instead.
    cache.get(&11);
    assert_eq!(*cache.peek_lru().unwrap().0, 10);
    cache.insert(13, Arc::new(1300));
    assert!(!cache.contains(&10));
    assert!(cache.contains(&11));
    assert!(cache.contains(&12));
    assert!(cache.contains(&13));
}
#[test]
#[cfg_attr(miri, ignore)]
// Larger population: eviction counts and rank uniqueness at 1000 entries.
fn test_lru_memory_layout_efficiency() {
    let mut cache: LruCore<i32, i32> = LruCore::new(1000);
    for key in 1..=1000 {
        cache.insert(key, Arc::new(key * 100));
    }
    assert_eq!(cache.len(), 1000);
    for key in (1..=1000).step_by(7) {
        cache.get(&key);
    }
    let (lru, _) = cache.peek_lru().unwrap();
    assert!(cache.recency_rank(lru).is_some());
    let before = cache.len();
    for key in 1001..=1100 {
        cache.insert(key, Arc::new(key * 100));
    }
    assert_eq!(cache.len(), before);
    // Exactly 100 of the original keys were displaced.
    let evicted = (1..=1000).filter(|k| !cache.contains(k)).count();
    assert_eq!(evicted, 100);
    // All reported ranks are unique and in range.
    let mut seen = std::collections::HashSet::new();
    for key in 1..=1100 {
        if let Some(rank) = cache.recency_rank(&key) {
            assert!(rank < cache.len());
            assert!(seen.insert(rank));
        }
    }
    assert_eq!(seen.len(), cache.len());
}
#[test]
// Smoke-test every public operation once on a 100-entry cache.
fn test_lru_algorithmic_complexity() {
    let mut cache: LruCore<i32, i32> = LruCore::new(100);
    for key in 1..=100 {
        cache.insert(key, Arc::new(key * 100));
    }
    // Insert past capacity evicts exactly one entry.
    cache.insert(101, Arc::new(10100));
    assert!(cache.contains(&101));
    assert_eq!(cache.len(), 100);
    // get() promotes.
    let fetched = cache.get(&50);
    assert!(fetched.is_some());
    assert_eq!(cache.recency_rank(&50), Some(0));
    // Membership.
    assert!(cache.contains(&75));
    assert!(!cache.contains(&1));
    // remove().
    let removed = cache.remove(&75);
    assert!(removed.is_some());
    assert!(!cache.contains(&75));
    // touch() promotes.
    assert!(cache.touch(&60));
    assert_eq!(cache.recency_rank(&60), Some(0));
    // peek() does NOT promote.
    let peeked = cache.peek(&80);
    assert!(peeked.is_some());
    assert_ne!(cache.recency_rank(&80), Some(0));
    // Rank bounds.
    let rank = cache.recency_rank(&90);
    assert!(rank.is_some());
    assert!(rank.unwrap() < cache.len());
    // peek_lru and pop_lru agree on the victim.
    let (lru, _) = cache.peek_lru().unwrap();
    assert_eq!(cache.recency_rank(lru).unwrap(), cache.len() - 1);
    let expected_victim = *lru;
    let (popped, _) = cache.pop_lru().unwrap();
    assert_eq!(popped, expected_victim);
    assert!(!cache.contains(&popped));
}
#[cfg(feature = "concurrency")]
#[test]
// Concurrent wrapper: basic ordering plus Arc refcount bookkeeping for
// insert_arc()/get().
fn test_lru_concurrent_ordering() {
    use std::sync::Arc;
    let cache = super::super::ConcurrentLruCache::new(4);
    for key in 1..=4 {
        cache.insert(key, key * 100);
    }
    cache.get(&1);
    cache.get(&3);
    for key in 1..=4 {
        assert!(cache.contains(&key));
    }
    cache.insert(5, 500);
    assert!(cache.contains(&5));
    assert_eq!(cache.len(), 4);
    // insert_arc stores the caller's Arc without cloning the payload.
    let shared = Arc::new(999);
    cache.insert_arc(6, Arc::clone(&shared));
    assert_eq!(Arc::strong_count(&shared), 2);
    let fetched = cache.get(&6);
    assert!(fetched.is_some());
    assert_eq!(Arc::strong_count(&shared), 3);
    drop(fetched);
    assert_eq!(Arc::strong_count(&shared), 2);
    assert!(cache.contains(&6));
    assert_eq!(cache.len(), 4);
}
#[test]
// Two caches driven by an identical operation script must agree on every
// observable after every step.
fn test_lru_deterministic_behavior() {
    let mut cache1: LruCore<i32, i32> = LruCore::new(4);
    let mut cache2: LruCore<i32, i32> = LruCore::new(4);
    let script = [
        ("insert", 1, 100),
        ("insert", 2, 200),
        ("insert", 3, 300),
        ("get", 1, 0),
        ("insert", 4, 400),
        ("get", 2, 0),
        ("touch", 4, 0),
        ("insert", 5, 500),
        ("remove", 3, 0),
        ("insert", 6, 600),
    ];
    for (op, key, value) in script {
        // Apply the same step to both caches.
        match op {
            "insert" => {
                cache1.insert(key, Arc::new(value));
                cache2.insert(key, Arc::new(value));
            },
            "get" => {
                cache1.get(&key);
                cache2.get(&key);
            },
            "touch" => {
                cache1.touch(&key);
                cache2.touch(&key);
            },
            "remove" => {
                cache1.remove(&key);
                cache2.remove(&key);
            },
            _ => panic!("Unknown operation"),
        }
        // Observables must match exactly after each step.
        assert_eq!(cache1.len(), cache2.len());
        for key in 1..=6 {
            assert_eq!(cache1.contains(&key), cache2.contains(&key));
        }
        for key in 1..=6 {
            assert_eq!(cache1.recency_rank(&key), cache2.recency_rank(&key));
        }
        match (cache1.peek_lru(), cache2.peek_lru()) {
            (Some((k1, _)), Some((k2, _))) => assert_eq!(k1, k2),
            (None, None) => {},
            _ => panic!("LRU mismatch between caches"),
        }
    }
    // A final burst of inserts keeps them in lock-step too.
    for key in 7..=10 {
        cache1.insert(key, Arc::new(key * 100));
        cache2.insert(key, Arc::new(key * 100));
    }
    assert_eq!(cache1.len(), cache2.len());
    for key in 1..=10 {
        assert_eq!(cache1.contains(&key), cache2.contains(&key));
        assert_eq!(cache1.recency_rank(&key), cache2.recency_rank(&key));
    }
}
}
mod state_consistency {
use std::collections::HashSet;
use std::sync::Arc;
use super::*;
// Test helper: number of entries in the key map, used as a proxy for the
// number of live nodes when cross-checking against the linked list.
fn count_nodes<K, V>(cache: &LruCore<K, V>) -> usize
where
K: Copy + Eq + Hash,
{
cache.map.len()
}
// Test helper: walk the intrusive list from `head` via `next` pointers and
// return the keys in MRU -> LRU order.
fn list_keys<K, V>(cache: &LruCore<K, V>) -> Vec<K>
where
    K: Copy + Eq + Hash,
{
    let mut ordered = Vec::new();
    let mut cursor = cache.head;
    while let Some(slot) = cursor {
        let node = cache.arena.get(slot).unwrap();
        ordered.push(node.key);
        cursor = node.next;
    }
    ordered
}
// Test helper: key stored at the list head (MRU position), if any.
fn head_key<K, V>(cache: &LruCore<K, V>) -> Option<K>
where
K: Copy + Eq + Hash,
{
cache.head.and_then(|id| cache.arena.get(id)).map(|n| n.key)
}
// Test helper: key stored at the list tail (LRU position), if any.
fn tail_key<K, V>(cache: &LruCore<K, V>) -> Option<K>
where
K: Copy + Eq + Hash,
{
cache.tail.and_then(|id| cache.arena.get(id)).map(|n| n.key)
}
#[test]
// Map length and list node count stay in lock-step through insert/remove/clear.
fn test_hashmap_linkedlist_size_consistency() {
    let mut cache: LruCore<i32, i32> = LruCore::new(10);
    let in_sync = |c: &LruCore<i32, i32>| assert_eq!(c.map.len(), count_nodes(c));
    in_sync(&cache);
    cache.insert(1, Arc::new(10));
    in_sync(&cache);
    cache.insert(2, Arc::new(20));
    in_sync(&cache);
    cache.remove(&1);
    in_sync(&cache);
    cache.clear();
    in_sync(&cache);
}
#[test]
// head/tail pointers track the MRU and LRU entries as items are added.
fn test_head_tail_slot_consistency() {
    let mut cache = LruCore::new(10);
    assert!(cache.head.is_none());
    assert!(cache.tail.is_none());
    cache.insert(1, Arc::new(10));
    // A single entry is simultaneously head and tail.
    assert_eq!(head_key(&cache), Some(1));
    assert_eq!(tail_key(&cache), Some(1));
    cache.insert(2, Arc::new(20));
    assert_eq!(head_key(&cache), Some(2));
    assert_eq!(tail_key(&cache), Some(1));
    assert_eq!(list_keys(&cache), vec![2, 1]);
}
#[test]
// The set of keys reachable via the list equals the set of map keys.
fn test_node_reference_consistency() {
    let mut cache = LruCore::new(10);
    for key in 0..5 {
        cache.insert(key, Arc::new(key));
    }
    let from_list: HashSet<_> = list_keys(&cache).into_iter().collect();
    let from_map: HashSet<_> = cache.map.keys().copied().collect();
    assert_eq!(from_list.len(), 5);
    assert_eq!(cache.map.len(), 5);
    assert_eq!(from_list, from_map);
}
#[test]
// Walking the list yields each key exactly once (no cycles, no repeats).
fn test_doubly_linked_list_integrity() {
    let mut cache = LruCore::new(10);
    for key in 0..5 {
        cache.insert(key, Arc::new(key));
    }
    let walked = list_keys(&cache);
    let distinct: HashSet<_> = walked.iter().copied().collect();
    assert_eq!(walked.len(), distinct.len());
    assert_eq!(walked.len(), cache.map.len());
}
#[test]
// validate_invariants() must hold after every kind of mutation.
fn test_invariants_after_every_operation() {
    let mut cache = LruCore::new(5);
    cache.validate_invariants();
    for key in 0..5 {
        cache.insert(key, Arc::new(key));
        cache.validate_invariants();
    }
    cache.get(&2);
    cache.validate_invariants();
    // This insert overflows capacity and triggers an eviction.
    cache.insert(5, Arc::new(5));
    cache.validate_invariants();
    cache.remove(&3);
    cache.validate_invariants();
    cache.clear();
    cache.validate_invariants();
}
#[test]
#[cfg_attr(miri, ignore)]
// Evicting on overflow must free exactly one slot: counts stay at capacity.
fn test_memory_consistency_on_eviction() {
    let mut cache = LruCore::new(2);
    cache.insert(1, Arc::new(10));
    cache.insert(2, Arc::new(20));
    assert_eq!(cache.map.len(), 2);
    assert_eq!(count_nodes(&cache), 2);
    // Overflow: key 1 (the LRU) is replaced by key 3.
    cache.insert(3, Arc::new(30));
    assert_eq!(cache.map.len(), 2);
    assert_eq!(count_nodes(&cache), 2);
    assert!(cache.contains(&2));
    assert!(cache.contains(&3));
    assert!(!cache.contains(&1));
    cache.validate_invariants();
}
#[test]
// The cache never holds more than `capacity` entries under sustained overflow.
fn test_capacity_constraints_enforcement() {
    let capacity = 10;
    let mut cache = LruCore::new(capacity);
    for key in 0..capacity * 2 {
        cache.insert(key, Arc::new(key));
        assert!(cache.map.len() <= capacity);
        assert!(count_nodes(&cache) <= capacity);
    }
}
#[test]
// A freshly constructed cache has no head, no tail, and no entries.
fn test_empty_cache_state_invariants() {
    let cache: LruCore<i32, i32> = LruCore::new(10);
    assert!(cache.head.is_none());
    assert!(cache.tail.is_none());
    assert!(cache.map.is_empty());
    assert_eq!(count_nodes(&cache), 0);
}
#[test]
// With one entry, head == tail and both counters read 1.
fn test_single_item_cache_state() {
    let mut cache = LruCore::new(10);
    cache.insert(1, Arc::new(100));
    assert_eq!(head_key(&cache), Some(1));
    assert_eq!(tail_key(&cache), Some(1));
    assert_eq!(cache.map.len(), 1);
    assert_eq!(count_nodes(&cache), 1);
}
#[test]
// A cache filled exactly to capacity has distinct head and tail and passes
// the structural invariant check.
fn test_full_cache_state_invariants() {
    let capacity = 3;
    let mut cache = LruCore::new(capacity);
    for key in 0..capacity {
        cache.insert(key, Arc::new(key));
    }
    assert_eq!(cache.map.len(), capacity);
    assert_eq!(count_nodes(&cache), capacity);
    assert!(head_key(&cache).is_some());
    assert!(tail_key(&cache).is_some());
    assert_ne!(head_key(&cache), tail_key(&cache));
    cache.validate_invariants();
}
#[test]
// clear() resets head, tail, and the key map.
fn test_state_after_clear_operation() {
    let mut cache = LruCore::new(5);
    for key in 0..3 {
        cache.insert(key, Arc::new(key));
    }
    cache.clear();
    assert!(cache.head.is_none());
    assert!(cache.tail.is_none());
    assert!(cache.map.is_empty());
    assert_eq!(count_nodes(&cache), 0);
}
#[test]
// Node count follows each insert/remove, and head clears once empty.
fn test_state_during_capacity_transitions() {
    let mut cache = LruCore::new(5);
    cache.insert(1, Arc::new(1));
    assert_eq!(count_nodes(&cache), 1);
    cache.insert(2, Arc::new(2));
    assert_eq!(count_nodes(&cache), 2);
    cache.remove(&1);
    assert_eq!(count_nodes(&cache), 1);
    cache.remove(&2);
    assert_eq!(count_nodes(&cache), 0);
    assert!(cache.head.is_none());
}
#[test]
// Re-inserting existing keys updates in place — no extra nodes allocated.
fn test_node_allocation_consistency() {
    let mut cache = LruCore::new(10);
    for key in 0..10 {
        cache.insert(key, Arc::new(key));
    }
    assert_eq!(cache.map.len(), 10);
    assert_eq!(count_nodes(&cache), 10);
    for key in 0..5 {
        cache.insert(key, Arc::new(key + 100));
    }
    assert_eq!(cache.map.len(), 10);
    assert_eq!(count_nodes(&cache), 10);
}
#[test]
// Every map entry points at an arena node holding the matching key/value.
fn test_key_value_mapping_consistency() {
    let mut cache = LruCore::new(5);
    for key in 0..5 {
        cache.insert(key, Arc::new(key * 10));
    }
    for key in 0..5 {
        let slot = *cache.map.get(&key).unwrap();
        let node = cache.arena.get(slot).unwrap();
        assert_eq!(node.key, key);
        assert_eq!(*node.value, key * 10);
    }
}
#[test]
// The list order is MRU-first, and a get() rotates the accessed key to front.
fn test_lru_ordering_state_consistency() {
    let mut cache = LruCore::new(3);
    cache.insert(1, Arc::new(1));
    cache.insert(2, Arc::new(2));
    cache.insert(3, Arc::new(3));
    assert_eq!(list_keys(&cache), vec![3, 2, 1]);
    cache.get(&1);
    assert_eq!(list_keys(&cache), vec![1, 3, 2]);
}
#[cfg(feature = "concurrency")]
#[cfg_attr(miri, ignore)]
#[test]
// Concurrent inserts/gets must leave the map and list in agreement.
fn test_concurrent_state_consistency() {
    let cache = Arc::new(ConcurrentLruCache::new(10));
    let mut handles = vec![];
    for key in 0..10 {
        let cache = Arc::clone(&cache);
        handles.push(std::thread::spawn(move || {
            cache.insert(key, Arc::new(key));
            let _ = cache.get(&key);
        }));
    }
    for handle in handles {
        handle.join().unwrap();
    }
    assert!(cache.len() <= 10);
    // White-box check under the read lock: map and list agree on size.
    let guard = cache.inner.read();
    assert_eq!(guard.map.len(), count_nodes(&*guard));
}
#[test]
// Degenerate operations (zero capacity, removing a missing key) must not
// corrupt state.
fn test_state_recovery_after_errors() {
    // Capacity 0: insert stores nothing.
    let mut cache = LruCore::new(0);
    assert!(cache.insert(1, Arc::new(1)).is_none());
    assert!(cache.map.is_empty());
    // Removing an absent key leaves the resident entry intact.
    let mut cache = LruCore::new(1);
    cache.insert(1, Arc::new(1));
    assert!(cache.remove(&2).is_none());
    assert_eq!(cache.map.len(), 1);
    cache.validate_invariants();
}
#[test]
// The cache holds exactly one extra strong reference per stored value.
fn test_arc_reference_count_consistency() {
    let mut cache = LruCore::new(5);
    let value = Arc::new(100);
    assert_eq!(Arc::strong_count(&value), 1);
    cache.insert(1, value.clone());
    assert_eq!(Arc::strong_count(&value), 2);
    cache.remove(&1);
    assert_eq!(Arc::strong_count(&value), 1);
}
#[test]
// Construction with arbitrary key/value types preserves the capacity.
fn test_phantom_data_type_consistency() {
    let cache: LruCore<u32, String> = LruCore::new(10);
    assert_eq!(cache.capacity(), 10);
}
#[test]
// Invariants hold across insert -> remove -> refill transitions.
fn test_state_transitions_insert_remove() {
    let mut cache = LruCore::new(3);
    cache.insert(1, Arc::new(1));
    cache.insert(2, Arc::new(2));
    cache.insert(3, Arc::new(3));
    cache.validate_invariants();
    cache.remove(&2);
    cache.validate_invariants();
    assert!(!cache.contains(&2));
    cache.insert(4, Arc::new(4));
    cache.validate_invariants();
}
#[test]
// peek() leaves the head untouched; get() moves the key to the head.
fn test_state_transitions_get_peek() {
    let mut cache = LruCore::new(3);
    cache.insert(1, Arc::new(1));
    cache.insert(2, Arc::new(2));
    let head_before = head_key(&cache).unwrap();
    cache.peek(&1);
    assert_eq!(head_before, head_key(&cache).unwrap());
    cache.get(&1);
    assert_eq!(head_key(&cache).unwrap(), 1);
    cache.validate_invariants();
}
#[test]
// touch() promotes the key to head and leaves the next-oldest at the tail.
fn test_state_transitions_touch_operations() {
    let mut cache = LruCore::new(3);
    cache.insert(1, Arc::new(1));
    cache.insert(2, Arc::new(2));
    cache.insert(3, Arc::new(3));
    assert!(cache.touch(&1));
    assert_eq!(head_key(&cache), Some(1));
    assert_eq!(tail_key(&cache), Some(2));
    cache.validate_invariants();
}
// Every SlotId stored in the map must resolve to an arena node carrying the
// same key — no stale or mismatched slots.
#[test]
fn test_node_slot_validity() {
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(1));
cache.insert(2, Arc::new(2));
for (k, &id) in &cache.map {
let node = cache.arena.get(id).unwrap();
assert_eq!(node.key, *k);
}
}
// Traversing the list must never visit the same key twice (no cycles).
#[test]
fn test_circular_reference_prevention() {
let mut cache = LruCore::new(5);
for i in 0..5 {
cache.insert(i, Arc::new(i));
}
let mut visited = HashSet::new();
let keys = list_keys(&cache);
for k in keys {
assert!(visited.insert(k), "Duplicate entry detected!");
}
}
// List length must match map size — no nodes detached from the list while
// still reachable from the map.
#[test]
fn test_orphaned_node_detection() {
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(1));
cache.insert(2, Arc::new(2));
let count_list = count_nodes(&cache);
assert_eq!(count_list, cache.map.len());
}
// Re-inserting an existing key must reuse the same slot rather than
// allocating a second node for the key.
#[test]
fn test_duplicate_node_prevention() {
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(1));
let id1 = cache.map.get(&1).copied();
cache.insert(1, Arc::new(2)); let id2 = cache.map.get(&1).copied();
assert_eq!(id1, id2);
assert_eq!(cache.map.len(), 1);
assert_eq!(count_nodes(&cache), 1);
}
// Full traversal terminates and yields each key exactly once.
#[test]
fn test_list_termination_consistency() {
let mut cache = LruCore::new(5);
for i in 0..5 {
cache.insert(i, Arc::new(i));
}
let keys = list_keys(&cache);
let uniq: HashSet<_> = keys.iter().copied().collect();
assert_eq!(keys.len(), uniq.len());
}
// The head always tracks the most recent insert.
#[test]
fn test_head_node_properties() {
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(1));
assert_eq!(head_key(&cache), Some(1));
cache.insert(2, Arc::new(2));
assert_eq!(head_key(&cache), Some(2));
}
// The tail always tracks the least-recently-used entry.
#[test]
fn test_tail_node_properties() {
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(1)); cache.insert(2, Arc::new(2));
assert_eq!(tail_key(&cache), Some(1));
}
// list_keys yields entries MRU-first: newest insert at the front.
#[test]
fn test_middle_node_properties() {
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(1));
cache.insert(2, Arc::new(2)); cache.insert(3, Arc::new(3));
assert_eq!(list_keys(&cache), vec![3, 2, 1]);
}
// Re-inserting an existing key must not create a duplicate list entry.
#[test]
fn test_key_uniqueness_in_list() {
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(1));
cache.insert(2, Arc::new(2));
cache.insert(1, Arc::new(3));
let mut keys = HashSet::new();
for key in list_keys(&cache) {
assert!(keys.insert(key));
}
}
// Key reported by list traversal and value reported by peek must refer to
// the same logical entry.
#[test]
fn test_value_consistency_across_structures() {
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(10));
let head_key_val = head_key(&cache).unwrap();
assert_eq!(head_key_val, 1);
let value = cache.peek(&1).unwrap();
assert_eq!(*value, 10);
}
// Invariants must hold after every single eviction in a long cascade.
#[test]
fn test_state_during_eviction_cascades() {
let mut cache = LruCore::new(3);
for i in 0..10 {
cache.insert(i, Arc::new(i));
assert!(cache.len() <= 3);
cache.validate_invariants();
}
}
// A single insert leaves the cache in a fully consistent state.
#[test]
fn test_atomic_operation_consistency() {
let mut cache = LruCore::new(3);
cache.insert(1, Arc::new(1));
cache.validate_invariants();
}
// A freshly constructed cache has an empty list (head is None).
#[test]
fn test_rollback_state_on_failure() {
let cache = LruCore::<i32, i32>::new(5);
assert!(cache.head.is_none());
}
// validate_invariants must pass on both an empty and a populated cache.
#[test]
fn test_debug_invariant_validation() {
let mut cache = LruCore::new(5);
cache.validate_invariants();
cache.insert(1, Arc::new(1));
cache.validate_invariants();
}
// Repeated overwrites of a rotating key set must not accumulate nodes
// beyond what the map tracks.
#[test]
#[cfg_attr(miri, ignore)]
fn test_memory_leak_prevention() {
let mut cache = LruCore::new(10);
for i in 0..100 {
cache.insert(i % 20, Arc::new(i));
}
assert_eq!(cache.map.len(), count_nodes(&cache));
}
// Removing the same key twice must be harmless (second remove is a no-op).
#[test]
fn test_double_free_prevention() {
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(1));
cache.remove(&1);
cache.remove(&1); }
// A value cloned out of the cache stays valid after the entry is removed,
// because values are shared via Arc.
#[test]
fn test_use_after_free_prevention() {
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(1));
let val = cache.get(&1).cloned();
cache.remove(&1);
assert_eq!(*val.unwrap(), 1);
}
// A writer thread and a reader thread race on the same key space; the
// capacity bound must still hold at the end.
#[cfg(feature = "concurrency")]
#[cfg_attr(miri, ignore)]
#[test]
fn test_thread_safety_state_consistency() {
let cache = Arc::new(ConcurrentLruCache::new(10));
let c1 = cache.clone();
let t1 = std::thread::spawn(move || {
for i in 0..100 {
c1.insert(i, Arc::new(i));
}
});
let c2 = cache.clone();
let t2 = std::thread::spawn(move || {
for i in 0..100 {
c2.get(&i);
}
});
t1.join().unwrap();
t2.join().unwrap();
assert!(cache.len() <= 10);
}
// Lock acquisitions in separate scopes observe each other's effects.
#[cfg(feature = "concurrency")]
#[test]
fn test_lock_state_consistency() {
let cache = Arc::new(ConcurrentLruCache::new(10));
{
cache.insert(1, Arc::new(1));
}
{
assert!(cache.contains(&1));
}
}
// Verifies the cache stays usable after a thread that touched it panics.
// The original spawned closure never panicked, so the test exercised no
// recovery path at all; the thread now panics after its insert and the
// join error is asserted. parking_lot locks do not poison, so subsequent
// operations must succeed.
#[cfg(feature = "concurrency")]
#[cfg_attr(miri, ignore)]
#[test]
fn test_poison_lock_recovery() {
    let cache = Arc::new(ConcurrentLruCache::new(10));
    let c_clone = cache.clone();
    let result = std::thread::spawn(move || {
        let _ = c_clone.insert(1, Arc::new(1));
        panic!("simulated panic after cache access");
    })
    .join();
    // The spawned thread must have panicked for this test to be meaningful.
    assert!(result.is_err());
    // The cache remains fully operational after the panicking thread died.
    cache.insert(2, Arc::new(2));
    assert!(cache.contains(&2));
}
// A zero-capacity cache silently drops inserts and stays empty.
#[test]
fn test_capacity_zero_state_consistency() {
let mut cache = LruCore::new(0);
cache.insert(1, Arc::new(1));
assert_eq!(cache.len(), 0);
assert!(cache.head.is_none());
}
// Filling a large cache exactly to capacity keeps map and list in sync.
#[test]
#[cfg_attr(miri, ignore)]
fn test_large_capacity_state_consistency() {
let mut cache = LruCore::new(1000);
for i in 0..1000 {
cache.insert(i, Arc::new(i));
}
assert_eq!(cache.len(), 1000);
assert_eq!(count_nodes(&cache), 1000);
}
// Dropping an empty cache must not panic or leak.
#[test]
fn test_state_after_drop() {
let cache: LruCore<i32, i32> = LruCore::new(5);
drop(cache);
}
// Invariants hold after a single partial workload.
#[test]
fn test_partial_operation_state_consistency() {
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(1));
cache.validate_invariants();
}
// Long overwrite-heavy workload with periodic invariant checks.
#[test]
#[cfg_attr(miri, ignore)]
fn test_stress_state_consistency() {
let mut cache = LruCore::new(10);
for i in 0..10000 {
cache.insert(i % 20, Arc::new(i));
if i % 100 == 0 {
cache.validate_invariants();
}
}
}
// The cache's Arc reference is released exactly once on remove.
#[test]
fn test_node_lifetime_consistency() {
let mut cache = LruCore::new(5);
let val = Arc::new(42);
cache.insert(1, val.clone());
assert_eq!(Arc::strong_count(&val), 2);
cache.remove(&1);
assert_eq!(Arc::strong_count(&val), 1);
}
// Fill, clear, and refill: internal storage must be reusable after clear.
#[test]
fn test_reallocation_state_consistency() {
let mut cache = LruCore::new(100);
for i in 0..100 {
cache.insert(i, Arc::new(i));
}
assert_eq!(cache.len(), 100);
assert_eq!(count_nodes(&cache), 100);
cache.clear();
for i in 0..50 {
cache.insert(i, Arc::new(i));
}
assert_eq!(cache.len(), 50);
}
// Inserting past capacity with many distinct keys exercises hash-bucket
// churn without panicking.
#[test]
fn test_hash_collision_state_consistency() {
let mut cache = LruCore::new(100);
for i in 0..200 {
cache.insert(i, Arc::new(i));
}
}
// Capacity-one cache: the second insert evicts the first; removing the
// survivor leaves the cache empty.
#[test]
fn test_boundary_condition_state() {
let mut cache = LruCore::new(1);
cache.insert(1, Arc::new(1));
cache.insert(2, Arc::new(2)); assert_eq!(cache.len(), 1);
assert!(cache.contains(&2));
cache.remove(&2);
assert!(cache.is_empty());
}
// The key set can be snapshotted from the map directly.
#[test]
fn test_state_serialization_consistency() {
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(1));
let state: Vec<_> = cache.map.keys().copied().collect();
assert_eq!(state.len(), 1);
}
// Cloning the Arc handle shares the underlying cache: writes through one
// handle are visible through the other.
#[cfg(feature = "concurrency")]
#[test]
fn test_clone_state_consistency() {
let cache = Arc::new(ConcurrentLruCache::new(5));
cache.insert(1, Arc::new(1));
let c2 = cache.clone();
assert!(c2.contains(&1));
cache.insert(2, Arc::new(2));
assert!(c2.contains(&2)); }
// Invariants hold after a basic insert (no reentrancy issues).
#[test]
fn test_recursive_operation_state() {
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(1));
cache.validate_invariants();
}
// A plain insert must not propagate any error.
#[test]
fn test_error_propagation_state() {
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(1));
}
// Two caches fed the identical operation sequence end in identical states,
// including recency order.
#[test]
fn test_deterministic_state_reproduction() {
let mut c1 = LruCore::new(5);
let mut c2 = LruCore::new(5);
let ops = [1, 2, 3, 1, 4, 5, 2, 6];
for &op in &ops {
c1.insert(op, Arc::new(op));
c2.insert(op, Arc::new(op));
}
assert_eq!(c1.len(), c2.len());
assert_eq!(list_keys(&c1), list_keys(&c2));
}
// The recency order can be captured as a plain Vec snapshot.
#[test]
fn test_state_checkpointing() {
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(1));
let checkpoint: Vec<i32> = list_keys(&cache);
assert_eq!(checkpoint, vec![1]);
}
// Invariants are re-validated after each individual insert.
#[test]
fn test_incremental_state_validation() {
let mut cache = LruCore::new(5);
for i in 0..5 {
cache.insert(i, Arc::new(i));
cache.validate_invariants();
}
}
}
}
// Memory-safety tests: leaks, double frees, use-after-free, and panic
// safety, tracked with a drop-counting sentinel type.
mod memory_safety {
use super::*;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
#[cfg(feature = "concurrency")]
use std::thread;
// Counts live instances through a shared atomic: construction increments,
// Drop decrements. Lets tests assert the cache neither leaks values (count
// stays high) nor double-frees them (count would underflow).
struct LifeCycleTracker {
_id: usize,
counter: Arc<AtomicUsize>,
}
impl LifeCycleTracker {
// Registers a new live instance by bumping the shared counter.
fn new(id: usize, counter: Arc<AtomicUsize>) -> Self {
counter.fetch_add(1, Ordering::SeqCst);
Self { _id: id, counter }
}
}
impl Drop for LifeCycleTracker {
// Deregisters the instance; a double drop would underflow the counter.
fn drop(&mut self) {
self.counter.fetch_sub(1, Ordering::SeqCst);
}
}
// Evicting an entry must drop its value: the live count stays at capacity.
#[test]
fn test_no_memory_leaks_on_eviction() {
let counter = Arc::new(AtomicUsize::new(0));
let mut cache = LruCore::new(2);
cache.insert(1, Arc::new(LifeCycleTracker::new(1, counter.clone())));
cache.insert(2, Arc::new(LifeCycleTracker::new(2, counter.clone())));
assert_eq!(counter.load(Ordering::SeqCst), 2);
cache.insert(3, Arc::new(LifeCycleTracker::new(3, counter.clone())));
// Tracker 1 was evicted and dropped; only two instances remain alive.
assert_eq!(counter.load(Ordering::SeqCst), 2);
assert!(!cache.contains(&1));
assert!(cache.contains(&2));
assert!(cache.contains(&3));
}
// remove hands the value back to the caller; it is dropped only when the
// caller's Arc goes out of scope.
#[test]
fn test_no_memory_leaks_on_remove() {
let counter = Arc::new(AtomicUsize::new(0));
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(LifeCycleTracker::new(1, counter.clone())));
assert_eq!(counter.load(Ordering::SeqCst), 1);
{
let _item = cache.remove(&1);
// Still alive while the caller holds the returned Arc.
assert_eq!(counter.load(Ordering::SeqCst), 1);
}
assert_eq!(counter.load(Ordering::SeqCst), 0);
}
// Same contract for pop_lru: value lives exactly as long as the returned
// handle.
#[test]
fn test_no_memory_leaks_on_pop_lru() {
let counter = Arc::new(AtomicUsize::new(0));
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(LifeCycleTracker::new(1, counter.clone())));
assert_eq!(counter.load(Ordering::SeqCst), 1);
{
let popped = cache.pop_lru();
assert!(popped.is_some());
assert_eq!(counter.load(Ordering::SeqCst), 1);
}
assert_eq!(counter.load(Ordering::SeqCst), 0);
}
// clear must drop every stored value.
#[test]
fn test_no_memory_leaks_on_clear() {
let counter = Arc::new(AtomicUsize::new(0));
let mut cache = LruCore::new(5);
for i in 0..5 {
cache.insert(i, Arc::new(LifeCycleTracker::new(i, counter.clone())));
}
assert_eq!(counter.load(Ordering::SeqCst), 5);
cache.clear();
assert_eq!(counter.load(Ordering::SeqCst), 0);
assert_eq!(cache.len(), 0);
}
// Dropping the cache itself must drop every stored value.
#[test]
fn test_no_memory_leaks_on_drop() {
let counter = Arc::new(AtomicUsize::new(0));
{
let mut cache = LruCore::new(5);
for i in 0..5 {
cache.insert(i, Arc::new(LifeCycleTracker::new(i, counter.clone())));
}
assert_eq!(counter.load(Ordering::SeqCst), 5);
} assert_eq!(counter.load(Ordering::SeqCst), 0);
}
// Counter starts at 100 so a double drop would show as 99, distinguishable
// from a simple leak or a clean single drop.
#[test]
fn test_no_double_free_on_eviction() {
let counter = Arc::new(AtomicUsize::new(100)); let mut cache = LruCore::new(1);
cache.insert(1, Arc::new(LifeCycleTracker::new(1, counter.clone())));
assert_eq!(counter.load(Ordering::SeqCst), 101);
cache.insert(2, Arc::new(LifeCycleTracker::new(2, counter.clone())));
// Tracker 1 dropped exactly once on eviction; tracker 2 alive.
assert_eq!(counter.load(Ordering::SeqCst), 101); }
// Removing a key twice drops its value exactly once.
#[test]
fn test_no_double_free_on_remove() {
let counter = Arc::new(AtomicUsize::new(100));
let mut cache = LruCore::new(5);
cache.insert(1, Arc::new(LifeCycleTracker::new(1, counter.clone())));
assert_eq!(counter.load(Ordering::SeqCst), 101);
let removed = cache.remove(&1);
assert!(removed.is_some());
drop(removed);
assert_eq!(counter.load(Ordering::SeqCst), 100);
let removed2 = cache.remove(&1);
assert!(removed2.is_none());
// Counter unchanged: the second remove freed nothing.
assert_eq!(counter.load(Ordering::SeqCst), 100);
}
// clear on an already-empty cache must not drop anything again.
#[test]
fn test_no_double_free_on_clear() {
let counter = Arc::new(AtomicUsize::new(100));
let mut cache = LruCore::new(5);
for i in 0..5 {
cache.insert(i, Arc::new(LifeCycleTracker::new(i, counter.clone())));
}
assert_eq!(counter.load(Ordering::SeqCst), 105);
cache.clear();
assert_eq!(counter.load(Ordering::SeqCst), 100);
cache.clear();
assert_eq!(counter.load(Ordering::SeqCst), 100);
}
// A value cloned out before removal stays valid afterwards (Arc sharing).
#[test]
fn test_no_use_after_free_access() {
let mut cache = LruCore::new(5);
let key = 1;
cache.insert(key, Arc::new(100));
let val = cache.get(&key).cloned();
assert!(val.is_some());
cache.remove(&key);
assert!(cache.get(&key).is_none());
assert_eq!(*val.unwrap(), 100);
}
// Recency ranks stay coherent after the LRU entry (key 1) is popped.
#[test]
fn test_no_use_after_free_traversal() {
let mut cache = LruCore::new(3);
cache.insert(1, Arc::new(1));
cache.insert(2, Arc::new(2));
cache.insert(3, Arc::new(3));
let _ = cache.pop_lru();
assert!(cache.recency_rank(&2).is_some());
assert!(cache.recency_rank(&3).is_some());
assert!(cache.recency_rank(&1).is_none());
}
// Allocating a thousand nodes succeeds and every key stays reachable.
#[test]
fn test_safe_node_allocation() {
let mut cache = LruCore::new(1000);
for i in 0..1000 {
cache.insert(i, Arc::new(i));
}
assert_eq!(cache.len(), 1000);
for i in 0..1000 {
assert!(cache.contains(&i));
}
}
// Targeted removes drop exactly their values; drop of the cache frees the
// rest.
#[test]
fn test_safe_node_deallocation() {
let counter = Arc::new(AtomicUsize::new(0));
{
let mut cache = LruCore::new(10);
for i in 0..10 {
cache.insert(i, Arc::new(LifeCycleTracker::new(i, counter.clone())));
}
assert_eq!(counter.load(Ordering::SeqCst), 10);
cache.remove(&0);
cache.remove(&5);
assert_eq!(counter.load(Ordering::SeqCst), 8);
}
assert_eq!(counter.load(Ordering::SeqCst), 0);
}
// peek on every key returns the stored value without disturbing the list.
#[test]
fn test_safe_list_traversal() {
let mut cache = LruCore::new(3);
cache.insert(1, Arc::new(1));
cache.insert(2, Arc::new(2));
cache.insert(3, Arc::new(3));
assert_eq!(*cache.peek(&1).unwrap(), 1);
assert_eq!(*cache.peek(&2).unwrap(), 2);
assert_eq!(*cache.peek(&3).unwrap(), 3);
}
// Mixed promote/pop/remove sequence: after get(2) the LRU is 0, which
// pop_lru removes; remove(2) then leaves exactly {1, 3, 4}.
#[test]
fn test_safe_list_manipulation() {
let mut cache = LruCore::new(10);
for i in 0..5 {
cache.insert(i, Arc::new(i));
}
cache.get(&2);
let _ = cache.pop_lru();
cache.remove(&2);
assert_eq!(cache.len(), 3);
assert!(cache.contains(&1));
assert!(cache.contains(&3));
assert!(cache.contains(&4));
}
// The caller's external Arc keeps the value alive past removal; the cache
// releases only its own reference.
#[test]
fn test_arc_reference_counting_safety() {
let counter = Arc::new(AtomicUsize::new(0));
let mut cache = LruCore::new(5);
let tracker = Arc::new(LifeCycleTracker::new(1, counter.clone()));
cache.insert(1, tracker.clone());
assert_eq!(counter.load(Ordering::SeqCst), 1);
cache.remove(&1);
assert_eq!(counter.load(Ordering::SeqCst), 1);
drop(tracker);
assert_eq!(counter.load(Ordering::SeqCst), 0);
}
// Values are dropped deterministically even when the caller also holds a
// handle — no Rc-style cycle can keep them alive.
#[test]
fn test_arc_cyclic_reference_prevention() {
struct _Node {
_next: Option<Arc<_Node>>,
}
let mut cache = LruCore::new(2);
let counter = Arc::new(AtomicUsize::new(0));
let cycle_node = Arc::new(LifeCycleTracker::new(1, counter.clone()));
cache.insert(1, cycle_node.clone());
assert_eq!(counter.load(Ordering::SeqCst), 1);
cache.remove(&1);
assert_eq!(counter.load(Ordering::SeqCst), 1);
drop(cycle_node);
assert_eq!(counter.load(Ordering::SeqCst), 0);
}
// Node alignment is at least that of its fields, and a u64 round-trips.
#[test]
#[cfg_attr(miri, ignore)]
fn test_memory_alignment_safety() {
use std::mem;
assert!(mem::align_of::<Node<u32, u32>>() >= mem::align_of::<u32>());
let mut cache = LruCore::new(1);
cache.insert(1, Arc::new(1u64)); assert_eq!(**cache.get(&1).unwrap(), 1u64);
}
// Dropping 10k nodes must not recurse through the list (no stack overflow).
#[test]
#[cfg_attr(miri, ignore)]
fn test_stack_overflow_prevention() {
let mut cache = LruCore::new(10000);
for i in 0..10000 {
cache.insert(i, Arc::new(i));
}
drop(cache);
}
// Heavy interleaved insert/remove churn must not corrupt internal storage.
#[test]
#[cfg_attr(miri, ignore)]
fn test_heap_corruption_prevention() {
let mut cache = LruCore::new(100);
for i in 0..1000 {
cache.insert(i % 200, Arc::new(i));
if i % 3 == 0 {
cache.remove(&(i % 200));
}
}
}
// All accessors return None gracefully on an empty (or emptied) cache.
#[test]
fn test_empty_list_access_safety() {
let mut cache: LruCore<i32, i32> = LruCore::new(10);
assert!(cache.pop_lru().is_none());
assert!(cache.peek_lru().is_none());
assert!(cache.remove(&1).is_none());
cache.insert(1, Arc::new(1));
cache.remove(&1);
assert!(cache.pop_lru().is_none());
}
// A value cloned out remains valid after its slot is recycled by remove.
#[test]
fn test_stale_slot_prevention() {
let mut cache = LruCore::new(2);
cache.insert(1, Arc::new(1));
let val = cache.get(&1).cloned();
cache.remove(&1);
assert_eq!(*val.unwrap(), 1);
}
// Inserting past capacity evicts rather than growing beyond the bound.
#[test]
fn test_buffer_overflow_prevention() {
let mut cache = LruCore::new(2);
cache.insert(1, Arc::new(1));
cache.insert(2, Arc::new(2));
cache.insert(3, Arc::new(3));
assert_eq!(cache.len(), 2);
}
// Capacity-one cache: only the newest key survives.
#[test]
#[cfg_attr(miri, ignore)]
fn test_memory_bounds_checking() {
let mut cache = LruCore::new(1);
cache.insert(1, Arc::new(1));
cache.insert(2, Arc::new(2));
assert_eq!(cache.len(), 1);
assert!(cache.contains(&2));
assert!(!cache.contains(&1));
}
// Ten threads insert 100 tracked values each into a capacity-10 cache;
// afterwards at most 10 trackers may still be alive (the rest must have
// been dropped on eviction).
#[cfg(feature = "concurrency")]
#[cfg_attr(miri, ignore)]
#[test]
fn test_safe_concurrent_access() {
let counter = Arc::new(AtomicUsize::new(0));
let cache = Arc::new(ConcurrentLruCache::new(10));
let mut handles = vec![];
for i in 0..10 {
let cache = cache.clone();
let counter = counter.clone();
handles.push(thread::spawn(move || {
for j in 0..100 {
let key = i * 1000 + j;
let val = Arc::new(LifeCycleTracker::new(key as usize, counter.clone()));
cache.insert(key, val);
}
}));
}
for h in handles {
h.join().unwrap();
}
let count = counter.load(Ordering::SeqCst);
assert!(
count <= 10,
"Memory leak detected: {} items alive (capacity 10)",
count
);
}
// Same workload with interleaved removes against a capacity-100 cache;
// live-tracker count must never exceed the capacity.
#[cfg(feature = "concurrency")]
#[cfg_attr(miri, ignore)]
#[test]
fn test_safe_concurrent_modification() {
let counter = Arc::new(AtomicUsize::new(0));
let cache = Arc::new(ConcurrentLruCache::new(100));
let mut handles = vec![];
for i in 0..10 {
let cache = cache.clone();
let counter = counter.clone();
handles.push(thread::spawn(move || {
for j in 0..100 {
let key = i * 1000 + j;
let val = Arc::new(LifeCycleTracker::new(key as usize, counter.clone()));
cache.insert(key, val);
if j % 2 == 0 {
cache.remove(&key);
}
}
}));
}
for h in handles {
h.join().unwrap();
}
let count = counter.load(Ordering::SeqCst);
assert!(count <= 100);
}
// A key whose Hash impl panics makes the spawned thread die mid-insert;
// the cache must remain empty and fully usable afterwards (parking_lot
// locks do not poison).
#[cfg(feature = "concurrency")]
#[cfg_attr(miri, ignore)]
#[test]
fn test_lock_poisoning_memory_safety() {
use std::hash::{Hash, Hasher};
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
struct PanickingKey(i32);
impl Hash for PanickingKey {
fn hash<H: Hasher>(&self, state: &mut H) {
// Sentinel value 666 triggers the simulated panic inside hashing.
if self.0 == 666 {
panic!("Simulated panic during hash");
}
self.0.hash(state);
}
}
let cache = Arc::new(ConcurrentLruCache::<PanickingKey, i32>::new(10));
let c_clone = cache.clone();
let _ = thread::spawn(move || {
c_clone.insert(PanickingKey(666), 1);
})
.join()
.unwrap_err();
// The aborted insert left no partial entry behind.
assert_eq!(cache.len(), 0);
cache.insert(PanickingKey(1), 1);
assert_eq!(cache.len(), 1);
}
// An insert that panics during key hashing must not leave the cache in a
// broken state; a later insert still succeeds.
#[test]
#[cfg_attr(miri, ignore)]
fn test_panic_safety_memory_cleanup() {
use std::hash::{Hash, Hasher};
use std::panic::{self, AssertUnwindSafe};
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
struct PanickingKey(i32);
impl Hash for PanickingKey {
fn hash<H: Hasher>(&self, state: &mut H) {
if self.0 == 666 {
panic!("Simulated panic during hash");
}
self.0.hash(state);
}
}
let counter = Arc::new(AtomicUsize::new(0));
let mut cache = LruCore::new(10);
let result = panic::catch_unwind(AssertUnwindSafe(|| {
let tracker = Arc::new(LifeCycleTracker::new(666, counter.clone()));
cache.insert(PanickingKey(666), tracker);
}));
assert!(result.is_err());
// The cache stays usable after the unwound insert.
cache.insert(
PanickingKey(1),
Arc::new(LifeCycleTracker::new(1, counter.clone())),
);
assert_eq!(cache.len(), 1);
}
// Basic exception-safety guarantee: a panicking insert leaves existing
// entries intact and readable.
#[test]
fn test_exception_safety_guarantees() {
use std::hash::{Hash, Hasher};
use std::panic::{self, AssertUnwindSafe};
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
struct PanickingKey(i32);
impl Hash for PanickingKey {
fn hash<H: Hasher>(&self, state: &mut H) {
if self.0 == 666 {
panic!("Panic");
}
self.0.hash(state);
}
}
let mut cache = LruCore::new(10);
cache.insert(PanickingKey(1), Arc::new(1));
let _ = panic::catch_unwind(AssertUnwindSafe(|| {
cache.insert(PanickingKey(666), Arc::new(2));
}));
assert_eq!(cache.len(), 1);
assert!(cache.contains(&PanickingKey(1)));
assert!(cache.peek(&PanickingKey(1)).is_some());
}
// Intentionally empty: marker for leak analysis under external tooling
// (valgrind); the suite itself has nothing to assert here.
#[test]
#[cfg_attr(miri, ignore)]
fn test_memory_leak_detection_valgrind() {
}
// Intentionally empty: marker for miri-based leak analysis.
#[test]
#[cfg_attr(miri, ignore)]
fn test_memory_leak_detection_miri() {
}
// Ten threads hammer overlapping keys with inserts and removes; the test
// passes if nothing panics or deadlocks.
#[cfg(feature = "concurrency")]
#[cfg_attr(miri, ignore)]
#[test]
fn test_memory_safety_under_stress() {
let cache = Arc::new(ConcurrentLruCache::new(100));
let mut handles = vec![];
for _i in 0..10 {
let c = cache.clone();
handles.push(thread::spawn(move || {
for j in 0..1000 {
c.insert(j % 200, Arc::new(j));
if j % 3 == 0 {
c.remove(&(j % 200));
}
}
}));
}
for h in handles {
h.join().unwrap();
}
}
// Long churn over a small key space must not degrade or corrupt storage.
#[test]
#[cfg_attr(miri, ignore)]
fn test_memory_fragmentation_handling() {
let mut cache = LruCore::new(10);
for i in 0..10000 {
cache.insert(i % 20, Arc::new(i));
}
}
// A 1 MiB value round-trips through the cache intact.
#[test]
#[cfg_attr(miri, ignore)]
fn test_large_allocation_safety() {
let mut cache = LruCore::new(2);
let big_vec = vec![0u8; 1024 * 1024]; cache.insert(1, Arc::new(big_vec));
assert_eq!(cache.get(&1).unwrap().len(), 1024 * 1024);
}
// Copy keys work like any other key type.
#[test]
#[cfg_attr(miri, ignore)]
fn test_copy_type_memory_efficiency() {
let mut cache = LruCore::new(10);
cache.insert(1usize, Arc::new(1));
assert!(cache.contains(&1));
}
// A moved-in String is still readable through the cache handle.
#[test]
fn test_move_semantics_safety() {
let s = String::from("hello");
let mut cache = LruCore::new(10);
cache.insert(1, Arc::new(s)); let v = cache.get(&1).unwrap();
assert_eq!(v.as_str(), "hello");
}
// A borrow obtained from get ends before remove; the borrow checker itself
// enforces this — the test documents the pattern compiles.
#[test]
fn test_lifetime_parameter_safety() {
let mut cache = LruCore::new(10);
let v = Arc::new(1);
cache.insert(1, v.clone());
{
let r = cache.get(&1).unwrap();
assert_eq!(**r, 1);
} cache.remove(&1);
}
// Compile-time proof that both cache types are Send + Sync.
#[cfg(feature = "concurrency")]
#[test]
fn test_send_sync_memory_safety() {
fn assert_send<T: Send>() {}
fn assert_sync<T: Sync>() {}
assert_send::<LruCore<i32, i32>>();
assert_sync::<LruCore<i32, i32>>();
assert_send::<ConcurrentLruCache<i32, i32>>();
assert_sync::<ConcurrentLruCache<i32, i32>>();
}
// Dropping the cache drops its stored values (tracker count returns to 0).
#[test]
fn test_drop_trait_memory_cleanup() {
let counter = Arc::new(AtomicUsize::new(0));
{
let mut cache = LruCore::new(10);
cache.insert(1, Arc::new(LifeCycleTracker::new(1, counter.clone())));
}
assert_eq!(counter.load(Ordering::SeqCst), 0);
}
// An Arc-cloned handle observes writes made through the original handle.
#[cfg(feature = "concurrency")]
#[test]
fn test_clone_memory_safety() {
let cache = Arc::new(ConcurrentLruCache::new(10));
let c2 = cache.clone();
cache.insert(1, Arc::new(1));
assert!(c2.contains(&1));
}
// Smoke test over insert/remove; any unsoundness in internal unsafe code
// would surface under miri.
#[test]
fn test_unsafe_block_soundness() {
let mut cache = LruCore::new(10);
cache.insert(1, Arc::new(1));
cache.remove(&1);
}
// Basic slot-arena insert does not panic.
#[test]
fn test_slot_based_safety() {
let mut cache = LruCore::new(10);
cache.insert(1, Arc::new(1));
}
// Memory is reclaimed when the cache goes out of scope.
#[test]
#[cfg_attr(miri, ignore)]
fn test_memory_reclamation_safety() {
let counter = Arc::new(AtomicUsize::new(0));
{
let mut cache = LruCore::new(10);
cache.insert(1, Arc::new(LifeCycleTracker::new(1, counter.clone())));
}
assert_eq!(counter.load(Ordering::SeqCst), 0);
}
// Intentionally empty: placeholder for out-of-memory behavior, which
// cannot be simulated portably in a unit test.
#[test]
fn test_oom_handling_safety() {
}
// A value inserted from another thread is visible after the join.
#[cfg(feature = "concurrency")]
#[cfg_attr(miri, ignore)]
#[test]
fn test_cross_thread_memory_safety() {
let cache = Arc::new(ConcurrentLruCache::new(10));
let c2 = cache.clone();
thread::spawn(move || {
c2.insert(1, Arc::new(1));
})
.join()
.unwrap();
assert!(cache.contains(&1));
}
// A panic unwinding through code that borrowed the cache must not abort.
#[test]
fn test_unwind_safety() {
let mut cache = LruCore::new(10);
let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
cache.insert(1, Arc::new(1));
panic!("oops");
}));
assert!(result.is_err());
}
// Intentionally empty placeholders: these names mark compatibility runs
// under the respective sanitizers; the tooling, not the body, does the
// checking.
#[test]
#[cfg_attr(miri, ignore)]
fn test_memory_sanitizer_compatibility() {
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_address_sanitizer_compatibility() {
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_thread_sanitizer_compatibility() {
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_leak_sanitizer_compatibility() {
}
}
}