// cache_rs — crate root (lib.rs)
1#![doc = include_str!("../README.md")]
2//!
3//! ---
4//!
5//! # Code Reference
6//!
7//! This section provides quick code examples and API references for each cache algorithm.
8//!
9//! ## Algorithm Selection Guide
10//!
11//! ```text
12//! ┌─────────────────────────────────────────────────────────────────────────────┐
13//! │                    Which Cache Algorithm Should I Use?                       │
14//! ├─────────────────────────────────────────────────────────────────────────────┤
15//! │                                                                              │
16//! │  Is your workload primarily...                                               │
17//! │                                                                              │
18//! │  ┌─────────────────┐                                                         │
19//! │  │ Recency-based?  │──Yes──▶ Are you worried about scans?                   │
20//! │  │ (recent = hot)  │              │                                          │
21//! │  └────────┬────────┘         Yes  │  No                                      │
22//! │           │                   │   │                                          │
23//! │          No                   ▼   ▼                                          │
24//! │           │               ┌──────────┐  ┌──────────┐                         │
25//! │           │               │   SLRU   │  │   LRU    │                         │
26//! │           ▼               └──────────┘  └──────────┘                         │
27//! │  ┌─────────────────┐                                                         │
28//! │  │ Frequency-based?│──Yes──▶ Does popularity change over time?              │
29//! │  │ (popular = hot) │              │                                          │
30//! │  └────────┬────────┘         Yes  │  No                                      │
31//! │           │                   │   │                                          │
32//! │          No                   ▼   ▼                                          │
33//! │           │               ┌──────────┐  ┌──────────┐                         │
34//! │           │               │  LFUDA   │  │   LFU    │                         │
35//! │           ▼               └──────────┘  └──────────┘                         │
36//! │  ┌─────────────────┐                                                         │
37//! │  │ Variable-sized  │──Yes──▶ ┌──────────┐                                   │
38//! │  │    objects?     │         │   GDSF   │                                    │
39//! │  └─────────────────┘         └──────────┘                                    │
40//! │                                                                              │
41//! └─────────────────────────────────────────────────────────────────────────────┘
42//! ```
43//!
44//! ## Quick Reference
45//!
46//! | Algorithm | Description | Best Use Case |
47//! |-----------|-------------|---------------|
48//! | [`LruCache`] | Least Recently Used | General purpose, recency-based access |
49//! | [`SlruCache`] | Segmented LRU | Mixed workloads with scans |
50//! | [`LfuCache`] | Least Frequently Used | Stable popularity patterns |
51//! | [`LfudaCache`] | LFU with Dynamic Aging | Long-running, evolving popularity |
//! | [`GdsfCache`] | Greedy Dual-Size Frequency | CDNs, variable-sized objects |
53//!
54//! ## Performance Characteristics
55//!
56//! | Algorithm | Get | Put | Remove | Memory/Entry | Scan Resist | Adapts |
57//! |-----------|-----|-----|--------|--------------|-------------|--------|
58//! | LRU       | O(1)| O(1)| O(1)   | ~80 bytes    | Poor        | N/A    |
59//! | SLRU      | O(1)| O(1)| O(1)   | ~90 bytes    | Good        | N/A    |
60//! | LFU       | O(1)| O(1)| O(1)   | ~100 bytes   | Excellent   | No     |
61//! | LFUDA     | O(1)| O(1)| O(1)   | ~110 bytes   | Excellent   | Yes    |
62//! | GDSF      | O(1)| O(1)| O(1)   | ~120 bytes   | Good        | Yes    |
63//!
64//! ## Code Examples
65//!
66//! ### LRU (Least Recently Used)
67//!
68//! Evicts the item that hasn't been accessed for the longest time. Simple and effective
69//! for workloads with temporal locality.
70//!
71//! ```rust
72//! use cache_rs::LruCache;
73//! use cache_rs::config::LruCacheConfig;
74//! use core::num::NonZeroUsize;
75//!
76//! let config = LruCacheConfig {
77//!     capacity: NonZeroUsize::new(2).unwrap(),
78//!     max_size: u64::MAX,
79//! };
80//! let mut cache = LruCache::init(config, None);
81//! cache.put("a", 1);
82//! cache.put("b", 2);
83//! cache.get(&"a");      // "a" becomes most recently used
84//! cache.put("c", 3);    // "b" evicted (least recently used)
85//! assert!(cache.get(&"b").is_none());
86//! ```
87//!
88//! ### SLRU (Segmented LRU)
89//!
90//! Divides cache into probationary and protected segments. Items enter probationary
91//! and are promoted to protected on second access. Excellent scan resistance.
92//!
93//! ```rust
94//! use cache_rs::SlruCache;
95//! use cache_rs::config::SlruCacheConfig;
96//! use core::num::NonZeroUsize;
97//!
98//! let config = SlruCacheConfig {
99//!     capacity: NonZeroUsize::new(100).unwrap(),
100//!     protected_capacity: NonZeroUsize::new(20).unwrap(),
101//!     max_size: u64::MAX,
102//! };
103//! let mut cache = SlruCache::init(config, None);
104//!
105//! cache.put("hot", 1);
106//! cache.get(&"hot");  // Promoted to protected segment!
107//! ```
108//!
109//! ### LFU (Least Frequently Used)
110//!
111//! Tracks access frequency and evicts the least frequently accessed item.
112//! Great for workloads with stable popularity patterns.
113//!
114//! ```rust
115//! use cache_rs::LfuCache;
116//! use cache_rs::config::LfuCacheConfig;
117//! use core::num::NonZeroUsize;
118//!
119//! let config = LfuCacheConfig {
120//!     capacity: NonZeroUsize::new(2).unwrap(),
121//!     max_size: u64::MAX,
122//! };
123//! let mut cache = LfuCache::init(config, None);
124//! cache.put("rare", 1);
125//! cache.put("popular", 2);
126//!
127//! // Access "popular" multiple times
128//! for _ in 0..10 { cache.get(&"popular"); }
129//!
130//! cache.put("new", 3);  // "rare" evicted (lowest frequency)
131//! assert!(cache.get(&"popular").is_some());
132//! ```
133//!
134//! ### LFUDA (LFU with Dynamic Aging)
135//!
136//! Addresses LFU's "cache pollution" problem by incorporating aging. Old popular
137//! items gradually lose priority, allowing new items to compete fairly.
138//!
139//! ```rust
140//! use cache_rs::LfudaCache;
141//! use cache_rs::config::LfudaCacheConfig;
142//! use core::num::NonZeroUsize;
143//!
144//! let config = LfudaCacheConfig {
145//!     capacity: NonZeroUsize::new(100).unwrap(),
146//!     initial_age: 0,
147//!     max_size: u64::MAX,
148//! };
149//! let mut cache = LfudaCache::init(config, None);
150//!
151//! // Old popular items will eventually age out if not accessed
152//! for i in 0..100 {
153//!     cache.put(i, i);
154//! }
155//! ```
156//!
157//! ### GDSF (Greedy Dual-Size Frequency)
158//!
159//! Combines frequency, size, and aging. Priority = (Frequency / Size) + Age.
160//! Ideal for caching variable-sized objects like images or API responses.
161//!
162//! ```rust
163//! use cache_rs::GdsfCache;
164//! use cache_rs::config::GdsfCacheConfig;
165//! use core::num::NonZeroUsize;
166//!
167//! let config = GdsfCacheConfig {
168//!     capacity: NonZeroUsize::new(1000).unwrap(),
169//!     initial_age: 0.0,
170//!     max_size: 10 * 1024 * 1024,  // 10MB
171//! };
172//! let mut cache: GdsfCache<String, Vec<u8>> = GdsfCache::init(config, None);
173//!
174//! // Size-aware insertion
175//! cache.put("small.txt".to_string(), vec![0u8; 100], 100);
176//! cache.put("large.bin".to_string(), vec![0u8; 10000], 10000);
177//! // Small items get higher priority per byte
178//! ```
179//!
180//! ## Concurrent Caches
181//!
182//! Enable the `concurrent` feature for thread-safe versions:
183//!
184//! ```toml
185//! [dependencies]
186//! cache-rs = { version = "0.3", features = ["concurrent"] }
187//! ```
188//!
189//! ```rust,ignore
190//! use cache_rs::ConcurrentLruCache;
191//! use std::sync::Arc;
192//!
193//! let cache = Arc::new(ConcurrentLruCache::new(10_000));
194//!
195//! // Safe to share across threads
196//! let cache_clone = Arc::clone(&cache);
197//! std::thread::spawn(move || {
198//!     cache_clone.put("key".to_string(), 42);
199//! });
200//! ```
201//!
202//! Concurrent caches use **lock striping** for high throughput:
203//!
204//! ```text
205//! ┌────────────────────────────────────────────────────────────────────┐
206//! │              ConcurrentCache (16 segments)                         │
207//! │                                                                    │
208//! │  ┌─────────┐ ┌─────────┐ ┌─────────┐     ┌─────────┐              │
209//! │  │Segment 0│ │Segment 1│ │Segment 2│ ... │Segment15│              │
210//! │  │ [Mutex] │ │ [Mutex] │ │ [Mutex] │     │ [Mutex] │              │
211//! │  └─────────┘ └─────────┘ └─────────┘     └─────────┘              │
212//! │       ▲           ▲           ▲               ▲                   │
213//! │       │           │           │               │                   │
214//! │  hash(k1)%16  hash(k2)%16  hash(k3)%16   hash(kN)%16              │
215//! └────────────────────────────────────────────────────────────────────┘
216//! ```
217//!
218//! ## Dual-Limit Capacity
219//!
220//! All caches support both entry count and size limits:
221//!
222//! ```rust
223//! use cache_rs::LruCache;
224//! use cache_rs::config::LruCacheConfig;
225//! use core::num::NonZeroUsize;
226//!
227//! // Limit by both count (1000 entries) AND size (10MB)
228//! let config = LruCacheConfig {
229//!     capacity: NonZeroUsize::new(1000).unwrap(),
230//!     max_size: 10 * 1024 * 1024,
231//! };
232//! let mut cache: LruCache<String, Vec<u8>> = LruCache::init(config, None);
233//!
234//! // Track size explicitly
235//! let data = vec![0u8; 1024];
236//! cache.put_with_size("file.bin".to_string(), data, 1024);
237//! ```
238//!
239//! ## Modules
240//!
241//! - [`lru`]: Least Recently Used cache implementation
242//! - [`slru`]: Segmented LRU cache implementation
243//! - [`lfu`]: Least Frequently Used cache implementation
244//! - [`lfuda`]: LFU with Dynamic Aging cache implementation
//! - [`gdsf`]: Greedy Dual-Size Frequency cache implementation
246//! - [`config`]: Configuration structures for all cache algorithms
247//! - [`metrics`]: Metrics collection for cache performance monitoring
248//! - [`concurrent`]: Thread-safe concurrent cache implementations (requires `concurrent` feature)
249
// Build without linking the standard library; the crate uses `core` APIs
// directly (e.g. `core::num::NonZeroUsize` in the configuration examples).
#![no_std]

// Test-only dependency: `scoped_threadpool` is used by the test suite
// (presumably for the concurrent-cache tests — not a library dependency).
#[cfg(test)]
extern crate scoped_threadpool;
254
/// Unified cache entry type.
///
/// Provides a generic `CacheEntry<K, V, M>` structure that holds key, value,
/// timestamps, and algorithm-specific metadata (`M`). This is the foundation
/// for the dual-limit (entry count + byte size) capacity management system.
pub mod entry;

/// Algorithm-specific metadata types.
///
/// Provides the per-entry metadata structure that each cache algorithm stores
/// inside its [`entry::CacheEntry`]:
/// - `LfuMeta`: Frequency counter for LFU
/// - `LfudaMeta`: Frequency for LFUDA (the age component is cache-global)
/// - `SlruMeta`: Segment location (probationary vs. protected) for SLRU
/// - `GdsfMeta`: Frequency and priority for GDSF
pub mod meta;
270
/// Doubly linked list implementation with in-place editing capabilities.
///
/// This module provides a memory-efficient doubly linked list that allows for
/// efficient insertion, removal, and reordering operations.
///
/// **Note**: This module is internal infrastructure (hence `pub(crate)`) and is
/// not reachable by library consumers. It exposes unsafe raw pointer operations
/// that require careful invariant maintenance; use the high-level cache
/// implementations instead.
pub(crate) mod list;

/// Cache configuration structures.
///
/// Provides one configuration structure per cache algorithm (e.g.
/// `LruCacheConfig`, `SlruCacheConfig`); see the crate-level examples for
/// typical field values such as `capacity` and `max_size`.
pub mod config;
285
/// Least Recently Used (LRU) cache implementation.
///
/// Provides a fixed-size cache that evicts the least recently used items when
/// the capacity is reached. Best suited to recency-based workloads.
pub mod lru;

/// Segmented LRU (SLRU) cache implementation.
///
/// Provides a fixed-size cache that uses a segmented (probationary/protected)
/// approach to evict items based on their usage patterns. This is useful when
/// certain items are accessed more frequently than others, and gives good
/// scan resistance.
pub mod slru;

/// Least Frequently Used (LFU) cache implementation.
///
/// Provides a fixed-size cache that evicts the least frequently used items
/// when capacity is reached. Items are tracked by their access frequency,
/// which suits stable popularity patterns.
pub mod lfu;

/// Least Frequently Used with Dynamic Aging (LFUDA) cache implementation.
///
/// An enhanced LFU cache that addresses the cache-pollution/aging problem by
/// dynamically adjusting item priorities based on a global aging factor, so
/// formerly popular items can eventually be evicted.
pub mod lfuda;

/// Greedy Dual-Size Frequency (GDSF) cache implementation.
///
/// A cache replacement algorithm that combines frequency, size, and aging.
/// Assigns priority via the (Frequency / Size) + Global_Age formula, making it
/// a good fit for variable-sized objects.
pub mod gdsf;
316
/// Cache metrics system.
///
/// Provides a flexible metrics collection and reporting system for all cache
/// algorithms. Each algorithm can track algorithm-specific metrics while
/// implementing a common interface.
pub mod metrics;

/// Concurrent cache implementations.
///
/// Provides thread-safe cache implementations using segmented storage for
/// high-performance multi-threaded access. Each concurrent cache partitions
/// the key space across multiple segments (lock striping), with each segment
/// protected by its own lock.
///
/// Only compiled when the `concurrent` feature is enabled.
#[cfg(feature = "concurrent")]
pub mod concurrent;
332
// Re-export the cache types at the crate root so callers can write
// `cache_rs::LruCache` instead of `cache_rs::lru::LruCache`.
pub use gdsf::GdsfCache;
pub use lfu::LfuCache;
pub use lfuda::LfudaCache;
pub use lru::LruCache;
pub use slru::SlruCache;

// Re-export the unified entry type shared by all algorithms.
pub use entry::CacheEntry;

// Re-export the per-algorithm metadata types stored inside `CacheEntry`.
pub use meta::{GdsfMeta, LfuMeta, LfudaMeta, SlruMeta, SlruSegment};

// Lock-striped, thread-safe variants; only present behind the `concurrent`
// feature, mirroring the gated `concurrent` module above.
#[cfg(feature = "concurrent")]
pub use concurrent::{
    ConcurrentGdsfCache, ConcurrentLfuCache, ConcurrentLfudaCache, ConcurrentLruCache,
    ConcurrentSlruCache,
};