// cache_rs/lib.rs

//! # Cache-RS
//!
//! A high-performance, memory-efficient cache library implementing multiple eviction
//! algorithms with both single-threaded and thread-safe concurrent variants.
//!
//! ## Why Cache-RS?
//!
//! - **Zero-Copy Architecture**: HashMap stores pointers; values live in cache-friendly linked structures
//! - **O(1) Everything**: All operations (get, put, remove) are constant time
//! - **No-std Compatible**: Works in embedded environments (with `alloc`)
//! - **Comprehensive Algorithm Suite**: LRU, SLRU, LFU, LFUDA, and GDSF
//! - **Production Ready**: Extensive testing, Miri-verified, benchmarked
//!
//! ## Quick Start
//!
//! ```rust
//! use cache_rs::LruCache;
//! use cache_rs::config::LruCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! let config = LruCacheConfig {
//!     capacity: NonZeroUsize::new(100).unwrap(),
//!     max_size: u64::MAX,
//! };
//! let mut cache = LruCache::init(config, None);
//! cache.put("key", "value");
//! assert_eq!(cache.get(&"key"), Some(&"value"));
//! ```
//!
//! ## Algorithm Selection Guide
//!
//! ```text
//! ┌─────────────────────────────────────────────────────────────────────────────┐
//! │                    Which Cache Algorithm Should I Use?                       │
//! ├─────────────────────────────────────────────────────────────────────────────┤
//! │                                                                              │
//! │  Is your workload primarily...                                               │
//! │                                                                              │
//! │  ┌─────────────────┐                                                         │
//! │  │ Recency-based?  │──Yes──▶ Are you worried about scans?                   │
//! │  │ (recent = hot)  │              │                                          │
//! │  └────────┬────────┘         Yes  │  No                                      │
//! │           │                   │   │                                          │
//! │          No                   ▼   ▼                                          │
//! │           │               ┌──────────┐  ┌──────────┐                         │
//! │           │               │   SLRU   │  │   LRU    │                         │
//! │           ▼               └──────────┘  └──────────┘                         │
//! │  ┌─────────────────┐                                                         │
//! │  │ Frequency-based?│──Yes──▶ Does popularity change over time?              │
//! │  │ (popular = hot) │              │                                          │
//! │  └────────┬────────┘         Yes  │  No                                      │
//! │           │                   │   │                                          │
//! │          No                   ▼   ▼                                          │
//! │           │               ┌──────────┐  ┌──────────┐                         │
//! │           │               │  LFUDA   │  │   LFU    │                         │
//! │           ▼               └──────────┘  └──────────┘                         │
//! │  ┌─────────────────┐                                                         │
//! │  │ Variable-sized  │──Yes──▶ ┌──────────┐                                   │
//! │  │    objects?     │         │   GDSF   │                                    │
//! │  └─────────────────┘         └──────────┘                                    │
//! │                                                                              │
//! └─────────────────────────────────────────────────────────────────────────────┘
//! ```
//!
//! ## Available Cache Algorithms
//!
//! | Algorithm | Description | Best Use Case |
//! |-----------|-------------|---------------|
//! | [`LruCache`] | Least Recently Used | General purpose, recency-based access |
//! | [`SlruCache`] | Segmented LRU | Mixed workloads with scans |
//! | [`LfuCache`] | Least Frequently Used | Stable popularity patterns |
//! | [`LfudaCache`] | LFU with Dynamic Aging | Long-running, evolving popularity |
//! | [`GdsfCache`] | Greedy Dual Size Frequency | CDNs, variable-sized objects |
//!
//! ## Performance Characteristics
//!
//! | Algorithm | Get | Put | Remove | Memory/Entry | Scan Resist | Adapts |
//! |-----------|-----|-----|--------|--------------|-------------|--------|
//! | LRU       | O(1)| O(1)| O(1)   | ~80 bytes    | Poor        | N/A    |
//! | SLRU      | O(1)| O(1)| O(1)   | ~90 bytes    | Good        | N/A    |
//! | LFU       | O(1)| O(1)| O(1)   | ~100 bytes   | Excellent   | No     |
//! | LFUDA     | O(1)| O(1)| O(1)   | ~110 bytes   | Excellent   | Yes    |
//! | GDSF      | O(1)| O(1)| O(1)   | ~120 bytes   | Good        | Yes    |
//!
//! ## Concurrent Caches
//!
//! Enable the `concurrent` feature for thread-safe versions:
//!
//! ```toml
//! [dependencies]
//! cache-rs = { version = "0.2", features = ["concurrent"] }
//! ```
//!
//! ```rust,ignore
//! use cache_rs::ConcurrentLruCache;
//! use std::sync::Arc;
//!
//! let cache = Arc::new(ConcurrentLruCache::new(10_000));
//!
//! // Safe to share across threads
//! let cache_clone = Arc::clone(&cache);
//! std::thread::spawn(move || {
//!     cache_clone.put("key".to_string(), 42);
//! });
//! ```
//!
//! Concurrent caches use **lock striping** for high throughput:
//!
//! ```text
//! ┌────────────────────────────────────────────────────────┐
//! │              ConcurrentCache (16 segments)             │
//! │                                                        │
//! │  ┌─────────┐ ┌─────────┐ ┌─────────┐     ┌─────────┐   │
//! │  │Segment 0│ │Segment 1│ │Segment 2│ ... │Segment15│   │
//! │  │ [Mutex] │ │ [Mutex] │ │ [Mutex] │     │ [Mutex] │   │
//! │  └─────────┘ └─────────┘ └─────────┘     └─────────┘   │
//! │       ▲           ▲           ▲               ▲        │
//! │       │           │           │               │        │
//! │  hash(k1)%16  hash(k2)%16  hash(k3)%16   hash(kN)%16   │
//! └────────────────────────────────────────────────────────┘
//! ```
//!
//! ## Dual-Limit Capacity
//!
//! All caches support both entry count and size limits:
//!
//! ```rust
//! use cache_rs::LruCache;
//! use cache_rs::config::LruCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! // Limit by both count (1000 entries) AND size (10MB)
//! let config = LruCacheConfig {
//!     capacity: NonZeroUsize::new(1000).unwrap(),
//!     max_size: 10 * 1024 * 1024,
//! };
//! let mut cache: LruCache<String, Vec<u8>> = LruCache::init(config, None);
//!
//! // Track size explicitly
//! let data = vec![0u8; 1024];
//! cache.put_with_size("file.bin".to_string(), data, 1024);
//! ```
//!
//! ## Feature Flags
//!
//! | Feature | Default | Description |
//! |---------|---------|-------------|
//! | `hashbrown` | ✓ | Use hashbrown for `no_std` HashMap |
//! | `std` | | Enable standard library features |
//! | `concurrent` | | Thread-safe cache implementations |
//! | `nightly` | | Nightly-only optimizations |
//!
//! ## No-std Support
//!
//! This crate works in `no_std` environments by default (requires `alloc`):
//!
//! ```rust
//! // Works in embedded environments!
//! use cache_rs::LruCache;
//! use cache_rs::config::LruCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! let config = LruCacheConfig {
//!     capacity: NonZeroUsize::new(100).unwrap(),
//!     max_size: u64::MAX,
//! };
//! let mut cache = LruCache::init(config, None);
//! cache.put("sensor_1", 42.5f32);
//! ```
//!
//! ## Detailed Algorithm Descriptions
//!
//! ### LRU (Least Recently Used)
//!
//! Evicts the item that hasn't been accessed for the longest time. Simple and effective
//! for workloads with temporal locality.
//!
//! ```rust
//! use cache_rs::LruCache;
//! use cache_rs::config::LruCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! let config = LruCacheConfig {
//!     capacity: NonZeroUsize::new(2).unwrap(),
//!     max_size: u64::MAX,
//! };
//! let mut cache = LruCache::init(config, None);
//! cache.put("a", 1);
//! cache.put("b", 2);
//! cache.get(&"a");      // "a" becomes most recently used
//! cache.put("c", 3);    // "b" evicted (least recently used)
//! assert!(cache.get(&"b").is_none());
//! ```
//!
//! ### SLRU (Segmented LRU)
//!
//! Divides cache into probationary and protected segments. Items enter probationary
//! and are promoted to protected on second access. Excellent scan resistance.
//!
//! ```rust
//! use cache_rs::SlruCache;
//! use cache_rs::config::SlruCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! let config = SlruCacheConfig {
//!     capacity: NonZeroUsize::new(100).unwrap(),
//!     protected_capacity: NonZeroUsize::new(20).unwrap(),
//!     max_size: u64::MAX,
//! };
//! let mut cache = SlruCache::init(config, None);
//!
//! cache.put("hot", 1);
//! cache.get(&"hot");  // Promoted to protected segment!
//! ```
//!
//! ### LFU (Least Frequently Used)
//!
//! Tracks access frequency and evicts the least frequently accessed item.
//! Great for workloads with stable popularity patterns.
//!
//! ```rust
//! use cache_rs::LfuCache;
//! use cache_rs::config::LfuCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! let config = LfuCacheConfig {
//!     capacity: NonZeroUsize::new(2).unwrap(),
//!     max_size: u64::MAX,
//! };
//! let mut cache = LfuCache::init(config, None);
//! cache.put("rare", 1);
//! cache.put("popular", 2);
//!
//! // Access "popular" multiple times
//! for _ in 0..10 { cache.get(&"popular"); }
//!
//! cache.put("new", 3);  // "rare" evicted (lowest frequency)
//! assert!(cache.get(&"popular").is_some());
//! ```
//!
//! ### LFUDA (LFU with Dynamic Aging)
//!
//! Addresses LFU's "cache pollution" problem by incorporating aging. Old popular
//! items gradually lose priority, allowing new items to compete fairly.
//!
//! ```rust
//! use cache_rs::LfudaCache;
//! use cache_rs::config::LfudaCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! let config = LfudaCacheConfig {
//!     capacity: NonZeroUsize::new(100).unwrap(),
//!     initial_age: 0,
//!     max_size: u64::MAX,
//! };
//! let mut cache = LfudaCache::init(config, None);
//!
//! // Old popular items will eventually age out if not accessed
//! for i in 0..100 {
//!     cache.put(i, i);
//! }
//! ```
//!
//! ### GDSF (Greedy Dual-Size Frequency)
//!
//! Combines frequency, size, and aging. Priority = (Frequency / Size) + Age.
//! Ideal for caching variable-sized objects like images or API responses.
//!
//! ```rust
//! use cache_rs::GdsfCache;
//! use cache_rs::config::GdsfCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! let config = GdsfCacheConfig {
//!     capacity: NonZeroUsize::new(1000).unwrap(),
//!     initial_age: 0.0,
//!     max_size: 10 * 1024 * 1024,  // 10MB
//! };
//! let mut cache: GdsfCache<String, Vec<u8>> = GdsfCache::init(config, None);
//!
//! // Size-aware insertion
//! cache.put("small.txt".to_string(), vec![0u8; 100], 100);
//! cache.put("large.bin".to_string(), vec![0u8; 10000], 10000);
//! // Small items get higher priority per byte
//! ```
//!
//! ## Modules
//!
//! - [`lru`]: Least Recently Used cache implementation
//! - [`slru`]: Segmented LRU cache implementation
//! - [`lfu`]: Least Frequently Used cache implementation
//! - [`lfuda`]: LFU with Dynamic Aging cache implementation
//! - [`gdsf`]: Greedy Dual Size Frequency cache implementation
//! - [`config`]: Configuration structures for all cache algorithms
//! - [`metrics`]: Metrics collection for cache performance monitoring
//! - `concurrent`: Thread-safe concurrent cache implementations (requires `concurrent` feature)

#![no_std]

// `scoped_threadpool` is only linked for the test build; the library itself
// stays `no_std` and dependency-free.
#[cfg(test)]
extern crate scoped_threadpool;

303/// Unified cache entry type.
304///
305/// Provides a generic `CacheEntry<K, V, M>` structure that holds key, value,
306/// timestamps, and algorithm-specific metadata. This is the foundation for
307/// the dual-limit capacity management system.
308pub mod entry;
309
310/// Algorithm-specific metadata types.
311///
312/// Provides metadata structures for each cache algorithm:
313/// - `LfuMeta`: Frequency counter for LFU
314/// - `LfudaMeta`: Frequency for LFUDA (age is cache-global)
315/// - `SlruMeta`: Segment location for SLRU
316/// - `GdsfMeta`: Frequency and priority for GDSF
317pub mod meta;
318
319/// Doubly linked list implementation with in-place editing capabilities.
320///
321/// This module provides a memory-efficient doubly linked list that allows for
322/// efficient insertion, removal, and reordering operations.
323///
324/// **Note**: This module is internal infrastructure and should not be used directly
325/// by library consumers. It exposes unsafe raw pointer operations that require
326/// careful invariant maintenance. Use the high-level cache implementations instead.
327pub(crate) mod list;
328
329/// Cache configuration structures.
330///
331/// Provides configuration structures for all cache algorithm implementations.
332pub mod config;
333
334/// Least Recently Used (LRU) cache implementation.
335///
336/// Provides a fixed-size cache that evicts the least recently used items when
337/// the capacity is reached.
338pub mod lru;
339
340/// Segmented LRU (SLRU) cache implementation.
341///
342/// Provides a fixed-size cache that uses a segmented approach to evict
343/// items based on their usage patterns. This is useful for scenarios where
344/// certain items are accessed more frequently than others.
345pub mod slru;
346
347/// Least Frequently Used (LFU) cache implementation.
348///
349/// Provides a fixed-size cache that evicts the least frequently used items
350/// when capacity is reached. Items are tracked by their access frequency.
351pub mod lfu;
352
353/// Least Frequently Used with Dynamic Aging (LFUDA) cache implementation.
354///
355/// An enhanced LFU cache that addresses the aging problem by dynamically
356/// adjusting item priorities based on a global aging factor.
357pub mod lfuda;
358
359/// Greedy Dual-Size Frequency (GDSF) cache implementation.
360///
361/// A cache replacement algorithm that combines frequency, size, and aging.
362/// Assigns priority based on (Frequency / Size) + Global_Age formula.
363pub mod gdsf;
364
365/// Cache metrics system.
366///
367/// Provides a flexible metrics collection and reporting system for all cache algorithms.
368/// Each algorithm can track algorithm-specific metrics while implementing a common interface.
369pub mod metrics;
370
/// Concurrent cache implementations.
///
/// Provides thread-safe cache implementations using segmented storage for high-performance
/// multi-threaded access. Each concurrent cache partitions the key space across multiple
/// segments, with each segment protected by its own lock.
///
/// Available when the `concurrent` feature is enabled.
#[cfg(feature = "concurrent")]
pub mod concurrent;

381// Re-export cache types
382pub use gdsf::GdsfCache;
383pub use lfu::LfuCache;
384pub use lfuda::LfudaCache;
385pub use lru::LruCache;
386pub use slru::SlruCache;
387
388// Re-export entry type
389pub use entry::CacheEntry;
390
391// Re-export metadata types
392pub use meta::{GdsfMeta, LfuMeta, LfudaMeta, SlruMeta, SlruSegment};
393
394#[cfg(feature = "concurrent")]
395pub use concurrent::{
396    ConcurrentGdsfCache, ConcurrentLfuCache, ConcurrentLfudaCache, ConcurrentLruCache,
397    ConcurrentSlruCache,
398};