1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
//!
//! ---
//!
//! # Code Reference
//!
//! This section provides quick code examples and API references for each cache algorithm.
//!
//! ## Algorithm Selection Guide
//!
//! ```text
//! ┌─────────────────────────────────────────────────────────────────────────────┐
//! │ Which Cache Algorithm Should I Use? │
//! ├─────────────────────────────────────────────────────────────────────────────┤
//! │ │
//! │ Is your workload primarily... │
//! │ │
//! │ ┌─────────────────┐ │
//! │ │ Recency-based? │──Yes──▶ Are you worried about scans? │
//! │ │ (recent = hot) │ │ │
//! │ └────────┬────────┘ Yes │ No │
//! │ │ │ │ │
//! │ No ▼ ▼ │
//! │ │ ┌──────────┐ ┌──────────┐ │
//! │ │ │ SLRU │ │ LRU │ │
//! │ ▼ └──────────┘ └──────────┘ │
//! │ ┌─────────────────┐ │
//! │ │ Frequency-based?│──Yes──▶ Does popularity change over time? │
//! │ │ (popular = hot) │ │ │
//! │ └────────┬────────┘ Yes │ No │
//! │ │ │ │ │
//! │ No ▼ ▼ │
//! │ │ ┌──────────┐ ┌──────────┐ │
//! │ │ │ LFUDA │ │ LFU │ │
//! │ ▼ └──────────┘ └──────────┘ │
//! │ ┌─────────────────┐ │
//! │ │ Variable-sized │──Yes──▶ ┌──────────┐ │
//! │ │ objects? │ │ GDSF │ │
//! │ └─────────────────┘ └──────────┘ │
//! │ │
//! └─────────────────────────────────────────────────────────────────────────────┘
//! ```
//!
//! ## Quick Reference
//!
//! | Algorithm | Description | Best Use Case |
//! |-----------|-------------|---------------|
//! | [`LruCache`] | Least Recently Used | General purpose, recency-based access |
//! | [`SlruCache`] | Segmented LRU | Mixed workloads with scans |
//! | [`LfuCache`] | Least Frequently Used | Stable popularity patterns |
//! | [`LfudaCache`] | LFU with Dynamic Aging | Long-running, evolving popularity |
//! | [`GdsfCache`] | Greedy Dual Size Frequency | CDNs, variable-sized objects |
//!
//! ## Performance Characteristics
//!
//! | Algorithm | Get | Put | Remove | Memory/Entry | Scan Resist | Adapts |
//! |-----------|-----|-----|--------|--------------|-------------|--------|
//! | LRU | O(1)| O(1)| O(1) | ~80 bytes | Poor | N/A |
//! | SLRU | O(1)| O(1)| O(1) | ~90 bytes | Good | N/A |
//! | LFU | O(1)| O(1)| O(1) | ~100 bytes | Excellent | No |
//! | LFUDA | O(1)| O(1)| O(1) | ~110 bytes | Excellent | Yes |
//! | GDSF | O(1)| O(1)| O(1) | ~120 bytes | Good | Yes |
//!
//! ## Code Examples
//!
//! ### LRU (Least Recently Used)
//!
//! Evicts the item that hasn't been accessed for the longest time. Simple and effective
//! for workloads with temporal locality.
//!
//! ```rust
//! use cache_rs::LruCache;
//! use cache_rs::config::LruCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! let config = LruCacheConfig {
//! capacity: NonZeroUsize::new(2).unwrap(),
//! max_size: u64::MAX,
//! };
//! let mut cache = LruCache::init(config, None);
//! cache.put("a", 1, 1);
//! cache.put("b", 2, 1);
//! cache.get(&"a"); // "a" becomes most recently used
//! cache.put("c", 3, 1); // "b" evicted (least recently used)
//! assert!(cache.get(&"b").is_none());
//! ```
//!
//! ### SLRU (Segmented LRU)
//!
//! Divides cache into probationary and protected segments. Items enter probationary
//! and are promoted to protected on second access. Excellent scan resistance.
//!
//! ```rust
//! use cache_rs::SlruCache;
//! use cache_rs::config::SlruCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! let config = SlruCacheConfig {
//! capacity: NonZeroUsize::new(100).unwrap(),
//! protected_capacity: NonZeroUsize::new(20).unwrap(),
//! max_size: u64::MAX,
//! };
//! let mut cache = SlruCache::init(config, None);
//!
//! cache.put("hot", 1, 1);
//! cache.get(&"hot"); // Promoted to protected segment!
//! ```
//!
//! ### LFU (Least Frequently Used)
//!
//! Tracks access frequency and evicts the least frequently accessed item.
//! Great for workloads with stable popularity patterns.
//!
//! ```rust
//! use cache_rs::LfuCache;
//! use cache_rs::config::LfuCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! let config = LfuCacheConfig {
//! capacity: NonZeroUsize::new(2).unwrap(),
//! max_size: u64::MAX,
//! };
//! let mut cache = LfuCache::init(config, None);
//! cache.put("rare", 1, 1);
//! cache.put("popular", 2, 1);
//!
//! // Access "popular" multiple times
//! for _ in 0..10 { cache.get(&"popular"); }
//!
//! cache.put("new", 3, 1); // "rare" evicted (lowest frequency)
//! assert!(cache.get(&"popular").is_some());
//! ```
//!
//! ### LFUDA (LFU with Dynamic Aging)
//!
//! Addresses LFU's "cache pollution" problem by incorporating aging. Old popular
//! items gradually lose priority, allowing new items to compete fairly.
//!
//! ```rust
//! use cache_rs::LfudaCache;
//! use cache_rs::config::LfudaCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! let config = LfudaCacheConfig {
//! capacity: NonZeroUsize::new(100).unwrap(),
//! initial_age: 0,
//! max_size: u64::MAX,
//! };
//! let mut cache = LfudaCache::init(config, None);
//!
//! // Old popular items will eventually age out if not accessed
//! for i in 0..100 {
//! cache.put(i, i, 1);
//! }
//! ```
//!
//! ### GDSF (Greedy Dual-Size Frequency)
//!
//! Combines frequency, size, and aging. Priority = (Frequency / Size) + Age.
//! Ideal for caching variable-sized objects like images or API responses.
//!
//! ```rust
//! use cache_rs::GdsfCache;
//! use cache_rs::config::GdsfCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! let config = GdsfCacheConfig {
//! capacity: NonZeroUsize::new(1000).unwrap(),
//! initial_age: 0.0,
//! max_size: 10 * 1024 * 1024, // 10MB
//! };
//! let mut cache: GdsfCache<String, Vec<u8>> = GdsfCache::init(config, None);
//!
//! // Size-aware insertion
//! cache.put("small.txt".to_string(), vec![0u8; 100], 100);
//! cache.put("large.bin".to_string(), vec![0u8; 10000], 10000);
//! // Small items get higher priority per byte
//! ```
//!
//! ## Concurrent Caches
//!
//! Enable the `concurrent` feature for thread-safe versions:
//!
//! ```toml
//! [dependencies]
//! cache-rs = { version = "0.3", features = ["concurrent"] }
//! ```
//!
//! ```rust,ignore
//! use cache_rs::ConcurrentLruCache;
//! use std::sync::Arc;
//!
//! let cache = Arc::new(ConcurrentLruCache::new(10_000));
//!
//! // Safe to share across threads
//! let cache_clone = Arc::clone(&cache);
//! std::thread::spawn(move || {
//! cache_clone.put("key".to_string(), 42);
//! });
//! ```
//!
//! Concurrent caches use **lock striping** for high throughput:
//!
//! ```text
//! ┌────────────────────────────────────────────────────────────────────┐
//! │ ConcurrentCache (16 segments) │
//! │ │
//! │ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ │
//! │ │Segment 0│ │Segment 1│ │Segment 2│ ... │Segment15│ │
//! │ │ [Mutex] │ │ [Mutex] │ │ [Mutex] │ │ [Mutex] │ │
//! │ └─────────┘ └─────────┘ └─────────┘ └─────────┘ │
//! │ ▲ ▲ ▲ ▲ │
//! │ │ │ │ │ │
//! │ hash(k1)%16 hash(k2)%16 hash(k3)%16 hash(kN)%16 │
//! └────────────────────────────────────────────────────────────────────┘
//! ```
//!
//! ## Dual-Limit Capacity
//!
//! All caches support both entry count and size limits:
//!
//! ```rust
//! use cache_rs::LruCache;
//! use cache_rs::config::LruCacheConfig;
//! use core::num::NonZeroUsize;
//!
//! // Limit by both count (1000 entries) AND size (10MB)
//! let config = LruCacheConfig {
//! capacity: NonZeroUsize::new(1000).unwrap(),
//! max_size: 10 * 1024 * 1024,
//! };
//! let mut cache: LruCache<String, Vec<u8>> = LruCache::init(config, None);
//!
//! // Track size explicitly
//! let data = vec![0u8; 1024];
//! cache.put("file.bin".to_string(), data, 1024);
//! ```
//!
//! ## Modules
//!
//! - [`lru`]: Least Recently Used cache implementation
//! - [`slru`]: Segmented LRU cache implementation
//! - [`lfu`]: Least Frequently Used cache implementation
//! - [`lfuda`]: LFU with Dynamic Aging cache implementation
//! - [`gdsf`]: Greedy Dual Size Frequency cache implementation
//! - [`config`]: Configuration structures for all cache algorithms
//! - [`metrics`]: Metrics collection for cache performance monitoring
//! - `concurrent`: Thread-safe concurrent cache implementations (requires `concurrent` feature)
// Legacy 2015-edition dependency declaration; on edition 2018+ the crate is
// in scope without this line. NOTE(review): nothing visible in this chunk uses
// `scoped_threadpool` directly — presumably the tests or the `concurrent`
// module do. Confirm before removing.
extern crate scoped_threadpool;
/// Unified cache entry type.
///
/// Provides a generic `CacheEntry<K, V, M>` structure that holds key, value,
/// timestamps, and algorithm-specific metadata. This is the foundation for
/// the dual-limit capacity management system.
/// Algorithm-specific metadata types.
///
/// Provides metadata structures for each cache algorithm:
/// - `LfuMeta`: Frequency counter for LFU
/// - `LfudaMeta`: Frequency for LFUDA (age is cache-global)
/// - `GdsfMeta`: Frequency and priority for GDSF
/// Doubly linked list implementation with in-place editing capabilities.
///
/// This module provides a memory-efficient doubly linked list that allows for
/// efficient insertion, removal, and reordering operations.
///
/// **Note**: This module is internal infrastructure and should not be used directly
/// by library consumers. It exposes unsafe raw pointer operations that require
/// careful invariant maintenance. Use the high-level cache implementations instead.
pub
/// Cache configuration structures.
///
/// Provides configuration structures for all cache algorithm implementations.
/// Least Recently Used (LRU) cache implementation.
///
/// Provides a fixed-size cache that evicts the least recently used items when
/// the capacity is reached.
/// Segmented LRU (SLRU) cache implementation.
///
/// Provides a fixed-size cache that uses a segmented approach to evict
/// items based on their usage patterns. This is useful for scenarios where
/// certain items are accessed more frequently than others.
/// Least Frequently Used (LFU) cache implementation.
///
/// Provides a fixed-size cache that evicts the least frequently used items
/// when capacity is reached. Items are tracked by their access frequency.
/// Least Frequently Used with Dynamic Aging (LFUDA) cache implementation.
///
/// An enhanced LFU cache that addresses the aging problem by dynamically
/// adjusting item priorities based on a global aging factor.
/// Greedy Dual-Size Frequency (GDSF) cache implementation.
///
/// A cache replacement algorithm that combines frequency, size, and aging.
/// Assigns priority based on (Frequency / Size) + Global_Age formula.
/// Cache metrics system.
///
/// Provides a flexible metrics collection and reporting system for all cache algorithms.
/// Each algorithm can track algorithm-specific metrics while implementing a common interface.
/// Concurrent cache implementations.
///
/// Provides thread-safe cache implementations using segmented storage for high-performance
/// multi-threaded access. Each concurrent cache partitions the key space across multiple
/// segments, with each segment protected by its own lock.
///
/// Available when the `concurrent` feature is enabled.
/// Size value for entry-count mode where actual size doesn't matter.
///
/// Use this when you only want to limit the number of entries, not total size.
/// Each entry will be counted as having size 1, making `current_size()` equal to `len()`.
///
/// # Example
///
/// ```rust
/// use cache_rs::{LruCache, SIZE_UNIT};
/// use cache_rs::config::LruCacheConfig;
/// use core::num::NonZeroUsize;
///
/// let config = LruCacheConfig {
/// capacity: NonZeroUsize::new(100).unwrap(),
/// max_size: u64::MAX, // No size limit
/// };
/// let mut cache = LruCache::init(config, None);
///
/// // Using SIZE_UNIT for count-based caching
/// cache.put("key1", "value1", SIZE_UNIT);
/// cache.put("key2", "value2", SIZE_UNIT);
/// assert_eq!(cache.len(), 2);
/// assert_eq!(cache.current_size(), 2); // Each entry counts as 1
/// ```
///
/// # When to Use
///
/// - **Entry-count mode**: When you only care about limiting the number of entries
/// - **Fixed-size objects**: When all cached items are the same size
/// - **Simple caching**: When size tracking isn't important for your use case
///
/// # When NOT to Use
///
/// - **Size-constrained caches**: When `max_size` is set to a real memory limit
/// - **Variable-sized objects**: When objects have significantly different sizes
/// - **Size-aware algorithms**: GDSF and LFUDA work better with actual sizes
pub const SIZE_UNIT: u64 = 1; // each entry contributes exactly 1, so current_size() == len()
// Re-export cache types
pub use GdsfCache;
pub use LfuCache;
pub use LfudaCache;
pub use LruCache;
pub use SlruCache;
// Re-export entry types
pub use ;
// Re-export metadata types (also available directly from algorithm modules)
pub use ;
// Re-export SLRU Location enum for completeness
pub use Location as SlruLocation;
pub use ;