use std::time::Duration;
use std::sync::atomic::{AtomicU64, Ordering};
use crate::backends::CacheBackend;
use crate::{Error, Result};
/// Thread-safe hit/miss counters with timing accumulators for a cache.
///
/// All counters use `Ordering::Relaxed`: individual increments never race,
/// but multi-counter reads (e.g. `hit_ratio`) are not a consistent snapshot.
#[derive(Debug)]
pub struct CacheStats {
    // Lookups answered from the cache.
    hits: AtomicU64,
    // Lookups that fell through to the underlying computation.
    misses: AtomicU64,
    // Total nanoseconds avoided by serving hits.
    time_saved_ns: AtomicU64,
    // Total nanoseconds spent doing real work on misses.
    execution_time_ns: AtomicU64,
}

impl CacheStats {
    /// Creates a tracker with every counter at zero.
    pub fn new() -> Self {
        Self {
            hits: AtomicU64::new(0),
            misses: AtomicU64::new(0),
            time_saved_ns: AtomicU64::new(0),
            execution_time_ns: AtomicU64::new(0),
        }
    }

    /// Records one hit and the wall time it avoided, in nanoseconds.
    pub fn record_hit(&self, saved_time_ns: u64) {
        self.hits.fetch_add(1, Ordering::Relaxed);
        self.time_saved_ns.fetch_add(saved_time_ns, Ordering::Relaxed);
    }

    /// Records one miss and the time spent computing the value, in nanoseconds.
    pub fn record_miss(&self, execution_time_ns: u64) {
        self.misses.fetch_add(1, Ordering::Relaxed);
        self.execution_time_ns.fetch_add(execution_time_ns, Ordering::Relaxed);
    }

    /// Total hits recorded so far.
    pub fn hit_count(&self) -> u64 {
        self.hits.load(Ordering::Relaxed)
    }

    /// Total misses recorded so far.
    pub fn miss_count(&self) -> u64 {
        self.misses.load(Ordering::Relaxed)
    }

    /// Fraction of lookups that were hits, or 0.0 when nothing was recorded.
    pub fn hit_ratio(&self) -> f64 {
        let hits = self.hits.load(Ordering::Relaxed) as f64;
        let total = hits + self.misses.load(Ordering::Relaxed) as f64;
        if total == 0.0 { 0.0 } else { hits / total }
    }

    /// Cumulative time saved by cache hits.
    pub fn time_saved(&self) -> Duration {
        let saved_ns = self.time_saved_ns.load(Ordering::Relaxed);
        Duration::from_nanos(saved_ns)
    }

    /// Mean execution time per miss; zero when no misses were recorded.
    pub fn average_execution_time(&self) -> Duration {
        let total_time = self.execution_time_ns.load(Ordering::Relaxed);
        let misses = self.misses.load(Ordering::Relaxed);
        // checked_div avoids the divide-by-zero branch explicitly.
        Duration::from_nanos(total_time.checked_div(misses).unwrap_or(0))
    }

    /// Zeroes every counter.
    pub fn reset(&self) {
        for counter in [
            &self.hits,
            &self.misses,
            &self.time_saved_ns,
            &self.execution_time_ns,
        ] {
            counter.store(0, Ordering::Relaxed);
        }
    }
}

impl Default for CacheStats {
    fn default() -> Self {
        Self::new()
    }
}
/// Computes cache TTLs that grow with item popularity.
///
/// Items accessed fewer than `access_threshold` times get `base_ttl`;
/// hotter items get `base_ttl * multiplier * factor`, where `factor` is
/// the access count relative to the threshold, capped at 10x. Every
/// returned TTL is clamped into `[min_ttl, max_ttl]`.
pub struct AdaptiveTtl {
    base_ttl: u64,          // default TTL in seconds for cold items
    min_ttl: u64,           // lower bound (seconds) on any returned TTL
    max_ttl: u64,           // upper bound (seconds) on any returned TTL
    access_threshold: u64,  // access count at which scaling kicks in
    multiplier: f64,        // growth multiplier once the threshold is reached
}

impl AdaptiveTtl {
    /// Creates a policy with the given TTLs (all in seconds).
    ///
    /// `min_ttl` must be <= `max_ttl`; `calculate_ttl` uses `u64::clamp`,
    /// which panics otherwise.
    pub fn new(base_ttl: u64, min_ttl: u64, max_ttl: u64) -> Self {
        Self {
            base_ttl,
            min_ttl,
            max_ttl,
            access_threshold: 5,
            multiplier: 1.5,
        }
    }

    /// Sets the access count at which TTL scaling begins (default: 5).
    pub fn with_access_threshold(mut self, threshold: u64) -> Self {
        self.access_threshold = threshold;
        self
    }

    /// Sets the TTL growth multiplier (default: 1.5).
    pub fn with_multiplier(mut self, multiplier: f64) -> Self {
        self.multiplier = multiplier;
        self
    }

    /// Returns the TTL for an item with the given access count.
    ///
    /// Fix: the below-threshold path previously returned `base_ttl`
    /// unclamped, so a base outside `[min_ttl, max_ttl]` escaped the
    /// configured bounds. Both paths now clamp.
    pub fn calculate_ttl(&self, access_count: u64) -> Duration {
        let ttl_secs = if access_count < self.access_threshold {
            self.base_ttl
        } else {
            // Scale linearly with popularity, capped at 10x the threshold.
            let factor = (access_count as f64 / self.access_threshold as f64).min(10.0);
            (self.base_ttl as f64 * self.multiplier * factor) as u64
        };
        Duration::from_secs(ttl_secs.clamp(self.min_ttl, self.max_ttl))
    }
}
/// Warms a cache backend by bulk-inserting precomputed entries.
pub struct Prefetcher<B: CacheBackend> {
    backend: B,
    // Upper bound on entries written per `prefetch` call.
    max_items: usize,
}

impl<B: CacheBackend> Prefetcher<B> {
    /// Wraps `backend` with a default limit of 10 items per prefetch.
    pub fn new(backend: B) -> Self {
        Self {
            backend,
            max_items: 10,
        }
    }

    /// Overrides the per-call item limit.
    pub fn with_max_items(mut self, max_items: usize) -> Self {
        self.max_items = max_items;
        self
    }

    /// Generates entries via `pattern_fn` and writes up to `max_items` of
    /// them to the backend concurrently, all with the same optional TTL.
    ///
    /// # Errors
    /// Fails fast: the first backend error cancels the remaining writes
    /// and is returned.
    ///
    /// NOTE(review): the type parameter `T` is never used and cannot be
    /// inferred, so callers must always supply it via turbofish; it is
    /// kept only for signature compatibility with existing call sites.
    pub async fn prefetch<F, T>(&self, pattern_fn: F, ttl: Option<Duration>) -> Result<()>
    where
        F: Fn() -> Vec<(String, Vec<u8>)>,
        T: 'static,
    {
        let items = pattern_fn().into_iter().take(self.max_items);
        futures::future::try_join_all(items.map(|(key, value)| {
            let backend = &self.backend;
            // Option<Duration> is Copy; the previous `ttl.clone()` was redundant.
            async move { backend.set(key, value, ttl).await }
        }))
        .await?;
        Ok(())
    }
}
/// Queues cache mutations and applies them against a backend in one pass.
pub struct BatchOperations<B: CacheBackend> {
    backend: B,
    operations: Vec<BatchOperation>,
}

/// One queued mutation: an insert (with optional TTL) or a removal.
enum BatchOperation {
    Set {
        key: String,
        value: Vec<u8>,
        ttl: Option<Duration>,
    },
    Remove(String),
}

impl<B: CacheBackend> BatchOperations<B> {
    /// Starts an empty batch targeting `backend`.
    pub fn new(backend: B) -> Self {
        Self {
            backend,
            operations: Vec::new(),
        }
    }

    /// Queues an insert; returns `&mut self` for chaining.
    pub fn set(&mut self, key: String, value: Vec<u8>, ttl: Option<Duration>) -> &mut Self {
        self.operations.push(BatchOperation::Set { key, value, ttl });
        self
    }

    /// Queues a removal; returns `&mut self` for chaining.
    pub fn remove(&mut self, key: String) -> &mut Self {
        self.operations.push(BatchOperation::Remove(key));
        self
    }

    /// Applies every queued operation sequentially, in insertion order,
    /// consuming the batch. Stops at — and returns — the first backend error.
    pub async fn execute(self) -> Result<()> {
        let Self { backend, operations } = self;
        for operation in operations {
            match operation {
                BatchOperation::Set { key, value, ttl } => backend.set(key, value, ttl).await?,
                BatchOperation::Remove(key) => backend.remove(&key).await?,
            }
        }
        Ok(())
    }
}
/// Tracks approximate cache memory usage against a fixed byte budget.
///
/// Callers report allocations/deallocations; `should_evict` flags when
/// the tracked total exceeds the budget.
#[cfg(feature = "memory")]
pub struct MemoryOptimizer {
    // Budget in bytes (converted from MB at construction).
    max_memory: usize,
    // Currently tracked usage in bytes.
    current_memory: AtomicU64,
}

#[cfg(feature = "memory")]
impl MemoryOptimizer {
    /// Creates an optimizer with a budget of `max_memory_mb` mebibytes.
    pub fn new(max_memory_mb: usize) -> Self {
        Self {
            max_memory: max_memory_mb * 1024 * 1024,
            current_memory: AtomicU64::new(0),
        }
    }

    /// Adds `size_bytes` to the tracked usage.
    pub fn record_allocation(&self, size_bytes: usize) {
        self.current_memory.fetch_add(size_bytes as u64, Ordering::Relaxed);
    }

    /// Subtracts `size_bytes` from the tracked usage, saturating at zero.
    ///
    /// Fix: this previously used `fetch_sub`, which wraps on underflow —
    /// one unmatched deallocation left usage near `u64::MAX` and pinned
    /// `should_evict` to `true` permanently.
    pub fn record_deallocation(&self, size_bytes: usize) {
        // The closure always returns Some, so fetch_update cannot fail.
        let _ = self.current_memory.fetch_update(
            Ordering::Relaxed,
            Ordering::Relaxed,
            |current| Some(current.saturating_sub(size_bytes as u64)),
        );
    }

    /// Returns true once tracked usage exceeds the budget.
    pub fn should_evict(&self) -> bool {
        self.current_memory.load(Ordering::Relaxed) as usize > self.max_memory
    }

    /// Current tracked usage in bytes.
    pub fn memory_usage(&self) -> usize {
        self.current_memory.load(Ordering::Relaxed) as usize
    }

    /// Usage as a percentage of the budget.
    ///
    /// Fix: a zero budget (`max_memory_mb == 0`) previously divided by
    /// zero, yielding NaN or infinity; it now reports 0.0.
    pub fn memory_usage_percent(&self) -> f64 {
        if self.max_memory == 0 {
            return 0.0;
        }
        let current = self.current_memory.load(Ordering::Relaxed) as f64;
        (current / self.max_memory as f64) * 100.0
    }
}
/// Zlib (DEFLATE) payload compression at a fixed compression level.
#[cfg(feature = "bincode")]
pub struct Compression {
    // Zlib level, clamped to 0 (store only) ..= 9 (best compression).
    level: u32,
}

#[cfg(feature = "bincode")]
impl Compression {
    /// Creates a compressor; out-of-range `level` values are clamped to `0..=9`.
    pub fn new(level: u32) -> Self {
        Self {
            level: level.clamp(0, 9),
        }
    }

    /// Returns `data` zlib-compressed at the configured level.
    ///
    /// # Errors
    /// Wraps any encoder I/O failure in `Error::Codec`.
    pub fn compress(&self, data: &[u8]) -> Result<Vec<u8>> {
        use flate2::{write::ZlibEncoder, Compression as FlateCompression};
        use std::io::Write;
        // Shared mapping from I/O errors to this crate's codec error.
        let codec_err = |e: std::io::Error| Error::Codec(e.to_string());
        let mut encoder = ZlibEncoder::new(Vec::new(), FlateCompression::new(self.level));
        encoder.write_all(data).map_err(codec_err)?;
        encoder.finish().map_err(codec_err)
    }

    /// Inflates zlib-compressed `data` back to its original bytes.
    ///
    /// # Errors
    /// Wraps any decoder I/O failure (including corrupt input) in `Error::Codec`.
    pub fn decompress(&self, data: &[u8]) -> Result<Vec<u8>> {
        use flate2::read::ZlibDecoder;
        use std::io::Read;
        let mut output = Vec::new();
        ZlibDecoder::new(data)
            .read_to_end(&mut output)
            .map_err(|e| Error::Codec(e.to_string()))?;
        Ok(output)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_cache_stats() {
        let stats = CacheStats::new();
        // Two hits (1ms + 2ms saved) and one miss (5ms of real work).
        stats.record_hit(1_000_000);
        stats.record_hit(2_000_000);
        stats.record_miss(5_000_000);
        assert_eq!(stats.hit_count(), 2);
        assert_eq!(stats.miss_count(), 1);
        assert_eq!(stats.hit_ratio(), 2.0 / 3.0);
        assert_eq!(stats.time_saved().as_nanos(), 3_000_000);
        assert_eq!(stats.average_execution_time().as_nanos(), 5_000_000);
        stats.reset();
        assert_eq!(stats.hit_count(), 0);
        assert_eq!(stats.miss_count(), 0);
        // Added: reset() must also clear the timing accumulators and ratio.
        assert_eq!(stats.time_saved().as_nanos(), 0);
        assert_eq!(stats.average_execution_time().as_nanos(), 0);
        assert_eq!(stats.hit_ratio(), 0.0);
    }

    #[test]
    fn test_adaptive_ttl() {
        let adaptive_ttl = AdaptiveTtl::new(60, 10, 3600);
        // Below the access threshold (5) the base TTL is returned.
        assert_eq!(adaptive_ttl.calculate_ttl(0).as_secs(), 60);
        assert_eq!(adaptive_ttl.calculate_ttl(4).as_secs(), 60);
        // TTL grows monotonically with access count past the threshold.
        let ttl_6 = adaptive_ttl.calculate_ttl(6).as_secs();
        let ttl_10 = adaptive_ttl.calculate_ttl(10).as_secs();
        assert!(ttl_6 > 60);
        assert!(ttl_10 > ttl_6);
        // Fix: the factor caps at 10x, so the maximum scaled TTL here is
        // 60 * 1.5 * 10 = 900 — not max_ttl (3600) as previously asserted.
        let ttl_1000 = adaptive_ttl.calculate_ttl(1000).as_secs();
        assert_eq!(ttl_1000, 900);
        // Added: max_ttl does bind when it is below the scaled ceiling.
        let capped = AdaptiveTtl::new(60, 10, 100);
        assert_eq!(capped.calculate_ttl(1000).as_secs(), 100);
    }
}