use mod_interface::mod_interface;
#[ cfg( all( feature = "caching", feature = "compression" ) ) ]
mod private
{
use std::
{
collections ::HashMap,
sync ::Arc,
};
use core::
{
hash ::Hash,
time ::Duration,
};
use std::time::Instant;
use tokio::sync::RwLock;
use blake3::{ Hash as Blake3Hash, Hasher as Blake3Hasher };
/// Tunable parameters for the response cache.
#[ derive( Debug, Clone ) ]
pub struct CacheConfig
{
/// Capacity hint for the backing map.
/// NOTE(review) : only used to pre-size the map in this file — confirm whether
/// it is also meant to be a hard cap on entry count.
pub max_entries : usize,
/// TTL applied when `put` is called without an explicit TTL.
pub default_ttl : Duration,
/// Upper bound on total stored payload bytes; exceeding it triggers LRU eviction.
pub max_memory_bytes : usize,
/// Gzip-compress payloads larger than 1 KiB when storing them.
pub enable_compression : bool,
/// NOTE(review) : unread in this module — presumably the cadence of an external
/// metrics reporter; verify against callers.
pub metrics_interval : Duration,
}
impl Default for CacheConfig
{
  /// Stock configuration : 10 000 entries, 5-minute TTL, 100 MiB memory
  /// budget, compression enabled, metrics every 60 seconds.
  #[ inline ]
  fn default() -> Self
  {
    let one_mib = 1024 * 1024;
    Self
    {
      max_entries : 10000,
      default_ttl : Duration::from_secs( 5 * 60 ),
      max_memory_bytes : 100 * one_mib,
      enable_compression : true,
      metrics_interval : Duration::from_secs( 60 ),
    }
  }
}
/// Composite lookup key : HTTP method and path plus BLAKE3 digests of the
/// request body and query string.
#[ derive( Debug, Clone, PartialEq, Eq, Hash ) ]
pub struct CacheKey
{
/// HTTP method, e.g. "GET" or "POST".
pub method : String,
/// Request path.
pub path : String,
/// BLAKE3 digest of the request body bytes.
pub body_hash : Blake3Hash,
/// BLAKE3 digest of the raw query string.
pub query_hash : Blake3Hash,
}
impl CacheKey
{
#[ inline ]
#[ must_use ]
pub fn new( method : &str, path : &str, body : &[u8], query : &str ) -> Self
{
let mut body_hasher = Blake3Hasher::new();
body_hasher.update( body );
let body_hash = body_hasher.finalize();
let mut query_hasher = Blake3Hasher::new();
query_hasher.update( query.as_bytes() );
let query_hash = query_hasher.finalize();
Self
{
method : method.to_string(),
path : path.to_string(),
body_hash,
query_hash,
}
}
#[ inline ]
#[ must_use ]
pub fn for_get( path : &str, query : &str ) -> Self
{
Self::new( "GET", path, &[], query )
}
#[ inline ]
#[ must_use ]
pub fn for_post( path : &str, body : &[u8] ) -> Self
{
Self::new( "POST", path, body, "" )
}
}
/// A stored response payload together with expiry and usage bookkeeping.
#[ derive( Debug, Clone ) ]
pub struct CachedResponse
{
/// Payload bytes; a gzip stream when `is_compressed` is set.
pub data : Vec< u8 >,
/// Creation timestamp.
pub cached_at : Instant,
/// Deadline after which the entry counts as expired.
pub expires_at : Instant,
/// Size of `data` as stored (post-compression), in bytes.
pub size_bytes : usize,
/// Number of reads; starts at 1 on creation.
pub access_count : u64,
/// Timestamp of the most recent read; drives LRU eviction ordering.
pub last_accessed : Instant,
/// Whether `data` holds gzip-compressed rather than raw bytes.
pub is_compressed : bool,
/// MIME type of the payload.
pub content_type : String,
}
impl CachedResponse
{
  /// Build a cache entry from raw response bytes.
  ///
  /// When `enable_compression` is set, payloads larger than 1 KiB are
  /// gzip-compressed — but the compressed form is kept only if it is
  /// actually smaller than the original.
  #[ inline ]
  #[ must_use ]
  pub fn new( data : Vec< u8 >, ttl : Duration, content_type : String, enable_compression : bool ) -> Self
  {
    let created = Instant::now();
    let mut payload = data;
    let mut compressed = false;
    if enable_compression && payload.len() > 1024
    {
      if let Ok( packed ) = Self::compress( &payload )
      {
        if packed.len() < payload.len()
        {
          payload = packed;
          compressed = true;
        }
      }
    }
    Self
    {
      size_bytes : payload.len(),
      data : payload,
      cached_at : created,
      expires_at : created + ttl,
      access_count : 1,
      last_accessed : created,
      is_compressed : compressed,
      content_type,
    }
  }

  /// True once the entry's TTL deadline has passed.
  #[ inline ]
  #[ must_use ]
  pub fn is_expired( &self ) -> bool
  {
    self.expires_at < Instant::now()
  }

  /// Bump the access counter and refresh the LRU timestamp.
  #[ inline ]
  pub fn mark_accessed( &mut self )
  {
    self.last_accessed = Instant::now();
    self.access_count += 1;
  }

  /// Return the payload (decompressing if needed), recording the access.
  ///
  /// # Errors
  /// Returns the gzip error message if a compressed payload cannot be inflated.
  #[ inline ]
  pub fn get_data( &mut self ) -> Result< Vec< u8 >, String >
  {
    self.mark_accessed();
    match self.is_compressed
    {
      true => Self::decompress( &self.data ),
      false => Ok( self.data.clone() ),
    }
  }

  /// Gzip-compress `data` at the fastest compression level.
  fn compress( data : &[u8] ) -> Result< Vec< u8 >, String >
  {
    use flate2::{ write::GzEncoder, Compression };
    use std::io::Write;
    let mut encoder = GzEncoder::new( Vec::new(), Compression::fast() );
    encoder.write_all( data ).map_err( | err | err.to_string() )?;
    encoder.finish().map_err( | err | err.to_string() )
  }

  /// Inflate a gzip stream produced by `compress`.
  fn decompress( data : &[u8] ) -> Result< Vec< u8 >, String >
  {
    use flate2::read::GzDecoder;
    use std::io::Read;
    let mut decoder = GzDecoder::new( data );
    let mut inflated = Vec::new();
    decoder.read_to_end( &mut inflated ).map_err( | err | err.to_string() )?;
    Ok( inflated )
  }
}
/// Point-in-time counters describing cache effectiveness.
#[ derive( Debug, Clone, Default ) ]
pub struct CacheMetrics
{
/// Lookups served from the cache.
pub hits : u64,
/// Lookups that found nothing usable (absent, expired, or corrupt).
pub misses : u64,
/// hits / (hits + misses); refreshed by `update_hit_ratio`.
pub hit_ratio : f64,
/// Bytes currently held; filled in by `PerformanceCache::metrics`.
pub memory_usage : usize,
/// Entries removed to make room for new ones.
pub evictions : u64,
/// Smoothed hit latency in microseconds.
pub avg_hit_time_us : u64,
/// NOTE(review) : never written anywhere in this module — confirm whether it
/// is populated elsewhere or is dead weight.
pub compression_ratio : f64,
}
impl CacheMetrics
{
  /// Recompute `hit_ratio` from the current hit / miss counters.
  /// A cache with zero lookups reports a ratio of 0.0.
  #[ inline ]
  pub fn update_hit_ratio( &mut self )
  {
    let lookups = self.hits + self.misses;
    self.hit_ratio = match lookups
    {
      0 => 0.0,
      n => self.hits as f64 / n as f64,
    };
  }
}
/// Async, TTL-aware response cache with LRU eviction and optional gzip
/// compression of stored payloads.
#[ derive( Debug ) ]
pub struct PerformanceCache
{
/// Key → cached payload map.
storage : Arc< RwLock< HashMap< CacheKey, CachedResponse > > >,
/// Behaviour knobs, fixed at construction.
config : CacheConfig,
/// Hit / miss / eviction counters.
metrics : Arc< RwLock< CacheMetrics > >,
/// Running sum of `size_bytes` across stored entries.
current_memory : Arc< RwLock< usize > >,
}
impl PerformanceCache
{
  /// Create an empty cache configured by `config`.
  #[ inline ]
  #[ must_use ]
  pub fn new( config : CacheConfig ) -> Self
  {
    Self
    {
      storage : Arc::new( RwLock::new( HashMap::with_capacity( config.max_entries ) ) ),
      config,
      metrics : Arc::new( RwLock::new( CacheMetrics::default() ) ),
      current_memory : Arc::new( RwLock::new( 0 ) ),
    }
  }

  /// Look up `key`, returning the decompressed payload on a hit.
  ///
  /// Expired entries are dropped lazily on access and counted as misses.
  /// Entries whose payload fails to decompress are evicted and also counted
  /// as misses, so `hits + misses` always equals the number of lookups
  /// (the original code left such lookups uncounted).
  #[ inline ]
  pub async fn get( &self, key : &CacheKey ) -> Option< Vec< u8 > >
  {
    let start_time = Instant::now();
    // Lock order throughout this impl : storage -> current_memory -> metrics.
    let mut storage = self.storage.write().await;
    // Lazy expiry : drop the entry once its deadline has passed.
    let expired = storage.get( key ).map_or( false, CachedResponse::is_expired );
    if expired
    {
      if let Some( stale ) = storage.remove( key )
      {
        let mut memory = self.current_memory.write().await;
        *memory = memory.saturating_sub( stale.size_bytes );
      }
      self.record_miss().await;
      return None;
    }
    if let Some( entry ) = storage.get_mut( key )
    {
      match entry.get_data()
      {
        Ok( data ) =>
        {
          let mut metrics = self.metrics.write().await;
          metrics.hits += 1;
          // `as_micros` is u128; clamp rather than truncate on overflow.
          let elapsed_micros = u64::try_from( start_time.elapsed().as_micros() ).unwrap_or( u64::MAX );
          // Cheap smoothing : midpoint of previous average and the new sample.
          // saturating_add avoids a debug-build overflow panic on extreme values.
          metrics.avg_hit_time_us = metrics.avg_hit_time_us.saturating_add( elapsed_micros ) / 2;
          metrics.update_hit_ratio();
          Some( data )
        }
        Err( _ ) =>
        {
          // Payload failed to decompress : the entry is unusable, evict it
          // and account the lookup as a miss.
          let size_bytes = entry.size_bytes;
          storage.remove( key );
          let mut memory = self.current_memory.write().await;
          *memory = memory.saturating_sub( size_bytes );
          self.record_miss().await;
          None
        }
      }
    }
    else
    {
      self.record_miss().await;
      None
    }
  }

  /// Insert `data` under `key` using the configured default TTL.
  #[ inline ]
  pub async fn put( &self, key : CacheKey, data : Vec< u8 >, content_type : String )
  {
    self.put_with_ttl( key, data, content_type, self.config.default_ttl ).await;
  }

  /// Insert `data` under `key` with an explicit TTL.
  ///
  /// Least-recently-used entries are evicted first when either the memory
  /// budget or the `max_entries` cap would be exceeded. Replacing an existing
  /// key reclaims its old size before the new size is charged.
  #[ inline ]
  pub async fn put_with_ttl( &self, key : CacheKey, data : Vec< u8 >, content_type : String, ttl : Duration )
  {
    let cached_response = CachedResponse::new( data, ttl, content_type, self.config.enable_compression );
    // NOTE(review) : capacity is checked before the storage lock is taken, so
    // a concurrent put can briefly overshoot the budget; best-effort by design.
    self.ensure_capacity( cached_response.size_bytes ).await;
    let mut storage = self.storage.write().await;
    let mut memory = self.current_memory.write().await;
    if let Some( old_response ) = storage.remove( &key )
    {
      *memory = memory.saturating_sub( old_response.size_bytes );
    }
    *memory += cached_response.size_bytes;
    storage.insert( key, cached_response );
  }

  /// Trigger LRU eviction when storing `needed_bytes` more would exceed the
  /// memory budget, or when the entry count has reached `max_entries`.
  /// (The entry cap was previously a capacity hint only and never enforced.)
  async fn ensure_capacity( &self, needed_bytes : usize )
  {
    let current_memory = *self.current_memory.read().await;
    let over_memory = current_memory.saturating_add( needed_bytes ) > self.config.max_memory_bytes;
    let at_entry_cap = self.storage.read().await.len() >= self.config.max_entries;
    if over_memory || at_entry_cap
    {
      self.evict_lru( needed_bytes ).await;
    }
  }

  /// Evict least-recently-used entries until at least `needed_bytes` have
  /// been freed and the entry count is below `max_entries`.
  async fn evict_lru( &self, needed_bytes : usize )
  {
    let mut storage = self.storage.write().await;
    let mut memory = self.current_memory.write().await;
    // Snapshot (key, last_accessed) pairs and order oldest-first.
    let mut candidates : Vec< _ > = storage.iter().map( | ( k, v ) | ( k.clone(), v.last_accessed ) ).collect();
    candidates.sort_unstable_by_key( | ( _, last_accessed ) | *last_accessed );
    let mut freed_bytes = 0;
    let mut evicted : u64 = 0;
    for ( key, _ ) in candidates
    {
      let enough_bytes = freed_bytes >= needed_bytes;
      let under_entry_cap = storage.len() < self.config.max_entries;
      if enough_bytes && under_entry_cap
      {
        break;
      }
      if let Some( response ) = storage.remove( &key )
      {
        freed_bytes += response.size_bytes;
        *memory = memory.saturating_sub( response.size_bytes );
        evicted += 1;
      }
    }
    // Take the metrics lock once, not once per evicted entry.
    if evicted > 0
    {
      let mut metrics = self.metrics.write().await;
      metrics.evictions += evicted;
    }
  }

  /// Count one miss and refresh the derived hit ratio.
  async fn record_miss( &self )
  {
    let mut metrics = self.metrics.write().await;
    metrics.misses += 1;
    metrics.update_hit_ratio();
  }

  /// Snapshot the current metrics, with `memory_usage` filled in.
  #[ inline ]
  pub async fn metrics( &self ) -> CacheMetrics
  {
    let mut result = self.metrics.read().await.clone();
    result.memory_usage = *self.current_memory.read().await;
    result
  }

  /// Drop every entry and reset the memory accounting.
  #[ inline ]
  pub async fn clear( &self )
  {
    let mut storage = self.storage.write().await;
    let mut memory = self.current_memory.write().await;
    storage.clear();
    *memory = 0;
  }

  /// Eagerly remove every entry whose TTL has elapsed.
  #[ inline ]
  pub async fn cleanup_expired( &self )
  {
    let mut storage = self.storage.write().await;
    let mut memory = self.current_memory.write().await;
    let now = Instant::now();
    storage.retain( | _, response |
    {
      let keep = response.expires_at > now;
      if !keep
      {
        *memory = memory.saturating_sub( response.size_bytes );
      }
      keep
    } );
  }
}
}
// Re-export the cache API from `mod private` through the crate's
// `mod_interface` facade, at `exposed` visibility.
mod_interface!
{
exposed use
{
CacheConfig,
CacheKey,
CachedResponse,
CacheMetrics,
PerformanceCache,
};
}