pub struct GenericCache { /* private fields */ }
Generic cache for storing arbitrary data with LRU eviction
Implementations
impl GenericCache
impl GenericCache
pub async fn with_subdirectory(subdir: &str) -> Result<Self>
pub async fn with_subdirectory(subdir: &str) -> Result<Self>
Create a new generic cache with a custom subdirectory
pub async fn with_limits(
max_size_bytes: Option<u64>,
max_entries: Option<usize>,
) -> Result<Self>
pub async fn with_limits( max_size_bytes: Option<u64>, max_entries: Option<usize>, ) -> Result<Self>
Create a new cache with size and entry limits
pub async fn with_config(
subdir: Option<&str>,
max_size_bytes: Option<u64>,
max_entries: Option<usize>,
) -> Result<Self>
pub async fn with_config( subdir: Option<&str>, max_size_bytes: Option<u64>, max_entries: Option<usize>, ) -> Result<Self>
Create a cache with full configuration
pub async fn with_config_and_path(
base_dir: PathBuf,
max_size_bytes: Option<u64>,
max_entries: Option<usize>,
stats: Option<Arc<CacheStats>>,
) -> Result<Self>
pub async fn with_config_and_path( base_dir: PathBuf, max_size_bytes: Option<u64>, max_entries: Option<usize>, stats: Option<Arc<CacheStats>>, ) -> Result<Self>
Create a cache with full configuration and custom path
pub fn stats(&self) -> Arc<CacheStats>
pub fn stats(&self) -> Arc<CacheStats>
Get cache statistics
pub fn current_size(&self) -> u64
pub fn current_size(&self) -> u64
Get current cache size in bytes
pub fn current_entries(&self) -> usize
pub fn current_entries(&self) -> usize
Get current number of entries
pub async fn read_to_writer<W>(&self, key: &str, writer: W) -> Result<u64>
where
W: AsyncWrite + Unpin,
pub async fn read_to_writer<W>(&self, key: &str, writer: W) -> Result<u64>
where
W: AsyncWrite + Unpin,
Stream data from cache to a writer (memory-efficient for large cached files)
pub async fn warm_cache(&self, keys: &[String]) -> Result<()>
pub async fn warm_cache(&self, keys: &[String]) -> Result<()>
Warm cache with a list of keys by pre-loading them into LRU order
pub fn get_lru_keys(&self) -> Vec<String>
pub fn get_lru_keys(&self) -> Vec<String>
Get LRU ordered list of cache keys (least recently used first)
pub fn get_mru_keys(&self, limit: usize) -> Vec<String>
pub fn get_mru_keys(&self, limit: usize) -> Vec<String>
Get most recently used keys (up to limit)
pub async fn write_batch(&self, entries: &[(String, Vec<u8>)]) -> Result<()>
pub async fn write_batch(&self, entries: &[(String, Vec<u8>)]) -> Result<()>
Write multiple entries to the cache in parallel
This is more efficient than calling write() multiple times sequentially.
pub async fn read_batch(&self, keys: &[String]) -> Vec<Result<Vec<u8>>>
pub async fn read_batch(&self, keys: &[String]) -> Vec<Result<Vec<u8>>>
Read multiple entries from the cache in parallel
Returns a vector of results in the same order as the input keys. Failed reads will be represented as Err values in the vector.
pub async fn delete_batch(&self, keys: &[String]) -> Result<()>
pub async fn delete_batch(&self, keys: &[String]) -> Result<()>
Delete multiple entries from the cache in parallel
This is more efficient than calling delete() multiple times sequentially.
pub async fn exists_batch(&self, keys: &[String]) -> Vec<bool>
pub async fn exists_batch(&self, keys: &[String]) -> Vec<bool>
Check existence of multiple entries in parallel
Returns a vector of booleans in the same order as the input keys.
pub async fn read_streaming<W>(&self, key: &str, writer: W) -> Result<u64>
where
W: AsyncWrite + Unpin,
pub async fn read_streaming<W>(&self, key: &str, writer: W) -> Result<u64>
where
W: AsyncWrite + Unpin,
Stream data from cache to a writer
This is more memory-efficient than read() for large files.
pub async fn write_streaming<R>(&self, key: &str, reader: R) -> Result<u64>
pub async fn write_streaming<R>(&self, key: &str, reader: R) -> Result<u64>
Stream data from a reader to cache
This is more memory-efficient than write() for large data.
pub async fn read_chunked<F>(&self, key: &str, callback: F) -> Result<u64>
pub async fn read_chunked<F>(&self, key: &str, callback: F) -> Result<u64>
Process cache data in chunks without loading it all into memory
The callback is called for each chunk read from the cache file.
pub async fn write_chunked<I>(&self, key: &str, chunks: I) -> Result<u64>
pub async fn write_chunked<I>(&self, key: &str, chunks: I) -> Result<u64>
Write data to cache in chunks from an iterator
This allows writing large data without keeping it all in memory.
pub async fn copy(&self, from_key: &str, to_key: &str) -> Result<u64>
pub async fn copy(&self, from_key: &str, to_key: &str) -> Result<u64>
Copy data between cache entries efficiently
This is more efficient than read + write for large files.
pub async fn size(&self, key: &str) -> Result<u64>
pub async fn size(&self, key: &str) -> Result<u64>
Get the size of a cache entry without reading it
pub async fn read_streaming_buffered<W>(
&self,
key: &str,
writer: W,
buffer_size: usize,
) -> Result<u64>
where
W: AsyncWrite + Unpin,
pub async fn read_streaming_buffered<W>(
&self,
key: &str,
writer: W,
buffer_size: usize,
) -> Result<u64>
where
W: AsyncWrite + Unpin,
Stream data from cache with a custom buffer size
Useful for optimizing I/O based on expected data size.