use crate::backend::{CacheBackend, MemoryBackend};
use crate::error::Result;
use crate::serialization::json::JsonSerializer;
use crate::serialization::Serializer;
use crate::traits::{CacheKey, Cacheable};
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use crate::serialization::SerializerEnum;
use async_trait::async_trait;
use std::any::Any;
/// Adapter exposing a raw byte-oriented [`CacheBackend`] through the
/// `crate::client::CacheOps` trait, carrying the serializer that typed
/// callers should use with the stored bytes.
pub(crate) struct BackendCacheOps {
    // Shared handle to the underlying store (memory, Redis, tiered, ...).
    pub(crate) backend: Arc<dyn CacheBackend>,
    // Serializer reported to callers via `CacheOps::serializer()`.
    pub(crate) serializer: SerializerEnum,
}
#[async_trait]
impl crate::client::CacheOps for BackendCacheOps {
async fn get_bytes(&self, key: &str) -> Result<Option<Vec<u8>>> {
self.backend.get(key).await
}
async fn set_bytes(&self, key: &str, value: Vec<u8>, _ttl: Option<u64>) -> Result<()> {
self.backend.set(key, value, None).await
}
async fn delete(&self, key: &str) -> Result<()> {
self.backend.delete(key).await
}
async fn clear_l1(&self) -> Result<()> {
self.backend.clear().await
}
async fn clear_l2(&self) -> Result<()> {
self.backend.clear().await
}
async fn shutdown(&self) -> Result<()> {
self.backend.close().await
}
fn serializer(&self) -> &SerializerEnum {
&self.serializer
}
fn as_any(&self) -> &dyn Any {
self
}
fn into_any_arc(self: Arc<Self>) -> Arc<dyn Any + Send + Sync> {
self
}
}
/// Wrap a backend and serializer in a trait-object [`crate::client::CacheOps`]
/// handle suitable for sharing across threads.
pub fn create_cache_ops_wrapper(
    backend: Arc<dyn CacheBackend>,
    serializer: SerializerEnum,
) -> Arc<dyn crate::client::CacheOps + Send + Sync> {
    let ops = BackendCacheOps { backend, serializer };
    Arc::new(ops)
}
/// Typed, async cache over an arbitrary [`CacheBackend`].
///
/// `K` is the key type (converted to a string via `CacheKey`) and `V` the
/// value type (serialized to bytes for storage). The type parameters are
/// phantom: only the backend holds runtime state.
pub struct Cache<K, V> {
    // Shared, type-erased storage backend.
    backend: Arc<dyn CacheBackend>,
    // Ties `K`/`V` to the cache type without storing either.
    _phantom: std::marker::PhantomData<(K, V)>,
}
impl<K, V> std::fmt::Debug for Cache<K, V>
where
K: CacheKey,
V: Cacheable,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Cache")
.field("backend", &"<CacheBackend>")
.finish()
}
}
impl<K, V> Cache<K, V>
where
K: CacheKey,
V: Cacheable,
{
pub async fn new() -> Result<Self> {
let backend = MemoryBackend::new();
Ok(Self {
backend: Arc::new(backend),
_phantom: std::marker::PhantomData,
})
}
pub(crate) fn new_with_backend(backend: Arc<dyn CacheBackend>) -> Self {
Self {
backend,
_phantom: std::marker::PhantomData,
}
}
pub async fn memory() -> Result<Self> {
Self::new().await
}
pub async fn redis(connection_string: &str) -> Result<Self> {
let backend = crate::backend::RedisBackend::new(connection_string).await?;
Ok(Self {
backend: Arc::new(backend),
_phantom: std::marker::PhantomData,
})
}
pub async fn tiered(l1_capacity: u64, l2_connection_string: &str) -> Result<Self> {
let l1 = MemoryBackend::builder().capacity(l1_capacity).build();
let l2 = crate::backend::RedisBackend::new(l2_connection_string).await?;
let backend = crate::backend::TieredBackend::new(l1, l2);
Ok(Self {
backend: Arc::new(backend),
_phantom: std::marker::PhantomData,
})
}
pub fn builder() -> crate::builder::CacheBuilder<K, V> {
crate::builder::CacheBuilder::default()
}
pub async fn get(&self, key: &K) -> Result<Option<V>> {
let key_str = key.to_key_string();
let bytes = self.backend.get(&key_str).await?;
match bytes {
Some(data) => {
let serializer = JsonSerializer::new();
let value: V = serializer.deserialize(&data)?;
Ok(Some(value))
}
None => Ok(None),
}
}
pub async fn set(&self, key: &K, value: &V) -> Result<()> {
self.set_with_ttl(key, value, None).await
}
pub async fn set_with_ttl(&self, key: &K, value: &V, ttl: Option<Duration>) -> Result<()> {
let key_str = key.to_key_string();
let serializer = JsonSerializer::new();
let bytes = serializer.serialize(value)?;
self.backend.set(&key_str, bytes, ttl).await
}
pub async fn delete(&self, key: &K) -> Result<()> {
let key_str = key.to_key_string();
self.backend.delete(&key_str).await
}
pub async fn exists(&self, key: &K) -> Result<bool> {
let key_str = key.to_key_string();
self.backend.exists(&key_str).await
}
pub async fn get_or<F, Fut>(&self, key: &K, fallback: F) -> Result<V>
where
F: FnOnce() -> Fut,
Fut: std::future::Future<Output = Result<V>>,
{
if let Some(value) = self.get(key).await? {
return Ok(value);
}
let value = fallback().await?;
self.set(key, &value).await?;
Ok(value)
}
pub async fn set_many<'a, I>(&self, items: I) -> Result<()>
where
K: 'a,
V: 'a,
I: IntoIterator<Item = (&'a K, &'a V)>,
{
for (key, value) in items {
self.set(key, value).await?;
}
Ok(())
}
pub async fn get_many<'a, I>(&self, keys: I) -> Result<HashMap<String, V>>
where
K: 'a,
I: IntoIterator<Item = &'a K>,
{
let mut result = HashMap::new();
for key in keys {
if let Some(value) = self.get(key).await? {
result.insert(key.to_key_string(), value);
}
}
Ok(result)
}
pub async fn delete_many<'a, I>(&self, keys: I) -> Result<()>
where
K: 'a,
I: IntoIterator<Item = &'a K>,
{
for key in keys {
self.delete(key).await?;
}
Ok(())
}
pub async fn clear(&self) -> Result<()> {
self.backend.clear().await
}
pub async fn stats(&self) -> Result<HashMap<String, String>> {
self.backend.stats().await
}
pub async fn health_check(&self) -> Result<bool> {
self.backend.health_check().await
}
pub async fn shutdown(&self) -> Result<()> {
self.backend.close().await
}
pub fn to_cache_ops(&self) -> Arc<dyn crate::client::CacheOps + Send + Sync> {
create_cache_ops_wrapper(
self.backend.clone(),
SerializerEnum::Json(crate::serialization::json::JsonSerializer::new()),
)
}
pub async fn register_for_macro(&self, service_name: &str) {
use crate::internal::__internal_register_cache;
let cache_ops = create_cache_ops_wrapper(
self.backend.clone(),
SerializerEnum::Json(crate::serialization::json::JsonSerializer::new()),
);
__internal_register_cache(service_name, cache_ops);
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use serde::{Deserialize, Serialize};

    #[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
    struct TestValue {
        id: u64,
        name: String,
    }

    /// Fixture helper: build a `TestValue` in one call.
    fn tv(id: u64, name: &str) -> TestValue {
        TestValue {
            id,
            name: name.to_string(),
        }
    }

    #[tokio::test]
    async fn test_cache_basic() {
        let cache: Cache<String, TestValue> = Cache::new().await.unwrap();
        let key = "key1".to_string();
        let missing = "key2".to_string();
        let expected = tv(1, "test");

        cache.set(&key, &expected).await.unwrap();
        assert_eq!(cache.get(&key).await.unwrap(), Some(expected));
        assert!(cache.exists(&key).await.unwrap());
        assert!(!cache.exists(&missing).await.unwrap());

        cache.delete(&key).await.unwrap();
        assert!(!cache.exists(&key).await.unwrap());
    }

    #[tokio::test]
    async fn test_cache_get_or() {
        use crate::error::CacheError;
        let cache: Cache<String, TestValue> = Cache::new().await.unwrap();
        let key = "key1".to_string();
        let expected = tv(1, "test");

        // First call misses and populates via the fallback.
        let first = cache
            .get_or(&key, || async { Ok(tv(1, "test")) })
            .await
            .unwrap();
        assert_eq!(first, expected);

        // Second call hits the cache; the failing fallback must not run.
        let second = cache
            .get_or(&key, || async {
                Err(CacheError::NotFound("should not be called".to_string()))
            })
            .await
            .unwrap();
        assert_eq!(second, expected);
    }

    #[tokio::test]
    async fn test_cache_batch_operations() {
        let cache: Cache<String, TestValue> = Cache::new().await.unwrap();
        let k1 = "key1".to_string();
        let k2 = "key2".to_string();
        let k3 = "key3".to_string();
        let v1 = tv(1, "test1");
        let v2 = tv(2, "test2");

        cache.set_many([(&k1, &v1), (&k2, &v2)]).await.unwrap();

        // k3 was never written, so only two entries come back.
        let fetched = cache.get_many([&k1, &k2, &k3]).await.unwrap();
        assert_eq!(fetched.len(), 2);
        assert_eq!(fetched.get("key1"), Some(&v1));
        assert_eq!(fetched.get("key2"), Some(&v2));

        cache.delete_many([&k1, &k2]).await.unwrap();
        assert!(!cache.exists(&k1).await.unwrap());
        assert!(!cache.exists(&k2).await.unwrap());
    }

    #[tokio::test]
    async fn test_cache_clear() {
        let cache: Cache<String, TestValue> = Cache::new().await.unwrap();
        let key = "key1".to_string();

        cache.set(&key, &tv(1, "test")).await.unwrap();
        cache.clear().await.unwrap();
        assert!(!cache.exists(&key).await.unwrap());
    }

    #[tokio::test]
    async fn test_cache_stats() {
        let cache: Cache<String, TestValue> = Cache::new().await.unwrap();
        let stats = cache.stats().await.unwrap();
        assert_eq!(stats.get("type"), Some(&"memory".to_string()));
    }
}