use std::time::Duration;
use super::backend::{BackendStats, CacheBackend, CacheError, CacheResult};
use super::invalidation::EntityTag;
use super::key::{CacheKey, KeyPattern};
/// Configuration for a Redis-backed cache: connection settings, pooling,
/// key namespacing, and default entry lifetime.
#[derive(Debug, Clone)]
pub struct RedisCacheConfig {
/// Redis server URL, e.g. `redis://localhost:6379`.
pub url: String,
/// Maximum number of pooled connections.
pub pool_size: u32,
/// Timeout when establishing a connection.
pub connection_timeout: Duration,
/// Timeout applied to individual Redis commands.
pub command_timeout: Duration,
/// Prefix prepended to every cache key (see `full_key`).
pub key_prefix: String,
/// TTL used by `set` when the caller passes no explicit TTL; `None` means no expiry.
pub default_ttl: Option<Duration>,
/// Whether to connect in Redis Cluster mode.
pub cluster_mode: bool,
/// Logical Redis database index to select.
pub database: u8,
/// Whether to use TLS for the connection.
pub tls: bool,
/// Optional username for AUTH.
pub username: Option<String>,
/// Optional password for AUTH.
pub password: Option<String>,
}
impl Default for RedisCacheConfig {
fn default() -> Self {
Self {
url: "redis://localhost:6379".to_string(),
pool_size: 10,
connection_timeout: Duration::from_secs(5),
command_timeout: Duration::from_secs(2),
key_prefix: "prax:cache".to_string(),
default_ttl: Some(Duration::from_secs(300)),
cluster_mode: false,
database: 0,
tls: false,
username: None,
password: None,
}
}
}
impl RedisCacheConfig {
    /// Creates a configuration for the given Redis URL; every other setting
    /// is taken from [`Default`].
    pub fn new(url: impl Into<String>) -> Self {
        let mut config = Self::default();
        config.url = url.into();
        config
    }

    /// Builder: sets the connection pool size.
    pub fn with_pool_size(mut self, size: u32) -> Self {
        self.pool_size = size;
        self
    }

    /// Builder: sets the key prefix used to namespace all cache keys.
    pub fn with_prefix(mut self, prefix: impl Into<String>) -> Self {
        self.key_prefix = prefix.into();
        self
    }

    /// Builder: sets the default TTL applied when `set` gets no explicit TTL.
    pub fn with_ttl(mut self, ttl: Duration) -> Self {
        self.default_ttl = Some(ttl);
        self
    }

    /// Builder: enables Redis Cluster mode.
    pub fn cluster(mut self) -> Self {
        self.cluster_mode = true;
        self
    }

    /// Builder: selects the logical Redis database index.
    pub fn database(mut self, db: u8) -> Self {
        self.database = db;
        self
    }

    /// Builder: sets AUTH credentials (username is optional, per Redis ACLs).
    pub fn auth(mut self, username: Option<String>, password: impl Into<String>) -> Self {
        self.username = username;
        self.password = Some(password.into());
        self
    }

    /// Renders the string actually stored in Redis: `"{prefix}:{key}"`.
    fn full_key(&self, key: &CacheKey) -> String {
        format!("{prefix}:{key}", prefix = self.key_prefix, key = key.as_str())
    }
}
/// Handle for talking to a Redis server described by a [`RedisCacheConfig`].
///
/// NOTE(review): this is currently a stub — no socket is opened and every
/// command returns an empty/default value. Confirm it is intended to be
/// replaced by a real client implementation.
#[derive(Clone)]
pub struct RedisConnection {
// Settings this connection was created with.
config: RedisCacheConfig,
}
impl RedisConnection {
pub async fn new(config: RedisCacheConfig) -> CacheResult<Self> {
Ok(Self { config })
}
pub fn config(&self) -> &RedisCacheConfig {
&self.config
}
async fn execute<T>(&self, _cmd: &str, _args: &[&str]) -> CacheResult<T>
where
T: Default,
{
Ok(T::default())
}
pub async fn get(&self, key: &str) -> CacheResult<Option<Vec<u8>>> {
let _ = key;
Ok(None)
}
pub async fn set(&self, key: &str, value: &[u8], ttl: Option<Duration>) -> CacheResult<()> {
let _ = (key, value, ttl);
Ok(())
}
pub async fn del(&self, key: &str) -> CacheResult<bool> {
let _ = key;
Ok(false)
}
pub async fn exists(&self, key: &str) -> CacheResult<bool> {
let _ = key;
Ok(false)
}
pub async fn keys(&self, pattern: &str) -> CacheResult<Vec<String>> {
let _ = pattern;
Ok(Vec::new())
}
pub async fn mget(&self, keys: &[String]) -> CacheResult<Vec<Option<Vec<u8>>>> {
Ok(vec![None; keys.len()])
}
pub async fn mset(&self, pairs: &[(String, Vec<u8>)]) -> CacheResult<()> {
let _ = pairs;
Ok(())
}
pub async fn flush(&self) -> CacheResult<()> {
Ok(())
}
pub async fn dbsize(&self) -> CacheResult<usize> {
Ok(0)
}
pub async fn info(&self) -> CacheResult<String> {
Ok(String::new())
}
pub async fn scan(&self, pattern: &str, count: usize) -> CacheResult<Vec<String>> {
let _ = (pattern, count);
Ok(Vec::new())
}
pub fn pipeline(&self) -> RedisPipeline {
RedisPipeline::new(self.clone())
}
}
/// Builder for batching multiple Redis commands into one round trip.
///
/// NOTE(review): `execute` currently returns stub results and never sends
/// the queued commands over `conn` — confirm before relying on pipelining.
pub struct RedisPipeline {
// Connection the pipeline is bound to (currently unused by `execute`).
conn: RedisConnection,
// Commands queued in submission order.
commands: Vec<PipelineCommand>,
}
/// A single queued pipeline operation.
enum PipelineCommand {
/// GET `key`.
Get(String),
/// SET `key` to the byte payload, with an optional TTL.
Set(String, Vec<u8>, Option<Duration>),
/// DEL `key`.
Del(String),
}
impl RedisPipeline {
fn new(conn: RedisConnection) -> Self {
Self {
conn,
commands: Vec::new(),
}
}
pub fn get(mut self, key: impl Into<String>) -> Self {
self.commands.push(PipelineCommand::Get(key.into()));
self
}
pub fn set(mut self, key: impl Into<String>, value: Vec<u8>, ttl: Option<Duration>) -> Self {
self.commands
.push(PipelineCommand::Set(key.into(), value, ttl));
self
}
pub fn del(mut self, key: impl Into<String>) -> Self {
self.commands.push(PipelineCommand::Del(key.into()));
self
}
pub async fn execute(self) -> CacheResult<Vec<PipelineResult>> {
Ok(vec![PipelineResult::Ok; self.commands.len()])
}
}
/// Outcome of one command in an executed pipeline, in submission order.
#[derive(Debug, Clone)]
pub enum PipelineResult {
/// The command succeeded and carries no payload (e.g. SET).
Ok,
/// Payload of a GET; `None` means the key was absent.
Value(Option<Vec<u8>>),
/// The command failed; the string describes the error.
Error(String),
}
/// Redis-backed cache implementing [`CacheBackend`].
///
/// Values are serialized as JSON and stored under keys namespaced by
/// `config.key_prefix`.
#[derive(Clone)]
pub struct RedisCache {
// Connection used for all commands.
conn: RedisConnection,
// Kept alongside `conn` for prefixing and default-TTL lookups.
config: RedisCacheConfig,
}
impl RedisCache {
pub async fn new(config: RedisCacheConfig) -> CacheResult<Self> {
let conn = RedisConnection::new(config.clone()).await?;
Ok(Self { conn, config })
}
pub async fn from_url(url: &str) -> CacheResult<Self> {
Self::new(RedisCacheConfig::new(url)).await
}
pub fn connection(&self) -> &RedisConnection {
&self.conn
}
pub fn config(&self) -> &RedisCacheConfig {
&self.config
}
fn full_key(&self, key: &CacheKey) -> String {
self.config.full_key(key)
}
}
impl CacheBackend for RedisCache {
    /// Fetches and JSON-decodes the value stored under `key`.
    ///
    /// Returns `Ok(None)` on a cache miss; a payload that fails to decode
    /// surfaces as [`CacheError::Deserialization`].
    async fn get<T>(&self, key: &CacheKey) -> CacheResult<Option<T>>
    where
        T: serde::de::DeserializeOwned,
    {
        let full_key = self.full_key(key);
        match self.conn.get(&full_key).await? {
            Some(data) => {
                let value: T = serde_json::from_slice(&data)
                    .map_err(|e| CacheError::Deserialization(e.to_string()))?;
                Ok(Some(value))
            }
            None => Ok(None),
        }
    }

    /// JSON-encodes `value` and stores it under `key`.
    ///
    /// When `ttl` is `None`, the config's `default_ttl` (if any) is applied,
    /// so entries expire by default unless the caller opts out via config.
    async fn set<T>(&self, key: &CacheKey, value: &T, ttl: Option<Duration>) -> CacheResult<()>
    where
        T: serde::Serialize + Sync,
    {
        let full_key = self.full_key(key);
        let data =
            serde_json::to_vec(value).map_err(|e| CacheError::Serialization(e.to_string()))?;
        let effective_ttl = ttl.or(self.config.default_ttl);
        self.conn.set(&full_key, &data, effective_ttl).await
    }

    /// Removes `key`; returns whether a value was actually deleted.
    async fn delete(&self, key: &CacheKey) -> CacheResult<bool> {
        let full_key = self.full_key(key);
        self.conn.del(&full_key).await
    }

    /// Checks for the presence of `key` without fetching its value.
    async fn exists(&self, key: &CacheKey) -> CacheResult<bool> {
        let full_key = self.full_key(key);
        self.conn.exists(&full_key).await
    }

    /// Batch get: one `Option<T>` per input key, preserving order.
    ///
    /// Fails on the first payload that cannot be deserialized (the whole
    /// call returns `Err`, not a partial result).
    async fn get_many<T>(&self, keys: &[CacheKey]) -> CacheResult<Vec<Option<T>>>
    where
        T: serde::de::DeserializeOwned,
    {
        let full_keys: Vec<String> = keys.iter().map(|k| self.full_key(k)).collect();
        let results = self.conn.mget(&full_keys).await?;
        results
            .into_iter()
            .map(|opt| {
                opt.map(|data| {
                    serde_json::from_slice(&data)
                        .map_err(|e| CacheError::Deserialization(e.to_string()))
                })
                .transpose()
            })
            .collect()
    }

    /// Deletes every key matching `pattern` (after prefixing) and returns
    /// the number of keys removed.
    ///
    /// NOTE(review): `scan` is invoked once with a count hint of 1000, so a
    /// larger match set may only be partially invalidated — confirm whether
    /// the scan should loop until the cursor is exhausted.
    async fn invalidate_pattern(&self, pattern: &KeyPattern) -> CacheResult<u64> {
        let full_pattern = format!("{}:{}", self.config.key_prefix, pattern.to_redis_pattern());
        let keys = self.conn.scan(&full_pattern, 1000).await?;
        if keys.is_empty() {
            return Ok(0);
        }
        let mut deleted = 0u64;
        for key in keys {
            if self.conn.del(&key).await? {
                deleted += 1;
            }
        }
        Ok(deleted)
    }

    /// Tag-based invalidation.
    ///
    /// TODO: not implemented. Tag index sets (keys of the form
    /// `{prefix}:tag:{tag}`) are not maintained anywhere yet, so there is
    /// nothing to look up or delete; this always returns `Ok(0)`.
    /// (Replaces a no-op loop that built each tag key, discarded it, and
    /// added zero to the total.)
    async fn invalidate_tags(&self, tags: &[EntityTag]) -> CacheResult<u64> {
        let _ = tags;
        Ok(0)
    }

    /// Removes every entry in the current database.
    async fn clear(&self) -> CacheResult<()> {
        self.conn.flush().await
    }

    /// Number of keys currently in the database.
    async fn len(&self) -> CacheResult<usize> {
        self.conn.dbsize().await
    }

    /// Snapshot of backend statistics: entry count, raw server INFO text,
    /// and the configured pool size as the connection count. Memory usage
    /// is not reported by this backend.
    async fn stats(&self) -> CacheResult<BackendStats> {
        let info = self.conn.info().await?;
        let entries = self.conn.dbsize().await?;
        Ok(BackendStats {
            entries,
            memory_bytes: None,
            connections: Some(self.config.pool_size as usize),
            info: Some(info),
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Builder methods should override the corresponding default fields.
    #[test]
    fn test_redis_config() {
        let cfg = RedisCacheConfig::new("redis://localhost:6379")
            .with_pool_size(20)
            .with_prefix("myapp")
            .with_ttl(Duration::from_secs(600));

        assert_eq!(cfg.pool_size, 20);
        assert_eq!(cfg.key_prefix, "myapp");
        assert_eq!(cfg.default_ttl, Some(Duration::from_secs(600)));
    }

    /// `full_key` joins the configured prefix and the rendered cache key.
    #[test]
    fn test_full_key() {
        let cfg = RedisCacheConfig::new("redis://localhost").with_prefix("app:cache");
        let key = CacheKey::new("User", "id:123");
        assert_eq!(cfg.full_key(&key), "app:cache:prax:User:id:123");
    }

    /// Building a cache from the default config succeeds and keeps settings.
    #[tokio::test]
    async fn test_redis_cache_creation() {
        let cache = RedisCache::new(RedisCacheConfig::default()).await.unwrap();
        assert_eq!(cache.config().pool_size, 10);
    }
}