use crate::domain::error::{CacheError, Result, StygianError};
use crate::ports::CachePort;
use async_trait::async_trait;
use deadpool_redis::{Config as PoolConfig, Pool, Runtime};
use redis::AsyncCommands;
use std::time::Duration;
/// Configuration for constructing a [`RedisCache`].
#[derive(Debug, Clone)]
pub struct RedisCacheConfig {
    /// Redis connection URL, e.g. `redis://127.0.0.1:6379`.
    pub url: String,
    /// Optional namespace prepended verbatim to every key (include any
    /// separator yourself, e.g. `"ns:"`).
    pub key_prefix: Option<String>,
    /// TTL applied to `set` calls that do not pass an explicit TTL.
    /// `None` means such entries never expire.
    pub default_ttl: Option<Duration>,
    /// Maximum number of pooled connections.
    pub pool_size: usize,
}
impl Default for RedisCacheConfig {
fn default() -> Self {
Self {
url: "redis://127.0.0.1:6379".into(),
key_prefix: None,
default_ttl: None,
pool_size: 8,
}
}
}
/// Redis-backed cache adapter using a deadpool connection pool.
///
/// Keys are optionally namespaced with `key_prefix`; writes without an
/// explicit TTL fall back to `default_ttl` (see `CachePort::set`).
pub struct RedisCache {
    // Shared async connection pool; connections are checked out per call.
    pool: Pool,
    // Prefix applied to every key via `full_key`.
    key_prefix: Option<String>,
    // Fallback TTL used by `set` when the caller passes `None`.
    default_ttl: Option<Duration>,
}
impl RedisCache {
pub fn new(config: RedisCacheConfig) -> Result<Self> {
let pool_cfg = PoolConfig::from_url(&config.url);
let pool = pool_cfg
.builder()
.map(|b| b.max_size(config.pool_size))
.map_err(|e| {
StygianError::Cache(CacheError::WriteFailed(format!(
"failed to build Redis pool: {e}"
)))
})?
.runtime(Runtime::Tokio1)
.build()
.map_err(|e| {
StygianError::Cache(CacheError::WriteFailed(format!(
"failed to build Redis pool: {e}"
)))
})?;
Ok(Self {
pool,
key_prefix: config.key_prefix,
default_ttl: config.default_ttl,
})
}
fn full_key(&self, key: &str) -> String {
self.key_prefix
.as_ref()
.map_or_else(|| key.to_string(), |prefix| format!("{prefix}{key}"))
}
pub async fn healthcheck(&self) -> Result<()> {
let mut conn = self.pool.get().await.map_err(|e| {
StygianError::Cache(CacheError::ReadFailed(format!("Redis pool error: {e}")))
})?;
redis::cmd("PING")
.query_async::<String>(&mut conn)
.await
.map_err(|e| {
StygianError::Cache(CacheError::ReadFailed(format!("Redis PING failed: {e}")))
})?;
Ok(())
}
}
#[async_trait]
impl CachePort for RedisCache {
    /// Fetches the value stored under `key`, or `None` when absent.
    async fn get(&self, key: &str) -> Result<Option<String>> {
        let namespaced = self.full_key(key);
        let mut conn = self.pool.get().await.map_err(|e| {
            StygianError::Cache(CacheError::ReadFailed(format!("Redis pool error: {e}")))
        })?;
        conn.get(&namespaced).await.map_err(|e| {
            StygianError::Cache(CacheError::ReadFailed(format!("Redis GET failed: {e}")))
        })
    }

    /// Stores `value` under `key`. A per-call `ttl` takes precedence over
    /// the instance-wide default; with neither, the key never expires.
    async fn set(&self, key: &str, value: String, ttl: Option<Duration>) -> Result<()> {
        let namespaced = self.full_key(key);
        let mut conn = self.pool.get().await.map_err(|e| {
            StygianError::Cache(CacheError::WriteFailed(format!("Redis pool error: {e}")))
        })?;
        if let Some(duration) = ttl.or(self.default_ttl) {
            // Clamp absurdly long durations instead of failing the conversion.
            let millis: u64 = duration.as_millis().try_into().unwrap_or(u64::MAX);
            redis::cmd("PSETEX")
                .arg(&namespaced)
                .arg(millis)
                .arg(&value)
                .query_async::<()>(&mut conn)
                .await
                .map_err(|e| {
                    StygianError::Cache(CacheError::WriteFailed(format!(
                        "Redis PSETEX failed: {e}"
                    )))
                })?;
        } else {
            conn.set::<_, _, ()>(&namespaced, &value).await.map_err(|e| {
                StygianError::Cache(CacheError::WriteFailed(format!("Redis SET failed: {e}")))
            })?;
        }
        Ok(())
    }

    /// Removes `key`; deleting a nonexistent key is not an error.
    async fn invalidate(&self, key: &str) -> Result<()> {
        let namespaced = self.full_key(key);
        let mut conn = self.pool.get().await.map_err(|e| {
            StygianError::Cache(CacheError::WriteFailed(format!("Redis pool error: {e}")))
        })?;
        conn.del::<_, ()>(&namespaced).await.map_err(|e| {
            StygianError::Cache(CacheError::WriteFailed(format!("Redis DEL failed: {e}")))
        })?;
        Ok(())
    }

    /// Reports whether `key` currently exists.
    async fn exists(&self, key: &str) -> Result<bool> {
        let namespaced = self.full_key(key);
        let mut conn = self.pool.get().await.map_err(|e| {
            StygianError::Cache(CacheError::ReadFailed(format!("Redis pool error: {e}")))
        })?;
        // EXISTS returns the number of matching keys; nonzero means present.
        let hits: u32 = conn.exists(&namespaced).await.map_err(|e| {
            StygianError::Cache(CacheError::ReadFailed(format!("Redis EXISTS failed: {e}")))
        })?;
        Ok(hits > 0)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // NOTE(review): the non-ignored tests below construct a RedisCache
    // without a running server, which implies pool creation is lazy —
    // confirmed by deadpool's design, but worth keeping in mind if the
    // pool library is swapped out.

    // Without a prefix, full_key must pass keys through unchanged.
    #[test]
    fn full_key_without_prefix() -> Result<()> {
        let cache = RedisCache::new(RedisCacheConfig {
            url: "redis://127.0.0.1:6379".into(),
            key_prefix: None,
            default_ttl: None,
            pool_size: 1,
        })?;
        assert_eq!(cache.full_key("abc"), "abc");
        Ok(())
    }

    // The prefix is prepended verbatim (no separator is inserted).
    #[test]
    fn full_key_with_prefix() -> Result<()> {
        let cache = RedisCache::new(RedisCacheConfig {
            url: "redis://127.0.0.1:6379".into(),
            key_prefix: Some("ns:".into()),
            default_ttl: None,
            pool_size: 1,
        })?;
        assert_eq!(cache.full_key("abc"), "ns:abc");
        Ok(())
    }

    // Pins the Default impl so config changes are deliberate.
    #[test]
    fn default_config_values() {
        let cfg = RedisCacheConfig::default();
        assert_eq!(cfg.url, "redis://127.0.0.1:6379");
        assert!(cfg.key_prefix.is_none());
        assert!(cfg.default_ttl.is_none());
        assert_eq!(cfg.pool_size, 8);
    }

    // An unparseable URL should be rejected at construction time, not
    // deferred until the first connection attempt.
    #[test]
    fn pool_creation_with_bad_url_fails() {
        let result = RedisCache::new(RedisCacheConfig {
            url: "not-a-url".into(),
            ..Default::default()
        });
        assert!(result.is_err());
    }

    // End-to-end round trip against a live server: set → get → exists →
    // invalidate → gone.
    #[tokio::test]
    #[ignore = "requires running Redis/Valkey (docker-compose up -d valkey)"]
    async fn integration_set_get_invalidate_cycle() -> Result<()> {
        let cache = RedisCache::new(RedisCacheConfig {
            url: "redis://127.0.0.1:6379".into(),
            key_prefix: Some("test:integ:".into()),
            ..Default::default()
        })?;
        cache.healthcheck().await?;
        let key = "integration_cycle";
        cache
            .set(key, "hello".into(), Some(Duration::from_secs(30)))
            .await?;
        let val = cache.get(key).await?;
        assert_eq!(val, Some("hello".into()));
        assert!(cache.exists(key).await?);
        cache.invalidate(key).await?;
        let val = cache.get(key).await?;
        assert_eq!(val, None);
        assert!(!cache.exists(key).await?);
        Ok(())
    }

    // A 200 ms TTL should make the key vanish by the 350 ms mark; the
    // margin absorbs scheduling jitter.
    #[tokio::test]
    #[ignore = "requires running Redis/Valkey (docker-compose up -d valkey)"]
    async fn integration_ttl_expiration() -> Result<()> {
        let cache = RedisCache::new(RedisCacheConfig {
            url: "redis://127.0.0.1:6379".into(),
            key_prefix: Some("test:ttl:".into()),
            ..Default::default()
        })?;
        let key = "short_lived";
        cache
            .set(key, "expires".into(), Some(Duration::from_millis(200)))
            .await?;
        assert_eq!(cache.get(key).await?, Some("expires".into()));
        tokio::time::sleep(Duration::from_millis(350)).await;
        assert_eq!(cache.get(key).await?, None);
        Ok(())
    }

    // Two caches with different prefixes must not see each other's values
    // even when they use the same logical key.
    #[tokio::test]
    #[ignore = "requires running Redis/Valkey (docker-compose up -d valkey)"]
    async fn integration_key_namespacing_isolation() -> Result<()> {
        let cache_a = RedisCache::new(RedisCacheConfig {
            url: "redis://127.0.0.1:6379".into(),
            key_prefix: Some("ns_a:".into()),
            ..Default::default()
        })?;
        let cache_b = RedisCache::new(RedisCacheConfig {
            url: "redis://127.0.0.1:6379".into(),
            key_prefix: Some("ns_b:".into()),
            ..Default::default()
        })?;
        let key = "shared_name";
        cache_a
            .set(key, "alpha".into(), Some(Duration::from_secs(30)))
            .await?;
        cache_b
            .set(key, "beta".into(), Some(Duration::from_secs(30)))
            .await?;
        assert_eq!(cache_a.get(key).await?, Some("alpha".into()));
        assert_eq!(cache_b.get(key).await?, Some("beta".into()));
        // Clean up so reruns start from a blank slate.
        cache_a.invalidate(key).await?;
        cache_b.invalidate(key).await?;
        Ok(())
    }

    // When set() receives ttl = None, the configured default_ttl must be
    // applied — the key should expire on its own.
    #[tokio::test]
    #[ignore = "requires running Redis/Valkey (docker-compose up -d valkey)"]
    async fn integration_default_ttl_applied() -> Result<()> {
        let cache = RedisCache::new(RedisCacheConfig {
            url: "redis://127.0.0.1:6379".into(),
            key_prefix: Some("test:dttl:".into()),
            default_ttl: Some(Duration::from_millis(200)),
            pool_size: 2,
        })?;
        let key = "default_ttl_key";
        cache.set(key, "has_default".into(), None).await?;
        assert!(cache.exists(key).await?);
        tokio::time::sleep(Duration::from_millis(350)).await;
        assert!(!cache.exists(key).await?);
        Ok(())
    }
}