use secrecy::{ExposeSecret, SecretString};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt;
/// Current configuration schema version; `Config::validate` rejects files
/// declaring a newer version than this.
pub const CONFIG_VERSION: u32 = 2;
/// Name of the field carrying the schema version in serialized config files.
pub const CONFIG_VERSION_FIELD: &str = "config_version";
/// Top-level cache configuration: global defaults plus per-service settings
/// keyed by service name.
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct Config {
    /// Schema version of the file; `None` is treated as legacy/unversioned.
    /// Versions newer than [`CONFIG_VERSION`] are rejected by `validate`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub config_version: Option<u32>,
    /// Defaults applied when a service does not override a setting.
    #[serde(default)]
    pub global: GlobalConfig,
    /// Per-service cache settings, keyed by service name.
    pub services: HashMap<String, ServiceConfig>,
}
/// Process-wide defaults shared by all services.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct GlobalConfig {
    /// Default entry TTL in seconds; used when a service sets no `ttl`.
    /// Validated to be non-zero and at most 30 days.
    pub default_ttl: u64,
    /// Seconds between health checks; validated to 1..=3600.
    pub health_check_interval: u64,
    /// Default value serialization format.
    pub serialization: SerializationType,
    /// Whether metrics collection is enabled.
    pub enable_metrics: bool,
}
impl Default for GlobalConfig {
fn default() -> Self {
Self {
default_ttl: 300,
health_check_interval: 60,
serialization: SerializationType::Json,
enable_metrics: true,
}
}
}
/// Cache settings for one named service.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ServiceConfig {
    /// Which cache tier(s) this service uses.
    pub cache_type: CacheType,
    /// Entry TTL in seconds; falls back to `GlobalConfig::default_ttl` when `None`.
    pub ttl: Option<u64>,
    /// Per-service serialization override; presumably falls back to the
    /// global setting when `None` — confirm in the cache implementation.
    pub serialization: Option<SerializationType>,
    /// In-process (L1) tier settings, when used.
    pub l1: Option<L1Config>,
    /// Redis-backed (L2) tier settings, when used.
    pub l2: Option<L2Config>,
    /// Settings for the combined two-level mode.
    pub two_level: Option<TwoLevelConfig>,
}
impl Default for ServiceConfig {
fn default() -> Self {
Self {
cache_type: CacheType::TwoLevel,
ttl: None,
serialization: None,
l1: Some(L1Config::default()),
l2: Some(L2Config::default()),
two_level: Some(TwoLevelConfig::default()),
}
}
}
/// Wire format used when storing values in the cache.
/// Serialized in lowercase ("json" / "bincode").
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum SerializationType {
    /// Human-readable JSON (default).
    #[default]
    Json,
    /// Compact binary encoding.
    Bincode,
}
/// Which cache tier(s) a service uses.
/// Serialized in lowercase ("l1" / "l2" / "twolevel").
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Default)]
#[serde(rename_all = "lowercase")]
pub enum CacheType {
    /// In-process tier only.
    L1,
    /// Remote (Redis) tier only.
    L2,
    /// Both tiers combined (default).
    #[default]
    TwoLevel,
}
/// Settings for the in-process (L1) cache tier.
/// `#[serde(default)]` lets any missing field fall back to its default.
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(default)]
pub struct L1Config {
    /// Maximum number of entries; validated to 1..=10_000_000.
    pub max_capacity: u64,
    /// Maximum allowed key length.
    pub max_key_length: usize,
    /// Maximum allowed value size in bytes.
    pub max_value_size: usize,
    /// Seconds between cleanup passes; `validate` only bounds this when > 0,
    /// so 0 presumably disables periodic cleanup — confirm in the cache impl.
    pub cleanup_interval_secs: u64,
}
impl Default for L1Config {
fn default() -> Self {
Self {
max_capacity: 10000,
max_key_length: 256,
max_value_size: 1024 * 1024, cleanup_interval_secs: 300, }
}
}
/// Settings for the Redis-backed (L2) cache tier.
///
/// Secret-bearing fields are `#[serde(skip)]`: they are never read from or
/// written to config files and must be supplied programmatically
/// (NOTE(review): confirm where the secrets are injected — e.g. environment).
#[derive(Serialize, Deserialize, Clone)]
#[serde(default)]
pub struct L2Config {
    /// Standalone, sentinel, or cluster deployment.
    pub mode: RedisMode,
    /// Redis connection URL; excluded from (de)serialization because it may
    /// embed credentials.
    #[serde(skip)]
    pub connection_string: SecretString,
    /// Connect timeout; validated to 100..=30000 ms.
    pub connection_timeout_ms: u64,
    /// Per-command timeout; validated to 100..=60000 ms.
    pub command_timeout_ms: u64,
    /// Optional auth password; excluded from (de)serialization.
    #[serde(skip)]
    pub password: Option<SecretString>,
    /// Whether to use TLS; `validate` requires it for production-looking hosts.
    pub enable_tls: bool,
    /// Sentinel settings, presumably used when `mode == RedisMode::Sentinel`.
    pub sentinel: Option<SentinelConfig>,
    /// Cluster settings, presumably used when `mode == RedisMode::Cluster`.
    pub cluster: Option<ClusterConfig>,
    /// Optional L2-specific default TTL (seconds); when set, `validate`
    /// requires the service TTL to be <= this value.
    pub default_ttl: Option<u64>,
    /// Maximum allowed key length.
    pub max_key_length: usize,
    /// Maximum allowed value size in bytes.
    pub max_value_size: usize,
}
/// Manual `Debug` that redacts secret material while printing every other
/// field normally. Kept hand-written (instead of `#[derive(Debug)]`) so
/// secrets can never leak into logs.
impl fmt::Debug for L2Config {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("L2Config")
            .field("mode", &self.mode)
            // Fixed placeholder — the secret value is never formatted.
            .field("connection_string", &"[REDACTED]")
            .field("connection_timeout_ms", &self.connection_timeout_ms)
            .field("command_timeout_ms", &self.command_timeout_ms)
            // Redacted unconditionally, even when the password is `None`.
            .field("password", &"[REDACTED]")
            .field("enable_tls", &self.enable_tls)
            .field("sentinel", &self.sentinel)
            .field("cluster", &self.cluster)
            .field("default_ttl", &self.default_ttl)
            .field("max_key_length", &self.max_key_length)
            .field("max_value_size", &self.max_value_size)
            .finish()
    }
}
impl Default for L2Config {
fn default() -> Self {
Self {
mode: RedisMode::Standalone,
connection_string: SecretString::new("redis://localhost:6379".to_string()),
connection_timeout_ms: 5000,
command_timeout_ms: 3000,
password: None,
enable_tls: false,
sentinel: None,
cluster: None,
default_ttl: Some(3600),
max_key_length: 256,
max_value_size: 1024 * 1024 * 10, }
}
}
/// Redis Sentinel topology settings.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SentinelConfig {
    /// Name of the monitored master.
    pub master_name: String,
    /// Sentinel node addresses.
    pub nodes: Vec<String>,
}
/// Redis Cluster topology settings.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ClusterConfig {
    /// Cluster node addresses.
    pub nodes: Vec<String>,
}
impl Config {
    /// Validates the whole configuration tree, returning the first violation
    /// found as a human-readable error string.
    ///
    /// # Errors
    ///
    /// Returns `Err` when:
    /// * `config_version` is newer than [`CONFIG_VERSION`];
    /// * global TTL / health-check settings are zero or out of range;
    /// * a service name is empty or longer than 64 characters;
    /// * a service's effective TTL is zero, exceeds 30 days, or exceeds the
    ///   L2-specific TTL;
    /// * L2 timeouts fall outside their allowed ranges;
    /// * a production-looking Redis connection lacks a password or TLS;
    /// * L1 capacity/cleanup, batch-write, key/value size, bloom-filter, or
    ///   warmup settings are out of bounds.
    pub fn validate(&self) -> Result<(), String> {
        // Reject configs written by a newer schema than this binary understands.
        if let Some(version) = &self.config_version {
            if *version > CONFIG_VERSION {
                return Err(format!(
                    "Configuration version {} is not supported. Current version is {}.",
                    version, CONFIG_VERSION
                ));
            }
        }
        if self.global.default_ttl == 0 {
            return Err("Global default_ttl cannot be zero".to_string());
        }
        if self.global.default_ttl > 86400 * 30 {
            return Err("Global default_ttl cannot exceed 30 days (2592000 seconds)".to_string());
        }
        if self.global.health_check_interval == 0 {
            return Err("Global health_check_interval cannot be zero".to_string());
        }
        // NOTE: the `== 0` check above already guarantees the lower bound here;
        // both checks are kept so each distinct error message is preserved.
        if self.global.health_check_interval < 1 || self.global.health_check_interval > 3600 {
            return Err(
                "Global health_check_interval must be between 1 and 3600 seconds".to_string(),
            );
        }
        for (name, service) in &self.services {
            if name.is_empty() {
                return Err("Service name cannot be empty".to_string());
            }
            if name.len() > 64 {
                return Err(format!(
                    "Service name '{}' exceeds maximum length of 64 characters",
                    name
                ));
            }
            // Effective TTL: per-service override or the global default.
            let service_ttl = service.ttl.unwrap_or(self.global.default_ttl);
            if service_ttl == 0 {
                return Err(format!("Service '{}' TTL cannot be zero", name));
            }
            if service_ttl > 86400 * 30 {
                return Err(format!("Service '{}' TTL cannot exceed 30 days", name));
            }
            if let Some(l2_config) = &service.l2 {
                if let Some(l2_specific_ttl) = l2_config.default_ttl {
                    if l2_specific_ttl == 0 {
                        return Err(format!("Service '{}' L2 TTL cannot be zero", name));
                    }
                    // L1 entries must not outlive their backing L2 entries.
                    if service_ttl > l2_specific_ttl {
                        return Err(format!(
                            "Service '{}' configuration error: L1 TTL ({}) must be <= L2 TTL ({})",
                            name, service_ttl, l2_specific_ttl
                        ));
                    }
                }
                if !(100..=30000).contains(&l2_config.connection_timeout_ms) {
                    return Err(format!(
                        "Service '{}' connection_timeout_ms must be between 100 and 30000 ms",
                        name
                    ));
                }
                if !(100..=60000).contains(&l2_config.command_timeout_ms) {
                    return Err(format!(
                        "Service '{}' command_timeout_ms must be between 100 and 60000 ms",
                        name
                    ));
                }
                // Production-looking connections must be authenticated and
                // encrypted. Heuristic evaluated once; previously the same
                // six-clause check was duplicated in both branches.
                let is_production =
                    Self::is_production_connection(l2_config.connection_string.expose_secret());
                if l2_config.password.is_none() && is_production {
                    return Err(format!(
                        "Service '{}' is in production environment but Redis password is not configured. \
                         For security reasons, production Redis connections must use authentication. \
                         Please set 'password' in L2Config.",
                        name
                    ));
                }
                if !l2_config.enable_tls && is_production {
                    return Err(format!(
                        "Service '{}' is in production environment but TLS is not enabled. \
                         For security reasons, production Redis connections must use TLS encryption. \
                         Please set 'enable_tls = true' in L2Config.",
                        name
                    ));
                }
            }
            if let Some(l1_config) = &service.l1 {
                if l1_config.max_capacity == 0 {
                    return Err(format!("Service '{}' L1 max_capacity cannot be zero", name));
                }
                if l1_config.max_capacity > 10_000_000 {
                    return Err(format!(
                        "Service '{}' L1 max_capacity cannot exceed 10,000,000",
                        name
                    ));
                }
                // 0 skips the bound; otherwise cleanup must run at least once
                // within an entry's lifetime.
                if l1_config.cleanup_interval_secs > 0
                    && l1_config.cleanup_interval_secs > service_ttl
                {
                    return Err(format!(
                        "Service '{}' L1 cleanup_interval_secs ({}) must be <= service TTL ({})",
                        name, l1_config.cleanup_interval_secs, service_ttl
                    ));
                }
            }
            if let Some(two_level_config) = &service.two_level {
                // Batch-write bounds only matter when batching is enabled.
                if two_level_config.enable_batch_write {
                    if two_level_config.batch_size == 0 {
                        return Err(format!(
                            "Service '{}' batch_size cannot be zero when batch_write is enabled",
                            name
                        ));
                    }
                    if two_level_config.batch_size > 10000 {
                        return Err(format!("Service '{}' batch_size cannot exceed 10000", name));
                    }
                    if two_level_config.batch_interval_ms == 0 {
                        return Err(format!(
                            "Service '{}' batch_interval_ms cannot be zero when batch_write is enabled",
                            name
                        ));
                    }
                    if two_level_config.batch_interval_ms > 60000 {
                        return Err(format!(
                            "Service '{}' batch_interval_ms cannot exceed 60000 ms",
                            name
                        ));
                    }
                }
                if let Some(max_key_length) = two_level_config.max_key_length {
                    if max_key_length == 0 || max_key_length > 1024 {
                        return Err(format!(
                            "Service '{}' max_key_length must be between 1 and 1024",
                            name
                        ));
                    }
                }
                if let Some(max_value_size) = two_level_config.max_value_size {
                    if max_value_size == 0 || max_value_size > 10 * 1024 * 1024 {
                        return Err(format!(
                            "Service '{}' max_value_size must be between 1 and 10MB",
                            name
                        ));
                    }
                }
                if let Some(bloom_config) = &two_level_config.bloom_filter {
                    if bloom_config.expected_elements == 0 {
                        return Err(format!(
                            "Service '{}' bloom_filter expected_elements cannot be zero",
                            name
                        ));
                    }
                    if bloom_config.false_positive_rate <= 0.0
                        || bloom_config.false_positive_rate >= 1.0
                    {
                        return Err(format!(
                            "Service '{}' bloom_filter false_positive_rate must be between 0 and 1",
                            name
                        ));
                    }
                }
                // Warmup checks live inside this branch because warmup can only
                // exist on a `two_level` config; the previous code re-fetched it
                // through `service.two_level.as_ref().and_then(...)` behind a
                // needless extra `&`, binding a `&&CacheWarmupConfig`.
                if let Some(warmup_config) = two_level_config.warmup.as_ref() {
                    if warmup_config.enabled {
                        if warmup_config.timeout_seconds == 0 {
                            return Err(format!(
                                "Service '{}' warmup timeout_seconds cannot be zero",
                                name
                            ));
                        }
                        if warmup_config.timeout_seconds > 3600 {
                            return Err(format!(
                                "Service '{}' warmup timeout_seconds cannot exceed 3600 seconds",
                                name
                            ));
                        }
                        if warmup_config.batch_size == 0 {
                            return Err(format!(
                                "Service '{}' warmup batch_size cannot be zero",
                                name
                            ));
                        }
                        if warmup_config.batch_size > 10000 {
                            return Err(format!(
                                "Service '{}' warmup batch_size cannot exceed 10000",
                                name
                            ));
                        }
                    }
                }
            }
        }
        Ok(())
    }

    /// Heuristic classification of a Redis connection string as "production":
    /// it mentions prod-like words, or it targets a host that is not an
    /// obviously local / private address. (`contains("production")` is
    /// subsumed by `contains("prod")` but kept to match the original intent.)
    ///
    /// NOTE(review): plain substring checks can misfire — e.g. a hostname
    /// containing "10." or the letters "prod" — confirm this heuristic
    /// matches the deployment naming conventions.
    fn is_production_connection(conn_str: &str) -> bool {
        conn_str.contains("production")
            || conn_str.contains("prod")
            || (!conn_str.contains("localhost")
                && !conn_str.contains("127.0.0.1")
                && !conn_str.contains("192.168.")
                && !conn_str.contains("10."))
    }
}
/// Behavior of the combined L1+L2 ("two-level") cache mode.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct TwoLevelConfig {
    /// Presumably copies values into L1 when served from L2 — confirm in the
    /// cache implementation.
    pub promote_on_hit: bool,
    /// Whether writes to L2 are batched; when true, `batch_size` and
    /// `batch_interval_ms` are validated to be non-zero and bounded.
    pub enable_batch_write: bool,
    /// Entries per write batch (validated to 1..=10000 when batching is on).
    pub batch_size: usize,
    /// Milliseconds between batch flushes (validated to 1..=60000 when on).
    pub batch_interval_ms: u64,
    /// How the invalidation pub/sub channel name is chosen, if any.
    pub invalidation_channel: Option<InvalidationChannelConfig>,
    /// Optional negative-lookup bloom filter settings.
    pub bloom_filter: Option<BloomFilterConfig>,
    /// Optional cache warmup settings.
    pub warmup: Option<CacheWarmupConfig>,
    /// Optional key-length cap (validated to 1..=1024 when set).
    pub max_key_length: Option<usize>,
    /// Optional value-size cap in bytes (validated to 1..=10 MiB when set).
    pub max_value_size: Option<usize>,
}
/// Settings for pre-populating the cache at startup.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct CacheWarmupConfig {
    /// Whether warmup runs at all; bounds below are only validated when true.
    pub enabled: bool,
    /// Overall warmup deadline (validated to 1..=3600 seconds when enabled).
    pub timeout_seconds: u64,
    /// Keys loaded per batch (validated to 1..=10000 when enabled).
    pub batch_size: usize,
    /// Pause between batches, in milliseconds.
    pub batch_interval_ms: u64,
    /// Where warmup keys/values come from.
    pub data_sources: Vec<WarmupDataSource>,
}
/// A single source of warmup data. Internally tagged: serialized with a
/// `"type"` discriminator field naming the variant.
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(tag = "type")]
pub enum WarmupDataSource {
    /// A fixed list of keys.
    Static {
        keys: Vec<String>,
    },
    /// Keys read from a Redis list, capped at `max_count` entries.
    RedisList {
        key: String,
        max_count: usize,
    },
    /// Key/value pairs produced by a database query; `key_field` and
    /// `value_field` name the result columns to use.
    Database {
        query: String,
        key_field: String,
        value_field: String,
    },
    /// Data fetched from an HTTP endpoint with its own timeout.
    Api {
        url: String,
        timeout_seconds: u64,
    },
}
impl Default for CacheWarmupConfig {
fn default() -> Self {
Self {
enabled: false,
timeout_seconds: 300,
batch_size: 100,
batch_interval_ms: 50,
data_sources: Vec::new(),
}
}
}
/// Bloom-filter sizing and behavior.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct BloomFilterConfig {
    /// Expected element count used to size the filter (validated non-zero).
    pub expected_elements: usize,
    /// Target false-positive probability, strictly between 0 and 1.
    pub false_positive_rate: f64,
    /// Presumably adds keys to the filter automatically on writes — confirm
    /// in the cache implementation.
    pub auto_add_keys: bool,
    /// Identifier for this filter.
    pub name: String,
}
impl Default for BloomFilterConfig {
fn default() -> Self {
Self {
expected_elements: 100000,
false_positive_rate: 0.01,
auto_add_keys: true,
name: "default_bloom_filter".to_string(),
}
}
}
/// How the invalidation channel name is configured. Untagged: deserialization
/// tries each variant shape in order, so a bare string becomes `Custom`.
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(untagged)]
pub enum InvalidationChannelConfig {
    /// A fully custom channel name.
    Custom(String),
    /// A channel name presumably assembled from the optional prefix and,
    /// when `use_service_name` is true, the service name — confirm in the
    /// cache implementation.
    Structured {
        prefix: Option<String>,
        use_service_name: bool,
    },
}
impl Default for TwoLevelConfig {
fn default() -> Self {
Self {
promote_on_hit: true,
enable_batch_write: false,
batch_size: 100,
batch_interval_ms: 1000,
invalidation_channel: None,
bloom_filter: None,
warmup: None,
max_key_length: Some(256),
max_value_size: Some(1024 * 1024 * 10),
}
}
}
/// Redis deployment topology.
/// Serialized in lowercase ("standalone" / "sentinel" / "cluster").
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum RedisMode {
    /// Single Redis instance.
    Standalone,
    /// Sentinel-managed failover; see `SentinelConfig`.
    Sentinel,
    /// Redis Cluster; see `ClusterConfig`.
    Cluster,
}
/// L1 eviction policy. Serialized in lowercase.
/// (`Default` merged into the single derive list; it was previously a
/// separate `#[derive(Default)]` attribute.)
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum EvictionPolicy {
    /// Least recently used.
    Lru,
    /// Least frequently used.
    Lfu,
    /// TinyLFU (the default policy).
    #[default]
    TinyLfu,
    /// Random eviction.
    Random,
}
/// A runtime-adjustable cache strategy for one service, built via the
/// `with_*` methods on `impl CacheStrategy`.
#[derive(Serialize, Debug, Clone, PartialEq)]
pub struct CacheStrategy {
    /// Service this strategy applies to.
    pub service_name: String,
    /// L1 entry TTL in seconds.
    pub ttl: u64,
    /// Maximum L1 entry count.
    pub l1_max_capacity: u64,
    /// L1 eviction policy.
    pub l1_eviction_policy: EvictionPolicy,
    /// Default L2 TTL in seconds.
    pub l2_default_ttl: u64,
    /// Whether batched L2 writes are enabled.
    pub enable_batch_write: bool,
    /// Entries per write batch.
    pub batch_size: usize,
    /// Timestamp of the last mutation; refreshed by every `with_*` call.
    pub updated_at: chrono::DateTime<chrono::Utc>,
}
impl CacheStrategy {
pub fn new(service_name: &str) -> Self {
Self {
service_name: service_name.to_string(),
ttl: 300,
l1_max_capacity: 10000,
l1_eviction_policy: EvictionPolicy::default(),
l2_default_ttl: 3600,
enable_batch_write: true,
batch_size: 100,
updated_at: chrono::Utc::now(),
}
}
pub fn with_ttl(mut self, ttl: u64) -> Self {
self.ttl = ttl;
self.updated_at = chrono::Utc::now();
self
}
pub fn with_l1_max_capacity(mut self, capacity: u64) -> Self {
self.l1_max_capacity = capacity;
self.updated_at = chrono::Utc::now();
self
}
pub fn with_l1_eviction_policy(mut self, policy: EvictionPolicy) -> Self {
self.l1_eviction_policy = policy;
self.updated_at = chrono::Utc::now();
self
}
pub fn with_l2_default_ttl(mut self, ttl: u64) -> Self {
self.l2_default_ttl = ttl;
self.updated_at = chrono::Utc::now();
self
}
pub fn with_enable_batch_write(mut self, enable: bool) -> Self {
self.enable_batch_write = enable;
self.updated_at = chrono::Utc::now();
self
}
pub fn with_batch_size(mut self, size: usize) -> Self {
self.batch_size = size;
self.updated_at = chrono::Utc::now();
self
}
}
/// Runtime-mutable registry of per-service strategies, shareable across
/// threads via `DashMap`. The map is `#[serde(skip)]`ped, so serializing a
/// `DynamicConfig` emits no strategy data.
#[derive(Serialize, Debug, Clone, Default)]
pub struct DynamicConfig {
    /// Strategies keyed by service name.
    #[serde(skip)]
    strategies: dashmap::DashMap<String, CacheStrategy>,
}
impl DynamicConfig {
pub fn new() -> Self {
Self {
strategies: dashmap::DashMap::new(),
}
}
pub fn get_strategy(&self, service_name: &str) -> Option<CacheStrategy> {
self.strategies.get(service_name).map(|s| s.clone())
}
pub fn update_strategy(&self, strategy: CacheStrategy) {
self.strategies
.insert(strategy.service_name.clone(), strategy);
}
pub fn remove_strategy(&self, service_name: &str) {
self.strategies.remove(service_name);
}
pub fn has_strategy(&self, service_name: &str) -> bool {
self.strategies.contains_key(service_name)
}
pub fn service_names(&self) -> Vec<String> {
self.strategies.iter().map(|s| s.key().clone()).collect()
}
pub fn clear(&self) {
self.strategies.clear();
}
}
#[cfg(test)]
mod tests {
    use super::*;

    // The derived default of EvictionPolicy is the #[default] variant.
    #[test]
    fn test_eviction_policy_default() {
        assert_eq!(EvictionPolicy::default(), EvictionPolicy::TinyLfu);
    }

    // Builder methods set their field and leave the rest at `new` defaults.
    #[test]
    fn test_cache_strategy_builder() {
        let strategy = CacheStrategy::new("test_service")
            .with_ttl(600)
            .with_l1_max_capacity(20000)
            .with_l1_eviction_policy(EvictionPolicy::Lru);
        assert_eq!(strategy.service_name, "test_service");
        assert_eq!(strategy.ttl, 600);
        assert_eq!(strategy.l1_max_capacity, 20000);
        assert_eq!(strategy.l1_eviction_policy, EvictionPolicy::Lru);
    }

    // Full insert / lookup / remove lifecycle of the strategy registry.
    #[test]
    fn test_dynamic_config() {
        let config = DynamicConfig::new();
        assert!(!config.has_strategy("test"));
        let strategy = CacheStrategy::new("test").with_ttl(500);
        config.update_strategy(strategy.clone());
        assert!(config.has_strategy("test"));
        assert_eq!(config.get_strategy("test"), Some(strategy));
        config.remove_strategy("test");
        assert!(!config.has_strategy("test"));
    }

    // service_names returns every registered key (order is unspecified,
    // so membership is asserted rather than sequence).
    #[test]
    fn test_dynamic_config_service_names() {
        let config = DynamicConfig::new();
        config.update_strategy(CacheStrategy::new("service1"));
        config.update_strategy(CacheStrategy::new("service2"));
        let names = config.service_names();
        assert_eq!(names.len(), 2);
        assert!(names.contains(&"service1".to_string()));
        assert!(names.contains(&"service2".to_string()));
    }
}