use bytes::Bytes;
use multi_tier_cache::error::CacheResult;
use std::sync::Arc;
use std::time::Duration;
mod common;
use common::{test_data, test_key};
use multi_tier_cache::{
CacheBackend, CacheManager, CacheStrategy, CacheSystemBuilder, L2Cache, TierConfig,
};
#[tokio::test]
async fn test_multi_tier_basic_operations() {
    // Round-trips a set/get through a 3-tier cache and checks that per-tier
    // statistics are reported for every configured tier.
    // `.expect` (rather than `unwrap_or_else(|_| panic!)`) keeps the
    // underlying error in the panic message for easier diagnosis.
    let l1 = Arc::new(L2Cache::new().await.expect("Failed to create L1"));
    let l2 = Arc::new(L2Cache::new().await.expect("Failed to create L2"));
    let l3 = Arc::new(L2Cache::new().await.expect("Failed to create L3"));
    let cache = CacheSystemBuilder::new()
        .with_tier(l1, TierConfig::as_l1())
        .with_tier(l2, TierConfig::as_l2())
        .with_tier(l3, TierConfig::as_l3())
        .build()
        .await
        .expect("Failed to build cache system");
    let manager = cache.cache_manager();

    let test_data = Bytes::from("{\"user\": \"alice\", \"id\": 123}");
    manager
        .set_with_strategy("test:multi:1", test_data.clone(), CacheStrategy::ShortTerm)
        .await
        .expect("Failed to set cache");
    let result = manager
        .get("test:multi:1")
        .await
        .expect("Failed to get cache");
    assert_eq!(result, Some(test_data));

    // Multi-tier mode must expose stats for all three tiers.
    let tier_stats = manager.get_tier_stats();
    assert!(
        !tier_stats.is_empty(),
        "Expected tier stats for multi-tier mode"
    );
    println!("Multi-tier stats:");
    for stats in &tier_stats {
        println!(
            " L{}: {} hits ({})",
            stats.tier_level,
            stats.hit_count(),
            stats.backend_name
        );
    }
    assert_eq!(tier_stats.len(), 3, "Should have 3 tiers");
    println!("✅ Multi-tier basic operations test passed");
}
#[tokio::test]
async fn test_multi_tier_stats() {
    // Performs one set plus five gets and verifies both per-tier and
    // aggregate statistics reflect the traffic (L1 should absorb the
    // repeated gets after the first promotion).
    let l1 = Arc::new(L2Cache::new().await.expect("Failed to create L1"));
    let l2 = Arc::new(L2Cache::new().await.expect("Failed to create L2"));
    let l3 = Arc::new(L2Cache::new().await.expect("Failed to create L3"));
    let cache = CacheSystemBuilder::new()
        .with_tier(l1.clone(), TierConfig::as_l1())
        .with_tier(l2.clone(), TierConfig::as_l2())
        .with_tier(l3.clone(), TierConfig::as_l3())
        .build()
        .await
        .expect("Failed to build cache system");
    let manager = cache.cache_manager();

    let test_data = Bytes::from("{\"stats\": \"test\"}");
    manager
        .set_with_strategy("test:stats:1", test_data.clone(), CacheStrategy::ShortTerm)
        .await
        .expect("Failed to set cache");
    for _ in 0..5 {
        let _result = manager
            .get("test:stats:1")
            .await
            .expect("Failed to get cache");
    }

    // Previously these assertions were silently skipped when tier stats came
    // back empty; with three tiers configured, empty stats are a failure.
    let tier_stats = manager.get_tier_stats();
    assert_eq!(tier_stats.len(), 3, "Should have 3 tiers");
    let l1_stats = tier_stats
        .iter()
        .find(|s| s.tier_level == 1)
        .expect("L1 stats missing");
    assert!(
        l1_stats.hit_count() >= 4,
        "L1 should have at least 4 hits from repeated gets"
    );
    println!("Tier statistics:");
    for stats in &tier_stats {
        println!(" L{}: {} hits", stats.tier_level, stats.hit_count());
    }

    let stats = manager.get_stats();
    assert!(stats.total_requests >= 5, "Should track all requests");
    assert!(stats.l1_hits >= 4, "Should have L1 hits");
    println!("✅ Multi-tier statistics test passed");
}
#[tokio::test]
async fn test_backward_compatibility_legacy_mode() {
    // A builder with no explicit tiers must still produce a working cache
    // (legacy mode), which the unified architecture models as two tiers.
    let cache = CacheSystemBuilder::new()
        .build()
        .await
        .expect("Failed to build cache system");
    let manager = cache.cache_manager();

    let test_data = Bytes::from("{\"legacy\": \"mode\"}");
    manager
        .set_with_strategy("test:legacy:1", test_data.clone(), CacheStrategy::ShortTerm)
        .await
        .expect("Failed to set cache");
    let result = manager
        .get("test:legacy:1")
        .await
        .expect("Failed to get cache");
    assert_eq!(result, Some(test_data));

    let tier_stats = manager.get_tier_stats();
    assert_eq!(
        tier_stats.len(),
        2,
        "Legacy mode should have 2 tiers in unified architecture"
    );
    let stats = manager.get_stats();
    assert!(stats.total_requests > 0, "Should have request stats");
    println!("✅ Backward compatibility test passed");
}
#[tokio::test]
async fn test_multi_tier_ttl_scaling() {
    // Stores an entry with a custom TTL and verifies it is retrievable,
    // i.e. per-tier TTL scaling does not immediately expire the entry.
    let l1 = Arc::new(L2Cache::new().await.expect("Failed to create L1"));
    let l2 = Arc::new(L2Cache::new().await.expect("Failed to create L2"));
    let l3 = Arc::new(L2Cache::new().await.expect("Failed to create L3"));
    let cache = CacheSystemBuilder::new()
        .with_tier(l1, TierConfig::as_l1())
        .with_tier(l2, TierConfig::as_l2())
        .with_tier(l3, TierConfig::as_l3())
        .build()
        .await
        .expect("Failed to build cache system");
    let manager = cache.cache_manager();

    let test_data = Bytes::from("{\"ttl\": \"test\"}");
    manager
        .set_with_strategy(
            "test:ttl:1",
            test_data,
            CacheStrategy::Custom(Duration::from_secs(10)),
        )
        .await
        .expect("Failed to set cache");
    let result = manager
        .get("test:ttl:1")
        .await
        .expect("Failed to get cache");
    assert!(result.is_some(), "Should retrieve data with scaled TTL");
    println!("✅ Multi-tier TTL scaling test passed");
}
#[tokio::test]
async fn test_multi_tier_cache_miss() {
    // A lookup for a key never stored must return Ok(None) (not an error)
    // and be counted in the miss statistics.
    let l1 = Arc::new(L2Cache::new().await.expect("Failed to create L1"));
    let l2 = Arc::new(L2Cache::new().await.expect("Failed to create L2"));
    let cache = CacheSystemBuilder::new()
        .with_tier(l1, TierConfig::as_l1())
        .with_tier(l2, TierConfig::as_l2())
        .build()
        .await
        .expect("Failed to build cache system");
    let manager = cache.cache_manager();

    let result = manager
        .get("test:miss:nonexistent")
        .await
        .expect("Failed to get cache");
    assert!(result.is_none(), "Should return None for cache miss");

    let stats = manager.get_stats();
    assert!(stats.misses > 0, "Should track cache misses");
    println!("✅ Multi-tier cache miss test passed");
}
#[tokio::test]
async fn test_convenience_methods() {
    // Mixes explicit `with_tier` calls with the `with_l3`/`with_l4`
    // convenience builders and checks all four tier levels are registered.
    let l1_backend = Arc::new(L2Cache::new().await.expect("Failed to create L1"));
    let l2_backend = Arc::new(L2Cache::new().await.expect("Failed to create L2"));
    let l3_backend = Arc::new(L2Cache::new().await.expect("Failed to create L3"));
    let l4_backend = Arc::new(L2Cache::new().await.expect("Failed to create L4"));
    let cache = CacheSystemBuilder::new()
        .with_tier(l1_backend, TierConfig::as_l1())
        .with_tier(l2_backend, TierConfig::as_l2())
        .with_l3(l3_backend)
        .with_l4(l4_backend)
        .build()
        .await
        .expect("Failed to build cache system");
    let manager = cache.cache_manager();

    // Previously these assertions were skipped when stats came back empty;
    // with four tiers configured, empty stats are themselves a failure.
    let tier_stats = manager.get_tier_stats();
    assert_eq!(tier_stats.len(), 4, "Should have 4 tiers");
    for level in 1..=4 {
        assert!(
            tier_stats.iter().any(|s| s.tier_level == level),
            "Should have L{level} tier"
        );
    }
    println!("✅ Convenience methods test passed");
}
#[tokio::test]
async fn test_multi_tier_stampede_protection() {
    // Fires 50 concurrent `get_or_compute_with` calls for the same cold key
    // and asserts the compute closure runs exactly once (single-flight /
    // stampede protection), with the result then served from L1.
    use std::sync::atomic::{AtomicU32, Ordering};
    use tokio::task::JoinSet;

    let l1 = Arc::new(L2Cache::new().await.expect("Failed to create L1"));
    let l2 = Arc::new(L2Cache::new().await.expect("Failed to create L2"));
    let l3 = Arc::new(L2Cache::new().await.expect("Failed to create L3"));
    let cache = CacheSystemBuilder::new()
        .with_tier(l1, TierConfig::as_l1())
        .with_tier(l2, TierConfig::as_l2())
        .with_l3(l3)
        .build()
        .await
        .expect("Failed to build cache system");
    let manager = Arc::new(cache.cache_manager().clone());

    let key = test_key("stampede_multi_tier");
    let compute_count = Arc::new(AtomicU32::new(0));
    let mut tasks: JoinSet<CacheResult<Bytes>> = JoinSet::new();
    for _ in 0..50 {
        let manager_clone: Arc<CacheManager> = Arc::clone(&manager);
        let key_clone = key.clone();
        let counter_clone = Arc::clone(&compute_count);
        tasks.spawn(async move {
            manager_clone
                .get_or_compute_with(&key_clone, CacheStrategy::ShortTerm, || {
                    // Counts every invocation of the compute path; stampede
                    // protection should allow exactly one.
                    counter_clone.fetch_add(1, Ordering::SeqCst);
                    async move { Ok(test_data::bytes_user(999)) }
                })
                .await
        });
    }
    while let Some(result) = tasks.join_next().await {
        // `.expect` surfaces the JoinError / cache error instead of
        // discarding it as the old `unwrap_or_else(|_| panic!)` did.
        result.expect("Task panicked").expect("Compute failed");
    }

    let compute_calls = compute_count.load(Ordering::SeqCst);
    assert_eq!(
        compute_calls, 1,
        "Expected exactly 1 compute call with multi-tier stampede protection, got {compute_calls}",
    );
    let cached_in_l1 = manager.get(&key).await.expect("Failed to get cache");
    assert!(
        cached_in_l1.is_some(),
        "Data should be cached in L1 after stampede"
    );
    println!("✅ Multi-tier stampede protection test passed");
}
#[tokio::test]
async fn test_stampede_retrieves_from_l3() {
use std::sync::atomic::{AtomicU32, Ordering};
use tokio::task::JoinSet;
let l1 = Arc::new(
L2Cache::new()
.await
.unwrap_or_else(|_| panic!("Failed to create L1")),
);
let l2 = Arc::new(
L2Cache::new()
.await
.unwrap_or_else(|_| panic!("Failed to create L2")),
);
let l3 = Arc::new(
L2Cache::new()
.await
.unwrap_or_else(|_| panic!("Failed to create L3")),
);
let cache = CacheSystemBuilder::new()
.with_tier(l1.clone(), TierConfig::as_l1())
.with_tier(l2.clone(), TierConfig::as_l2())
.with_l3(l3.clone())
.build()
.await
.unwrap_or_else(|_| panic!("Failed to build cache system"));
let manager = Arc::new(cache.cache_manager().clone());
let key = test_key("stampede_l3_hit");
let data = test_data::bytes_user(777);
l3.set_with_ttl(&key, data.clone(), std::time::Duration::from_secs(300))
.await
.unwrap_or_else(|_| panic!("Failed to set L3"));
let compute_count = Arc::new(AtomicU32::new(0));
let mut tasks: JoinSet<CacheResult<Bytes>> = JoinSet::new();
for _ in 0..30 {
let manager_clone: Arc<CacheManager> = Arc::clone(&manager);
let key_clone = key.clone();
let counter_clone = Arc::clone(&compute_count);
tasks.spawn(async move {
manager_clone
.get_or_compute_with(&key_clone, CacheStrategy::ShortTerm, || {
counter_clone.fetch_add(1, Ordering::SeqCst);
async move {
panic!("Compute should not be called when data exists in L3!");
}
})
.await
});
}
while let Some(result) = tasks.join_next().await {
result
.unwrap_or_else(|_| panic!("Task panicked"))
.unwrap_or_else(|_| panic!("Should retrieve from L3"));
}
let compute_calls = compute_count.load(Ordering::SeqCst);
assert_eq!(
compute_calls, 0,
"Expected 0 compute calls (data should be retrieved from L3), got {compute_calls}",
);
let l1_data: Option<Bytes> = l1.get(&key).await;
assert!(l1_data.is_some(), "Data should be promoted from L3 to L1");
assert_eq!(
l1_data.unwrap_or_else(|| panic!("L1 data missing")),
data,
"Promoted data should match original"
);
println!("✅ Stampede retrieves from L3 test passed");
}