use anyhow::Result;
use bytes::Bytes;
use multi_tier_cache::{CacheBackend, CacheStrategy, CacheSystemBuilder};
use std::sync::Arc;
/// Entry point for the built-in backends tour.
///
/// Runs a short demo of each cache backend that ships with the crate.
/// The DashMap demo always runs; the Memcached and QuickCache demos are
/// feature-gated, and a "skipped" notice with the enabling command is
/// printed when the corresponding cargo feature is disabled.
#[tokio::main]
async fn main() -> Result<()> {
    println!("=== Multi-Tier Cache: Built-in Backends Example ===\n");

    // Example 1: DashMapCache is always compiled in.
    println!("📦 Example 1: DashMapCache (L1)");
    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n");
    demo_dashmap_backend().await?;

    // Example 2: only compiled with the `backend-memcached` feature.
    #[cfg(feature = "backend-memcached")]
    {
        println!("\n📦 Example 2: MemcachedCache (L2)");
        println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n");
        demo_memcached_backend().await?;
    }
    #[cfg(not(feature = "backend-memcached"))]
    {
        println!("\n📦 Example 2: MemcachedCache (L2)");
        println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
        println!("⚠️ Skipped: Requires 'backend-memcached' feature");
        println!(
            " Run with: cargo run --example builtin_backends --features backend-memcached\n"
        );
    }

    // Example 3: only compiled with the `backend-quickcache` feature.
    #[cfg(feature = "backend-quickcache")]
    {
        println!("\n📦 Example 3: QuickCacheBackend (L1)");
        println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n");
        demo_quickcache_backend().await?;
    }
    #[cfg(not(feature = "backend-quickcache"))]
    {
        println!("\n📦 Example 3: QuickCacheBackend (L1)");
        println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
        println!("⚠️ Skipped: Requires 'backend-quickcache' feature");
        println!(
            " Run with: cargo run --example builtin_backends --features backend-quickcache\n"
        );
    }

    println!("\n✅ Built-in backends example completed!");
    Ok(())
}
/// Demonstrates `DashMapCache` as an L1-only cache tier.
///
/// Builds a cache system with DashMap as the sole backend, performs a
/// set/get round trip with a short-term strategy, prints hit/miss
/// statistics, and shows the manual `cleanup_expired` maintenance call
/// (DashMap performs no automatic eviction).
///
/// # Errors
/// Propagates any error from building the cache system or from the
/// set/get operations.
async fn demo_dashmap_backend() -> Result<()> {
    use multi_tier_cache::DashMapCache;

    println!("Using DashMapCache as L1 backend...");

    // Keep a concrete handle so we can still call backend-specific methods
    // (cleanup_expired) after handing a trait object to the builder.
    let dashmap_l1 = Arc::new(DashMapCache::new());
    let cache = CacheSystemBuilder::new()
        .with_l1(dashmap_l1.clone() as Arc<dyn CacheBackend>)
        .build()
        .await?;
    let manager = cache.cache_manager();

    let test_data = Bytes::from(
        "{\"user\": \"bob\", \"role\": \"admin\", \"permissions\": [\"read\", \"write\", \"delete\"]}",
    );
    manager
        .set_with_strategy("user:bob", test_data.clone(), CacheStrategy::ShortTerm)
        .await?;

    if let Some(cached) = manager.get("user:bob").await? {
        println!("✅ Retrieved from DashMapCache: {cached:?}");
    }

    let stats = manager.get_stats();
    println!(
        "📊 Stats - L1 hits: {}, L2 hits: {}, misses: {}",
        stats.l1_hits, stats.l2_hits, stats.misses
    );

    // DashMap does not evict expired entries on its own; callers must
    // trigger cleanup explicitly via the concrete backend handle.
    println!("\n🧹 DashMapCache has manual cleanup (no automatic eviction)");
    let removed = dashmap_l1.cleanup_expired();
    println!(" Cleaned up {removed} expired entries");
    Ok(())
}
/// Demonstrates `MemcachedCache` used standalone (outside the tier builder).
///
/// Connects to a Memcached server (default localhost:11211), performs a
/// TTL-bounded set/get round trip, dumps a few server statistics, and
/// removes the test key. Connection failure is reported and tolerated —
/// the demo still returns `Ok(())` so the example can run without a
/// Memcached server available.
///
/// # Errors
/// Propagates errors from the cache operations once a connection has
/// been established.
#[cfg(feature = "backend-memcached")]
async fn demo_memcached_backend() -> Result<()> {
    use multi_tier_cache::MemcachedCache;
    use std::time::Duration;

    println!("Using MemcachedCache (standalone demonstration)...");
    println!("⚠️ Note: Requires Memcached server running at localhost:11211");

    match MemcachedCache::new() {
        Ok(memcached) => {
            println!("✅ Connected to Memcached");

            let test_data =
                Bytes::from("{\"product\": \"laptop\", \"price\": 999.99, \"stock\": 42}");
            // Memcached entries carry an explicit TTL (5 minutes here).
            memcached
                .set_with_ttl(
                    "product:laptop",
                    test_data.clone(),
                    Duration::from_secs(300),
                )
                .await?;

            if let Some(cached) = memcached.get("product:laptop").await {
                println!("✅ Retrieved from MemcachedCache: {cached:?}");
            }

            // Best-effort stats dump; individual keys may be absent
            // depending on the server version.
            if let Ok(server_stats) = memcached.get_server_stats() {
                println!("\n📊 Memcached Server Stats:");
                for (server, stats) in server_stats {
                    println!(" Server: {server}");
                    if let Some(version) = stats.get("version") {
                        println!(" Version: {version}");
                    }
                    if let Some(uptime) = stats.get("uptime") {
                        println!(" Uptime: {uptime}s");
                    }
                    if let Some(cmd_get) = stats.get("cmd_get") {
                        println!(" Total GETs: {cmd_get}");
                    }
                    if let Some(cmd_set) = stats.get("cmd_set") {
                        println!(" Total SETs: {cmd_set}");
                    }
                }
            }

            // Clean up the demo key so reruns start fresh.
            memcached.remove("product:laptop").await?;

            println!("\n💡 Note: MemcachedCache implements CacheBackend but not L2CacheBackend");
            println!(" This is because Memcached doesn't support TTL introspection.");
            println!(
                " You can use it standalone or wrap it in a custom backend that implements L2CacheBackend."
            );
        }
        Err(e) => {
            // Non-fatal: explain how to get a server running instead of bailing.
            println!("❌ Failed to connect to Memcached: {e}");
            println!(" Make sure Memcached is running: memcached -p 11211");
            println!(" Or set MEMCACHED_URL environment variable");
        }
    }
    Ok(())
}
/// Demonstrates `QuickCacheBackend` as an L1 tier in the cache system.
///
/// Builds a cache system with a 5000-entry QuickCache as the only tier,
/// performs a set/get round trip with a short-term strategy, and prints
/// hit/miss statistics.
///
/// # Errors
/// Propagates errors from backend construction, system build, or the
/// set/get operations.
#[cfg(feature = "backend-quickcache")]
async fn demo_quickcache_backend() -> Result<()> {
    use multi_tier_cache::QuickCacheBackend;

    println!("Using QuickCacheBackend as L1 backend...");

    // Capacity-bounded backend: holds at most 5000 entries.
    let quickcache_l1: Arc<QuickCacheBackend> = Arc::new(QuickCacheBackend::new(5000)?);
    let cache = CacheSystemBuilder::new()
        .with_l1(quickcache_l1.clone() as Arc<dyn CacheBackend>)
        .build()
        .await?;
    let manager = cache.cache_manager();

    let test_data =
        Bytes::from("{\"session_id\": \"abc123\", \"user_id\": 42, \"expires_at\": 1234567890}");
    manager
        .set_with_strategy(
            "session:abc123",
            test_data.clone(),
            CacheStrategy::ShortTerm,
        )
        .await?;

    if let Some(cached) = manager.get("session:abc123").await? {
        println!("✅ Retrieved from QuickCache: {cached:?}");
    }

    let stats = manager.get_stats();
    println!(
        "📊 Stats - L1 hits: {}, L2 hits: {}, misses: {}",
        stats.l1_hits, stats.l2_hits, stats.misses
    );

    println!("\n⚡ QuickCache is optimized for maximum throughput");
    println!(" Use it when you need sub-microsecond latency");
    Ok(())
}