//! multi-tier-cache 0.6.5
//!
//! Customizable multi-tier cache with L1 (Moka in-memory) + L2 (Redis
//! distributed) defaults, expandable to L3/L4+, cross-instance invalidation
//! via Pub/Sub, stampede protection, and flexible TTL scaling.
//!
//! Benchmarks for serialization and type-safe caching

use anyhow::Context;
use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main};
use multi_tier_cache::{Bytes, CacheStrategy, CacheSystem};
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::time::Duration;
use tokio::runtime::Runtime;

/// Benchmark fixture: a small serializable record used as the cached payload.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct User {
    id: u64,
    name: String,
    email: String,
}

impl User {
    /// Build a deterministic user whose `name` and `email` derive from `id`.
    fn new(id: u64) -> Self {
        let name = format!("User {id}");
        let email = format!("user{id}@example.com");
        Self { id, name, email }
    }
}

/// Build the `CacheSystem` under test together with the Tokio runtime that
/// drives it.
///
/// Sets `REDIS_URL` so the L2 tier targets a local Redis; the benchmarks
/// therefore require a Redis instance listening on 127.0.0.1:6379.
///
/// # Panics
/// Panics if the runtime or the cache system cannot be created (e.g. Redis
/// is unreachable).
fn setup_cache() -> (CacheSystem, Runtime) {
    // `.expect` keeps the underlying error in the panic message, unlike the
    // previous `unwrap_or_else(|_| panic!(..))` which discarded it.
    let rt = Runtime::new().expect("Failed to create runtime");
    let cache = rt.block_on(async {
        // TODO: Audit that the environment access only happens in single-threaded code.
        // NOTE(review): `Runtime::new()` is the multi-threaded runtime, so
        // worker threads already exist here — confirm no other thread reads
        // the environment concurrently before trusting this `unsafe`.
        unsafe { std::env::set_var("REDIS_URL", "redis://127.0.0.1:6379") };
        CacheSystem::new()
            .await
            .expect("Failed to create cache system")
    });
    (cache, rt)
}

/// Benchmark JSON vs typed caching.
///
/// Compares a raw round-trip of JSON bytes (`set_with_strategy` + `get`)
/// against the typed API (`get_or_compute_typed`), both under
/// `CacheStrategy::ShortTerm`. The typed benchmark first seeds the key, then
/// reads it back with a compute closure that must never run.
fn bench_json_vs_typed(c: &mut Criterion) {
    let (cache, rt) = setup_cache();

    let mut group = c.benchmark_group("serialization");

    group.bench_function("json_cache", |b| {
        b.iter(|| {
            rt.block_on(async {
                // Fresh random key per iteration so we measure a cold write
                // followed by a warm read, not repeated hits on one entry.
                let key = format!("bench:json:{}", rand::random::<u32>());
                let user = json!({
                    "id": 123,
                    "name": "Test User",
                    "email": "test@example.com"
                });

                // `serde_json::Error` converts into `anyhow::Error` via `?`,
                // so the explicit `map_err(anyhow!)` was redundant.
                let user_bytes = Bytes::from(serde_json::to_vec(&user)?);
                cache
                    .cache_manager()
                    .set_with_strategy(&key, user_bytes, CacheStrategy::ShortTerm)
                    .await
                    .context("Failed to set cache")?;

                black_box(
                    cache
                        .cache_manager()
                        .get(&key)
                        .await
                        .context("Failed to get cache")?,
                );
                Ok::<(), anyhow::Error>(())
            })
            .unwrap_or_else(|e| panic!("Benchmark execution failed: {e:?}"));
        });
    });

    group.bench_function("typed_cache", |b| {
        b.iter(|| {
            rt.block_on(async {
                let key = format!("bench:typed:{}", rand::random::<u32>());
                let user = User::new(123);

                // Seed the entry; the closure computes the value on a miss.
                // `.expect` preserves the error, unlike the previous
                // `unwrap_or_else(|_| panic!(..))` which discarded it.
                cache
                    .cache_manager()
                    .get_or_compute_typed::<User, _, _>(&key, CacheStrategy::ShortTerm, || {
                        let u = user.clone();
                        async move { Ok(u) }
                    })
                    .await
                    .expect("Failed to set cache");

                // Second lookup must hit the cache: the compute closure
                // panics to prove no recomputation happens.
                black_box(
                    cache
                        .cache_manager()
                        .get_or_compute_typed::<User, _, _>(
                            &key,
                            CacheStrategy::ShortTerm,
                            || async {
                                panic!("Should not compute");
                            },
                        )
                        .await
                        .expect("Failed to get cache"),
                );
            });
        });
    });

    group.finish();
}

/// Benchmark cache round-trips across different payload sizes.
///
/// Writes and reads JSON payloads of ~100 B, ~1 KiB, and ~10 KiB under
/// `CacheStrategy::ShortTerm`, parameterized via `bench_with_input`.
fn bench_data_sizes(c: &mut Criterion) {
    let (cache, rt) = setup_cache();

    let mut group = c.benchmark_group("data_size");
    // Larger payloads need a longer window for stable measurements.
    group.measurement_time(Duration::from_secs(10));

    for size in &[100, 1024, 10240] {
        group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| {
            b.iter(|| {
                rt.block_on(async {
                    // Fresh random key per iteration: cold write, warm read.
                    let key = format!("bench:size:{}", rand::random::<u32>());
                    let data = json!({"data": "x".repeat(size)});

                    // `serde_json::Error` converts into `anyhow::Error` via
                    // `?`, so the explicit `map_err(anyhow!)` was redundant.
                    let data_bytes = Bytes::from(serde_json::to_vec(&data)?);
                    cache
                        .cache_manager()
                        .set_with_strategy(&key, data_bytes, CacheStrategy::ShortTerm)
                        .await
                        .context("Failed to set cache")?;

                    black_box(
                        cache
                            .cache_manager()
                            .get(&key)
                            .await
                            .context("Failed to get cache")?,
                    );
                    Ok::<(), anyhow::Error>(())
                })
                .unwrap_or_else(|e| panic!("Benchmark execution failed: {e:?}"));
            });
        });
    }

    group.finish();
}

// Register both benchmark functions under the `benches` group and generate
// the binary's `main` entry point for the criterion harness.
criterion_group!(benches, bench_json_vs_typed, bench_data_sizes);
criterion_main!(benches);