multi-tier-cache 0.6.5

Customizable multi-tier cache with L1 (Moka in-memory) + L2 (Redis distributed) defaults, expandable to L3/L4+, cross-instance invalidation via Pub/Sub, stampede protection, and flexible TTL scaling
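A minimal usage sketch, assembled from the API calls exercised by the integration tests below. The key names and JSON payloads are placeholders, and the plain `build()` is assumed to wire up the default L1 (Moka in-memory) + L2 (Redis) pair described above; treat this as an illustration of the shape of the API rather than the crate's documented example.

use bytes::Bytes;
use multi_tier_cache::{CacheStrategy, CacheSystemBuilder};

#[tokio::main]
async fn main() {
    // Default build: the documented L1 (Moka in-memory) + L2 (Redis) tiers.
    let cache = CacheSystemBuilder::new()
        .build()
        .await
        .expect("failed to build cache system");
    let manager = cache.cache_manager();

    // Write-through to every tier using a predefined TTL strategy.
    manager
        .set_with_strategy("user:42", Bytes::from("{\"id\": 42}"), CacheStrategy::ShortTerm)
        .await
        .expect("failed to set");

    // Reads are served from the fastest tier that holds the key (L1 first).
    let hit = manager.get("user:42").await.expect("failed to get");
    assert!(hit.is_some());

    // Stampede protection: concurrent callers for a missing key share one compute.
    let value = manager
        .get_or_compute_with("user:43", CacheStrategy::ShortTerm, || async move {
            Ok(Bytes::from("{\"id\": 43}"))
        })
        .await
        .expect("compute failed");
    println!("cached {} bytes", value.len());
}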
//! Integration tests for multi-tier cache architecture (v0.5.0+)

// use anyhow::Result;
use bytes::Bytes;
use multi_tier_cache::error::CacheResult;
use std::sync::Arc;
use std::time::Duration;

mod common;
use common::{test_data, test_key};
use multi_tier_cache::{
    CacheBackend, CacheManager, CacheStrategy, CacheSystemBuilder, L2Cache, TierConfig,
};

/// Test basic multi-tier get/set operations
#[tokio::test]
async fn test_multi_tier_basic_operations() {
    // Create 3-tier cache: L1 + L2 + L3 (all using L2Cache for testing)
    // Note: In production, L1 would be Moka-based, but for multi-tier mode,
    // all backends must implement L2CacheBackend for TTL support
    let l1 = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L1")),
    );
    let l2 = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L2")),
    );
    let l3 = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L3")),
    );

    let cache = CacheSystemBuilder::new()
        .with_tier(l1, TierConfig::as_l1())
        .with_tier(l2, TierConfig::as_l2())
        .with_tier(l3, TierConfig::as_l3())
        .build()
        .await
        .unwrap_or_else(|_| panic!("Failed to build cache system"));

    let manager = cache.cache_manager();

    // Test set_with_strategy - should store in all tiers
    let test_data = Bytes::from("{\"user\": \"alice\", \"id\": 123}");
    manager
        .set_with_strategy("test:multi:1", test_data.clone(), CacheStrategy::ShortTerm)
        .await
        .unwrap_or_else(|_| panic!("Failed to set cache"));

    // Test get - should hit L1
    let result = manager
        .get("test:multi:1")
        .await
        .unwrap_or_else(|_| panic!("Failed to get cache"));
    assert_eq!(result, Some(test_data.clone()));

    // Verify tier stats
    let tier_stats = manager.get_tier_stats();
    if tier_stats.is_empty() {
        panic!("Expected tier stats for multi-tier mode");
    } else {
        println!("Multi-tier stats:");
        for stats in &tier_stats {
            println!(
                "  L{}: {} hits ({})",
                stats.tier_level,
                stats.hit_count(),
                stats.backend_name
            );
        }
        assert_eq!(tier_stats.len(), 3, "Should have 3 tiers");
    }

    println!("✅ Multi-tier basic operations test passed");
}

/// Test multi-tier statistics tracking
#[tokio::test]
async fn test_multi_tier_stats() {
    let l1 = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L1")),
    );
    let l2 = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L2")),
    );
    let l3 = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L3")),
    );

    let cache = CacheSystemBuilder::new()
        .with_tier(l1.clone(), TierConfig::as_l1())
        .with_tier(l2.clone(), TierConfig::as_l2())
        .with_tier(l3.clone(), TierConfig::as_l3())
        .build()
        .await
        .unwrap_or_else(|_| panic!("Failed to build cache system"));

    let manager = cache.cache_manager();

    // Store some data
    let test_data = Bytes::from("{\"stats\": \"test\"}");
    manager
        .set_with_strategy("test:stats:1", test_data.clone(), CacheStrategy::ShortTerm)
        .await
        .unwrap_or_else(|_| panic!("Failed to set cache"));

    // Retrieve data multiple times
    for _ in 0..5 {
        let _result = manager
            .get("test:stats:1")
            .await
            .unwrap_or_else(|_| panic!("Failed to get cache"));
    }

    // Verify tier-specific stats
    let tier_stats = manager.get_tier_stats();
    if !tier_stats.is_empty() {
        assert_eq!(tier_stats.len(), 3, "Should have 3 tiers");

        // L1 should have most hits
        let l1_stats = tier_stats
            .iter()
            .find(|s| s.tier_level == 1)
            .unwrap_or_else(|| panic!("L1 stats missing"));
        assert!(
            l1_stats.hit_count() >= 4,
            "L1 should have at least 4 hits from repeated gets"
        );

        println!("Tier statistics:");
        for stats in &tier_stats {
            println!("  L{}: {} hits", stats.tier_level, stats.hit_count());
        }
    }

    // Verify overall stats
    let stats = manager.get_stats();
    assert!(stats.total_requests >= 5, "Should track all requests");
    assert!(stats.l1_hits >= 4, "Should have L1 hits");

    println!("✅ Multi-tier statistics test passed");
}

/// Test backward compatibility - legacy 2-tier mode should still work
#[tokio::test]
async fn test_backward_compatibility_legacy_mode() {
    // Use old-style constructor (no tiers)
    let cache = CacheSystemBuilder::new()
        .build()
        .await
        .unwrap_or_else(|_| panic!("Failed to build cache system"));

    let manager = cache.cache_manager();

    // Standard operations should work
    let test_data = Bytes::from("{\"legacy\": \"mode\"}");
    manager
        .set_with_strategy("test:legacy:1", test_data.clone(), CacheStrategy::ShortTerm)
        .await
        .unwrap_or_else(|_| panic!("Failed to set cache"));

    let result = manager
        .get("test:legacy:1")
        .await
        .unwrap_or_else(|_| panic!("Failed to get cache"));
    assert_eq!(result, Some(test_data));

    // Tier stats should now reflect the 2-tier setup even in legacy mode (unified architecture)
    let tier_stats = manager.get_tier_stats();
    assert_eq!(
        tier_stats.len(),
        2,
        "Legacy mode should have 2 tiers in unified architecture"
    );

    // Regular stats should work
    let stats = manager.get_stats();
    assert!(stats.total_requests > 0, "Should have request stats");

    println!("✅ Backward compatibility test passed");
}

/// Test TTL scaling across tiers
#[tokio::test]
async fn test_multi_tier_ttl_scaling() {
    let l1 = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L1")),
    );
    let l2 = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L2")),
    );
    let l3 = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L3")),
    );

    let cache = CacheSystemBuilder::new()
        .with_tier(l1, TierConfig::as_l1())
        .with_tier(l2, TierConfig::as_l2())
        .with_tier(
            l3,
            TierConfig::as_l3(), // L3 has 2x TTL multiplier
        )
        .build()
        .await
        .unwrap_or_else(|_| panic!("Failed to build cache system"));

    let manager = cache.cache_manager();

    // Set with 10 second TTL
    let test_data = Bytes::from("{\"ttl\": \"test\"}");
    manager
        .set_with_strategy(
            "test:ttl:1",
            test_data,
            CacheStrategy::Custom(Duration::from_secs(10)),
        )
        .await
        .unwrap_or_else(|_| panic!("Failed to set cache"));

    // L1 and L2 should have 10s TTL, L3 should have 20s TTL (2x multiplier)
    // We can't directly verify TTL from outside, but the operation should succeed

    let result = manager
        .get("test:ttl:1")
        .await
        .unwrap_or_else(|_| panic!("Failed to get cache"));
    assert!(result.is_some(), "Should retrieve data with scaled TTL");

    println!("✅ Multi-tier TTL scaling test passed");
}

/// Test multi-tier cache miss
#[tokio::test]
async fn test_multi_tier_cache_miss() {
    let l1 = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L1")),
    );
    let l2 = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L2")),
    );

    let cache = CacheSystemBuilder::new()
        .with_tier(l1, TierConfig::as_l1())
        .with_tier(l2, TierConfig::as_l2())
        .build()
        .await
        .unwrap_or_else(|_| panic!("Failed to build cache system"));

    let manager = cache.cache_manager();

    // Try to get non-existent key
    let result = manager
        .get("test:miss:nonexistent")
        .await
        .unwrap_or_else(|_| panic!("Failed to get cache"));
    assert!(result.is_none(), "Should return None for cache miss");

    // Verify miss count
    let stats = manager.get_stats();
    assert!(stats.misses > 0, "Should track cache misses");

    println!("✅ Multi-tier cache miss test passed");
}

/// Test convenience methods: `with_l3()` and `with_l4()`
#[tokio::test]
async fn test_convenience_methods() {
    let l1_backend = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L1")),
    );
    let l2_backend = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L2")),
    );
    let l3_backend = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L3")),
    );
    let l4_backend = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L4")),
    );

    // Test with_l3() and with_l4() convenience methods
    let cache = CacheSystemBuilder::new()
        .with_tier(l1_backend, TierConfig::as_l1())
        .with_tier(l2_backend, TierConfig::as_l2())
        .with_l3(l3_backend)
        .with_l4(l4_backend)
        .build()
        .await
        .unwrap_or_else(|_| panic!("Failed to build cache system"));

    let manager = cache.cache_manager();

    // Verify tier stats
    let tier_stats = manager.get_tier_stats();
    if !tier_stats.is_empty() {
        // Should have L1 + L2 + L3 + L4 = 4 tiers
        assert_eq!(tier_stats.len(), 4, "Should have 4 tiers");

        // Check that all tiers are present
        let has_l1 = tier_stats.iter().any(|s| s.tier_level == 1);
        let has_l2 = tier_stats.iter().any(|s| s.tier_level == 2);
        let has_l3 = tier_stats.iter().any(|s| s.tier_level == 3);
        let has_l4 = tier_stats.iter().any(|s| s.tier_level == 4);

        assert!(has_l1, "Should have L1 tier");
        assert!(has_l2, "Should have L2 tier");
        assert!(has_l3, "Should have L3 tier");
        assert!(has_l4, "Should have L4 tier");
    }

    println!("✅ Convenience methods test passed");
}

/// Test multi-tier stampede protection
#[tokio::test]
async fn test_multi_tier_stampede_protection() {
    use std::sync::atomic::{AtomicU32, Ordering};
    use tokio::task::JoinSet;

    let l1 = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L1")),
    );
    let l2 = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L2")),
    );
    let l3 = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L3")),
    );

    let cache = CacheSystemBuilder::new()
        .with_tier(l1, TierConfig::as_l1())
        .with_tier(l2, TierConfig::as_l2())
        .with_l3(l3)
        .build()
        .await
        .unwrap_or_else(|_| panic!("Failed to build cache system"));

    let manager = Arc::new(cache.cache_manager().clone());
    let key = test_key("stampede_multi_tier");
    let compute_count = Arc::new(AtomicU32::new(0));

    // Spawn 50 concurrent requests for same key
    let mut tasks: JoinSet<CacheResult<Bytes>> = JoinSet::new();
    for _ in 0..50 {
        let manager_clone: Arc<CacheManager> = Arc::clone(&manager);
        let key_clone = key.clone();
        let counter_clone = Arc::clone(&compute_count);

        tasks.spawn(async move {
            manager_clone
                .get_or_compute_with(&key_clone, CacheStrategy::ShortTerm, || {
                    counter_clone.fetch_add(1, Ordering::SeqCst);
                    async move { Ok(test_data::bytes_user(999)) }
                })
                .await
        });
    }

    // Wait for all tasks
    while let Some(result) = tasks.join_next().await {
        result
            .unwrap_or_else(|_| panic!("Task panicked"))
            .unwrap_or_else(|_| panic!("Compute failed"));
    }

    // Stampede protection: only ONE compute should have happened
    let compute_calls = compute_count.load(Ordering::SeqCst);
    assert_eq!(
        compute_calls, 1,
        "Expected exactly 1 compute call with multi-tier stampede protection, got {compute_calls}",
    );

    // Verify data is in L1
    let cached_in_l1 = manager
        .get(&key)
        .await
        .unwrap_or_else(|_| panic!("Failed to get cache"));
    assert!(
        cached_in_l1.is_some(),
        "Data should be cached in L1 after stampede"
    );

    println!("✅ Multi-tier stampede protection test passed");
}

/// Test stampede protection retrieves from L3 instead of computing
#[tokio::test]
async fn test_stampede_retrieves_from_l3() {
    use std::sync::atomic::{AtomicU32, Ordering};
    use tokio::task::JoinSet;

    let l1 = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L1")),
    );
    let l2 = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L2")),
    );
    let l3 = Arc::new(
        L2Cache::new()
            .await
            .unwrap_or_else(|_| panic!("Failed to create L3")),
    );

    let cache = CacheSystemBuilder::new()
        .with_tier(l1.clone(), TierConfig::as_l1())
        .with_tier(l2.clone(), TierConfig::as_l2())
        .with_l3(l3.clone())
        .build()
        .await
        .unwrap_or_else(|_| panic!("Failed to build cache system"));

    let manager = Arc::new(cache.cache_manager().clone());
    let key = test_key("stampede_l3_hit");
    let data = test_data::bytes_user(777);

    // Pre-populate ONLY L3 (skip L1 and L2)
    l3.set_with_ttl(&key, data.clone(), Duration::from_secs(300))
        .await
        .unwrap_or_else(|_| panic!("Failed to set L3"));

    let compute_count = Arc::new(AtomicU32::new(0));

    // Spawn 30 concurrent requests
    let mut tasks: JoinSet<CacheResult<Bytes>> = JoinSet::new();
    for _ in 0..30 {
        let manager_clone: Arc<CacheManager> = Arc::clone(&manager);
        let key_clone = key.clone();
        let counter_clone = Arc::clone(&compute_count);

        tasks.spawn(async move {
            manager_clone
                .get_or_compute_with(&key_clone, CacheStrategy::ShortTerm, || {
                    counter_clone.fetch_add(1, Ordering::SeqCst);
                    async move {
                        // This should NEVER be called since data is in L3
                        panic!("Compute should not be called when data exists in L3!");
                    }
                })
                .await
        });
    }

    // Wait for all tasks
    while let Some(result) = tasks.join_next().await {
        result
            .unwrap_or_else(|_| panic!("Task panicked"))
            .unwrap_or_else(|_| panic!("Should retrieve from L3"));
    }

    // Should NOT have computed (data retrieved from L3)
    let compute_calls = compute_count.load(Ordering::SeqCst);
    assert_eq!(
        compute_calls, 0,
        "Expected 0 compute calls (data should be retrieved from L3), got {compute_calls}",
    );

    // Verify data was promoted to L1
    let l1_data: Option<Bytes> = l1.get(&key).await;
    assert!(l1_data.is_some(), "Data should be promoted from L3 to L1");
    assert_eq!(
        l1_data.unwrap_or_else(|| panic!("L1 data missing")),
        data,
        "Promoted data should match original"
    );

    println!("✅ Stampede retrieves from L3 test passed");
}