multi_tier_cache/
cache_manager.rs

//! Cache Manager - Unified Cache Operations
//!
//! Manages operations across L1 (Moka) and L2 (Redis) caches with intelligent fallback.

use anyhow::Result;
use dashmap::DashMap;
use std::future::Future;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::Mutex;

use tracing::{debug, error, info, warn};

use super::invalidation::{
    AtomicInvalidationStats, InvalidationConfig, InvalidationMessage, InvalidationPublisher,
    InvalidationStats, InvalidationSubscriber,
};
use crate::backends::{L1Cache, L2Cache};
use crate::traits::{CacheBackend, L2CacheBackend, StreamingBackend};

/// Type alias for the in-flight requests map
type InFlightMap = DashMap<String, Arc<Mutex<()>>>;

/// RAII cleanup guard for in-flight request tracking
/// Ensures that entries are removed from `DashMap` even on early return or panic
struct CleanupGuard<'a> {
    map: &'a InFlightMap,
    key: String,
}

impl Drop for CleanupGuard<'_> {
    fn drop(&mut self) {
        self.map.remove(&self.key);
    }
}

/// Cache strategies for different data types
#[derive(Debug, Clone)]
#[allow(dead_code)]
pub enum CacheStrategy {
    /// Real-time data - 10 seconds TTL
    RealTime,
    /// Short-term data - 5 minutes TTL
    ShortTerm,
    /// Medium-term data - 1 hour TTL
    MediumTerm,
    /// Long-term data - 3 hours TTL
    LongTerm,
    /// Custom TTL
    Custom(Duration),
    /// Default strategy (5 minutes)
    Default,
}

impl CacheStrategy {
    /// Convert strategy to duration
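    ///
    /// A minimal usage sketch (illustrative only; mirrors the match arms below):
    ///
    /// ```rust,ignore
    /// use std::time::Duration;
    ///
    /// assert_eq!(CacheStrategy::RealTime.to_duration(), Duration::from_secs(10));
    /// assert_eq!(CacheStrategy::Custom(Duration::from_secs(42)).to_duration(), Duration::from_secs(42));
    /// ```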
    #[must_use]
    pub fn to_duration(&self) -> Duration {
        match self {
            Self::RealTime => Duration::from_secs(10),
            Self::ShortTerm | Self::Default => Duration::from_secs(300), // 5 minutes
            Self::MediumTerm => Duration::from_secs(3600),               // 1 hour
            Self::LongTerm => Duration::from_secs(10800),                // 3 hours
            Self::Custom(duration) => *duration,
        }
    }
}

/// Statistics for a single cache tier
#[derive(Debug)]
pub struct TierStats {
    /// Tier level (1 = L1, 2 = L2, 3 = L3, etc.)
    pub tier_level: usize,
    /// Number of cache hits at this tier
    pub hits: AtomicU64,
    /// Backend name for identification
    pub backend_name: String,
}

impl Clone for TierStats {
    fn clone(&self) -> Self {
        Self {
            tier_level: self.tier_level,
            hits: AtomicU64::new(self.hits.load(Ordering::Relaxed)),
            backend_name: self.backend_name.clone(),
        }
    }
}

impl TierStats {
    fn new(tier_level: usize, backend_name: String) -> Self {
        Self {
            tier_level,
            hits: AtomicU64::new(0),
            backend_name,
        }
    }

    /// Get current hit count
    pub fn hit_count(&self) -> u64 {
        self.hits.load(Ordering::Relaxed)
    }
}

/// A single cache tier in the multi-tier architecture
pub struct CacheTier {
    /// Cache backend for this tier
    backend: Arc<dyn L2CacheBackend>,
    /// Tier level (1 = hottest/fastest, higher = colder/slower)
    tier_level: usize,
    /// Enable automatic promotion to upper tiers on cache hit
    promotion_enabled: bool,
    /// TTL scale factor (multiplier for TTL when storing/promoting)
    ttl_scale: f64,
    /// Statistics for this tier
    stats: TierStats,
}

impl CacheTier {
    /// Create a new cache tier
    pub fn new(
        backend: Arc<dyn L2CacheBackend>,
        tier_level: usize,
        promotion_enabled: bool,
        ttl_scale: f64,
    ) -> Self {
        let backend_name = backend.name().to_string();
        Self {
            backend,
            tier_level,
            promotion_enabled,
            ttl_scale,
            stats: TierStats::new(tier_level, backend_name),
        }
    }

    /// Get value with TTL from this tier
    async fn get_with_ttl(&self, key: &str) -> Option<(serde_json::Value, Option<Duration>)> {
        self.backend.get_with_ttl(key).await
    }

    /// Set value with TTL in this tier
    async fn set_with_ttl(&self, key: &str, value: serde_json::Value, ttl: Duration) -> Result<()> {
        let scaled_ttl = Duration::from_secs_f64(ttl.as_secs_f64() * self.ttl_scale);
        self.backend.set_with_ttl(key, value, scaled_ttl).await
    }

    /// Remove value from this tier
    async fn remove(&self, key: &str) -> Result<()> {
        self.backend.remove(key).await
    }

    /// Record a cache hit for this tier
    fn record_hit(&self) {
        self.stats.hits.fetch_add(1, Ordering::Relaxed);
    }
}

/// Configuration for a cache tier (used in builder pattern)
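///
/// A hedged sketch of the builder-style presets and overrides defined below:
///
/// ```rust,ignore
/// // Cold-tier preset, then override: keep data 4x longer, never promote
/// let cold = TierConfig::as_l3().with_ttl_scale(4.0).with_promotion(false);
/// // Fully custom fifth tier
/// let archive = TierConfig::new(5).with_ttl_scale(16.0);
/// ```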
#[derive(Debug, Clone)]
pub struct TierConfig {
    /// Tier level (1, 2, 3, 4...)
    pub tier_level: usize,
    /// Enable promotion to upper tiers on hit
    pub promotion_enabled: bool,
    /// TTL scale factor (1.0 = same as base TTL)
    pub ttl_scale: f64,
}

impl TierConfig {
    /// Create new tier configuration
    #[must_use]
    pub fn new(tier_level: usize) -> Self {
        Self {
            tier_level,
            promotion_enabled: true,
            ttl_scale: 1.0,
        }
    }

    /// Configure as L1 (hot tier)
    #[must_use]
    pub fn as_l1() -> Self {
        Self {
            tier_level: 1,
            promotion_enabled: false, // L1 is already top tier
            ttl_scale: 1.0,
        }
    }

    /// Configure as L2 (warm tier)
    #[must_use]
    pub fn as_l2() -> Self {
        Self {
            tier_level: 2,
            promotion_enabled: true,
            ttl_scale: 1.0,
        }
    }

    /// Configure as L3 (cold tier) with longer TTL
    #[must_use]
    pub fn as_l3() -> Self {
        Self {
            tier_level: 3,
            promotion_enabled: true,
            ttl_scale: 2.0, // Keep data 2x longer
        }
    }

    /// Configure as L4 (archive tier) with much longer TTL
    #[must_use]
    pub fn as_l4() -> Self {
        Self {
            tier_level: 4,
            promotion_enabled: true,
            ttl_scale: 8.0, // Keep data 8x longer
        }
    }

    /// Set promotion enabled
    #[must_use]
    pub fn with_promotion(mut self, enabled: bool) -> Self {
        self.promotion_enabled = enabled;
        self
    }

    /// Set TTL scale factor
    #[must_use]
    pub fn with_ttl_scale(mut self, scale: f64) -> Self {
        self.ttl_scale = scale;
        self
    }

    /// Set tier level
    #[must_use]
    pub fn with_level(mut self, level: usize) -> Self {
        self.tier_level = level;
        self
    }
}

/// Proxy wrapper to convert `L2CacheBackend` to `CacheBackend`
/// (Rust doesn't support automatic trait upcasting for trait objects)
struct ProxyCacheBackend {
    backend: Arc<dyn L2CacheBackend>,
}

#[async_trait::async_trait]
impl CacheBackend for ProxyCacheBackend {
    async fn get(&self, key: &str) -> Option<serde_json::Value> {
        self.backend.get(key).await
    }

    async fn set_with_ttl(&self, key: &str, value: serde_json::Value, ttl: Duration) -> Result<()> {
        self.backend.set_with_ttl(key, value, ttl).await
    }

    async fn remove(&self, key: &str) -> Result<()> {
        self.backend.remove(key).await
    }

    async fn health_check(&self) -> bool {
        self.backend.health_check().await
    }

    fn name(&self) -> &'static str {
        self.backend.name()
    }
}

/// Cache Manager - Unified operations across multiple cache tiers
///
/// Supports both legacy 2-tier (L1+L2) and new multi-tier (L1+L2+L3+L4+...) architectures.
/// When `tiers` is Some, it uses the dynamic multi-tier system. Otherwise, it falls back to
/// legacy L1+L2 behavior for backward compatibility.
pub struct CacheManager {
    /// Dynamic multi-tier cache architecture (v0.5.0+)
    /// If Some, this takes precedence over the `l1_cache`/`l2_cache` fields
    tiers: Option<Vec<CacheTier>>,

    // ===== Legacy fields (v0.1.0 - v0.4.x) =====
    // Maintained for backward compatibility
    /// L1 Cache (trait object for pluggable backends)
    l1_cache: Arc<dyn CacheBackend>,
    /// L2 Cache (trait object for pluggable backends)
    l2_cache: Arc<dyn L2CacheBackend>,
    /// L2 Cache concrete instance (for invalidation `scan_keys`)
    l2_cache_concrete: Option<Arc<L2Cache>>,

    /// Optional streaming backend (defaults to L2 if it implements `StreamingBackend`)
    streaming_backend: Option<Arc<dyn StreamingBackend>>,
    /// Statistics (`AtomicU64` is already thread-safe, no Arc needed)
    total_requests: AtomicU64,
    l1_hits: AtomicU64,
    l2_hits: AtomicU64,
    misses: AtomicU64,
    promotions: AtomicUsize,
    /// In-flight requests to prevent Cache Stampede on L2/compute operations
    in_flight_requests: Arc<InFlightMap>,
    /// Invalidation publisher (for broadcasting invalidation messages)
    invalidation_publisher: Option<Arc<Mutex<InvalidationPublisher>>>,
    /// Invalidation subscriber (for receiving invalidation messages)
    invalidation_subscriber: Option<Arc<InvalidationSubscriber>>,
    /// Invalidation statistics
    invalidation_stats: Arc<AtomicInvalidationStats>,
}

impl CacheManager {
    /// Create new cache manager with trait objects (pluggable backends)
    ///
    /// This is the primary constructor for v0.3.0+, supporting custom cache backends.
    ///
    /// # Arguments
    ///
    /// * `l1_cache` - Any L1 cache backend implementing the `CacheBackend` trait
    /// * `l2_cache` - Any L2 cache backend implementing the `L2CacheBackend` trait
    /// * `streaming_backend` - Optional streaming backend (None to disable streaming)
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use multi_tier_cache::{CacheManager, L1Cache, L2Cache};
    /// use std::sync::Arc;
    ///
    /// let l1: Arc<dyn CacheBackend> = Arc::new(L1Cache::new()?);
    /// let l2: Arc<dyn L2CacheBackend> = Arc::new(L2Cache::new().await?);
    ///
    /// let manager = CacheManager::new_with_backends(l1, l2, None)?;
    /// ```
    /// # Errors
    ///
    /// Currently always returns `Ok`; the `Result` return type is kept for future compatibility.
    pub fn new_with_backends(
        l1_cache: Arc<dyn CacheBackend>,
        l2_cache: Arc<dyn L2CacheBackend>,
        streaming_backend: Option<Arc<dyn StreamingBackend>>,
    ) -> Result<Self> {
        debug!("Initializing Cache Manager with custom backends...");
        debug!("  L1: {}", l1_cache.name());
        debug!("  L2: {}", l2_cache.name());
        if streaming_backend.is_some() {
            debug!("  Streaming: enabled");
        } else {
            debug!("  Streaming: disabled");
        }

        Ok(Self {
            tiers: None, // Legacy mode: use l1_cache/l2_cache fields
            l1_cache,
            l2_cache,
            l2_cache_concrete: None,
            streaming_backend,
            total_requests: AtomicU64::new(0),
            l1_hits: AtomicU64::new(0),
            l2_hits: AtomicU64::new(0),
            misses: AtomicU64::new(0),
            promotions: AtomicUsize::new(0),
            in_flight_requests: Arc::new(DashMap::new()),
            invalidation_publisher: None,
            invalidation_subscriber: None,
            invalidation_stats: Arc::new(AtomicInvalidationStats::default()),
        })
    }

    /// Create new cache manager with default backends (backward compatible)
    ///
    /// This is the legacy constructor maintained for backward compatibility.
    /// New code should prefer `new_with_backends()` or `CacheSystemBuilder`.
    ///
    /// # Arguments
    ///
    /// * `l1_cache` - Moka L1 cache instance
    /// * `l2_cache` - Redis L2 cache instance
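    ///
    /// # Example
    ///
    /// A hedged sketch, mirroring the constructors used in the other doc examples here:
    ///
    /// ```rust,ignore
    /// let l1 = Arc::new(L1Cache::new()?);
    /// let l2 = Arc::new(L2Cache::new().await?);
    /// let manager = CacheManager::new(l1, l2).await?;
    /// ```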
    /// # Errors
    ///
    /// Returns an error if the Redis connection fails.
    pub async fn new(l1_cache: Arc<L1Cache>, l2_cache: Arc<L2Cache>) -> Result<Self> {
        debug!("Initializing Cache Manager...");

        // Convert concrete types to trait objects
        let l1_backend: Arc<dyn CacheBackend> = l1_cache.clone();
        let l2_backend: Arc<dyn L2CacheBackend> = l2_cache.clone();

        // Create RedisStreams backend for streaming functionality
        let redis_url =
            std::env::var("REDIS_URL").unwrap_or_else(|_| "redis://127.0.0.1:6379".to_string());
        let redis_streams = crate::redis_streams::RedisStreams::new(&redis_url).await?;
        let streaming_backend: Arc<dyn StreamingBackend> = Arc::new(redis_streams);

        Self::new_with_backends(l1_backend, l2_backend, Some(streaming_backend))
    }

    /// Create new cache manager with invalidation support
    ///
    /// This constructor enables cross-instance cache invalidation via Redis Pub/Sub.
    ///
    /// # Arguments
    ///
    /// * `l1_cache` - Moka L1 cache instance
    /// * `l2_cache` - Redis L2 cache instance
    /// * `redis_url` - Redis connection URL for Pub/Sub
    /// * `config` - Invalidation configuration
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use multi_tier_cache::{CacheManager, L1Cache, L2Cache, InvalidationConfig};
    ///
    /// let config = InvalidationConfig {
    ///     channel: "my_app:cache:invalidate".to_string(),
    ///     ..Default::default()
    /// };
    ///
    /// let manager = CacheManager::new_with_invalidation(
    ///     l1, l2, "redis://localhost", config
    /// ).await?;
    /// ```
    /// # Errors
    ///
    /// Returns an error if the Redis connection fails or invalidation setup fails.
    pub async fn new_with_invalidation(
        l1_cache: Arc<L1Cache>,
        l2_cache: Arc<L2Cache>,
        redis_url: &str,
        config: InvalidationConfig,
    ) -> Result<Self> {
        debug!("Initializing Cache Manager with Invalidation...");
        debug!("  Pub/Sub channel: {}", config.channel);

        // Convert concrete types to trait objects
        let l1_backend: Arc<dyn CacheBackend> = l1_cache.clone();
        let l2_backend: Arc<dyn L2CacheBackend> = l2_cache.clone();

        // Create RedisStreams backend for streaming functionality
        let redis_streams = crate::redis_streams::RedisStreams::new(redis_url).await?;
        let streaming_backend: Arc<dyn StreamingBackend> = Arc::new(redis_streams);

        // Create publisher
        let client = redis::Client::open(redis_url)?;
        let conn_manager = redis::aio::ConnectionManager::new(client).await?;
        let publisher = InvalidationPublisher::new(conn_manager, config.clone());

        // Create subscriber
        let subscriber = InvalidationSubscriber::new(redis_url, config.clone())?;
        let invalidation_stats = Arc::new(AtomicInvalidationStats::default());

        let manager = Self {
            tiers: None, // Legacy mode: use l1_cache/l2_cache fields
            l1_cache: l1_backend,
            l2_cache: l2_backend,
            l2_cache_concrete: Some(l2_cache),
            streaming_backend: Some(streaming_backend),
            total_requests: AtomicU64::new(0),
            l1_hits: AtomicU64::new(0),
            l2_hits: AtomicU64::new(0),
            misses: AtomicU64::new(0),
            promotions: AtomicUsize::new(0),
            in_flight_requests: Arc::new(DashMap::new()),
            invalidation_publisher: Some(Arc::new(Mutex::new(publisher))),
            invalidation_subscriber: Some(Arc::new(subscriber)),
            invalidation_stats,
        };

        // Start subscriber with handler
        manager.start_invalidation_subscriber();

        info!("Cache Manager initialized with invalidation support");

        Ok(manager)
    }

    /// Create new cache manager with multi-tier architecture (v0.5.0+)
    ///
    /// This constructor enables dynamic multi-tier caching with 3, 4, or more tiers.
    /// Tiers are checked in order (lower `tier_level` = faster/hotter).
    ///
    /// # Arguments
    ///
    /// * `tiers` - Vector of configured cache tiers (must be sorted by `tier_level` ascending)
    /// * `streaming_backend` - Optional streaming backend
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use multi_tier_cache::{CacheManager, CacheTier, TierConfig, L1Cache, L2Cache};
    /// use std::sync::Arc;
    ///
    /// // L1 + L2 + L3 setup
    /// let l1 = Arc::new(L1Cache::new()?);
    /// let l2 = Arc::new(L2Cache::new().await?);
    /// let l3 = Arc::new(RocksDBCache::new("/tmp/cache").await?);
    ///
    /// let tiers = vec![
    ///     CacheTier::new(l1, 1, false, 1.0),  // L1 - no promotion
    ///     CacheTier::new(l2, 2, true, 1.0),   // L2 - promote to L1
    ///     CacheTier::new(l3, 3, true, 2.0),   // L3 - promote to L2&L1, 2x TTL
    /// ];
    ///
    /// let manager = CacheManager::new_with_tiers(tiers, None)?;
    /// ```
    /// # Errors
    ///
    /// Returns an error if tiers are not sorted by level or if no tiers are provided.
    pub fn new_with_tiers(
        tiers: Vec<CacheTier>,
        streaming_backend: Option<Arc<dyn StreamingBackend>>,
    ) -> Result<Self> {
        debug!("Initializing Multi-Tier Cache Manager...");
        debug!("  Tier count: {}", tiers.len());
        for tier in &tiers {
            debug!(
                "  L{}: {} (promotion={}, ttl_scale={})",
                tier.tier_level, tier.stats.backend_name, tier.promotion_enabled, tier.ttl_scale
            );
        }

        // Validate tiers are sorted by level
        for i in 1..tiers.len() {
            if let (Some(current), Some(prev)) = (tiers.get(i), tiers.get(i - 1)) {
                if current.tier_level <= prev.tier_level {
                    anyhow::bail!(
                        "Tiers must be sorted by tier_level ascending (found L{} after L{})",
                        current.tier_level,
                        prev.tier_level
                    );
                }
            }
        }

        // For backward compatibility with legacy code, we need dummy l1/l2 caches
        // Use the first tier as l1 and the second tier as l2 if available
        let (l1_cache, l2_cache) = if tiers.len() >= 2 {
            if let (Some(t0), Some(t1)) = (tiers.first(), tiers.get(1)) {
                (t0.backend.clone(), t1.backend.clone())
            } else {
                // Should be unreachable due to len check
                anyhow::bail!("Failed to access tiers 0 and 1");
            }
        } else if tiers.len() == 1 {
            // Only one tier - use it for both
            if let Some(t0) = tiers.first() {
                let tier = t0.backend.clone();
                (tier.clone(), tier)
            } else {
                anyhow::bail!("Failed to access tier 0");
            }
        } else {
            anyhow::bail!("At least one cache tier is required");
        };

        // Convert to CacheBackend trait for l1 (L2CacheBackend extends CacheBackend)
        let l1_backend: Arc<dyn CacheBackend> = Arc::new(ProxyCacheBackend {
            backend: l1_cache.clone(),
        });

        Ok(Self {
            tiers: Some(tiers),
            l1_cache: l1_backend,
            l2_cache,
            l2_cache_concrete: None,
            streaming_backend,
            total_requests: AtomicU64::new(0),
            l1_hits: AtomicU64::new(0),
            l2_hits: AtomicU64::new(0),
            misses: AtomicU64::new(0),
            promotions: AtomicUsize::new(0),
            in_flight_requests: Arc::new(DashMap::new()),
            invalidation_publisher: None,
            invalidation_subscriber: None,
            invalidation_stats: Arc::new(AtomicInvalidationStats::default()),
        })
    }

    /// Start the invalidation subscriber background task
    fn start_invalidation_subscriber(&self) {
        if let Some(subscriber) = &self.invalidation_subscriber {
            let l1_cache = Arc::clone(&self.l1_cache);
            let l2_cache_concrete = self.l2_cache_concrete.clone();

            subscriber.start(move |msg| {
                let l1 = Arc::clone(&l1_cache);
                let _l2 = l2_cache_concrete.clone();

                async move {
                    match msg {
                        InvalidationMessage::Remove { key } => {
                            // Remove from L1
                            l1.remove(&key).await?;
                            debug!("Invalidation: Removed '{}' from L1", key);
                        }
                        InvalidationMessage::Update {
                            key,
                            value,
                            ttl_secs,
                        } => {
                            // Update L1 with the new value
                            let ttl = ttl_secs
                                .map_or_else(|| Duration::from_secs(300), Duration::from_secs);
                            l1.set_with_ttl(&key, value, ttl).await?;
                            debug!("Invalidation: Updated '{}' in L1", key);
                        }
                        InvalidationMessage::RemovePattern { pattern } => {
                            // For pattern-based invalidation, we can't easily iterate the L1 cache,
                            // so we just log it. Pattern invalidation is mainly for L2;
                            // L1 entries will naturally expire via TTL.
                            debug!(
                                "Invalidation: Pattern '{}' invalidated (L1 will expire naturally)",
                                pattern
                            );
                        }
                        InvalidationMessage::RemoveBulk { keys } => {
                            // Remove multiple keys from L1
                            for key in keys {
                                if let Err(e) = l1.remove(&key).await {
                                    warn!("Failed to remove '{}' from L1: {}", key, e);
                                }
                            }
                            debug!("Invalidation: Bulk removed keys from L1");
                        }
                    }
                    Ok(())
                }
            });

            info!("Invalidation subscriber started");
        }
    }

    /// Get value from cache using the multi-tier architecture (v0.5.0+)
    ///
    /// This method iterates through all configured tiers and automatically promotes
    /// to upper tiers on a cache hit.
    async fn get_multi_tier(&self, key: &str) -> Result<Option<serde_json::Value>> {
        let Some(tiers) = self.tiers.as_ref() else {
            panic!("Tiers must be initialized in multi-tier mode")
        }; // Safe: only called when tiers is Some

        // Try each tier sequentially (sorted by tier_level)
        for (tier_index, tier) in tiers.iter().enumerate() {
            if let Some((value, ttl)) = tier.get_with_ttl(key).await {
                // Cache hit!
                tier.record_hit();

                // Promote to all upper tiers (if promotion enabled)
                if tier.promotion_enabled && tier_index > 0 {
                    let promotion_ttl = ttl.unwrap_or_else(|| CacheStrategy::Default.to_duration());

                    // Promote to all tiers above this one
                    for upper_tier in tiers.iter().take(tier_index).rev() {
                        if let Err(e) = upper_tier
                            .set_with_ttl(key, value.clone(), promotion_ttl)
                            .await
                        {
                            warn!(
                                "Failed to promote '{}' from L{} to L{}: {}",
                                key, tier.tier_level, upper_tier.tier_level, e
                            );
                        } else {
                            self.promotions.fetch_add(1, Ordering::Relaxed);
                            debug!(
                                "Promoted '{}' from L{} to L{} (TTL: {:?})",
                                key, tier.tier_level, upper_tier.tier_level, promotion_ttl
                            );
                        }
                    }
                }

                return Ok(Some(value));
            }
        }

        // Cache miss across all tiers
        Ok(None)
    }

    /// Get value from cache (L1 first, then L2 fallback with promotion)
    ///
    /// This method includes built-in Cache Stampede protection when cache misses occur.
    /// Multiple concurrent requests for the same missing key are coalesced to prevent
    /// unnecessary duplicate work on external data sources.
    ///
    /// Supports both legacy 2-tier mode and new multi-tier mode (v0.5.0+).
    ///
    /// # Arguments
    /// * `key` - Cache key to retrieve
    ///
    /// # Returns
    /// * `Ok(Some(value))` - Cache hit, value found in any tier
    /// * `Ok(None)` - Cache miss, value not found in any cache
    /// * `Err(error)` - Cache operation failed
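    ///
    /// # Example
    ///
    /// A hedged sketch; `manager` stands in for any `CacheManager` built above:
    ///
    /// ```rust,ignore
    /// if let Some(value) = manager.get("user:123").await? {
    ///     println!("cache hit: {value}");
    /// }
    /// ```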
    /// # Errors
    ///
    /// Returns an error if cache operation fails.
    ///
    /// # Panics
    ///
    /// Panics if tiers are not initialized in multi-tier mode (should not happen if constructed correctly).
    pub async fn get(&self, key: &str) -> Result<Option<serde_json::Value>> {
        self.total_requests.fetch_add(1, Ordering::Relaxed);

        // NEW: Multi-tier mode (v0.5.0+)
        if self.tiers.is_some() {
            // Fast path for L1 (first tier) - no locking needed
            if let Some(tier1) = self
                .tiers
                .as_ref()
                .expect("tiers must be initialized in multi-tier mode")
                .first()
            {
                if let Some((value, _ttl)) = tier1.get_with_ttl(key).await {
                    tier1.record_hit();
                    // Update legacy stats for backward compatibility
                    self.l1_hits.fetch_add(1, Ordering::Relaxed);
                    return Ok(Some(value));
                }
            }

            // L1 miss - use stampede protection for lower tiers
            let key_owned = key.to_string();
            let lock_guard = self
                .in_flight_requests
                .entry(key_owned.clone())
                .or_insert_with(|| Arc::new(Mutex::new(())))
                .clone();

            let _guard = lock_guard.lock().await;
            let cleanup_guard = CleanupGuard {
                map: &self.in_flight_requests,
                key: key_owned.clone(),
            };

            // Double-check L1 after acquiring lock
            if let Some(tier1) = self
                .tiers
                .as_ref()
                .expect("tiers must be initialized in multi-tier mode")
                .first()
            {
                if let Some((value, _ttl)) = tier1.get_with_ttl(key).await {
                    tier1.record_hit();
                    self.l1_hits.fetch_add(1, Ordering::Relaxed);
                    return Ok(Some(value));
                }
            }

            // Check remaining tiers with promotion
            let result = self.get_multi_tier(key).await?;

            if result.is_some() {
                // Hit in L2+ tier - update legacy stats
                if self
                    .tiers
                    .as_ref()
                    .expect("tiers must be initialized in multi-tier mode")
                    .len()
                    >= 2
                {
                    self.l2_hits.fetch_add(1, Ordering::Relaxed);
                }
            } else {
                self.misses.fetch_add(1, Ordering::Relaxed);
            }

            drop(cleanup_guard);
            return Ok(result);
        }

        // LEGACY: 2-tier mode (L1 + L2)
        // Fast path: Try L1 first (no locking needed)
        if let Some(value) = self.l1_cache.get(key).await {
            self.l1_hits.fetch_add(1, Ordering::Relaxed);
            return Ok(Some(value));
        }

        // L1 miss - implement Cache Stampede protection for L2 lookup
        let key_owned = key.to_string();
        let lock_guard = self
            .in_flight_requests
            .entry(key_owned.clone())
            .or_insert_with(|| Arc::new(Mutex::new(())))
            .clone();

        let _guard = lock_guard.lock().await;

        // RAII cleanup guard - ensures entry is removed even on early return or panic
        let cleanup_guard = CleanupGuard {
            map: &self.in_flight_requests,
            key: key_owned.clone(),
        };

        // Double-check L1 cache after acquiring lock
        // (Another concurrent request might have populated it while we were waiting)
        if let Some(value) = self.l1_cache.get(key).await {
            self.l1_hits.fetch_add(1, Ordering::Relaxed);
            // cleanup_guard will auto-remove entry on drop
            return Ok(Some(value));
        }

        // Check L2 cache with TTL information
        if let Some((value, ttl)) = self.l2_cache.get_with_ttl(key).await {
            self.l2_hits.fetch_add(1, Ordering::Relaxed);

            // Promote to L1 with same TTL as Redis (or default if no TTL)
            let promotion_ttl = ttl.unwrap_or_else(|| CacheStrategy::Default.to_duration());

            if self
                .l1_cache
                .set_with_ttl(key, value.clone(), promotion_ttl)
                .await
                .is_err()
            {
                // L1 promotion failed, but we still have the data
                warn!("Failed to promote key '{}' to L1 cache", key);
            } else {
                self.promotions.fetch_add(1, Ordering::Relaxed);
                debug!(
                    "Promoted '{}' from L2 to L1 with TTL {:?} (via get)",
                    key, promotion_ttl
                );
            }

            // cleanup_guard will auto-remove entry on drop
            return Ok(Some(value));
        }

        // Both L1 and L2 miss
        self.misses.fetch_add(1, Ordering::Relaxed);

        // cleanup_guard will auto-remove entry on drop
        drop(cleanup_guard);

        Ok(None)
    }

    /// Set value with specific cache strategy (all tiers)
    ///
    /// Supports both legacy 2-tier mode and new multi-tier mode (v0.5.0+).
    /// In multi-tier mode, stores to ALL tiers with their respective TTL scaling.
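    ///
    /// # Example
    ///
    /// A hedged sketch; `manager` stands in for any `CacheManager` built above:
    ///
    /// ```rust,ignore
    /// manager
    ///     .set_with_strategy("config:flags", serde_json::json!({ "beta": true }), CacheStrategy::MediumTerm)
    ///     .await?;
    /// ```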
    /// # Errors
    ///
    /// Returns an error if the cache set operation fails.
    pub async fn set_with_strategy(
        &self,
        key: &str,
        value: serde_json::Value,
        strategy: CacheStrategy,
    ) -> Result<()> {
        let ttl = strategy.to_duration();

        // NEW: Multi-tier mode (v0.5.0+)
        if let Some(tiers) = &self.tiers {
            // Store in ALL tiers with their respective TTL scaling
            let mut success_count = 0;
            let mut last_error = None;

            for tier in tiers {
                match tier.set_with_ttl(key, value.clone(), ttl).await {
                    Ok(()) => {
                        success_count += 1;
                    }
                    Err(e) => {
                        error!(
                            "L{} cache set failed for key '{}': {}",
                            tier.tier_level, key, e
                        );
                        last_error = Some(e);
                    }
                }
            }

            if success_count > 0 {
                debug!(
                    "[Multi-Tier] Cached '{}' in {}/{} tiers (base TTL: {:?})",
                    key,
                    success_count,
                    tiers.len(),
                    ttl
                );
                return Ok(());
            }
            return Err(
                last_error.unwrap_or_else(|| anyhow::anyhow!("All tiers failed for key '{key}'"))
            );
        }

        // LEGACY: 2-tier mode (L1 + L2)
        // Store in both L1 and L2
        let l1_result = self.l1_cache.set_with_ttl(key, value.clone(), ttl).await;
        let l2_result = self.l2_cache.set_with_ttl(key, value, ttl).await;

        // Return success if at least one cache succeeded
        match (l1_result, l2_result) {
            (Ok(()), Ok(())) => {
                // Both succeeded
                debug!("[L1+L2] Cached '{}' with TTL {:?}", key, ttl);
                Ok(())
            }
            (Ok(()), Err(_)) => {
                // L1 succeeded, L2 failed
                warn!("L2 cache set failed for key '{}', continuing with L1", key);
                debug!("[L1] Cached '{}' with TTL {:?}", key, ttl);
                Ok(())
            }
            (Err(_), Ok(())) => {
                // L1 failed, L2 succeeded
                warn!("L1 cache set failed for key '{}', continuing with L2", key);
                debug!("[L2] Cached '{}' with TTL {:?}", key, ttl);
                Ok(())
            }
            (Err(e1), Err(_e2)) => {
                // Both failed
                Err(anyhow::anyhow!(
                    "Both L1 and L2 cache set failed for key '{key}': {e1}"
                ))
            }
        }
    }

    /// Get or compute value with Cache Stampede protection across L1+L2+Compute
    ///
    /// This method provides comprehensive Cache Stampede protection:
    /// 1. Check L1 cache first (uses Moka's built-in coalescing)
    /// 2. Check L2 cache with mutex-based coalescing
    /// 3. Compute fresh data with protection against concurrent computations
    ///
    /// # Arguments
    /// * `key` - Cache key
    /// * `strategy` - Cache strategy for TTL and storage behavior
    /// * `compute_fn` - Async function to compute the value if not in any cache
    ///
    /// # Example
    /// ```ignore
    /// let api_data = cache_manager.get_or_compute_with(
    ///     "api_response",
    ///     CacheStrategy::RealTime,
    ///     || async {
    ///         fetch_data_from_api().await
    ///     }
    /// ).await?;
    /// ```
    /// # Errors
    ///
    /// Returns an error if the compute function fails or cache operations fail.
    #[allow(dead_code)]
    pub async fn get_or_compute_with<F, Fut>(
        &self,
        key: &str,
        strategy: CacheStrategy,
        compute_fn: F,
    ) -> Result<serde_json::Value>
    where
        F: FnOnce() -> Fut + Send,
        Fut: Future<Output = Result<serde_json::Value>> + Send,
    {
        self.total_requests.fetch_add(1, Ordering::Relaxed);

        // 1. Try L1 cache first (with built-in Moka coalescing for hot data)
        if let Some(value) = self.l1_cache.get(key).await {
            self.l1_hits.fetch_add(1, Ordering::Relaxed);
            return Ok(value);
        }

        // 2. L1 miss - try L2 with Cache Stampede protection
        let key_owned = key.to_string();
        let lock_guard = self
            .in_flight_requests
            .entry(key_owned.clone())
            .or_insert_with(|| Arc::new(Mutex::new(())))
            .clone();

        let _guard = lock_guard.lock().await;

        // RAII cleanup guard - ensures entry is removed even on early return or panic
        let _cleanup_guard = CleanupGuard {
            map: &self.in_flight_requests,
            key: key_owned,
        };

        // 3. Double-check L1 cache after acquiring lock
        // (Another request might have populated it while we were waiting)
        if let Some(value) = self.l1_cache.get(key).await {
            self.l1_hits.fetch_add(1, Ordering::Relaxed);
            // _cleanup_guard will auto-remove entry on drop
            return Ok(value);
        }

        // 4. Check remaining tiers (L2, L3, L4...) with Stampede protection
        if let Some(tiers) = &self.tiers {
            // Check tiers starting from index 1 (skip L1 since already checked)
            for tier in tiers.iter().skip(1) {
                if let Some((value, ttl)) = tier.get_with_ttl(key).await {
                    tier.record_hit();

                    let promotion_ttl = ttl.unwrap_or_else(|| strategy.to_duration());

                    // Promote to L1 (first tier)
                    if let Some(l1_tier) = tiers.first() {
                        if let Err(e) = l1_tier
                            .set_with_ttl(key, value.clone(), promotion_ttl)
                            .await
                        {
                            warn!(
                                "Failed to promote '{}' from L{} to L1: {}",
                                key, tier.tier_level, e
                            );
                        } else {
                            self.promotions.fetch_add(1, Ordering::Relaxed);
                            debug!(
                                "Promoted '{}' from L{} to L1 with TTL {:?} (Stampede protected)",
                                key, tier.tier_level, promotion_ttl
                            );
                        }
                    }

                    // _cleanup_guard will auto-remove entry on drop
                    return Ok(value);
                }
            }
        } else {
            // LEGACY: Check L2 cache with TTL
            if let Some((value, redis_ttl)) = self.l2_cache.get_with_ttl(key).await {
                self.l2_hits.fetch_add(1, Ordering::Relaxed);

                // Promote to L1 using Redis TTL (or strategy TTL as fallback)
                let promotion_ttl = redis_ttl.unwrap_or_else(|| strategy.to_duration());

                if let Err(e) = self
                    .l1_cache
                    .set_with_ttl(key, value.clone(), promotion_ttl)
                    .await
                {
                    warn!("Failed to promote key '{}' to L1: {}", key, e);
                } else {
                    self.promotions.fetch_add(1, Ordering::Relaxed);
                    debug!(
                        "Promoted '{}' from L2 to L1 with TTL {:?}",
                        key, promotion_ttl
                    );
                }

                // _cleanup_guard will auto-remove entry on drop
                return Ok(value);
            }
        }

        // 5. Cache miss across all tiers - compute fresh data
        debug!(
            "Computing fresh data for key: '{}' (Cache Stampede protected)",
            key
        );
        let fresh_data = compute_fn().await?;

        // 6. Store in both caches
        if let Err(e) = self
            .set_with_strategy(key, fresh_data.clone(), strategy)
            .await
        {
            warn!("Failed to cache computed data for key '{}': {}", key, e);
        }

        // 7. _cleanup_guard will auto-remove entry on drop

        Ok(fresh_data)
    }

    /// Get or compute typed value with Cache Stampede protection (Type-Safe Version)
    ///
    /// This method provides the same functionality as `get_or_compute_with()` but with
    /// **type-safe** automatic serialization/deserialization. Perfect for database queries,
    /// API calls, or any computation that returns structured data.
    ///
    /// # Type Safety
    ///
    /// - Returns your actual type `T` instead of `serde_json::Value`
    /// - Compiler enforces `Serialize` + `DeserializeOwned` bounds
    /// - No manual JSON conversion needed
    ///
    /// # Cache Flow
    ///
    /// 1. Check L1 cache → deserialize if found
    /// 2. Check L2 cache → deserialize + promote to L1 if found
    /// 3. Execute `compute_fn` → serialize → store in L1+L2
    /// 4. Full stampede protection (only ONE request computes)
    ///
    /// # Arguments
    ///
    /// * `key` - Cache key
    /// * `strategy` - Cache strategy for TTL
    /// * `compute_fn` - Async function returning `Result<T>`
    ///
    /// # Example - Database Query
    ///
    /// ```no_run
    /// # use multi_tier_cache::{CacheManager, CacheStrategy, L1Cache, L2Cache};
    /// # use std::sync::Arc;
    /// # use serde::{Serialize, Deserialize};
    /// # async fn example() -> anyhow::Result<()> {
    /// # let l1 = Arc::new(L1Cache::new()?);
    /// # let l2 = Arc::new(L2Cache::new().await?);
    /// # let cache_manager = CacheManager::new(l1, l2).await?;
    ///
    /// #[derive(Serialize, Deserialize)]
    /// struct User {
    ///     id: i64,
    ///     name: String,
    /// }
    ///
    /// // Type-safe database caching (example - requires sqlx)
    /// // let user: User = cache_manager.get_or_compute_typed(
    /// //     "user:123",
    /// //     CacheStrategy::MediumTerm,
    /// //     || async {
    /// //         sqlx::query_as::<_, User>("SELECT * FROM users WHERE id = $1")
    /// //             .bind(123)
    /// //             .fetch_one(&pool)
    /// //             .await
    /// //     }
    /// // ).await?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # Example - API Call
    ///
    /// ```no_run
    /// # use multi_tier_cache::{CacheManager, CacheStrategy, L1Cache, L2Cache};
    /// # use std::sync::Arc;
    /// # use serde::{Serialize, Deserialize};
    /// # async fn example() -> anyhow::Result<()> {
    /// # let l1 = Arc::new(L1Cache::new()?);
    /// # let l2 = Arc::new(L2Cache::new().await?);
    /// # let cache_manager = CacheManager::new(l1, l2).await?;
    /// #[derive(Serialize, Deserialize)]
    /// struct ApiResponse {
    ///     data: String,
    ///     timestamp: i64,
    /// }
    ///
    /// // API call caching (example - requires reqwest)
    /// // let response: ApiResponse = cache_manager.get_or_compute_typed(
    /// //     "api:endpoint",
    /// //     CacheStrategy::RealTime,
    /// //     || async {
    /// //         reqwest::get("https://api.example.com/data")
    /// //             .await?
    /// //             .json::<ApiResponse>()
    /// //             .await
    /// //     }
    /// // ).await?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # Performance
    ///
    /// - L1 hit: <1ms + deserialization (~10-50μs for small structs)
    /// - L2 hit: 2-5ms + deserialization + L1 promotion
    /// - Compute: your function's runtime + serialization + L1+L2 storage
    /// - Stampede protection: 99.6% latency reduction under high concurrency
    ///
    /// # Errors
    ///
    /// Returns error if:
    /// - Compute function fails
    /// - Serialization fails (invalid type for JSON)
    /// - Deserialization fails (cache data doesn't match type `T`)
    /// - Cache operations fail (Redis connection issues)
    #[allow(clippy::too_many_lines)]
    pub async fn get_or_compute_typed<T, F, Fut>(
        &self,
        key: &str,
        strategy: CacheStrategy,
        compute_fn: F,
    ) -> Result<T>
    where
        T: serde::Serialize + serde::de::DeserializeOwned + Send + 'static,
        F: FnOnce() -> Fut + Send,
        Fut: Future<Output = Result<T>> + Send,
    {
        self.total_requests.fetch_add(1, Ordering::Relaxed);

        // 1. Try L1 cache first (with built-in Moka coalescing for hot data)
        if let Some(cached_json) = self.l1_cache.get(key).await {
            self.l1_hits.fetch_add(1, Ordering::Relaxed);

            // Attempt to deserialize from JSON to type T
            match serde_json::from_value::<T>(cached_json) {
                Ok(typed_value) => {
                    debug!(
                        "[L1 HIT] Deserialized '{}' to type {}",
                        key,
                        std::any::type_name::<T>()
                    );
                    return Ok(typed_value);
                }
                Err(e) => {
                    // Deserialization failed - cache data may be stale or corrupt
                    warn!(
                        "L1 cache deserialization failed for key '{}': {}. Will recompute.",
                        key, e
                    );
                    // Fall through to recompute
                }
            }
        }

        // 2. L1 miss - try L2 with Cache Stampede protection
        let key_owned = key.to_string();
        let lock_guard = self
            .in_flight_requests
            .entry(key_owned.clone())
            .or_insert_with(|| Arc::new(Mutex::new(())))
            .clone();

        let _guard = lock_guard.lock().await;

        // RAII cleanup guard - ensures entry is removed even on early return or panic
        let _cleanup_guard = CleanupGuard {
            map: &self.in_flight_requests,
            key: key_owned,
        };

        // 3. Double-check L1 cache after acquiring lock
        // (Another request might have populated it while we were waiting)
        if let Some(cached_json) = self.l1_cache.get(key).await {
            self.l1_hits.fetch_add(1, Ordering::Relaxed);
            if let Ok(typed_value) = serde_json::from_value::<T>(cached_json) {
                debug!("[L1 HIT] Deserialized '{}' after lock acquisition", key);
                return Ok(typed_value);
            }
        }

        // 4. Check remaining tiers (L2, L3, L4...) with Stampede protection
        if let Some(tiers) = &self.tiers {
            // Check tiers starting from index 1 (skip L1 since already checked)
            for tier in tiers.iter().skip(1) {
                if let Some((cached_json, ttl)) = tier.get_with_ttl(key).await {
                    tier.record_hit();

                    // Attempt to deserialize
                    match serde_json::from_value::<T>(cached_json.clone()) {
                        Ok(typed_value) => {
                            debug!(
                                "[L{} HIT] Deserialized '{}' to type {}",
                                tier.tier_level,
                                key,
                                std::any::type_name::<T>()
                            );

                            // Promote to L1 (first tier)
                            let promotion_ttl = ttl.unwrap_or_else(|| strategy.to_duration());
                            // Use a distinct name for the L1 tier so the log messages
                            // below report the level of the tier we promoted from
                            if let Some(l1_tier) = tiers.first() {
                                if let Err(e) =
                                    l1_tier.set_with_ttl(key, cached_json, promotion_ttl).await
                                {
                                    warn!(
                                        "Failed to promote '{}' from L{} to L1: {}",
                                        key, tier.tier_level, e
                                    );
                                } else {
                                    self.promotions.fetch_add(1, Ordering::Relaxed);
                                    debug!(
                                        "Promoted '{}' from L{} to L1 with TTL {:?} (Stampede protected)",
                                        key, tier.tier_level, promotion_ttl
                                    );
                                }
                            }

                            return Ok(typed_value);
                        }
                        Err(e) => {
                            warn!(
                                "L{} cache deserialization failed for key '{}': {}. Trying next tier.",
                                tier.tier_level, key, e
                            );
                            // Continue to next tier
                        }
                    }
                }
            }
        } else {
            // LEGACY: Check L2 cache with TTL
            if let Some((cached_json, redis_ttl)) = self.l2_cache.get_with_ttl(key).await {
                self.l2_hits.fetch_add(1, Ordering::Relaxed);

                // Attempt to deserialize
                match serde_json::from_value::<T>(cached_json.clone()) {
                    Ok(typed_value) => {
                        debug!("[L2 HIT] Deserialized '{}' from Redis", key);

                        // Promote to L1 using Redis TTL (or strategy TTL as fallback)
                        let promotion_ttl = redis_ttl.unwrap_or_else(|| strategy.to_duration());

                        if let Err(e) = self
                            .l1_cache
                            .set_with_ttl(key, cached_json, promotion_ttl)
                            .await
                        {
                            warn!("Failed to promote key '{}' to L1: {}", key, e);
                        } else {
                            self.promotions.fetch_add(1, Ordering::Relaxed);
                            debug!(
                                "Promoted '{}' from L2 to L1 with TTL {:?}",
                                key, promotion_ttl
                            );
                        }

                        return Ok(typed_value);
                    }
                    Err(e) => {
                        warn!(
                            "L2 cache deserialization failed for key '{}': {}. Will recompute.",
                            key, e
                        );
                        // Fall through to recompute
                    }
                }
            }
        }

        // 5. Cache miss across all tiers (or deserialization failed) - compute fresh data
        debug!(
            "Computing fresh typed data for key: '{}' (Cache Stampede protected)",
            key
        );
        let typed_value = compute_fn().await?;

        // 6. Serialize to JSON for storage
        let json_value = serde_json::to_value(&typed_value).map_err(|e| {
            anyhow::anyhow!(
                "Failed to serialize type {} for caching: {}",
                std::any::type_name::<T>(),
                e
            )
        })?;

        // 7. Store in both L1 and L2 caches
        if let Err(e) = self.set_with_strategy(key, json_value, strategy).await {
            warn!(
                "Failed to cache computed typed data for key '{}': {}",
                key, e
            );
        } else {
            debug!(
                "Cached typed value for '{}' (type: {})",
                key,
                std::any::type_name::<T>()
            );
        }

        // 8. _cleanup_guard will auto-remove entry on drop

        Ok(typed_value)
    }

    /// Get comprehensive cache statistics
    ///
    /// In multi-tier mode, aggregates statistics from all tiers.
    /// In legacy mode, returns L1 and L2 stats.
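    ///
    /// A hedged sketch of reading the aggregate counters:
    ///
    /// ```rust,ignore
    /// let stats = manager.get_stats();
    /// println!("hit rate: {:.1}% ({}/{} requests)",
    ///          stats.hit_rate, stats.total_hits, stats.total_requests);
    /// ```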
    #[allow(dead_code)]
    pub fn get_stats(&self) -> CacheManagerStats {
        let total_reqs = self.total_requests.load(Ordering::Relaxed);
        let l1_hits = self.l1_hits.load(Ordering::Relaxed);
        let l2_hits = self.l2_hits.load(Ordering::Relaxed);
        let misses = self.misses.load(Ordering::Relaxed);

        CacheManagerStats {
            total_requests: total_reqs,
            l1_hits,
            l2_hits,
            total_hits: l1_hits + l2_hits,
            misses,
            hit_rate: if total_reqs > 0 {
                #[allow(clippy::cast_precision_loss)]
                {
                    ((l1_hits + l2_hits) as f64 / total_reqs as f64) * 100.0
                }
            } else {
                0.0
            },
            l1_hit_rate: if total_reqs > 0 {
                #[allow(clippy::cast_precision_loss)]
                {
                    (l1_hits as f64 / total_reqs as f64) * 100.0
                }
            } else {
                0.0
            },
            promotions: self.promotions.load(Ordering::Relaxed),
            in_flight_requests: self.in_flight_requests.len(),
        }
    }

    /// Get per-tier statistics (v0.5.0+)
    ///
    /// Returns statistics for each tier if multi-tier mode is enabled.
    /// Returns None if using legacy 2-tier mode.
    ///
    /// # Example
    /// ```rust,ignore
    /// if let Some(tier_stats) = cache_manager.get_tier_stats() {
    ///     for stats in tier_stats {
    ///         println!("L{}: {} hits ({})",
    ///                  stats.tier_level,
    ///                  stats.hit_count(),
    ///                  stats.backend_name);
    ///     }
    /// }
    /// ```
    pub fn get_tier_stats(&self) -> Option<Vec<TierStats>> {
        self.tiers
            .as_ref()
            .map(|tiers| tiers.iter().map(|tier| tier.stats.clone()).collect())
    }

    // ===== Redis Streams Methods =====

    /// Publish data to Redis Stream
    ///
    /// # Arguments
    /// * `stream_key` - Name of the stream (e.g., "`events_stream`")
    /// * `fields` - Field-value pairs to publish
    /// * `maxlen` - Optional max length for stream trimming
    ///
    /// # Returns
    /// The entry ID generated by Redis
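    ///
    /// # Example
    ///
    /// A hedged sketch; the stream name and field values are illustrative:
    ///
    /// ```rust,ignore
    /// let id = manager
    ///     .publish_to_stream(
    ///         "events_stream",
    ///         vec![("event".to_string(), "user_signup".to_string())],
    ///         Some(10_000), // trim the stream to roughly 10k entries
    ///     )
    ///     .await?;
    /// ```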
    ///
    /// # Errors
    /// Returns an error if the streaming backend is not configured
    pub async fn publish_to_stream(
        &self,
        stream_key: &str,
        fields: Vec<(String, String)>,
        maxlen: Option<usize>,
    ) -> Result<String> {
        match &self.streaming_backend {
            Some(backend) => backend.stream_add(stream_key, fields, maxlen).await,
            None => Err(anyhow::anyhow!("Streaming backend not configured")),
        }
    }

    /// Read latest entries from Redis Stream
    ///
    /// # Arguments
    /// * `stream_key` - Name of the stream
    /// * `count` - Number of latest entries to retrieve
    ///
    /// # Returns
    /// Vector of (`entry_id`, fields) tuples (newest first)
1449    ///
1450    /// # Errors
1451    /// Returns error if streaming backend is not configured
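    ///
    /// # Example
    /// A minimal sketch (the stream name is illustrative):
    /// ```rust,ignore
    /// let entries = cache_manager.read_stream_latest("events_stream", 10).await?;
    /// for (entry_id, fields) in entries {
    ///     println!("{}: {:?}", entry_id, fields);
    /// }
    /// ```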
    pub async fn read_stream_latest(
        &self,
        stream_key: &str,
        count: usize,
    ) -> Result<Vec<(String, Vec<(String, String)>)>> {
        match &self.streaming_backend {
            Some(backend) => backend.stream_read_latest(stream_key, count).await,
            None => Err(anyhow::anyhow!("Streaming backend not configured")),
        }
    }

    /// Read from a Redis Stream with optional blocking
    ///
    /// # Arguments
    /// * `stream_key` - Name of the stream
    /// * `last_id` - Last ID seen ("0" for the start, "$" for new entries only)
    /// * `count` - Max entries to retrieve
    /// * `block_ms` - Optional blocking timeout in milliseconds
    ///
    /// # Returns
    /// Vector of (`entry_id`, fields) tuples
    ///
    /// # Errors
    /// Returns an error if the streaming backend is not configured
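    ///
    /// # Example
    /// A minimal tail-the-stream sketch (the stream name is illustrative):
    /// ```rust,ignore
    /// let mut last_id = "$".to_string(); // start with new entries only
    /// loop {
    ///     // Block for up to 5 seconds waiting for entries after `last_id`
    ///     let entries = cache_manager
    ///         .read_stream("events_stream", &last_id, 100, Some(5000))
    ///         .await?;
    ///     for (entry_id, fields) in entries {
    ///         println!("{}: {:?}", entry_id, fields);
    ///         last_id = entry_id; // advance the cursor
    ///     }
    /// }
    /// ```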
    pub async fn read_stream(
        &self,
        stream_key: &str,
        last_id: &str,
        count: usize,
        block_ms: Option<usize>,
    ) -> Result<Vec<(String, Vec<(String, String)>)>> {
        match &self.streaming_backend {
            Some(backend) => {
                backend
                    .stream_read(stream_key, last_id, count, block_ms)
                    .await
            }
            None => Err(anyhow::anyhow!("Streaming backend not configured")),
        }
    }

    // ===== Cache Invalidation Methods =====

    /// Invalidate a cache key across all instances
    ///
    /// This removes the key from all cache tiers and broadcasts
    /// the invalidation to all other cache instances via Redis Pub/Sub.
    ///
    /// Supports both legacy 2-tier mode and the new multi-tier mode (v0.5.0+).
    ///
    /// # Arguments
    /// * `key` - Cache key to invalidate
    ///
    /// # Example
    /// ```rust,ignore
    /// // Invalidate user cache after a profile update
    /// cache_manager.invalidate("user:123").await?;
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if invalidation fails.
    pub async fn invalidate(&self, key: &str) -> Result<()> {
        // NEW: Multi-tier mode (v0.5.0+)
        if let Some(tiers) = &self.tiers {
            // Remove from ALL tiers
            for tier in tiers {
                if let Err(e) = tier.remove(key).await {
                    warn!(
                        "Failed to remove '{}' from L{}: {}",
                        key, tier.tier_level, e
                    );
                }
            }
        } else {
            // LEGACY: 2-tier mode
            self.l1_cache.remove(key).await?;
            self.l2_cache.remove(key).await?;
        }

        // Broadcast to other instances
        if let Some(publisher) = &self.invalidation_publisher {
            let mut pub_lock = publisher.lock().await;
            let msg = InvalidationMessage::remove(key);
            pub_lock.publish(&msg).await?;
            self.invalidation_stats
                .messages_sent
                .fetch_add(1, Ordering::Relaxed);
        }

        debug!("Invalidated '{}' across all instances", key);
        Ok(())
    }

    /// Update cache value across all instances
    ///
    /// This updates the key in all cache tiers and broadcasts
    /// the update to all other cache instances, avoiding cache misses.
    ///
    /// Supports both legacy 2-tier mode and the new multi-tier mode (v0.5.0+).
    ///
    /// # Arguments
    /// * `key` - Cache key to update
    /// * `value` - New value
    /// * `ttl` - Optional TTL (uses the default if `None`)
    ///
    /// # Example
    /// ```rust,ignore
    /// // Update user cache with new data
    /// let user_data = serde_json::json!({"id": 123, "name": "Alice"});
    /// cache_manager.update_cache("user:123", user_data, Some(Duration::from_secs(3600))).await?;
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if the cache update fails.
    pub async fn update_cache(
        &self,
        key: &str,
        value: serde_json::Value,
        ttl: Option<Duration>,
    ) -> Result<()> {
        let ttl = ttl.unwrap_or_else(|| CacheStrategy::Default.to_duration());

        // NEW: Multi-tier mode (v0.5.0+)
        if let Some(tiers) = &self.tiers {
            // Update ALL tiers with their respective TTL scaling
            for tier in tiers {
                if let Err(e) = tier.set_with_ttl(key, value.clone(), ttl).await {
                    warn!("Failed to update '{}' in L{}: {}", key, tier.tier_level, e);
                }
            }
        } else {
            // LEGACY: 2-tier mode
            self.l1_cache.set_with_ttl(key, value.clone(), ttl).await?;
            self.l2_cache.set_with_ttl(key, value.clone(), ttl).await?;
        }

        // Broadcast update to other instances
        if let Some(publisher) = &self.invalidation_publisher {
            let mut pub_lock = publisher.lock().await;
            let msg = InvalidationMessage::update(key, value, Some(ttl));
            pub_lock.publish(&msg).await?;
            self.invalidation_stats
                .messages_sent
                .fetch_add(1, Ordering::Relaxed);
        }

        debug!("Updated '{}' across all instances", key);
        Ok(())
    }

    /// Invalidate all keys matching a pattern
    ///
    /// This scans the L2 cache for keys matching the pattern, removes them from
    /// all tiers, and broadcasts the invalidation. L1 caches will be cleared via
    /// broadcast.
    ///
    /// Supports both legacy 2-tier mode and the new multi-tier mode (v0.5.0+).
    ///
    /// **Note**: Pattern scanning requires a concrete `L2Cache` instance with `scan_keys()`.
    /// In multi-tier mode, this scans L2 but removes from all tiers.
    ///
    /// # Arguments
    /// * `pattern` - Glob-style pattern (e.g., "user:*", "product:123:*")
    ///
    /// # Example
    /// ```rust,ignore
    /// // Invalidate all user caches
    /// cache_manager.invalidate_pattern("user:*").await?;
    ///
    /// // Invalidate a specific user's related caches
    /// cache_manager.invalidate_pattern("user:123:*").await?;
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if invalidation fails.
    pub async fn invalidate_pattern(&self, pattern: &str) -> Result<()> {
        // Scan L2 for matching keys
        // (Note: Pattern scanning requires concrete L2Cache with scan_keys support)
        let keys = if let Some(l2) = &self.l2_cache_concrete {
            l2.scan_keys(pattern).await?
        } else {
            return Err(anyhow::anyhow!(
                "Pattern invalidation requires concrete L2Cache instance"
            ));
        };

        if keys.is_empty() {
            debug!("No keys found matching pattern '{}'", pattern);
            return Ok(());
        }

        // NEW: Multi-tier mode (v0.5.0+)
        if let Some(tiers) = &self.tiers {
            // Remove from ALL tiers
            for key in &keys {
                for tier in tiers {
                    if let Err(e) = tier.remove(key).await {
                        warn!(
                            "Failed to remove '{}' from L{}: {}",
                            key, tier.tier_level, e
                        );
                    }
                }
            }
        } else {
            // LEGACY: 2-tier mode - remove from L2 in bulk
            if let Some(l2) = &self.l2_cache_concrete {
                l2.remove_bulk(&keys).await?;
            }
        }

        // Broadcast pattern invalidation
        if let Some(publisher) = &self.invalidation_publisher {
            let mut pub_lock = publisher.lock().await;
            let msg = InvalidationMessage::remove_bulk(keys.clone());
            pub_lock.publish(&msg).await?;
            self.invalidation_stats
                .messages_sent
                .fetch_add(1, Ordering::Relaxed);
        }

        debug!(
            "Invalidated {} keys matching pattern '{}'",
            keys.len(),
            pattern
        );
        Ok(())
    }

    /// Set value with automatic broadcast to all instances
    ///
    /// This is a write-through operation that updates the cache and
    /// broadcasts the update to all other instances automatically.
    ///
    /// # Arguments
    /// * `key` - Cache key
    /// * `value` - Value to cache
    /// * `strategy` - Cache strategy (determines TTL)
    ///
    /// # Example
    /// ```rust,ignore
    /// // Update and broadcast in one call
    /// let data = serde_json::json!({"status": "active"});
    /// cache_manager.set_with_broadcast("user:123", data, CacheStrategy::MediumTerm).await?;
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if the cache set or the broadcast fails.
    pub async fn set_with_broadcast(
        &self,
        key: &str,
        value: serde_json::Value,
        strategy: CacheStrategy,
    ) -> Result<()> {
        let ttl = strategy.to_duration();

        // Set in local caches
        self.set_with_strategy(key, value.clone(), strategy).await?;

        // Broadcast update if invalidation is enabled
        if let Some(publisher) = &self.invalidation_publisher {
            let mut pub_lock = publisher.lock().await;
            let msg = InvalidationMessage::update(key, value, Some(ttl));
            pub_lock.publish(&msg).await?;
            self.invalidation_stats
                .messages_sent
                .fetch_add(1, Ordering::Relaxed);
        }

        Ok(())
    }

    /// Get invalidation statistics
    ///
    /// Returns statistics about invalidation operations if invalidation is
    /// enabled, or `None` if it is not.
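    ///
    /// # Example
    /// A minimal sketch; it assumes the `InvalidationStats` snapshot exposes a
    /// `messages_sent` counter mirroring the atomic stats:
    /// ```rust,ignore
    /// if let Some(stats) = cache_manager.get_invalidation_stats() {
    ///     println!("invalidation messages sent: {}", stats.messages_sent);
    /// }
    /// ```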
    pub fn get_invalidation_stats(&self) -> Option<InvalidationStats> {
        if self.invalidation_subscriber.is_some() {
            Some(self.invalidation_stats.snapshot())
        } else {
            None
        }
    }
}

/// Cache Manager statistics
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct CacheManagerStats {
    /// Total number of cache lookups
    pub total_requests: u64,
    /// Hits served from L1
    pub l1_hits: u64,
    /// Hits served from L2
    pub l2_hits: u64,
    /// Combined L1 + L2 hits
    pub total_hits: u64,
    /// Lookups that missed every tier
    pub misses: u64,
    /// Overall hit rate as a percentage (0.0-100.0)
    pub hit_rate: f64,
    /// L1-only hit rate as a percentage (0.0-100.0)
    pub l1_hit_rate: f64,
    /// Values promoted to a hotter tier on hit
    pub promotions: usize,
    /// In-flight get-or-compute requests at snapshot time
    pub in_flight_requests: usize,
}