multi_tier_cache/
traits.rs

1//! Cache Backend Traits
2//!
3//! This module defines the trait abstractions that allow users to implement
4//! custom cache backends for both L1 (in-memory) and L2 (distributed) caches.
5//!
6//! # Architecture
7//!
8//! - `CacheBackend`: Core trait for all cache implementations
9//! - `L2CacheBackend`: Extended trait for L2 caches with TTL introspection
10//! - `StreamingBackend`: Optional trait for event streaming capabilities
11//!
12//! # Example: Custom L1 Backend
13//!
//! ```rust,no_run
//! use multi_tier_cache::{CacheBackend, async_trait};
//! use std::time::Duration;
//! use anyhow::Result;
//!
//! struct MyCustomCache;
//!
//! #[async_trait]
//! impl CacheBackend for MyCustomCache {
//!     async fn get(&self, key: &str) -> Option<serde_json::Value> {
//!         None
//!     }
//!
//!     async fn set_with_ttl(&self, key: &str, value: serde_json::Value, ttl: Duration) -> Result<()> {
//!         Ok(())
//!     }
//!
//!     async fn remove(&self, key: &str) -> Result<()> {
//!         Ok(())
//!     }
//!
//!     async fn health_check(&self) -> bool {
//!         true
//!     }
//! }
//! ```
40use anyhow::Result;
41use async_trait::async_trait;
42use serde_json;
43use std::time::Duration;
44
/// Core cache backend trait for both L1 and L2 caches
///
/// This trait defines the essential operations that any cache backend must support.
/// Implement this trait to create custom L1 (in-memory) or L2 (distributed) cache backends.
///
/// # Required Operations
///
/// - `get`: Retrieve a value by key
/// - `set_with_ttl`: Store a value with a time-to-live
/// - `remove`: Delete a value by key
/// - `health_check`: Verify cache backend is operational
///
/// # Provided (Overridable) Operations
///
/// - `remove_pattern`: Defaults to a no-op, kept for backward compatibility
/// - `name`: Defaults to `"unknown"`; override so logs identify the backend
///
/// # Thread Safety
///
/// Implementations must be `Send + Sync` to support concurrent access across async tasks.
///
/// # Performance Considerations
///
/// - `get` operations should be optimized for low latency (target: <1ms for L1, <5ms for L2)
/// - `set_with_ttl` operations can be slightly slower but should still be fast
/// - Consider connection pooling for distributed backends
///
/// # Example
///
/// See module-level documentation for a complete example.
#[async_trait]
pub trait CacheBackend: Send + Sync {
    /// Get value from cache by key
    ///
    /// # Arguments
    ///
    /// * `key` - The cache key to retrieve
    ///
    /// # Returns
    ///
    /// * `Some(value)` - Value found in cache
    /// * `None` - Key not found or expired
    ///
    /// # Error Reporting
    ///
    /// The `Option` return cannot distinguish a cache miss from a backend
    /// failure; implementations presumably log errors internally and report
    /// them as a miss (`None`) — NOTE(review): confirm this convention
    /// against the concrete backends in this crate.
    async fn get(&self, key: &str) -> Option<serde_json::Value>;

    /// Set value in cache with time-to-live
    ///
    /// # Arguments
    ///
    /// * `key` - The cache key
    /// * `value` - The value to store (must be JSON-serializable)
    /// * `ttl` - Time-to-live duration
    ///
    /// # Returns
    ///
    /// * `Ok(())` - Value successfully cached
    /// * `Err(e)` - Cache operation failed
    async fn set_with_ttl(&self, key: &str, value: serde_json::Value, ttl: Duration) -> Result<()>;

    /// Remove value from cache
    ///
    /// # Arguments
    ///
    /// * `key` - The cache key to remove
    ///
    /// # Returns
    ///
    /// * `Ok(())` - Value removed (or didn't exist) — removal is idempotent
    /// * `Err(e)` - Cache operation failed
    async fn remove(&self, key: &str) -> Result<()>;

    /// Check if cache backend is healthy
    ///
    /// This method should verify that the cache backend is operational.
    /// For distributed caches, this typically involves a ping or connectivity check.
    ///
    /// # Returns
    ///
    /// * `true` - Cache is healthy and operational
    /// * `false` - Cache is unhealthy or unreachable
    async fn health_check(&self) -> bool;

    /// Remove keys matching a pattern
    ///
    /// # Arguments
    ///
    /// * `pattern` - Glob-style pattern (e.g. "user:*")
    ///
    /// # Returns
    ///
    /// * `Ok(())` - Pattern processed
    /// * `Err(e)` - Operation failed
    ///
    /// # Default Behavior
    ///
    /// The default implementation ignores the pattern and returns `Ok(())` so
    /// that pre-existing backends keep compiling. Callers must not assume
    /// matching keys were actually deleted unless the backend overrides this
    /// method with a real implementation (e.g. Redis SCAN + DEL).
    async fn remove_pattern(&self, _pattern: &str) -> Result<()> {
        // Default implementation does nothing (for backward compatibility)
        Ok(())
    }

    /// Get the name of this cache backend
    ///
    /// This is used for logging and debugging purposes.
    ///
    /// # Returns
    ///
    /// A string identifying this cache backend (e.g., "Moka", "Redis", "Memcached").
    /// Returns "unknown" when the implementation does not override this method.
    fn name(&self) -> &'static str {
        "unknown"
    }
}
147
/// Extended trait for L2 cache backends with TTL introspection
///
/// This trait extends `CacheBackend` with the ability to retrieve both a value
/// and its remaining TTL. This is essential for implementing efficient L2-to-L1
/// promotion with accurate TTL propagation.
///
/// # Use Cases
///
/// - L2-to-L1 promotion with same TTL
/// - TTL-based cache warming strategies
/// - Monitoring and analytics
///
/// # Example
///
/// ```rust,no_run
/// use multi_tier_cache::{CacheBackend, L2CacheBackend, async_trait};
/// use std::time::Duration;
/// use anyhow::Result;
///
/// struct MyDistributedCache;
///
/// #[async_trait]
/// impl CacheBackend for MyDistributedCache {
///     async fn get(&self, _key: &str) -> Option<serde_json::Value> { None }
///     async fn set_with_ttl(&self, _k: &str, _v: serde_json::Value, _t: Duration) -> Result<()> { Ok(()) }
///     async fn remove(&self, _k: &str) -> Result<()> { Ok(()) }
///     async fn health_check(&self) -> bool { true }
/// }
///
/// #[async_trait]
/// impl L2CacheBackend for MyDistributedCache {
///     async fn get_with_ttl(&self, key: &str) -> Option<(serde_json::Value, Option<Duration>)> {
///         // Retrieve value and calculate remaining TTL
///         None
///     }
/// }
/// ```
#[async_trait]
pub trait L2CacheBackend: CacheBackend {
    /// Get value with its remaining TTL from L2 cache
    ///
    /// This method retrieves both the value and its remaining time-to-live.
    /// This is used by the cache manager to promote entries from L2 to L1
    /// with the correct TTL.
    ///
    /// # Arguments
    ///
    /// * `key` - The cache key to retrieve
    ///
    /// # Returns
    ///
    /// * `Some((value, Some(ttl)))` - Value found with remaining TTL
    /// * `Some((value, None))` - Value found but no expiration set (never expires)
    /// * `None` - Key not found or expired
    ///
    /// # TTL Semantics
    ///
    /// - TTL represents the **remaining** time until expiration
    /// - `None` TTL means the key has no expiration
    /// - Implementations should use backend-specific TTL commands (e.g., Redis TTL)
    /// - The value returned here should agree with `CacheBackend::get` for the
    ///   same key — NOTE(review): confirm implementations keep the two in sync
    async fn get_with_ttl(&self, key: &str) -> Option<(serde_json::Value, Option<Duration>)>;
}
210
/// Optional trait for cache backends that support event streaming
///
/// This trait defines operations for event-driven architectures using
/// streaming data structures like Redis Streams.
///
/// Note that this trait is independent of `CacheBackend` (it only requires
/// `Send + Sync`), so a type may implement either trait or both.
///
/// # Capabilities
///
/// - Publish events to streams with automatic trimming
/// - Read latest entries (newest first)
/// - Read entries with blocking support
///
/// # Backend Requirements
///
/// Not all cache backends support streaming. This trait is optional and
/// should only be implemented by backends with native streaming support
/// (e.g., Redis Streams, Kafka, Pulsar).
///
/// # Example
///
/// ```rust,no_run
/// use multi_tier_cache::{StreamingBackend, async_trait};
/// use anyhow::Result;
///
/// struct MyStreamingCache;
///
/// #[async_trait]
/// impl StreamingBackend for MyStreamingCache {
///     async fn stream_add(
///         &self,
///         stream_key: &str,
///         fields: Vec<(String, String)>,
///         maxlen: Option<usize>,
///     ) -> Result<String> {
///         Ok("entry-id".to_string())
///     }
///
///     async fn stream_read_latest(
///         &self,
///         stream_key: &str,
///         count: usize,
///     ) -> Result<Vec<(String, Vec<(String, String)>)>> {
///         Ok(vec![])
///     }
///
///     async fn stream_read(
///         &self,
///         stream_key: &str,
///         last_id: &str,
///         count: usize,
///         block_ms: Option<usize>,
///     ) -> Result<Vec<(String, Vec<(String, String)>)>> {
///         Ok(vec![])
///     }
/// }
/// ```
#[async_trait]
pub trait StreamingBackend: Send + Sync {
    /// Add an entry to a stream
    ///
    /// # Arguments
    ///
    /// * `stream_key` - Name of the stream (e.g., "`events_stream`")
    /// * `fields` - Vector of field-value pairs to add
    /// * `maxlen` - Optional maximum stream length (older entries are trimmed)
    ///
    /// # Returns
    ///
    /// * `Ok(entry_id)` - The generated entry ID (e.g., "1234567890-0"); the
    ///   exact ID format is backend-specific (the example is Redis-style
    ///   `millis-sequence`)
    /// * `Err(e)` - Stream operation failed
    ///
    /// # Trimming Behavior
    ///
    /// If `maxlen` is specified, the stream is automatically trimmed to keep
    /// approximately that many entries (oldest entries are removed).
    async fn stream_add(
        &self,
        stream_key: &str,
        fields: Vec<(String, String)>,
        maxlen: Option<usize>,
    ) -> Result<String>;

    /// Read the latest N entries from a stream (newest first)
    ///
    /// # Arguments
    ///
    /// * `stream_key` - Name of the stream
    /// * `count` - Maximum number of entries to retrieve
    ///
    /// # Returns
    ///
    /// * `Ok(entries)` - Vector of (`entry_id`, fields) tuples (newest first);
    ///   an empty vector when the stream is empty or does not exist
    /// * `Err(e)` - Stream operation failed
    ///
    /// # Ordering
    ///
    /// Entries are returned in reverse chronological order (newest first).
    async fn stream_read_latest(
        &self,
        stream_key: &str,
        count: usize,
    ) -> Result<Vec<(String, Vec<(String, String)>)>>;

    /// Read entries from a stream with optional blocking
    ///
    /// # Arguments
    ///
    /// * `stream_key` - Name of the stream
    /// * `last_id` - Last entry ID seen ("0" for beginning, "$" for new only);
    ///   only entries with IDs strictly greater than `last_id` are returned
    /// * `count` - Maximum number of entries to retrieve
    /// * `block_ms` - Optional blocking timeout in milliseconds (None = non-blocking)
    ///
    /// # Returns
    ///
    /// * `Ok(entries)` - Vector of (`entry_id`, fields) tuples
    /// * `Err(e)` - Stream operation failed
    ///
    /// # Blocking Behavior
    ///
    /// - `None`: Non-blocking, returns immediately
    /// - `Some(ms)`: Blocks up to `ms` milliseconds waiting for new entries
    ///
    /// # Use Cases
    ///
    /// - Non-blocking: Poll for new events
    /// - Blocking: Long-polling for real-time event consumption
    async fn stream_read(
        &self,
        stream_key: &str,
        last_id: &str,
        count: usize,
        block_ms: Option<usize>,
    ) -> Result<Vec<(String, Vec<(String, String)>)>>;
}