// multi_tier_cache/traits.rs

//! Cache Backend Traits
//!
//! This module defines the trait abstractions that allow users to implement
//! custom cache backends for both L1 (in-memory) and L2 (distributed) caches.
//!
//! # Architecture
//!
//! - `CacheBackend`: Core trait for all cache implementations
//! - `L2CacheBackend`: Extended trait for L2 caches with TTL introspection
//! - `StreamingBackend`: Optional trait for event streaming capabilities
//!
//! # Example: Custom L1 Backend
//!
//! ```rust,ignore
//! use multi_tier_cache::{CacheBackend, async_trait};
//! use std::time::Duration;
//! use anyhow::Result;
//!
//! struct MyCustomCache {
//!     // Your implementation
//! }
//!
//! #[async_trait]
//! impl CacheBackend for MyCustomCache {
//!     async fn get(&self, key: &str) -> Option<serde_json::Value> {
//!         // Your implementation
//!         todo!()
//!     }
//!
//!     async fn set_with_ttl(&self, key: &str, value: serde_json::Value, ttl: Duration) -> Result<()> {
//!         // Your implementation
//!         todo!()
//!     }
//!
//!     async fn remove(&self, key: &str) -> Result<()> {
//!         // Your implementation
//!         todo!()
//!     }
//!
//!     async fn health_check(&self) -> bool {
//!         // Your implementation
//!         todo!()
//!     }
//! }
//! ```

use anyhow::Result;
use async_trait::async_trait;
use std::time::Duration;

/// Core cache backend trait for both L1 and L2 caches
///
/// This trait defines the essential operations that any cache backend must support.
/// Implement this trait to create custom L1 (in-memory) or L2 (distributed) cache backends.
///
/// # Required Operations
///
/// - `get`: Retrieve a value by key
/// - `set_with_ttl`: Store a value with a time-to-live
/// - `remove`: Delete a value by key
/// - `health_check`: Verify that the cache backend is operational
///
/// # Thread Safety
///
/// Implementations must be `Send + Sync` to support concurrent access across async tasks.
///
/// # Performance Considerations
///
/// - `get` operations should be optimized for low latency (target: <1ms for L1, <5ms for L2)
/// - `set_with_ttl` operations can be slightly slower but should still be fast
/// - Consider connection pooling for distributed backends
///
/// # Example
///
/// See module-level documentation for a complete example.
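///
/// As a hedged sketch, a caller generic over any backend might look like this
/// (the `fetch_or_compute` helper, its fallback value, and the five-minute TTL
/// are illustrative assumptions, not part of this crate):
///
/// ```rust,ignore
/// use std::time::Duration;
///
/// async fn fetch_or_compute<B: CacheBackend>(backend: &B, key: &str) -> serde_json::Value {
///     if let Some(hit) = backend.get(key).await {
///         return hit;
///     }
///     let value = serde_json::json!({ "computed": true });
///     // Cache for five minutes; a write failure degrades to a cache miss.
///     if let Err(e) = backend.set_with_ttl(key, value.clone(), Duration::from_secs(300)).await {
///         eprintln!("cache write failed on {}: {e}", backend.name());
///     }
///     value
/// }
/// ```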
#[async_trait]
pub trait CacheBackend: Send + Sync {
    /// Get value from cache by key
    ///
    /// # Arguments
    ///
    /// * `key` - The cache key to retrieve
    ///
    /// # Returns
    ///
    /// * `Some(value)` - Value found in cache
    /// * `None` - Key not found or expired
    async fn get(&self, key: &str) -> Option<serde_json::Value>;

    /// Set value in cache with time-to-live
    ///
    /// # Arguments
    ///
    /// * `key` - The cache key
    /// * `value` - The value to store (must be JSON-serializable)
    /// * `ttl` - Time-to-live duration
    ///
    /// # Returns
    ///
    /// * `Ok(())` - Value successfully cached
    /// * `Err(e)` - Cache operation failed
    async fn set_with_ttl(&self, key: &str, value: serde_json::Value, ttl: Duration) -> Result<()>;

    /// Remove value from cache
    ///
    /// # Arguments
    ///
    /// * `key` - The cache key to remove
    ///
    /// # Returns
    ///
    /// * `Ok(())` - Value removed (or didn't exist)
    /// * `Err(e)` - Cache operation failed
    async fn remove(&self, key: &str) -> Result<()>;

    /// Check if cache backend is healthy
    ///
    /// This method should verify that the cache backend is operational.
    /// For distributed caches, this typically involves a ping or connectivity check.
    ///
    /// # Returns
    ///
    /// * `true` - Cache is healthy and operational
    /// * `false` - Cache is unhealthy or unreachable
    async fn health_check(&self) -> bool;

    /// Get the name of this cache backend
    ///
    /// This is used for logging and debugging purposes.
    ///
    /// # Returns
    ///
    /// A string identifying this cache backend (e.g., "Moka", "Redis", "Memcached")
    fn name(&self) -> &'static str {
        "unknown"
    }
}

/// Extended trait for L2 cache backends with TTL introspection
///
/// This trait extends `CacheBackend` with the ability to retrieve both a value
/// and its remaining TTL. This is essential for implementing efficient L2-to-L1
/// promotion with accurate TTL propagation.
///
/// # Use Cases
///
/// - L2-to-L1 promotion with the same TTL
/// - TTL-based cache warming strategies
/// - Monitoring and analytics
///
/// # Example
///
/// ```rust,ignore
/// use multi_tier_cache::{L2CacheBackend, async_trait};
/// use std::time::Duration;
///
/// #[async_trait]
/// impl L2CacheBackend for MyDistributedCache {
///     async fn get_with_ttl(&self, key: &str) -> Option<(serde_json::Value, Option<Duration>)> {
///         // Look up the value and compute its remaining TTL, then:
///         Some((value, Some(remaining_ttl)))
///     }
/// }
/// ```
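///
/// For context, a hedged sketch of how a manager might use this trait to
/// promote an L2 hit into L1 (the free function and the 60-second fallback TTL
/// are illustrative assumptions, not this crate's actual manager logic):
///
/// ```rust,ignore
/// async fn get_promoting(
///     l1: &impl CacheBackend,
///     l2: &impl L2CacheBackend,
///     key: &str,
/// ) -> Option<serde_json::Value> {
///     if let Some(hit) = l1.get(key).await {
///         return Some(hit); // L1 hit: fastest path
///     }
///     let (value, ttl) = l2.get_with_ttl(key).await?;
///     // Promote into L1 with the remaining TTL; if the L2 entry never
///     // expires, fall back to a short default so L1 stays bounded.
///     let ttl = ttl.unwrap_or(Duration::from_secs(60));
///     let _ = l1.set_with_ttl(key, value.clone(), ttl).await;
///     Some(value)
/// }
/// ```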
#[async_trait]
pub trait L2CacheBackend: CacheBackend {
    /// Get value with its remaining TTL from L2 cache
    ///
    /// This method retrieves both the value and its remaining time-to-live.
    /// The cache manager uses it to promote entries from L2 to L1 with the
    /// correct TTL.
    ///
    /// # Arguments
    ///
    /// * `key` - The cache key to retrieve
    ///
    /// # Returns
    ///
    /// * `Some((value, Some(ttl)))` - Value found with remaining TTL
    /// * `Some((value, None))` - Value found but no expiration set (never expires)
    /// * `None` - Key not found or expired
    ///
    /// # TTL Semantics
    ///
    /// - TTL represents the **remaining** time until expiration
    /// - A `None` TTL means the key has no expiration
    /// - Implementations should use backend-specific TTL commands (e.g., Redis `TTL`)
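    ///
    /// As a hedged illustration, a Redis-backed implementation might map the
    /// `TTL` command's reply like this (a sketch only; connection handling and
    /// the value fetch are omitted):
    ///
    /// ```rust,ignore
    /// let ttl_secs: i64 = redis::cmd("TTL").arg(key).query_async(&mut conn).await.ok()?;
    /// let ttl = match ttl_secs {
    ///     -2 => return None,                         // key does not exist
    ///     -1 => None,                                // key exists with no expiration
    ///     s => Some(Duration::from_secs(s as u64)),  // remaining TTL in seconds
    /// };
    /// ```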
    async fn get_with_ttl(&self, key: &str) -> Option<(serde_json::Value, Option<Duration>)>;
}

/// Optional trait for cache backends that support event streaming
///
/// This trait defines operations for event-driven architectures using
/// streaming data structures like Redis Streams.
///
/// # Capabilities
///
/// - Publish events to streams with automatic trimming
/// - Read latest entries (newest first)
/// - Read entries with blocking support
///
/// # Backend Requirements
///
/// Not all cache backends support streaming. This trait is optional and
/// should only be implemented by backends with native streaming support
/// (e.g., Redis Streams, Kafka, Pulsar).
///
/// # Example
///
/// ```rust,ignore
/// use multi_tier_cache::{StreamingBackend, async_trait};
/// use anyhow::Result;
///
/// #[async_trait]
/// impl StreamingBackend for MyStreamingCache {
///     async fn stream_add(
///         &self,
///         stream_key: &str,
///         fields: Vec<(String, String)>,
///         maxlen: Option<usize>,
///     ) -> Result<String> {
///         // Add entry to stream, return entry ID
///         todo!()
///     }
///
///     // ... implement the other methods
/// }
/// ```
#[async_trait]
pub trait StreamingBackend: Send + Sync {
    /// Add an entry to a stream
    ///
    /// # Arguments
    ///
    /// * `stream_key` - Name of the stream (e.g., `events_stream`)
    /// * `fields` - Vector of field-value pairs to add
    /// * `maxlen` - Optional maximum stream length (older entries are trimmed)
    ///
    /// # Returns
    ///
    /// * `Ok(entry_id)` - The generated entry ID (e.g., "1234567890-0")
    /// * `Err(e)` - Stream operation failed
    ///
    /// # Trimming Behavior
    ///
    /// If `maxlen` is specified, the stream is automatically trimmed to keep
    /// approximately that many entries (oldest entries are removed).
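    ///
    /// A hedged usage sketch (the stream name, field names, and cap are
    /// illustrative, not prescribed by this crate):
    ///
    /// ```rust,ignore
    /// let entry_id = backend
    ///     .stream_add(
    ///         "events_stream",
    ///         vec![("type".to_string(), "user_created".to_string())],
    ///         Some(10_000), // keep roughly the newest 10,000 entries
    ///     )
    ///     .await?;
    /// ```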
    async fn stream_add(
        &self,
        stream_key: &str,
        fields: Vec<(String, String)>,
        maxlen: Option<usize>,
    ) -> Result<String>;

    /// Read the latest N entries from a stream (newest first)
    ///
    /// # Arguments
    ///
    /// * `stream_key` - Name of the stream
    /// * `count` - Maximum number of entries to retrieve
    ///
    /// # Returns
    ///
    /// * `Ok(entries)` - Vector of (`entry_id`, fields) tuples (newest first)
    /// * `Err(e)` - Stream operation failed
    ///
    /// # Ordering
    ///
    /// Entries are returned in reverse chronological order (newest first).
    async fn stream_read_latest(
        &self,
        stream_key: &str,
        count: usize,
    ) -> Result<Vec<(String, Vec<(String, String)>)>>;

    /// Read entries from a stream with optional blocking
    ///
    /// # Arguments
    ///
    /// * `stream_key` - Name of the stream
    /// * `last_id` - Last entry ID seen ("0" for the beginning, "$" for new entries only)
    /// * `count` - Maximum number of entries to retrieve
    /// * `block_ms` - Optional blocking timeout in milliseconds (`None` = non-blocking)
    ///
    /// # Returns
    ///
    /// * `Ok(entries)` - Vector of (`entry_id`, fields) tuples
    /// * `Err(e)` - Stream operation failed
    ///
    /// # Blocking Behavior
    ///
    /// - `None`: Non-blocking, returns immediately
    /// - `Some(ms)`: Blocks up to `ms` milliseconds waiting for new entries
    ///
    /// # Use Cases
    ///
    /// - Non-blocking: Poll for new events
    /// - Blocking: Long-polling for real-time event consumption
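    ///
    /// A hedged sketch of a long-polling consumer loop (the stream name,
    /// batch size, and timeout are illustrative assumptions):
    ///
    /// ```rust,ignore
    /// // Start from "$" so only entries published after startup are seen.
    /// let mut last_id = String::from("$");
    /// loop {
    ///     // Block for up to one second waiting for new entries.
    ///     let entries = backend.stream_read("events_stream", &last_id, 32, Some(1_000)).await?;
    ///     for (id, fields) in entries {
    ///         println!("{id}: {fields:?}");
    ///         last_id = id; // resume after the newest entry we processed
    ///     }
    /// }
    /// ```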
    async fn stream_read(
        &self,
        stream_key: &str,
        last_id: &str,
        count: usize,
        block_ms: Option<usize>,
    ) -> Result<Vec<(String, Vec<(String, String)>)>>;
}