multi_tier_cache/traits.rs
//! Cache Backend Traits
//!
//! This module defines the trait abstractions that allow users to implement
//! custom cache backends for both L1 (in-memory) and L2 (distributed) caches.
//!
//! # Architecture
//!
//! - `CacheBackend`: Core trait for all cache implementations
//! - `L2CacheBackend`: Extended trait for L2 caches with TTL introspection
//! - `StreamingBackend`: Optional trait for event streaming capabilities
//!
//! # Example: Custom L1 Backend
//!
//! ```rust,ignore
//! use multi_tier_cache::{CacheBackend, async_trait};
//! use std::time::Duration;
//! use anyhow::Result;
//!
//! struct MyCustomCache {
//!     // Your implementation
//! }
//!
//! #[async_trait]
//! impl CacheBackend for MyCustomCache {
//!     async fn get(&self, key: &str) -> Option<serde_json::Value> {
//!         // Your implementation
//!     }
//!
//!     async fn set_with_ttl(&self, key: &str, value: serde_json::Value, ttl: Duration) -> Result<()> {
//!         // Your implementation
//!     }
//!
//!     async fn remove(&self, key: &str) -> Result<()> {
//!         // Your implementation
//!     }
//!
//!     async fn health_check(&self) -> bool {
//!         // Your implementation
//!     }
//! }
//! ```

use std::time::Duration;
use anyhow::Result;
use async_trait::async_trait;
use serde_json;
/// Core cache backend trait for both L1 and L2 caches
///
/// This trait defines the essential operations that any cache backend must support.
/// Implement this trait to create custom L1 (in-memory) or L2 (distributed) cache backends.
///
/// # Required Operations
///
/// - `get`: Retrieve a value by key
/// - `set_with_ttl`: Store a value with a time-to-live
/// - `remove`: Delete a value by key
/// - `health_check`: Verify that the cache backend is operational
///
/// # Thread Safety
///
/// Implementations must be `Send + Sync` to support concurrent access across async tasks.
///
/// # Performance Considerations
///
/// - `get` operations should be optimized for low latency (target: <1ms for L1, <5ms for L2)
/// - `set_with_ttl` operations may tolerate slightly higher latency, but should still complete quickly
/// - Consider connection pooling for distributed backends
///
/// # Example
///
/// See the module-level documentation for a complete example, and the usage sketch following this trait definition.
#[async_trait]
pub trait CacheBackend: Send + Sync {
    /// Get value from cache by key
    ///
    /// # Arguments
    ///
    /// * `key` - The cache key to retrieve
    ///
    /// # Returns
    ///
    /// * `Some(value)` - Value found in cache
    /// * `None` - Key not found or expired
    async fn get(&self, key: &str) -> Option<serde_json::Value>;

    /// Set value in cache with time-to-live
    ///
    /// # Arguments
    ///
    /// * `key` - The cache key
    /// * `value` - The value to store (must be JSON-serializable)
    /// * `ttl` - Time-to-live duration
    ///
    /// # Returns
    ///
    /// * `Ok(())` - Value successfully cached
    /// * `Err(e)` - Cache operation failed
    async fn set_with_ttl(
        &self,
        key: &str,
        value: serde_json::Value,
        ttl: Duration,
    ) -> Result<()>;

    /// Remove value from cache
    ///
    /// # Arguments
    ///
    /// * `key` - The cache key to remove
    ///
    /// # Returns
    ///
    /// * `Ok(())` - Value removed (or didn't exist)
    /// * `Err(e)` - Cache operation failed
    async fn remove(&self, key: &str) -> Result<()>;

    /// Check if cache backend is healthy
    ///
    /// This method should verify that the cache backend is operational.
    /// For distributed caches, this typically involves a ping or connectivity check.
    ///
    /// # Returns
    ///
    /// * `true` - Cache is healthy and operational
    /// * `false` - Cache is unhealthy or unreachable
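    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// // Sketch, assuming the `redis` crate with a cloneable async connection
    /// // (e.g. `redis::aio::ConnectionManager`) stored in a hypothetical `self.conn`:
    /// async fn health_check(&self) -> bool {
    ///     let mut conn = self.conn.clone();
    ///     redis::cmd("PING")
    ///         .query_async::<_, String>(&mut conn)
    ///         .await
    ///         .is_ok()
    /// }
    /// ```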
    async fn health_check(&self) -> bool;

    /// Get the name of this cache backend
    ///
    /// This is used for logging and debugging purposes.
    ///
    /// # Returns
    ///
    /// A string identifying this cache backend (e.g., "Moka", "Redis", "Memcached")
    fn name(&self) -> &str {
        "unknown"
    }
}
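
// Illustrative usage sketch (not part of this crate's public API): exercising
// any `CacheBackend` through dynamic dispatch. The function name, key, and
// value below are hypothetical.
#[allow(dead_code)]
async fn cache_roundtrip_example(cache: &dyn CacheBackend) -> Result<()> {
    // Store a JSON value for one minute.
    cache
        .set_with_ttl(
            "user:42",
            serde_json::json!({ "name": "Ada" }),
            Duration::from_secs(60),
        )
        .await?;
    // A hit returns the cached JSON value; a miss or an expired key returns `None`.
    if let Some(value) = cache.get("user:42").await {
        println!("hit on {}: {value}", cache.name());
    }
    // Removal is idempotent: `Ok(())` even if the key was already gone.
    cache.remove("user:42").await
}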

/// Extended trait for L2 cache backends with TTL introspection
///
/// This trait extends `CacheBackend` with the ability to retrieve both a value
/// and its remaining TTL. This is essential for implementing efficient L2-to-L1
/// promotion with accurate TTL propagation.
///
/// # Use Cases
///
/// - L2-to-L1 promotion with the same TTL (see the sketch after this trait)
/// - TTL-based cache warming strategies
/// - Monitoring and analytics
///
/// # Example
///
/// ```rust,ignore
/// use multi_tier_cache::{L2CacheBackend, async_trait};
/// use std::time::Duration;
///
/// #[async_trait]
/// impl L2CacheBackend for MyDistributedCache {
///     async fn get_with_ttl(&self, key: &str) -> Option<(serde_json::Value, Option<Duration>)> {
///         // Retrieve value and calculate remaining TTL
///         Some((value, Some(remaining_ttl)))
///     }
/// }
/// ```
#[async_trait]
pub trait L2CacheBackend: CacheBackend {
    /// Get value with its remaining TTL from L2 cache
    ///
    /// This method retrieves both the value and its remaining time-to-live.
    /// This is used by the cache manager to promote entries from L2 to L1
    /// with the correct TTL.
    ///
    /// # Arguments
    ///
    /// * `key` - The cache key to retrieve
    ///
    /// # Returns
    ///
    /// * `Some((value, Some(ttl)))` - Value found with remaining TTL
    /// * `Some((value, None))` - Value found but no expiration set (never expires)
    /// * `None` - Key not found or expired
    ///
    /// # TTL Semantics
    ///
    /// - TTL represents the **remaining** time until expiration
    /// - A `None` TTL means the key has no expiration
    /// - Implementations should use backend-specific TTL commands (e.g., Redis `TTL`)
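    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// // Sketch, assuming a Redis-style backend: map the integer reply of
    /// // `TTL key` onto this trait's `Option<Duration>` semantics.
    /// let ttl = match ttl_secs {
    ///     -2 => return None,                        // key does not exist
    ///     -1 => None,                               // key exists, never expires
    ///     s => Some(Duration::from_secs(s as u64)), // remaining TTL
    /// };
    /// ```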
    async fn get_with_ttl(
        &self,
        key: &str,
    ) -> Option<(serde_json::Value, Option<Duration>)>;
}
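
// Illustrative sketch (not part of this crate's public API) of the L2-to-L1
// promotion described above: a hit in L2 is copied into L1 with its remaining
// TTL preserved. The function name and the fallback TTL are hypothetical.
#[allow(dead_code)]
async fn promote_to_l1_example(
    l1: &dyn CacheBackend,
    l2: &dyn L2CacheBackend,
    key: &str,
) -> Option<serde_json::Value> {
    let (value, remaining) = l2.get_with_ttl(key).await?;
    // Keys with no L2 expiration still get a bounded lifetime in L1.
    let ttl = remaining.unwrap_or(Duration::from_secs(300));
    // A failed L1 write is non-fatal: the value can still be served.
    let _ = l1.set_with_ttl(key, value.clone(), ttl).await;
    Some(value)
}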

/// Optional trait for cache backends that support event streaming
///
/// This trait defines operations for event-driven architectures using
/// streaming data structures like Redis Streams.
///
/// # Capabilities
///
/// - Publish events to streams with automatic trimming
/// - Read latest entries (newest first)
/// - Read entries with blocking support
///
/// # Backend Requirements
///
/// Not all cache backends support streaming. This trait is optional and
/// should only be implemented by backends with native streaming support
/// (e.g., Redis Streams, Kafka, Pulsar).
///
/// # Example
///
/// ```rust,ignore
/// use multi_tier_cache::{StreamingBackend, async_trait};
///
/// #[async_trait]
/// impl StreamingBackend for MyStreamingCache {
///     async fn stream_add(
///         &self,
///         stream_key: &str,
///         fields: Vec<(String, String)>,
///         maxlen: Option<usize>,
///     ) -> Result<String> {
///         // Add entry to stream, return entry ID
///     }
///
///     // ... implement other methods
/// }
/// ```
#[async_trait]
pub trait StreamingBackend: Send + Sync {
    /// Add an entry to a stream
    ///
    /// # Arguments
    ///
    /// * `stream_key` - Name of the stream (e.g., "events_stream")
    /// * `fields` - Vector of field-value pairs to add
    /// * `maxlen` - Optional maximum stream length (older entries are trimmed)
    ///
    /// # Returns
    ///
    /// * `Ok(entry_id)` - The generated entry ID (e.g., "1234567890-0")
    /// * `Err(e)` - Stream operation failed
    ///
    /// # Trimming Behavior
    ///
    /// If `maxlen` is specified, the stream is automatically trimmed to keep
    /// approximately that many entries (oldest entries are removed).
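    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// // Sketch: publish one event and cap the stream at roughly 10,000 entries.
    /// let entry_id = backend
    ///     .stream_add(
    ///         "events_stream",
    ///         vec![("event".to_string(), "user_created".to_string())],
    ///         Some(10_000),
    ///     )
    ///     .await?;
    /// ```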
    async fn stream_add(
        &self,
        stream_key: &str,
        fields: Vec<(String, String)>,
        maxlen: Option<usize>,
    ) -> Result<String>;

    /// Read the latest N entries from a stream (newest first)
    ///
    /// # Arguments
    ///
    /// * `stream_key` - Name of the stream
    /// * `count` - Maximum number of entries to retrieve
    ///
    /// # Returns
    ///
    /// * `Ok(entries)` - Vector of (entry_id, fields) tuples (newest first)
    /// * `Err(e)` - Stream operation failed
    ///
    /// # Ordering
    ///
    /// Entries are returned in reverse chronological order (newest first).
    async fn stream_read_latest(
        &self,
        stream_key: &str,
        count: usize,
    ) -> Result<Vec<(String, Vec<(String, String)>)>>;

    /// Read entries from a stream with optional blocking
    ///
    /// # Arguments
    ///
    /// * `stream_key` - Name of the stream
    /// * `last_id` - Last entry ID seen ("0" for the beginning, "$" for new entries only)
    /// * `count` - Maximum number of entries to retrieve
    /// * `block_ms` - Optional blocking timeout in milliseconds (`None` = non-blocking)
    ///
    /// # Returns
    ///
    /// * `Ok(entries)` - Vector of (entry_id, fields) tuples
    /// * `Err(e)` - Stream operation failed
    ///
    /// # Blocking Behavior
    ///
    /// - `None`: Non-blocking, returns immediately
    /// - `Some(ms)`: Blocks up to `ms` milliseconds waiting for new entries
    ///
    /// # Use Cases
    ///
    /// - Non-blocking: Poll for new events
    /// - Blocking: Long-polling for real-time event consumption
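    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// // Sketch: long-poll for new entries, resuming from the last ID seen.
    /// // `handle_event` is a hypothetical handler.
    /// let mut last_id = String::from("$"); // only entries added after the first call
    /// loop {
    ///     let entries = backend
    ///         .stream_read("events_stream", &last_id, 100, Some(5_000))
    ///         .await?;
    ///     for (id, fields) in entries {
    ///         last_id = id; // resume point for the next read
    ///         handle_event(fields);
    ///     }
    /// }
    /// ```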
    async fn stream_read(
        &self,
        stream_key: &str,
        last_id: &str,
        count: usize,
        block_ms: Option<usize>,
    ) -> Result<Vec<(String, Vec<(String, String)>)>>;
}