hitbox_backend/composition/policy/write/mod.rs
//! Write policies for controlling write operations across cache layers.
//!
//! This module defines the [`CompositionWritePolicy`] trait and its implementations.
//! Different strategies (sequential, optimistic parallel, race) can be used to
//! optimize write performance and availability based on application requirements.
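//!
//! A minimal sketch of selecting a strategy (the import path shown is an
//! assumption; see each submodule for the actual constructors and semantics):
//!
//! ```ignore
//! use hitbox_backend::composition::policy::write::SequentialWritePolicy;
//!
//! // `OptimisticParallelWritePolicy` and `RaceWritePolicy` are drop-in alternatives.
//! let policy = SequentialWritePolicy::default();
//! ```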

use async_trait::async_trait;
use hitbox_core::{CacheKey, Offload};
use std::future::Future;

use crate::BackendError;

pub mod optimistic_parallel;
pub mod race;
pub mod sequential;

pub use optimistic_parallel::OptimisticParallelWritePolicy;
pub use race::{RaceLoserPolicy, RaceWritePolicy};
pub use sequential::SequentialWritePolicy;

/// Policy trait for controlling write operations across cache layers.
///
/// This trait encapsulates the **control flow strategy** (sequential, parallel, conditional)
/// for writing to multiple cache layers, while delegating the actual write operations
/// to provided closures. This design allows the same policy to be used at both the
/// `CacheBackend` level (typed data) and the `Backend` level (raw bytes).
///
/// # Type Parameters
///
/// The [`execute_with`](CompositionWritePolicy::execute_with) method is generic over:
/// * `F1`, `F2` - Closures that write to L1 and L2
/// * `Fut1`, `Fut2` - The futures returned by those closures
/// * `O` - The offload type for background task execution
///
/// All write futures resolve to `Result<(), BackendError>`.
///
/// # Example
///
/// ```ignore
/// use hitbox_backend::composition::policy::CompositionWritePolicy;
///
/// let policy = SequentialWritePolicy::default();
///
/// // Use at the `CacheBackend` level (typed data)
/// policy.execute_with(
///     key.clone(),
///     |k| async { l1.set::<User>(&k, value, ttl).await },
///     |k| async { l2.set::<User>(&k, value, ttl).await },
///     &offload,
/// ).await?;
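///
/// // The same policy can drive the `Backend` level (raw bytes). Here
/// // `write_l1_bytes` and `write_l2_bytes` are hypothetical placeholders for
/// // whatever performs the raw write against each layer.
/// policy.execute_with(
///     key,
///     |k| async move { write_l1_bytes(k).await },
///     |k| async move { write_l2_bytes(k).await },
///     &offload,
/// ).await?;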
/// ```
#[async_trait]
pub trait CompositionWritePolicy: Send + Sync {
    /// Execute a write operation with custom write closures for each layer.
    ///
    /// The policy determines the control flow (when and how to call the closures),
    /// while the closures handle the actual writing and any pre-processing
    /// (such as serialization or validation).
    ///
    /// # Arguments
    /// * `key` - The cache key to write (owned so the futures can be `'static`)
    /// * `write_l1` - Closure that writes to L1
    /// * `write_l2` - Closure that writes to L2
    /// * `offload` - Offload manager for spawning background tasks (e.g., losing race futures)
    ///
    /// # Returns
    /// Success if the policy's success criteria are met. The criteria differ per policy
    /// (e.g., both layers must succeed, or at least one must succeed).
    ///
    /// # Generic Parameters
    /// * `F1`, `F2` - Closures for writing to L1 and L2
    /// * `Fut1`, `Fut2` - The futures returned by the closures, resolving to `Result<(), BackendError>`
    /// * `O` - The offload type for background task execution
    ///
    /// # Error Handling
    /// When both layers fail, implementations should preserve both errors in a
    /// `CompositionError::BothLayersFailed` for better debugging.
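    ///
    /// # Example Implementation
    ///
    /// A minimal sketch of a policy that writes to L1 and then to L2, returning the
    /// first error it hits (illustrative only; see [`SequentialWritePolicy`] for a
    /// real implementation):
    ///
    /// ```ignore
    /// struct L1ThenL2;
    ///
    /// #[async_trait]
    /// impl CompositionWritePolicy for L1ThenL2 {
    ///     async fn execute_with<F1, F2, Fut1, Fut2, O>(
    ///         &self,
    ///         key: CacheKey,
    ///         write_l1: F1,
    ///         write_l2: F2,
    ///         _offload: &O,
    ///     ) -> Result<(), BackendError>
    ///     where
    ///         F1: FnOnce(CacheKey) -> Fut1 + Send,
    ///         F2: FnOnce(CacheKey) -> Fut2 + Send,
    ///         Fut1: Future<Output = Result<(), BackendError>> + Send + 'static,
    ///         Fut2: Future<Output = Result<(), BackendError>> + Send + 'static,
    ///         O: Offload<'static>,
    ///     {
    ///         // Each closure gets its own owned copy of the key.
    ///         write_l1(key.clone()).await?;
    ///         write_l2(key).await
    ///     }
    /// }
    /// ```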
    async fn execute_with<F1, F2, Fut1, Fut2, O>(
        &self,
        key: CacheKey,
        write_l1: F1,
        write_l2: F2,
        offload: &O,
    ) -> Result<(), BackendError>
    where
        F1: FnOnce(CacheKey) -> Fut1 + Send,
        F2: FnOnce(CacheKey) -> Fut2 + Send,
        Fut1: Future<Output = Result<(), BackendError>> + Send + 'static,
        Fut2: Future<Output = Result<(), BackendError>> + Send + 'static,
        O: Offload<'static>;
}