hitbox_backend/composition/policy/write/optimistic_parallel.rs
//! Optimistic parallel write policy implementation.
//!
//! This policy writes to both L1 and L2 simultaneously and succeeds if at least
//! one write succeeds, maximizing availability at the cost of potential inconsistency.

use async_trait::async_trait;
use hitbox_core::{CacheKey, Offload};
use std::future::Future;

use super::CompositionWritePolicy;
use crate::{BackendError, composition::CompositionError};

/// Optimistic parallel write policy: write to both layers simultaneously and
/// succeed if at least one write succeeds.
///
/// This strategy provides:
/// - Maximum availability (fails only when both writes fail)
/// - Fast writes (parallel execution)
/// - Weak consistency (layers may diverge)
///
/// # Behavior
/// 1. Start both `write_l1(key)` and `write_l2(key)` in parallel
/// 2. Wait for both to complete
/// 3. Aggregate the results (see the example below):
///    - Both succeed: return `Ok` (best case)
///    - One succeeds: return `Ok` and log a warning (partial success)
///    - Both fail: return `Err`
///
/// # Consistency Guarantee
/// If this operation returns `Ok(())`, **at least one** of L1 or L2 has been updated.
/// This could mean:
/// - Both updated (strong consistency)
/// - Only L1 updated (L2 failed)
/// - Only L2 updated (L1 failed)
///
/// # Tradeoffs
/// - **Pros**: Highest availability, fast writes, tolerates partial failures
/// - **Cons**: Layers may diverge, and partial failures require monitoring
///
/// # Use Cases
/// - High availability requirements
/// - Non-critical data where eventual consistency is acceptable
/// - Systems with background reconciliation
/// - Degraded mode operation
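///
/// # Example
///
/// A minimal sketch of driving the policy directly. Marked `ignore` because the
/// `l1`/`l2` layer handles and the `offload` value are hypothetical placeholders,
/// not part of this module:
///
/// ```ignore
/// let policy = OptimisticParallelWritePolicy::new();
/// policy
///     .execute_with(
///         key,
///         move |k| async move { l1.write(k).await }, // hypothetical L1 write closure
///         move |k| async move { l2.write(k).await }, // hypothetical L2 write closure
///         &offload, // any `Offload<'static>`; unused by this policy
///     )
///     .await?;
/// // Reaching this point means at least one of L1/L2 accepted the write.
/// ```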
#[derive(Debug, Clone, Copy, Default)]
pub struct OptimisticParallelWritePolicy;

impl OptimisticParallelWritePolicy {
    /// Create a new optimistic parallel write policy.
    pub fn new() -> Self {
        Self
    }
}

#[async_trait]
impl CompositionWritePolicy for OptimisticParallelWritePolicy {
    #[tracing::instrument(skip(self, key, write_l1, write_l2, _offload), level = "trace")]
    async fn execute_with<F1, F2, Fut1, Fut2, O>(
        &self,
        key: CacheKey,
        write_l1: F1,
        write_l2: F2,
        _offload: &O,
    ) -> Result<(), BackendError>
    where
        F1: FnOnce(CacheKey) -> Fut1 + Send,
        F2: FnOnce(CacheKey) -> Fut2 + Send,
        Fut1: Future<Output = Result<(), BackendError>> + Send + 'static,
        Fut2: Future<Output = Result<(), BackendError>> + Send + 'static,
        O: Offload<'static>,
    {
        // Write to both in parallel
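        // (`futures::join!` drives both futures concurrently on the current
        // task; it does not spawn them, so no executor handle is needed)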
        let (l1_result, l2_result) = futures::join!(write_l1(key.clone()), write_l2(key));

        // Aggregate results - succeed if at least one succeeds
        match (l1_result, l2_result) {
            (Ok(()), Ok(())) => {
                // Both succeeded - ideal case
                tracing::trace!("Both L1 and L2 writes succeeded");
                Ok(())
            }
            (Ok(()), Err(e)) => {
                // L1 succeeded, L2 failed - partial success
                tracing::warn!(
                    error = ?e,
                    "L2 write failed but L1 succeeded - partial success"
                );
                Ok(()) // Optimistic: succeed if L1 is ok
            }
            (Err(e), Ok(())) => {
                // L1 failed, L2 succeeded - partial success
                tracing::warn!(
                    error = ?e,
                    "L1 write failed but L2 succeeded - partial success"
                );
                Ok(()) // Optimistic: succeed if L2 is ok
            }
            (Err(e1), Err(e2)) => {
                // Both failed - preserve both errors for debugging
                tracing::error!(
                    l1_error = ?e1,
                    l2_error = ?e2,
                    "Both L1 and L2 writes failed"
                );
                Err(BackendError::InternalError(Box::new(
                    CompositionError::BothLayersFailed { l1: e1, l2: e2 },
                )))
            }
        }
    }
}