scirs2_core/error/circuitbreaker.rs

//! Circuit breaker pattern for error handling
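//!
//! # Example
//!
//! An illustrative usage sketch (the module path is assumed from the file location):
//!
//! ```ignore
//! use scirs2_core::error::circuitbreaker::{CircuitBreaker, CircuitBreakerConfig};
//!
//! let breaker = CircuitBreaker::new(CircuitBreakerConfig::default());
//! // Calls made through `execute` are counted; repeated failures open the circuit.
//! let answer = breaker.execute(|| Ok(2 + 2))?;
//! ```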

use crate::error::{CoreResult as Result, ErrorContext};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};

/// State of the circuit breaker
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CircuitState {
    /// Circuit is functioning normally
    Closed,
    /// Circuit is temporarily disabled
    Open,
    /// Circuit is testing if it should reopen
    HalfOpen,
}
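
// State transitions implemented below:
//   Closed   -> Open:     after `failure_threshold` consecutive failures
//   Open     -> HalfOpen: once `timeout` has elapsed since the circuit opened
//   HalfOpen -> Closed:   after `success_threshold` successes
//   HalfOpen -> Open:     on any failure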

/// Status of circuit breaker
#[derive(Debug, Clone)]
pub struct CircuitBreakerStatus {
    pub state: CircuitState,
    pub failure_count: usize,
    pub success_count: usize,
    pub last_state_change: Instant,
}

/// Fallback strategy for failed operations
#[derive(Debug, Clone)]
pub enum FallbackStrategy {
    /// Return default value
    Default,
    /// Use cached value
    Cache,
    /// Execute alternative function
    Alternative,
    /// Fail fast
    FailFast,
}

/// Retry policy configuration
#[derive(Debug, Clone)]
pub struct RetryPolicy {
    /// Maximum number of retries after the initial attempt
    pub max_retries: usize,
    /// Delay before the first retry
    pub initial_delay: Duration,
    /// Upper bound on the delay between retries
    pub max_delay: Duration,
    /// Multiplier applied to the delay after each failed attempt
    pub exponential_base: f64,
}

impl Default for RetryPolicy {
    fn default() -> Self {
        Self {
            max_retries: 3,
            initial_delay: Duration::from_millis(100),
            max_delay: Duration::from_secs(10),
            exponential_base: 2.0,
        }
    }
}
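
// With the defaults above, retry delays grow as 100ms, 200ms, 400ms, ...,
// doubling after each failed attempt and capped at `max_delay` (10s).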

/// Circuit breaker for fault tolerance
pub struct CircuitBreaker {
    /// Current state
    state: Arc<Mutex<CircuitState>>,
    /// Failure count
    failure_count: AtomicUsize,
    /// Success count in half-open state
    success_count: AtomicUsize,
    /// Timestamp of last state change
    last_state_change: Arc<Mutex<Instant>>,
    /// Configuration
    config: CircuitBreakerConfig,
}

/// Configuration for circuit breaker
#[derive(Debug, Clone)]
pub struct CircuitBreakerConfig {
    /// Threshold for opening circuit
    pub failure_threshold: usize,
    /// Threshold for closing circuit from half-open
    pub success_threshold: usize,
    /// Timeout before attempting recovery
    pub timeout: Duration,
}

impl Default for CircuitBreakerConfig {
    fn default() -> Self {
        Self {
            failure_threshold: 5,
            success_threshold: 3,
            timeout: Duration::from_secs(30),
        }
    }
}
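
// With these defaults, five consecutive failures open the circuit; after 30s a
// half-open probe is admitted, and three probe successes close the circuit again.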

impl CircuitBreaker {
    /// Create new circuit breaker
    pub fn new(config: CircuitBreakerConfig) -> Self {
        Self {
            state: Arc::new(Mutex::new(CircuitState::Closed)),
            failure_count: AtomicUsize::new(0),
            success_count: AtomicUsize::new(0),
            last_state_change: Arc::new(Mutex::new(Instant::now())),
            config,
        }
    }

    /// Get current state
    pub fn state(&self) -> CircuitState {
        *self.state.lock().expect("Operation failed")
    }

    /// Record success
    pub fn record_success(&self) {
        let mut state = self.state.lock().expect("Operation failed");
        match *state {
            CircuitState::HalfOpen => {
                let count = self.success_count.fetch_add(1, Ordering::SeqCst) + 1;
                if count >= self.config.success_threshold {
                    *state = CircuitState::Closed;
                    self.failure_count.store(0, Ordering::SeqCst);
                    self.success_count.store(0, Ordering::SeqCst);
                    *self.last_state_change.lock().expect("Operation failed") = Instant::now();
                }
            }
            CircuitState::Closed => {
                self.failure_count.store(0, Ordering::SeqCst);
            }
            CircuitState::Open => {}
        }
    }

    /// Record failure
    pub fn record_failure(&self) {
        let mut state = self.state.lock().expect("Operation failed");
        match *state {
            CircuitState::Closed => {
                let count = self.failure_count.fetch_add(1, Ordering::SeqCst) + 1;
                if count >= self.config.failure_threshold {
                    *state = CircuitState::Open;
                    *self.last_state_change.lock().expect("Operation failed") = Instant::now();
                }
            }
            CircuitState::HalfOpen => {
                *state = CircuitState::Open;
                self.failure_count.store(0, Ordering::SeqCst);
                self.success_count.store(0, Ordering::SeqCst);
                *self.last_state_change.lock().expect("Operation failed") = Instant::now();
            }
            CircuitState::Open => {}
        }
    }

    /// Transition from open to half-open once the recovery timeout has elapsed
    pub fn check_state(&self) {
        let mut state = self.state.lock().expect("Operation failed");
        if *state == CircuitState::Open {
            let elapsed = self
                .last_state_change
                .lock()
                .expect("Operation failed")
                .elapsed();
            if elapsed >= self.config.timeout {
                *state = CircuitState::HalfOpen;
                self.success_count.store(0, Ordering::SeqCst);
                *self.last_state_change.lock().expect("Operation failed") = Instant::now();
            }
        }
    }

    /// Check if circuit allows request
    pub fn is_allowed(&self) -> bool {
        self.check_state();
        let state = self.state();
        state == CircuitState::Closed || state == CircuitState::HalfOpen
    }

    /// Get current status
    pub fn status(&self) -> CircuitBreakerStatus {
        CircuitBreakerStatus {
            state: self.state(),
            failure_count: self.failure_count.load(Ordering::SeqCst),
            success_count: self.success_count.load(Ordering::SeqCst),
            last_state_change: *self.last_state_change.lock().expect("Operation failed"),
        }
    }

    /// Execute function with circuit breaker protection
    pub fn execute<F, T>(&self, f: F) -> Result<T>
    where
        F: FnOnce() -> Result<T>,
    {
        use crate::error::CoreError;

        // Check if we can execute
        if !self.is_allowed() {
            return Err(CoreError::ValueError(ErrorContext::new(
                "Circuit breaker is open",
            )));
        }

        // Execute the function
        match f() {
            Ok(result) => {
                self.record_success();
                Ok(result)
            }
            Err(e) => {
                self.record_failure();
                Err(e)
            }
        }
    }
}

/// Executor with retry capabilities
pub struct RetryExecutor {
    policy: RetryPolicy,
}

impl RetryExecutor {
    /// Create new retry executor
    pub fn new(policy: RetryPolicy) -> Self {
        Self { policy }
    }

    /// Execute function with retries, applying exponential backoff between attempts
    pub fn execute<F, T>(&self, mut f: F) -> Result<T>
    where
        F: FnMut() -> Result<T>,
    {
        let mut delay = self.policy.initial_delay;
        let mut last_error = None;
        // Initial attempt plus up to `max_retries` retries
        for attempt in 0..=self.policy.max_retries {
            match f() {
                Ok(result) => return Ok(result),
                Err(e) => last_error = Some(e),
            }
            // Back off before the next attempt (skipped after the final one)
            if attempt < self.policy.max_retries {
                std::thread::sleep(delay);
                delay = delay
                    .mul_f64(self.policy.exponential_base)
                    .min(self.policy.max_delay);
            }
        }
        Err(last_error.expect("Operation failed"))
    }
}

/// Resilient executor with circuit breaker and fallback
pub struct ResilientExecutor {
    circuit_breaker: CircuitBreaker,
    retry_executor: RetryExecutor,
    fallback_strategy: FallbackStrategy,
}

impl ResilientExecutor {
    /// Create new resilient executor
    pub fn new(
        circuit_breaker: CircuitBreaker,
        retry_executor: RetryExecutor,
        fallback_strategy: FallbackStrategy,
    ) -> Self {
        Self {
            circuit_breaker,
            retry_executor,
            fallback_strategy,
        }
    }

    /// Execute function with resilience
    pub fn execute<F, T>(&self, f: F) -> Result<T>
    where
        F: FnMut() -> Result<T>,
    {
        if !self.circuit_breaker.is_allowed() {
            return Err(crate::error::CoreError::ValueError(ErrorContext::new(
                "Circuit breaker is open",
            )));
        }

        match self.retry_executor.execute(f) {
            Ok(result) => {
                self.circuit_breaker.record_success();
                Ok(result)
            }
            Err(e) => {
                self.circuit_breaker.record_failure();
                Err(e)
            }
        }
    }
}
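
// Illustrative wiring sketch (defaults shown for demonstration only):
//
//     let executor = ResilientExecutor::new(
//         CircuitBreaker::new(CircuitBreakerConfig::default()),
//         RetryExecutor::new(RetryPolicy::default()),
//         FallbackStrategy::FailFast,
//     );
//     let value = executor.execute(|| Ok(42))?;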

// Global registry of circuit breakers
use once_cell::sync::Lazy;
use std::collections::HashMap;
use std::sync::RwLock;

static CIRCUIT_BREAKERS: Lazy<RwLock<HashMap<String, Arc<CircuitBreaker>>>> =
    Lazy::new(|| RwLock::new(HashMap::new()));

/// Get a circuit breaker by name
pub fn get_circuitbreaker(name: &str) -> Option<Arc<CircuitBreaker>> {
    CIRCUIT_BREAKERS
        .read()
        .expect("Operation failed")
        .get(name)
        .cloned()
}
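
// The registry above exposes only lookup and listing; a registration helper is
// assumed to live elsewhere in the crate. A minimal sketch of one could be:
//
//     pub fn register_circuitbreaker(name: &str, breaker: Arc<CircuitBreaker>) {
//         CIRCUIT_BREAKERS
//             .write()
//             .expect("Operation failed")
//             .insert(name.to_string(), breaker);
//     }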

/// List all circuit breakers
pub fn list_circuitbreakers() -> Vec<String> {
    CIRCUIT_BREAKERS
        .read()
        .expect("Operation failed")
        .keys()
        .cloned()
        .collect()
}
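
#[cfg(test)]
mod tests {
    //! Illustrative tests sketching the state transitions implemented above;
    //! thresholds and timeouts are chosen only for demonstration.
    use super::*;

    #[test]
    fn opens_after_failure_threshold() {
        let breaker = CircuitBreaker::new(CircuitBreakerConfig {
            failure_threshold: 2,
            success_threshold: 1,
            timeout: Duration::from_secs(60),
        });
        assert_eq!(breaker.state(), CircuitState::Closed);
        breaker.record_failure();
        breaker.record_failure();
        // Two consecutive failures reach the threshold and open the circuit.
        assert_eq!(breaker.state(), CircuitState::Open);
        assert!(!breaker.is_allowed());
    }

    #[test]
    fn recovers_through_half_open() {
        let breaker = CircuitBreaker::new(CircuitBreakerConfig {
            failure_threshold: 1,
            success_threshold: 1,
            timeout: Duration::from_millis(1),
        });
        breaker.record_failure();
        std::thread::sleep(Duration::from_millis(5));
        // After the timeout the breaker admits a half-open probe.
        assert!(breaker.is_allowed());
        breaker.record_success();
        assert_eq!(breaker.state(), CircuitState::Closed);
    }
}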