//! atlassian_cli_api/retry.rs — retry logic with exponential backoff.

use crate::error::{ApiError, Result};
use backoff::{backoff::Backoff, ExponentialBackoff};
use std::time::Duration;
use tracing::{debug, warn};

/// Configuration for the exponential-backoff retry policy used by
/// [`retry_with_backoff`].
#[derive(Clone, Debug)]
pub struct RetryConfig {
    /// Retry budget; see `retry_with_backoff` for how attempts are counted.
    pub max_retries: usize,
    /// Delay before the first retry.
    pub initial_interval: Duration,
    /// Upper bound on the delay between consecutive retries.
    pub max_interval: Duration,
    /// Factor by which the delay grows after each failed attempt.
    pub multiplier: f64,
}
13
14impl Default for RetryConfig {
15 fn default() -> Self {
16 Self {
17 max_retries: 3,
18 initial_interval: Duration::from_millis(500),
19 max_interval: Duration::from_secs(30),
20 multiplier: 2.0,
21 }
22 }
23}
24
25impl RetryConfig {
26 pub fn backoff(&self) -> ExponentialBackoff {
27 ExponentialBackoff {
28 current_interval: self.initial_interval,
29 initial_interval: self.initial_interval,
30 randomization_factor: 0.1,
31 multiplier: self.multiplier,
32 max_interval: self.max_interval,
33 max_elapsed_time: None,
34 ..Default::default()
35 }
36 }
37}
38
39pub async fn retry_with_backoff<F, Fut, T>(config: &RetryConfig, operation: F) -> Result<T>
40where
41 F: Fn() -> Fut,
42 Fut: std::future::Future<Output = Result<T>>,
43{
44 let mut backoff = config.backoff();
45 let mut attempts = 0;
46
47 loop {
48 attempts += 1;
49 debug!(attempt = attempts, "Executing request");
50
51 match operation().await {
52 Ok(result) => {
53 if attempts > 1 {
54 debug!(attempts, "Request succeeded after retries");
55 }
56 return Ok(result);
57 }
58 Err(err) if err.is_retryable() && attempts < config.max_retries => {
59 if let Some(wait) = backoff.next_backoff() {
60 warn!(
61 error = %err,
62 attempt = attempts,
63 wait_ms = wait.as_millis(),
64 "Request failed, retrying"
65 );
66 tokio::time::sleep(wait).await;
67 } else {
68 return Err(ApiError::Timeout { attempts });
69 }
70 }
71 Err(err) => {
72 if attempts >= config.max_retries {
73 warn!(attempts, "Max retries exceeded");
74 }
75 return Err(err);
76 }
77 }
78 }
79}