rig/http_client/retry.rs

//! Helpers to handle connection delays when receiving errors

use super::Error;
use std::time::Duration;

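/// Decides whether, and after how long, a failed request should be retried.
///
/// # Example
///
/// A minimal sketch of a custom policy that retries at most three times with a
/// fixed one-second delay; the `rig::http_client` import paths are assumed here
/// and may differ from the crate's actual layout:
///
/// ```ignore
/// use std::time::Duration;
/// // NOTE: assumed import paths; adjust to the crate's layout.
/// use rig::http_client::{retry::RetryPolicy, Error};
///
/// struct ThreeStrikes;
///
/// impl RetryPolicy for ThreeStrikes {
///     fn retry(&self, _error: &Error, last_retry: Option<(usize, Duration)>) -> Option<Duration> {
///         match last_retry {
///             // Give up once three retries have already been attempted.
///             Some((retry_num, _)) if retry_num >= 3 => None,
///             _ => Some(Duration::from_secs(1)),
///         }
///     }
///
///     fn set_reconnection_time(&mut self, _duration: Duration) {}
/// }
/// ```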
pub trait RetryPolicy {
    /// Compute the next retry delay based on the [`enum@Error`] and, if available, the last
    /// retry number and delay. A policy may return `None` if it does not want to retry.
    fn retry(&self, error: &Error, last_retry: Option<(usize, Duration)>) -> Option<Duration>;

    /// Set a new reconnection time if one is received from an event
    fn set_reconnection_time(&mut self, duration: Duration);
}

/// A [`RetryPolicy`] which backs off exponentially
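///
/// # Example
///
/// A minimal construction sketch; the `rig::http_client::retry` import path is
/// assumed here:
///
/// ```ignore
/// use std::time::Duration;
/// // NOTE: assumed import path; adjust to the crate's layout.
/// use rig::http_client::retry::{ExponentialBackoff, RetryPolicy};
///
/// // Start at 500 ms, double on every retry, cap at 10 s, give up after 8 retries.
/// let mut policy = ExponentialBackoff::new(
///     Duration::from_millis(500),
///     2.,
///     Some(Duration::from_secs(10)),
///     Some(8),
/// );
///
/// // A reconnection time received from an event replaces the starting delay.
/// policy.set_reconnection_time(Duration::from_secs(2));
/// ```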
#[derive(Debug, Clone)]
pub struct ExponentialBackoff {
    /// The initial backoff delay
    pub start: Duration,
    /// The factor by which to multiply the delay on each retry
    pub factor: f64,
    /// The maximum duration to delay
    pub max_duration: Option<Duration>,
    /// The maximum number of retries before giving up
    pub max_retries: Option<usize>,
}

impl ExponentialBackoff {
    /// Create a new exponential backoff retry policy
    pub const fn new(
        start: Duration,
        factor: f64,
        max_duration: Option<Duration>,
        max_retries: Option<usize>,
    ) -> Self {
        Self {
            start,
            factor,
            max_duration,
            max_retries,
        }
    }
}

impl RetryPolicy for ExponentialBackoff {
    fn retry(&self, _error: &Error, last_retry: Option<(usize, Duration)>) -> Option<Duration> {
        if let Some((retry_num, last_duration)) = last_retry {
            // Keep retrying until the configured retry limit (if any) is reached,
            // growing the previous delay by `factor` and clamping it to `max_duration`.
            if self.max_retries.is_none() || retry_num < self.max_retries.unwrap() {
                let duration = last_duration.mul_f64(self.factor);
                if let Some(max_duration) = self.max_duration {
                    Some(duration.min(max_duration))
                } else {
                    Some(duration)
                }
            } else {
                None
            }
        } else {
            // First failure: start from the configured initial delay.
            Some(self.start)
        }
    }

    fn set_reconnection_time(&mut self, duration: Duration) {
        self.start = duration;
        // Ensure the cap is never lower than the new reconnection time.
        if let Some(max_duration) = self.max_duration {
            self.max_duration = Some(max_duration.max(duration));
        }
    }
}

/// A [`RetryPolicy`] which always emits the same delay
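///
/// # Example
///
/// A minimal construction sketch; the `rig::http_client::retry` import path is
/// assumed here:
///
/// ```ignore
/// use std::time::Duration;
/// // NOTE: assumed import path; adjust to the crate's layout.
/// use rig::http_client::retry::Constant;
///
/// // Retry every 2 seconds, at most 5 times.
/// let policy = Constant::new(Duration::from_secs(2), Some(5));
/// ```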
#[derive(Debug, Clone)]
pub struct Constant {
    /// The delay to return
    pub delay: Duration,
    /// The maximum number of retries before giving up
    pub max_retries: Option<usize>,
}

impl Constant {
    /// Create a new constant retry policy
    pub const fn new(delay: Duration, max_retries: Option<usize>) -> Self {
        Self { delay, max_retries }
    }
}

impl RetryPolicy for Constant {
    fn retry(&self, _error: &Error, last_retry: Option<(usize, Duration)>) -> Option<Duration> {
        if let Some((retry_num, _)) = last_retry {
            // Keep returning the same delay until the retry limit (if any) is reached.
            if self.max_retries.is_none() || retry_num < self.max_retries.unwrap() {
                Some(self.delay)
            } else {
                None
            }
        } else {
            Some(self.delay)
        }
    }

    fn set_reconnection_time(&mut self, duration: Duration) {
        self.delay = duration;
    }
}

/// A [`RetryPolicy`] which never retries
#[derive(Debug, Clone, Copy, Default)]
pub struct Never;

impl RetryPolicy for Never {
    fn retry(&self, _error: &Error, _last_retry: Option<(usize, Duration)>) -> Option<Duration> {
        None
    }

    fn set_reconnection_time(&mut self, _duration: Duration) {}
}

/// The default [`RetryPolicy`] when initializing an event source
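///
/// Starting at 300 ms and doubling on each retry, the delays are 300 ms, 600 ms,
/// 1.2 s, 2.4 s, 4.8 s, and then 5 s thereafter (the cap), with no limit on the
/// number of retries.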
pub const DEFAULT_RETRY: ExponentialBackoff = ExponentialBackoff::new(
    Duration::from_millis(300),
    2.,
    Some(Duration::from_secs(5)),
    None,
);