pub struct RetryPolicy { /* private fields */ }
A policy that retries failed operations with configurable backoff.
§Examples
use do_over::{policy::Policy, retry::RetryPolicy};
use std::time::Duration;
let policy = RetryPolicy::fixed(3, Duration::from_millis(100));
let result = policy.execute(|| async {
// This operation will be retried up to 3 times on failure
Ok::<_, std::io::Error>("success")
}).await?;
Implementations§
impl RetryPolicy
pub fn fixed(max_retries: usize, delay: Duration) -> Self
Create a retry policy with fixed backoff.
§Arguments
max_retries - Maximum number of retry attempts
delay - Fixed delay between retries
§Examples
use do_over::retry::RetryPolicy;
use std::time::Duration;
// Retry up to 3 times with 100ms between attempts
let policy = RetryPolicy::fixed(3, Duration::from_millis(100));
Examples found in repository
examples/comprehensive.rs (line 57)
53fn create_inventory_policy() -> InventoryPolicy {
54 Wrap::new(
55 RateLimiter::new(3, Duration::from_secs(1)), // 3 requests per second
56 Wrap::new(
57 RetryPolicy::fixed(2, Duration::from_millis(100)),
58 TimeoutPolicy::new(Duration::from_millis(500)),
59 ),
60 )
61}
62
63fn create_payment_policy() -> PaymentPolicy {
64 Wrap::new(
65 CircuitBreaker::new(3, Duration::from_secs(5)), // Opens after 3 failures
66 Wrap::new(
67 RetryPolicy::fixed(2, Duration::from_millis(200)),
68 TimeoutPolicy::new(Duration::from_secs(1)),
69 ),
70 )
71}
More examples
examples/http_service.rs (line 13)
11async fn main() {
12 let policy = Wrap::new(
13 RetryPolicy::fixed(2, Duration::from_millis(50)),
14 TimeoutPolicy::new(Duration::from_secs(1)),
15 );
16
17 let app = Router::new().route(
18 "/",
19 get(|| async move {
20 let result: Result<&str, DoOverError<()>> =
21 policy.execute(|| async { Ok("hello from do-over") }).await;
22 result.unwrap()
23 }),
24 );
25
26 println!("Starting server on http://0.0.0.0:3000");
27 let listener = TcpListener::bind("0.0.0.0:3000").await.unwrap();
28 axum::serve(listener, app).await.unwrap();
29}
examples/retry.rs (line 33)
29async fn fixed_backoff_example() {
30 println!("📌 Scenario 1: Fixed Backoff (fails twice, succeeds on third)");
31 println!(" Configuration: max_retries=2, delay=100ms\n");
32
33 let policy = RetryPolicy::fixed(2, Duration::from_millis(100));
34 let attempt_count = Arc::new(AtomicUsize::new(0));
35 let start = Instant::now();
36
37 let result: Result<String, DoOverError<String>> = policy
38 .execute(|| {
39 let attempts = Arc::clone(&attempt_count);
40 async move {
41 let attempt = attempts.fetch_add(1, Ordering::SeqCst) + 1;
42 let elapsed = start.elapsed().as_millis();
43
44 if attempt < 3 {
45 println!(
46 " [+{:>4}ms] Attempt {}: ❌ Simulated failure",
47 elapsed, attempt
48 );
49 Err(DoOverError::Inner(format!("Transient error on attempt {}", attempt)))
50 } else {
51 println!(
52 " [+{:>4}ms] Attempt {}: ✅ Success!",
53 elapsed, attempt
54 );
55 Ok("Operation completed successfully".to_string())
56 }
57 }
58 })
59 .await;
60
61 let total_time = start.elapsed().as_millis();
62 println!("\n Result: {:?}", result.unwrap());
63 println!(" Total time: {}ms (expected ~200ms for 2 retries)", total_time);
64}
65
66async fn exponential_backoff_example() {
67 println!("📌 Scenario 2: Exponential Backoff (show increasing delays)");
68 println!(" Configuration: max_retries=3, base=100ms, factor=2.0\n");
69
70 let policy = RetryPolicy::exponential(3, Duration::from_millis(100), 2.0);
71 let attempt_count = Arc::new(AtomicUsize::new(0));
72 let start = Instant::now();
73
74 let result: Result<String, DoOverError<String>> = policy
75 .execute(|| {
76 let attempts = Arc::clone(&attempt_count);
77 async move {
78 let attempt = attempts.fetch_add(1, Ordering::SeqCst) + 1;
79 let elapsed = start.elapsed().as_millis();
80
81 if attempt < 4 {
82 println!(
83 " [+{:>4}ms] Attempt {}: ❌ Simulated failure (next delay: {}ms)",
84 elapsed,
85 attempt,
86 100 * 2_i32.pow(attempt as u32)
87 );
88 Err(DoOverError::Inner(format!("Transient error")))
89 } else {
90 println!(
91 " [+{:>4}ms] Attempt {}: ✅ Success!",
92 elapsed, attempt
93 );
94 Ok("Operation completed with exponential backoff".to_string())
95 }
96 }
97 })
98 .await;
99
100 let total_time = start.elapsed().as_millis();
101 println!("\n Result: {:?}", result.unwrap());
102 println!(
103 " Total time: {}ms (expected ~1400ms: 200+400+800)",
104 total_time
105 );
106 println!(" Delays: 200ms → 400ms → 800ms (exponential growth)");
107}
108
109async fn exhausted_retries_example() {
110 println!("📌 Scenario 3: Exhausted Retries (all attempts fail)");
111 println!(" Configuration: max_retries=2, delay=50ms\n");
112
113 let policy = RetryPolicy::fixed(2, Duration::from_millis(50));
114 let attempt_count = Arc::new(AtomicUsize::new(0));
115 let start = Instant::now();
116
117 let result: Result<String, DoOverError<String>> = policy
118 .execute(|| {
119 let attempts = Arc::clone(&attempt_count);
120 async move {
121 let attempt = attempts.fetch_add(1, Ordering::SeqCst) + 1;
122 let elapsed = start.elapsed().as_millis();
123 println!(
124 " [+{:>4}ms] Attempt {}: ❌ Permanent failure",
125 elapsed, attempt
126 );
127 Err(DoOverError::Inner(format!(
128 "Service unavailable (attempt {})",
129 attempt
130 )))
131 }
132 })
133 .await;
134
135 let total_time = start.elapsed().as_millis();
136 println!("\n Result: {:?}", result.unwrap_err());
137 println!(
138 " Total time: {}ms (3 attempts, 2 retries exhausted)",
139 total_time
140 );
141}
examples/composition.rs (line 43)
37async fn pattern1_retry_timeout() {
38 println!("📌 Pattern 1: Retry wrapping Timeout");
39 println!(" Wrap::new(Retry(3, 100ms), Timeout(200ms))");
40 println!(" → Each retry attempt gets its own 200ms timeout\n");
41
42 let policy = Wrap::new(
43 RetryPolicy::fixed(3, Duration::from_millis(100)),
44 TimeoutPolicy::new(Duration::from_millis(200)),
45 );
46
47 let attempt_count = Arc::new(AtomicUsize::new(0));
48 let start = Instant::now();
49
50 // Scenario: First 2 attempts timeout, third succeeds quickly
51 let result: Result<String, DoOverError<String>> = {
52 let ac = Arc::clone(&attempt_count);
53 policy
54 .execute(|| {
55 let count = Arc::clone(&ac);
56 async move {
57 let attempt = count.fetch_add(1, Ordering::SeqCst) + 1;
58 let elapsed = start.elapsed().as_millis();
59 println!(
60 " [+{:>4}ms] Attempt {}: Started",
61 elapsed, attempt
62 );
63
64 if attempt < 3 {
65 // First two attempts are slow - will timeout
66 println!(
67 " [+{:>4}ms] Attempt {}: Processing slowly (will timeout)...",
68 elapsed, attempt
69 );
70 tokio::time::sleep(Duration::from_millis(500)).await;
71 Ok("Should not reach here".to_string())
72 } else {
73 // Third attempt is fast
74 tokio::time::sleep(Duration::from_millis(50)).await;
75 let elapsed = start.elapsed().as_millis();
76 println!(
77 " [+{:>4}ms] Attempt {}: ✅ Completed quickly!",
78 elapsed, attempt
79 );
80 Ok("Success on attempt 3".to_string())
81 }
82 }
83 })
84 .await
85 };
86
87 let total_time = start.elapsed().as_millis();
88 println!("\n Result: {:?}", result.unwrap());
89 println!(" Total time: {}ms", total_time);
90 println!(" Total attempts: {}", attempt_count.load(Ordering::SeqCst));
91 println!("\n 💡 Each attempt had its own timeout; operation succeeded on 3rd try");
92}
93
94async fn pattern2_circuit_retry() {
95 println!("📌 Pattern 2: CircuitBreaker wrapping Retry");
96 println!(" Wrap::new(CircuitBreaker(3, 1s), Retry(2, 50ms))");
97 println!(" → Retries happen inside the circuit breaker\n");
98
99 let policy = Wrap::new(
100 CircuitBreaker::new(3, Duration::from_secs(1)),
101 RetryPolicy::fixed(2, Duration::from_millis(50)),
102 );
103
104 let failure_count = Arc::new(AtomicUsize::new(0));
105 let start = Instant::now();
106
107 // Make several calls that fail to open the circuit
108 println!(" Phase 1: Failing calls to open circuit");
109 for i in 1..=4 {
110 let fc = Arc::clone(&failure_count);
111 let s = start;
112 let result: Result<String, DoOverError<String>> = policy
113 .execute(|| {
114 let count = Arc::clone(&fc);
115 async move {
116 count.fetch_add(1, Ordering::SeqCst);
117 let elapsed = s.elapsed().as_millis();
118 println!(
119 " [+{:>4}ms] Call {}: Inner operation failing...",
120 elapsed, i
121 );
122 Err(DoOverError::Inner(format!("Service error")))
123 }
124 })
125 .await;
126
127 let elapsed = start.elapsed().as_millis();
128 match &result {
129 Err(DoOverError::Inner(_)) => {
130 println!(
131 " [+{:>4}ms] Call {}: ❌ Failed after retries",
132 elapsed, i
133 );
134 }
135 Err(DoOverError::CircuitOpen) => {
136 println!(
137 " [+{:>4}ms] Call {}: 🚫 Circuit is OPEN",
138 elapsed, i
139 );
140 }
141 _ => {}
142 }
143 }
144
145 let inner_calls = failure_count.load(Ordering::SeqCst);
146 println!("\n Summary:");
147 println!(" - Inner operation calls: {} (includes retries)", inner_calls);
148 println!(" - Circuit opened after threshold reached");
149 println!("\n 💡 Retries happened inside circuit breaker; failures accumulated to open circuit");
150}
151
152async fn pattern3_full_stack() {
153 println!("📌 Pattern 3: Full Resilience Stack");
154 println!(" Bulkhead(2) → Retry(2, 100ms) → Timeout(500ms)");
155 println!(" Recommended ordering for production systems\n");
156
157 // Note: Using Bulkhead + Retry + Timeout for this example to avoid
158 // CircuitBreaker clone issues in concurrent async context
159 let policy = Arc::new(Wrap::new(
160 Bulkhead::new(2),
161 Wrap::new(
162 RetryPolicy::fixed(2, Duration::from_millis(100)),
163 TimeoutPolicy::new(Duration::from_millis(500)),
164 ),
165 ));
166
167 let call_count = Arc::new(AtomicUsize::new(0));
168 let start = Instant::now();
169
170 println!(" Launching 4 concurrent requests (bulkhead limit is 2)...\n");
171
172 let mut handles = vec![];
173 for i in 1..=4 {
174 let p = Arc::clone(&policy);
175 let cc = Arc::clone(&call_count);
176 let s = start;
177
178 let handle = tokio::spawn(async move {
179 let result: Result<String, DoOverError<String>> = p
180 .execute(|| {
181 let count = Arc::clone(&cc);
182 async move {
183 let call = count.fetch_add(1, Ordering::SeqCst) + 1;
184 let elapsed = s.elapsed().as_millis();
185 println!(
186 " [+{:>4}ms] Request {}, call {}: Processing...",
187 elapsed, i, call
188 );
189 tokio::time::sleep(Duration::from_millis(200)).await;
190 let elapsed = s.elapsed().as_millis();
191 println!(
192 " [+{:>4}ms] Request {}, call {}: ✅ Done",
193 elapsed, i, call
194 );
195 Ok(format!("Response {}", i))
196 }
197 })
198 .await;
199
200 let elapsed = s.elapsed().as_millis();
201 match &result {
202 Ok(msg) => println!(" [+{:>4}ms] Request {}: ✅ {}", elapsed, i, msg),
203 Err(DoOverError::BulkheadFull) => {
204 println!(" [+{:>4}ms] Request {}: 🚫 Rejected by bulkhead", elapsed, i)
205 }
206 Err(e) => println!(" [+{:>4}ms] Request {}: ❌ {:?}", elapsed, i, e),
207 }
208 });
209 handles.push(handle);
210
211 tokio::time::sleep(Duration::from_millis(10)).await;
212 }
213
214 for handle in handles {
215 handle.await.unwrap();
216 }
217
218 let total_time = start.elapsed().as_millis();
219 let total_calls = call_count.load(Ordering::SeqCst);
220
221 println!("\n Summary:");
222 println!(" - Total inner operation calls: {}", total_calls);
223 println!(" - Total time: {}ms", total_time);
224 println!("\n Policy execution order:");
225 println!(" 1. Bulkhead: Limits to 2 concurrent executions");
226 println!(" 2. Retry: Retries transient failures");
227 println!(" 3. Timeout: Each attempt bounded to 500ms");
228}
pub fn exponential(max_retries: usize, base: Duration, factor: f64) -> Self
Create a retry policy with exponential backoff.
The delay for attempt n is calculated as: base * factor^n
§Arguments
max_retries - Maximum number of retry attempts
base - Initial delay duration
factor - Multiplier for exponential growth (typically 2.0)
§Examples
use do_over::retry::RetryPolicy;
use std::time::Duration;
// Delays: 100ms, 200ms, 400ms, 800ms
let policy = RetryPolicy::exponential(4, Duration::from_millis(100), 2.0);
Examples found in repository
examples/retry.rs (line 70)
66async fn exponential_backoff_example() {
67 println!("📌 Scenario 2: Exponential Backoff (show increasing delays)");
68 println!(" Configuration: max_retries=3, base=100ms, factor=2.0\n");
69
70 let policy = RetryPolicy::exponential(3, Duration::from_millis(100), 2.0);
71 let attempt_count = Arc::new(AtomicUsize::new(0));
72 let start = Instant::now();
73
74 let result: Result<String, DoOverError<String>> = policy
75 .execute(|| {
76 let attempts = Arc::clone(&attempt_count);
77 async move {
78 let attempt = attempts.fetch_add(1, Ordering::SeqCst) + 1;
79 let elapsed = start.elapsed().as_millis();
80
81 if attempt < 4 {
82 println!(
83 " [+{:>4}ms] Attempt {}: ❌ Simulated failure (next delay: {}ms)",
84 elapsed,
85 attempt,
86 100 * 2_i32.pow(attempt as u32)
87 );
88 Err(DoOverError::Inner(format!("Transient error")))
89 } else {
90 println!(
91 " [+{:>4}ms] Attempt {}: ✅ Success!",
92 elapsed, attempt
93 );
94 Ok("Operation completed with exponential backoff".to_string())
95 }
96 }
97 })
98 .await;
99
100 let total_time = start.elapsed().as_millis();
101 println!("\n Result: {:?}", result.unwrap());
102 println!(
103 " Total time: {}ms (expected ~1400ms: 200+400+800)",
104 total_time
105 );
106 println!(" Delays: 200ms → 400ms → 800ms (exponential growth)");
107}
pub fn with_metrics(self, metrics: Arc<dyn Metrics>) -> Self
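No description is provided for this method; judging from the signature, it is a builder-style setter that consumes the policy and returns it with a metrics recorder attached. A minimal usage sketch, assuming MyMetrics is a hypothetical type that already implements the crate's Metrics trait:
use do_over::retry::RetryPolicy;
use std::sync::Arc;
use std::time::Duration;
// Hypothetical: `MyMetrics` stands in for any type implementing the crate's
// `Metrics` trait; its definition and import path are not shown here.
let policy = RetryPolicy::fixed(3, Duration::from_millis(100))
    .with_metrics(Arc::new(MyMetrics::default()));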
Trait Implementations§
impl Clone for RetryPolicy
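Because RetryPolicy is Clone, a configured policy can be duplicated and moved into separate tasks. A minimal sketch, assuming a Tokio runtime and that a clone simply copies the backoff configuration (mirroring the struct-level example above):
use do_over::{policy::Policy, retry::RetryPolicy};
use std::time::Duration;
let policy = RetryPolicy::fixed(3, Duration::from_millis(100));
// Give the spawned task its own copy of the configured policy.
let worker_policy = policy.clone();
tokio::spawn(async move {
    let _result = worker_policy
        .execute(|| async { Ok::<_, std::io::Error>("success") })
        .await;
});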
Auto Trait Implementations§
impl Freeze for RetryPolicy
impl !RefUnwindSafe for RetryPolicy
impl Send for RetryPolicy
impl Sync for RetryPolicy
impl Unpin for RetryPolicy
impl !UnwindSafe for RetryPolicy
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.