Skip to main content

loa_core/
http.rs

//! Shared HTTP client with smart retry middleware
//!
//! Provides a centralized HTTP client configuration with:
//! - Automatic retry logic for transient errors
//! - Respect for server-provided Retry-After headers (429 rate limits)
//! - Exponential backoff for other errors
//! - Configurable timeouts
//!
//! This client can be used across the entire agent for consistent HTTP behavior.

11use async_trait::async_trait;
12use reqwest::{Request, Response, StatusCode};
13use reqwest_chain::Chainer;
14use reqwest_middleware::{ClientBuilder, Error};
15use std::time::Duration;
16
/// Smart retry middleware that handles both Retry-After headers and exponential backoff
///
/// Behavior:
/// - **429 Too Many Requests**: Reads `retry-after` header and sleeps for that duration before retrying
/// - **5xx Server Errors**: Applies exponential backoff before retrying
/// - **Network Errors**: Applies exponential backoff before retrying
/// - **Other errors**: Returns immediately without retry
struct SmartRetryChainer {
    /// Maximum number of retries before giving up on a request.
    max_retries: u32,
    /// Initial delay of the exponential backoff schedule (attempt 0).
    base_delay: Duration,
    /// Upper bound applied to every computed or server-supplied delay.
    max_delay: Duration,
}

impl SmartRetryChainer {
    /// Create a chainer with the given retry budget and backoff bounds.
    fn new(max_retries: u32, base_delay: Duration, max_delay: Duration) -> Self {
        Self {
            max_retries,
            base_delay,
            max_delay,
        }
    }

    /// Parse a Retry-After header value (delay-seconds form only).
    ///
    /// The result is capped at `max_delay`: the header is server-controlled
    /// input, and an uncapped value would let a misbehaving server make the
    /// client sleep for an unbounded amount of time.
    ///
    /// Returns `None` for anything that is not a plain integer.
    fn parse_retry_after(&self, value: &str) -> Option<Duration> {
        // delay-seconds is the common format for rate limiting; tolerate
        // surrounding whitespace in the header value.
        let seconds = value.trim().parse::<u64>().ok()?;
        Some(Duration::from_secs(seconds).min(self.max_delay))

        // TODO: Add HTTP-date parsing if needed (less common for rate limiting)
    }

    /// Calculate exponential backoff (`base_delay * 2^attempt`), capped at `max_delay`.
    fn calculate_backoff(&self, attempt: u32) -> Duration {
        // saturating_pow bounds the multiplier; saturating_mul avoids the
        // panic that `Duration: Mul<u32>` raises on overflow for large
        // base delays, clamping to Duration::MAX instead (then capped below).
        let delay = self.base_delay.saturating_mul(2_u32.saturating_pow(attempt));
        delay.min(self.max_delay)
    }
}
56
/// State tracked across retry attempts.
///
/// One instance is threaded through every attempt of a single logical
/// request by the chain middleware.
#[derive(Default)]
struct RetryState {
    // Number of retries performed so far; starts at 0 via `Default`.
    attempt: u32,
}
62
63#[async_trait]
64impl Chainer for SmartRetryChainer {
65    type State = RetryState;
66
67    async fn chain(
68        &self,
69        result: Result<Response, Error>,
70        state: &mut Self::State,
71        _request: &mut Request,
72    ) -> Result<Option<Response>, Error> {
73        // Extract response or propagate network error
74        let response = match result {
75            Ok(resp) => resp,
76            Err(err) => {
77                // Network error - retry with exponential backoff if under retry limit
78                if state.attempt >= self.max_retries {
79                    return Err(err);
80                }
81
82                let delay = self.calculate_backoff(state.attempt);
83                tracing::debug!(
84                    "Network error on attempt {}/{}, retrying after {:?}",
85                    state.attempt + 1,
86                    self.max_retries,
87                    delay
88                );
89
90                tokio::time::sleep(delay).await;
91                state.attempt += 1;
92
93                // Signal retry
94                return Ok(None);
95            }
96        };
97
98        let status = response.status();
99
100        // Determine if we should retry based on status code
101        let should_retry = match status {
102            StatusCode::TOO_MANY_REQUESTS => true, // 429
103            s if s.is_server_error() => true,      // 5xx
104            StatusCode::REQUEST_TIMEOUT => true,   // 408
105            _ => false,
106        };
107
108        // Accept response if no retry needed or max retries exceeded
109        if !should_retry || state.attempt >= self.max_retries {
110            return Ok(Some(response));
111        }
112
113        // Calculate retry delay
114        let delay = if status == StatusCode::TOO_MANY_REQUESTS {
115            // Try to read Retry-After header for 429
116            if let Some(retry_after_header) = response.headers().get("retry-after") {
117                if let Ok(value) = retry_after_header.to_str() {
118                    if let Some(parsed_delay) = self.parse_retry_after(value) {
119                        tracing::debug!(
120                            "429 rate limited on attempt {}/{}, respecting Retry-After: {:?}",
121                            state.attempt + 1,
122                            self.max_retries,
123                            parsed_delay
124                        );
125                        parsed_delay
126                    } else {
127                        // Failed to parse Retry-After, use exponential backoff
128                        let backoff = self.calculate_backoff(state.attempt);
129                        tracing::debug!(
130                            "429 rate limited on attempt {}/{}, failed to parse Retry-After, using backoff: {:?}",
131                            state.attempt + 1,
132                            self.max_retries,
133                            backoff
134                        );
135                        backoff
136                    }
137                } else {
138                    // Retry-After header invalid UTF-8, use exponential backoff
139                    self.calculate_backoff(state.attempt)
140                }
141            } else {
142                // No Retry-After header, use exponential backoff
143                let backoff = self.calculate_backoff(state.attempt);
144                tracing::debug!(
145                    "429 rate limited on attempt {}/{}, no Retry-After header, using backoff: {:?}",
146                    state.attempt + 1,
147                    self.max_retries,
148                    backoff
149                );
150                backoff
151            }
152        } else {
153            // Server error or timeout - use exponential backoff
154            let backoff = self.calculate_backoff(state.attempt);
155            tracing::debug!(
156                "Error {} on attempt {}/{}, retrying after {:?}",
157                status,
158                state.attempt + 1,
159                self.max_retries,
160                backoff
161            );
162            backoff
163        };
164
165        // Sleep before retry
166        tokio::time::sleep(delay).await;
167        state.attempt += 1;
168
169        // Signal retry (return None)
170        Ok(None)
171    }
172}
173
174/// Build a configured HTTP client with smart retry middleware
175///
176/// The client includes:
177/// - **Smart Retry Logic**: Respects server `Retry-After` headers for 429 responses
178/// - **Exponential Backoff**: For server errors (5xx) and network issues
179/// - **Timeout**: 10 second request timeout
180/// - **Max Retries**: 5 attempts
181///
182/// # Example
183///
184/// ```rust
185/// use elo::http::build_http_client;
186///
187/// #[tokio::main]
188/// async fn main() -> Result<(), Box<dyn std::error::Error>> {
189///     let client = build_http_client();
190///     let response = client.get("https://api.example.com/status").send().await?;
191///     Ok(())
192/// }
193/// ```
194pub fn build_http_client() -> reqwest_middleware::ClientWithMiddleware {
195    // Base reqwest client with timeout
196    let reqwest_client = reqwest::Client::builder()
197        .timeout(Duration::from_secs(10))
198        .build()
199        .expect("Failed to build reqwest client");
200
201    // Create smart retry chainer
202    let retry_chainer = SmartRetryChainer::new(
203        5,                          // max_retries
204        Duration::from_millis(500), // base_delay
205        Duration::from_secs(30),    // max_delay
206    );
207
208    // Build client with chainer middleware
209    ClientBuilder::new(reqwest_client)
210        .with(reqwest_chain::ChainMiddleware::new(retry_chainer))
211        .build()
212}