//! ugi 0.2.1
//!
//! Runtime-agnostic Rust request client with HTTP/1.1, HTTP/2, HTTP/3, H2C,
//! WebSocket, SSE, and gRPC support.
//!
//! Documentation
use std::time::Duration;

use async_io::Timer;

use crate::error::{Error, ErrorKind};
use crate::request::Method;

/// Controls how many times a failed request may be retried.
///
/// The default policy is [`RetryPolicy::None`] (no retries).
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum RetryPolicy {
    /// Never retry.
    #[default]
    None,
    /// Retry up to `n` additional times after the initial attempt.
    Limit(usize),
}

/// Returns the number of retry attempts configured by the given policy.
///
/// [`RetryPolicy::None`] yields zero; [`RetryPolicy::Limit`] yields its budget.
pub(crate) fn retry_attempts(policy: RetryPolicy) -> usize {
    if let RetryPolicy::Limit(budget) = policy {
        budget
    } else {
        0
    }
}

/// Returns `true` for methods that are safe to retry without side effects.
///
/// Per RFC 9110 §9.2 and §9.2.2, GET, HEAD, OPTIONS and TRACE are both safe
/// and idempotent, making them candidates for transparent retries.
pub(crate) fn is_idempotent_method(method: Method) -> bool {
    matches!(
        method,
        Method::Get | Method::Head | Method::Options | Method::Trace
    )
}

/// Returns `true` when a stale-connection error should cause a transparent
/// retry on a fresh connection.
///
/// Only safe/idempotent methods are retried so that we never silently
/// duplicate a non-idempotent request.
pub(crate) fn should_retry_stale_connection(method: Method, err: &Error) -> bool {
    // Guard first: non-idempotent requests are never retried transparently.
    if !is_idempotent_method(method) {
        return false;
    }
    *err.kind() == ErrorKind::StaleConnection
}

/// Returns `true` when a transport/timeout error should be retried according
/// to the remaining retry budget.
///
/// Used for explicit retry policies (e.g. `RetryPolicy::Limit(n)`).
pub(crate) fn should_retry_request(method: Method, err: &Error, remaining_attempts: usize) -> bool {
    // An exhausted budget or a non-idempotent method rules out a retry outright.
    if remaining_attempts == 0 || !is_idempotent_method(method) {
        return false;
    }
    // Only transient transport/timeout failures are worth another attempt.
    let transient = matches!(err.kind(), ErrorKind::Transport | ErrorKind::Timeout);
    transient
}

/// Waits for the exponential back-off delay before the next retry attempt.
///
/// The delay doubles for each consumed attempt: 100 ms, 200 ms, 400 ms, …,
/// capped at 5 seconds.  `attempt_index` is zero-based (0 = first retry).
pub(crate) async fn backoff(attempt_index: usize) {
    const BASE_MS: u64 = 100;
    const CAP_MS: u64 = 5_000;
    // 100 ms << 6 = 6.4 s already exceeds the cap, so clamping the shift
    // amount to 6 keeps the result identical for all capped indices while
    // avoiding the shift-overflow of `1u64 << attempt_index` at index >= 64
    // (panic in debug builds, silent wrap in release builds).
    let ms = (BASE_MS << attempt_index.min(6)).min(CAP_MS);
    Timer::after(Duration::from_millis(ms)).await;
}