use alloy_json_rpc::{ErrorPayload, Id, RpcError, RpcResult};
use serde::Deserialize;
use serde_json::value::RawValue;
use std::error::Error as StdError;
use thiserror::Error;

/// A transport error: an [`RpcError`] whose transport variant is a [`TransportErrorKind`].
pub type TransportError<ErrResp = Box<RawValue>> = RpcError<TransportErrorKind, ErrResp>;

/// The result of a transport operation: a successfully deserialized response or a [`TransportError`].
pub type TransportResult<T, ErrResp = Box<RawValue>> = RpcResult<T, TransportErrorKind, ErrResp>;
/// Transport error variants, covering failures that occur below the JSON-RPC layer.
#[derive(Debug, Error)]
#[non_exhaustive]
pub enum TransportErrorKind {
    /// A batch request was sent, but the response is missing a reply for this request ID.
    #[error("missing response for request with ID {0}")]
    MissingBatchResponse(Id),

    /// The backend connection task has stopped and can no longer service requests.
    #[error("backend connection task has stopped")]
    BackendGone,

    /// Subscriptions are not supported by this transport (e.g. plain HTTP).
    #[error("subscriptions are not available on this provider")]
    PubsubUnavailable,

    /// An HTTP error, carrying the status code and response body.
    #[error("{0}")]
    HttpError(#[from] HttpError),

    /// A custom, transport-specific error.
    #[error("{0}")]
    Custom(#[source] Box<dyn StdError + Send + Sync + 'static>),
}
impl TransportErrorKind {
    /// Returns `true` if the error is considered recoverable; currently this is only the case
    /// for [`Self::MissingBatchResponse`].
    pub const fn recoverable(&self) -> bool {
        matches!(self, Self::MissingBatchResponse(_))
    }

    /// Instantiate a new [`TransportError`] from a custom error message.
    pub fn custom_str(err: &str) -> TransportError {
        RpcError::Transport(Self::Custom(err.into()))
    }

    /// Instantiate a new [`TransportError`] from a custom error.
    pub fn custom(err: impl StdError + Send + Sync + 'static) -> TransportError {
        RpcError::Transport(Self::Custom(Box::new(err)))
    }

    /// Instantiate a new [`TransportError`] for a missing batch response with the given ID.
    pub const fn missing_batch_response(id: Id) -> TransportError {
        RpcError::Transport(Self::MissingBatchResponse(id))
    }

    /// Instantiate a new [`TransportError`] signaling that the backend connection task has
    /// stopped.
    pub const fn backend_gone() -> TransportError {
        RpcError::Transport(Self::BackendGone)
    }

    /// Instantiate a new [`TransportError`] signaling that subscriptions are unavailable.
    pub const fn pubsub_unavailable() -> TransportError {
        RpcError::Transport(Self::PubsubUnavailable)
    }

    /// Instantiate a new [`TransportError`] from an HTTP status code and response body.
    pub const fn http_error(status: u16, body: String) -> TransportError {
        RpcError::Transport(Self::HttpError(HttpError { status, body }))
    }

    /// Returns `true` if the error is one that a retry policy should retry, e.g. rate limiting
    /// (HTTP 429) or temporary unavailability (HTTP 503).
    pub fn is_retry_err(&self) -> bool {
        match self {
            // A missing batch response can simply be re-requested.
            Self::MissingBatchResponse(_) => true,
            // Rate limiting and temporary unavailability are worth retrying.
            Self::HttpError(http_err) => {
                http_err.is_rate_limit_err() || http_err.is_temporarily_unavailable()
            }
            // Custom errors are retried only if the message indicates rate limiting.
            Self::Custom(err) => {
                let msg = err.to_string();
                msg.contains("429 Too Many Requests")
            }
            _ => false,
        }
    }
}
/// An HTTP error, carrying the response status code and body.
#[derive(Debug, thiserror::Error)]
#[error(
    "HTTP error {status} with {}",
    if body.is_empty() { "empty body".to_string() } else { format!("body: {body}") }
)]
pub struct HttpError {
    /// The HTTP status code.
    pub status: u16,
    /// The response body; may be empty.
    pub body: String,
}
impl HttpError {
    /// Returns `true` if the status code indicates rate limiting (HTTP 429 Too Many Requests).
    pub const fn is_rate_limit_err(&self) -> bool {
        self.status == 429
    }

    /// Returns `true` if the status code indicates temporary unavailability (HTTP 503).
    pub const fn is_temporarily_unavailable(&self) -> bool {
        self.status == 503
    }
}
/// Extension trait for [`RpcError`] providing retry classification and backoff hints.
pub(crate) trait RpcErrorExt {
    /// Returns `true` if the error should be retried.
    fn is_retryable(&self) -> bool;

    /// Returns a backoff duration suggested by the error response, if any.
    fn backoff_hint(&self) -> Option<std::time::Duration>;
}
impl RpcErrorExt for RpcError<TransportErrorKind> {
    fn is_retryable(&self) -> bool {
        match self {
            // Transport errors decide for themselves via `TransportErrorKind::is_retry_err`.
            Self::Transport(err) => err.is_retry_err(),
            // Serialization errors are deterministic; retrying will not help.
            Self::SerError(_) => false,
            // A deserialization failure may still contain a retryable JSON-RPC error in its
            // raw text, so try to parse it before giving up.
            Self::DeserError { text, .. } => {
                // Some servers return the error payload directly...
                if let Ok(resp) = serde_json::from_str::<ErrorPayload>(text) {
                    return resp.is_retry_err();
                }

                // ...while others wrap it in an `{"error": ...}` envelope.
                #[derive(Deserialize)]
                struct Resp {
                    error: ErrorPayload,
                }

                if let Ok(resp) = serde_json::from_str::<Resp>(text) {
                    return resp.error.is_retry_err();
                }

                false
            }
            Self::ErrorResp(err) => err.is_retry_err(),
            Self::NullResp => true,
            _ => false,
        }
    }

    fn backoff_hint(&self) -> Option<std::time::Duration> {
        if let Self::ErrorResp(resp) = self {
            let data = resp.try_data_as::<serde_json::Value>();
            if let Some(Ok(data)) = data {
                // Look for a server-provided `{"rate":{"backoff_seconds":...}}` hint.
                let backoff_seconds = &data["rate"]["backoff_seconds"];
                if let Some(seconds) = backoff_seconds.as_u64() {
                    return Some(std::time::Duration::from_secs(seconds));
                }
                if let Some(seconds) = backoff_seconds.as_f64() {
                    // Fractional seconds are truncated and then padded by one whole second.
                    return Some(std::time::Duration::from_secs(seconds as u64 + 1));
                }
            }
        }
        None
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_retry_error() {
        let err = r#"{"code":-32007,"message":"100/second request limit reached - reduce calls per second or upgrade your account at quicknode.com"}"#;
        let err = serde_json::from_str::<ErrorPayload>(err).unwrap();
        assert!(TransportError::ErrorResp(err).is_retryable());
    }
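
    // Sketch tests (added for illustration): they exercise only the classification logic
    // defined in this file, using the constructors above.
    #[test]
    fn test_retry_http_status() {
        // HTTP 429 (rate limit) and 503 (temporarily unavailable) are retryable;
        // other statuses are not.
        assert!(TransportErrorKind::http_error(429, "rate limited".to_string()).is_retryable());
        assert!(TransportErrorKind::http_error(503, String::new()).is_retryable());
        assert!(!TransportErrorKind::http_error(404, String::new()).is_retryable());
    }

    #[test]
    fn test_retry_custom_message() {
        // The `Custom` branch of `is_retry_err` only matches messages containing
        // "429 Too Many Requests".
        let rate_limited = TransportErrorKind::custom_str("error code: 429 Too Many Requests");
        assert!(rate_limited.is_retryable());
        let other = TransportErrorKind::custom_str("connection reset by peer");
        assert!(!other.is_retryable());
    }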

    #[test]
    fn test_retry_error_429() {
        let err = r#"{"code":429,"event":-33200,"message":"Too Many Requests","details":"You have surpassed your allowed throughput limit. Reduce the amount of requests per second or upgrade for more capacity."}"#;
        let err = serde_json::from_str::<ErrorPayload>(err).unwrap();
        assert!(TransportError::ErrorResp(err).is_retryable());
    }
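
    // Sketch test for `backoff_hint`: it looks only for the `{"rate":{"backoff_seconds":...}}`
    // shape in the error's `data` field. The payload below is an illustrative example, not a
    // response captured from a specific provider.
    #[test]
    fn test_backoff_hint_from_error_data() {
        let err = r#"{"code":-32007,"message":"rate limited","data":{"rate":{"backoff_seconds":30}}}"#;
        let err = serde_json::from_str::<ErrorPayload>(err).unwrap();
        let err = TransportError::ErrorResp(err);
        assert_eq!(err.backoff_hint(), Some(std::time::Duration::from_secs(30)));
    }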
}