#![cfg(not(target_arch = "wasm32"))]
mod support;
use std::time::Duration;
use futures_util::future::join_all;
use support::{delay_layer::DelayLayer, server};
use tower::{layer::util::Identity, limit::ConcurrencyLimitLayer, timeout::TimeoutLayer};
#[cfg(not(target_arch = "wasm32"))]
#[tokio::test]
async fn non_op_layer() {
    let _ = env_logger::try_init();

    // Local server that answers every request with an empty 200 response.
    let server = server::http(move |_req| async { http::Response::default() });
    let endpoint = format!("http://{}", server.addr());

    // An `Identity` layer is a no-op wrapper around the connector: the
    // request must succeed exactly as it would with no layer at all.
    let client = wreq::Client::builder()
        .connector_layer(Identity::new())
        .no_proxy()
        .build()
        .unwrap();

    let outcome = client.get(endpoint).send().await;
    assert!(outcome.is_ok());
}
#[cfg(not(target_arch = "wasm32"))]
#[tokio::test]
async fn non_op_layer_with_timeout() {
    let _ = env_logger::try_init();

    // Identity layer plus a 200 ms connect timeout: the layer must not
    // interfere with the builder-level timeout taking effect.
    let client = wreq::Client::builder()
        .connector_layer(Identity::new())
        .connect_timeout(Duration::from_millis(200))
        .no_proxy()
        .build()
        .unwrap();

    // 192.0.2.1 is in TEST-NET-1 (RFC 5737), reserved for documentation and
    // unroutable, so the TCP connect hangs until the connect timeout fires.
    let url = "http://192.0.2.1:81/slow";
    let res = client.get(url).send().await;

    // Fix: the original test discarded the result without asserting, so it
    // would pass even if the connect timeout never triggered. Pin the
    // expected failure mode instead.
    let err = res.unwrap_err();
    assert!(err.is_connect() && err.is_timeout());
}
#[cfg(not(target_arch = "wasm32"))]
#[tokio::test]
async fn with_connect_timeout_layer_never_returning() {
    let _ = env_logger::try_init();

    // A tower `TimeoutLayer` installed as a connector layer should bound a
    // connect attempt that would otherwise never complete.
    let client = wreq::Client::builder()
        .connector_layer(TimeoutLayer::new(Duration::from_millis(100)))
        .no_proxy()
        .build()
        .unwrap();

    // 192.0.2.1 is in TEST-NET-1 (RFC 5737), reserved and unroutable, so the
    // connect attempt stalls until the 100 ms layer timeout elapses.
    let url = "http://192.0.2.1:81/slow";
    let res = client.get(url).send().await;

    // Fix: the original test discarded the result without asserting, so it
    // could not detect a broken timeout layer. Assert the expected error.
    let err = res.unwrap_err();
    assert!(err.is_connect() && err.is_timeout());
}
#[cfg(not(target_arch = "wasm32"))]
#[tokio::test]
async fn with_connect_timeout_layer_slow() {
    let _ = env_logger::try_init();

    // The server itself is fine; the artificial delay lives in the connector.
    let server = server::http(move |_req| async { http::Response::default() });
    let endpoint = format!("http://{}", server.addr());

    // A 200 ms delay inside the connector stack sits under a 100 ms timeout
    // layer, so every connect attempt must be cut off by the timeout.
    let client = wreq::Client::builder()
        .connector_layer(DelayLayer::new(Duration::from_millis(200)))
        .connector_layer(TimeoutLayer::new(Duration::from_millis(100)))
        .no_proxy()
        .build()
        .unwrap();

    let err = client
        .get(endpoint)
        .send()
        .await
        .unwrap_err();
    assert!(err.is_connect() && err.is_timeout());
}
#[cfg(not(target_arch = "wasm32"))]
#[tokio::test]
async fn multiple_timeout_layers_under_threshold() {
    let _ = env_logger::try_init();

    // Plain local server; the 100 ms delay is injected by the connector.
    let server = server::http(move |_req| async { http::Response::default() });
    let endpoint = format!("http://{}", server.addr());

    // Several stacked timeout layers (200/300/500 ms) plus the builder's own
    // 200 ms connect timeout all sit above the 100 ms delay, so none of them
    // should fire and the request should complete.
    let client = wreq::Client::builder()
        .connector_layer(DelayLayer::new(Duration::from_millis(100)))
        .connector_layer(TimeoutLayer::new(Duration::from_millis(200)))
        .connector_layer(TimeoutLayer::new(Duration::from_millis(300)))
        .connector_layer(TimeoutLayer::new(Duration::from_millis(500)))
        .connect_timeout(Duration::from_millis(200))
        .no_proxy()
        .build()
        .unwrap();

    let outcome = client.get(endpoint).send().await;
    assert!(outcome.is_ok());
}
#[cfg(not(target_arch = "wasm32"))]
#[tokio::test]
async fn multiple_timeout_layers_over_threshold() {
    let _ = env_logger::try_init();

    let server = server::http(move |_req| async { http::Response::default() });
    let endpoint = format!("http://{}", server.addr());

    // Every timeout (three 50 ms layers and the 50 ms connect timeout) is
    // shorter than the 100 ms connector delay, so the connect attempt must
    // fail with a timeout regardless of which one fires first.
    let client = wreq::Client::builder()
        .connector_layer(DelayLayer::new(Duration::from_millis(100)))
        .connector_layer(TimeoutLayer::new(Duration::from_millis(50)))
        .connector_layer(TimeoutLayer::new(Duration::from_millis(50)))
        .connector_layer(TimeoutLayer::new(Duration::from_millis(50)))
        .connect_timeout(Duration::from_millis(50))
        .no_proxy()
        .build()
        .unwrap();

    let err = client
        .get(endpoint)
        .send()
        .await
        .unwrap_err();
    assert!(err.is_connect() && err.is_timeout());
}
#[cfg(not(target_arch = "wasm32"))]
#[tokio::test]
async fn with_concurrency_limit_layer_timeout() {
    let _ = env_logger::try_init();

    let server = server::http(move |_req| async { http::Response::default() });
    let endpoint = format!("http://{}", server.addr());

    // Each connect takes 100 ms, only one connect may run at a time, and
    // connection pooling is disabled so every request must dial fresh.
    // With a 200 ms total request timeout, queued requests cannot all finish.
    let client = wreq::Client::builder()
        .connector_layer(DelayLayer::new(Duration::from_millis(100)))
        .connector_layer(ConcurrencyLimitLayer::new(1))
        .timeout(Duration::from_millis(200))
        .pool_max_idle_per_host(0)
        .no_proxy()
        .build()
        .unwrap();

    // A single request fits comfortably inside the timeout.
    let first = client.get(endpoint.clone()).send().await;
    assert!(first.is_ok());

    // Fire three requests at once; the concurrency limit serializes their
    // connects, pushing at least one past the 200 ms deadline.
    let pending: Vec<_> = (0..3)
        .map(|_| client.clone().get(endpoint.clone()).send())
        .collect();
    let results = join_all(pending).await;

    let timed_out = results
        .into_iter()
        .any(|res| res.is_err_and(|err| err.is_timeout()));
    assert!(timed_out, "at least one request should have timed out");
}
#[cfg(not(target_arch = "wasm32"))]
#[tokio::test]
async fn with_concurrency_limit_layer_success() {
    let _ = env_logger::try_init();

    let server = server::http(move |_req| async { http::Response::default() });
    let url = format!("http://{}", server.addr());

    // Each connect is delayed 100 ms (under the 200 ms per-connect timeout
    // layer), connects are serialized by the concurrency limit, and pooling
    // is off so every request dials fresh. The generous 1000 ms total
    // timeout leaves room for three serialized connects to all succeed.
    let client = wreq::Client::builder()
        .connector_layer(DelayLayer::new(Duration::from_millis(100)))
        .connector_layer(TimeoutLayer::new(Duration::from_millis(200)))
        .connector_layer(ConcurrencyLimitLayer::new(1))
        .timeout(Duration::from_millis(1000))
        .pool_max_idle_per_host(0)
        .no_proxy()
        .build()
        .unwrap();

    // Warm-up request on its own.
    let res = client.get(url.clone()).send().await;
    assert!(res.is_ok());

    // Three concurrent requests, serialized at the connector.
    let mut futures = Vec::new();
    for _ in 0..3 {
        futures.push(client.clone().get(url.clone()).send());
    }
    let all_res = join_all(futures).await;
    for res in all_res.into_iter() {
        // Fix: corrected the assertion message grammar ("neither … or" ->
        // "neither … nor").
        assert!(
            res.is_ok(),
            "neither outer long timeout nor inner short timeout should be exceeded"
        );
    }
}
#[cfg(not(target_arch = "wasm32"))]
#[tokio::test]
async fn no_generic_bounds_required_for_client_new() {
    let _ = env_logger::try_init();

    // Regression check: `Client::new()` must stay usable without any layer
    // generics leaking into its type, and the default client must work.
    let server = server::http(move |_req| async { http::Response::default() });
    let endpoint = format!("http://{}", server.addr());

    let outcome = wreq::Client::new().get(endpoint).send().await;
    assert!(outcome.is_ok());
}