use std::net::SocketAddr;
/// Request hook that guarantees an `x-user-id` header is present.
///
/// Reads the incoming `x-user-id` header, falling back to `"anonymous"`
/// when the client did not send one, and writes the resulting value back
/// so downstream policies (e.g. the header-sticky balancer) always see it.
///
/// The value is copied to an owned `String` first because `ctx.header`
/// borrows `ctx` immutably while `ctx.set_header` needs it mutably.
#[apigate::hook]
async fn inject_user_id(ctx: &mut apigate::PartsCtx) -> apigate::HookResult {
    let user_id = match ctx.header("x-user-id") {
        Some(id) => id.to_owned(),
        None => "anonymous".to_owned(),
    };
    ctx.set_header("x-user-id", &user_id)?;
    Ok(())
}
// Route declarations for the `sales` service, mounted under the `/sales`
// prefix. The service-level default balancing policy is "sticky"; individual
// routes override it via their own `policy = "…"` attribute. Policy names
// here are bound to concrete strategies in `main` via `.policy(...)`.
#[apigate::service(name = "sales", prefix = "/sales", policy = "sticky")]
mod sales {
use super::*;
// Runs the `inject_user_id` hook first, then balances with the service
// default ("sticky" -> header_sticky on x-user-id; see main).
#[apigate::get("/user", before = [inject_user_id])]
async fn user_profile() {}
// Path-parameter route: stickiness keyed on the `{id}` segment.
#[apigate::get("/{id}", policy = "path_sticky")]
async fn by_id() {}
// Least-outstanding-requests balancing.
#[apigate::get("/fast", policy = "least_req")]
async fn fast() {}
// Lowest-observed-latency balancing.
#[apigate::get("/optimized", policy = "least_time")]
async fn optimized() {}
// Plain round-robin balancing.
#[apigate::get("/ping", policy = "round_robin")]
async fn ping() {}
}
/// Entry point: builds the gateway app, registers every load-balancing
/// policy referenced by the `sales` routes, prints a usage banner, and
/// serves on 127.0.0.1:8080.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Local address the gateway listens on.
    let listen = "127.0.0.1:8080".parse::<SocketAddr>()?;

    // Mount the sales service against a single upstream backend, then bind
    // each policy name used in the route attributes to a concrete strategy.
    let builder = apigate::App::builder()
        .mount_service(sales::routes(), ["http://127.0.0.1:8081"])
        .policy("sticky", apigate::Policy::header_sticky("x-user-id"))
        .policy("path_sticky", apigate::Policy::path_sticky("id"))
        .policy("least_req", apigate::Policy::least_request())
        .policy("least_time", apigate::Policy::least_time())
        .policy("round_robin", apigate::Policy::round_robin());
    let app = builder.build()?;

    // Usage banner; the trailing newline lets line-buffered stdout flush.
    print!(
        "\
policy - http://{listen}
HeaderSticky: curl -H 'x-user-id: user-1' http://{listen}/sales/user
PathSticky: curl http://{listen}/sales/abc-123
LeastRequest: curl http://{listen}/sales/fast
LeastTime: curl http://{listen}/sales/optimized
RoundRobin: curl http://{listen}/sales/ping
With multiple backends, load balancing distributes requests across them.
With one backend, all strategies resolve to the same backend.
Upstream: caddy run --config apigate/examples/upstream/Caddyfile
"
    );

    // Blocks until the server shuts down or errors.
    apigate::run(listen, app).await?;
    Ok(())
}