// tower_redis_cell/lib.rs
1//! This crate provides a `Tower` service with rate-limiting functionality
2//! backed by `Valkey` or `Redis` deployed with the [Redis Cell](https://github.com/brandur/redis-cell)
3//! module.
4//!
5//! The service and layer this crate provides are transport agnostic (just like
6//! [`Tower`](https://github.com/tower-rs/tower) itself), but here is a basic
7//! example using [`axum`](https://github.com/tokio-rs/axum).
8//!
9//! First, let's define [`Rule`] provider: the rule is defined per request, and
10//! contains [`Key`] (e.g. IP, API key, user ID), [`redis_cell::Policy`],
11//! and - optionally - a resource name (useful for tracing, debugging, audit).
12//!
13//!```
14//! use axum::http::{Request, Method};
15//! use tower_redis_cell::redis_cell::{Key, Policy};
16//! use tower_redis_cell::{ProvideRule, ProvideRuleResult, Rule};
17//!
18//! const BASIC_POLICY: Policy = Policy::from_tokens_per_second(1);
19//! const STRICT_POLICY: Policy = Policy::from_tokens_per_day(5).max_burst(5);
20//!
21//! #[derive(Clone)]
22//! struct RuleProvider;
23//!
24//! impl<T> ProvideRule<Request<T>> for RuleProvider {
25//! fn provide<'a>(&self, req: &'a Request<T>) -> ProvideRuleResult<'a> {
26//! let api_key = req
27//! .headers()
28//! .get("x-api-key")
29//! .and_then(|val| val.to_str().ok())
30//! .ok_or("cannot define key, since 'x-api-key' header is missing")?;
31//!
32//! let (path, method) = (req.uri().path(), req.method());
33//!
34//! let rule = if path.contains("/embed") &&
35//! (method == Method::POST || method == Method::PUT) {
//! let key = Key::triple(api_key, path, method.as_str());
//! Rule::new(key, STRICT_POLICY).resource("embeddings::create")
//! } else {
//! Rule::new(api_key, BASIC_POLICY)
//! };
42//!
43//! Ok(Some(rule))
44//! }
45//! }
46//!```
47//!
//! We now need to instantiate [`RateLimitConfig`] (which expects a rule provider
//! and an error handler), procure a Valkey/Redis client and use those to create
//! [`RateLimitLayer`]. Note that we are using [`ConnectionManager`](redis::aio::ConnectionManager)
//! in this example, but by default anything [`ConnectionLike`](https://docs.rs/redis/latest/redis/aio/trait.ConnectionLike.html)
52//! will do. There is also an option to use a pool, but you will need to enable
53//! a corresponding feature for that (currently, `deadpool` is supported).
54//!
55//!```no_run
56//! # use axum::http::Request;
57//! # use tower_redis_cell::{ProvideRule, ProvideRuleResult};
58//! # #[derive(Clone)]
59//! # struct RuleProvider;
60//! # impl<T> ProvideRule<Request<T>> for RuleProvider {
61//! # fn provide<'a>(&self, req: &'a Request<T>) -> ProvideRuleResult<'a> { todo!() }
62//! # }
63//!
64//! use axum::http::{StatusCode, header};
65//! use axum::response::{AppendHeaders, IntoResponse, Response};
66//! use axum::{Router, body::Body, routing::get, routing::post};
67//! use tower_redis_cell::{Error, RateLimitLayer, RateLimitConfig};
68//!
69//! #[tokio::main]
70//! async fn main() {
71//! tracing_subscriber::fmt()
72//! .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
73//! .init();
74//!
75//! let client = redis::Client::open("redis://127.0.0.1/").unwrap();
76//! let config = redis::aio::ConnectionManagerConfig::new();
77//! let connection = redis::aio::ConnectionManager::new_with_config(client, config)
78//! .await
79//! .unwrap();
80//!
81//! let config = RateLimitConfig::new(RuleProvider, |err, _req| {
82//! match err {
83//! Error::ProvideRule(err) => {
84//! tracing::warn!(
85//! key = ?err.key,
86//! detail = err.detail.as_deref(),
87//! "failed to define rule for request"
88//! );
89//! (StatusCode::UNAUTHORIZED, err.to_string()).into_response()
90//! }
91//! Error::RateLimit(err) => {
92//! tracing::warn!(
93//! key = %err.rule.key,
94//! policy = err.rule.policy.name,
95//! "request throttled"
96//! );
97//! (
98//! StatusCode::TOO_MANY_REQUESTS,
99//! AppendHeaders([(header::RETRY_AFTER, err.details.retry_after)]),
100//! Body::from("too many requests"),
101//! )
102//! .into_response()
103//! }
104//! err => {
105//! tracing::error!(err = %err, "unexpected error");
106//! (StatusCode::INTERNAL_SERVER_ERROR).into_response()
107//! }
108//! }
109//! });
110//!
111//! let app = Router::new()
112//! .route("/", get(|| async { "Hello, World!" }))
//! .route("/embed", post(|| async { "Create embeddings" }))
114//! .layer(RateLimitLayer::new(config, connection));
115//!
116//! let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap();
117//! axum::serve(listener, app).await.unwrap();
118//! }
119//!```
//! Note that we are inlining the error handler above, but this might as well be
//! a free-standing function. Also, you can optionally provide [`RateLimitConfig::on_success`]
//! and [`RateLimitConfig::on_unruled`] handlers, which both provide mutable access
//! to the response, and so - if needed - you can set any additional headers.
124
125// #![deny(missing_docs)]
126#![cfg_attr(docsrs, feature(doc_cfg))]
127
128mod config;
129mod error;
130mod rule;
131mod service;
132
133pub use config::RateLimitConfig;
134pub use error::{Error, ProvideRuleError};
135pub use rule::{
136 ProvideRule, ProvideRuleResult, RequestAllowedDetails, RequestBlockedDetails, Rule,
137};
138pub use service::{RateLimit, RateLimitLayer};
139
/// Pool-backed variants of [`RateLimit`](crate::RateLimit) and
/// [`RateLimitLayer`](crate::RateLimitLayer) that obtain connections from a
/// `deadpool` pool rather than holding a single connection.
///
/// Available only when the `deadpool` feature is enabled.
#[cfg(feature = "deadpool")]
// The crate enables `feature(doc_cfg)` under `docsrs` (see the crate-root
// attribute), so annotate the gated module to get the feature badge on docs.rs.
#[cfg_attr(docsrs, doc(cfg(feature = "deadpool")))]
pub mod deadpool {
    pub use crate::service::deadpool::{RateLimit, RateLimitLayer};
}
144
145pub use redis_cell_rs as redis_cell;