#![allow(dead_code)]
#![allow(clippy::cast_sign_loss)]
#![allow(clippy::missing_const_for_fn)]
#![allow(clippy::unnecessary_literal_bound)]
#![allow(clippy::too_many_lines)]
#![allow(clippy::cast_possible_truncation)]
#![allow(clippy::cast_precision_loss)]
#![allow(clippy::uninlined_format_args)]
#![allow(clippy::items_after_statements)]
#![allow(clippy::needless_continue)]
#![allow(clippy::struct_excessive_bools)]
#![allow(clippy::fn_params_excessive_bools)]
use async_trait::async_trait;
use openai_ergonomic::{Error, Result};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant, SystemTime};
use tokio::time::sleep;
use tracing::{debug, error, info, warn};
/// Hook points for intercepting the request/response lifecycle.
///
/// Implementations override any subset of the hooks; the defaults are
/// no-ops that leave the pipeline unchanged.
#[async_trait]
trait Middleware: Send + Sync + std::fmt::Debug {
    /// Called before the request executes; may mutate headers, body, or
    /// metadata. Returning an error aborts the request and routes it
    /// through the chain's error handling.
    async fn before_request(&self, context: &mut RequestContext) -> Result<()> {
        let _ = context;
        Ok(())
    }
    /// Called after a successful response; may mutate the response in place.
    async fn after_response(&self, context: &mut ResponseContext) -> Result<()> {
        let _ = context;
        Ok(())
    }
    /// Called when any stage fails; the returned action decides whether the
    /// error is propagated, retried, transformed, or replaced by a response.
    async fn on_error(&self, context: &mut ErrorContext) -> Result<ErrorAction> {
        let _ = context;
        Ok(ErrorAction::Propagate)
    }
    /// Short identifier used in log messages.
    fn name(&self) -> &str;
    /// Execution order: lower values run earlier in `before_request`
    /// (and correspondingly later in `after_response`). Defaults to 100.
    fn priority(&self) -> i32 {
        100
    }
}
/// Mutable state describing an outgoing request as it flows through
/// `before_request` hooks.
#[derive(Debug)]
struct RequestContext {
    /// HTTP method, e.g. "POST".
    method: String,
    /// Fully-qualified request URL.
    url: String,
    /// Outgoing HTTP headers.
    headers: HashMap<String, String>,
    /// Serialized request body.
    body: String,
    /// Free-form key/value scratch space shared between middlewares.
    metadata: HashMap<String, String>,
    /// When the request was created; used to compute response duration.
    start_time: Instant,
    /// Correlation id included in log lines.
    request_id: String,
}
/// Snapshot of a completed exchange, handed to `after_response` hooks.
#[derive(Debug)]
struct ResponseContext {
    /// The request that produced this response.
    request: RequestContext,
    /// HTTP status code.
    status_code: u16,
    /// Response headers.
    headers: HashMap<String, String>,
    /// Response body (typically JSON text).
    body: String,
    /// Elapsed time measured from the request's `start_time`.
    duration: Duration,
    /// Free-form key/value scratch space for middlewares.
    metadata: HashMap<String, String>,
}
/// State handed to `on_error` hooks when any stage of the pipeline fails.
#[derive(Debug)]
struct ErrorContext {
    /// The request that was being processed when the error occurred.
    request: RequestContext,
    /// The current error (may be replaced via `ErrorAction::TransformError`).
    error: Error,
    /// How many times this request has already been retried.
    retry_count: i32,
    /// Free-form key/value scratch space.
    metadata: HashMap<String, String>,
}
/// What the chain should do after a middleware inspects an error.
#[derive(Debug)]
enum ErrorAction {
    /// Keep the current error and continue to the next middleware.
    Propagate,
    /// Re-run the whole request (subject to the chain's retry cap).
    Retry,
    /// Stop error handling and return a synthetic 200 response with this body.
    CustomResponse(String),
    /// Replace the current error and continue to the next middleware.
    TransformError(Error),
}
/// An ordered collection of middlewares applied around each request.
#[derive(Debug)]
struct MiddlewareChain {
    /// Kept sorted by ascending priority (see `add_middleware`).
    middlewares: Vec<Arc<dyn Middleware>>,
}
impl MiddlewareChain {
    /// Metadata key used to carry the retry count across the recursive
    /// `process_request` call that a retry performs.
    const RETRY_COUNT_KEY: &'static str = "middleware.retry_count";
    /// Upper bound on chain-level retries before an error is propagated.
    const MAX_RETRIES: i32 = 3;

    /// Creates an empty chain.
    fn new() -> Self {
        Self {
            middlewares: Vec::new(),
        }
    }

    /// Registers a middleware, keeping the chain sorted by ascending
    /// priority (lower values run first in `before_request`).
    fn add_middleware(mut self, middleware: Arc<dyn Middleware>) -> Self {
        self.middlewares.push(middleware);
        self.middlewares.sort_by_key(|m| m.priority());
        self
    }

    /// Reads the retry count previously stashed in the request metadata;
    /// 0 on the first attempt.
    fn retry_count_of(context: &RequestContext) -> i32 {
        context
            .metadata
            .get(Self::RETRY_COUNT_KEY)
            .and_then(|v| v.parse().ok())
            .unwrap_or(0)
    }

    /// Runs `before_request` hooks in priority order, executes the request,
    /// then runs `after_response` hooks in reverse order. Failures at any
    /// stage are routed through `handle_error`.
    async fn process_request(&self, mut context: RequestContext) -> Result<ResponseContext> {
        for middleware in &self.middlewares {
            debug!(
                "Executing before_request for middleware: {}",
                middleware.name()
            );
            if let Err(e) = middleware.before_request(&mut context).await {
                warn!(
                    "Middleware {} failed in before_request: {}",
                    middleware.name(),
                    e
                );
                // BUGFIX: previously retry_count was hard-coded to 0 here,
                // which reset the count on every recursive retry and made the
                // retry cap unreachable (unbounded retries on persistent
                // errors). The count is now restored from request metadata.
                let retry_count = Self::retry_count_of(&context);
                return self
                    .handle_error(ErrorContext {
                        request: context,
                        error: e,
                        retry_count,
                        metadata: HashMap::new(),
                    })
                    .await;
            }
        }
        let response_result = self.execute_request(&context).await;
        match response_result {
            Ok(mut response_context) => {
                // Reverse order so the middleware that ran first on the way
                // in runs last on the way out.
                for middleware in self.middlewares.iter().rev() {
                    debug!(
                        "Executing after_response for middleware: {}",
                        middleware.name()
                    );
                    if let Err(e) = middleware.after_response(&mut response_context).await {
                        warn!(
                            "Middleware {} failed in after_response: {}",
                            middleware.name(),
                            e
                        );
                        let retry_count = Self::retry_count_of(&response_context.request);
                        return self
                            .handle_error(ErrorContext {
                                request: response_context.request,
                                error: e,
                                retry_count,
                                metadata: HashMap::new(),
                            })
                            .await;
                    }
                }
                Ok(response_context)
            }
            Err(e) => {
                let retry_count = Self::retry_count_of(&context);
                self.handle_error(ErrorContext {
                    request: context,
                    error: e,
                    retry_count,
                    metadata: HashMap::new(),
                })
                .await
            }
        }
    }

    /// Gives every middleware's `on_error` hook a chance to react to the
    /// failure; the first non-`Propagate` action that applies wins.
    async fn handle_error(&self, mut error_context: ErrorContext) -> Result<ResponseContext> {
        for middleware in &self.middlewares {
            debug!("Executing on_error for middleware: {}", middleware.name());
            match middleware.on_error(&mut error_context).await? {
                ErrorAction::Propagate => {}
                ErrorAction::Retry => {
                    if error_context.retry_count < Self::MAX_RETRIES {
                        info!("Retrying request due to middleware: {}", middleware.name());
                        error_context.retry_count += 1;
                        // Persist the incremented count on the request itself
                        // so the recursive call below can rebuild its
                        // ErrorContext with the true attempt number.
                        error_context.request.metadata.insert(
                            Self::RETRY_COUNT_KEY.to_string(),
                            error_context.retry_count.to_string(),
                        );
                        // Linear backoff: 1s, 2s, 3s.
                        sleep(Duration::from_millis(
                            1000 * (error_context.retry_count as u64),
                        ))
                        .await;
                        // Box::pin breaks the infinitely-sized future type
                        // created by async recursion.
                        return Box::pin(self.process_request(error_context.request)).await;
                    }
                    warn!("Max retries exceeded, propagating error");
                }
                ErrorAction::CustomResponse(body) => {
                    info!(
                        "Returning custom response from middleware: {}",
                        middleware.name()
                    );
                    return Ok(ResponseContext {
                        request: error_context.request,
                        status_code: 200,
                        headers: HashMap::new(),
                        body,
                        duration: Duration::from_millis(0),
                        metadata: HashMap::new(),
                    });
                }
                ErrorAction::TransformError(new_error) => {
                    error_context.error = new_error;
                    continue;
                }
            }
        }
        Err(error_context.error)
    }

    /// Simulates executing the HTTP request (no network I/O): sleeps briefly
    /// and fabricates a canned chat-completion JSON response.
    async fn execute_request(&self, context: &RequestContext) -> Result<ResponseContext> {
        debug!("Executing request: {} {}", context.method, context.url);
        sleep(Duration::from_millis(100)).await;
        let response_body = serde_json::json!({
            "id": "chatcmpl-123",
            "object": "chat.completion",
            "created": 1_677_652_288,
            "model": "gpt-3.5-turbo",
            "choices": [{
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": "This is a simulated response processed through middleware"
                },
                "finish_reason": "stop"
            }],
            "usage": {
                "prompt_tokens": 20,
                "completion_tokens": 15,
                "total_tokens": 35
            }
        })
        .to_string();
        Ok(ResponseContext {
            // RequestContext does not derive Clone, so copy field by field.
            request: RequestContext {
                method: context.method.clone(),
                url: context.url.clone(),
                headers: context.headers.clone(),
                body: context.body.clone(),
                metadata: context.metadata.clone(),
                start_time: context.start_time,
                request_id: context.request_id.clone(),
            },
            status_code: 200,
            headers: HashMap::from([("content-type".to_string(), "application/json".to_string())]),
            body: response_body,
            duration: context.start_time.elapsed(),
            metadata: HashMap::new(),
        })
    }
}
/// Attaches bearer-token (and optional organization) credentials to requests.
#[derive(Debug)]
struct AuthenticationMiddleware {
    /// API key sent as a `Bearer` token.
    api_key: String,
    /// Optional OpenAI organization id.
    organization_id: Option<String>,
}

impl AuthenticationMiddleware {
    /// Creates the middleware from an API key and an optional organization id.
    fn new(api_key: String, organization_id: Option<String>) -> Self {
        Self {
            organization_id,
            api_key,
        }
    }
}
#[async_trait]
impl Middleware for AuthenticationMiddleware {
    /// Injects the `Authorization` (and optional `OpenAI-Organization`)
    /// headers and marks the request as authenticated in its metadata.
    async fn before_request(&self, context: &mut RequestContext) -> Result<()> {
        debug!("Adding authentication headers");
        let bearer = format!("Bearer {}", self.api_key);
        context.headers.insert("Authorization".to_string(), bearer);
        if let Some(org_id) = self.organization_id.as_ref() {
            context
                .headers
                .insert("OpenAI-Organization".to_string(), org_id.clone());
        }
        context
            .metadata
            .insert("authenticated".to_string(), "true".to_string());
        Ok(())
    }

    fn name(&self) -> &str {
        "authentication"
    }

    /// Lowest priority in the example chain so authentication runs first.
    fn priority(&self) -> i32 {
        10
    }
}
/// Rolling one-minute-window rate limiter.
#[derive(Debug)]
struct RateLimitingMiddleware {
    /// Maximum number of requests allowed per rolling 60-second window.
    requests_per_minute: i32,
    /// Timestamps of requests seen within (roughly) the last minute; shared
    /// so every handle to this middleware observes the same window.
    request_timestamps: Arc<Mutex<Vec<SystemTime>>>,
}

impl RateLimitingMiddleware {
    /// Creates a limiter allowing `requests_per_minute` requests per window.
    fn new(requests_per_minute: i32) -> Self {
        Self {
            requests_per_minute,
            request_timestamps: Arc::new(Mutex::new(Vec::new())),
        }
    }

    /// Returns true when the rolling window already holds the allowed number
    /// of requests. Also prunes timestamps older than one minute.
    fn should_rate_limit(&self) -> bool {
        let mut timestamps = self.request_timestamps.lock().unwrap();
        let now = SystemTime::now();
        let one_minute_ago = now - Duration::from_secs(60);
        // BUGFIX: the closure pattern was corrupted to `|×tamp|` (a stray
        // multiplication sign from a mangled `&times;` entity), which does
        // not parse. Bind the element by value and compare it directly.
        timestamps.retain(|&timestamp| timestamp > one_minute_ago);
        timestamps.len() >= self.requests_per_minute as usize
    }

    /// Records the current time as one request against the window.
    fn record_request(&self) {
        let mut timestamps = self.request_timestamps.lock().unwrap();
        timestamps.push(SystemTime::now());
    }
}
#[async_trait]
impl Middleware for RateLimitingMiddleware {
    /// Delays the request when the rolling one-minute window is already
    /// full, then records this request's timestamp.
    async fn before_request(&self, context: &mut RequestContext) -> Result<()> {
        if self.should_rate_limit() {
            warn!("Rate limit exceeded, delaying request");
            // BUGFIX: guard against a zero (or negative) configured rate,
            // which previously panicked with a divide-by-zero (or wrapped to
            // a huge divisor via the i32 -> u32 cast) when computing the
            // per-request delay.
            let delay = Duration::from_secs(60) / self.requests_per_minute.max(1) as u32;
            sleep(delay).await;
        }
        self.record_request();
        context
            .metadata
            .insert("rate_limited".to_string(), "checked".to_string());
        debug!("Rate limiting check passed");
        Ok(())
    }

    fn name(&self) -> &str {
        "rate_limiting"
    }

    /// Runs early, right after authentication/validation.
    fn priority(&self) -> i32 {
        20
    }
}
/// Emits request/response logs; each aspect is individually toggleable.
#[derive(Debug)]
struct LoggingMiddleware {
    /// Log the outgoing request line.
    log_requests: bool,
    /// Log the response status and duration.
    log_responses: bool,
    /// Additionally log headers (both directions).
    log_headers: bool,
    /// Additionally log bodies (both directions).
    log_body: bool,
}

impl LoggingMiddleware {
    /// Builds a logger with per-aspect switches.
    fn new(log_requests: bool, log_responses: bool, log_headers: bool, log_body: bool) -> Self {
        Self {
            log_body,
            log_headers,
            log_responses,
            log_requests,
        }
    }
}
#[async_trait]
impl Middleware for LoggingMiddleware {
    /// Logs the outgoing request line, headers, and body as configured.
    async fn before_request(&self, context: &mut RequestContext) -> Result<()> {
        if !self.log_requests {
            return Ok(());
        }
        info!(
            "Request [{}]: {} {}",
            context.request_id, context.method, context.url
        );
        if self.log_headers {
            for (key, value) in &context.headers {
                debug!(
                    "Request header [{}]: {}: {}",
                    context.request_id, key, value
                );
            }
        }
        if self.log_body && !context.body.is_empty() {
            debug!("Request body [{}]: {}", context.request_id, context.body);
        }
        Ok(())
    }

    /// Logs the response status, duration, headers, and body as configured.
    async fn after_response(&self, context: &mut ResponseContext) -> Result<()> {
        if !self.log_responses {
            return Ok(());
        }
        info!(
            "Response [{}]: {} ({:?})",
            context.request.request_id, context.status_code, context.duration
        );
        if self.log_headers {
            for (key, value) in &context.headers {
                debug!(
                    "Response header [{}]: {}: {}",
                    context.request.request_id, key, value
                );
            }
        }
        if self.log_body {
            debug!(
                "Response body [{}]: {}",
                context.request.request_id, context.body
            );
        }
        Ok(())
    }

    /// Always logs the error; never alters error handling.
    async fn on_error(&self, context: &mut ErrorContext) -> Result<ErrorAction> {
        error!(
            "Request error [{}]: {}",
            context.request.request_id, context.error
        );
        Ok(ErrorAction::Propagate)
    }

    fn name(&self) -> &str {
        "logging"
    }

    /// Very late priority: logging wraps the outside of the chain.
    fn priority(&self) -> i32 {
        1000
    }
}
/// Rejects requests whose body exceeds a size cap or which lack any of a
/// set of required headers.
#[derive(Debug)]
struct ValidationMiddleware {
    /// Maximum allowed request body size, in bytes.
    max_request_size: usize,
    /// Headers every request must carry.
    required_headers: Vec<String>,
}

impl ValidationMiddleware {
    /// Creates a validator with a body-size limit and required header names.
    fn new(max_request_size: usize, required_headers: Vec<String>) -> Self {
        Self {
            required_headers,
            max_request_size,
        }
    }
}
#[async_trait]
impl Middleware for ValidationMiddleware {
    /// Fails the request when the body is over the size cap or a required
    /// header is absent; otherwise records the validation in metadata.
    async fn before_request(&self, context: &mut RequestContext) -> Result<()> {
        let body_len = context.body.len();
        if body_len > self.max_request_size {
            return Err(Error::InvalidRequest(format!(
                "Request body too large: {} bytes (max: {} bytes)",
                body_len, self.max_request_size
            )));
        }
        // Report the first missing header in declaration order.
        let missing = self
            .required_headers
            .iter()
            .find(|header| !context.headers.contains_key(*header));
        if let Some(header) = missing {
            return Err(Error::InvalidRequest(format!(
                "Required header missing: {}",
                header
            )));
        }
        context
            .metadata
            .insert("validated".to_string(), "true".to_string());
        debug!("Request validation passed");
        Ok(())
    }

    fn name(&self) -> &str {
        "validation"
    }

    fn priority(&self) -> i32 {
        15
    }
}
/// Stateless middleware that augments JSON response bodies with timing and
/// request-id fields.
#[derive(Debug)]
struct ResponseTransformationMiddleware;
#[async_trait]
impl Middleware for ResponseTransformationMiddleware {
    /// When the body parses as a JSON object, adds `processing_time_ms` and
    /// `request_id` fields; any other body passes through untouched.
    async fn after_response(&self, context: &mut ResponseContext) -> Result<()> {
        let parsed = serde_json::from_str::<serde_json::Value>(&context.body).ok();
        if let Some(mut response_json) = parsed {
            if let Some(obj) = response_json.as_object_mut() {
                let elapsed_ms = context.duration.as_millis() as u64;
                obj.insert(
                    "processing_time_ms".to_string(),
                    serde_json::Value::Number(serde_json::Number::from(elapsed_ms)),
                );
                obj.insert(
                    "request_id".to_string(),
                    serde_json::Value::String(context.request.request_id.clone()),
                );
                context.body = serde_json::to_string(&response_json).map_err(|e| {
                    Error::InvalidRequest(format!("Response transformation failed: {}", e))
                })?;
            }
        }
        debug!("Response transformation completed");
        Ok(())
    }

    fn name(&self) -> &str {
        "response_transformation"
    }

    fn priority(&self) -> i32 {
        50
    }
}
/// Retries transient failures with exponential backoff.
#[derive(Debug)]
struct RetryMiddleware {
    /// Maximum number of retry attempts.
    max_retries: i32,
    /// Backoff base; attempt `n` waits `base_delay_ms * 2^n` milliseconds.
    base_delay_ms: u64,
}

impl RetryMiddleware {
    /// Creates a retry policy with the given attempt cap and base delay.
    fn new(max_retries: i32, base_delay_ms: u64) -> Self {
        Self {
            base_delay_ms,
            max_retries,
        }
    }
}
#[async_trait]
impl Middleware for RetryMiddleware {
async fn on_error(&self, context: &mut ErrorContext) -> Result<ErrorAction> {
match &context.error {
Error::InvalidRequest(msg) if msg.contains("rate limit") || msg.contains("timeout") => {
if context.retry_count < self.max_retries {
let delay_ms = self.base_delay_ms * 2_u64.pow(context.retry_count as u32);
info!(
"Retrying request {} (attempt {}/{}) after {}ms",
context.request.request_id,
context.retry_count + 1,
self.max_retries,
delay_ms
);
sleep(Duration::from_millis(delay_ms)).await;
Ok(ErrorAction::Retry)
} else {
warn!(
"Max retries ({}) exceeded for request {}",
self.max_retries, context.request.request_id
);
Ok(ErrorAction::Propagate)
}
}
_ => Ok(ErrorAction::Propagate),
}
}
fn name(&self) -> &str {
"retry"
}
fn priority(&self) -> i32 {
900 }
}
/// Collects basic request metrics behind shared, thread-safe counters.
#[derive(Debug)]
struct MetricsMiddleware {
    /// Total requests that entered the chain.
    request_count: Arc<Mutex<u64>>,
    /// Total requests that reached `on_error`.
    error_count: Arc<Mutex<u64>>,
    /// Sum of response latencies accumulated in `after_response`.
    total_duration: Arc<Mutex<Duration>>,
}
impl MetricsMiddleware {
    /// Creates a metrics collector with all counters at zero.
    fn new() -> Self {
        Self {
            request_count: Arc::new(Mutex::new(0)),
            error_count: Arc::new(Mutex::new(0)),
            total_duration: Arc::new(Mutex::new(Duration::ZERO)),
        }
    }

    /// Takes a point-in-time snapshot of the collected counters.
    ///
    /// NOTE(review): the three counters are read under separate lock
    /// acquisitions, so the snapshot can be transiently inconsistent under
    /// concurrent traffic.
    fn get_metrics(&self) -> MetricsSnapshot {
        let request_count = *self.request_count.lock().unwrap();
        let error_count = *self.error_count.lock().unwrap();
        let total_duration = *self.total_duration.lock().unwrap();
        MetricsSnapshot {
            request_count,
            error_count,
            // BUGFIX: saturate instead of subtracting directly — an error
            // counted between the two reads above would make
            // `error_count > request_count` and the plain u64 subtraction
            // would panic in debug builds (wrap in release).
            success_count: request_count.saturating_sub(error_count),
            average_duration: if request_count > 0 {
                total_duration / request_count as u32
            } else {
                Duration::ZERO
            },
            error_rate: if request_count > 0 {
                (error_count as f64 / request_count as f64) * 100.0
            } else {
                0.0
            },
        }
    }
}
/// Point-in-time view of the metrics collected by `MetricsMiddleware`.
#[derive(Debug)]
struct MetricsSnapshot {
    /// Total requests counted.
    request_count: u64,
    /// Requests that errored.
    error_count: u64,
    /// Requests that did not error (request_count - error_count).
    success_count: u64,
    /// Mean latency across all counted requests.
    average_duration: Duration,
    /// Percentage (0-100) of requests that errored.
    error_rate: f64,
}
impl MetricsSnapshot {
    /// Logs a human-readable summary of the snapshot at info level.
    fn print_metrics(&self) {
        info!("=== Metrics Summary ===");
        info!("Total requests: {}", self.request_count);
        info!("Successful requests: {}", self.success_count);
        info!("Failed requests: {}", self.error_count);
        info!("Error rate: {:.2}%", self.error_rate);
        info!("Average duration: {:?}", self.average_duration);
    }
}
#[async_trait]
impl Middleware for MetricsMiddleware {
    /// Counts every request entering the chain.
    async fn before_request(&self, _context: &mut RequestContext) -> Result<()> {
        let mut requests = self.request_count.lock().unwrap();
        *requests += 1;
        Ok(())
    }

    /// Accumulates the latency of completed responses.
    async fn after_response(&self, context: &mut ResponseContext) -> Result<()> {
        let mut total = self.total_duration.lock().unwrap();
        *total += context.duration;
        Ok(())
    }

    /// Counts failures; never alters error handling.
    async fn on_error(&self, _context: &mut ErrorContext) -> Result<ErrorAction> {
        let mut errors = self.error_count.lock().unwrap();
        *errors += 1;
        Ok(ErrorAction::Propagate)
    }

    fn name(&self) -> &str {
        "metrics"
    }

    /// Earliest priority so metrics observe the entire pipeline.
    fn priority(&self) -> i32 {
        1
    }
}
/// Minimal API client that routes every call through a `MiddlewareChain`.
#[derive(Debug)]
struct MiddlewareClient {
    /// Ordered middleware pipeline applied to each request.
    chain: MiddlewareChain,
    /// Base URL prepended to endpoint paths.
    base_url: String,
}
impl MiddlewareClient {
fn new(base_url: String) -> Self {
Self {
chain: MiddlewareChain::new(),
base_url,
}
}
fn with_middleware(mut self, middleware: Arc<dyn Middleware>) -> Self {
self.chain = self.chain.add_middleware(middleware);
self
}
async fn chat_completion(&self, messages: Vec<ChatMessage>) -> Result<String> {
let request_id = format!("req_{}", generate_request_id());
let request_body = serde_json::json!({
"model": "gpt-3.5-turbo",
"messages": messages,
"max_tokens": 150
});
let context = RequestContext {
method: "POST".to_string(),
url: format!("{}/v1/chat/completions", self.base_url),
headers: HashMap::from([("content-type".to_string(), "application/json".to_string())]),
body: request_body.to_string(),
metadata: HashMap::new(),
start_time: Instant::now(),
request_id,
};
let response = self.chain.process_request(context).await?;
if let Ok(response_json) = serde_json::from_str::<serde_json::Value>(&response.body) {
if let Some(choices) = response_json["choices"].as_array() {
if let Some(first_choice) = choices.first() {
if let Some(content) = first_choice["message"]["content"].as_str() {
return Ok(content.to_string());
}
}
}
}
Err(Error::InvalidRequest(
"Failed to parse response".to_string(),
))
}
}
/// A single chat message (role + content) serialized into the request body.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct ChatMessage {
    role: String,
    content: String,
}

impl ChatMessage {
    /// Shared constructor backing the role-specific helpers below.
    fn with_role(role: &str, content: &str) -> Self {
        Self {
            role: role.to_string(),
            content: content.to_string(),
        }
    }

    /// Builds a `user`-role message.
    fn user(content: &str) -> Self {
        Self::with_role("user", content)
    }

    /// Builds a `system`-role message.
    fn system(content: &str) -> Self {
        Self::with_role("system", content)
    }

    /// Builds an `assistant`-role message.
    fn assistant(content: &str) -> Self {
        Self::with_role("assistant", content)
    }
}
/// Produces a quasi-unique id from the current Unix time in nanoseconds,
/// rendered as lowercase hex.
fn generate_request_id() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    let nanos = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock set before the Unix epoch")
        .as_nanos();
    format!("{nanos:x}")
}
/// Entry point: walks through five middleware-pattern examples against a
/// simulated (in-process) endpoint — no real network calls are made.
#[tokio::main]
async fn main() -> Result<()> {
    // Initialise tracing; honour RUST_LOG when present, default to "info".
    tracing_subscriber::fmt()
        .with_env_filter(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")),
        )
        .init();
    info!("Starting middleware patterns example");
    info!("=== Example 1: Basic Middleware Chain ===");
    // Keep a second Arc handle so the counters can still be read after the
    // client takes ownership of the middleware.
    let metrics_middleware = Arc::new(MetricsMiddleware::new());
    let metrics_ref = Arc::clone(&metrics_middleware);
    let client = MiddlewareClient::new("https://api.openai.com".to_string())
        .with_middleware(Arc::new(AuthenticationMiddleware::new(
            "test-api-key".to_string(),
            Some("org-test".to_string()),
        )))
        .with_middleware(Arc::new(ValidationMiddleware::new(
            10_000,
            vec!["Authorization".to_string()],
        )))
        .with_middleware(Arc::new(RateLimitingMiddleware::new(60)))
        .with_middleware(Arc::new(LoggingMiddleware::new(true, true, false, false)))
        .with_middleware(Arc::new(ResponseTransformationMiddleware))
        .with_middleware(metrics_middleware);
    let messages = vec![
        ChatMessage::system("You are a helpful assistant."),
        ChatMessage::user("Hello, how are you?"),
    ];
    match client.chat_completion(messages).await {
        Ok(response) => {
            info!("Received response: {}", response);
        }
        Err(e) => {
            error!("Request failed: {}", e);
        }
    }
    metrics_ref.get_metrics().print_metrics();
    info!("\n=== Example 2: Error Handling and Retry ===");
    // RetryMiddleware only reacts to errors; the simulated request succeeds,
    // so this mostly demonstrates wiring.
    let error_client = MiddlewareClient::new("https://api.openai.com".to_string())
        .with_middleware(Arc::new(AuthenticationMiddleware::new(
            "test-api-key".to_string(),
            None,
        )))
        .with_middleware(Arc::new(RetryMiddleware::new(3, 1000)))
        .with_middleware(Arc::new(LoggingMiddleware::new(true, true, false, false)));
    let retry_messages = vec![ChatMessage::user("Test retry functionality")];
    match error_client.chat_completion(retry_messages).await {
        Ok(response) => {
            info!("Retry example completed: {}", response);
        }
        Err(e) => {
            warn!("Retry example failed after all attempts: {}", e);
        }
    }
    info!("\n=== Example 3: Custom Request Modification ===");
    // Middleware that injects static headers and tweaks the JSON body.
    #[derive(Debug)]
    struct CustomHeaderMiddleware {
        headers: HashMap<String, String>,
    }
    impl CustomHeaderMiddleware {
        // Pre-populates the two fixed headers this example adds.
        fn new() -> Self {
            let mut headers = HashMap::new();
            headers.insert(
                "X-Custom-Client".to_string(),
                "openai-ergonomic".to_string(),
            );
            headers.insert(
                "X-Request-Source".to_string(),
                "middleware-example".to_string(),
            );
            Self { headers }
        }
    }
    #[async_trait]
    impl Middleware for CustomHeaderMiddleware {
        // Adds the configured headers and, when the body parses as a JSON
        // object, injects `temperature` and a per-request `user` field.
        async fn before_request(&self, context: &mut RequestContext) -> Result<()> {
            for (key, value) in &self.headers {
                context.headers.insert(key.clone(), value.clone());
            }
            if let Ok(mut body_json) = serde_json::from_str::<serde_json::Value>(&context.body) {
                if let Some(obj) = body_json.as_object_mut() {
                    obj.insert(
                        "temperature".to_string(),
                        serde_json::Value::Number(serde_json::Number::from_f64(0.7).unwrap()),
                    );
                    obj.insert(
                        "user".to_string(),
                        serde_json::Value::String(format!(
                            "middleware_user_{}",
                            context.request_id
                        )),
                    );
                    context.body = serde_json::to_string(&body_json).map_err(|e| {
                        Error::InvalidRequest(format!("Body modification failed: {}", e))
                    })?;
                }
            }
            debug!("Added custom headers and modified request body");
            Ok(())
        }
        fn name(&self) -> &str {
            "custom_header"
        }
        fn priority(&self) -> i32 {
            30
        }
    }
    let custom_client = MiddlewareClient::new("https://api.openai.com".to_string())
        .with_middleware(Arc::new(AuthenticationMiddleware::new(
            "test-api-key".to_string(),
            None,
        )))
        .with_middleware(Arc::new(CustomHeaderMiddleware::new()))
        .with_middleware(Arc::new(LoggingMiddleware::new(true, true, true, true)));
    let custom_messages = vec![ChatMessage::user("Test custom middleware modifications")];
    match custom_client.chat_completion(custom_messages).await {
        Ok(response) => {
            info!("Custom middleware example completed: {}", response);
        }
        Err(e) => {
            error!("Custom middleware example failed: {}", e);
        }
    }
    info!("\n=== Example 4: Conditional Middleware ===");
    // Middleware that only acts when a predicate on the request holds.
    #[derive(Debug)]
    struct ConditionalMiddleware {
        // Plain fn pointer keeps the struct Debug-printable.
        condition: fn(&RequestContext) -> bool,
        action: String,
    }
    impl ConditionalMiddleware {
        fn new(condition: fn(&RequestContext) -> bool, action: String) -> Self {
            Self { condition, action }
        }
    }
    #[async_trait]
    impl Middleware for ConditionalMiddleware {
        // Records the configured action in metadata when the predicate holds.
        async fn before_request(&self, context: &mut RequestContext) -> Result<()> {
            if (self.condition)(context) {
                info!("Conditional middleware executing: {}", self.action);
                context
                    .metadata
                    .insert("conditional_action".to_string(), self.action.clone());
            } else {
                debug!("Conditional middleware skipped");
            }
            Ok(())
        }
        fn name(&self) -> &str {
            "conditional"
        }
        fn priority(&self) -> i32 {
            40
        }
    }
    let conditional_client = MiddlewareClient::new("https://api.openai.com".to_string())
        .with_middleware(Arc::new(AuthenticationMiddleware::new(
            "test-api-key".to_string(),
            None,
        )))
        .with_middleware(Arc::new(ConditionalMiddleware::new(
            |ctx| ctx.body.contains("special"),
            "Special request processing enabled".to_string(),
        )))
        .with_middleware(Arc::new(LoggingMiddleware::new(true, true, false, false)));
    let normal_messages = vec![ChatMessage::user("Regular request")];
    info!("Sending normal request (should skip conditional middleware)");
    match conditional_client.chat_completion(normal_messages).await {
        Ok(response) => {
            info!("Normal request completed: {}", response);
        }
        Err(e) => {
            error!("Normal request failed: {}", e);
        }
    }
    // The word "special" in the body triggers the predicate above.
    let special_messages = vec![ChatMessage::user(
        "This is a special request that triggers middleware",
    )];
    info!("Sending special request (should trigger conditional middleware)");
    match conditional_client.chat_completion(special_messages).await {
        Ok(response) => {
            info!("Special request completed: {}", response);
        }
        Err(e) => {
            error!("Special request failed: {}", e);
        }
    }
    info!("\n=== Example 5: Performance Monitoring ===");
    // Middleware that flags responses slower than a threshold.
    #[derive(Debug)]
    struct PerformanceMiddleware {
        slow_request_threshold: Duration,
    }
    impl PerformanceMiddleware {
        fn new(threshold: Duration) -> Self {
            Self {
                slow_request_threshold: threshold,
            }
        }
    }
    #[async_trait]
    impl Middleware for PerformanceMiddleware {
        // Warns (and tags metadata) when the response exceeded the threshold.
        async fn after_response(&self, context: &mut ResponseContext) -> Result<()> {
            if context.duration > self.slow_request_threshold {
                warn!(
                    "Slow request detected [{}]: {:?} (threshold: {:?})",
                    context.request.request_id, context.duration, self.slow_request_threshold
                );
                context.metadata.insert(
                    "performance_warning".to_string(),
                    "slow_request".to_string(),
                );
            } else {
                debug!(
                    "Request performance OK [{}]: {:?}",
                    context.request.request_id, context.duration
                );
            }
            Ok(())
        }
        fn name(&self) -> &str {
            "performance"
        }
        fn priority(&self) -> i32 {
            60
        }
    }
    let perf_metrics = Arc::new(MetricsMiddleware::new());
    let perf_metrics_ref = Arc::clone(&perf_metrics);
    // 200ms threshold: the simulated request sleeps 100ms, so requests
    // normally pass the check.
    let perf_client = MiddlewareClient::new("https://api.openai.com".to_string())
        .with_middleware(Arc::new(AuthenticationMiddleware::new(
            "test-api-key".to_string(),
            None,
        )))
        .with_middleware(Arc::new(PerformanceMiddleware::new(Duration::from_millis(
            200,
        ))))
        .with_middleware(perf_metrics)
        .with_middleware(Arc::new(LoggingMiddleware::new(true, true, false, false)));
    // Issue several requests so the averaged metrics are meaningful.
    for i in 1..=5 {
        let perf_messages = vec![ChatMessage::user(&format!(
            "Performance test request {}",
            i
        ))];
        match perf_client.chat_completion(perf_messages).await {
            Ok(response) => {
                info!("Performance test {} completed: {}", i, response);
            }
            Err(e) => {
                error!("Performance test {} failed: {}", i, e);
            }
        }
        sleep(Duration::from_millis(100)).await;
    }
    perf_metrics_ref.get_metrics().print_metrics();
    info!("Middleware patterns example completed successfully!");
    Ok(())
}