//! Benchmarks for `pg-api` 0.1.0 — a high-performance PostgreSQL REST API
//! driver with rate limiting, connection pooling, and observability.
use criterion::{black_box, criterion_group, criterion_main, Criterion, BenchmarkId, Throughput};
use tokio::runtime::Runtime;
use serde_json::json;

// Mock function to simulate API query processing
/// Mock query executor: pretends to run `query` with the supplied bound
/// parameters and returns a short summary string.
///
/// Stands in for real driver work so the benchmarks measure harness and
/// call overhead, not database I/O.
async fn process_query(query: &str, params: Vec<serde_json::Value>) -> String {
    let param_count = params.len();
    format!("Processed: {} with {} params", query, param_count)
}

// Mock function to simulate batch processing
/// Mock batch executor: runs each `(query, params)` pair through
/// [`process_query`] sequentially and collects the results.
///
/// Queries are awaited one at a time — there is no concurrency here, so a
/// batch of N costs N sequential awaits.
async fn process_batch(queries: Vec<(&str, Vec<serde_json::Value>)>) -> Vec<String> {
    // Preallocate: the result count always equals the batch size, so avoid
    // repeated grow-and-copy reallocations inside the benchmarked path.
    let mut results = Vec::with_capacity(queries.len());
    for (query, params) in queries {
        results.push(process_query(query, params).await);
    }
    results
}

/// Benchmarks single-query processing at several query-text sizes.
///
/// Throughput is reported in elements per second, keyed by the target
/// query size.
fn benchmark_single_query(c: &mut Criterion) {
    let rt = Runtime::new().unwrap();

    let mut group = c.benchmark_group("single_query");

    // Test different target query sizes (roughly, in characters).
    for query_size in [10, 100, 1000].iter() {
        // Build a query of roughly `query_size` characters by repeating a
        // ~33-char template. BUG FIX: integer division made `10 / 30 == 0`,
        // so the smallest case previously benchmarked an *empty* query
        // string; clamp the repeat count to at least 1.
        let repeats = (*query_size / 30).max(1);
        let query = "SELECT * FROM users WHERE id = $1".repeat(repeats);
        let params = vec![json!(1)];

        group.throughput(Throughput::Elements(*query_size as u64));
        group.bench_with_input(
            BenchmarkId::from_parameter(query_size),
            query_size,
            |b, _| {
                b.to_async(&rt).iter(|| async {
                    // `params` is cloned per iteration because process_query
                    // takes ownership; the clone cost is part of the timing.
                    process_query(black_box(&query), black_box(params.clone())).await
                });
            },
        );
    }
    group.finish();
}

/// Benchmarks batch query processing across several batch sizes.
///
/// Throughput is reported in queries (elements) per second.
fn benchmark_batch_queries(c: &mut Criterion) {
    let rt = Runtime::new().unwrap();

    let mut group = c.benchmark_group("batch_queries");

    for &batch_size in &[10, 50, 100, 500] {
        // One parameterized query per batch entry, each with a distinct id.
        let queries: Vec<(&str, Vec<serde_json::Value>)> = (0..batch_size)
            .map(|i| ("SELECT * FROM users WHERE id = $1", vec![json!(i)]))
            .collect();

        group.throughput(Throughput::Elements(batch_size as u64));
        group.bench_with_input(
            BenchmarkId::from_parameter(batch_size),
            &batch_size,
            |b, _| {
                // NOTE: the clone sits inside the measured closure because
                // process_batch takes ownership of the batch, so its cost is
                // included in the timing.
                b.to_async(&rt)
                    .iter(|| async { process_batch(black_box(queries.clone())).await });
            },
        );
    }
    group.finish();
}

/// Benchmarks serde_json serialization of row-shaped response payloads.
///
/// Throughput is reported in rows (elements) per second.
fn benchmark_json_serialization(c: &mut Criterion) {
    let mut group = c.benchmark_group("json_serialization");

    // Payload sizes, in number of rows.
    for &num_rows in &[10, 100, 1000, 10000] {
        // Synthesize a result set resembling a `users` table response.
        let rows: Vec<serde_json::Value> = (0..num_rows)
            .map(|i| {
                json!({
                    "id": i,
                    "name": format!("User {}", i),
                    "email": format!("user{}@example.com", i),
                    "created_at": "2024-01-01T00:00:00Z",
                    "active": i % 2 == 0,
                })
            })
            .collect();

        group.throughput(Throughput::Elements(num_rows as u64));
        group.bench_with_input(
            BenchmarkId::from_parameter(num_rows),
            &num_rows,
            |b, _| {
                b.iter(|| black_box(serde_json::to_string(&rows).unwrap()));
            },
        );
    }
    group.finish();
}

/// Benchmarks simulated connection-pool acquire/release latency.
///
/// No real connections are involved: the "work" is an async sleep whose
/// duration scales with the pool size parameter.
fn benchmark_connection_pool(c: &mut Criterion) {
    let rt = Runtime::new().unwrap();

    let mut group = c.benchmark_group("connection_pool");

    for &pool_size in &[10, 25, 50, 100] {
        group.bench_with_input(
            BenchmarkId::new("acquire_release", pool_size),
            &pool_size,
            |b, &size| {
                b.to_async(&rt).iter(|| async move {
                    // Stand-in for acquiring and releasing a connection:
                    // sleep `size` microseconds.
                    let wait = tokio::time::Duration::from_micros(black_box(size) as u64);
                    tokio::time::sleep(wait).await;
                });
            },
        );
    }
    group.finish();
}

/// Benchmarks a mock rate-limit check at several request rates.
///
/// Throughput is reported in requests (elements) per second, keyed by the
/// configured rate.
fn benchmark_rate_limiting(c: &mut Criterion) {
    let mut group = c.benchmark_group("rate_limiting");

    for &requests_per_second in &[100, 1000, 10000, 100000] {
        group.throughput(Throughput::Elements(requests_per_second as u64));
        group.bench_with_input(
            BenchmarkId::from_parameter(requests_per_second),
            &requests_per_second,
            |b, &rps| {
                b.iter(|| {
                    // Mock check: compare wall-clock seconds (mod a 60s
                    // window) against the per-second budget. This is a
                    // placeholder, not a real token-bucket algorithm.
                    let now_secs = std::time::SystemTime::now()
                        .duration_since(std::time::UNIX_EPOCH)
                        .unwrap()
                        .as_secs();
                    black_box((now_secs % 60) < (rps / 60) as u64);
                });
            },
        );
    }
    group.finish();
}

// Register every benchmark function under one group and generate the
// `main` entry point that `cargo bench` invokes.
criterion_group!(
    benches,
    benchmark_single_query,
    benchmark_batch_queries,
    benchmark_json_serialization,
    benchmark_connection_pool,
    benchmark_rate_limiting
);
criterion_main!(benches);