//! nano-get-bench.rs — throughput benchmarks for the `nano_get` HTTP client:
//! sequential GET, sequential HEAD, and pipelined GET, each driven against a
//! local single-connection server that replies with canned responses.
use std::io::{Read, Write};
use std::net::TcpListener;
use std::thread;
use std::time::{Duration, Instant};

use nano_get::{Client, ConnectionPolicy, Request};
7
/// Spawns a background thread that serves `responses` canned HTTP responses
/// over a single persistent (keep-alive) connection.
///
/// The server binds to an ephemeral port on 127.0.0.1, accepts exactly one
/// connection, and for each expected request reads until the end of the
/// request head (`\r\n\r\n`) before writing `response_bytes` back verbatim.
/// Bytes buffered past the head are kept in `pending`, so pipelined requests
/// already sitting in the buffer are consumed without another read.
///
/// Returns the base URL (`http://host:port`) and the server thread's handle;
/// callers should `join` the handle after driving all requests.
fn spawn_persistent_server(
    responses: usize,
    response_bytes: Vec<u8>,
) -> (String, thread::JoinHandle<()>) {
    // Port 0 asks the OS for an ephemeral port; sockets are blocking by
    // default, so no explicit set_nonblocking(false) is needed.
    let listener = TcpListener::bind("127.0.0.1:0").expect("bind");
    let addr = listener.local_addr().expect("local_addr");
    let handle = thread::spawn(move || {
        let (mut stream, _) = listener.accept().expect("accept");
        let mut pending = Vec::new();
        let mut chunk = [0u8; 4096];
        for _ in 0..responses {
            loop {
                // Single scan for the request-head terminator (the original
                // scanned the buffer twice: once with `any`, once with
                // `position`).
                if let Some(pos) = pending.windows(4).position(|w| w == b"\r\n\r\n") {
                    // Drop the consumed request head; keep any pipelined bytes.
                    pending.drain(..pos + 4);
                    break;
                }
                let n = stream.read(&mut chunk).expect("read request");
                if n == 0 {
                    // Peer closed the connection early; stop serving.
                    return;
                }
                pending.extend_from_slice(&chunk[..n]);
            }
            stream.write_all(&response_bytes).expect("write response");
        }
    });
    (format!("http://{}", addr), handle)
}
41
42fn bench_get(iterations: usize) -> Duration {
43    let response = b"HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nhello".to_vec();
44    let (base_url, handle) = spawn_persistent_server(iterations, response);
45
46    let client = Client::builder()
47        .connection_policy(ConnectionPolicy::Reuse)
48        .build();
49    let mut session = client.session();
50
51    let start = Instant::now();
52    for i in 0..iterations {
53        let req = Request::get(format!("{}/g{}", base_url, i)).expect("request");
54        let resp = session.execute(req).expect("execute");
55        assert_eq!(resp.body, b"hello");
56    }
57    let elapsed = start.elapsed();
58    handle.join().expect("join server");
59    elapsed
60}
61
62fn bench_head(iterations: usize) -> Duration {
63    let response = b"HTTP/1.1 200 OK\r\nContent-Length: 5\r\nX-Bench: 1\r\n\r\n".to_vec();
64    let (base_url, handle) = spawn_persistent_server(iterations, response);
65
66    let client = Client::builder()
67        .connection_policy(ConnectionPolicy::Reuse)
68        .build();
69    let mut session = client.session();
70
71    let start = Instant::now();
72    for i in 0..iterations {
73        let req = Request::head(format!("{}/h{}", base_url, i)).expect("request");
74        let resp = session.execute(req).expect("execute");
75        assert!(resp.body.is_empty());
76    }
77    let elapsed = start.elapsed();
78    handle.join().expect("join server");
79    elapsed
80}
81
82fn bench_pipeline(total_requests: usize, pipeline_depth: usize) -> Duration {
83    let response = b"HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nhello".to_vec();
84    let (base_url, handle) = spawn_persistent_server(total_requests, response);
85
86    let client = Client::builder()
87        .connection_policy(ConnectionPolicy::Reuse)
88        .build();
89    let mut session = client.session();
90
91    let start = Instant::now();
92    let mut sent = 0usize;
93    while sent < total_requests {
94        let batch_size = (total_requests - sent).min(pipeline_depth);
95        let mut batch = Vec::with_capacity(batch_size);
96        for index in 0..batch_size {
97            batch.push(Request::get(format!("{}/p{}", base_url, sent + index)).expect("request"));
98        }
99        let responses = session.execute_pipelined(&batch).expect("execute");
100        assert_eq!(responses.len(), batch_size);
101        for response in responses {
102            assert_eq!(response.body, b"hello");
103        }
104        sent += batch_size;
105    }
106    let elapsed = start.elapsed();
107    handle.join().expect("join server");
108    elapsed
109}
110
/// Converts an iteration count and elapsed duration into operations/second.
///
/// NOTE(review): the name suggests formatting but this only computes the
/// rate; kept as-is because the print calls in `main` depend on it. A zero
/// duration yields `f64::INFINITY` per IEEE-754 division semantics.
fn fmt_ops(iter: usize, d: Duration) -> f64 {
    iter as f64 / d.as_secs_f64()
}
114
115fn main() {
116    let warmup = 2_000;
117    let iterations = 30_000;
118    let pipeline_iterations = 32_000;
119    let pipeline_depth = 8;
120
121    let _ = bench_get(warmup);
122    let _ = bench_head(warmup);
123    let _ = bench_pipeline(warmup, pipeline_depth);
124
125    let get_time = bench_get(iterations);
126    let head_time = bench_head(iterations);
127    let pipeline_time = bench_pipeline(pipeline_iterations, pipeline_depth);
128
129    println!(
130        "GET  {} ops in {:?} => {:.0} req/s",
131        iterations,
132        get_time,
133        fmt_ops(iterations, get_time)
134    );
135    println!(
136        "HEAD {} ops in {:?} => {:.0} req/s",
137        iterations,
138        head_time,
139        fmt_ops(iterations, head_time)
140    );
141    println!(
142        "PIPE {} ops in {:?} => {:.0} req/s (depth={})",
143        pipeline_iterations,
144        pipeline_time,
145        fmt_ops(pipeline_iterations, pipeline_time),
146        pipeline_depth
147    );
148}