// http_handle/perf_server.rs
1// SPDX-License-Identifier: AGPL-3.0-only
2// Copyright (c) 2023 - 2026 HTTP Handle
3
4//! High-performance async-first HTTP/1 server primitives.
5
6#[cfg(feature = "high-perf")]
7#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
8use crate::error::ServerError;
9#[cfg(feature = "high-perf")]
10#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
11use crate::request::Request;
12#[cfg(feature = "high-perf")]
13#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
14use crate::response::Response;
15#[cfg(feature = "high-perf")]
16#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
17use crate::server::{
18    ConnectionPolicy, KEEPALIVE_IDLE_TIMEOUT, MAX_KEEPALIVE_REQUESTS,
19    Server, build_response_for_request_with_metrics,
20};
21
22#[cfg(feature = "high-perf")]
23#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
24use std::collections::HashMap;
25#[cfg(feature = "high-perf")]
26#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
27use std::path::{Path, PathBuf};
28#[cfg(feature = "high-perf")]
29#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
30use std::sync::atomic::{AtomicUsize, Ordering};
31#[cfg(feature = "high-perf")]
32#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
33use std::sync::{Arc, OnceLock, RwLock};
34#[cfg(feature = "high-perf")]
35#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
36use std::time::UNIX_EPOCH;
37
38#[cfg(feature = "high-perf")]
39#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
40use tokio::io::{AsyncReadExt, AsyncWriteExt};
41#[cfg(feature = "high-perf")]
42#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
43use tokio::sync::Semaphore;
44#[cfg(feature = "high-perf")]
45#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
46use tokio::time::{Duration, timeout};
47
/// Runtime limits for the high-performance server mode.
///
/// # Examples
///
/// ```rust
/// use http_handle::perf_server::PerfLimits;
/// let limits = PerfLimits::default();
/// assert!(limits.max_inflight > 0);
/// ```
///
/// # Panics
///
/// This type does not panic.
#[cfg(feature = "high-perf")]
#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
#[derive(Clone, Copy, Debug)]
pub struct PerfLimits {
    /// Maximum number of concurrently processed connections.
    pub max_inflight: usize,
    /// Maximum number of queued connections waiting for a slot.
    pub max_queue: usize,
    /// Minimum file size (bytes) for sendfile fast-path attempts.
    pub sendfile_threshold_bytes: u64,
}
72
#[cfg(feature = "high-perf")]
#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
impl Default for PerfLimits {
    /// Conservative defaults: 256 concurrent connections, a 1024-deep
    /// overflow queue, and a 64 KiB sendfile threshold.
    fn default() -> Self {
        const DEFAULT_INFLIGHT: usize = 256;
        const DEFAULT_QUEUE: usize = 1024;
        const DEFAULT_SENDFILE_THRESHOLD_BYTES: u64 = 64 * 1024;
        Self {
            max_inflight: DEFAULT_INFLIGHT,
            max_queue: DEFAULT_QUEUE,
            sendfile_threshold_bytes: DEFAULT_SENDFILE_THRESHOLD_BYTES,
        }
    }
}
84
/// Builds a multi-threaded Tokio runtime and runs [`start_high_perf`]
/// on it.
///
/// Use this on multi-core hosts where calling [`start_high_perf`] from
/// a `current_thread` runtime would funnel every connection through a
/// single OS thread (bombardier load tests showed the sync
/// `Server::start` outperforming that configuration 3× under
/// 256-connection keep-alive for exactly this reason).
///
/// `worker_threads = None` lets Tokio pick (defaults to logical CPU
/// count); pass `Some(n)` to pin the worker count for reproducible
/// benchmarking or container CPU limits. Owning the runtime internally
/// means callers need neither the `rt-multi-thread` tokio feature nor
/// any reasoning about runtime-flavour mismatches between the bind
/// site and the accept loop.
///
/// # Examples
///
/// ```rust,no_run
/// use http_handle::Server;
/// use http_handle::perf_server::{start_high_perf_multi_thread, PerfLimits};
///
/// let server = Server::new("127.0.0.1:8080", ".");
/// // Default worker count (one per logical core).
/// let _ = start_high_perf_multi_thread(server, PerfLimits::default(), None);
/// ```
///
/// # Errors
///
/// Returns an error when the multi-thread runtime cannot be built or
/// the underlying [`start_high_perf`] accept loop fails.
///
/// # Panics
///
/// This function does not panic.
#[cfg(feature = "high-perf-multi-thread")]
#[cfg_attr(docsrs, doc(cfg(feature = "high-perf-multi-thread")))]
pub fn start_high_perf_multi_thread(
    server: Server,
    limits: PerfLimits,
    worker_threads: Option<usize>,
) -> Result<(), ServerError> {
    let mut builder = tokio::runtime::Builder::new_multi_thread();
    let _ = builder.enable_all();
    if let Some(count) = worker_threads {
        // Clamp to at least one worker.
        let _ = builder.worker_threads(count.max(1));
    }
    builder
        .build()
        .map_err(ServerError::from)?
        .block_on(start_high_perf(server, limits))
}
136
/// Starts an async-first accept loop with adaptive backpressure.
///
/// This path prioritizes throughput-per-core by avoiding a
/// thread-per-connection model, enforcing queue limits, and using a
/// sendfile fast-path for large static files. Each accepted connection
/// takes a permit from a semaphore sized by
/// [`PerfLimits::max_inflight`]; when none is free the connection
/// queues briefly (bounded by [`PerfLimits::max_queue`], with a 20 ms
/// grace wait) and is dropped if no slot opens.
///
/// # Examples
///
/// ```rust,no_run
/// use http_handle::perf_server::{start_high_perf, PerfLimits};
/// use http_handle::Server;
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let server = Server::new("127.0.0.1:8080", ".");
/// let _ = start_high_perf(server, PerfLimits::default()).await;
/// # }
/// ```
///
/// # Errors
///
/// Returns an error when socket binding or accept fails.
///
/// # Panics
///
/// This function does not panic.
#[cfg(feature = "high-perf")]
#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
pub async fn start_high_perf(
    server: Server,
    limits: PerfLimits,
) -> Result<(), ServerError> {
    let listener = tokio::net::TcpListener::bind(server.address())
        .await
        .map_err(ServerError::from)?;

    let inflight = Arc::new(Semaphore::new(limits.max_inflight.max(1)));
    let queued = Arc::new(AtomicUsize::new(0));

    loop {
        let (stream, _addr) =
            listener.accept().await.map_err(ServerError::from)?;

        // Fast path: a free permit means no queueing at all.
        let permit = match inflight.clone().try_acquire_owned() {
            Ok(permit) => permit,
            Err(_) => {
                let queued_now =
                    queued.fetch_add(1, Ordering::SeqCst) + 1;
                if queued_now > limits.max_queue {
                    // Queue full: shed load by dropping the connection.
                    let _ = queued.fetch_sub(1, Ordering::SeqCst);
                    continue;
                }
                // Bounded wait for a permit; give up after 20 ms so a
                // saturated server sheds load instead of accumulating it.
                let acquired = timeout(
                    Duration::from_millis(20),
                    inflight.clone().acquire_owned(),
                )
                .await;
                let _ = queued.fetch_sub(1, Ordering::SeqCst);
                match acquired {
                    Ok(Ok(permit)) => permit,
                    _ => continue,
                }
            }
        };

        let server_clone = server.clone();
        let limits_clone = limits;
        // Detached per-connection task; the inflight permit is released
        // when the task finishes and `_permit` is dropped.
        drop(tokio::spawn(async move {
            let _permit = permit;
            let _ = handle_async_connection(
                stream,
                &server_clone,
                &limits_clone,
            )
            .await;
        }));
    }
}
213
#[cfg(feature = "high-perf")]
#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
/// Drives one accepted TCP connection through the HTTP/1.1 keep-alive
/// request/response loop.
///
/// Reads at most `MAX_KEEPALIVE_REQUESTS` requests from `stream`,
/// answering each via the static-file fast path when possible and the
/// generic `build_response_for_request_with_metrics` path otherwise.
///
/// Returns `Ok(())` on orderly shutdown (peer FIN, idle timeout, read
/// or write failure, or `Connection: close`); returns `Err` when a
/// request head cannot be parsed or the fast path fails mid-response.
///
/// NOTE(review): each request is parsed from a single `read()` into
/// `buffer`. A request head that arrives split across TCP segments will
/// fail to parse and drop the connection — confirm this trade-off is
/// acceptable for the targeted clients.
async fn handle_async_connection(
    mut stream: tokio::net::TcpStream,
    server: &Server,
    limits: &PerfLimits,
) -> Result<(), ServerError> {
    // Disable Nagle so header+body are not held by the kernel waiting
    // for a delayed ACK on small payloads.
    let _ = stream.set_nodelay(true);
    let request_timeout =
        server.request_timeout().unwrap_or(Duration::from_secs(30));

    // HTTP/1.1 persistent-connection loop. The first request gets the
    // configured per-request timeout; subsequent requests on the same
    // TCP connection get the tighter idle timeout so an inactive client
    // is reaped promptly without holding the inflight permit. Re-uses
    // ConnectionPolicy from the sync path so HTTP/1.0 and explicit
    // `Connection: close` semantics match across server entry points.
    // Read buffer hoisted out of the loop: dhat profiling showed this
    // 16 KiB allocation at the top of every iteration was the single
    // largest source of per-request heap pressure (~41 % of total
    // allocated bytes across 1024 sequential roundtrips). Reusing it
    // drops 16 KiB × N requests of allocator traffic on every kept-
    // alive connection. Each iteration parses `&buffer[..read]` so
    // stale bytes from a prior iteration are never observed.
    let mut buffer = vec![0_u8; 16 * 1024];
    for i in 0..MAX_KEEPALIVE_REQUESTS {
        let read_deadline = if i == 0 {
            request_timeout
        } else {
            KEEPALIVE_IDLE_TIMEOUT
        };
        let read = match timeout(
            read_deadline,
            stream.read(&mut buffer),
        )
        .await
        {
            Ok(Ok(0)) => return Ok(()), // peer FIN
            Ok(Ok(n)) => n,
            Ok(Err(_)) | Err(_) => return Ok(()), // read error or idle timeout
        };

        // A malformed head propagates `Err` here, ending the connection
        // without a 400 response (the spawn site discards the error).
        let request = parse_request_from_bytes(&buffer[..read])?;
        let policy = ConnectionPolicy::from_request(&request);

        if try_send_static_file_fast_path(
            &mut stream,
            server,
            &request,
            limits.sendfile_threshold_bytes,
            policy,
        )
        .await?
        {
            if policy == ConnectionPolicy::Close {
                return Ok(());
            }
            continue;
        }

        // Fallback: full dynamic handler shared with the sync path.
        let mut response =
            build_response_for_request_with_metrics(server, &request);
        response.set_connection_header(policy.header_value());
        if send_response_async(&mut stream, &response).await.is_err() {
            return Ok(());
        }
        if policy == ConnectionPolicy::Close {
            return Ok(());
        }
    }
    Ok(())
}
288
#[cfg(feature = "high-perf")]
#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
/// Parses a complete HTTP/1.x request head (request line + headers,
/// terminated by a blank line) from raw bytes. Header names are
/// lowercased; any body bytes after the blank line are ignored.
fn parse_request_from_bytes(
    bytes: &[u8],
) -> Result<Request, ServerError> {
    let text = std::str::from_utf8(bytes).map_err(|_| {
        ServerError::invalid_request("request is not valid UTF-8")
    })?;
    let Some((head, _body)) = text.split_once("\r\n\r\n") else {
        return Err(ServerError::invalid_request(
            "incomplete HTTP request head",
        ));
    };

    let mut lines = head.lines();
    let Some(request_line) = lines.next() else {
        return Err(ServerError::invalid_request("missing request line"));
    };

    // Pull the three whitespace-separated request-line tokens, each
    // with its own "missing …" diagnostic.
    let mut tokens = request_line.split_whitespace();
    let mut next_token = |missing: &'static str| {
        tokens
            .next()
            .map(str::to_string)
            .ok_or_else(|| ServerError::invalid_request(missing))
    };
    let method = next_token("missing method")?;
    let path = next_token("missing path")?;
    let version = next_token("missing HTTP version")?;

    let mut headers: Vec<(String, String)> = Vec::with_capacity(8);
    for line in lines {
        if line.is_empty() {
            break;
        }
        // SIMD ':' search via memchr; same rationale as src/request.rs.
        // Lines without a colon are silently skipped.
        let Some(colon) = memchr::memchr(b':', line.as_bytes()) else {
            continue;
        };
        let name = line[..colon].trim().to_ascii_lowercase();
        let value = line[colon + 1..].trim().to_string();
        headers.push((name, value));
    }

    Ok(Request {
        method,
        path,
        version,
        headers,
    })
}
345
#[cfg(feature = "high-perf")]
#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
/// Serialises `response` to `stream`: status line, headers (adding
/// `Content-Length` and `Connection: close` only when absent), blank
/// line, then the body, followed by a flush.
async fn send_response_async(
    stream: &mut tokio::net::TcpStream,
    response: &Response,
) -> Result<(), ServerError> {
    use std::fmt::Write as _;
    // One pre-sized head buffer; `write!` appends in place, so no
    // per-header-line temporary String is allocated (mirrors P0.A on
    // the sync path).
    let mut head = String::with_capacity(256);
    let _ = write!(
        &mut head,
        "HTTP/1.1 {} {}\r\n",
        response.status_code, response.status_text
    );

    let mut saw_content_length = false;
    let mut saw_connection = false;
    for (name, value) in &response.headers {
        saw_content_length |= name.eq_ignore_ascii_case("content-length");
        saw_connection |= name.eq_ignore_ascii_case("connection");
        let _ = write!(&mut head, "{}: {}\r\n", name, value);
    }
    if !saw_content_length {
        let _ = write!(
            &mut head,
            "Content-Length: {}\r\n",
            response.body.len()
        );
    }
    if !saw_connection {
        head.push_str("Connection: close\r\n");
    }
    head.push_str("\r\n");

    stream
        .write_all(head.as_bytes())
        .await
        .map_err(ServerError::from)?;
    if !response.body.is_empty() {
        stream
            .write_all(&response.body)
            .await
            .map_err(ServerError::from)?;
    }
    stream.flush().await.map_err(ServerError::from)?;
    Ok(())
}
399
/// Maximum number of pre-serialised static-file responses kept in the
/// in-process cache. With a 64 KiB body cap (the default
/// `sendfile_threshold_bytes`) the worst-case footprint is bounded at
/// ~16 MiB across all entries.
#[cfg(feature = "high-perf")]
const RESPONSE_CACHE_MAX: usize = 256;

/// Pre-serialised stable head + body for a static file. The
/// per-request `Connection` header and the headers-end CRLF are NOT
/// included — they're written in a small dynamic suffix on each hit.
/// Status line, `Content-Length`, `Content-Type`, `Accept-Ranges`,
/// `Content-Encoding` (if a precompressed asset was selected), `Vary`,
/// and `Cache-Control` (immutable assets) are all baked in.
#[cfg(feature = "high-perf")]
#[derive(Debug)]
struct CachedResponse {
    // Arc fields so cache hits hand out O(1) refcount clones instead
    // of copying header/body bytes.
    head_prefix: Arc<Vec<u8>>,
    body: Arc<Vec<u8>>,
}

/// Cache key. A touch or replace of the file changes mtime/len, so the
/// stale entry is simply never hit again (it ages out via eviction).
#[cfg(feature = "high-perf")]
type ResponseCacheKey = (PathBuf, u64, u64); // (canonical_path, mtime_secs, file_len)

/// Map type behind the process-wide cache; `RwLock` so concurrent hits
/// share a read lock and only misses take the write lock.
#[cfg(feature = "high-perf")]
type ResponseCache =
    RwLock<HashMap<ResponseCacheKey, Arc<CachedResponse>>>;

/// Process-wide response cache, lazily initialised by `response_cache()`.
#[cfg(feature = "high-perf")]
static RESPONSE_CACHE: OnceLock<ResponseCache> = OnceLock::new();
429
#[cfg(feature = "high-perf")]
/// Returns the process-wide response cache, initialising it on first use.
fn response_cache() -> &'static ResponseCache {
    RESPONSE_CACHE.get_or_init(ResponseCache::default)
}
434
#[cfg(feature = "high-perf")]
/// Seconds-since-epoch modification time for cache keying. Files whose
/// mtime is unavailable or precedes the epoch key as 0.
fn mtime_secs(metadata: &std::fs::Metadata) -> u64 {
    let Ok(modified) = metadata.modified() else {
        return 0;
    };
    modified
        .duration_since(UNIX_EPOCH)
        .map(|elapsed| elapsed.as_secs())
        .unwrap_or(0)
}
443
/// Build the stable head prefix (status line + cacheable headers).
///
/// The output is everything up to but NOT including the per-request
/// `Connection: ...\r\n\r\n` suffix. `is_immutable` and `encoding`
/// shape the headers, so they're folded into the cache key implicitly
/// via the `(path, mtime, len)` tuple — a different precompressed
/// candidate or a different file lives at a different `serving_path`.
#[cfg(feature = "high-perf")]
fn build_head_prefix(
    len: u64,
    content_type: &str,
    encoding: Option<&'static str>,
    is_immutable: bool,
) -> Vec<u8> {
    use std::fmt::Write as _;
    let mut head = String::with_capacity(192);
    let _ = write!(
        &mut head,
        "HTTP/1.1 200 OK\r\nContent-Length: {}\r\nContent-Type: {}\r\nAccept-Ranges: bytes\r\n",
        len, content_type
    );
    if let Some(enc) = encoding {
        let _ = write!(
            &mut head,
            "Content-Encoding: {}\r\nVary: Accept-Encoding\r\n",
            enc
        );
    }
    if is_immutable {
        head.push_str(
            "Cache-Control: public, max-age=31536000, immutable\r\n",
        );
    }
    head.into_bytes()
}
476
/// Insert a freshly-built `CachedResponse` under cap-based eviction
/// that mirrors the existing ETag LRU pattern in `src/server.rs`.
#[cfg(feature = "high-perf")]
fn insert_cached_response(
    key: ResponseCacheKey,
    value: Arc<CachedResponse>,
) {
    // A poisoned lock means a panicking writer; skip caching rather
    // than propagate — the cache is purely an optimisation.
    let Ok(mut cache) = response_cache().write() else {
        return;
    };
    if cache.len() >= RESPONSE_CACHE_MAX {
        // Crude eviction: drop an arbitrary quarter of the entries
        // when the cap is reached. High-churn workloads pay a periodic
        // pause; typical static-content serving stays inside the cap.
        // Mirrors `src/server.rs::compute_etag` so the two caches share
        // the same operational shape.
        let victims: Vec<ResponseCacheKey> = cache
            .keys()
            .take(RESPONSE_CACHE_MAX / 4)
            .cloned()
            .collect();
        for victim in &victims {
            let _ = cache.remove(victim);
        }
    }
    let _ = cache.insert(key, value);
}
502
#[cfg(feature = "high-perf")]
#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
/// Serves `request` straight from the filesystem when it is a simple
/// full-file `GET`/`HEAD` for a resolvable static path.
///
/// Returns `Ok(true)` when the complete response was written to
/// `stream`; `Ok(false)` when the request does not qualify or the file
/// raced away before any bytes were written (the caller must fall back
/// to the generic handler); `Err` only for failures after bytes may
/// already be on the wire.
async fn try_send_static_file_fast_path(
    stream: &mut tokio::net::TcpStream,
    server: &Server,
    request: &Request,
    sendfile_threshold_bytes: u64,
    policy: ConnectionPolicy,
) -> Result<bool, ServerError> {
    // Only whole-file GET/HEAD qualifies; Range needs the full handler.
    if request.method() != "GET" && request.method() != "HEAD" {
        return Ok(false);
    }
    if request.header("range").is_some() {
        return Ok(false);
    }

    let Some(file_path) = resolve_static_path(
        server.document_root(),
        server.canonical_document_root(),
        request.path(),
    ) else {
        return Ok(false);
    };

    let (serving_path, encoding) =
        negotiate_precompressed(&file_path, request);
    // The file can vanish between path resolution and this stat (racy
    // deploys, tmp cleanup). Nothing is on the wire yet, so fall back
    // to the generic handler instead of erroring out — propagating an
    // error here would kill the whole keep-alive connection.
    let Ok(metadata) = std::fs::metadata(&serving_path) else {
        return Ok(false);
    };
    let len = metadata.len();

    // Per-request dynamic suffix: the only header that can't be
    // cached because it depends on the keep-alive policy. The trailing
    // CRLF closes the header section so the body can follow.
    let connection_suffix =
        format!("Connection: {}\r\n\r\n", policy.header_value());

    // Above-threshold (large-file) path skips the cache entirely.
    // Caching megabyte-class bodies would defeat the OOM guard; the
    // sendfile path also bypasses any user-space copy, so caching the
    // body would be wasted work even ignoring memory cost. Build the
    // head fresh, write it, then delegate to sendfile / async copy.
    if len >= sendfile_threshold_bytes {
        let head_prefix = build_head_prefix(
            len,
            content_type_for_path(&file_path),
            encoding,
            is_probably_immutable_asset(request.path()),
        );
        stream
            .write_all(&head_prefix)
            .await
            .map_err(ServerError::from)?;
        stream
            .write_all(connection_suffix.as_bytes())
            .await
            .map_err(ServerError::from)?;

        if request.method() == "HEAD" {
            stream.flush().await.map_err(ServerError::from)?;
            return Ok(true);
        }

        #[cfg(unix)]
        {
            if try_sendfile_unix(stream, &serving_path, len).await? {
                stream.flush().await.map_err(ServerError::from)?;
                return Ok(true);
            }
        }
        // Above-threshold path that didn't sendfile (non-unix, or
        // sendfile rejected before sending anything): defer to the
        // async file copy so the reactor isn't pinned reading a
        // multi-MiB file synchronously.
        let mut file = tokio::fs::File::open(&serving_path)
            .await
            .map_err(ServerError::from)?;
        let _bytes_copied = tokio::io::copy(&mut file, stream)
            .await
            .map_err(ServerError::from)?;
        stream.flush().await.map_err(ServerError::from)?;
        return Ok(true);
    }

    // Small-file fast path with response cache. Key by
    // (canonical_serving_path, mtime, len) so a touch / replace of
    // the file invalidates without an extra stat() — the metadata
    // we already needed becomes the cache key. Hits skip BOTH the
    // syscall to read the file AND building the stable head section.
    // Misses do exactly the same work as the pre-cache path plus one
    // insert.
    let mtime = mtime_secs(&metadata);
    let cache_key: ResponseCacheKey =
        (serving_path.clone(), mtime, len);

    let cached: Option<Arc<CachedResponse>> = response_cache()
        .read()
        .ok()
        .and_then(|read| read.get(&cache_key).cloned());

    let cached_response = match cached {
        Some(arc) => arc,
        None => {
            let head_prefix = Arc::new(build_head_prefix(
                len,
                content_type_for_path(&file_path),
                encoding,
                is_probably_immutable_asset(request.path()),
            ));
            // Same race as the stat above: if the read fails, no bytes
            // are on the wire yet, so the generic handler can still
            // produce a proper error response instead of the
            // connection being torn down.
            let Ok(bytes) = std::fs::read(&serving_path) else {
                return Ok(false);
            };
            let response = Arc::new(CachedResponse {
                head_prefix,
                body: Arc::new(bytes),
            });
            insert_cached_response(cache_key, Arc::clone(&response));
            response
        }
    };

    // Coalesce head_prefix + per-request connection_suffix into one
    // buffer, then fold the body in for the GET case so the entire
    // response ships in a single `write_all`. Splitting into multiple
    // `write_all`s produces extra syscalls per request — meaningful
    // overhead under multi-thread bombardier load. The suffix is
    // <32 B and the head prefix <~256 B for typical paths.
    let mut prelude = Vec::with_capacity(
        cached_response.head_prefix.len() + connection_suffix.len(),
    );
    prelude.extend_from_slice(&cached_response.head_prefix);
    prelude.extend_from_slice(connection_suffix.as_bytes());

    if request.method() == "HEAD" {
        stream
            .write_all(&prelude)
            .await
            .map_err(ServerError::from)?;
        stream.flush().await.map_err(ServerError::from)?;
        return Ok(true);
    }

    // Bodies here are capped at `sendfile_threshold_bytes`, so the
    // extra copy into `prelude` is cheap relative to the syscall +
    // reactor wakeup avoided by emitting one `write_all`.
    prelude.extend_from_slice(&cached_response.body);
    stream
        .write_all(&prelude)
        .await
        .map_err(ServerError::from)?;
    stream.flush().await.map_err(ServerError::from)?;
    Ok(true)
}
658
#[cfg(feature = "high-perf")]
#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
/// Maps a request path to a canonical on-disk file under `root`.
///
/// `..` segments are collapsed before canonicalisation, and the
/// canonical result must stay inside `canonical_root` (this is the
/// traversal guard — symlinks escaping the root also fail the
/// `starts_with` check). Directories resolve to their `index.html`
/// when present. Returns `None` whenever the path cannot be served.
fn resolve_static_path(
    root: &Path,
    canonical_root: &Path,
    request_path: &str,
) -> Option<PathBuf> {
    let relative = request_path.trim_start_matches('/');
    let mut candidate = root.to_path_buf();

    if relative.is_empty() {
        candidate.push("index.html");
    } else {
        for segment in relative.split('/') {
            if segment == ".." {
                candidate.pop();
            } else {
                candidate.push(segment);
            }
        }
    }

    let canonical = std::fs::canonicalize(&candidate).ok()?;
    if !canonical.starts_with(canonical_root) {
        return None;
    }

    let meta = std::fs::metadata(&canonical).ok()?;
    if meta.is_file() {
        return Some(canonical);
    }
    if meta.is_dir() {
        let index = canonical.join("index.html");
        if std::fs::metadata(&index).ok()?.is_file() {
            return Some(index);
        }
    }
    None
}
698
#[cfg(feature = "high-perf")]
#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
/// Picks a precompressed sibling (`.br`, `.zst`, `.gz`) of `path` when
/// the client accepts the matching encoding and the sibling exists.
///
/// Preference order is brotli, then zstd, then gzip. Returns the path
/// to serve plus the `Content-Encoding` token to advertise (`None`
/// when the original file is served).
fn negotiate_precompressed(
    path: &Path,
    request: &Request,
) -> (PathBuf, Option<&'static str>) {
    let Some(accept) = request.header("accept-encoding") else {
        return (path.to_path_buf(), None);
    };

    // Token-wise matching. A plain substring `contains("br")` would
    // also fire on unrelated tokens and — worse — on explicitly
    // declined entries such as `br;q=0`, so parse each comma-separated
    // coding and honour a zero quality value as "not acceptable".
    let accepts = |names: &[&str]| -> bool {
        accept.split(',').any(|entry| {
            let mut parts = entry.split(';');
            let coding = parts.next().unwrap_or("").trim();
            if !names.iter().any(|n| coding.eq_ignore_ascii_case(n)) {
                return false;
            }
            // Declined when a q parameter parses to 0 (q=0, q=0.0, ...).
            parts.all(|param| {
                let param = param.trim();
                match param
                    .strip_prefix("q=")
                    .or_else(|| param.strip_prefix("Q="))
                {
                    Some(q) => q.parse::<f32>().map_or(true, |v| v > 0.0),
                    None => true,
                }
            })
        })
    };

    // (accepted token spellings, file suffix, Content-Encoding value);
    // "zst" is kept alongside "zstd" for compatibility with the
    // previous matching behaviour.
    let candidates: [(&[&str], &str, &'static str); 3] = [
        (&["br"][..], ".br", "br"),
        (&["zstd", "zst"][..], ".zst", "zstd"),
        (&["gzip"][..], ".gz", "gzip"),
    ];
    for (names, suffix, token) in candidates {
        if !accepts(names) {
            continue;
        }
        // Append the suffix to the full file name (`foo.js` →
        // `foo.js.br`) without a lossy `display()` round-trip on
        // non-UTF-8 paths.
        let mut os = path.as_os_str().to_os_string();
        os.push(suffix);
        let candidate = PathBuf::from(os);
        if candidate.is_file() {
            return (candidate, Some(token));
        }
    }

    (path.to_path_buf(), None)
}
739
#[cfg(feature = "high-perf")]
#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
/// Maps a file extension to its MIME type; unknown or missing
/// extensions fall back to `application/octet-stream`. Matching is
/// case-sensitive (extensions are expected lowercase on disk).
fn content_type_for_path(path: &Path) -> &'static str {
    let extension = path.extension().and_then(|ext| ext.to_str());
    match extension {
        Some("html" | "htm") => "text/html",
        Some("css") => "text/css",
        Some("js" | "mjs") => "application/javascript",
        Some("json") => "application/json",
        Some("wasm") => "application/wasm",
        Some("svg") => "image/svg+xml",
        Some("png") => "image/png",
        Some("jpg" | "jpeg") => "image/jpeg",
        Some("gif") => "image/gif",
        None | Some(_) => "application/octet-stream",
    }
}
760
#[cfg(feature = "high-perf")]
#[cfg_attr(docsrs, doc(cfg(feature = "high-perf")))]
/// Heuristic: does the final path segment look like a content-hashed
/// asset (`app-abcdef12.js`)? True when the last `-`-delimited chunk
/// of the file stem is 8+ hex characters.
///
/// NOTE(review): a dash is not required — an all-hex stem of 8+ chars
/// (e.g. `deadbeef.js`) also matches; confirm that is intended.
fn is_probably_immutable_asset(path: &str) -> bool {
    let file_name = path.rsplit('/').next().unwrap_or(path);
    file_name.rsplit_once('.').map_or(false, |(stem, _ext)| {
        let hash = stem.rsplit('-').next().unwrap_or(stem);
        hash.len() >= 8 && hash.bytes().all(|b| b.is_ascii_hexdigit())
    })
}
773
#[cfg(all(
    feature = "high-perf",
    any(target_os = "linux", target_os = "android")
))]
/// Streams `len` bytes of `path` to `stream` via `sendfile(2)`.
///
/// Returns `Ok(false)` ONLY when no bytes have been written, so the
/// caller can still fall back to a userspace copy. Once any bytes are
/// on the wire, a failure must NOT report `Ok(false)` — the fallback
/// would restart the body from offset zero and corrupt the response —
/// so mid-transfer errors (including a file truncated below the
/// already-advertised `Content-Length`) are returned as `Err` to abort
/// the connection.
async fn try_sendfile_unix(
    stream: &tokio::net::TcpStream,
    path: &Path,
    len: u64,
) -> Result<bool, ServerError> {
    use std::os::fd::AsRawFd;
    // File::open is a blocking syscall; run it on the blocking pool so
    // the Tokio reactor thread is never stalled opening files.
    let path_owned = path.to_path_buf();
    let file = tokio::task::spawn_blocking(move || {
        std::fs::File::open(path_owned)
    })
    .await
    .map_err(|e| ServerError::TaskFailed(e.to_string()))?
    .map_err(ServerError::from)?;

    let mut offset: libc::off_t = 0;
    let mut sent: u64 = 0;
    while sent < len {
        let remaining = (len - sent) as usize;
        let chunk = remaining.min(1 << 20);
        // Safety: both fds are owned for the duration of this call —
        // `stream` is borrowed from the caller (the TcpStream lives on
        // the stack frame above) and `file` is the local std::fs::File
        // we just opened. `offset` is a local `libc::off_t` the kernel
        // advances. `chunk` is bounded above by `len - sent` and below
        // by 1 (the loop guard `sent < len`). Negative rc and EAGAIN
        // are handled below.
        #[allow(unsafe_code)]
        let rc = unsafe {
            libc::sendfile(
                stream.as_raw_fd(),
                file.as_raw_fd(),
                &mut offset,
                chunk,
            )
        };
        if rc < 0 {
            let err = std::io::Error::last_os_error();
            if matches!(err.raw_os_error(), Some(libc::EAGAIN)) {
                // The socket is non-blocking under tokio; await
                // writable readiness instead of spinning on
                // `yield_now`, which busy-loops a reactor worker
                // while the peer drains its receive window.
                stream.writable().await.map_err(ServerError::from)?;
                continue;
            }
            if sent == 0 {
                // Nothing written yet: safe for the caller to fall
                // back to the async copy path.
                return Ok(false);
            }
            return Err(ServerError::from(err));
        }
        if rc == 0 {
            // EOF before `len` bytes: the file shrank under us. The
            // advertised Content-Length can no longer be honoured, so
            // fail the connection rather than desync keep-alive
            // framing with a short body.
            if sent == 0 {
                return Ok(false);
            }
            return Err(ServerError::from(std::io::Error::new(
                std::io::ErrorKind::UnexpectedEof,
                "file truncated during sendfile",
            )));
        }
        sent = sent.saturating_add(rc as u64);
    }
    Ok(true)
}
832
#[cfg(all(
    feature = "high-perf",
    unix,
    not(any(target_os = "linux", target_os = "android"))
))]
/// Stub for unix targets without the Linux `sendfile(2)` shim: always
/// reports "not sent" so the caller falls through to the async
/// userspace copy path.
async fn try_sendfile_unix(
    _stream: &tokio::net::TcpStream,
    _path: &Path,
    _len: u64,
) -> Result<bool, ServerError> {
    Ok(false)
}
845
846#[cfg(all(test, feature = "high-perf"))]
847mod tests {
848    use super::*;
849    use tokio::io::AsyncReadExt;
850    use tokio::io::AsyncWriteExt;
851    use tokio::time::Duration;
852
853    #[test]
854    fn immutable_asset_detection() {
855        assert!(is_probably_immutable_asset("/assets/app-abcdef12.js"));
856        assert!(!is_probably_immutable_asset("/assets/app.js"));
857    }
858
859    #[test]
860    fn parse_request_from_bytes_parses_headers() {
861        let request = parse_request_from_bytes(
862            b"GET / HTTP/1.1\r\nHost: localhost\r\nAccept-Encoding: gzip\r\n\r\n",
863        )
864        .expect("parse");
865        assert_eq!(request.method(), "GET");
866        assert_eq!(request.path(), "/");
867        assert_eq!(request.header("host"), Some("localhost"));
868        assert_eq!(request.header("accept-encoding"), Some("gzip"));
869    }
870
871    #[test]
872    fn parse_request_from_bytes_rejects_invalid_inputs() {
873        assert!(parse_request_from_bytes(b"\xFF").is_err());
874        assert!(
875            parse_request_from_bytes(b"GET / HTTP/1.1\r\n").is_err()
876        );
877        assert!(
878            parse_request_from_bytes(b"/ HTTP/1.1\r\n\r\n").is_err()
879        );
880        assert!(parse_request_from_bytes(b"\r\n\r\n").is_err());
881        assert!(parse_request_from_bytes(b"GET\r\n\r\n").is_err());
882        assert!(parse_request_from_bytes(b"GET / \r\n\r\n").is_err());
883    }
884
885    #[test]
886    fn resolve_static_path_and_content_type_behave() {
887        let dir = tempfile::tempdir().expect("tempdir");
888        let root = dir.path();
889        std::fs::write(root.join("index.html"), "ok").expect("write");
890        std::fs::create_dir(root.join("nested")).expect("mkdir");
891        std::fs::write(root.join("nested").join("index.html"), "n")
892            .expect("write");
893        let canonical_root =
894            std::fs::canonicalize(root).expect("canonical");
895
896        let p1 = resolve_static_path(root, &canonical_root, "/")
897            .expect("root index");
898        assert!(p1.ends_with("index.html"));
899        let p2 = resolve_static_path(root, &canonical_root, "/nested")
900            .expect("nested index");
901        assert!(p2.ends_with("nested/index.html"));
902        assert!(
903            resolve_static_path(
904                root,
905                &canonical_root,
906                "/../../etc/passwd"
907            )
908            .is_none()
909        );
910
911        assert_eq!(
912            content_type_for_path(Path::new("a.html")),
913            "text/html"
914        );
915        assert_eq!(
916            content_type_for_path(Path::new("a.css")),
917            "text/css"
918        );
919        assert_eq!(
920            content_type_for_path(Path::new("a.js")),
921            "application/javascript"
922        );
923        assert_eq!(
924            content_type_for_path(Path::new("a.bin")),
925            "application/octet-stream"
926        );
927        assert_eq!(
928            content_type_for_path(Path::new("a.json")),
929            "application/json"
930        );
931        assert_eq!(
932            content_type_for_path(Path::new("a.wasm")),
933            "application/wasm"
934        );
935        assert_eq!(
936            content_type_for_path(Path::new("a.svg")),
937            "image/svg+xml"
938        );
939        assert_eq!(
940            content_type_for_path(Path::new("a.png")),
941            "image/png"
942        );
943        assert_eq!(
944            content_type_for_path(Path::new("a.jpg")),
945            "image/jpeg"
946        );
947        assert_eq!(
948            content_type_for_path(Path::new("a.gif")),
949            "image/gif"
950        );
951    }
952
953    #[test]
954    fn negotiate_precompressed_prefers_br_then_zstd_then_gzip() {
955        let dir = tempfile::tempdir().expect("tempdir");
956        let base = dir.path().join("index.html");
957        std::fs::write(&base, "x").expect("base");
958
959        let headers =
960            vec![("accept-encoding".to_string(), "gzip".to_string())];
961        let req_gz = Request {
962            method: "GET".to_string(),
963            path: "/index.html".to_string(),
964            version: "HTTP/1.1".to_string(),
965            headers,
966        };
967        std::fs::write(format!("{}.gz", base.display()), "x")
968            .expect("gz");
969        let (p, e) = negotiate_precompressed(&base, &req_gz);
970        assert!(p.ends_with("index.html.gz"));
971        assert_eq!(e, Some("gzip"));
972
973        std::fs::write(format!("{}.zst", base.display()), "x")
974            .expect("zst");
975        let headers = vec![(
976            "accept-encoding".to_string(),
977            "zstd,gzip".to_string(),
978        )];
979        let req_zst = Request {
980            method: "GET".to_string(),
981            path: "/index.html".to_string(),
982            version: "HTTP/1.1".to_string(),
983            headers,
984        };
985        let (p, e) = negotiate_precompressed(&base, &req_zst);
986        assert!(p.ends_with("index.html.zst"));
987        assert_eq!(e, Some("zstd"));
988
989        std::fs::write(format!("{}.br", base.display()), "x")
990            .expect("br");
991        let headers = vec![(
992            "accept-encoding".to_string(),
993            "br,zstd,gzip".to_string(),
994        )];
995        let req_br = Request {
996            method: "GET".to_string(),
997            path: "/index.html".to_string(),
998            version: "HTTP/1.1".to_string(),
999            headers,
1000        };
1001        let (p, e) = negotiate_precompressed(&base, &req_br);
1002        assert!(p.ends_with("index.html.br"));
1003        assert_eq!(e, Some("br"));
1004
1005        let headers =
1006            vec![("accept-encoding".to_string(), "gzip".to_string())];
1007        let req_gz_missing = Request {
1008            method: "GET".to_string(),
1009            path: "/index.html".to_string(),
1010            version: "HTTP/1.1".to_string(),
1011            headers,
1012        };
1013        std::fs::remove_file(format!("{}.gz", base.display()))
1014            .expect("remove gz");
1015        let (p, e) = negotiate_precompressed(&base, &req_gz_missing);
1016        assert!(p.ends_with("index.html"));
1017        assert_eq!(e, None);
1018    }
1019
    #[tokio::test(flavor = "current_thread")]
    async fn try_send_static_file_fast_path_serves_get_and_head() {
        // A hash-suffixed asset name so the fast path applies its
        // immutable Cache-Control policy to the response.
        let dir = tempfile::tempdir().expect("tempdir");
        let root = dir.path();
        std::fs::write(
            root.join("app-abcdef12.js"),
            "console.log('ok');",
        )
        .expect("write");

        let server = Server::builder()
            .address("127.0.0.1:0")
            .document_root(root.to_string_lossy().as_ref())
            .build()
            .expect("server");
        let request = Request {
            method: "GET".into(),
            path: "/app-abcdef12.js".into(),
            version: "HTTP/1.1".into(),
            headers: Vec::new(),
        };

        // Manually pair a client/server socket: the connect is spawned
        // so it can proceed concurrently with the accept below.
        let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
            .await
            .expect("bind");
        let addr = listener.local_addr().expect("addr");
        let client_task = tokio::spawn(async move {
            tokio::net::TcpStream::connect(addr).await.expect("connect")
        });
        let (server_stream, _) =
            listener.accept().await.expect("accept");
        let mut client = client_task.await.expect("join");

        let server_clone = server.clone();
        let server_task = tokio::spawn(async move {
            let mut stream = server_stream;
            try_send_static_file_fast_path(
                &mut stream,
                &server_clone,
                &request,
                u64::MAX, // sendfile threshold: effectively disabled
                ConnectionPolicy::Close,
            )
            .await
            .expect("send")
        });

        // GET: status line, immutable caching header, and the JS MIME
        // type must all appear in the raw response bytes.
        let mut bytes = Vec::new();
        let _ = client.read_to_end(&mut bytes).await.expect("read");
        assert!(server_task.await.expect("join"));

        let text = String::from_utf8(bytes).expect("utf8");
        assert!(text.contains("HTTP/1.1 200 OK"));
        assert!(text.contains(
            "Cache-Control: public, max-age=31536000, immutable"
        ));
        assert!(text.contains("application/javascript"));

        // HEAD: same resource over a fresh socket pair; headers are
        // sent but the body must be omitted.
        let request_head = Request {
            method: "HEAD".into(),
            path: "/app-abcdef12.js".into(),
            version: "HTTP/1.1".into(),
            headers: Vec::new(),
        };

        let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
            .await
            .expect("bind");
        let addr = listener.local_addr().expect("addr");
        let client_task = tokio::spawn(async move {
            tokio::net::TcpStream::connect(addr).await.expect("connect")
        });
        let (server_stream, _) =
            listener.accept().await.expect("accept");
        let mut client = client_task.await.expect("join");
        let server_clone = server.clone();
        let server_task = tokio::spawn(async move {
            let mut stream = server_stream;
            try_send_static_file_fast_path(
                &mut stream,
                &server_clone,
                &request_head,
                u64::MAX,
                ConnectionPolicy::Close,
            )
            .await
            .expect("send")
        });
        let mut bytes = Vec::new();
        let _ = client.read_to_end(&mut bytes).await.expect("read");
        assert!(server_task.await.expect("join"));
        let text = String::from_utf8(bytes).expect("utf8");
        assert!(text.contains("HTTP/1.1 200 OK"));
        // HEAD responses carry no body bytes.
        assert!(!text.contains("console.log('ok')"));
    }
1115
    // The fast path only handles simple GET/HEAD requests; a non-GET
    // method or a Range header must make it return `false` so the full
    // handler deals with the request instead.
    #[tokio::test(flavor = "current_thread")]
    async fn try_send_static_file_fast_path_rejects_non_get_and_range()
    {
        let dir = tempfile::tempdir().expect("tempdir");
        let root = dir.path();
        std::fs::write(root.join("index.html"), "ok").expect("write");

        let server = Server::builder()
            .address("127.0.0.1:0")
            .document_root(root.to_string_lossy().as_ref())
            .build()
            .expect("server");

        // Socket pair; the client end is held alive but never read —
        // no bytes should be written for rejected requests anyway.
        let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
            .await
            .expect("bind");
        let addr = listener.local_addr().expect("addr");
        let client_task = tokio::spawn(async move {
            tokio::net::TcpStream::connect(addr).await.expect("connect")
        });
        let (mut server_stream, _) =
            listener.accept().await.expect("accept");
        let _client = client_task.await.expect("join");

        // POST is not eligible for the static fast path.
        let post_req = Request {
            method: "POST".into(),
            path: "/index.html".into(),
            version: "HTTP/1.1".into(),
            headers: Vec::new(),
        };
        assert!(
            !try_send_static_file_fast_path(
                &mut server_stream,
                &server,
                &post_req,
                u64::MAX,
                ConnectionPolicy::Close,
            )
            .await
            .expect("ok")
        );

        // A GET carrying a Range header is likewise deferred.
        let headers = vec![("range".into(), "bytes=0-3".into())];
        let range_req = Request {
            method: "GET".into(),
            path: "/index.html".into(),
            version: "HTTP/1.1".into(),
            headers,
        };
        assert!(
            !try_send_static_file_fast_path(
                &mut server_stream,
                &server,
                &range_req,
                u64::MAX,
                ConnectionPolicy::Close,
            )
            .await
            .expect("ok")
        );
    }
1177
1178    #[tokio::test(flavor = "current_thread")]
1179    async fn send_response_async_adds_default_headers() {
1180        let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
1181            .await
1182            .expect("bind");
1183        let addr = listener.local_addr().expect("addr");
1184        let client_task = tokio::spawn(async move {
1185            tokio::net::TcpStream::connect(addr).await.expect("connect")
1186        });
1187        let (mut server_stream, _) =
1188            listener.accept().await.expect("accept");
1189        let mut client = client_task.await.expect("join");
1190
1191        let response = Response::new(200, "OK", b"hello".to_vec());
1192        send_response_async(&mut server_stream, &response)
1193            .await
1194            .expect("send");
1195        drop(server_stream);
1196
1197        let mut bytes = Vec::new();
1198        let _ = client.read_to_end(&mut bytes).await.expect("read");
1199        let text = String::from_utf8(bytes).expect("utf8");
1200        assert!(text.contains("HTTP/1.1 200 OK"));
1201        assert!(text.contains("Content-Length: 5"));
1202        assert!(text.contains("Connection: close"));
1203        assert!(text.ends_with("hello"));
1204    }
1205
1206    #[tokio::test(flavor = "current_thread")]
1207    async fn send_response_async_keeps_existing_headers() {
1208        let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
1209            .await
1210            .expect("bind");
1211        let addr = listener.local_addr().expect("addr");
1212        let client_task = tokio::spawn(async move {
1213            tokio::net::TcpStream::connect(addr).await.expect("connect")
1214        });
1215        let (mut server_stream, _) =
1216            listener.accept().await.expect("accept");
1217        let mut client = client_task.await.expect("join");
1218
1219        let mut response = Response::new(204, "No Content", Vec::new());
1220        response.headers.push(("Content-Length".into(), "0".into()));
1221        response
1222            .headers
1223            .push(("Connection".into(), "keep-alive".into()));
1224        send_response_async(&mut server_stream, &response)
1225            .await
1226            .expect("send");
1227        drop(server_stream);
1228
1229        let mut bytes = Vec::new();
1230        let _ = client.read_to_end(&mut bytes).await.expect("read");
1231        let text = String::from_utf8(bytes).expect("utf8");
1232        assert!(text.contains("Content-Length: 0"));
1233        assert!(text.contains("Connection: keep-alive"));
1234        assert!(!text.contains("Connection: close"));
1235    }
1236
1237    #[tokio::test(flavor = "current_thread")]
1238    async fn handle_async_connection_rejects_invalid_utf8() {
1239        let dir = tempfile::tempdir().expect("tempdir");
1240        let server = Server::builder()
1241            .address("127.0.0.1:0")
1242            .document_root(dir.path().to_string_lossy().as_ref())
1243            .build()
1244            .expect("server");
1245
1246        let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
1247            .await
1248            .expect("bind");
1249        let addr = listener.local_addr().expect("addr");
1250        let client_task = tokio::spawn(async move {
1251            let mut stream = tokio::net::TcpStream::connect(addr)
1252                .await
1253                .expect("connect");
1254            stream.write_all(b"\xFF\xFE").await.expect("write");
1255            stream
1256        });
1257        let (server_stream, _) =
1258            listener.accept().await.expect("accept");
1259        let _client = client_task.await.expect("join");
1260
1261        let err = handle_async_connection(
1262            server_stream,
1263            &server,
1264            &PerfLimits::default(),
1265        )
1266        .await
1267        .expect_err("invalid utf8 should fail");
1268        assert!(err.to_string().contains("Invalid request"));
1269    }
1270
1271    #[tokio::test(flavor = "current_thread")]
1272    async fn handle_async_connection_returns_ok_on_clean_close() {
1273        let dir = tempfile::tempdir().expect("tempdir");
1274        let server = Server::builder()
1275            .address("127.0.0.1:0")
1276            .document_root(dir.path().to_string_lossy().as_ref())
1277            .build()
1278            .expect("server");
1279
1280        let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
1281            .await
1282            .expect("bind");
1283        let addr = listener.local_addr().expect("addr");
1284        let client_task = tokio::spawn(async move {
1285            let stream = tokio::net::TcpStream::connect(addr)
1286                .await
1287                .expect("connect");
1288            drop(stream);
1289        });
1290        let (server_stream, _) =
1291            listener.accept().await.expect("accept");
1292        client_task.await.expect("join");
1293
1294        handle_async_connection(
1295            server_stream,
1296            &server,
1297            &PerfLimits::default(),
1298        )
1299        .await
1300        .expect("clean close");
1301    }
1302
    #[tokio::test(flavor = "current_thread")]
    async fn handle_async_connection_sends_built_response() {
        // Seed a custom 404 page so requesting a missing file still
        // produces a response with real content to send back.
        let dir = tempfile::tempdir().expect("tempdir");
        let root = dir.path();
        std::fs::create_dir(root.join("404")).expect("404 dir");
        std::fs::write(root.join("404/index.html"), "not found")
            .expect("404");
        let server = Server::builder()
            .address("127.0.0.1:0")
            .document_root(root.to_string_lossy().as_ref())
            .build()
            .expect("server");

        let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
            .await
            .expect("bind");
        let addr = listener.local_addr().expect("addr");
        let client_task = tokio::spawn(async move {
            let mut stream = tokio::net::TcpStream::connect(addr)
                .await
                .expect("connect");
            stream
                .write_all(
                    b"GET /missing.txt HTTP/1.1\r\nHost: localhost\r\n\r\n",
                )
                .await
                .expect("write");
            stream
        });
        let (server_stream, _) =
            listener.accept().await.expect("accept");
        let mut client = client_task.await.expect("join");
        handle_async_connection(
            server_stream,
            &server,
            &PerfLimits::default(),
        )
        .await
        .expect("handled");

        // Only the response framing is asserted here; which status code
        // gets built is covered by the routing tests.
        let mut bytes = Vec::new();
        let _ = client.read_to_end(&mut bytes).await.expect("read");
        let text = String::from_utf8(bytes).expect("utf8");
        assert!(text.contains("HTTP/1.1"));
    }
1348
    #[tokio::test(flavor = "current_thread")]
    async fn fast_path_includes_precompressed_encoding_headers() {
        // Both the plain file and a .gz sidecar exist; a gzip-accepting
        // client must be served the sidecar with matching headers.
        let dir = tempfile::tempdir().expect("tempdir");
        let root = dir.path();
        std::fs::write(root.join("index.html"), "plain").expect("base");
        std::fs::write(root.join("index.html.gz"), "gzdata")
            .expect("gz");
        let server = Server::builder()
            .address("127.0.0.1:0")
            .document_root(root.to_string_lossy().as_ref())
            .build()
            .expect("server");

        let headers =
            vec![("accept-encoding".to_string(), "gzip".to_string())];
        let req = Request {
            method: "GET".into(),
            path: "/index.html".into(),
            version: "HTTP/1.1".into(),
            headers,
        };

        let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
            .await
            .expect("bind");
        let addr = listener.local_addr().expect("addr");
        let client_task = tokio::spawn(async move {
            tokio::net::TcpStream::connect(addr).await.expect("connect")
        });
        let (mut server_stream, _) =
            listener.accept().await.expect("accept");
        let mut client = client_task.await.expect("join");

        assert!(
            try_send_static_file_fast_path(
                &mut server_stream,
                &server,
                &req,
                u64::MAX,
                ConnectionPolicy::Close,
            )
            .await
            .expect("served")
        );
        drop(server_stream);
        let mut bytes = Vec::new();
        let _ = client.read_to_end(&mut bytes).await.expect("read");
        let text = String::from_utf8(bytes).expect("utf8");
        // Content-Encoding marks the sidecar; Vary keeps shared caches
        // from serving the gzip body to clients that can't accept it.
        assert!(text.contains("Content-Encoding: gzip"));
        assert!(text.contains("Vary: Accept-Encoding"));
    }
1400
    /// Drives two GETs against the same file via the fast path and
    /// asserts that the second one was served from the response cache
    /// — i.e. the `(path, mtime, len)` key resolved to an entry that
    /// survived between calls. Cache contents are inspected directly
    /// since the cache is process-local. Doubles as a smoke test for
    /// `build_head_prefix` (status line + immutable Content-Type) and
    /// `insert_cached_response` (the cap-and-evict path is exercised
    /// separately in the eviction test).
    #[tokio::test(flavor = "current_thread")]
    async fn fast_path_response_cache_serves_repeat_requests_from_memory()
     {
        let dir = tempfile::tempdir().expect("tempdir");
        let root = dir.path();
        std::fs::write(root.join("index.html"), b"hello-cache")
            .expect("seed");
        let server = Server::builder()
            .address("127.0.0.1:0")
            .document_root(root.to_string_lossy().as_ref())
            .build()
            .expect("server");

        // Snapshot the cache so we can assert the populated key
        // belongs to this test and not bleed-over from a previous run.
        let key_count_before =
            response_cache().read().map(|r| r.len()).unwrap_or(0);

        // First pass populates the cache; second should hit it. Each
        // pass uses a fresh socket pair since the policy is Close.
        for _ in 0..2 {
            let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
                .await
                .expect("bind");
            let addr = listener.local_addr().expect("addr");
            let client_task = tokio::spawn(async move {
                tokio::net::TcpStream::connect(addr)
                    .await
                    .expect("connect")
            });
            let (mut server_stream, _) =
                listener.accept().await.expect("accept");
            let mut client = client_task.await.expect("join");
            let request = Request {
                method: "GET".into(),
                path: "/index.html".into(),
                version: "HTTP/1.1".into(),
                headers: Vec::new(),
            };
            assert!(
                try_send_static_file_fast_path(
                    &mut server_stream,
                    &server,
                    &request,
                    u64::MAX,
                    ConnectionPolicy::Close,
                )
                .await
                .expect("served")
            );
            drop(server_stream);
            let mut sink = Vec::new();
            let _ = client
                .read_to_end(&mut sink)
                .await
                .expect("client read");
            // Both passes must produce the identical full response.
            let text = String::from_utf8(sink).expect("utf8");
            assert!(text.contains("HTTP/1.1 200 OK"));
            assert!(text.contains("Content-Length: 11"));
            assert!(text.contains("hello-cache"));
        }

        let key_count_after =
            response_cache().read().map(|r| r.len()).unwrap_or(0);
        assert!(
            key_count_after > key_count_before,
            "cache should have at least one new entry after two GETs (was {key_count_before}, now {key_count_after})"
        );
    }
1476
1477    /// Forces the cap-and-evict path of `insert_cached_response`. The
1478    /// cache is process-wide so the test pre-fills past the cap with
1479    /// synthetic keys (no real file I/O) and asserts the post-insert
1480    /// length sits at-or-below `RESPONSE_CACHE_MAX`.
1481    #[test]
1482    fn response_cache_evicts_when_full() {
1483        let cache = response_cache();
1484        if let Ok(mut write) = cache.write() {
1485            for i in 0..(RESPONSE_CACHE_MAX + 1) as u64 {
1486                let key: ResponseCacheKey =
1487                    (PathBuf::from(format!("/synthetic/{i}")), i, i);
1488                let value = Arc::new(CachedResponse {
1489                    head_prefix: Arc::new(Vec::new()),
1490                    body: Arc::new(Vec::new()),
1491                });
1492                let _ = write.insert(key, value);
1493            }
1494        }
1495        let len_before =
1496            cache.read().map(|r| r.len()).unwrap_or_default();
1497        let trigger_key: ResponseCacheKey =
1498            (PathBuf::from("/synthetic/trigger"), u64::MAX, u64::MAX);
1499        let trigger_value = Arc::new(CachedResponse {
1500            head_prefix: Arc::new(Vec::new()),
1501            body: Arc::new(Vec::new()),
1502        });
1503        insert_cached_response(trigger_key, trigger_value);
1504        let len_after =
1505            cache.read().map(|r| r.len()).unwrap_or_default();
1506        assert!(
1507            len_after <= RESPONSE_CACHE_MAX,
1508            "cache len {len_after} exceeds cap {RESPONSE_CACHE_MAX} (was {len_before} before trigger insert)"
1509        );
1510    }
1511
1512    #[test]
1513    fn resolve_static_path_handles_missing_dir_index_and_immutable_edge_cases()
1514     {
1515        let dir = tempfile::tempdir().expect("tempdir");
1516        let root = dir.path();
1517        std::fs::create_dir(root.join("dir-no-index")).expect("mkdir");
1518        let canonical_root =
1519            std::fs::canonicalize(root).expect("canonical");
1520        assert!(
1521            resolve_static_path(root, &canonical_root, "/dir-no-index")
1522                .is_none()
1523        );
1524        assert!(!is_probably_immutable_asset("/assets/noext"));
1525        assert!(!is_probably_immutable_asset("/assets/file.js"));
1526    }
1527
1528    #[tokio::test(flavor = "current_thread")]
1529    async fn try_send_static_file_fast_path_missing_file_returns_false()
1530    {
1531        let dir = tempfile::tempdir().expect("tempdir");
1532        let server = Server::builder()
1533            .address("127.0.0.1:0")
1534            .document_root(dir.path().to_string_lossy().as_ref())
1535            .build()
1536            .expect("server");
1537        let request = Request {
1538            method: "GET".into(),
1539            path: "/missing.txt".into(),
1540            version: "HTTP/1.1".into(),
1541            headers: Vec::new(),
1542        };
1543
1544        let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
1545            .await
1546            .expect("bind");
1547        let addr = listener.local_addr().expect("addr");
1548        let client_task = tokio::spawn(async move {
1549            tokio::net::TcpStream::connect(addr).await.expect("connect")
1550        });
1551        let (mut server_stream, _) =
1552            listener.accept().await.expect("accept");
1553        let _client = client_task.await.expect("join");
1554
1555        let served = try_send_static_file_fast_path(
1556            &mut server_stream,
1557            &server,
1558            &request,
1559            u64::MAX,
1560            ConnectionPolicy::Close,
1561        )
1562        .await
1563        .expect("missing file should map to false");
1564        assert!(!served);
1565    }
1566
    // Linux/Android only: exercises the real sendfile(2) zero-copy
    // path and verifies the peer receives exactly the file's bytes.
    #[cfg(any(target_os = "linux", target_os = "android"))]
    #[tokio::test(flavor = "current_thread")]
    async fn try_sendfile_unix_sends_file_bytes() {
        let dir = tempfile::tempdir().expect("tempdir");
        let path = dir.path().join("blob.bin");
        let payload = b"abcdef123456";
        std::fs::write(&path, payload).expect("write");

        let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
            .await
            .expect("bind");
        let addr = listener.local_addr().expect("addr");
        let client_task = tokio::spawn(async move {
            tokio::net::TcpStream::connect(addr).await.expect("connect")
        });
        let (server_stream, _) =
            listener.accept().await.expect("accept");
        let mut client = client_task.await.expect("join");

        let sent = try_sendfile_unix(
            &server_stream,
            &path,
            payload.len() as u64,
        )
        .await
        .expect("sendfile");
        assert!(sent);
        // Close the write side so the client's read_to_end sees EOF.
        drop(server_stream);

        let mut got = Vec::new();
        let _ = client.read_to_end(&mut got).await.expect("read");
        assert_eq!(got, payload);
    }
1600
    // End-to-end smoke test: boot the high-perf accept loop, serve one
    // real request over TCP, then abort the server task.
    #[tokio::test(flavor = "current_thread")]
    async fn start_high_perf_accepts_and_serves_then_can_abort() {
        let dir = tempfile::tempdir().expect("tempdir");
        std::fs::write(dir.path().join("index.html"), "ok")
            .expect("write");

        // Bind-then-drop to reserve a free port number for the server.
        // NOTE(review): small race — another process could grab the
        // port between drop and the server's own bind.
        let probe = std::net::TcpListener::bind("127.0.0.1:0")
            .expect("probe bind");
        let addr = probe.local_addr().expect("probe addr");
        drop(probe);

        let server = Server::builder()
            .address(&addr.to_string())
            .document_root(dir.path().to_string_lossy().as_ref())
            .build()
            .expect("server");
        let limits = PerfLimits {
            max_inflight: 1,
            max_queue: 1,
            sendfile_threshold_bytes: u64::MAX,
        };

        let task = tokio::spawn(async move {
            let _ = start_high_perf(server, limits).await;
        });

        // Give the accept loop a moment to bind before connecting.
        tokio::time::sleep(Duration::from_millis(50)).await;
        let mut client = tokio::net::TcpStream::connect(addr)
            .await
            .expect("connect");
        client
            .write_all(
                b"GET /index.html HTTP/1.1\r\nHost: localhost\r\n\r\n",
            )
            .await
            .expect("write");
        let mut buf = vec![0_u8; 512];
        let read =
            timeout(Duration::from_secs(1), client.read(&mut buf))
                .await
                .expect("timed read")
                .expect("read");
        assert!(read > 0);
        let text = String::from_utf8_lossy(&buf[..read]);
        assert!(text.contains("HTTP/1.1 200 OK"));

        // Aborting the long-running accept loop yields a JoinError.
        task.abort();
        let join = task.await;
        assert!(join.is_err());
    }
1651
    // Saturates the single in-flight slot, then verifies that further
    // connections are accepted and immediately dropped by the queue
    // guard rather than queued indefinitely.
    #[tokio::test(flavor = "current_thread")]
    async fn start_high_perf_drops_connections_when_queue_is_full() {
        let dir = tempfile::tempdir().expect("tempdir");
        std::fs::write(dir.path().join("index.html"), "ok")
            .expect("write");

        // Bind-then-drop to reserve a free port for the server.
        let probe = std::net::TcpListener::bind("127.0.0.1:0")
            .expect("probe bind");
        let addr = probe.local_addr().expect("probe addr");
        drop(probe);

        let server = Server::builder()
            .address(&addr.to_string())
            .document_root(dir.path().to_string_lossy().as_ref())
            .build()
            .expect("server");
        // One in-flight, zero queued: second concurrent connect must be
        // rejected via the `queued_now > max_queue` branch.
        let limits = PerfLimits {
            max_inflight: 1,
            max_queue: 0,
            sendfile_threshold_bytes: u64::MAX,
        };

        let task = tokio::spawn(async move {
            let _ = start_high_perf(server, limits).await;
        });

        tokio::time::sleep(Duration::from_millis(50)).await;

        // Hold the single in-flight slot by connecting but never sending a
        // request — the async handler stays blocked in `read` until timeout.
        let _hold = tokio::net::TcpStream::connect(addr)
            .await
            .expect("first connect");
        tokio::time::sleep(Duration::from_millis(30)).await;

        // Fire multiple short-lived connections; each should be accepted
        // then immediately dropped by the server (queue full / acquire timeout).
        let mut dropped = 0_usize;
        for _ in 0..8 {
            let mut probe_stream = tokio::net::TcpStream::connect(addr)
                .await
                .expect("probe connect");
            // The server drops the accepted socket in its `continue`, so the
            // read end returns EOF quickly.
            let mut buf = [0_u8; 8];
            let read = timeout(
                Duration::from_millis(200),
                probe_stream.read(&mut buf),
            )
            .await;
            if matches!(read, Ok(Ok(0))) {
                dropped += 1;
            }
        }
        // Timing-dependent, so only require that at least one probe was
        // rejected rather than all eight.
        assert!(
            dropped > 0,
            "expected at least one connection to be dropped by queue guard",
        );

        task.abort();
        let _ = task.await;
    }
1716
1717    #[tokio::test(flavor = "current_thread")]
1718    async fn start_high_perf_falls_through_queue_timeout_path() {
1719        // Exercise the `queued_now <= max_queue` branch where the connection
1720        // waits on `acquire_owned` with a bounded timeout. A single in-flight
1721        // slot is held indefinitely so queued connects never acquire; they
1722        // drop after the 20ms acquire timeout.
1723        let dir = tempfile::tempdir().expect("tempdir");
1724        std::fs::write(dir.path().join("index.html"), "ok")
1725            .expect("write");
1726        let probe = std::net::TcpListener::bind("127.0.0.1:0")
1727            .expect("probe bind");
1728        let addr = probe.local_addr().expect("probe addr");
1729        drop(probe);
1730
1731        let server = Server::builder()
1732            .address(&addr.to_string())
1733            .document_root(dir.path().to_string_lossy().as_ref())
1734            .build()
1735            .expect("server");
1736        let limits = PerfLimits {
1737            max_inflight: 1,
1738            // Allow one queued connect so `queued_now <= max_queue` and we hit
1739            // the timeout-acquire branch.
1740            max_queue: 4,
1741            sendfile_threshold_bytes: u64::MAX,
1742        };
1743
1744        let task = tokio::spawn(async move {
1745            let _ = start_high_perf(server, limits).await;
1746        });
1747
1748        tokio::time::sleep(Duration::from_millis(50)).await;
1749
1750        // Hold the in-flight slot.
1751        let _hold = tokio::net::TcpStream::connect(addr)
1752            .await
1753            .expect("first connect");
1754        tokio::time::sleep(Duration::from_millis(30)).await;
1755
1756        // Queue up a few more — each waits on the 20ms acquire timeout
1757        // then gets dropped.
1758        for _ in 0..3 {
1759            let mut probe_stream = tokio::net::TcpStream::connect(addr)
1760                .await
1761                .expect("probe connect");
1762            let mut buf = [0_u8; 8];
1763            let _ = timeout(
1764                Duration::from_millis(200),
1765                probe_stream.read(&mut buf),
1766            )
1767            .await;
1768        }
1769
1770        task.abort();
1771        let _ = task.await;
1772    }
1773
1774    #[tokio::test(flavor = "current_thread")]
1775    async fn try_send_static_file_fast_path_invokes_sendfile_threshold()
1776    {
1777        let dir = tempfile::tempdir().expect("tempdir");
1778        let root = dir.path();
1779        let body: Vec<u8> = (0..2048_u32).map(|i| i as u8).collect();
1780        std::fs::write(root.join("blob.bin"), &body).expect("write");
1781
1782        let server = Server::builder()
1783            .address("127.0.0.1:0")
1784            .document_root(root.to_string_lossy().as_ref())
1785            .build()
1786            .expect("server");
1787        let request = Request {
1788            method: "GET".into(),
1789            path: "/blob.bin".into(),
1790            version: "HTTP/1.1".into(),
1791            headers: Vec::new(),
1792        };
1793
1794        let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
1795            .await
1796            .expect("bind");
1797        let addr = listener.local_addr().expect("addr");
1798        let client_task = tokio::spawn(async move {
1799            tokio::net::TcpStream::connect(addr).await.expect("connect")
1800        });
1801        let (mut server_stream, _) =
1802            listener.accept().await.expect("accept");
1803        let mut client = client_task.await.expect("join");
1804
1805        // Threshold = 0 forces the sendfile fast-path branch. On Linux it
1806        // succeeds; on other Unix platforms it falls through to tokio::io::copy.
1807        let served = try_send_static_file_fast_path(
1808            &mut server_stream,
1809            &server,
1810            &request,
1811            0,
1812            ConnectionPolicy::Close,
1813        )
1814        .await
1815        .expect("served");
1816        assert!(served);
1817        drop(server_stream);
1818
1819        let mut bytes = Vec::new();
1820        let _ = client.read_to_end(&mut bytes).await.expect("read");
1821        let head_end = bytes
1822            .windows(4)
1823            .position(|w| w == b"\r\n\r\n")
1824            .expect("header terminator");
1825        let head_text =
1826            String::from_utf8_lossy(&bytes[..head_end]).to_string();
1827        assert!(head_text.contains("HTTP/1.1 200 OK"));
1828        assert_eq!(&bytes[head_end + 4..], body.as_slice());
1829    }
1830
1831    #[cfg(unix)]
1832    #[tokio::test(flavor = "current_thread")]
1833    async fn try_sendfile_unix_non_linux_returns_false() {
1834        // The non-Linux/Android Unix fallback unconditionally returns `Ok(false)`.
1835        // Linux has its own impl so we skip the assertion there.
1836        #[cfg(not(any(target_os = "linux", target_os = "android")))]
1837        {
1838            let dir = tempfile::tempdir().expect("tempdir");
1839            let path = dir.path().join("f.bin");
1840            std::fs::write(&path, b"x").expect("write");
1841            let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
1842                .await
1843                .expect("bind");
1844            let addr = listener.local_addr().expect("addr");
1845            drop(tokio::spawn(async move {
1846                tokio::net::TcpStream::connect(addr).await.expect("c")
1847            }));
1848            let (server_stream, _) =
1849                listener.accept().await.expect("accept");
1850            let sent = try_sendfile_unix(&server_stream, &path, 1)
1851                .await
1852                .expect("stub");
1853            assert!(!sent);
1854        }
1855    }
1856
1857    #[test]
1858    fn resolve_static_path_rejects_symlink_escape() {
1859        let dir = tempfile::tempdir().expect("tempdir");
1860        let root = dir.path().join("root");
1861        std::fs::create_dir(&root).expect("mkroot");
1862        let outside = dir.path().join("outside");
1863        std::fs::create_dir(&outside).expect("mkoutside");
1864        std::fs::write(outside.join("secret.txt"), "shh")
1865            .expect("write secret");
1866        let canonical_root =
1867            std::fs::canonicalize(&root).expect("canonical");
1868        #[cfg(unix)]
1869        {
1870            let link = root.join("link.txt");
1871            std::os::unix::fs::symlink(
1872                outside.join("secret.txt"),
1873                &link,
1874            )
1875            .expect("symlink");
1876            assert!(
1877                resolve_static_path(
1878                    &root,
1879                    &canonical_root,
1880                    "/link.txt"
1881                )
1882                .is_none(),
1883                "symlink pointing outside root must not resolve",
1884            );
1885        }
1886        #[cfg(not(unix))]
1887        {
1888            let _ = outside;
1889            let _ = canonical_root;
1890        }
1891    }
1892
1893    /// Covers the `Connection: close` early-return after a successful
1894    /// fast-path send inside `handle_async_connection`. Drives a fresh
1895    /// connection that asks for keep-alive close so the loop exits via
1896    /// the post-fast-path `if policy == ConnectionPolicy::Close`.
1897    #[tokio::test(flavor = "current_thread")]
1898    async fn handle_async_connection_closes_after_fast_path_when_requested()
1899     {
1900        use crate::Server;
1901        let dir = tempfile::tempdir().expect("tempdir");
1902        std::fs::write(dir.path().join("index.html"), "ok")
1903            .expect("write");
1904        std::fs::create_dir(dir.path().join("404")).expect("404 dir");
1905        std::fs::write(dir.path().join("404/index.html"), b"404")
1906            .expect("write 404");
1907
1908        let probe =
1909            std::net::TcpListener::bind("127.0.0.1:0").expect("probe");
1910        let addr = probe.local_addr().expect("addr").to_string();
1911        drop(probe);
1912
1913        let server = Server::builder()
1914            .address(&addr)
1915            .document_root(dir.path().to_string_lossy().as_ref())
1916            .build()
1917            .expect("server");
1918
1919        let server_task = tokio::spawn(async move {
1920            let _ =
1921                start_high_perf(server, PerfLimits::default()).await;
1922        });
1923
1924        // Wait for the server to bind.
1925        for _ in 0..50 {
1926            if tokio::net::TcpStream::connect(&addr).await.is_ok() {
1927                break;
1928            }
1929            tokio::time::sleep(Duration::from_millis(20)).await;
1930        }
1931
1932        // Single roundtrip with explicit `Connection: close` so the
1933        // server's post-fast-path branch returns Ok(()) instead of
1934        // looping into another idle wait.
1935        let mut s = tokio::net::TcpStream::connect(&addr)
1936            .await
1937            .expect("connect");
1938        s.write_all(
1939            b"GET /index.html HTTP/1.1\r\nHost: b\r\nConnection: close\r\n\r\n",
1940        )
1941        .await
1942        .expect("write");
1943        let mut sink = Vec::with_capacity(512);
1944        let _ = s.read_to_end(&mut sink).await.expect("read");
1945        let body = String::from_utf8_lossy(&sink);
1946        assert!(body.contains("HTTP/1.1 200 OK"));
1947        assert!(body.contains("Connection: close"));
1948
1949        server_task.abort();
1950        let _ = server_task.await;
1951    }
1952
1953    /// Drives the post-fallback `Connection: close` branch of
1954    /// [`handle_async_connection`]. The fast path returns `false` for
1955    /// a missing file, the keep-alive loop falls through to
1956    /// `build_response_for_request_with_metrics`, sends the 404, and
1957    /// must then exit via the `policy == ConnectionPolicy::Close`
1958    /// arm rather than blocking on another idle read. Exercises the
1959    /// async-path counterpart to the sync `handle_connection` close
1960    /// branch.
1961    #[tokio::test(flavor = "current_thread")]
1962    async fn handle_async_connection_closes_after_404_when_requested() {
1963        let dir = tempfile::tempdir().expect("tempdir");
1964        let root = dir.path();
1965        std::fs::create_dir(root.join("404")).expect("404 dir");
1966        std::fs::write(root.join("404/index.html"), "not found")
1967            .expect("404");
1968        let server = Server::builder()
1969            .address("127.0.0.1:0")
1970            .document_root(root.to_string_lossy().as_ref())
1971            .build()
1972            .expect("server");
1973
1974        let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
1975            .await
1976            .expect("bind");
1977        let addr = listener.local_addr().expect("addr");
1978        let client_task = tokio::spawn(async move {
1979            let mut stream = tokio::net::TcpStream::connect(addr)
1980                .await
1981                .expect("connect");
1982            stream
1983                .write_all(
1984                    b"GET /missing.txt HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n",
1985                )
1986                .await
1987                .expect("write");
1988            stream
1989        });
1990        let (server_stream, _) =
1991            listener.accept().await.expect("accept");
1992        let mut client = client_task.await.expect("join");
1993        // The handler must return promptly because the
1994        // `Connection: close` policy short-circuits the keep-alive
1995        // loop after the 404 is written. If the close branch were
1996        // missing the loop would re-enter the read path and block on
1997        // the idle-timeout instead.
1998        handle_async_connection(
1999            server_stream,
2000            &server,
2001            &PerfLimits::default(),
2002        )
2003        .await
2004        .expect("handled");
2005
2006        let mut bytes = Vec::new();
2007        let _ = client.read_to_end(&mut bytes).await.expect("read");
2008        let text = String::from_utf8(bytes).expect("utf8");
2009        assert!(text.contains("Connection: close"));
2010    }
2011
2012    /// Smoke test for the multi-thread entry point: serves one request,
2013    /// then aborts the runtime via panic. This verifies the function
2014    /// builds the runtime and dispatches into the existing accept loop;
2015    /// throughput is validated separately via the bombardier load harness.
2016    #[cfg(feature = "high-perf-multi-thread")]
2017    #[test]
2018    fn start_high_perf_multi_thread_serves_one_request() {
2019        use crate::Server;
2020        use std::io::{Read, Write};
2021
2022        let dir = tempfile::tempdir().expect("tempdir");
2023        std::fs::write(dir.path().join("index.html"), "ok-mt")
2024            .expect("write");
2025        std::fs::create_dir(dir.path().join("404")).expect("404 dir");
2026        std::fs::write(dir.path().join("404/index.html"), b"404")
2027            .expect("write 404");
2028
2029        let probe =
2030            std::net::TcpListener::bind("127.0.0.1:0").expect("probe");
2031        let addr = probe.local_addr().expect("addr").to_string();
2032        drop(probe);
2033
2034        let server = Server::builder()
2035            .address(&addr)
2036            .document_root(dir.path().to_string_lossy().as_ref())
2037            .build()
2038            .expect("server");
2039
2040        // Two worker threads is enough to prove the runtime is
2041        // multi-threaded without paying for full CPU detection cost
2042        // in the test harness.
2043        let server_thread = std::thread::spawn(move || {
2044            let _ = start_high_perf_multi_thread(
2045                server,
2046                PerfLimits::default(),
2047                Some(2),
2048            );
2049        });
2050
2051        // Wait for bind, then send one Connection: close request.
2052        let mut connected = None;
2053        for _ in 0..50 {
2054            if let Ok(s) = std::net::TcpStream::connect(&addr) {
2055                connected = Some(s);
2056                break;
2057            }
2058            std::thread::sleep(std::time::Duration::from_millis(20));
2059        }
2060        let mut s = connected.expect("server never bound");
2061        s.write_all(
2062            b"GET /index.html HTTP/1.1\r\nHost: b\r\nConnection: close\r\n\r\n",
2063        )
2064        .expect("write");
2065        let mut sink = Vec::with_capacity(256);
2066        let _ = s.read_to_end(&mut sink).expect("read");
2067        let body = String::from_utf8_lossy(&sink);
2068        assert!(body.contains("HTTP/1.1 200 OK"), "got {body:?}");
2069        assert!(body.contains("ok-mt"), "got {body:?}");
2070
2071        // Server thread is in an infinite accept loop; leak it. The
2072        // process exits cleanly after the test runner finishes.
2073        drop(server_thread);
2074    }
2075}