// chromiumoxide/conn.rs
1use std::collections::VecDeque;
2use std::marker::PhantomData;
3use std::pin::Pin;
4use std::task::ready;
5
6use futures_util::stream::{FuturesOrdered, SplitSink};
7use futures_util::{SinkExt, Stream, StreamExt};
8use std::future::Future;
9use std::task::{Context, Poll};
10use tokio::sync::mpsc;
11use tokio_tungstenite::tungstenite::Message as WsMessage;
12use tokio_tungstenite::MaybeTlsStream;
13use tokio_tungstenite::{tungstenite::protocol::WebSocketConfig, WebSocketStream};
14
15use chromiumoxide_cdp::cdp::browser_protocol::target::SessionId;
16use chromiumoxide_types::{CallId, EventMessage, Message, MethodCall, MethodId};
17
18use crate::error::CdpError;
19use crate::error::Result;
20
21type ConnectStream = MaybeTlsStream<tokio::net::TcpStream>;
22
/// Exchanges the messages with the websocket.
///
/// Owns the raw WebSocket plus a queue of not-yet-sent CDP commands;
/// implemented as a `Stream` of decoded `Message<T>` further below.
#[must_use = "streams do nothing unless polled"]
#[derive(Debug)]
pub struct Connection<T: EventMessage> {
    /// Queue of commands to send.
    pending_commands: VecDeque<MethodCall>,
    /// The websocket of the chromium instance
    ws: WebSocketStream<ConnectStream>,
    /// The identifier for a specific command
    next_id: usize,
    /// Whether the write buffer has unsent data that needs flushing.
    needs_flush: bool,
    /// The phantom marker.
    // Ties the event type `T` (decoded from incoming frames) to this
    // struct without storing a value of that type.
    _marker: PhantomData<T>,
}
38
/// Whether Nagle's algorithm should be disabled on the CDP TCP socket.
///
/// Controlled by the `DISABLE_NAGLE` env var; any value other than the
/// literal `"true"` (including unset) defaults to `true` — i.e. Nagle is
/// disabled unless explicitly kept.
// Uses `std::sync::LazyLock` instead of the `lazy_static!` macro — same
// lazy-once semantics and the same `*DISABLE_NAGLE` deref at call sites,
// but with no macro/crate dependency.
static DISABLE_NAGLE: std::sync::LazyLock<bool> =
    std::sync::LazyLock::new(|| match std::env::var("DISABLE_NAGLE") {
        Ok(disable_nagle) => disable_nagle == "true",
        _ => true,
    });

/// Whether to keep tungstenite's default websocket limits.
///
/// Controlled by the `WEBSOCKET_DEFAULTS` env var; defaults to `false`
/// (limits are lifted in `connect_with_retries`) unless set to `"true"`.
static WEBSOCKET_DEFAULTS: std::sync::LazyLock<bool> =
    std::sync::LazyLock::new(|| match std::env::var("WEBSOCKET_DEFAULTS") {
        Ok(d) => d == "true",
        _ => false,
    });
51
/// Default number of WebSocket connection retry attempts.
pub const DEFAULT_CONNECTION_RETRIES: u32 = 4;

/// Initial backoff delay between connection retries (in milliseconds).
/// The delay grows by a factor of 3 per failed attempt (see
/// `connect_with_retries`).
const INITIAL_BACKOFF_MS: u64 = 50;

/// Maximum backoff delay between connection retries (in milliseconds).
const MAX_BACKOFF_MS: u64 = 2_000;
60
61impl<T: EventMessage + Unpin> Connection<T> {
62 pub async fn connect(debug_ws_url: impl AsRef<str>) -> Result<Self> {
63 Self::connect_with_retries(debug_ws_url, DEFAULT_CONNECTION_RETRIES).await
64 }
65
66 pub async fn connect_with_retries(debug_ws_url: impl AsRef<str>, retries: u32) -> Result<Self> {
67 let mut config = WebSocketConfig::default();
68
69 // Cap the internal write buffer so a slow receiver cannot cause
70 // unbounded memory growth (default is usize::MAX).
71 config.max_write_buffer_size = 4 * 1024 * 1024;
72
73 if !*WEBSOCKET_DEFAULTS {
74 config.max_message_size = None;
75 config.max_frame_size = None;
76 }
77
78 let url = debug_ws_url.as_ref();
79 let use_uring = crate::uring_fs::is_enabled();
80 let mut last_err = None;
81
82 for attempt in 0..=retries {
83 let result = if use_uring {
84 Self::connect_uring(url, config).await
85 } else {
86 Self::connect_default(url, config).await
87 };
88
89 match result {
90 Ok(ws) => {
91 return Ok(Self {
92 pending_commands: Default::default(),
93 ws,
94 next_id: 0,
95 needs_flush: false,
96 _marker: Default::default(),
97 });
98 }
99 Err(e) => {
100 // Detect non-retriable errors early to avoid wasting time
101 // on connections that will never succeed.
102 let should_retry = match &e {
103 // Connection refused — nothing is listening on this port.
104 CdpError::Io(io_err)
105 if io_err.kind() == std::io::ErrorKind::ConnectionRefused =>
106 {
107 false
108 }
109 // HTTP response to a WebSocket upgrade (e.g. wrong path
110 // returns 404 / redirect) — retrying the same URL won't help.
111 CdpError::Ws(tungstenite_err) => !matches!(
112 tungstenite_err,
113 tokio_tungstenite::tungstenite::Error::Http(_)
114 | tokio_tungstenite::tungstenite::Error::HttpFormat(_)
115 ),
116 _ => true,
117 };
118
119 last_err = Some(e);
120
121 if !should_retry {
122 break;
123 }
124
125 if attempt < retries {
126 let backoff_ms =
127 (INITIAL_BACKOFF_MS * 3u64.saturating_pow(attempt)).min(MAX_BACKOFF_MS);
128 tokio::time::sleep(std::time::Duration::from_millis(backoff_ms)).await;
129 }
130 }
131 }
132 }
133
134 Err(last_err.unwrap_or_else(|| CdpError::msg("connection failed")))
135 }
136
137 /// Default path: let tokio-tungstenite handle TCP connect + WS handshake.
138 async fn connect_default(
139 url: &str,
140 config: WebSocketConfig,
141 ) -> Result<WebSocketStream<ConnectStream>> {
142 let (ws, _) =
143 tokio_tungstenite::connect_async_with_config(url, Some(config), *DISABLE_NAGLE).await?;
144 Ok(ws)
145 }
146
147 /// io_uring path: pre-connect the TCP socket via io_uring, then do WS
148 /// handshake over the pre-connected stream.
149 async fn connect_uring(
150 url: &str,
151 config: WebSocketConfig,
152 ) -> Result<WebSocketStream<ConnectStream>> {
153 use tokio_tungstenite::tungstenite::client::IntoClientRequest;
154
155 let request = url.into_client_request()?;
156 let host = request
157 .uri()
158 .host()
159 .ok_or_else(|| CdpError::msg("no host in CDP WebSocket URL"))?;
160 let port = request.uri().port_u16().unwrap_or(9222);
161
162 // Resolve host → SocketAddr (CDP is always localhost, so this is fast).
163 let addr_str = format!("{}:{}", host, port);
164 let addr: std::net::SocketAddr = match addr_str.parse() {
165 Ok(a) => a,
166 Err(_) => {
167 // Hostname needs DNS — fall back to default path.
168 return Self::connect_default(url, config).await;
169 }
170 };
171
172 // TCP connect via io_uring.
173 let std_stream = crate::uring_fs::tcp_connect(addr)
174 .await
175 .map_err(CdpError::Io)?;
176
177 // Set non-blocking + Nagle.
178 std_stream.set_nonblocking(true).map_err(CdpError::Io)?;
179 if *DISABLE_NAGLE {
180 let _ = std_stream.set_nodelay(true);
181 }
182
183 // Wrap in tokio TcpStream.
184 let tokio_stream = tokio::net::TcpStream::from_std(std_stream).map_err(CdpError::Io)?;
185
186 // WebSocket handshake over the pre-connected stream.
187 let (ws, _) = tokio_tungstenite::client_async_with_config(
188 request,
189 MaybeTlsStream::Plain(tokio_stream),
190 Some(config),
191 )
192 .await?;
193
194 Ok(ws)
195 }
196}
197
198impl<T: EventMessage> Connection<T> {
199 fn next_call_id(&mut self) -> CallId {
200 let id = CallId::new(self.next_id);
201 self.next_id = self.next_id.wrapping_add(1);
202 id
203 }
204
205 /// Queue in the command to send over the socket and return the id for this
206 /// command
207 pub fn submit_command(
208 &mut self,
209 method: MethodId,
210 session_id: Option<SessionId>,
211 params: serde_json::Value,
212 ) -> serde_json::Result<CallId> {
213 let id = self.next_call_id();
214 let call = MethodCall {
215 id,
216 method,
217 session_id: session_id.map(Into::into),
218 params,
219 };
220 self.pending_commands.push_back(call);
221 Ok(id)
222 }
223
224 /// Buffer all queued commands into the WebSocket sink, then flush once.
225 ///
226 /// This batches multiple CDP commands into a single TCP write instead of
227 /// flushing after every individual message.
228 fn start_send_next(&mut self, cx: &mut Context<'_>) -> Result<()> {
229 // Complete any pending flush from a previous poll first.
230 if self.needs_flush {
231 match self.ws.poll_flush_unpin(cx) {
232 Poll::Ready(Ok(())) => self.needs_flush = false,
233 Poll::Ready(Err(e)) => return Err(e.into()),
234 Poll::Pending => return Ok(()),
235 }
236 }
237
238 // Buffer as many queued commands as the sink will accept.
239 let mut sent_any = false;
240 while !self.pending_commands.is_empty() {
241 match self.ws.poll_ready_unpin(cx) {
242 Poll::Ready(Ok(())) => {
243 let Some(cmd) = self.pending_commands.pop_front() else {
244 break;
245 };
246 tracing::trace!("Sending {:?}", cmd);
247 let msg = serde_json::to_string(&cmd)?;
248 self.ws.start_send_unpin(msg.into())?;
249 sent_any = true;
250 }
251 _ => break,
252 }
253 }
254
255 // Flush the entire batch in one write.
256 if sent_any {
257 match self.ws.poll_flush_unpin(cx) {
258 Poll::Ready(Ok(())) => {}
259 Poll::Ready(Err(e)) => return Err(e.into()),
260 Poll::Pending => self.needs_flush = true,
261 }
262 }
263
264 Ok(())
265 }
266}
267
/// Capacity of the bounded channel feeding the background WS writer task.
/// Large enough that bursts of CDP commands never block the handler, small
/// enough to apply back-pressure before memory grows without bound.
const WS_CMD_CHANNEL_CAPACITY: usize = 2048;

/// Capacity of the bounded channel from the background WS reader task to
/// the Handler. Keeps decoded CDP messages buffered so the reader task
/// can keep reading the socket while the Handler processes a backlog;
/// applies TCP-level back-pressure on Chrome when the Handler is slow
/// (the reader awaits channel capacity, stops draining the socket).
const WS_READ_CHANNEL_CAPACITY: usize = 1024;

/// Maximum number of in-flight decodes the reader pipeline holds at
/// once. While any of these is still running on the blocking pool,
/// the reader can keep draining the socket and starting new decodes,
/// up to this cap. Applies per-connection; the resulting decoded
/// messages are emitted to the Handler in strict WS arrival order
/// via a `FuturesOrdered` queue — no behavior change versus the
/// serial loop, just concurrent execution of independent decodes.
const MAX_IN_FLIGHT_DECODES: usize = 32;

/// Payload size at/above which `decode_message` runs via
/// `tokio::task::spawn_blocking` instead of inline on the reader task.
///
/// `serde_json::from_slice` is CPU-bound with no `.await` points, so
/// a multi-MB payload can occupy one tokio worker thread for tens of
/// milliseconds. Offloading to the blocking thread pool above a
/// threshold keeps the reader task cooperatively yielding — critical
/// on single-threaded runtimes where the reader shares its worker
/// with the Handler, user tasks, and timers.
///
/// The threshold is chosen so that typical CDP traffic (events,
/// responses, small evaluates) stays on the inline fast path and
/// doesn't pay the ~10-30 µs `spawn_blocking` hand-off cost, while
/// screenshot payloads, wide network events, and huge console
/// payloads take the offloaded path.
const LARGE_FRAME_THRESHOLD: usize = 256 * 1024; // 256 KiB
305
/// Split parts returned by [`Connection::into_async`].
#[derive(Debug)]
pub struct AsyncConnection<T: EventMessage> {
    /// Receive half for decoded CDP messages. Backed by a bounded mpsc
    /// fed by a dedicated background reader task — decode runs on that
    /// task, never on the Handler task, so large CDP responses (multi-MB
    /// screenshots, huge event payloads) cannot stall the Handler's
    /// event loop.
    pub reader: WsReader<T>,
    /// Sender half for submitting outgoing CDP commands.
    pub cmd_tx: mpsc::Sender<MethodCall>,
    /// Handle to the background writer task.
    pub writer_handle: tokio::task::JoinHandle<Result<()>>,
    /// Handle to the background reader task (reads + decodes WS frames).
    pub reader_handle: tokio::task::JoinHandle<()>,
    /// Next command-call-id counter (continue numbering from where Connection left off).
    pub next_id: usize,
}
324
impl<T: EventMessage + Unpin + Send + 'static> Connection<T> {
    /// Consume the connection and split into a background reader + writer
    /// pair, exposing the Handler-facing ends via `AsyncConnection`.
    ///
    /// Two `tokio::spawn`'d tasks are created:
    ///
    /// * `ws_write_loop` — batches outgoing commands and flushes them in
    ///   one write per wakeup.
    /// * `ws_read_loop` — reads WS frames, decodes them to typed
    ///   `Message<T>`, and forwards them via a bounded mpsc to the
    ///   Handler. Ping/pong/malformed frames are skipped on this task
    ///   and never reach the Handler. Decode (SerDe CPU work) runs off
    ///   the Handler task: inline on the reader task for typical frames,
    ///   and via `tokio::task::spawn_blocking` for payloads at or above
    ///   [`LARGE_FRAME_THRESHOLD`] — so the Handler's poll loop never
    ///   stalls for tens of milliseconds on a 10 MB screenshot response.
    ///
    /// Both tasks interleave cleanly with the Handler task on
    /// single-threaded runtimes, and scale with the runtime's worker
    /// threads on multi-threaded runtimes.
    // (Previous doc claimed "no spawn_blocking" — stale: ws_read_loop
    // offloads large-frame decodes to the blocking pool.)
    pub fn into_async(self) -> AsyncConnection<T> {
        let (ws_sink, ws_stream) = self.ws.split();
        let (cmd_tx, cmd_rx) = mpsc::channel(WS_CMD_CHANNEL_CAPACITY);
        let (msg_tx, msg_rx) = mpsc::channel::<Result<Box<Message<T>>>>(WS_READ_CHANNEL_CAPACITY);

        let writer_handle = tokio::spawn(ws_write_loop(ws_sink, cmd_rx));
        let reader_handle = tokio::spawn(ws_read_loop::<T, _>(ws_stream, msg_tx));

        let reader = WsReader {
            rx: msg_rx,
            _marker: PhantomData,
        };

        AsyncConnection {
            reader,
            cmd_tx,
            writer_handle,
            reader_handle,
            next_id: self.next_id,
        }
    }
}
368
/// An entry in the reader's decode pipeline.
///
/// Small frames have been decoded inline on the reader task and sit
/// in `Ready(Some(result))` waiting their turn to emit — zero
/// allocation beyond the `Option`. Large frames were offloaded to
/// `tokio::task::spawn_blocking`, so their entry is the
/// corresponding `JoinHandle`.
///
/// A single concrete enum means `FuturesOrdered<InFlightDecode<T>>`
/// can hold either kind without `Box<dyn Future>`, keeping the
/// pipeline cost-proportional to the workload.
enum InFlightDecode<T: EventMessage + Send + 'static> {
    /// Small-frame fast path: already decoded inline. `take()`'d
    /// exactly once when `FuturesOrdered` first polls it to Ready.
    Ready(Option<Result<Box<Message<T>>>>),
    /// Large-frame path: decoding on the blocking thread pool.
    Blocking(tokio::task::JoinHandle<Result<Box<Message<T>>>>),
}
387
388impl<T: EventMessage + Send + 'static> Future for InFlightDecode<T> {
389 type Output = Result<Box<Message<T>>>;
390
391 fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
392 // Safety: both variants are structurally pin-agnostic —
393 // `Option<Result<..>>` is `Unpin`, and `tokio::task::JoinHandle`
394 // is documented as `Unpin`. So we can project out a `&mut`
395 // without unsafe.
396 match self.get_mut() {
397 InFlightDecode::Ready(slot) => Poll::Ready(
398 slot.take()
399 .expect("InFlightDecode::Ready polled after completion"),
400 ),
401 InFlightDecode::Blocking(handle) => match Pin::new(handle).poll(cx) {
402 Poll::Ready(Ok(res)) => Poll::Ready(res),
403 Poll::Ready(Err(join_err)) => Poll::Ready(Err(CdpError::msg(format!(
404 "WS decode blocking task join error: {join_err}"
405 )))),
406 Poll::Pending => Poll::Pending,
407 },
408 }
409 }
410}
411
412/// Emit a single decoded-frame result to the Handler, logging parse
413/// errors. Returns `true` if the channel is still open, `false` if
414/// the Handler has dropped the receiver (caller should exit).
415async fn emit_decoded<T>(
416 tx: &mpsc::Sender<Result<Box<Message<T>>>>,
417 res: Result<Box<Message<T>>>,
418) -> bool
419where
420 T: EventMessage + Send + 'static,
421{
422 match res {
423 Ok(msg) => tx.send(Ok(msg)).await.is_ok(),
424 Err(err) => {
425 tracing::debug!(
426 target: "chromiumoxide::conn::raw_ws::parse_errors",
427 "Dropping malformed WS frame: {err}",
428 );
429 true
430 }
431 }
432}
433
/// Background task that reads frames from the WebSocket, decodes them to
/// typed CDP `Message<T>`, and forwards them to the Handler over a
/// bounded mpsc.
///
/// Runs on a `tokio::spawn`'d task. Small-to-medium frames are
/// decoded inline (fast path); payloads at or above
/// [`LARGE_FRAME_THRESHOLD`] are offloaded to `spawn_blocking` so
/// multi-MB deserialization doesn't monopolise a tokio worker
/// thread — especially important on single-threaded runtimes where
/// the reader, Handler, and user tasks share the same worker.
///
/// Flow per frame:
///
/// * `Text` / `Binary` → decode; decoded `Ok(msg)` is
///   sent to the Handler. Decode errors are logged and the frame is
///   dropped (same behavior as the legacy inline decode path).
/// * `Close` → loop exits cleanly, dropping `tx`. The Handler's
///   `next_message().await` returns `None` on the next call.
/// * `Ping` / `Pong` / unexpected frame types → skipped silently; they
///   never cross the channel to the Handler.
/// * Transport error → forwarded as `Err(CdpError::Ws(..))`, then the
///   loop exits (the WS half is considered dead after an error).
///
/// Back-pressure: the outbound `tx` is bounded. If the Handler is busy
/// and the channel fills, `tx.send(..).await` parks this task, which
/// stops draining the WS socket. TCP flow control then applies
/// back-pressure to Chrome instead of letting memory grow without bound.
async fn ws_read_loop<T, S>(mut stream: S, tx: mpsc::Sender<Result<Box<Message<T>>>>)
where
    T: EventMessage + Send + 'static,
    S: Stream<Item = std::result::Result<WsMessage, tokio_tungstenite::tungstenite::Error>> + Unpin,
{
    // Pipeline of decodes in strict arrival order. Small-frame decodes
    // are produced inline (zero allocation, borrowing the frame body);
    // large-frame decodes are offloaded to `spawn_blocking`. Both
    // variants share a single concrete `InFlightDecode<T>` so the
    // queue avoids `Box<dyn Future>` overhead.
    let mut in_flight: FuturesOrdered<InFlightDecode<T>> = FuturesOrdered::new();

    // Shutdown state. When the stream signals `Close`, transport
    // error, or end-of-stream, we stop reading new frames but keep
    // running the select loop so the emit arm can flush any still
    // in-flight decodes *interleaved with* whatever else the runtime
    // is doing. A pending transport error is surfaced to the Handler
    // only after the in-order flush completes.
    let mut stream_terminated = false;
    let mut pending_err: Option<CdpError> = None;

    loop {
        tokio::select! {
            // Bias: emit already-ready decodes before reading more
            // frames. Keeps the pipeline small in the steady state
            // while still allowing concurrency under burst, and —
            // critically during shutdown — drains the pipeline one
            // ready item at a time inside the select loop instead
            // of blocking in a dedicated drain helper.
            biased;

            // Emit the head of the pipeline as soon as it is ready.
            // `FuturesOrdered::next` preserves submit order, so
            // downstream delivery is byte-identical to the serial
            // loop's ordering guarantee.
            Some(res) = in_flight.next(), if !in_flight.is_empty() => {
                if !emit_decoded(&tx, res).await {
                    // Handler dropped its receiver — nothing left to do.
                    return;
                }
            }

            // Read the next frame if the pipeline has capacity and
            // the stream hasn't terminated. Disabled once the stream
            // signals end (Close / None / Err) so subsequent loop
            // iterations only do emit work.
            maybe_frame = stream.next(),
            if !stream_terminated && in_flight.len() < MAX_IN_FLIGHT_DECODES =>
            {
                match maybe_frame {
                    Some(Ok(WsMessage::Text(text))) => {
                        // Zero-copy enqueue. The small-frame fast
                        // path decodes inline *now* (borrowing
                        // `text`, keeping the `raw_text_for_logging`
                        // preview); the large-frame path moves the
                        // `Utf8Bytes` (`Send + 'static`) directly
                        // into `spawn_blocking` without an
                        // intermediate allocation.
                        if text.len() >= LARGE_FRAME_THRESHOLD {
                            in_flight.push_back(InFlightDecode::Blocking(
                                tokio::task::spawn_blocking(move || {
                                    decode_message::<T>(text.as_bytes(), None)
                                }),
                            ));
                        } else {
                            let res = decode_message::<T>(text.as_bytes(), Some(&text));
                            in_flight.push_back(InFlightDecode::Ready(Some(res)));
                        }
                    }
                    Some(Ok(WsMessage::Binary(buf))) => {
                        // Same shape as Text: move `Bytes`
                        // (`Send + 'static`) into `spawn_blocking`
                        // for large payloads, decode inline for
                        // small ones.
                        if buf.len() >= LARGE_FRAME_THRESHOLD {
                            in_flight.push_back(InFlightDecode::Blocking(
                                tokio::task::spawn_blocking(move || {
                                    decode_message::<T>(&buf, None)
                                }),
                            ));
                        } else {
                            let res = decode_message::<T>(&buf, None);
                            in_flight.push_back(InFlightDecode::Ready(Some(res)));
                        }
                    }
                    Some(Ok(WsMessage::Close(_))) => {
                        // Orderly shutdown: stop reading, let the emit
                        // arm drain whatever is still in flight.
                        stream_terminated = true;
                    }
                    Some(Ok(WsMessage::Ping(_))) | Some(Ok(WsMessage::Pong(_))) => {}
                    Some(Ok(msg)) => {
                        tracing::debug!(
                            target: "chromiumoxide::conn::raw_ws::parse_errors",
                            "Unexpected WS message type: {:?}",
                            msg
                        );
                    }
                    Some(Err(err)) => {
                        // Defer the error until after the already
                        // in-flight decodes have emitted — preserves
                        // the ordering contract that callers see
                        // frames up to the failure point before the
                        // error itself.
                        stream_terminated = true;
                        pending_err = Some(CdpError::Ws(err));
                    }
                    None => {
                        // Stream ended (connection closed without a
                        // `Close` frame). No more input, but
                        // in_flight may still hold pending decodes.
                        stream_terminated = true;
                    }
                }
            }

            // Both arms disabled: `in_flight` is empty AND
            // `stream_terminated`. We have nothing more to do.
            else => {
                break;
            }
        }
    }

    // Surface a deferred transport error after the in-order flush.
    if let Some(err) = pending_err {
        let _ = tx.send(Err(err)).await;
    }
}
586
587/// Background task that batches and flushes outgoing CDP commands.
588async fn ws_write_loop(
589 mut sink: SplitSink<WebSocketStream<ConnectStream>, WsMessage>,
590 mut rx: mpsc::Receiver<MethodCall>,
591) -> Result<()> {
592 while let Some(call) = rx.recv().await {
593 let msg = crate::serde_json::to_string(&call)?;
594 sink.feed(WsMessage::Text(msg.into()))
595 .await
596 .map_err(CdpError::Ws)?;
597
598 // Batch: drain all buffered commands without waiting.
599 while let Ok(call) = rx.try_recv() {
600 let msg = crate::serde_json::to_string(&call)?;
601 sink.feed(WsMessage::Text(msg.into()))
602 .await
603 .map_err(CdpError::Ws)?;
604 }
605
606 // Flush the entire batch in one write.
607 sink.flush().await.map_err(CdpError::Ws)?;
608 }
609 Ok(())
610}
611
/// Handler-facing read half of the split WebSocket connection.
///
/// Decoded CDP messages are produced by a dedicated background task
/// (see [`ws_read_loop`]) and forwarded over a bounded mpsc. `WsReader`
/// itself is a thin `Receiver` wrapper — calling `next_message()` does
/// a single `rx.recv().await` with no per-message decoding work on the
/// caller's task. This keeps the Handler's poll loop free of CPU-bound
/// deserialize time, which matters for large (multi-MB) CDP responses
/// such as screenshots and wide-header network events.
#[derive(Debug)]
pub struct WsReader<T: EventMessage> {
    /// Bounded channel fed by the background `ws_read_loop` task.
    rx: mpsc::Receiver<Result<Box<Message<T>>>>,
    // `T` already appears in `rx`'s type, so this marker is technically
    // redundant; kept for symmetry with `Connection`.
    _marker: PhantomData<T>,
}
626
impl<T: EventMessage + Unpin> WsReader<T> {
    /// Read the next CDP message from the WebSocket.
    ///
    /// Returns `None` when the background reader task has exited
    /// (connection closed or sender dropped). This call does only a
    /// channel `recv` — the actual WS read + JSON decode happens on
    /// the background `ws_read_loop` task.
    pub async fn next_message(&mut self) -> Option<Result<Box<Message<T>>>> {
        self.rx.recv().await
    }
}
638
impl<T: EventMessage + Unpin> Stream for Connection<T> {
    type Item = Result<Box<Message<T>>>;

    /// Drive the connection: push queued commands out, then try to read
    /// and decode one incoming CDP message.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let pin = self.get_mut();

        // Send and flush outgoing messages
        if let Err(err) = pin.start_send_next(cx) {
            return Poll::Ready(Some(Err(err)));
        }

        // Read from the websocket, skipping non-data frames (pings,
        // pongs, malformed messages) without yielding back to the
        // executor. This avoids a full round-trip per skipped frame.
        //
        // Cap consecutive skips so a flood of non-data frames (many
        // pings, malformed/unexpected types) cannot starve the
        // runtime — yield Pending after `MAX_SKIPS_PER_POLL` and
        // self-wake so we resume on the next tick.
        const MAX_SKIPS_PER_POLL: u32 = 16;
        let mut skips: u32 = 0;
        loop {
            // `ready!` propagates Pending from the socket poll.
            match ready!(pin.ws.poll_next_unpin(cx)) {
                Some(Ok(WsMessage::Text(text))) => {
                    match decode_message::<T>(text.as_bytes(), Some(&text)) {
                        Ok(msg) => return Poll::Ready(Some(Ok(msg))),
                        Err(err) => {
                            tracing::debug!(
                                target: "chromiumoxide::conn::raw_ws::parse_errors",
                                "Dropping malformed text WS frame: {err}",
                            );
                            skips += 1;
                        }
                    }
                }
                Some(Ok(WsMessage::Binary(buf))) => match decode_message::<T>(&buf, None) {
                    Ok(msg) => return Poll::Ready(Some(Ok(msg))),
                    Err(err) => {
                        tracing::debug!(
                            target: "chromiumoxide::conn::raw_ws::parse_errors",
                            "Dropping malformed binary WS frame: {err}",
                        );
                        skips += 1;
                    }
                },
                // Orderly close terminates the stream.
                Some(Ok(WsMessage::Close(_))) => return Poll::Ready(None),
                Some(Ok(WsMessage::Ping(_))) | Some(Ok(WsMessage::Pong(_))) => {
                    skips += 1;
                }
                Some(Ok(msg)) => {
                    tracing::debug!(
                        target: "chromiumoxide::conn::raw_ws::parse_errors",
                        "Unexpected WS message type: {:?}",
                        msg
                    );
                    skips += 1;
                }
                Some(Err(err)) => return Poll::Ready(Some(Err(CdpError::Ws(err)))),
                None => return Poll::Ready(None),
            }

            // Skip budget exhausted: yield and self-wake to stay fair.
            if skips >= MAX_SKIPS_PER_POLL {
                cx.waker().wake_by_ref();
                return Poll::Pending;
            }
        }
    }
}
707
708/// Shared decode path for both text and binary WS frames.
709/// `raw_text_for_logging` is only provided for textual frames so we can log the original
710/// payload on parse failure if desired.
711#[cfg(not(feature = "serde_stacker"))]
712fn decode_message<T: EventMessage>(
713 bytes: &[u8],
714 raw_text_for_logging: Option<&str>,
715) -> Result<Box<Message<T>>> {
716 match serde_json::from_slice::<Box<Message<T>>>(bytes) {
717 Ok(msg) => {
718 tracing::trace!("Received {:?}", msg);
719 Ok(msg)
720 }
721 Err(err) => {
722 if let Some(txt) = raw_text_for_logging {
723 let preview = &txt[..txt.len().min(512)];
724 tracing::debug!(
725 target: "chromiumoxide::conn::raw_ws::parse_errors",
726 msg_len = txt.len(),
727 "Skipping unrecognized WS message {err} preview={preview}",
728 );
729 } else {
730 tracing::debug!(
731 target: "chromiumoxide::conn::raw_ws::parse_errors",
732 "Skipping unrecognized binary WS message {err}",
733 );
734 }
735 Err(err.into())
736 }
737 }
738}
739
/// Shared decode path for both text and binary WS frames.
/// `raw_text_for_logging` is only provided for textual frames so we can log the original
/// payload on parse failure if desired.
///
/// This variant disables serde_json's recursion limit and guards the
/// stack via `serde_stacker`, so deeply nested CDP payloads don't abort.
#[cfg(feature = "serde_stacker")]
fn decode_message<T: EventMessage>(
    bytes: &[u8],
    raw_text_for_logging: Option<&str>,
) -> Result<Box<Message<T>>> {
    use serde::Deserialize;
    let mut de = serde_json::Deserializer::from_slice(bytes);

    de.disable_recursion_limit();

    let de = serde_stacker::Deserializer::new(&mut de);

    match Box::<Message<T>>::deserialize(de) {
        Ok(msg) => {
            tracing::trace!("Received {:?}", msg);
            Ok(msg)
        }
        Err(err) => {
            if let Some(txt) = raw_text_for_logging {
                // BUGFIX: `&txt[..512]` panics if byte 512 falls inside a
                // multi-byte UTF-8 character. Walk the cut point back to
                // the nearest char boundary before slicing.
                let mut end = txt.len().min(512);
                while !txt.is_char_boundary(end) {
                    end -= 1;
                }
                let preview = &txt[..end];
                tracing::debug!(
                    target: "chromiumoxide::conn::raw_ws::parse_errors",
                    msg_len = txt.len(),
                    "Skipping unrecognized WS message {err} preview={preview}",
                );
            } else {
                tracing::debug!(
                    target: "chromiumoxide::conn::raw_ws::parse_errors",
                    "Skipping unrecognized binary WS message {err}",
                );
            }
            Err(err.into())
        }
    }
}
778
779#[cfg(test)]
780mod ws_read_loop_tests {
781 //! Unit tests for the `ws_read_loop` background reader task.
782 //!
783 //! These tests feed a synthetic `Stream<Item = Result<WsMessage, _>>`
784 //! into `ws_read_loop` — no real WebSocket, no Chrome — and observe
785 //! what comes out the other side of the mpsc channel.
786 //!
787 //! The properties under test are the ones that make the reader-task
788 //! decoupling safe: FIFO ordering, no-deadlock on a bounded channel
789 //! under back-pressure, silent drop of non-data frames, graceful
790 //! transport-error propagation, and clean exit on `Close`.
791 //!
792 //! The typed events are `chromiumoxide_cdp::cdp::CdpEventMessage` —
793 //! the same instantiation the real Handler uses — so these tests
794 //! exercise the actual decode path (`serde_json::from_slice`), not
795 //! a simplified fake.
796 use super::*;
797 use chromiumoxide_cdp::cdp::CdpEventMessage;
798 use chromiumoxide_types::CallId;
799 use futures_util::stream;
800 use tokio::sync::mpsc;
801 use tokio_tungstenite::tungstenite::Message as WsMessage;
802
803 /// Build a CDP `Response` WS frame as text — the smallest valid CDP
804 /// message. `id` tags the frame for ordering assertions.
805 fn response_frame(id: u64) -> WsMessage {
806 WsMessage::Text(
807 format!(r#"{{"id":{id},"result":{{"ok":true}}}}"#)
808 .to_string()
809 .into(),
810 )
811 }
812
813 /// Build a frame far larger than a typical socket chunk, to exercise
814 /// the "large message" path that motivated this refactor. The blob
815 /// field pushes serde_json through a big allocation even though the
816 /// envelope is tiny.
817 fn large_response_frame(id: u64, blob_bytes: usize) -> WsMessage {
818 let blob = "x".repeat(blob_bytes);
819 WsMessage::Text(
820 format!(r#"{{"id":{id},"result":{{"blob":"{blob}"}}}}"#)
821 .to_string()
822 .into(),
823 )
824 }
825
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn forwards_messages_in_stream_order() {
        // Three well-formed responses in → same three out, same order.
        let frames = vec![
            Ok(response_frame(1)),
            Ok(response_frame(2)),
            Ok(response_frame(3)),
        ];
        let stream = stream::iter(frames);
        let (tx, mut rx) = mpsc::channel::<Result<Box<Message<CdpEventMessage>>>>(8);
        let task = tokio::spawn(ws_read_loop::<CdpEventMessage, _>(stream, tx));

        for expected in [1u64, 2, 3] {
            let msg = rx.recv().await.expect("msg").expect("decode ok");
            if let Message::Response(resp) = *msg {
                assert_eq!(resp.id, CallId::new(expected as usize));
            } else {
                panic!("expected Response");
            }
        }
        // End-of-stream drops the reader's tx, which must close the channel.
        assert!(rx.recv().await.is_none(), "channel must close on EOF");
        task.await.expect("reader task join");
    }
848
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn pings_and_pongs_never_reach_the_handler() {
        // Control frames interleaved with data frames: only the data
        // frames (ids 7 and 8) may cross the channel.
        let frames = vec![
            Ok(WsMessage::Ping(vec![1, 2, 3].into())),
            Ok(response_frame(7)),
            Ok(WsMessage::Pong(vec![].into())),
            Ok(response_frame(8)),
        ];
        let stream = stream::iter(frames);
        let (tx, mut rx) = mpsc::channel::<Result<Box<Message<CdpEventMessage>>>>(8);
        let task = tokio::spawn(ws_read_loop::<CdpEventMessage, _>(stream, tx));

        for expected in [7u64, 8] {
            let msg = rx.recv().await.expect("msg").expect("decode ok");
            if let Message::Response(resp) = *msg {
                assert_eq!(resp.id, CallId::new(expected as usize));
            }
        }
        assert!(rx.recv().await.is_none());
        task.await.expect("reader task join");
    }
870
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn malformed_frames_do_not_block_subsequent_valid_frames() {
        // A parse failure is logged and dropped; the next valid frame
        // must still be delivered.
        let frames = vec![
            Ok(WsMessage::Text("{not valid json".to_string().into())),
            Ok(response_frame(42)),
        ];
        let stream = stream::iter(frames);
        let (tx, mut rx) = mpsc::channel::<Result<Box<Message<CdpEventMessage>>>>(8);
        let task = tokio::spawn(ws_read_loop::<CdpEventMessage, _>(stream, tx));

        let msg = rx.recv().await.expect("msg").expect("decode ok");
        if let Message::Response(resp) = *msg {
            assert_eq!(resp.id, CallId::new(42));
        }
        assert!(rx.recv().await.is_none());
        task.await.expect("reader task join");
    }
888
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn close_frame_terminates_the_reader() {
        // Frames after `Close` must never be read or forwarded.
        let frames = vec![
            Ok(response_frame(1)),
            Ok(WsMessage::Close(None)),
            Ok(response_frame(2)), // unreachable after Close
        ];
        let stream = stream::iter(frames);
        let (tx, mut rx) = mpsc::channel::<Result<Box<Message<CdpEventMessage>>>>(8);
        let task = tokio::spawn(ws_read_loop::<CdpEventMessage, _>(stream, tx));

        let msg = rx.recv().await.expect("msg").expect("decode ok");
        if let Message::Response(resp) = *msg {
            assert_eq!(resp.id, CallId::new(1));
        }
        assert!(
            rx.recv().await.is_none(),
            "reader must exit on Close; frames after Close must not appear"
        );
        task.await.expect("reader task join");
    }
910
911 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
912 async fn transport_error_is_forwarded_once_then_reader_exits() {
913 let frames = vec![
914 Ok(response_frame(1)),
915 Err(tokio_tungstenite::tungstenite::Error::ConnectionClosed),
916 Ok(response_frame(2)),
917 ];
918 let stream = stream::iter(frames);
919 let (tx, mut rx) = mpsc::channel::<Result<Box<Message<CdpEventMessage>>>>(8);
920 let task = tokio::spawn(ws_read_loop::<CdpEventMessage, _>(stream, tx));
921
922 let msg = rx.recv().await.expect("msg").expect("ok");
923 assert!(matches!(*msg, Message::Response(_)));
924 match rx.recv().await {
925 Some(Err(CdpError::Ws(_))) => {}
926 other => panic!("expected forwarded Ws error, got {other:?}"),
927 }
928 assert!(rx.recv().await.is_none());
929 task.await.expect("reader task join");
930 }
931
932 /// Back-pressure property: with the smallest possible channel and
933 /// many frames, the reader task awaits capacity after each send and
934 /// never deadlocks. This is the core "no deadlock" proof for the
935 /// new design — if the reader held anything across its `.await` that
936 /// the consumer needed, the consumer's `recv().await` would block
937 /// forever. Completion under a 5s watchdog proves it doesn't.
938 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
939 async fn bounded_channel_does_not_deadlock_under_backpressure() {
940 const N: u64 = 512;
941 let frames: Vec<_> = (1..=N).map(|id| Ok(response_frame(id))).collect();
942 let stream = stream::iter(frames);
943
944 let (tx, mut rx) = mpsc::channel::<Result<Box<Message<CdpEventMessage>>>>(1);
945 let task = tokio::spawn(ws_read_loop::<CdpEventMessage, _>(stream, tx));
946
947 let deadline = std::time::Duration::from_secs(5);
948 let collected = tokio::time::timeout(deadline, async {
949 let mut seen = 0u64;
950 while let Some(frame) = rx.recv().await {
951 let msg = frame.expect("decode ok");
952 if let Message::Response(resp) = *msg {
953 seen += 1;
954 assert_eq!(
955 resp.id,
956 CallId::new(seen as usize),
957 "back-pressure must preserve FIFO order"
958 );
959 }
960 }
961 seen
962 })
963 .await
964 .expect("reader must make forward progress despite cap-1 back-pressure");
965
966 assert_eq!(collected, N, "all frames must arrive");
967 task.await.expect("reader task join");
968 }
969
970 /// Large message (>1 MB) is decoded correctly on the background
971 /// task. This is the specific scenario the reader-task refactor
972 /// was built for — we don't measure time here (benches cover that),
973 /// we just prove the end-to-end path works without corruption or
974 /// deadlock.
975 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
976 async fn large_message_decodes_without_corruption() {
977 let big = 2 * 1024 * 1024; // 2 MB payload
978 let frames = vec![Ok(large_response_frame(100, big)), Ok(response_frame(101))];
979 let stream = stream::iter(frames);
980 let (tx, mut rx) = mpsc::channel::<Result<Box<Message<CdpEventMessage>>>>(4);
981 let task = tokio::spawn(ws_read_loop::<CdpEventMessage, _>(stream, tx));
982
983 let first = rx.recv().await.expect("msg").expect("ok");
984 if let Message::Response(resp) = *first {
985 assert_eq!(resp.id, CallId::new(100));
986 }
987 let second = rx.recv().await.expect("msg").expect("ok");
988 if let Message::Response(resp) = *second {
989 assert_eq!(resp.id, CallId::new(101));
990 }
991 assert!(rx.recv().await.is_none());
992 task.await.expect("reader task join");
993 }
994
995 /// FIFO ordering under the pipelined reader when large-frame
996 /// decodes run in parallel via `spawn_blocking`.
997 ///
998 /// This test submits an interleaved sequence of large and small
999 /// frames. Large frames take the `spawn_blocking` path (decode
1000 /// on the blocking pool, variable completion order); small
1001 /// frames take the inline path (decode immediately). The
1002 /// pipeline's `FuturesOrdered` queue must emit them to the
1003 /// Handler in strict arrival order regardless of which
1004 /// blocking-pool thread finishes first.
1005 ///
1006 /// If the ordering guarantee were ever broken — e.g. by
1007 /// accidentally swapping `FuturesOrdered` for `FuturesUnordered`
1008 /// — id sequence checks here would catch it immediately.
1009 #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
1010 async fn pipelined_large_and_small_frames_keep_fifo_order() {
1011 let big = 2 * 1024 * 1024; // 2 MB payload — forces spawn_blocking
1012 let frames = vec![
1013 Ok(large_response_frame(1, big)),
1014 Ok(response_frame(2)),
1015 Ok(response_frame(3)),
1016 Ok(large_response_frame(4, big)),
1017 Ok(response_frame(5)),
1018 Ok(large_response_frame(6, big)),
1019 Ok(response_frame(7)),
1020 Ok(response_frame(8)),
1021 ];
1022 let expected: Vec<usize> = (1..=8).collect();
1023
1024 let stream = stream::iter(frames);
1025 let (tx, mut rx) = mpsc::channel::<Result<Box<Message<CdpEventMessage>>>>(16);
1026 let task = tokio::spawn(ws_read_loop::<CdpEventMessage, _>(stream, tx));
1027
1028 let deadline = std::time::Duration::from_secs(10);
1029 let observed = tokio::time::timeout(deadline, async {
1030 let mut ids = Vec::with_capacity(expected.len());
1031 while let Some(frame) = rx.recv().await {
1032 let msg = frame.expect("decode ok");
1033 if let Message::Response(resp) = *msg {
1034 ids.push(CallId::new(ids.len() + 1));
1035 assert_eq!(
1036 resp.id,
1037 *ids.last().unwrap(),
1038 "pipelined reader must emit frames in strict arrival order \
1039 regardless of per-frame decode latency"
1040 );
1041 }
1042 }
1043 ids
1044 })
1045 .await
1046 .expect("pipelined reader should make forward progress within 10s");
1047
1048 assert_eq!(
1049 observed.len(),
1050 expected.len(),
1051 "all {} frames must reach the Handler",
1052 expected.len()
1053 );
1054 task.await.expect("reader task join");
1055 }
1056}