dbg-cli 0.3.2

A universal debugger CLI that lets AI agents observe runtime state instead of guessing from source code
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
use std::collections::VecDeque;
use std::os::fd::{AsRawFd, BorrowedFd, OwnedFd};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{self, Receiver, RecvTimeoutError, Sender};
use std::sync::{Arc, Condvar, LazyLock, Mutex};
use std::thread::JoinHandle;
use std::time::{Duration, Instant};

use anyhow::{Context, Result, bail};
use nix::poll::{PollFd, PollFlags, poll};
use nix::pty::{OpenptyResult, openpty};
use nix::sys::signal::Signal;
use nix::unistd::{ForkResult, Pid, close, dup2, execvp, fork, setsid};
use regex::Regex;

/// Matches CSI escape sequences: `ESC [ <param bytes> <final letter>`.
/// The parameter class `[0-9;]*` may be empty, so erase-line forms like
/// `ESC[K` and `ESC[2K` are already covered by this single alternative —
/// the previous explicit `\x1b\[K|\x1b\[2K` branches were redundant.
static ANSI_RE: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"\x1b\[[0-9;]*[A-Za-z]").unwrap());

/// An event emitted by the reader thread.
///
/// The reader owns the PTY master read side and produces a stream of
/// events that the daemon consumes. This decouples reading from
/// command dispatch so async debuggers (node-inspect, async gdb) don't
/// lose stop banners that arrive between commands.
pub enum PtyEvent {
    /// A chunk of raw output bytes (ANSI escapes still present — the
    /// consumer strips them). Multiple `Data` events may precede
    /// a single `Prompt`; the daemon concatenates them.
    Data(Vec<u8>),
    /// The prompt regex matched the accumulated output. The debugger
    /// is ready for input. The reader resets its internal match buffer
    /// after emitting this.
    Prompt,
    /// The reader detected EOF or a fatal read error. Child is gone.
    /// Terminal: the reader thread returns after sending this, so no
    /// further events arrive on the channel.
    Exit,
}

/// Kind of entry stored in the event log. The log is a tamer,
/// persistent view of the channel — same information, but retained so
/// `dbg events` can replay what happened.
///
/// `Output`, `Prompt`, `Exit` are pushed by the reader thread. `Stop`
/// is pushed by the daemon after parse_hit succeeds on an execution
/// command's output — the bytes field carries a JSON HitEvent.
/// `Stdout` is emitted by transports that can distinguish inferior
/// program output from debugger chatter (protocol backends: V8
/// Inspector, DAP). The PTY transport never emits `Stdout` because a
/// TTY mixes both streams at the OS level; everything goes into
/// `Output`. Agents filter with `--kind=stdout` to see only the
/// program's own writes.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EventKind {
    Output,
    Prompt,
    Exit,
    Stop,
    Stdout,
}

impl EventKind {
    /// Stable lowercase name for this kind, used for display and as
    /// the vocabulary of the `--kind=` filter.
    pub fn as_str(self) -> &'static str {
        match self {
            EventKind::Output => "output",
            EventKind::Prompt => "prompt",
            EventKind::Exit => "exit",
            EventKind::Stop => "stop",
            EventKind::Stdout => "stdout",
        }
    }

    /// Inverse of [`EventKind::as_str`]: resolve a lowercase kind name
    /// back to its variant for filtering. Returns `None` when the
    /// string names no known kind.
    pub fn parse(s: &str) -> Option<Self> {
        // Scan the closed set of variants and match on the canonical
        // name — keeps parse/as_str mechanically in sync.
        const ALL: [EventKind; 5] = [
            EventKind::Output,
            EventKind::Prompt,
            EventKind::Exit,
            EventKind::Stop,
            EventKind::Stdout,
        ];
        ALL.into_iter().find(|kind| kind.as_str() == s)
    }
}

/// An entry in the event log. `seq` is monotonic and session-unique;
/// agents pass it as `--since` to query incrementally. Because `seq`
/// keeps advancing even after old entries are evicted from the ring,
/// a gap between a requested `since` and the first returned entry
/// reveals that events were dropped.
#[derive(Clone, Debug)]
pub struct EventEntry {
    // 1-based, assigned by EventLog::push; never reused within a session.
    pub seq: u64,
    /// Milliseconds since the session started (EventLog's `started` instant).
    pub ts_ms: u64,
    // Which channel event this records (output/prompt/exit/stop/stdout).
    pub kind: EventKind,
    /// Raw bytes. Empty for Prompt/Exit.
    pub bytes: Vec<u8>,
}

/// Bounded ring buffer of events. Capped at `MAX_EVENTS`; older entries
/// are dropped silently. The `last_seq` counter keeps incrementing even
/// across drops so agents can tell if they missed events.
const MAX_EVENTS: usize = 2048;

struct EventLog {
    // Ring storage: oldest entry at the front, evicted via pop_front.
    entries: VecDeque<EventEntry>,
    // Highest seq ever assigned — NOT the count of retained entries.
    last_seq: u64,
    // Session start; EventEntry::ts_ms is measured from this instant.
    started: Instant,
}

impl EventLog {
    fn new() -> Self {
        Self {
            entries: VecDeque::with_capacity(MAX_EVENTS),
            last_seq: 0,
            started: Instant::now(),
        }
    }

    fn push(&mut self, kind: EventKind, bytes: Vec<u8>) {
        self.last_seq += 1;
        let ts_ms = self.started.elapsed().as_millis() as u64;
        if self.entries.len() == MAX_EVENTS {
            self.entries.pop_front();
        }
        self.entries.push_back(EventEntry {
            seq: self.last_seq,
            ts_ms,
            kind,
            bytes,
        });
    }

    /// Return entries with `seq > since`. `since = 0` returns the full log.
    fn since(&self, since: u64) -> Vec<EventEntry> {
        self.entries
            .iter()
            .filter(|e| e.seq > since)
            .cloned()
            .collect()
    }
}

/// Shared event-log handle. Hands out snapshots of the log and supports
/// blocking until new events arrive via an internal `Condvar`. Cloning
/// the handle is an Arc bump; all clones see the same log.
///
/// The handle is a separate type from `DebuggerProcess` so daemon
/// handlers can clone it, drop the session mutex, and wait on the
/// condvar without blocking other commands.
#[derive(Clone)]
pub struct LogHandle(Arc<(Mutex<EventLog>, Condvar)>);

/// `Default` delegates to [`LogHandle::new`] (fresh empty log) so the
/// handle composes with derived-`Default` containers and `mem::take`.
impl Default for LogHandle {
    fn default() -> Self {
        Self::new()
    }
}

impl LogHandle {
    /// Create a handle over a fresh, empty event log.
    pub fn new() -> Self {
        Self(Arc::new((Mutex::new(EventLog::new()), Condvar::new())))
    }

    /// Append an event and notify all waiters. Used both by the reader
    /// thread (Output / Prompt / Exit) and by the daemon (Stop, emitted
    /// after parse_hit succeeds on an execution command's output).
    pub fn push(&self, kind: EventKind, bytes: Vec<u8>) {
        let (lock, cvar) = &*self.0;
        lock.lock().unwrap().push(kind, bytes);
        cvar.notify_all();
    }

    /// Non-blocking snapshot of entries with `seq > since`.
    pub fn since(&self, since: u64) -> Vec<EventEntry> {
        self.0.0.lock().unwrap().since(since)
    }

    /// Current highest assigned seq (even if that entry was evicted).
    pub fn last_seq(&self) -> u64 {
        self.0.0.lock().unwrap().last_seq
    }

    /// Block up to `timeout` for any entry with `seq > since`. If one
    /// already exists, returns immediately. Spurious wakeups loop
    /// internally; the closure re-checks the predicate each wake.
    pub fn since_wait(&self, since: u64, timeout: Duration) -> Vec<EventEntry> {
        let (lock, cvar) = &*self.0;
        let guard = lock.lock().unwrap();
        let (guard, _result) = cvar
            .wait_timeout_while(guard, timeout, |log| log.last_seq <= since)
            .unwrap();
        guard.since(since)
    }
}

/// Transport-agnostic debugger I/O. The daemon holds a
/// `Box<dyn DebuggerIo>` and talks to the debugger through this
/// interface regardless of whether the underlying transport is a PTY,
/// a V8 Inspector WebSocket, or a DAP JSON-RPC subprocess.
///
/// All implementations must be `Send + Sync` because the daemon's
/// connection-handling threads call into them from inside a mutex.
///
/// Implementations:
///   * `DebuggerProcess` — PTY transport, default for line-oriented
///     debuggers (pdb, jdb, lldb, gdb, …).
///   * Protocol transports (coming in later steps) — Inspector, DAP.
pub trait DebuggerIo: Send + Sync {
    /// Send a command and wait for the prompt / ready state. Returns
    /// the debugger's response between command echo and the next
    /// prompt.
    fn send_and_wait(&self, cmd: &str, timeout: Duration) -> Result<String>;

    /// Drain any events that arrived asynchronously (e.g. a deferred
    /// stop banner from a prior `continue`). Non-blocking; `None`
    /// means no data had accumulated.
    fn drain_pending(&self) -> Option<String>;

    /// Wait for the initial prompt / first-ready signal after spawn.
    fn wait_for_prompt(&self, timeout: Duration) -> Result<String>;

    /// Clone the shared event-log handle. Callers drop the session
    /// mutex before waiting on the log's condvar.
    fn log(&self) -> LogHandle;

    /// PID of the process to SIGINT for `cancel` / `quit`. Protocol
    /// attach-mode transports may not have one; those use a
    /// protocol-level interrupt request instead and should override.
    fn child_pid(&self) -> Pid;

    /// Is the debugger still alive?
    fn is_alive(&self) -> bool;

    /// Graceful shutdown — send quit command then SIGKILL on timeout.
    fn quit(&self, quit_cmd: &str);

    /// Structured hit event produced by the transport's async channel
    /// (e.g. a V8 Inspector `Debugger.paused`). When `Some`, the
    /// daemon uses it directly and skips the text-based `parse_hit`
    /// pipeline — no regex banner scraping, no async banner races.
    /// Returns `None` on text/PTY transports; the `parse_hit` path
    /// handles them.
    ///
    /// Contract: each call takes the pending event out. Callers drain
    /// once per execution command, immediately after the command
    /// returns. Returning `Some` more than once per actual pause is a
    /// transport bug.
    fn pending_hit(&self) -> Option<crate::backend::canonical::HitEvent> {
        // Default: no structured channel — text transports fall through.
        None
    }

    /// Transport-direct dispatch for structured canonical requests.
    /// Transports that can consume the structured form (DAP, Inspector)
    /// return `Some(Ok/Err)` after servicing the request; returning
    /// `None` signals "not handled — fall back to `send_and_wait` on the
    /// formatted native command." PTY backends always return `None`.
    fn dispatch_structured(
        &self,
        _req: &crate::backend::canonical::CanonicalReq,
        _timeout: Duration,
    ) -> Option<Result<String>> {
        // Default: decline, so callers use the text path.
        None
    }
}

/// A debugger process running in a PTY. The reader thread owns the
/// read side of the master fd; the daemon holds this struct and writes
/// commands + consumes events from the channel.
pub struct DebuggerProcess {
    // Write side of the PTY; closed automatically when this struct drops.
    master: OwnedFd,
    // Forked debugger child; signalled on quit/cancel and in Drop.
    child_pid: Pid,
    /// Wrapped in a Mutex so `DebuggerProcess: Sync`. The Receiver
    /// itself isn't `Sync`, but all access paths hold the daemon's
    /// session lock, so contention here is zero.
    rx: Mutex<Receiver<PtyEvent>>,
    /// Shared handle to the reader's event log. Clonable — daemon
    /// handlers grab their own clone so they can wait on the condvar
    /// without pinning the session mutex.
    log: LogHandle,
    // Flag polled by the reader loop (~every 100ms) to request exit.
    shutdown: Arc<AtomicBool>,
    // Reader thread handle; taken and joined in Drop.
    reader: Option<JoinHandle<()>>,
    // Compiled prompt pattern, used to strip prompt echoes from output.
    prompt_re: Regex,
}

impl DebuggerProcess {
    /// Canonical status string returned once the debuggee is gone.
    /// Centralized so every exit path — the pre-write liveness check,
    /// a write failure against a closed PTY, and a reader-thread
    /// disconnect — reports the exact same agent-recognizable message
    /// (previously three hand-duplicated copies that could drift).
    const DEAD_SESSION_MSG: &'static str =
        "(debuggee has exited — live inspection is over, but captured state is \
still available: `dbg hits <loc>`, `dbg stack`, `dbg locals`, `dbg cross <sym>`, \
`dbg sessions`. Start a fresh session with `dbg start` when ready.)";

    /// Spawn a debugger in a PTY and start the reader thread.
    ///
    /// * `bin` / `args` — debugger executable and its arguments.
    /// * `env_extra` — extra environment variables set in the child
    ///   before exec; `TERM=dumb` is always added on top.
    /// * `prompt_pattern` — regex marking "debugger ready for input".
    pub fn spawn(
        bin: &str,
        args: &[String],
        env_extra: &[(String, String)],
        prompt_pattern: &str,
    ) -> Result<Self> {
        let OpenptyResult { master, slave } = openpty(None, None)?;

        // Safety: fork is unsafe because it duplicates the process.
        let fork_result = unsafe { fork() }?;
        match fork_result {
            ForkResult::Child => {
                drop(master);
                setsid().ok();

                // Wire the PTY slave to stdin/stdout/stderr, then drop
                // the original descriptor if it landed above fd 2.
                let slave_fd = slave.as_raw_fd();
                dup2(slave_fd, 0).ok();
                dup2(slave_fd, 1).ok();
                dup2(slave_fd, 2).ok();
                if slave_fd > 2 {
                    close(slave_fd).ok();
                }

                // Mutate the child's environment in place then exec.
                // Safe: the child is single-threaded immediately after fork().
                // Portable across Linux and macOS (macOS libc has no execvpe).
                unsafe {
                    for (k, v) in env_extra {
                        std::env::set_var(k, v);
                    }
                    std::env::set_var("TERM", "dumb");
                }

                let c_bin =
                    std::ffi::CString::new(bin).unwrap_or_else(|_| std::process::exit(127));
                let mut c_args = vec![c_bin.clone()];
                for a in args {
                    c_args.push(
                        std::ffi::CString::new(a.as_str())
                            .unwrap_or_else(|_| std::process::exit(127)),
                    );
                }

                execvp(&c_bin, &c_args).ok();
                // Exec failed (missing binary, bad perms): 127 mirrors
                // the shell's command-not-found convention.
                std::process::exit(127);
            }
            ForkResult::Parent { child } => {
                drop(slave);

                let prompt_re =
                    Regex::new(prompt_pattern).context("invalid prompt pattern")?;
                let reader_prompt_re = prompt_re.clone();
                let master_fd = master.as_raw_fd();
                let (tx, rx) = mpsc::channel::<PtyEvent>();
                let shutdown = Arc::new(AtomicBool::new(false));
                let reader_shutdown = shutdown.clone();
                let log = LogHandle::new();
                let reader_log = log.clone();

                let reader = std::thread::Builder::new()
                    .name("dbg-pty-reader".into())
                    .spawn(move || {
                        reader_loop(
                            master_fd,
                            reader_prompt_re,
                            tx,
                            reader_shutdown,
                            reader_log,
                        )
                    })
                    .context("failed to spawn reader thread")?;

                Ok(Self {
                    master,
                    child_pid: child,
                    rx: Mutex::new(rx),
                    log,
                    shutdown,
                    reader: Some(reader),
                    prompt_re,
                })
            }
        }
    }

    /// Write bytes to the master fd without creating a File (which would
    /// close the fd on drop or panic). Loops on short writes and EINTR.
    fn write_master(&self, data: &[u8]) -> Result<()> {
        let fd = self.master.as_raw_fd();
        let mut written = 0;
        while written < data.len() {
            match nix::unistd::write(
                unsafe { BorrowedFd::borrow_raw(fd) },
                &data[written..],
            ) {
                Ok(n) => written += n,
                Err(nix::errno::Errno::EINTR) => continue,
                Err(e) => return Err(e.into()),
            }
        }
        Ok(())
    }

    /// Drain any events that arrived since the last `send_and_wait` or
    /// `drain_pending` call. Returns the accumulated output bytes (ANSI
    /// stripped). Non-blocking — never waits for new data.
    ///
    /// Used by the daemon at the head of each command to process stop
    /// banners that arrived asynchronously from the last execution
    /// command (e.g., node-inspect delivers `break in …` after having
    /// already ack-prompted the `cont`).
    pub fn drain_pending(&self) -> Option<String> {
        let rx = self.rx.lock().unwrap();
        let mut accumulated: Vec<u8> = Vec::new();
        let mut saw_data = false;
        loop {
            match rx.try_recv() {
                Ok(PtyEvent::Data(bytes)) => {
                    saw_data = true;
                    accumulated.extend(bytes);
                }
                // Prompts are markers only; nothing to accumulate.
                Ok(PtyEvent::Prompt) => {}
                Ok(PtyEvent::Exit) => break,
                Err(_) => break,
            }
        }
        if !saw_data {
            return None;
        }
        Some(strip_ansi(&String::from_utf8_lossy(&accumulated)))
    }

    /// Wait for the initial prompt after spawn. Returns the banner text
    /// that preceded the prompt (ANSI stripped).
    pub fn wait_for_prompt(&self, timeout: Duration) -> Result<String> {
        let rx = self.rx.lock().unwrap();
        let mut collected: Vec<u8> = Vec::new();
        let deadline = Instant::now() + timeout;
        loop {
            let remaining = deadline.saturating_duration_since(Instant::now());
            if remaining.is_zero() {
                bail!("timeout waiting for initial prompt");
            }
            match rx.recv_timeout(remaining) {
                Ok(PtyEvent::Data(bytes)) => collected.extend(bytes),
                Ok(PtyEvent::Prompt) => {
                    return Ok(strip_ansi(&String::from_utf8_lossy(&collected)));
                }
                Ok(PtyEvent::Exit) => bail!("debugger exited before producing prompt"),
                Err(RecvTimeoutError::Timeout) => {
                    bail!("timeout waiting for initial prompt")
                }
                Err(RecvTimeoutError::Disconnected) => {
                    bail!("reader thread died before initial prompt")
                }
            }
        }
    }

    /// Send a command and wait for the prompt. Returns the debugger's
    /// response between our command and the next prompt.
    ///
    /// Call sites that need to handle async stop events should call
    /// `drain_pending()` first; this method only collects events that
    /// arrive after the command is written.
    pub fn send_and_wait(&self, cmd: &str, timeout: Duration) -> Result<String> {
        // Sticky "session has exited" guard. Once the child is gone,
        // the reader-thread channel is drained/closed and the loop
        // below would bail with "reader thread disconnected" — loudly
        // and for every subsequent verb. Return a clean, recognizable
        // status instead so agents can distinguish a dead session
        // (typical after the debuggee runs to completion) from a
        // genuine protocol error.
        if !self.is_alive() {
            return Ok(Self::DEAD_SESSION_MSG.to_string());
        }
        if let Err(e) = self.write_master(format!("{cmd}\n").as_bytes()) {
            // EIO / EPIPE on write almost always means the PTY master
            // closed under us because the debugger exited between the
            // alive-check above and the write. Surface the same clean
            // sticky message rather than the raw errno.
            if !self.is_alive() {
                return Ok(Self::DEAD_SESSION_MSG.to_string());
            }
            return Err(e);
        }

        let rx = self.rx.lock().unwrap();
        let mut collected: Vec<u8> = Vec::new();
        let deadline = Instant::now() + timeout;
        loop {
            let remaining = deadline.saturating_duration_since(Instant::now());
            if remaining.is_zero() {
                bail!("timeout waiting for prompt");
            }
            match rx.recv_timeout(remaining) {
                Ok(PtyEvent::Data(bytes)) => collected.extend(bytes),
                Ok(PtyEvent::Prompt) => break,
                Ok(PtyEvent::Exit) => break,
                Err(RecvTimeoutError::Timeout) => bail!("timeout waiting for prompt"),
                Err(RecvTimeoutError::Disconnected) => {
                    // Reader thread exited — child is gone. Return the
                    // sticky status so the agent sees a consistent
                    // message regardless of which verb first noticed.
                    return Ok(Self::DEAD_SESSION_MSG.to_string());
                }
            }
        }

        // Clean up the transcript: strip ANSI, remove prompt echoes,
        // drop the echoed command line and trailing blank lines.
        let raw = String::from_utf8_lossy(&collected).to_string();
        let clean = strip_ansi(&raw);
        let no_prompts = self.prompt_re.replace_all(&clean, "");

        let lines: Vec<&str> = no_prompts.lines().collect();
        let start = if !lines.is_empty() && lines[0].contains(cmd.trim()) {
            1
        } else {
            0
        };
        let mut end = lines.len();
        while end > start && lines[end - 1].trim().is_empty() {
            end -= 1;
        }
        Ok(lines[start..end].join("\n").trim().to_string())
    }

    /// Clone a shared handle to the event log. Handlers that need to
    /// wait for new events drop the session mutex first, then call
    /// `since_wait` on the handle — otherwise a blocking wait would
    /// pin the session.
    pub fn log(&self) -> LogHandle {
        self.log.clone()
    }

    /// The PID of the child process, for out-of-band signalling (e.g.
    /// interrupting a running command from the quit handler).
    pub fn child_pid(&self) -> Pid {
        self.child_pid
    }

    /// Check if the child process is still alive. WNOHANG keeps this
    /// non-blocking; a zombie child is reaped as a side effect.
    pub fn is_alive(&self) -> bool {
        nix::sys::wait::waitpid(self.child_pid, Some(nix::sys::wait::WaitPidFlag::WNOHANG))
            .is_ok_and(|s| matches!(s, nix::sys::wait::WaitStatus::StillAlive))
    }

    /// Send quit command, give the debugger 500ms to exit gracefully,
    /// then SIGKILL if it's still around.
    pub fn quit(&self, quit_cmd: &str) {
        if self.is_alive() {
            let _ = self.write_master(format!("{quit_cmd}\n").as_bytes());
            std::thread::sleep(Duration::from_millis(500));
            if self.is_alive() {
                let _ = nix::sys::signal::kill(self.child_pid, Signal::SIGKILL);
            }
        }
    }
}

/// Blanket forwarding of the `DebuggerIo` surface onto the inherent
/// methods, letting the daemon hold a `Box<dyn DebuggerIo>` and swap in
/// other transports (Inspector, DAP) later without changing call sites.
impl DebuggerIo for DebuggerProcess {
    fn send_and_wait(&self, cmd: &str, timeout: Duration) -> Result<String> {
        Self::send_and_wait(self, cmd, timeout)
    }
    fn drain_pending(&self) -> Option<String> {
        Self::drain_pending(self)
    }
    fn wait_for_prompt(&self, timeout: Duration) -> Result<String> {
        Self::wait_for_prompt(self, timeout)
    }
    fn log(&self) -> LogHandle {
        Self::log(self)
    }
    fn child_pid(&self) -> Pid {
        Self::child_pid(self)
    }
    fn is_alive(&self) -> bool {
        Self::is_alive(self)
    }
    fn quit(&self, quit_cmd: &str) {
        Self::quit(self, quit_cmd)
    }
}

/// Reader thread entry point. Reads PTY bytes, coalesces them into
/// Output chunks at prompt boundaries, and emits events on the channel
/// and into the persistent log. Exits when the shutdown flag is set or
/// EOF. Coalescing keeps the event log readable — one Output entry per
/// "command response" instead of one per 4KB PTY read.
///
/// * `master_fd` — raw PTY master; the `DebuggerProcess` retains
///   ownership, this loop only borrows it per call.
/// * `prompt_re` — matched against the ANSI-stripped pending buffer to
///   detect "debugger ready".
/// * `tx` — channel to the daemon; a closed receiver ends the loop.
/// * `shutdown` — polled every poll tick (≤100ms) for cooperative exit.
/// * `log` — persistent event log shared with the daemon.
fn reader_loop(
    master_fd: std::os::fd::RawFd,
    prompt_re: Regex,
    tx: Sender<PtyEvent>,
    shutdown: Arc<AtomicBool>,
    log: LogHandle,
) {
    let mut buf = [0u8; 4096];
    // Pending output bytes not yet emitted. Flushed to a single Output
    // event when a prompt is detected, when it grows past 64KB, or on
    // exit.
    let mut pending: Vec<u8> = Vec::new();

    // Emit pending bytes as one Output event on both the log and the
    // channel. Returns false when the receiver is gone (daemon dropped).
    let flush_output =
        |pending: &mut Vec<u8>, tx: &Sender<PtyEvent>, log: &LogHandle| -> bool {
            if pending.is_empty() {
                return true;
            }
            let bytes = std::mem::take(pending);
            log.push(EventKind::Output, bytes.clone());
            tx.send(PtyEvent::Data(bytes)).is_ok()
        };

    // Emit a byte-less marker event (Prompt or Exit) to log + channel.
    let emit_marker = |kind: EventKind, tx: &Sender<PtyEvent>, log: &LogHandle| -> bool {
        log.push(kind, Vec::new());
        let ev = match kind {
            EventKind::Prompt => PtyEvent::Prompt,
            EventKind::Exit => PtyEvent::Exit,
            // The reader only emits Prompt/Exit via this helper.
            // Output carries bytes so it goes through flush_output.
            // Stop is emitted by the daemon, never by the reader.
            EventKind::Output | EventKind::Stop | EventKind::Stdout => {
                unreachable!("emit_marker called with {kind:?}")
            }
        };
        tx.send(ev).is_ok()
    };

    loop {
        if shutdown.load(Ordering::Relaxed) {
            return;
        }

        // Poll with a 100ms timeout so the shutdown flag is re-checked
        // regularly even when the PTY is silent.
        let borrowed = unsafe { BorrowedFd::borrow_raw(master_fd) };
        let pollfd = PollFd::new(borrowed, PollFlags::POLLIN);
        match poll(&mut [pollfd], 100u16) {
            Ok(0) => continue,
            Ok(_) => {}
            Err(nix::errno::Errno::EINTR) => continue,
            Err(_) => {
                // Poll failure: flush what we have and report Exit.
                let _ = flush_output(&mut pending, &tx, &log);
                let _ = emit_marker(EventKind::Exit, &tx, &log);
                return;
            }
        }

        let n = match nix::unistd::read(master_fd, &mut buf) {
            Ok(0) => {
                // EOF — the child closed its side of the PTY.
                let _ = flush_output(&mut pending, &tx, &log);
                let _ = emit_marker(EventKind::Exit, &tx, &log);
                return;
            }
            Ok(n) => n,
            Err(nix::errno::Errno::EINTR) => continue,
            Err(_) => {
                let _ = flush_output(&mut pending, &tx, &log);
                let _ = emit_marker(EventKind::Exit, &tx, &log);
                return;
            }
        };

        pending.extend_from_slice(&buf[..n]);

        // Prompt detection operates on the ANSI-stripped view of the
        // entire pending buffer. Cheap enough since pending is capped.
        let pending_str = String::from_utf8_lossy(&pending);
        let cleaned = strip_ansi(&pending_str);
        if prompt_re.is_match(&cleaned) {
            if !flush_output(&mut pending, &tx, &log) {
                return;
            }
            if !emit_marker(EventKind::Prompt, &tx, &log) {
                return;
            }
        } else if pending.len() > 64 * 1024 {
            // Safety valve: stream large outputs to the log without
            // waiting for a prompt. Agents tailing via `dbg events`
            // still see progress on long-running commands.
            if !flush_output(&mut pending, &tx, &log) {
                return;
            }
        }
    }
}

/// Remove the ANSI escape sequences matched by `ANSI_RE`. When the
/// input contains no ESC byte — the common case for `TERM=dumb`
/// debuggers — the regex pass is skipped and the string is returned
/// as-is (owned).
fn strip_ansi(s: &str) -> String {
    match s.find('\x1b') {
        None => s.to_owned(),
        Some(_) => ANSI_RE.replace_all(s, "").into_owned(),
    }
}

impl Drop for DebuggerProcess {
    fn drop(&mut self) {
        // Ask the reader loop to stop (it checks this flag on every
        // poll tick), then nudge the child; the master fd closes when
        // the OwnedFd field drops after this body runs.
        self.shutdown.store(true, Ordering::Relaxed);
        let _ = nix::sys::signal::kill(self.child_pid, Signal::SIGTERM);
        if let Some(h) = self.reader.take() {
            // Best-effort: reader polls shutdown flag every 100ms.
            let _ = h.join();
        }
    }
}