Skip to main content

aft/bash_background/
registry.rs

1use std::collections::{HashMap, VecDeque};
2use std::fs;
3use std::path::{Path, PathBuf};
4use std::process::{Child, Command, Stdio};
5use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
6#[cfg(unix)]
7use std::sync::OnceLock;
8use std::sync::{Arc, Mutex};
9use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
10
11use serde::Serialize;
12
13use crate::context::SharedProgressSender;
14use crate::protocol::{BashCompletedFrame, BashLongRunningFrame, PushFrame};
15
16#[cfg(unix)]
17use std::os::unix::process::CommandExt;
18#[cfg(windows)]
19use std::os::windows::process::CommandExt;
20
21use super::buffer::BgBuffer;
22use super::persistence::{
23    create_capture_file, delete_task_bundle, read_exit_marker, read_task, session_tasks_dir,
24    task_paths, unix_millis, update_task, write_kill_marker_if_absent, write_task, ExitMarker,
25    PersistedTask, TaskPaths,
26};
27use super::process::is_process_alive;
28#[cfg(unix)]
29use super::process::terminate_pgid;
30#[cfg(windows)]
31use super::process::terminate_pid;
32use super::{BgTaskInfo, BgTaskStatus};
33// Note: `resolve_windows_shell` is no longer imported at module scope —
34// production code in `spawn_detached_child` uses `shell_candidates()`
35// with retry instead, and the function remains in `windows_shell.rs`
36// for tests and as a future helper.
37
/// Default timeout for background bash tasks: 30 minutes.
/// Agents can override per-call via the `timeout` parameter (in ms).
const DEFAULT_BG_TIMEOUT: Duration = Duration::from_secs(30 * 60);
/// Persisted tasks still marked running/killing beyond this age are treated
/// as orphaned on replay and force-marked `Killed` (reason "orphaned (>24h)").
const STALE_RUNNING_AFTER: Duration = Duration::from_secs(24 * 60 * 60);
/// Grace period before terminal, delivery-acknowledged task bundles become
/// eligible for deletion in `maybe_gc_persisted` (keyed off the metadata
/// file's mtime via `modified_within`).
const PERSISTED_GC_GRACE: Duration = Duration::from_secs(24 * 60 * 60);
/// Retention for quarantined (corrupt-metadata) bundles — presumably enforced
/// by `gc_quarantine` (not visible in this chunk); confirm there.
const QUARANTINE_GC_GRACE: Duration = Duration::from_secs(30 * 24 * 60 * 60);

/// Tail-bytes captured into BashCompletedFrame and BgCompletion records so the
/// plugin can inline a preview into the system-reminder. Sized for ~3-4 lines
/// of typical command output (git status, test results, exit messages) — short
/// enough that round-tripping multiple completions in one reminder stays well
/// under the model's context budget but long enough that most successful runs
/// don't need a follow-up `bash_status` call.
const BG_COMPLETION_PREVIEW_BYTES: usize = 300;
52
/// Record of a background task reaching a terminal state, queued for delivery
/// to consumers via push frames or the `drain_completions*` methods.
#[derive(Debug, Clone, Serialize)]
pub struct BgCompletion {
    /// Identifier of the task that completed.
    pub task_id: String,
    /// Intentionally omitted from serialized completion payloads: push frames
    /// carry `session_id` at the BashCompletedFrame envelope level for routing.
    #[serde(skip_serializing)]
    pub session_id: String,
    /// Terminal status the task ended in.
    pub status: BgTaskStatus,
    /// Process exit code when one was recorded; `None` when unavailable
    /// (e.g. killed, or no exit marker was written).
    pub exit_code: Option<i32>,
    /// The shell command the task ran.
    pub command: String,
    /// Tail of stdout+stderr (≤300 bytes) at completion time, read once and
    /// cached so push-frame consumers and `bash_drain_completions` callers see
    /// the same preview without racing against later output rotation. Empty
    /// when not captured (e.g., persisted task seen on startup before buffer
    /// reattachment).
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub output_preview: String,
    /// True when the captured tail is shorter than the actual output (because
    /// rotation occurred or the output exceeds the preview cap). Plugins use
    /// this to render a `…` prefix and signal that `bash_status` would return
    /// more.
    #[serde(default, skip_serializing_if = "is_false")]
    pub output_truncated: bool,
}
77
/// Serde `skip_serializing_if` predicate: keep `false` flags out of payloads.
fn is_false(v: &bool) -> bool {
    match v {
        true => false,
        false => true,
    }
}
81
/// Point-in-time, serializable view of a task as returned by `status`,
/// `status_relaxed`, and `list`.
#[derive(Debug, Clone, Serialize)]
pub struct BgTaskSnapshot {
    /// Core task info (flattened into the snapshot's top level when
    /// serialized).
    #[serde(flatten)]
    pub info: BgTaskInfo,
    /// Process exit code when known.
    pub exit_code: Option<i32>,
    /// OS pid of the spawned child, when one was recorded.
    pub child_pid: Option<u32>,
    /// Working directory the command ran in.
    pub workdir: String,
    /// Tail of captured output; compressed in place for terminal tasks by
    /// `maybe_compress_snapshot` when the task opted in.
    pub output_preview: String,
    /// True when `output_preview` omits earlier output.
    pub output_truncated: bool,
    /// Path to the stdout capture file, if available.
    pub output_path: Option<String>,
    /// Path to the stderr capture file, if available.
    pub stderr_path: Option<String>,
}
94
/// Cheaply-cloneable handle to the background bash task registry; all shared
/// state lives behind the `Arc<RegistryInner>`.
#[derive(Clone)]
pub struct BgTaskRegistry {
    pub(crate) inner: Arc<RegistryInner>,
}
99
/// Shared registry state behind `BgTaskRegistry`'s `Arc`.
pub(crate) struct RegistryInner {
    /// Live (registered/rehydrated) tasks keyed by task id.
    pub(crate) tasks: Mutex<HashMap<String, Arc<BgTask>>>,
    /// FIFO of terminal-state completions awaiting drain/delivery.
    pub(crate) completions: Mutex<VecDeque<BgCompletion>>,
    /// Sender used to push frames to consumers (completion/long-running
    /// notifications).
    pub(crate) progress_sender: SharedProgressSender,
    /// Guards one-time watchdog startup — presumably checked-and-set inside
    /// `start_watchdog` (not visible here); confirm there.
    watchdog_started: AtomicBool,
    /// Shutdown flag — NOTE(review): looks like it is polled by the watchdog
    /// loop; confirm in the watchdog implementation.
    pub(crate) shutdown: AtomicBool,
    /// Whether long-running reminders are emitted (default: true).
    pub(crate) long_running_reminder_enabled: AtomicBool,
    /// Interval between long-running reminders, in ms (default: 600 000).
    pub(crate) long_running_reminder_interval_ms: AtomicU64,
    /// Set on the first `replay_session` call to run persisted-bundle GC
    /// exactly once.
    persisted_gc_started: AtomicBool,
    /// Test-only counter of `maybe_gc_persisted` invocations.
    #[cfg(test)]
    persisted_gc_runs: AtomicU64,
    /// Output compression callback. Set by `AppContext` after construction.
    /// Takes (command, raw_output) and returns compressed text. Called from
    /// the watchdog thread when a task reaches a terminal state and from
    /// `bash_status`/`list` snapshot reads. When `None`, output is returned
    /// uncompressed.
    pub(crate) compressor: Mutex<Option<Box<dyn Fn(&str, String) -> String + Send + Sync>>>,
}
118
/// One background task tracked by the registry (either spawned in-process or
/// rehydrated from persisted metadata).
pub(crate) struct BgTask {
    /// Unique id generated at spawn time.
    pub(crate) task_id: String,
    /// Owning session; scopes lookups and completion routing.
    pub(crate) session_id: String,
    /// On-disk bundle locations (JSON metadata, stdout/stderr captures,
    /// exit marker).
    pub(crate) paths: TaskPaths,
    /// In-process registration time — presumably used for timeout/reminder
    /// age checks by the watchdog (not visible here); confirm there.
    pub(crate) started: Instant,
    /// When the last long-running reminder fired (`None` = never).
    pub(crate) last_reminder_at: Mutex<Option<Instant>>,
    /// When the task reached a terminal state; drives the `cleanup_finished`
    /// expiry check.
    pub(crate) terminal_at: Mutex<Option<Instant>>,
    /// Mutable task state: metadata mirror, child handle, output buffer.
    pub(crate) state: Mutex<BgTaskState>,
}
128
/// Lock-protected mutable portion of a `BgTask`.
pub(crate) struct BgTaskState {
    /// In-memory mirror of the persisted JSON metadata.
    pub(crate) metadata: PersistedTask,
    /// Live child handle; `None` when there is no process to wait on —
    /// spawn sets `Some`, rehydration paths presumably leave it `None`
    /// (confirm in `insert_rehydrated_task`).
    pub(crate) child: Option<Child>,
    /// Set to `false` at spawn; NOTE(review): looks like rehydrated tasks set
    /// this to mark there is no owned `Child` — confirm where it is written.
    pub(crate) detached: bool,
    /// Tail reader over the stdout/stderr capture files.
    pub(crate) buffer: BgBuffer,
}
135
136impl BgTaskRegistry {
137    pub fn new(progress_sender: SharedProgressSender) -> Self {
138        Self {
139            inner: Arc::new(RegistryInner {
140                tasks: Mutex::new(HashMap::new()),
141                completions: Mutex::new(VecDeque::new()),
142                progress_sender,
143                watchdog_started: AtomicBool::new(false),
144                shutdown: AtomicBool::new(false),
145                long_running_reminder_enabled: AtomicBool::new(true),
146                long_running_reminder_interval_ms: AtomicU64::new(600_000),
147                persisted_gc_started: AtomicBool::new(false),
148                #[cfg(test)]
149                persisted_gc_runs: AtomicU64::new(0),
150                compressor: Mutex::new(None),
151            }),
152        }
153    }
154
155    /// Install the output-compression callback. Called by `main.rs` after
156    /// `AppContext` is constructed so that snapshot/completion paths can
157    /// invoke `compress::compress_with_registry` without holding a context
158    /// reference. When called multiple times, the latest installation wins.
159    pub fn set_compressor<F>(&self, compressor: F)
160    where
161        F: Fn(&str, String) -> String + Send + Sync + 'static,
162    {
163        if let Ok(mut slot) = self.inner.compressor.lock() {
164            *slot = Some(Box::new(compressor));
165        }
166    }
167
168    /// Apply the installed compressor (if any) to `output`. Returns `output`
169    /// untouched when no compressor is installed.
170    pub(crate) fn compress_output(&self, command: &str, output: String) -> String {
171        let Ok(slot) = self.inner.compressor.lock() else {
172            return output;
173        };
174        match slot.as_ref() {
175            Some(compressor) => compressor(command, output),
176            None => output,
177        }
178    }
179
180    pub fn configure_long_running_reminders(&self, enabled: bool, interval_ms: u64) {
181        self.inner
182            .long_running_reminder_enabled
183            .store(enabled, Ordering::SeqCst);
184        self.inner
185            .long_running_reminder_interval_ms
186            .store(interval_ms, Ordering::SeqCst);
187    }
188
    /// Spawn `command` as a detached background task (Unix variant).
    ///
    /// Flow: enforce the `max_running` cap, persist `Starting` metadata under
    /// `storage_dir`, pre-create the capture files, launch the child via
    /// `spawn_detached_child`, then persist `Running` metadata and register
    /// the in-memory task. On spawn failure the partial on-disk bundle is
    /// deleted before the error is returned.
    ///
    /// `timeout` defaults to [`DEFAULT_BG_TIMEOUT`] (30 min) when `None`.
    ///
    /// # Errors
    /// Returns a human-readable message when the running-task cap is hit or
    /// when any persistence/spawn step fails.
    #[cfg(unix)]
    #[allow(clippy::too_many_arguments)]
    pub fn spawn(
        &self,
        command: &str,
        session_id: String,
        workdir: PathBuf,
        env: HashMap<String, String>,
        timeout: Option<Duration>,
        storage_dir: PathBuf,
        max_running: usize,
        notify_on_completion: bool,
        compressed: bool,
        project_root: Option<PathBuf>,
    ) -> Result<String, String> {
        // Lazy watchdog start — presumably idempotent via `watchdog_started`.
        self.start_watchdog();

        let running = self.running_count();
        if running >= max_running {
            return Err(format!(
                "background bash task limit exceeded: {running} running (max {max_running})"
            ));
        }

        // `None` falls back to the 30-minute default; stored in ms on disk.
        let timeout = timeout.or(Some(DEFAULT_BG_TIMEOUT));
        let timeout_ms = timeout.map(|timeout| timeout.as_millis() as u64);
        let task_id = self.generate_unique_task_id()?;
        let paths = task_paths(&storage_dir, &session_id, &task_id);
        fs::create_dir_all(&paths.dir)
            .map_err(|e| format!("failed to create background task dir: {e}"))?;

        // Persist `Starting` before spawning so a crash in between leaves a
        // record `replay_session` can reconcile (it marks such tasks Failed).
        let mut metadata = PersistedTask::starting(
            task_id.clone(),
            session_id.clone(),
            command.to_string(),
            workdir.clone(),
            project_root,
            timeout_ms,
            notify_on_completion,
            compressed,
        );
        write_task(&paths.json, &metadata)
            .map_err(|e| format!("failed to persist background task metadata: {e}"))?;

        // Pre-create capture files so the watchdog/buffer can always
        // open them for reading. The spawn helper opens its own handles
        // per attempt because each `Command::spawn()` consumes them.
        create_capture_file(&paths.stdout)
            .map_err(|e| format!("failed to create stdout capture file: {e}"))?;
        create_capture_file(&paths.stderr)
            .map_err(|e| format!("failed to create stderr capture file: {e}"))?;

        let child = match spawn_detached_child(command, &paths, &workdir, &env) {
            Ok(child) => child,
            Err(error) => {
                crate::slog_warn!("failed to spawn background bash task {task_id}; deleting partial bundle: {error}");
                let _ = delete_task_bundle(&paths);
                return Err(error);
            }
        };

        // The child's pid doubles as its pgid — assumes `spawn_detached_child`
        // puts the child in its own process group; TODO confirm there.
        let child_pid = child.id();
        metadata.mark_running(child_pid, child_pid as i32);
        write_task(&paths.json, &metadata)
            .map_err(|e| format!("failed to persist running background task metadata: {e}"))?;

        let task = Arc::new(BgTask {
            task_id: task_id.clone(),
            session_id,
            paths: paths.clone(),
            started: Instant::now(),
            last_reminder_at: Mutex::new(None),
            terminal_at: Mutex::new(None),
            state: Mutex::new(BgTaskState {
                metadata,
                child: Some(child),
                detached: false,
                buffer: BgBuffer::new(paths.stdout.clone(), paths.stderr.clone()),
            }),
        });

        self.inner
            .tasks
            .lock()
            .map_err(|_| "background task registry lock poisoned".to_string())?
            .insert(task_id.clone(), task);

        Ok(task_id)
    }
278
    /// Spawn `command` as a detached background task (Windows variant).
    ///
    /// Mirrors the Unix `spawn`: enforce the `max_running` cap, persist
    /// `Starting` metadata, pre-create capture files, launch the child via
    /// `spawn_detached_child`, then persist `Running` metadata and register
    /// the in-memory task. On spawn failure the partial bundle is deleted.
    ///
    /// `timeout` defaults to [`DEFAULT_BG_TIMEOUT`] (30 min) when `None`.
    ///
    /// # Errors
    /// Returns a human-readable message when the running-task cap is hit or
    /// when any persistence/spawn step fails.
    #[cfg(windows)]
    #[allow(clippy::too_many_arguments)]
    pub fn spawn(
        &self,
        command: &str,
        session_id: String,
        workdir: PathBuf,
        env: HashMap<String, String>,
        timeout: Option<Duration>,
        storage_dir: PathBuf,
        max_running: usize,
        notify_on_completion: bool,
        compressed: bool,
        project_root: Option<PathBuf>,
    ) -> Result<String, String> {
        // Lazy watchdog start — presumably idempotent via `watchdog_started`.
        self.start_watchdog();

        let running = self.running_count();
        if running >= max_running {
            return Err(format!(
                "background bash task limit exceeded: {running} running (max {max_running})"
            ));
        }

        // `None` falls back to the 30-minute default; stored in ms on disk.
        let timeout = timeout.or(Some(DEFAULT_BG_TIMEOUT));
        let timeout_ms = timeout.map(|timeout| timeout.as_millis() as u64);
        let task_id = self.generate_unique_task_id()?;
        let paths = task_paths(&storage_dir, &session_id, &task_id);
        fs::create_dir_all(&paths.dir)
            .map_err(|e| format!("failed to create background task dir: {e}"))?;

        // Persist `Starting` before spawning so a crash in between leaves a
        // record `replay_session` can reconcile (it marks such tasks Failed).
        let mut metadata = PersistedTask::starting(
            task_id.clone(),
            session_id.clone(),
            command.to_string(),
            workdir.clone(),
            project_root,
            timeout_ms,
            notify_on_completion,
            compressed,
        );
        write_task(&paths.json, &metadata)
            .map_err(|e| format!("failed to persist background task metadata: {e}"))?;

        // Capture files are pre-created so the watchdog/buffer can always
        // open them for reading even if the child hasn't written anything
        // yet. The spawn helper opens its own handles per attempt because
        // each `Command::spawn()` consumes them, and on Windows we may
        // retry across multiple shell candidates if the first one fails.
        create_capture_file(&paths.stdout)
            .map_err(|e| format!("failed to create stdout capture file: {e}"))?;
        create_capture_file(&paths.stderr)
            .map_err(|e| format!("failed to create stderr capture file: {e}"))?;

        let child = match spawn_detached_child(command, &paths, &workdir, &env) {
            Ok(child) => child,
            Err(error) => {
                crate::slog_warn!("failed to spawn background bash task {task_id}; deleting partial bundle: {error}");
                let _ = delete_task_bundle(&paths);
                return Err(error);
            }
        };

        // Unlike the Unix path (which uses `mark_running` with a pgid),
        // Windows has no Unix-style process groups, so fields are set
        // directly and `pgid` stays `None`.
        let child_pid = child.id();
        metadata.status = BgTaskStatus::Running;
        metadata.child_pid = Some(child_pid);
        metadata.pgid = None;
        write_task(&paths.json, &metadata)
            .map_err(|e| format!("failed to persist running background task metadata: {e}"))?;

        let task = Arc::new(BgTask {
            task_id: task_id.clone(),
            session_id,
            paths: paths.clone(),
            started: Instant::now(),
            last_reminder_at: Mutex::new(None),
            terminal_at: Mutex::new(None),
            state: Mutex::new(BgTaskState {
                metadata,
                child: Some(child),
                detached: false,
                buffer: BgBuffer::new(paths.stdout.clone(), paths.stderr.clone()),
            }),
        });

        self.inner
            .tasks
            .lock()
            .map_err(|_| "background task registry lock poisoned".to_string())?
            .insert(task_id.clone(), task);

        Ok(task_id)
    }
372
    /// Rehydrate persisted tasks for `session_id` from `storage_dir`,
    /// reconciling each task's on-disk state with observable evidence
    /// (exit marker, process liveness, staleness). The first call also runs
    /// a one-time GC of old persisted bundles.
    ///
    /// Reconciliation by persisted status:
    /// - `Starting`: the spawn never completed — mark `Failed`.
    /// - `Running`/`Killing`: stale metadata → `Killed` ("orphaned (>24h)");
    ///   exit marker present → terminal state from the marker; `Killing`
    ///   without a marker → `Killed`; pid dead without a marker → `Failed`;
    ///   otherwise rehydrate as a live task.
    /// - Already terminal: enqueue its completion (if needed) and rehydrate.
    pub fn replay_session(&self, storage_dir: &Path, session_id: &str) -> Result<(), String> {
        self.start_watchdog();
        // One-time GC: `swap` guarantees at most one caller runs it.
        if !self.inner.persisted_gc_started.swap(true, Ordering::SeqCst) {
            if let Err(error) = self.maybe_gc_persisted(storage_dir) {
                crate::slog_warn!("failed to GC persisted background bash tasks: {error}");
            }
        }
        let dir = session_tasks_dir(storage_dir, session_id);
        if !dir.exists() {
            return Ok(());
        }

        let entries = fs::read_dir(&dir)
            .map_err(|e| format!("failed to read background task dir {}: {e}", dir.display()))?;
        for entry in entries.flatten() {
            let path = entry.path();
            // Only `*.json` metadata files describe tasks; skip everything else.
            if path.extension().and_then(|extension| extension.to_str()) != Some("json") {
                continue;
            }
            // Unreadable metadata is skipped here (GC quarantines it later).
            let Ok(mut metadata) = read_task(&path) else {
                continue;
            };
            if metadata.session_id != session_id {
                continue;
            }

            let paths = task_paths(storage_dir, session_id, &metadata.task_id);
            match metadata.status {
                BgTaskStatus::Starting => {
                    // Persisted before spawn finished; the spawn never ran.
                    metadata.mark_terminal(
                        BgTaskStatus::Failed,
                        None,
                        Some("spawn aborted".to_string()),
                    );
                    let _ = write_task(&paths.json, &metadata);
                    self.enqueue_completion_if_needed(&metadata, Some(&paths), false);
                }
                BgTaskStatus::Running | BgTaskStatus::Killing => {
                    if self.running_metadata_is_stale(&metadata) {
                        // Too old to trust; force-kill and leave a marker so
                        // later readers agree on the terminal state.
                        metadata.mark_terminal(
                            BgTaskStatus::Killed,
                            None,
                            Some("orphaned (>24h)".to_string()),
                        );
                        if !paths.exit.exists() {
                            let _ = write_kill_marker_if_absent(&paths.exit);
                        }
                        let _ = write_task(&paths.json, &metadata);
                        self.enqueue_completion_if_needed(&metadata, Some(&paths), false);
                    } else if let Ok(Some(marker)) = read_exit_marker(&paths.exit) {
                        // Exit marker wins over a lingering Killing status.
                        let reason = (metadata.status == BgTaskStatus::Killing).then(|| {
                            "recovered from inconsistent killing state on replay".to_string()
                        });
                        if reason.is_some() {
                            crate::slog_warn!("background task {} had killing state with exit marker; preferring marker",
                            metadata.task_id);
                        }
                        metadata = terminal_metadata_from_marker(metadata, marker, reason);
                        let _ = write_task(&paths.json, &metadata);
                        self.enqueue_completion_if_needed(&metadata, Some(&paths), false);
                    } else if metadata.status == BgTaskStatus::Killing {
                        // Kill was requested but never confirmed; settle it now.
                        if !paths.exit.exists() {
                            let _ = write_kill_marker_if_absent(&paths.exit);
                        }
                        metadata.mark_terminal(
                            BgTaskStatus::Killed,
                            None,
                            Some("recovered from inconsistent killing state on replay".to_string()),
                        );
                        let _ = write_task(&paths.json, &metadata);
                        self.enqueue_completion_if_needed(&metadata, Some(&paths), false);
                    } else if metadata.child_pid.is_some_and(|pid| !is_process_alive(pid)) {
                        // Process died without writing an exit marker.
                        metadata.mark_terminal(
                            BgTaskStatus::Failed,
                            None,
                            Some("process exited without exit marker".to_string()),
                        );
                        let _ = write_task(&paths.json, &metadata);
                        self.enqueue_completion_if_needed(&metadata, Some(&paths), false);
                    } else {
                        // Still genuinely running: track it again in-memory.
                        self.insert_rehydrated_task(metadata, paths, true)?;
                    }
                }
                _ if metadata.status.is_terminal() => {
                    // Borrow `paths` for the completion enqueue BEFORE
                    // `insert_rehydrated_task` consumes it. The completion
                    // helper only reads from `paths` (stdout/stderr/exit) to
                    // reconstruct a tail preview, so it must see the same
                    // paths the rehydrated task will own.
                    self.enqueue_completion_if_needed(&metadata, Some(&paths), false);
                    self.insert_rehydrated_task(metadata, paths, true)?;
                }
                _ => {}
            }
        }

        Ok(())
    }
471
472    pub fn status(
473        &self,
474        task_id: &str,
475        session_id: &str,
476        project_root: Option<&Path>,
477        storage_dir: Option<&Path>,
478        preview_bytes: usize,
479    ) -> Option<BgTaskSnapshot> {
480        let mut task = self.task_for_session(task_id, session_id);
481        if task.is_none() {
482            if let Some(storage_dir) = storage_dir {
483                let _ = self.replay_session(storage_dir, session_id);
484                task = self.task_for_session(task_id, session_id);
485            }
486        }
487        let Some(task) = task else {
488            return self.status_relaxed(
489                task_id,
490                session_id,
491                project_root?,
492                storage_dir?,
493                preview_bytes,
494            );
495        };
496        let _ = self.poll_task(&task);
497        let mut snapshot = task.snapshot(preview_bytes);
498        self.maybe_compress_snapshot(&task, &mut snapshot);
499        Some(snapshot)
500    }
501
    /// Project-root-scoped fallback lookup: scan every session directory
    /// under `<storage_dir>/bash-tasks` for a `{task_id}.json` whose persisted
    /// `project_root` canonicalizes to `project_root`. When the task is
    /// already registered in memory, it is returned only if its in-memory
    /// project root also matches; otherwise the task is rehydrated from disk.
    fn status_relaxed_task(
        &self,
        task_id: &str,
        project_root: &Path,
        storage_dir: &Path,
    ) -> Option<Arc<BgTask>> {
        let canonical_project = canonicalized_path(project_root);
        let root = storage_dir.join("bash-tasks");
        let entries = fs::read_dir(&root).ok()?;
        for entry in entries.flatten() {
            let dir = entry.path();
            if !dir.is_dir() {
                continue;
            }
            let path = dir.join(format!("{task_id}.json"));
            if !path.exists() {
                continue;
            }
            let Ok(metadata) = read_task(&path) else {
                continue;
            };
            // Persisted project root must canonicalize to the caller's.
            let metadata_project = metadata.project_root.as_deref().map(canonicalized_path);
            if metadata_project.as_deref() != Some(canonical_project.as_path()) {
                continue;
            }
            // Prefer the in-memory task, but re-check its project root: the
            // disk copy matching is not proof the registered one does.
            if let Some(task) = self.task(task_id) {
                let matches_project = task
                    .state
                    .lock()
                    .map(|state| {
                        state
                            .metadata
                            .project_root
                            .as_deref()
                            .map(canonicalized_path)
                            .as_deref()
                            == Some(canonical_project.as_path())
                    })
                    .unwrap_or(false);
                return matches_project.then_some(task);
            }
            // Only on disk so far: rehydrate, then fetch the registered copy.
            let paths = task_paths(storage_dir, &metadata.session_id, &metadata.task_id);
            if self.insert_rehydrated_task(metadata, paths, true).is_err() {
                return None;
            }
            return self.task(task_id);
        }
        None
    }
551
552    pub(super) fn status_relaxed(
553        &self,
554        task_id: &str,
555        _session_id: &str,
556        project_root: &Path,
557        storage_dir: &Path,
558        preview_bytes: usize,
559    ) -> Option<BgTaskSnapshot> {
560        let task = self.status_relaxed_task(task_id, project_root, storage_dir)?;
561        let _ = self.poll_task(&task);
562        let mut snapshot = task.snapshot(preview_bytes);
563        self.maybe_compress_snapshot(&task, &mut snapshot);
564        Some(snapshot)
565    }
566
    /// Garbage-collect persisted task bundles under `<storage_dir>/bash-tasks`.
    ///
    /// Deletes a bundle only when its metadata is terminal, its completion was
    /// delivered, and the JSON file's mtime is older than
    /// [`PERSISTED_GC_GRACE`]. Metadata that fails to parse is quarantined
    /// rather than deleted. Finishes by GC'ing the quarantine directory.
    /// Returns the number of bundles deleted.
    pub fn maybe_gc_persisted(&self, storage_dir: &Path) -> Result<usize, String> {
        #[cfg(test)]
        self.inner.persisted_gc_runs.fetch_add(1, Ordering::SeqCst);

        let mut deleted = 0usize;

        let root = storage_dir.join("bash-tasks");
        if root.exists() {
            let session_dirs = fs::read_dir(&root).map_err(|e| {
                format!(
                    "failed to read background task root {}: {e}",
                    root.display()
                )
            })?;
            for session_entry in session_dirs.flatten() {
                let session_dir = session_entry.path();
                if !session_dir.is_dir() {
                    continue;
                }
                // An unreadable session dir is logged and skipped, not fatal.
                let task_entries = match fs::read_dir(&session_dir) {
                    Ok(entries) => entries,
                    Err(error) => {
                        crate::slog_warn!(
                            "failed to read background task session dir {}: {error}",
                            session_dir.display()
                        );
                        continue;
                    }
                };
                for task_entry in task_entries.flatten() {
                    let json_path = task_entry.path();
                    if json_path
                        .extension()
                        .and_then(|extension| extension.to_str())
                        != Some("json")
                    {
                        continue;
                    }
                    // Recently-touched metadata is never collected.
                    if modified_within(&json_path, PERSISTED_GC_GRACE) {
                        continue;
                    }
                    // Corrupt metadata is quarantined so a later inspection
                    // is still possible; it is not silently deleted.
                    let metadata = match read_task(&json_path) {
                        Ok(metadata) => metadata,
                        Err(error) => {
                            crate::slog_warn!(
                                "quarantining corrupt background task metadata {}: {error}",
                                json_path.display()
                            );
                            quarantine_corrupt_task_json(storage_dir, &session_dir, &json_path)?;
                            continue;
                        }
                    };
                    // Only terminal AND acknowledged bundles are collectible.
                    if !(metadata.status.is_terminal() && metadata.completion_delivered) {
                        continue;
                    }
                    let paths = task_paths(storage_dir, &metadata.session_id, &metadata.task_id);
                    match delete_task_bundle(&paths) {
                        Ok(()) => {
                            deleted += 1;
                            log::debug!(
                                "deleted persisted background task bundle {}",
                                metadata.task_id
                            );
                        }
                        Err(error) => {
                            crate::slog_warn!(
                                "failed to delete background task bundle {}: {error}",
                                metadata.task_id
                            );
                            continue;
                        }
                    }
                }
            }
        }
        gc_quarantine(storage_dir);
        Ok(deleted)
    }
645
646    pub fn list(&self, preview_bytes: usize) -> Vec<BgTaskSnapshot> {
647        let tasks = self
648            .inner
649            .tasks
650            .lock()
651            .map(|tasks| tasks.values().cloned().collect::<Vec<_>>())
652            .unwrap_or_default();
653        tasks
654            .into_iter()
655            .map(|task| {
656                let _ = self.poll_task(&task);
657                let mut snapshot = task.snapshot(preview_bytes);
658                self.maybe_compress_snapshot(&task, &mut snapshot);
659                snapshot
660            })
661            .collect()
662    }
663
664    /// Compress `output_preview` in place when the task is in a terminal
665    /// state. Live tail of running tasks stays raw so agents debugging
666    /// long-running bash see exactly what the process emitted, not a
667    /// heuristic-collapsed view. Per-task opt-out via the `compressed`
668    /// field on `PersistedTask` short-circuits before the compress pipeline.
669    fn maybe_compress_snapshot(&self, task: &Arc<BgTask>, snapshot: &mut BgTaskSnapshot) {
670        if !snapshot.info.status.is_terminal() {
671            return;
672        }
673        let compressed_flag = task
674            .state
675            .lock()
676            .map(|state| state.metadata.compressed)
677            .unwrap_or(true);
678        if !compressed_flag {
679            return;
680        }
681        let raw = std::mem::take(&mut snapshot.output_preview);
682        snapshot.output_preview = self.compress_output(&snapshot.info.command, raw);
683    }
684
685    pub fn kill(&self, task_id: &str, session_id: &str) -> Result<BgTaskSnapshot, String> {
686        self.kill_with_status(task_id, session_id, BgTaskStatus::Killed)
687    }
688
    /// Mark `task_id` as notify-on-completion and reset its delivered flag.
    /// When the task is already terminal, its completion is re-enqueued
    /// immediately (with the terminal output cap enforced first). Returns
    /// `Ok(true)` on success.
    ///
    /// # Errors
    /// Returns a message when the task is not found for this session, a lock
    /// is poisoned, or persisting the updated metadata fails.
    pub fn promote(&self, task_id: &str, session_id: &str) -> Result<bool, String> {
        let task = self
            .task_for_session(task_id, session_id)
            .ok_or_else(|| format!("background task not found: {task_id}"))?;
        // Hold the state lock across the disk update so the in-memory
        // metadata mirror cannot diverge from what was just persisted.
        let mut state = task
            .state
            .lock()
            .map_err(|_| "background task lock poisoned".to_string())?;
        let updated = update_task(&task.paths.json, |metadata| {
            metadata.notify_on_completion = true;
            metadata.completion_delivered = false;
        })
        .map_err(|e| format!("failed to promote background task: {e}"))?;
        state.metadata = updated;
        if state.metadata.status.is_terminal() {
            state.buffer.enforce_terminal_cap();
            // `_locked` variant: the state lock is already held here.
            self.enqueue_completion_locked(&state.metadata, Some(&state.buffer), true);
        }
        Ok(true)
    }
709
710    pub(crate) fn kill_for_timeout(&self, task_id: &str, session_id: &str) -> Result<(), String> {
711        self.kill_with_status(task_id, session_id, BgTaskStatus::TimedOut)
712            .map(|_| ())
713    }
714
    /// Evict tasks that are terminal, delivery-acknowledged, and whose
    /// terminal timestamp is older than `older_than`, deleting their on-disk
    /// bundles. The registry lock is held only while collecting and removing
    /// entries; bundle deletion happens after it is released.
    pub fn cleanup_finished(&self, older_than: Duration) {
        // `checked_sub` is None when `older_than` exceeds the process uptime;
        // in that case a task with any terminal timestamp counts as expired.
        let cutoff = Instant::now().checked_sub(older_than);
        let removable_paths: Vec<(String, TaskPaths)> =
            if let Ok(mut tasks) = self.inner.tasks.lock() {
                let removable = tasks
                    .iter()
                    .filter_map(|(task_id, task)| {
                        // Only terminal AND delivered tasks are candidates.
                        let delivered_terminal = task
                            .state
                            .lock()
                            .map(|state| {
                                state.metadata.status.is_terminal()
                                    && state.metadata.completion_delivered
                            })
                            .unwrap_or(false);
                        if !delivered_terminal {
                            return None;
                        }

                        // A task without a recorded terminal time never expires.
                        let terminal_at = task.terminal_at.lock().ok().and_then(|at| *at);
                        let expired = match (terminal_at, cutoff) {
                            (Some(terminal_at), Some(cutoff)) => terminal_at <= cutoff,
                            (Some(_), None) => true,
                            (None, _) => false,
                        };
                        expired.then(|| task_id.clone())
                    })
                    .collect::<Vec<_>>();

                // Second pass: remove from the map and keep paths for deletion.
                removable
                    .into_iter()
                    .filter_map(|task_id| {
                        tasks
                            .remove(&task_id)
                            .map(|task| (task_id, task.paths.clone()))
                    })
                    .collect()
            } else {
                Vec::new()
            };

        // Delete bundles outside the registry lock; failures are logged only.
        for (task_id, paths) in removable_paths {
            match delete_task_bundle(&paths) {
                Ok(()) => log::debug!("deleted persisted background task bundle {task_id}"),
                Err(error) => crate::slog_warn!(
                    "failed to delete persisted background task bundle {task_id}: {error}"
                ),
            }
        }
    }
765
    /// Drain every queued completion regardless of owning session; each
    /// drained task is marked completion-delivered (see
    /// `drain_completions_for_session`).
    pub fn drain_completions(&self) -> Vec<BgCompletion> {
        self.drain_completions_for_session(None)
    }
769
770    pub fn drain_completions_for_session(&self, session_id: Option<&str>) -> Vec<BgCompletion> {
771        let mut completions = match self.inner.completions.lock() {
772            Ok(completions) => completions,
773            Err(_) => return Vec::new(),
774        };
775
776        let drained = if let Some(session_id) = session_id {
777            let mut matched = Vec::new();
778            let mut retained = VecDeque::new();
779            while let Some(completion) = completions.pop_front() {
780                if completion.session_id == session_id {
781                    matched.push(completion);
782                } else {
783                    retained.push_back(completion);
784                }
785            }
786            *completions = retained;
787            matched
788        } else {
789            completions.drain(..).collect()
790        };
791        drop(completions);
792
793        for completion in &drained {
794            if let Some(task) = self.task_for_session(&completion.task_id, &completion.session_id) {
795                let _ = task.set_completion_delivered(true);
796            }
797        }
798
799        drained
800    }
801
802    pub fn pending_completions_for_session(&self, session_id: &str) -> Vec<BgCompletion> {
803        self.inner
804            .completions
805            .lock()
806            .map(|completions| {
807                completions
808                    .iter()
809                    .filter(|completion| completion.session_id == session_id)
810                    .cloned()
811                    .collect()
812            })
813            .unwrap_or_default()
814    }
815
816    pub fn detach(&self) {
817        self.inner.shutdown.store(true, Ordering::SeqCst);
818        if let Ok(mut tasks) = self.inner.tasks.lock() {
819            for task in tasks.values() {
820                if let Ok(mut state) = task.state.lock() {
821                    state.child = None;
822                    state.detached = true;
823                }
824            }
825            tasks.clear();
826        }
827    }
828
829    pub fn shutdown(&self) {
830        let tasks = self
831            .inner
832            .tasks
833            .lock()
834            .map(|tasks| {
835                tasks
836                    .values()
837                    .map(|task| (task.task_id.clone(), task.session_id.clone()))
838                    .collect::<Vec<_>>()
839            })
840            .unwrap_or_default();
841        for (task_id, session_id) in tasks {
842            let _ = self.kill(&task_id, &session_id);
843        }
844    }
845
846    pub(crate) fn poll_task(&self, task: &Arc<BgTask>) -> Result<(), String> {
847        let marker = match read_exit_marker(&task.paths.exit) {
848            Ok(Some(marker)) => marker,
849            Ok(None) => return Ok(()),
850            Err(error) => return Err(format!("failed to read exit marker: {error}")),
851        };
852        self.finalize_from_marker(task, marker, None)
853    }
854
855    pub(crate) fn reap_child(&self, task: &Arc<BgTask>) {
856        let Ok(mut state) = task.state.lock() else {
857            return;
858        };
859        if let Some(child) = state.child.as_mut() {
860            if matches!(child.try_wait(), Ok(Some(_))) {
861                // Child has exited. If the wrapper successfully wrote an
862                // exit marker, the next `poll_task()` cycle will pick it up
863                // and finalize via `finalize_from_marker`. But if the
864                // wrapper crashed before writing the marker (e.g. SIGKILL,
865                // power loss, wrapper bug), the task would forever appear
866                // Running until `timeout_ms` expired — and if no timeout
867                // was set, until the 24h `running_metadata_is_stale` cutoff
868                // hit at the next aft restart. Same condition as the replay
869                // path's "PID dead but no marker" branch (see line 338).
870                //
871                // To avoid that hidden hang, mark the task Failed
872                // immediately with the same reason string used by replay,
873                // but only if the marker is genuinely absent. If a marker
874                // appeared on disk between try_wait() returning and now
875                // (race window), prefer the marker — let the next poll
876                // cycle finalize from it.
877                state.child = None;
878                state.detached = true;
879                if state.metadata.status.is_terminal() {
880                    return;
881                }
882                if matches!(read_exit_marker(&task.paths.exit), Ok(Some(_))) {
883                    return;
884                }
885                let updated = update_task(&task.paths.json, |metadata| {
886                    metadata.mark_terminal(
887                        BgTaskStatus::Failed,
888                        None,
889                        Some("process exited without exit marker".to_string()),
890                    );
891                });
892                if let Ok(metadata) = updated {
893                    state.metadata = metadata;
894                    task.mark_terminal_now();
895                    state.buffer.enforce_terminal_cap();
896                    self.enqueue_completion_locked(&state.metadata, Some(&state.buffer), true);
897                }
898            }
899        }
900    }
901
902    pub(crate) fn running_tasks(&self) -> Vec<Arc<BgTask>> {
903        self.inner
904            .tasks
905            .lock()
906            .map(|tasks| {
907                tasks
908                    .values()
909                    .filter(|task| task.is_running())
910                    .cloned()
911                    .collect()
912            })
913            .unwrap_or_default()
914    }
915
916    fn insert_rehydrated_task(
917        &self,
918        metadata: PersistedTask,
919        paths: TaskPaths,
920        detached: bool,
921    ) -> Result<(), String> {
922        let task_id = metadata.task_id.clone();
923        let session_id = metadata.session_id.clone();
924        let started = started_instant_from_unix_millis(metadata.started_at);
925        let task = Arc::new(BgTask {
926            task_id: task_id.clone(),
927            session_id,
928            paths: paths.clone(),
929            started,
930            last_reminder_at: Mutex::new(None),
931            terminal_at: Mutex::new(metadata.status.is_terminal().then(Instant::now)),
932            state: Mutex::new(BgTaskState {
933                metadata,
934                child: None,
935                detached,
936                buffer: BgBuffer::new(paths.stdout.clone(), paths.stderr.clone()),
937            }),
938        });
939        self.inner
940            .tasks
941            .lock()
942            .map_err(|_| "background task registry lock poisoned".to_string())?
943            .insert(task_id, task);
944        Ok(())
945    }
946
    /// Drive `task_id` into `terminal_status`, terminating its process if it
    /// is still alive.
    ///
    /// Used by explicit kills (`Killed`) and by timeout expiry (`TimedOut`,
    /// which records exit code 124). Idempotent: an already-terminal task
    /// just returns its snapshot.
    fn kill_with_status(
        &self,
        task_id: &str,
        session_id: &str,
        terminal_status: BgTaskStatus,
    ) -> Result<BgTaskSnapshot, String> {
        let task = self
            .task_for_session(task_id, session_id)
            .ok_or_else(|| format!("background task not found: {task_id}"))?;

        {
            let mut state = task
                .state
                .lock()
                .map_err(|_| "background task lock poisoned".to_string())?;
            if state.metadata.status.is_terminal() {
                return Ok(task.snapshot_locked(&state, 5 * 1024));
            }

            // Lost the race with a natural exit: an exit marker already
            // exists, so finalize from it instead of recording a kill.
            if let Ok(Some(marker)) = read_exit_marker(&task.paths.exit) {
                state.metadata =
                    terminal_metadata_from_marker(state.metadata.clone(), marker, None);
                task.mark_terminal_now();
                state.child = None;
                state.detached = true;
                state.buffer.enforce_terminal_cap();
                write_task(&task.paths.json, &state.metadata)
                    .map_err(|e| format!("failed to persist terminal state: {e}"))?;
                self.enqueue_completion_locked(&state.metadata, Some(&state.buffer), true);
                return Ok(task.snapshot_locked(&state, 5 * 1024));
            }

            // Persist the intermediate `Killing` state before signaling, so a
            // crash mid-kill is visible on replay.
            state.metadata.status = BgTaskStatus::Killing;
            write_task(&task.paths.json, &state.metadata)
                .map_err(|e| format!("failed to persist killing state: {e}"))?;

            // Unix: signal the whole process group when the pgid is known.
            #[cfg(unix)]
            if let Some(pgid) = state.metadata.pgid {
                terminate_pgid(pgid, state.child.as_mut());
            }
            // Windows: prefer the live child handle; fall back to the
            // persisted pid for rehydrated (handle-less) tasks.
            #[cfg(windows)]
            if let Some(child) = state.child.as_mut() {
                super::process::terminate_process(child);
            } else if let Some(pid) = state.metadata.child_pid {
                terminate_pid(pid);
            }
            // Reap the wrapper so it does not linger as a zombie.
            if let Some(child) = state.child.as_mut() {
                let _ = child.wait();
            }
            state.child = None;
            state.detached = true;

            // Leave a kill marker — unless the wrapper managed to write an
            // exit marker after all — so replay after restart sees a
            // terminal task.
            if !task.paths.exit.exists() {
                write_kill_marker_if_absent(&task.paths.exit)
                    .map_err(|e| format!("failed to write kill marker: {e}"))?;
            }

            // 124 mirrors the conventional timeout exit code (cf. coreutils
            // `timeout`).
            let exit_code = if terminal_status == BgTaskStatus::TimedOut {
                Some(124)
            } else {
                None
            };
            state
                .metadata
                .mark_terminal(terminal_status, exit_code, None);
            task.mark_terminal_now();
            write_task(&task.paths.json, &state.metadata)
                .map_err(|e| format!("failed to persist killed state: {e}"))?;
            state.buffer.enforce_terminal_cap();
            self.enqueue_completion_locked(&state.metadata, Some(&state.buffer), true);
        }

        Ok(task.snapshot(5 * 1024))
    }
1021
1022    fn finalize_from_marker(
1023        &self,
1024        task: &Arc<BgTask>,
1025        marker: ExitMarker,
1026        reason: Option<String>,
1027    ) -> Result<(), String> {
1028        let mut state = task
1029            .state
1030            .lock()
1031            .map_err(|_| "background task lock poisoned".to_string())?;
1032        if state.metadata.status.is_terminal() {
1033            return Ok(());
1034        }
1035
1036        let updated = update_task(&task.paths.json, |metadata| {
1037            let new_metadata = terminal_metadata_from_marker(metadata.clone(), marker, reason);
1038            *metadata = new_metadata;
1039        })
1040        .map_err(|e| format!("failed to persist terminal state: {e}"))?;
1041        state.metadata = updated;
1042        task.mark_terminal_now();
1043        state.child = None;
1044        state.detached = true;
1045        state.buffer.enforce_terminal_cap();
1046        self.enqueue_completion_locked(&state.metadata, Some(&state.buffer), true);
1047        Ok(())
1048    }
1049
1050    fn enqueue_completion_if_needed(
1051        &self,
1052        metadata: &PersistedTask,
1053        paths: Option<&TaskPaths>,
1054        emit_frame: bool,
1055    ) {
1056        if metadata.status.is_terminal() && !metadata.completion_delivered {
1057            self.enqueue_completion_from_parts(metadata, None, paths, emit_frame);
1058        }
1059    }
1060
    /// Queue a completion using the caller's live buffer for the output
    /// preview; the caller already holds the task state lock.
    fn enqueue_completion_locked(
        &self,
        metadata: &PersistedTask,
        buffer: Option<&BgBuffer>,
        emit_frame: bool,
    ) {
        self.enqueue_completion_from_parts(metadata, buffer, None, emit_frame);
    }
1069
1070    fn enqueue_completion_from_parts(
1071        &self,
1072        metadata: &PersistedTask,
1073        buffer: Option<&BgBuffer>,
1074        paths: Option<&TaskPaths>,
1075        emit_frame: bool,
1076    ) {
1077        if !metadata.status.is_terminal() || metadata.completion_delivered {
1078            return;
1079        }
1080        // Read tail once at completion time and cache on the BgCompletion so
1081        // both the push-frame consumer (running session) and any later
1082        // `bash_drain_completions` poll (different session, restart) see the
1083        // same preview without racing against rotation.
1084        let (raw_preview, output_truncated) = match buffer {
1085            Some(buf) => buf.read_tail(BG_COMPLETION_PREVIEW_BYTES),
1086            None => paths
1087                .map(|paths| read_tail_from_disk(paths, BG_COMPLETION_PREVIEW_BYTES))
1088                .unwrap_or_else(|| (String::new(), false)),
1089        };
1090        // Compress at completion time so push-frame consumers and later
1091        // `bash_drain_completions` poll-callers see the same compressed text.
1092        // Per-task `compressed: false` opts out; otherwise the compressor is
1093        // a no-op when `experimental.bash.compress=false`.
1094        let output_preview = if metadata.compressed {
1095            self.compress_output(&metadata.command, raw_preview)
1096        } else {
1097            raw_preview
1098        };
1099        let completion = BgCompletion {
1100            task_id: metadata.task_id.clone(),
1101            session_id: metadata.session_id.clone(),
1102            status: metadata.status.clone(),
1103            exit_code: metadata.exit_code,
1104            command: metadata.command.clone(),
1105            output_preview,
1106            output_truncated,
1107        };
1108        if let Ok(mut completions) = self.inner.completions.lock() {
1109            if completions
1110                .iter()
1111                .any(|completion| completion.task_id == metadata.task_id)
1112            {
1113                return;
1114            }
1115            completions.push_back(completion.clone());
1116        } else {
1117            return;
1118        }
1119
1120        if emit_frame {
1121            self.emit_bash_completed(completion);
1122        }
1123    }
1124
1125    fn emit_bash_completed(&self, completion: BgCompletion) {
1126        let Ok(progress_sender) = self
1127            .inner
1128            .progress_sender
1129            .lock()
1130            .map(|sender| sender.clone())
1131        else {
1132            return;
1133        };
1134        let Some(sender) = progress_sender.as_ref() else {
1135            return;
1136        };
1137        // Clone the callback out of the registry mutex before writing to stdout;
1138        // otherwise a blocked push-frame write could pin the mutex and starve
1139        // unrelated progress-sender updates.
1140        // Bg task transitions are discovered by the watchdog thread, so the
1141        // sender is shared behind a Mutex. It still uses the same stdout writer
1142        // closure as foreground progress frames, preserving the existing lock/
1143        // flush behavior in main.rs.
1144        sender(PushFrame::BashCompleted(BashCompletedFrame::new(
1145            completion.task_id,
1146            completion.session_id,
1147            completion.status,
1148            completion.exit_code,
1149            completion.command,
1150            completion.output_preview,
1151            completion.output_truncated,
1152        )));
1153    }
1154
1155    pub(crate) fn maybe_emit_long_running_reminder(&self, task: &Arc<BgTask>) {
1156        if !self
1157            .inner
1158            .long_running_reminder_enabled
1159            .load(Ordering::SeqCst)
1160        {
1161            return;
1162        }
1163        let interval_ms = self
1164            .inner
1165            .long_running_reminder_interval_ms
1166            .load(Ordering::SeqCst);
1167        if interval_ms == 0 {
1168            return;
1169        }
1170        let interval = Duration::from_millis(interval_ms);
1171        let now = Instant::now();
1172        let Ok(mut last_reminder_at) = task.last_reminder_at.lock() else {
1173            return;
1174        };
1175        let since = last_reminder_at.unwrap_or(task.started);
1176        if now.duration_since(since) < interval {
1177            return;
1178        }
1179        let command = task
1180            .state
1181            .lock()
1182            .map(|state| state.metadata.command.clone())
1183            .unwrap_or_default();
1184        *last_reminder_at = Some(now);
1185        self.emit_bash_long_running(BashLongRunningFrame::new(
1186            task.task_id.clone(),
1187            task.session_id.clone(),
1188            command,
1189            task.started.elapsed().as_millis() as u64,
1190        ));
1191    }
1192
1193    fn emit_bash_long_running(&self, frame: BashLongRunningFrame) {
1194        let Ok(progress_sender) = self
1195            .inner
1196            .progress_sender
1197            .lock()
1198            .map(|sender| sender.clone())
1199        else {
1200            return;
1201        };
1202        if let Some(sender) = progress_sender.as_ref() {
1203            sender(PushFrame::BashLongRunning(frame));
1204        }
1205    }
1206
1207    fn task(&self, task_id: &str) -> Option<Arc<BgTask>> {
1208        self.inner
1209            .tasks
1210            .lock()
1211            .ok()
1212            .and_then(|tasks| tasks.get(task_id).cloned())
1213    }
1214
1215    fn task_for_session(&self, task_id: &str, session_id: &str) -> Option<Arc<BgTask>> {
1216        self.task(task_id)
1217            .filter(|task| task.session_id == session_id)
1218    }
1219
1220    fn running_count(&self) -> usize {
1221        self.inner
1222            .tasks
1223            .lock()
1224            .map(|tasks| tasks.values().filter(|task| task.is_running()).count())
1225            .unwrap_or(0)
1226    }
1227
1228    fn start_watchdog(&self) {
1229        if !self.inner.watchdog_started.swap(true, Ordering::SeqCst) {
1230            super::watchdog::start(self.clone());
1231        }
1232    }
1233
1234    fn running_metadata_is_stale(&self, metadata: &PersistedTask) -> bool {
1235        unix_millis().saturating_sub(metadata.started_at) > STALE_RUNNING_AFTER.as_millis() as u64
1236    }
1237
1238    #[cfg(test)]
1239    pub fn task_json_path(&self, task_id: &str, session_id: &str) -> Option<PathBuf> {
1240        self.task_for_session(task_id, session_id)
1241            .map(|task| task.paths.json.clone())
1242    }
1243
1244    #[cfg(test)]
1245    pub fn task_exit_path(&self, task_id: &str, session_id: &str) -> Option<PathBuf> {
1246        self.task_for_session(task_id, session_id)
1247            .map(|task| task.paths.exit.clone())
1248    }
1249
1250    /// Generate a `bash-{16hex}` slug that is unique against live tasks and queued completions.
1251    fn generate_unique_task_id(&self) -> Result<String, String> {
1252        for _ in 0..32 {
1253            let candidate = random_slug();
1254            let tasks = self
1255                .inner
1256                .tasks
1257                .lock()
1258                .map_err(|_| "background task registry lock poisoned".to_string())?;
1259            if tasks.contains_key(&candidate) {
1260                continue;
1261            }
1262            let completions = self
1263                .inner
1264                .completions
1265                .lock()
1266                .map_err(|_| "background completions lock poisoned".to_string())?;
1267            if completions
1268                .iter()
1269                .any(|completion| completion.task_id == candidate)
1270            {
1271                continue;
1272            }
1273            return Ok(candidate);
1274        }
1275        Err("failed to allocate unique background task id after 32 attempts".to_string())
1276    }
1277}
1278
impl Default for BgTaskRegistry {
    /// A registry with no progress sender attached; push frames are dropped
    /// until a sender is installed.
    fn default() -> Self {
        Self::new(Arc::new(Mutex::new(None)))
    }
}
1284
/// True when `path` exists and its mtime is within `grace` of now.
///
/// Used as a GC keep-guard: recently touched entries survive. A file whose
/// mtime lies in the *future* (clock skew, restored backup) is treated as
/// recently modified rather than GC-eligible, so a skewed clock can never
/// make fresh data look ancient and get it deleted. Missing files and
/// unreadable metadata report `false` (nothing to keep).
fn modified_within(path: &Path, grace: Duration) -> bool {
    let Ok(metadata) = fs::metadata(path) else {
        return false;
    };
    let Ok(modified) = metadata.modified() else {
        return false;
    };
    match SystemTime::now().duration_since(modified) {
        Ok(age) => age < grace,
        // `modified` is later than "now": clock skew. Err on the side of
        // keeping the entry.
        Err(_) => true,
    }
}
1293
/// Canonicalize `path`, falling back to the path unchanged when resolution
/// fails (e.g. the path does not exist yet).
fn canonicalized_path(path: &Path) -> PathBuf {
    match fs::canonicalize(path) {
        Ok(resolved) => resolved,
        Err(_) => path.to_path_buf(),
    }
}
1297
/// Reconstruct an approximate monotonic `Instant` for a task that started at
/// `started_at` unix-milliseconds, possibly in a previous process lifetime.
///
/// The wall-clock delta since `started_at` is subtracted from
/// `Instant::now()`; when that underflows the monotonic clock's range — or
/// the wall clock is unreadable — the fallback is "just now".
fn started_instant_from_unix_millis(started_at: u64) -> Instant {
    let now_ms = match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(duration) => duration.as_millis() as u64,
        // Pre-epoch wall clock: pretend no time has elapsed.
        Err(_) => started_at,
    };
    let elapsed = Duration::from_millis(now_ms.saturating_sub(started_at));
    match Instant::now().checked_sub(elapsed) {
        Some(instant) => instant,
        None => Instant::now(),
    }
}
1309
/// Garbage-collect the quarantine area for corrupt background-task bundles.
///
/// Quarantined entries untouched for longer than `QUARANTINE_GC_GRACE`
/// (30 days) are deleted; recently-modified ones are kept for post-mortem
/// inspection. Session directories — and finally the quarantine root — are
/// removed opportunistically; the `remove_dir` calls fail harmlessly when a
/// directory is still non-empty.
fn gc_quarantine(storage_dir: &Path) {
    let quarantine_root = storage_dir.join("bash-tasks-quarantine");
    // Missing quarantine dir is the common case: nothing was ever quarantined.
    let Ok(session_dirs) = fs::read_dir(&quarantine_root) else {
        return;
    };
    for session_entry in session_dirs.flatten() {
        let session_quarantine_dir = session_entry.path();
        if !session_quarantine_dir.is_dir() {
            continue;
        }
        let entries = match fs::read_dir(&session_quarantine_dir) {
            Ok(entries) => entries,
            Err(error) => {
                crate::slog_warn!(
                    "failed to read background task quarantine dir {}: {error}",
                    session_quarantine_dir.display()
                );
                continue;
            }
        };
        for entry in entries.flatten() {
            let path = entry.path();
            // Keep entries still inside the grace window.
            if modified_within(&path, QUARANTINE_GC_GRACE) {
                continue;
            }
            let result = if path.is_dir() {
                fs::remove_dir_all(&path)
            } else {
                fs::remove_file(&path)
            };
            match result {
                Ok(()) => log::debug!(
                    "deleted old background task quarantine entry {}",
                    path.display()
                ),
                Err(error) => crate::slog_warn!(
                    "failed to delete old background task quarantine entry {}: {error}",
                    path.display()
                ),
            }
        }
        // Best-effort: succeeds only once the session dir is empty.
        let _ = fs::remove_dir(&session_quarantine_dir);
    }
    // Best-effort: succeeds only once every session dir is gone.
    let _ = fs::remove_dir(&quarantine_root);
}
1355
/// Move an unparseable task metadata file — plus its sibling capture, marker,
/// and wrapper files — into the quarantine area instead of deleting it,
/// preserving the evidence for debugging.
///
/// Quarantined copies are suffixed `.corrupt-{unix_ts}` so repeated
/// quarantines of the same task name cannot collide. Sibling move failures
/// are logged but non-fatal: quarantining the metadata file itself is the
/// part that matters for replay correctness.
fn quarantine_corrupt_task_json(
    storage_dir: &Path,
    session_dir: &Path,
    json_path: &Path,
) -> Result<(), String> {
    // The quarantine area mirrors the session-hash directory layout.
    let session_hash = session_dir
        .file_name()
        .and_then(|name| name.to_str())
        .ok_or_else(|| {
            format!(
                "invalid background task session dir: {}",
                session_dir.display()
            )
        })?;
    let task_name = json_path
        .file_name()
        .and_then(|name| name.to_str())
        .ok_or_else(|| format!("invalid background task json path: {}", json_path.display()))?;
    let unix_ts = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|duration| duration.as_secs())
        .unwrap_or(0);
    let quarantine_dir = storage_dir.join("bash-tasks-quarantine").join(session_hash);
    fs::create_dir_all(&quarantine_dir).map_err(|e| {
        format!(
            "failed to create background task quarantine dir {}: {e}",
            quarantine_dir.display()
        )
    })?;
    let target = quarantine_dir.join(format!("{task_name}.corrupt-{unix_ts}"));
    fs::rename(json_path, &target).map_err(|e| {
        format!(
            "failed to quarantine corrupt background task metadata {} to {}: {e}",
            json_path.display(),
            target.display()
        )
    })?;

    // Drag along the stdout/stderr/exit/wrapper files that share the stem.
    for sibling in task_sibling_paths(json_path) {
        if !sibling.exists() {
            continue;
        }
        let Some(sibling_name) = sibling.file_name().and_then(|name| name.to_str()) else {
            crate::slog_warn!(
                "skipping background task sibling with invalid name during quarantine: {}",
                sibling.display()
            );
            continue;
        };
        let sibling_target = quarantine_dir.join(format!("{sibling_name}.corrupt-{unix_ts}"));
        if let Err(error) = fs::rename(&sibling, &sibling_target) {
            crate::slog_warn!(
                "failed to quarantine background task sibling {} to {}: {error}",
                sibling.display(),
                sibling_target.display()
            );
        }
    }

    // Best-effort: remove the session dir if this was its last bundle.
    let _ = fs::remove_dir(session_dir);
    Ok(())
}
1418
/// Companion files for the task whose metadata lives at `json_path`:
/// captured stdout/stderr, the exit marker, and any wrapper script
/// (`ps1`/`bat`/`sh`), all sharing the json file's stem.
fn task_sibling_paths(json_path: &Path) -> Vec<PathBuf> {
    let (Some(parent), Some(stem)) = (
        json_path.parent(),
        json_path.file_stem().and_then(|stem| stem.to_str()),
    ) else {
        return Vec::new();
    };
    let extensions = ["stdout", "stderr", "exit", "ps1", "bat", "sh"];
    extensions
        .iter()
        .map(|extension| parent.join(format!("{stem}.{extension}")))
        .collect()
}
1431
1432fn read_tail_from_disk(paths: &TaskPaths, max_bytes: usize) -> (String, bool) {
1433    let stdout = fs::read(&paths.stdout).unwrap_or_default();
1434    let stderr = fs::read(&paths.stderr).unwrap_or_default();
1435    let mut bytes = Vec::with_capacity(stdout.len().saturating_add(stderr.len()));
1436    bytes.extend_from_slice(&stdout);
1437    bytes.extend_from_slice(&stderr);
1438    if bytes.len() <= max_bytes {
1439        return (String::from_utf8_lossy(&bytes).into_owned(), false);
1440    }
1441    let start = bytes.len().saturating_sub(max_bytes);
1442    (String::from_utf8_lossy(&bytes[start..]).into_owned(), true)
1443}
1444
1445impl BgTask {
1446    fn snapshot(&self, preview_bytes: usize) -> BgTaskSnapshot {
1447        let state = self
1448            .state
1449            .lock()
1450            .unwrap_or_else(|poison| poison.into_inner());
1451        self.snapshot_locked(&state, preview_bytes)
1452    }
1453
1454    fn snapshot_locked(&self, state: &BgTaskState, preview_bytes: usize) -> BgTaskSnapshot {
1455        let metadata = &state.metadata;
1456        let duration_ms = metadata.duration_ms.or_else(|| {
1457            metadata
1458                .status
1459                .is_terminal()
1460                .then(|| self.started.elapsed().as_millis() as u64)
1461        });
1462        let (output_preview, output_truncated) = state.buffer.read_tail(preview_bytes);
1463        BgTaskSnapshot {
1464            info: BgTaskInfo {
1465                task_id: self.task_id.clone(),
1466                status: metadata.status.clone(),
1467                command: metadata.command.clone(),
1468                started_at: metadata.started_at,
1469                duration_ms,
1470            },
1471            exit_code: metadata.exit_code,
1472            child_pid: metadata.child_pid,
1473            workdir: metadata.workdir.display().to_string(),
1474            output_preview,
1475            output_truncated,
1476            output_path: state
1477                .buffer
1478                .output_path()
1479                .map(|path| path.display().to_string()),
1480            stderr_path: Some(state.buffer.stderr_path().display().to_string()),
1481        }
1482    }
1483
1484    pub(crate) fn is_running(&self) -> bool {
1485        self.state
1486            .lock()
1487            .map(|state| state.metadata.status == BgTaskStatus::Running)
1488            .unwrap_or(false)
1489    }
1490
1491    fn mark_terminal_now(&self) {
1492        if let Ok(mut terminal_at) = self.terminal_at.lock() {
1493            if terminal_at.is_none() {
1494                *terminal_at = Some(Instant::now());
1495            }
1496        }
1497    }
1498
1499    fn set_completion_delivered(&self, delivered: bool) -> Result<(), String> {
1500        let mut state = self
1501            .state
1502            .lock()
1503            .map_err(|_| "background task lock poisoned".to_string())?;
1504        let updated = update_task(&self.paths.json, |metadata| {
1505            metadata.completion_delivered = delivered;
1506        })
1507        .map_err(|e| format!("failed to update completion delivery: {e}"))?;
1508        state.metadata = updated;
1509        Ok(())
1510    }
1511}
1512
1513fn terminal_metadata_from_marker(
1514    mut metadata: PersistedTask,
1515    marker: ExitMarker,
1516    reason: Option<String>,
1517) -> PersistedTask {
1518    match marker {
1519        ExitMarker::Code(code) => {
1520            let status = if code == 0 {
1521                BgTaskStatus::Completed
1522            } else {
1523                BgTaskStatus::Failed
1524            };
1525            metadata.mark_terminal(status, Some(code), reason);
1526        }
1527        ExitMarker::Killed => metadata.mark_terminal(BgTaskStatus::Killed, None, reason),
1528    }
1529    metadata
1530}
1531
/// Build the Unix command that runs `command` detached in its own session.
///
/// Outer invocation: `<shell> -c <wrapper> <shell> <command> <exit_path>`.
/// Inside the wrapper, `$0` is the shell and `$1` the user command, so the
/// user command text runs through a second `-c` without any quoting. The
/// exit code is written to `"$2.tmp.$$"` and then `mv -f`ed onto `$2` (the
/// exit-marker path) so readers never observe a half-written marker.
#[cfg(unix)]
fn detached_shell_command(command: &str, exit_path: &Path) -> Command {
    let shell = resolve_posix_shell();
    let mut cmd = Command::new(&shell);
    cmd.arg("-c")
        .arg("\"$0\" -c \"$1\"; code=$?; printf \"%s\" \"$code\" > \"$2.tmp.$$\"; mv -f \"$2.tmp.$$\" \"$2\"")
        .arg(&shell)
        .arg(command)
        .arg(exit_path);
    // SAFETY: the pre_exec closure runs between fork and exec and only calls
    // `setsid` (async-signal-safe) and reads errno.
    unsafe {
        cmd.pre_exec(|| {
            // New session: the child survives aft's exit and gets its own
            // process group for later group-wide kills.
            if libc::setsid() == -1 {
                return Err(std::io::Error::last_os_error());
            }
            Ok(())
        });
    }
    cmd
}
1551
1552#[cfg(unix)]
1553fn resolve_posix_shell() -> PathBuf {
1554    static POSIX_SHELL: OnceLock<PathBuf> = OnceLock::new();
1555    POSIX_SHELL
1556        .get_or_init(|| {
1557            std::env::var_os("BASH")
1558                .filter(|value| !value.is_empty())
1559                .map(PathBuf::from)
1560                .filter(|path| path.exists())
1561                .or_else(|| which::which("bash").ok())
1562                .or_else(|| which::which("zsh").ok())
1563                .unwrap_or_else(|| PathBuf::from("/bin/sh"))
1564        })
1565        .clone()
1566}
1567
/// Build the Windows `Command` that runs `command` through one specific
/// shell candidate, publishing its exit code to `exit_path`.
///
/// The shell-specific wrapper script (from `WindowsShell::wrapper_script`)
/// is written next to the task's other files and invoked by path, so the
/// command text never crosses the Windows command-line quoting layers.
/// `creation_flags` is applied verbatim via `CommandExt::creation_flags`.
/// Returns `Err` only when the wrapper file cannot be written.
#[cfg(windows)]
fn detached_shell_command_for(
    shell: crate::windows_shell::WindowsShell,
    command: &str,
    exit_path: &Path,
    paths: &TaskPaths,
    creation_flags: u32,
) -> Result<Command, String> {
    use crate::windows_shell::WindowsShell;
    // Write the wrapper to a temp file alongside the other task files,
    // then invoke the shell with the file path as a single clean
    // argument. This sidesteps the entire Windows command-line quoting
    // mess (Rust std-lib quoting + cmd /C parser + PowerShell -Command
    // parser all interacting with embedded quotes in the wrapper).
    //
    // Path arguments don't need quoting in the same problematic way
    // because: (1) we use no-space task IDs (bash-XXXXXXXX) so the path
    // contains no characters that need shell escaping; (2) the wrapper
    // body's internal quotes never reach the shell command line — the
    // shell reads them from disk by file syntax rules, not command-line
    // parser rules.
    let wrapper_body = shell.wrapper_script(command, exit_path);
    let wrapper_ext = match shell {
        WindowsShell::Pwsh | WindowsShell::Powershell => "ps1",
        WindowsShell::Cmd => "bat",
        // POSIX shells (git-bash etc.) execute the wrapper through `-c`,
        // so the file extension is purely cosmetic; `.sh` matches what an
        // operator would expect when grepping the spill directory.
        WindowsShell::Posix(_) => "sh",
    };
    // The wrapper reuses the task JSON's file stem, so it sits next to
    // the task's other artifacts under a recognizable name.
    let wrapper_path = paths.dir.join(format!(
        "{}.{}",
        paths
            .json
            .file_stem()
            .and_then(|s| s.to_str())
            .unwrap_or("wrapper"),
        wrapper_ext
    ));
    fs::write(&wrapper_path, wrapper_body)
        .map_err(|e| format!("failed to write background bash wrapper script: {e}"))?;

    let mut cmd = Command::new(shell.binary().as_ref());
    match shell {
        WindowsShell::Pwsh | WindowsShell::Powershell => {
            // -File runs the script with no quoting issues. `-NoLogo`,
            // `-NoProfile`, etc. apply to the host before the file runs.
            cmd.args([
                "-NoLogo",
                "-NoProfile",
                "-NonInteractive",
                "-ExecutionPolicy",
                "Bypass",
                "-File",
            ]);
            cmd.arg(&wrapper_path);
        }
        WindowsShell::Cmd => {
            // `cmd /D /C "<bat-file-path>"` — invoking a .bat
            // file via /C is well-defined; the file's contents are
            // read line-by-line by cmd's batch processor, NOT
            // re-interpreted by the /C parser. This avoids the
            // "filename syntax incorrect" errors that came from
            // having complex compound commands on the cmd line.
            cmd.args(["/D", "/C"]);
            cmd.arg(&wrapper_path);
        }
        WindowsShell::Posix(_) => {
            // git-bash and other POSIX shells run the wrapper script with
            // `<binary> <wrapper-path>` (the wrapper is just a shell
            // script). No special flags needed — the `trap` and atomic
            // exit-marker rename in `wrapper_script` are POSIX-standard.
            cmd.arg(&wrapper_path);
        }
    }

    // Win32 process creation flags. Caller selects whether to include
    // CREATE_BREAKAWAY_FROM_JOB — see `detached_shell_command_for` callers
    // for the breakaway-fallback strategy.
    cmd.creation_flags(creation_flags);
    Ok(cmd)
}
1650
/// Spawn a detached background bash child process.
///
/// On Unix this is a single spawn against `/bin/sh`. On Windows it walks
/// `WindowsShell::shell_candidates()` (pwsh.exe → powershell.exe →
/// cmd.exe) and retries with the next candidate when the previous one
/// fails to spawn with `NotFound` — the same runtime safety net the
/// foreground bash path has, so issue #27 callers landing on cmd.exe
/// fallback can also use background bash. The wrapper script is
/// regenerated per attempt because PowerShell wrappers embed the shell
/// binary by name; the stdout/stderr capture handles are also reopened
/// per attempt because `Command::spawn()` consumes them.
///
/// Errors other than `NotFound` (PermissionDenied, OutOfMemory, etc.)
/// return immediately without retry — they indicate a problem with the
/// resolved shell that retrying with a different shell won't fix.
fn spawn_detached_child(
    command: &str,
    paths: &TaskPaths,
    workdir: &Path,
    env: &HashMap<String, String>,
) -> Result<std::process::Child, String> {
    #[cfg(not(windows))]
    {
        // Unix: one attempt. `detached_shell_command` handles setsid and
        // atomic exit-marker publication; stdout/stderr go to capture files.
        let stdout = create_capture_file(&paths.stdout)
            .map_err(|e| format!("failed to open stdout capture file: {e}"))?;
        let stderr = create_capture_file(&paths.stderr)
            .map_err(|e| format!("failed to open stderr capture file: {e}"))?;
        detached_shell_command(command, &paths.exit)
            .current_dir(workdir)
            .envs(env)
            .stdin(Stdio::null())
            .stdout(Stdio::from(stdout))
            .stderr(Stdio::from(stderr))
            .spawn()
            .map_err(|e| format!("failed to spawn background bash command: {e}"))
    }
    #[cfg(windows)]
    {
        use crate::windows_shell::shell_candidates;
        // Spawn priority: pwsh → powershell → git-bash → cmd. Same as the
        // legacy foreground bash spawn path. v0.20 routes ALL bash through
        // this background spawn helper, including foreground tool calls
        // where the model writes PowerShell-syntax (`$var = ...`,
        // `Start-Sleep`, `Add-Content`) — those fail outright under cmd.
        // The earlier v0.18-era cmd-first override worked around a
        // PowerShell detached-output bug; that bug is fixed at the
        // process-flag layer (CREATE_NO_WINDOW instead of DETACHED_PROCESS,
        // see flag block below), so we no longer need to misroute PS
        // commands through cmd.
        let candidates: Vec<crate::windows_shell::WindowsShell> = shell_candidates();
        // Win32 process creation flags. We try with CREATE_BREAKAWAY_FROM_JOB
        // first (so the bg child outlives the AFT process when AFT is killed),
        // then fall back without it for environments where the parent is in a
        // Job Object that doesn't grant `JOB_OBJECT_LIMIT_BREAKAWAY_OK`. CI
        // runners (GitHub Actions windows-2022) and some MDM-managed corp
        // environments hit this — `CreateProcess` returns Access Denied (5).
        // Without breakaway, the child still runs detached but will be torn
        // down with the parent if the parent process group is signaled.
        //
        // We use CREATE_NO_WINDOW (no visible console window, but the
        // child still has a hidden console) rather than DETACHED_PROCESS
        // (no console at all). PowerShell-based wrappers that perform
        // file I/O via [System.IO.File] need a console handle to flush
        // stdout/stderr correctly even when redirected — under
        // DETACHED_PROCESS, pwsh sometimes silently exits before
        // executing later script statements (the Move-Item that writes
        // the exit marker never runs), leaving the bg task forever
        // marked Failed: process exited without exit marker. cmd.exe
        // wrappers tolerate DETACHED_PROCESS, but switching to
        // CREATE_NO_WINDOW costs nothing for cmd and unblocks pwsh.
        const FLAG_CREATE_NEW_PROCESS_GROUP: u32 = 0x0000_0200;
        const FLAG_CREATE_BREAKAWAY_FROM_JOB: u32 = 0x0100_0000;
        const FLAG_CREATE_NO_WINDOW: u32 = 0x0800_0000;
        let with_breakaway =
            FLAG_CREATE_NO_WINDOW | FLAG_CREATE_NEW_PROCESS_GROUP | FLAG_CREATE_BREAKAWAY_FROM_JOB;
        let without_breakaway = FLAG_CREATE_NO_WINDOW | FLAG_CREATE_NEW_PROCESS_GROUP;
        // Most recent spawn failure, cited in the final error message if
        // every candidate is exhausted.
        let mut last_error: Option<String> = None;
        for (idx, shell) in candidates.iter().enumerate() {
            // Per-shell, try with breakaway first. If the process is in a
            // restrictive job, the breakaway flag triggers Access Denied
            // (os error 5). Retry once without breakaway.
            for &flags in &[with_breakaway, without_breakaway] {
                // Re-open capture handles per attempt; spawn() consumes them.
                let stdout = create_capture_file(&paths.stdout)
                    .map_err(|e| format!("failed to open stdout capture file: {e}"))?;
                let stderr = create_capture_file(&paths.stderr)
                    .map_err(|e| format!("failed to open stderr capture file: {e}"))?;
                let mut cmd =
                    detached_shell_command_for(shell.clone(), command, &paths.exit, paths, flags)?;
                cmd.current_dir(workdir)
                    .envs(env)
                    .stdin(Stdio::null())
                    .stdout(Stdio::from(stdout))
                    .stderr(Stdio::from(stderr));
                match cmd.spawn() {
                    Ok(child) => {
                        if idx > 0 {
                            crate::slog_warn!("background bash spawn fell back to {} after {} earlier candidate(s) failed; \
                             the cached PATH probe disagreed with runtime spawn — likely PATH \
                             inheritance, antivirus / AppLocker / Defender ASR, or sandbox policy.",
                            shell.binary(),
                            idx);
                        }
                        if flags == without_breakaway {
                            crate::slog_warn!(
                                "background bash spawn: CREATE_BREAKAWAY_FROM_JOB rejected \
                             (likely a restrictive Job Object — CI sandbox or MDM policy). \
                             Spawned without breakaway; the bg task will be torn down if the \
                             AFT process group is killed."
                            );
                        }
                        return Ok(child);
                    }
                    Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
                        crate::slog_warn!("background bash spawn: {} returned NotFound at runtime — trying next candidate",
                        shell.binary());
                        last_error = Some(format!("{}: {e}", shell.binary()));
                        // Skip the without-breakaway retry for NotFound — the
                        // binary itself is missing, breakaway flag is irrelevant.
                        break;
                    }
                    Err(e) if flags == with_breakaway && e.raw_os_error() == Some(5) => {
                        // Access Denied during breakaway — retry without it.
                        crate::slog_warn!(
                            "background bash spawn: CREATE_BREAKAWAY_FROM_JOB rejected with \
                         Access Denied — retrying {} without breakaway",
                            shell.binary()
                        );
                        last_error = Some(format!("{}: {e}", shell.binary()));
                        continue;
                    }
                    Err(e) => {
                        // Any other failure (PermissionDenied, OOM, …) is not
                        // shell-specific — bail immediately per the doc above.
                        return Err(format!(
                            "failed to spawn background bash command via {}: {e}",
                            shell.binary()
                        ));
                    }
                }
            }
        }
        Err(format!(
            "failed to spawn background bash command: no Windows shell could be spawned. \
             Last error: {}. PATH-probed candidates: {:?}",
            last_error.unwrap_or_else(|| "no candidates were attempted".to_string()),
            candidates.iter().map(|s| s.binary()).collect::<Vec<_>>()
        ))
    }
}
1799
1800fn random_slug() -> String {
1801    let mut bytes = [0u8; 4];
1802    // getrandom is a transitive dependency; use it directly for OS entropy.
1803    getrandom::fill(&mut bytes).unwrap_or_else(|_| {
1804        // Extremely unlikely fallback: time + pid mix.
1805        let t = SystemTime::now()
1806            .duration_since(UNIX_EPOCH)
1807            .map(|d| d.subsec_nanos())
1808            .unwrap_or(0);
1809        let p = std::process::id();
1810        bytes.copy_from_slice(&(t ^ p).to_le_bytes());
1811    });
1812    // `bash-` + 8 lowercase hex chars — compact, OS-entropy backed.
1813    let hex: String = bytes.iter().map(|b| format!("{b:02x}")).collect();
1814    format!("bash-{hex}")
1815}
1816
1817#[cfg(test)]
1818mod tests {
1819    use std::collections::HashMap;
1820    #[cfg(windows)]
1821    use std::fs;
1822    use std::sync::{Arc, Mutex};
1823    use std::time::Duration;
1824    #[cfg(windows)]
1825    use std::time::Instant;
1826
1827    use super::*;
1828
    // Platform-appropriate command that exits with status 0 almost
    // immediately — for tests that need a task to finish fast.
    #[cfg(unix)]
    const QUICK_SUCCESS_COMMAND: &str = "true";
    #[cfg(windows)]
    const QUICK_SUCCESS_COMMAND: &str = "cmd /c exit 0";

    // Platform-appropriate command that keeps running for ~5 seconds —
    // for tests that need to observe a task while it is still alive.
    #[cfg(unix)]
    const LONG_RUNNING_COMMAND: &str = "sleep 5";
    #[cfg(windows)]
    const LONG_RUNNING_COMMAND: &str = "cmd /c timeout /t 5 /nobreak > nul";
1838
1839    /// Spawn a child process that exits immediately and return it after
1840    /// it has terminated. Used by reap_child tests to simulate the
1841    /// "child exists and is dead" state when the watchdog has already
1842    /// nulled out the original child handle.
1843    fn spawn_dead_child() -> std::process::Child {
1844        #[cfg(unix)]
1845        let mut cmd = std::process::Command::new("true");
1846        #[cfg(windows)]
1847        let mut cmd = {
1848            let mut c = std::process::Command::new("cmd");
1849            c.args(["/c", "exit", "0"]);
1850            c
1851        };
1852        cmd.stdin(std::process::Stdio::null());
1853        cmd.stdout(std::process::Stdio::null());
1854        cmd.stderr(std::process::Stdio::null());
1855        let mut child = cmd.spawn().expect("spawn replacement child for reap test");
1856        // Poll try_wait() until the child actually exits, instead of calling
1857        // wait() which closes the OS handle. On Windows, after wait()
1858        // closes the handle, subsequent try_wait() calls (which reap_child
1859        // depends on) return Err — the test was inadvertently giving
1860        // reap_child an unusable child handle. Polling try_wait() keeps the
1861        // handle open and observes natural exit, matching the production
1862        // shape where the watchdog discovers an exited child for the first
1863        // time.
1864        let started = Instant::now();
1865        loop {
1866            match child.try_wait() {
1867                Ok(Some(_)) => break,
1868                Ok(None) => {
1869                    if started.elapsed() > Duration::from_secs(5) {
1870                        panic!("dead-child stand-in did not exit within 5s");
1871                    }
1872                    std::thread::sleep(Duration::from_millis(10));
1873                }
1874                Err(error) => panic!("dead-child try_wait failed: {error}"),
1875            }
1876        }
1877        child
1878    }
1879
1880    #[test]
1881    fn cleanup_finished_removes_terminal_tasks_older_than_threshold() {
1882        let registry = BgTaskRegistry::default();
1883        let dir = tempfile::tempdir().unwrap();
1884        let task_id = registry
1885            .spawn(
1886                QUICK_SUCCESS_COMMAND,
1887                "session".to_string(),
1888                dir.path().to_path_buf(),
1889                HashMap::new(),
1890                Some(Duration::from_secs(30)),
1891                dir.path().to_path_buf(),
1892                10,
1893                true,
1894                false,
1895                Some(dir.path().to_path_buf()),
1896            )
1897            .unwrap();
1898        registry
1899            .kill_with_status(&task_id, "session", BgTaskStatus::Killed)
1900            .unwrap();
1901        let completions = registry.drain_completions_for_session(Some("session"));
1902        assert_eq!(completions.len(), 1);
1903
1904        registry.cleanup_finished(Duration::ZERO);
1905
1906        assert!(registry.inner.tasks.lock().unwrap().is_empty());
1907    }
1908
1909    #[test]
1910    fn cleanup_finished_retains_undelivered_terminals() {
1911        let registry = BgTaskRegistry::default();
1912        let dir = tempfile::tempdir().unwrap();
1913        let task_id = registry
1914            .spawn(
1915                QUICK_SUCCESS_COMMAND,
1916                "session".to_string(),
1917                dir.path().to_path_buf(),
1918                HashMap::new(),
1919                Some(Duration::from_secs(30)),
1920                dir.path().to_path_buf(),
1921                10,
1922                true,
1923                false,
1924                Some(dir.path().to_path_buf()),
1925            )
1926            .unwrap();
1927        registry
1928            .kill_with_status(&task_id, "session", BgTaskStatus::Killed)
1929            .unwrap();
1930
1931        registry.cleanup_finished(Duration::ZERO);
1932
1933        assert!(registry.inner.tasks.lock().unwrap().contains_key(&task_id));
1934    }
1935
    /// Issue #27 Oracle review P1 + P2 test gap: verify that the live
    /// watchdog path (reap_child) marks a task Failed when the child
    /// has exited but no exit marker was written. Before this fix the
    /// task would remain `Running` until timeout, even though the
    /// process was definitely dead.
    ///
    /// Cross-platform: uses a quick-exiting command that does NOT go
    /// through the wrapper script (we manually clear the exit marker
    /// after spawn to simulate the wrapper crashing before write).
    #[test]
    fn reap_child_marks_failed_when_child_exits_without_exit_marker() {
        let registry = BgTaskRegistry::new(Arc::new(Mutex::new(None)));
        let dir = tempfile::tempdir().unwrap();
        let task_id = registry
            .spawn(
                QUICK_SUCCESS_COMMAND,
                "session".to_string(),
                dir.path().to_path_buf(),
                HashMap::new(),
                Some(Duration::from_secs(30)),
                dir.path().to_path_buf(),
                10,
                true,
                false,
                Some(dir.path().to_path_buf()),
            )
            .unwrap();

        // Handle to the task's shared state; used below to inspect and
        // reset it directly.
        let task = registry.task_for_session(&task_id, "session").unwrap();

        // Wait for the child to actually exit and the wrapper to either
        // write the marker or fail. Then nuke the marker to simulate
        // wrapper crash before write. Poll up to 5s; this is plenty for a
        // `true`/`cmd /c exit 0` invocation.
        let started = Instant::now();
        loop {
            let exited = {
                let mut state = task.state.lock().unwrap();
                if let Some(child) = state.child.as_mut() {
                    matches!(child.try_wait(), Ok(Some(_)))
                } else {
                    // Watchdog already reaped the handle — counts as exited.
                    true
                }
            };
            if exited {
                break;
            }
            assert!(
                started.elapsed() < Duration::from_secs(5),
                "child should exit quickly"
            );
            std::thread::sleep(Duration::from_millis(20));
        }

        // Stop the watchdog so it doesn't race with our manual reap_child.
        // On fast Windows runners the watchdog ticks (every 500ms) can
        // observe the child exit and reap it before this test's assertion
        // fires, leaving us with state.child = None and an already-terminal
        // status. We specifically want to test reap_child's logic when
        // invoked manually on a Running-but-actually-dead task, so we need
        // exclusive control over the reap path here.
        registry
            .inner
            .shutdown
            .store(true, std::sync::atomic::Ordering::SeqCst);
        // Give the watchdog at most one tick (500ms) to notice shutdown
        // before we touch task state. Without this, an in-flight watchdog
        // iteration could still race with our state setup below.
        std::thread::sleep(Duration::from_millis(550));

        // Wrapper likely wrote the marker by now; remove it to simulate
        // a wrapper crash that exited before persisting the exit code.
        let _ = std::fs::remove_file(&task.paths.exit);

        // The watchdog may have already reaped the child handle and
        // marked the task terminal before we got here. Reset both so
        // reap_child has the "Running task whose child just exited"
        // shape it's designed to handle. We don't restore state.child
        // (the underlying OS process is gone), but reap_child's
        // try_wait path won't be exercised; we're testing the
        // status-transition logic when state.child is set to a dead
        // child OR None and the marker is missing.
        //
        // CRITICAL on Windows: the watchdog ticks fast enough that the
        // JSON on disk may already say `Completed`. `update_task` (called
        // by `reap_child`) reads from disk, applies the closure, but
        // ROLLS BACK if the original on-disk state was already terminal
        // (see persistence.rs::update_task). So we must reset BOTH
        // in-memory metadata AND the JSON on disk to a Running state to
        // give reap_child the fresh shape it expects to operate on.
        {
            let mut state = task.state.lock().unwrap();
            state.metadata.status = BgTaskStatus::Running;
            state.metadata.status_reason = None;
            state.metadata.exit_code = None;
            state.metadata.finished_at = None;
            state.metadata.duration_ms = None;
            // Persist the reset state to disk so update_task's terminal
            // rollback guard sees a non-terminal starting point.
            crate::bash_background::persistence::write_task(
                &task.paths.json,
                &state.metadata,
            )
            .expect("persist reset Running metadata for reap_child test");
            // If the watchdog already nulled state.child, we need to
            // simulate "child exists and is dead" so reap_child's
            // try_wait path runs. Spawn a quick-exit child as a stand-in.
            if state.child.is_none() {
                state.child = Some(spawn_dead_child());
            }
        }
        // Clear the terminal_at marker too so mark_terminal_now() can fire
        // again inside reap_child.
        *task.terminal_at.lock().unwrap() = None;

        // Sanity: task is still Running per metadata (replay/poll hasn't
        // observed the missing marker yet).
        assert!(
            task.is_running(),
            "precondition: metadata.status == Running"
        );
        assert!(
            !task.paths.exit.exists(),
            "precondition: exit marker absent"
        );

        // Invoke the watchdog's reap_child directly. The fix should mark
        // the task Failed with the documented reason string, instead of
        // just dropping the child handle and leaving status=Running.
        registry.reap_child(&task);

        let state = task.state.lock().unwrap();
        assert!(
            state.metadata.status.is_terminal(),
            "reap_child must transition to terminal when PID dead and no marker. \
             Got status={:?}",
            state.metadata.status
        );
        assert_eq!(
            state.metadata.status,
            BgTaskStatus::Failed,
            "must specifically be Failed (not Killed): status={:?}",
            state.metadata.status
        );
        assert_eq!(
            state.metadata.status_reason.as_deref(),
            Some("process exited without exit marker"),
            "reason must match replay path's wording: {:?}",
            state.metadata.status_reason
        );
        assert!(
            state.child.is_none(),
            "child handle must be released after reap"
        );
        assert!(state.detached, "task must be marked detached after reap");
    }
2092
    /// Companion to the above: when the exit marker DOES exist on disk
    /// at reap_child time (race window — wrapper finished writing
    /// between try_wait and the marker check), reap_child must NOT mark
    /// the task Failed. Instead it leaves status=Running and lets the
    /// next poll_task() cycle finalize via the marker.
    #[test]
    fn reap_child_preserves_running_when_exit_marker_exists() {
        let registry = BgTaskRegistry::new(Arc::new(Mutex::new(None)));
        let dir = tempfile::tempdir().unwrap();
        let task_id = registry
            .spawn(
                QUICK_SUCCESS_COMMAND,
                "session".to_string(),
                dir.path().to_path_buf(),
                HashMap::new(),
                Some(Duration::from_secs(30)),
                dir.path().to_path_buf(),
                10,
                true,
                false,
                Some(dir.path().to_path_buf()),
            )
            .unwrap();

        let task = registry.task_for_session(&task_id, "session").unwrap();

        // Wait for child to exit AND for the marker to land. Both happen
        // shortly after the wrapper finishes — but we want both observed.
        let started = Instant::now();
        loop {
            let exited = {
                let mut state = task.state.lock().unwrap();
                if let Some(child) = state.child.as_mut() {
                    matches!(child.try_wait(), Ok(Some(_)))
                } else {
                    // Watchdog already reaped the handle — counts as exited.
                    true
                }
            };
            if exited && task.paths.exit.exists() {
                break;
            }
            assert!(
                started.elapsed() < Duration::from_secs(5),
                "child should exit and write marker quickly"
            );
            std::thread::sleep(Duration::from_millis(20));
        }

        // Stop the watchdog so it doesn't race with our manual reap_child.
        // On fast Windows runners the watchdog can call poll_task (which
        // finalizes via marker) before this test asserts the
        // "marker exists, status still Running" invariant. We want
        // exclusive control over the reap path.
        registry
            .inner
            .shutdown
            .store(true, std::sync::atomic::Ordering::SeqCst);
        std::thread::sleep(Duration::from_millis(550));

        // If the watchdog already finalized the task before we stopped it,
        // restore the test setup: reset status to Running and ensure the
        // marker file is still on disk. We're testing reap_child's
        // behavior when called manually with both child-exited AND
        // marker-present, regardless of whether the watchdog beat us.
        {
            let mut state = task.state.lock().unwrap();
            state.metadata.status = BgTaskStatus::Running;
            state.metadata.status_reason = None;
            if state.child.is_none() {
                state.child = Some(spawn_dead_child());
            }
        }
        *task.terminal_at.lock().unwrap() = None;
        // Make sure the marker is still on disk (poll_task removes it on
        // finalization). Recreate it if needed.
        if !task.paths.exit.exists() {
            std::fs::write(&task.paths.exit, "0").expect("write replacement exit marker");
        }

        // reap_child sees: child exited, marker exists. It should:
        //  - drop state.child / set state.detached = true
        //  - NOT change status (poll_task will finalize via marker next tick)
        registry.reap_child(&task);

        let state = task.state.lock().unwrap();
        assert!(
            state.child.is_none(),
            "child handle still released even when marker exists"
        );
        assert!(
            state.detached,
            "task still marked detached even when marker exists"
        );
        // Status remains Running because reap_child defers to poll_task
        // when a marker exists. It would be wrong for reap to record the
        // marker outcome (poll_task does that with proper exit-code
        // parsing).
        assert_eq!(
            state.metadata.status,
            BgTaskStatus::Running,
            "reap_child must defer to poll_task when marker exists"
        );
    }
2196
2197    #[test]
2198    fn cleanup_finished_keeps_running_tasks() {
2199        let registry = BgTaskRegistry::new(Arc::new(Mutex::new(None)));
2200        let dir = tempfile::tempdir().unwrap();
2201        let task_id = registry
2202            .spawn(
2203                LONG_RUNNING_COMMAND,
2204                "session".to_string(),
2205                dir.path().to_path_buf(),
2206                HashMap::new(),
2207                Some(Duration::from_secs(30)),
2208                dir.path().to_path_buf(),
2209                10,
2210                true,
2211                false,
2212                Some(dir.path().to_path_buf()),
2213            )
2214            .unwrap();
2215
2216        registry.cleanup_finished(Duration::ZERO);
2217
2218        assert!(registry.inner.tasks.lock().unwrap().contains_key(&task_id));
2219        let _ = registry.kill(&task_id, "session");
2220    }
2221
2222    #[cfg(windows)]
2223    fn wait_for_file(path: &Path) -> String {
2224        let started = Instant::now();
2225        loop {
2226            if path.exists() {
2227                return fs::read_to_string(path).expect("read file");
2228            }
2229            assert!(
2230                started.elapsed() < Duration::from_secs(30),
2231                "timed out waiting for {}",
2232                path.display()
2233            );
2234            std::thread::sleep(Duration::from_millis(100));
2235        }
2236    }
2237
2238    #[cfg(windows)]
2239    fn spawn_windows_registry_command(
2240        command: &str,
2241    ) -> (BgTaskRegistry, tempfile::TempDir, String) {
2242        let registry = BgTaskRegistry::new(Arc::new(Mutex::new(None)));
2243        let dir = tempfile::tempdir().unwrap();
2244        let task_id = registry
2245            .spawn(
2246                command,
2247                "session".to_string(),
2248                dir.path().to_path_buf(),
2249                HashMap::new(),
2250                Some(Duration::from_secs(30)),
2251                dir.path().to_path_buf(),
2252                10,
2253                false,
2254                false,
2255                Some(dir.path().to_path_buf()),
2256            )
2257            .unwrap();
2258        (registry, dir, task_id)
2259    }
2260
2261    #[cfg(windows)]
2262    #[test]
2263    fn windows_spawn_writes_exit_marker_for_zero_exit() {
2264        let (registry, _dir, task_id) = spawn_windows_registry_command("cmd /c exit 0");
2265        let exit_path = registry.task_exit_path(&task_id, "session").unwrap();
2266
2267        let content = wait_for_file(&exit_path);
2268
2269        assert_eq!(content.trim(), "0");
2270    }
2271
2272    #[cfg(windows)]
2273    #[test]
2274    fn windows_spawn_writes_exit_marker_for_nonzero_exit() {
2275        let (registry, _dir, task_id) = spawn_windows_registry_command("cmd /c exit 42");
2276        let exit_path = registry.task_exit_path(&task_id, "session").unwrap();
2277
2278        let content = wait_for_file(&exit_path);
2279
2280        assert_eq!(content.trim(), "42");
2281    }
2282
2283    #[cfg(windows)]
2284    #[test]
2285    fn windows_spawn_captures_stdout_to_disk() {
2286        let (registry, _dir, task_id) = spawn_windows_registry_command("cmd /c echo hello");
2287        let task = registry.task_for_session(&task_id, "session").unwrap();
2288        let stdout_path = task.paths.stdout.clone();
2289        let exit_path = task.paths.exit.clone();
2290
2291        let _ = wait_for_file(&exit_path);
2292        let stdout = fs::read_to_string(stdout_path).expect("read stdout");
2293
2294        assert!(stdout.contains("hello"), "stdout was {stdout:?}");
2295    }
2296
2297    #[cfg(windows)]
2298    #[test]
2299    fn windows_spawn_uses_pwsh_when_available() {
2300        // Without $SHELL set, $SHELL probe yields None and pwsh wins.
2301        // (We intentionally pass None for shell_env to keep this test
2302        // independent of the runner's actual env.)
2303        let candidates = crate::windows_shell::shell_candidates_with(
2304            |binary| match binary {
2305                "pwsh.exe" => Some(std::path::PathBuf::from(r"C:\pwsh\pwsh.exe")),
2306                "powershell.exe" => Some(std::path::PathBuf::from(r"C:\ps\powershell.exe")),
2307                _ => None,
2308            },
2309            || None,
2310        );
2311        let shell = candidates.first().expect("at least one candidate").clone();
2312        assert_eq!(shell, crate::windows_shell::WindowsShell::Pwsh);
2313        assert_eq!(shell.binary().as_ref(), "pwsh.exe");
2314    }
2315
2316    /// Issue #27 Oracle review P1, updated: cmd wrapper writes a `.bat` file
2317    /// that batch-evaluates `%ERRORLEVEL%` on its own line (line-by-line
2318    /// evaluation is the default for batch files; parse-time expansion only
2319    /// applies to compound `&`-chained inline commands). Capturing
2320    /// `%ERRORLEVEL%` into `set CODE=%ERRORLEVEL%` immediately after the user
2321    /// command runs records the real run-time exit code.
2322    #[cfg(windows)]
2323    #[test]
2324    fn windows_shell_cmd_wrapper_writes_exit_marker_with_move() {
2325        let exit_path = Path::new(r"C:\Temp\bash-test.exit");
2326        let script =
2327            crate::windows_shell::WindowsShell::Cmd.wrapper_script("cmd /c exit 42", exit_path);
2328
2329        // Batch wrapper: capture exit code into CODE on the line after the
2330        // user command, then write CODE to a temp marker file before
2331        // atomic-renaming it into place.
2332        assert!(
2333            script.contains("set CODE=%ERRORLEVEL%"),
2334            "wrapper must capture exit code into CODE: {script}"
2335        );
2336        assert!(
2337            script.contains("echo %CODE% >"),
2338            "wrapper must echo CODE to a temp marker file: {script}"
2339        );
2340        assert!(
2341            script.contains("move /Y"),
2342            "wrapper must use atomic move to write the marker: {script}"
2343        );
2344        // move output must be redirected to nul to avoid polluting the
2345        // user's captured stdout with "1 file(s) moved." lines.
2346        assert!(
2347            script.contains("> nul"),
2348            "wrapper must redirect move output to nul: {script}"
2349        );
2350        // exit /B %CODE% propagates the real exit code so wait() sees it.
2351        assert!(
2352            script.contains("exit /B %CODE%"),
2353            "wrapper must propagate the captured exit code: {script}"
2354        );
2355        assert!(script.contains(r#""C:\Temp\bash-test.exit.tmp""#));
2356        assert!(script.contains(r#""C:\Temp\bash-test.exit""#));
2357    }
2358
2359    /// `bg_command()` for Cmd no longer needs `/V:ON` — the wrapper is now
2360    /// written to a `.bat` file where batch-line evaluation captures
2361    /// `%ERRORLEVEL%` correctly without delayed expansion. We still need
2362    /// `/D` (skip AutoRun) and `/S` (simple quote-stripping for paths with
2363    /// internal `"`-quoting from `cmd_quote`).
2364    #[cfg(windows)]
2365    #[test]
2366    fn windows_shell_cmd_bg_command_uses_minimal_cmd_flags() {
2367        use crate::windows_shell::WindowsShell;
2368        let cmd = WindowsShell::Cmd.bg_command("echo wrapped");
2369        let args: Vec<&std::ffi::OsStr> = cmd.get_args().collect();
2370        let args_strs: Vec<&str> = args.iter().filter_map(|a| a.to_str()).collect();
2371        assert_eq!(
2372            args_strs,
2373            vec!["/D", "/S", "/C", "echo wrapped"],
2374            "Cmd::bg_command must prepend /D /S /C"
2375        );
2376    }
2377
2378    /// PowerShell variants don't need `/V:ON`-style flags; their args
2379    /// are the same for foreground (`command()`) and background
2380    /// (`bg_command()`).
2381    #[cfg(windows)]
2382    #[test]
2383    fn windows_shell_pwsh_bg_command_uses_standard_args() {
2384        use crate::windows_shell::WindowsShell;
2385        let cmd = WindowsShell::Pwsh.bg_command("Get-Date");
2386        let args: Vec<&std::ffi::OsStr> = cmd.get_args().collect();
2387        let args_strs: Vec<&str> = args.iter().filter_map(|a| a.to_str()).collect();
2388        assert!(
2389            args_strs.contains(&"-Command"),
2390            "Pwsh::bg_command must use -Command: {args_strs:?}"
2391        );
2392        assert!(
2393            args_strs.contains(&"Get-Date"),
2394            "Pwsh::bg_command must include the user command body"
2395        );
2396    }
2397
    /// Issue #27 Oracle review P1 + P2 test gap (historical): end-to-end
    /// proof that the **cmd.exe-specific** wrapper path captures the user
    /// command's run-time exit code correctly. The existing
    /// `windows_spawn_writes_exit_marker_for_nonzero_exit` test would also
    /// pass with the buggy `%ERRORLEVEL%` wrapper if the Windows machine
    /// had pwsh.exe or powershell.exe on PATH (which is typical) — the
    /// outer wrapper would then be PowerShell, not cmd, and PowerShell's
    /// `$LASTEXITCODE` captures the inner `cmd /c exit 42` correctly.
    ///
    /// This test directly spawned via `WindowsShell::Cmd.bg_command()` to
    /// force the cmd-wrapper code path, then wrote the exit marker and
    /// asserted it contained "42" not "0". With the pre-fix `%ERRORLEVEL%`
    /// wrapper it would fail: parse-time expansion would record cmd's
    /// startup ERRORLEVEL (typically 0) regardless of what the user
    /// command returned.
    ///
    /// **Disabled.** The test exercises `WindowsShell::Cmd.bg_command()` —
    /// the inline command-line wrapper helper that production code does
    /// NOT use anymore. v0.19.4 switched bg-bash to a file-based wrapper
    /// (`<task>.bat` / `<task>.ps1`) because the inline cmd-line quoting
    /// produced silent failures on Windows 11 (move /Y could not parse
    /// path arguments through cmd's /C parser). The `bg_command` helper
    /// is kept only for parity with `WindowsShell::Cmd.command()` shape;
    /// the production spawn path goes through `detached_shell_command_for`,
    /// which writes the wrapper to disk and invokes `cmd /V:ON /D /C
    /// <bat-path>`.
    ///
    /// The `!ERRORLEVEL!` correctness this test was meant to verify is
    /// covered live by the Windows e2e harness scenario 2d
    /// (`bg bash records non-zero exit code (cmd /c exit 42)`), which
    /// exercises the real file-based wrapper end-to-end via the protocol.
    #[allow(dead_code)]
    #[cfg(any())] // disabled on all targets
    fn windows_cmd_wrapper_records_real_exit_code_disabled() {}
2431}