// purple_ssh/containers.rs — container listing, inspection and actions over SSH.

use std::collections::HashMap;
use std::time::{SystemTime, UNIX_EPOCH};

use log::{error, info};
use serde::{Deserialize, Serialize};

use crate::ssh_context::{OwnedSshContext, SshContext};
9
10// ---------------------------------------------------------------------------
11// ContainerInfo model
12// ---------------------------------------------------------------------------
13
/// Metadata for a single container (from `docker ps -a` / `podman ps -a`).
///
/// Deserialization is tolerant of both docker and podman JSON shapes.
/// Docker uses `ID` plus scalar `Names`/`Ports`; podman uses `Id` plus
/// `Names` as an array and `Ports` as an array of objects. The custom
/// helpers below coerce both into the docker-shaped scalar fields the
/// rest of purple (UI, cache, MCP) already understands.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ContainerInfo {
    /// Container ID: docker emits `ID`, podman emits `Id`.
    #[serde(rename = "ID", alias = "Id")]
    pub id: String,
    /// Container name(s); podman's array form is comma-joined.
    #[serde(rename = "Names", deserialize_with = "deserialize_names_field")]
    pub names: String,
    /// Image reference the container was created from.
    #[serde(rename = "Image")]
    pub image: String,
    /// Lifecycle state as reported by the runtime.
    #[serde(rename = "State")]
    pub state: String,
    /// Status string; a missing key defaults to empty.
    #[serde(rename = "Status", default)]
    pub status: String,
    // `default` covers the missing-key case directly via Default::default()
    // and bypasses `deserialize_with`. The custom deserializer therefore
    // only runs when `Ports` is present (scalar, array or explicit null).
    #[serde(
        rename = "Ports",
        deserialize_with = "deserialize_ports_field",
        default
    )]
    pub ports: String,
}
43
44/// Accept `Names` as either a scalar string (docker) or an array of
45/// strings (podman). Multiple names join with `,` to match docker's
46/// own comma-joined rendering. Unexpected shapes (number, object,
47/// null) propagate as serde errors; `parse_container_ps` drops the
48/// offending row via `.ok()`, which is the right behaviour for a row
49/// that has lost its identity.
50fn deserialize_names_field<'de, D>(deserializer: D) -> Result<String, D::Error>
51where
52    D: serde::Deserializer<'de>,
53{
54    #[derive(Deserialize)]
55    #[serde(untagged)]
56    enum NamesField {
57        Scalar(String),
58        Array(Vec<String>),
59    }
60    match NamesField::deserialize(deserializer)? {
61        NamesField::Scalar(s) => Ok(s),
62        NamesField::Array(arr) => Ok(arr.join(",")),
63    }
64}
65
66/// Accept `Ports` as either a scalar string (docker) or an array of
67/// port objects (podman). Podman entries are rendered into the same
68/// `host_ip:host_port->container_port/proto` form docker emits, so
69/// downstream UI rendering stays uniform. An explicit JSON null is
70/// tolerated and produces an empty string: podman uses `null` to mean
71/// "no ports published", which is semantically valid and the row must
72/// remain visible.
73fn deserialize_ports_field<'de, D>(deserializer: D) -> Result<String, D::Error>
74where
75    D: serde::Deserializer<'de>,
76{
77    #[derive(Deserialize)]
78    #[serde(untagged)]
79    enum PortsField {
80        Scalar(String),
81        Array(Vec<PodmanPort>),
82    }
83    match Option::<PortsField>::deserialize(deserializer)? {
84        Some(PortsField::Scalar(s)) => Ok(s),
85        Some(PortsField::Array(arr)) => Ok(format_podman_ports(&arr)),
86        None => Ok(String::new()),
87    }
88}
89
/// One entry of podman's structured `Ports` array. Every field is
/// defaulted so partially-populated entries still deserialize.
#[derive(Deserialize)]
struct PodmanPort {
    /// Host bind address; podman leaves this empty for wildcard binds.
    #[serde(default)]
    host_ip: String,
    #[serde(default)]
    container_port: u32,
    /// 0 (the serde default) suppresses the `host->` half of the rendering.
    #[serde(default)]
    host_port: u32,
    /// Number of consecutive ports covered by this mapping; defaults
    /// to 1 (single port) when podman omits the key.
    #[serde(default = "podman_port_default_range")]
    range: u32,
    /// Transport protocol; an empty string renders as "tcp".
    #[serde(default)]
    protocol: String,
}
103
/// Serde default for `PodmanPort::range`: a single-port mapping.
fn podman_port_default_range() -> u32 {
    1
}
107
108fn format_podman_ports(ports: &[PodmanPort]) -> String {
109    // ~24 chars per typical port entry. Pre-allocating avoids the
110    // intermediate Vec<String> + repeated re-allocations that the prior
111    // map/collect/join chain produced for compose stacks with many
112    // published ports.
113    let mut out = String::with_capacity(ports.len().saturating_mul(24));
114    for (i, p) in ports.iter().enumerate() {
115        if i > 0 {
116            out.push_str(", ");
117        }
118        write_podman_port(p, &mut out);
119    }
120    out
121}
122
123fn write_podman_port(p: &PodmanPort, out: &mut String) {
124    use std::fmt::Write as _;
125    let protocol = if p.protocol.is_empty() {
126        "tcp"
127    } else {
128        p.protocol.as_str()
129    };
130    if p.host_port != 0 {
131        // Podman emits an empty `host_ip` for both IPv4 wildcard and IPv6
132        // wildcard binds. Omit the prefix when unknown rather than
133        // mis-claim IPv4. Concrete addresses (e.g. 127.0.0.1, ::1) render
134        // verbatim with the docker `addr:port->...` form.
135        if !p.host_ip.is_empty() {
136            let _ = write!(out, "{}:", p.host_ip);
137        }
138        if p.range > 1 {
139            let _ = write!(
140                out,
141                "{}-{}->",
142                p.host_port,
143                p.host_port.saturating_add(p.range.saturating_sub(1))
144            );
145        } else {
146            let _ = write!(out, "{}->", p.host_port);
147        }
148    }
149    if p.range > 1 {
150        let _ = write!(
151            out,
152            "{}-{}",
153            p.container_port,
154            p.container_port.saturating_add(p.range.saturating_sub(1))
155        );
156    } else {
157        let _ = write!(out, "{}", p.container_port);
158    }
159    let _ = write!(out, "/{protocol}");
160}
161
162/// Try to parse one NDJSON line into `ContainerInfo`. Returns `None`
163/// for blank/non-JSON lines (MOTD/banner) without logging. JSON-shaped
164/// lines that fail to match the schema log at debug level so missing
165/// containers can be correlated to a concrete parse error rather than
166/// guessed from a shrunken list.
167fn try_parse_container_line(trimmed: &str) -> Option<ContainerInfo> {
168    if trimmed.is_empty() {
169        return None;
170    }
171    match serde_json::from_str(trimmed) {
172        Ok(c) => Some(c),
173        Err(e) if trimmed.starts_with('{') => {
174            log::debug!(
175                "[external] container parse: dropped JSON line: {} (err: {})",
176                &trimmed[..trimmed.len().min(120)],
177                e
178            );
179            None
180        }
181        Err(_) => None,
182    }
183}
184
185/// Parse NDJSON output from `docker ps --format '{{json .}}'` or
186/// `podman ps --format '{{json .}}'`. Used by tests and the public
187/// crate API exposed via `lib.rs`; the live SSH path streams through
188/// `parse_container_output` directly, so the binary build sees this
189/// helper as unused and the lint must be silenced.
190#[allow(dead_code)]
191pub fn parse_container_ps(output: &str) -> Vec<ContainerInfo> {
192    output
193        .lines()
194        .filter_map(|line| try_parse_container_line(line.trim()))
195        .collect()
196}
197
198// ---------------------------------------------------------------------------
199// ContainerRuntime
200// ---------------------------------------------------------------------------
201
/// Supported container runtimes.
#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ContainerRuntime {
    /// Docker engine, driven via the `docker` CLI.
    Docker,
    /// Podman, driven via the `podman` CLI.
    Podman,
}
208
209impl ContainerRuntime {
210    /// Returns the CLI binary name.
211    pub fn as_str(&self) -> &'static str {
212        match self {
213            ContainerRuntime::Docker => "docker",
214            ContainerRuntime::Podman => "podman",
215        }
216    }
217}
218
219/// Detect runtime from command output by matching the LAST non-empty trimmed
220/// line. Only "docker" or "podman" are accepted. MOTD-resilient.
221/// Currently unused (sentinel-based detection handles this inline) but kept
222/// as a public utility for potential future two-step detection paths.
223#[allow(dead_code)]
224pub fn parse_runtime(output: &str) -> Option<ContainerRuntime> {
225    let last = output
226        .lines()
227        .rev()
228        .map(|l| l.trim())
229        .find(|l| !l.is_empty())?;
230    match last {
231        "docker" => Some(ContainerRuntime::Docker),
232        "podman" => Some(ContainerRuntime::Podman),
233        _ => None,
234    }
235}
236
237// ---------------------------------------------------------------------------
238// ContainerAction
239// ---------------------------------------------------------------------------
240
/// Actions that can be performed on a container.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum ContainerAction {
    Start,
    Stop,
    Restart,
}

impl ContainerAction {
    /// Returns the CLI sub-command string.
    pub fn as_str(&self) -> &'static str {
        match *self {
            Self::Start => "start",
            Self::Stop => "stop",
            Self::Restart => "restart",
        }
    }
}
259
260/// Build the shell command to perform an action on a container.
261pub fn container_action_command(
262    runtime: ContainerRuntime,
263    action: ContainerAction,
264    container_id: &str,
265) -> String {
266    format!("{} {} {}", runtime.as_str(), action.as_str(), container_id)
267}
268
269// ---------------------------------------------------------------------------
270// Container ID validation
271// ---------------------------------------------------------------------------
272
273/// Validate a container ID or name.
274/// Accepts ASCII alphanumeric, hyphen, underscore, dot.
275/// Rejects empty, non-ASCII, shell metacharacters, colon.
276pub fn validate_container_id(id: &str) -> Result<(), String> {
277    if id.is_empty() {
278        return Err(crate::messages::CONTAINER_ID_EMPTY.to_string());
279    }
280    for c in id.chars() {
281        if !c.is_ascii_alphanumeric() && c != '-' && c != '_' && c != '.' {
282            return Err(crate::messages::container_id_invalid_char(c));
283        }
284    }
285    Ok(())
286}
287
288// ---------------------------------------------------------------------------
289// Combined SSH command + output parsing
290// ---------------------------------------------------------------------------
291
292/// Build the SSH command string for listing containers. Output is the
293/// container NDJSON, then the `##purple:engine##` sentinel, then the
294/// daemon version on its own line. The version subcall is suffixed with
295/// `|| true` so its failure cannot mask a `docker ps` error: the chain
296/// surfaces ps's exit code, while a missing version line just yields
297/// `engine_version: None` downstream.
298///
299/// - `Some(Docker)` / `Some(Podman)`: direct listing for the known runtime.
300/// - `None`: combined detection + listing with sentinel markers in one SSH call.
301pub fn container_list_command(runtime: Option<ContainerRuntime>) -> String {
302    match runtime {
303        Some(ContainerRuntime::Docker) => concat!(
304            "docker ps -a --format '{{json .}}' && ",
305            "echo '##purple:engine##' && ",
306            "{ docker version --format '{{.Server.Version}}' 2>/dev/null || true; }"
307        )
308        .to_string(),
309        Some(ContainerRuntime::Podman) => concat!(
310            "podman ps -a --format '{{json .}}' && ",
311            "echo '##purple:engine##' && ",
312            "{ podman version --format '{{.Server.Version}}' 2>/dev/null || true; }"
313        )
314        .to_string(),
315        None => concat!(
316            "if command -v docker >/dev/null 2>&1; then ",
317            "echo '##purple:docker##' && docker ps -a --format '{{json .}}' && ",
318            "echo '##purple:engine##' && ",
319            "{ docker version --format '{{.Server.Version}}' 2>/dev/null || true; }; ",
320            "elif command -v podman >/dev/null 2>&1; then ",
321            "echo '##purple:podman##' && podman ps -a --format '{{json .}}' && ",
322            "echo '##purple:engine##' && ",
323            "{ podman version --format '{{.Server.Version}}' 2>/dev/null || true; }; ",
324            "else echo '##purple:none##'; fi"
325        )
326        .to_string(),
327    }
328}
329
/// Parsed result of a container listing command. `engine_version` is the
/// daemon's `Server.Version` (best-effort, `None` when the version sub-call
/// failed or the remote runtime predates the engine sentinel).
#[derive(Debug, Clone, PartialEq)]
pub struct ContainerListing {
    /// Runtime that produced the listing (possibly relabeled by the
    /// podman-shim heuristic in `parse_container_output`).
    pub runtime: ContainerRuntime,
    /// Daemon version captured after the `##purple:engine##` sentinel.
    pub engine_version: Option<String>,
    /// Rows parsed from the NDJSON listing.
    pub containers: Vec<ContainerInfo>,
}
339
/// Parse the stdout of a container listing command.
///
/// When sentinels are present (combined detection run): extract runtime from
/// the sentinel line, parse remaining lines as NDJSON. When `caller_runtime`
/// is provided (subsequent run with known runtime): parse all lines as NDJSON.
/// In both cases, `##purple:engine##` splits the listing from the optional
/// trailing daemon version line.
///
/// # Errors
/// - `##purple:none##` sentinel: no runtime is installed on the remote host.
/// - Unknown sentinel: malformed remote output.
/// - No sentinel and no `caller_runtime`: nothing to attribute rows to.
pub fn parse_container_output(
    output: &str,
    caller_runtime: Option<ContainerRuntime>,
) -> Result<ContainerListing, String> {
    // Runtime comes from the first non-engine sentinel when present,
    // otherwise falls back to the caller's cached value.
    let runtime = match output
        .lines()
        .map(str::trim)
        .find(|l| l.starts_with("##purple:") && (*l != "##purple:engine##"))
    {
        Some("##purple:none##") => {
            return Err(crate::messages::CONTAINER_RUNTIME_MISSING.to_string());
        }
        Some("##purple:docker##") => ContainerRuntime::Docker,
        Some("##purple:podman##") => ContainerRuntime::Podman,
        Some(other) => return Err(crate::messages::container_unknown_sentinel(other)),
        None => match caller_runtime {
            Some(rt) => rt,
            None => return Err("No sentinel found and no runtime provided.".to_string()),
        },
    };

    // Bound the version capture to the first non-empty post-sentinel line.
    // A trailing logout banner or MOTD after `docker version` would
    // otherwise concat into the cached engine_version and surface as
    // "25.0.3\n-- session closed --" in the Runtime field.
    let mut engine_version: Option<String> = None;
    let mut after_engine = false;
    let mut containers: Vec<ContainerInfo> = Vec::new();
    // Stream-parse each NDJSON line during the sentinel sweep so we never
    // build an intermediate copy of the listing block. At 1000 containers
    // that intermediate buffer would cost ~300 KB and an extra `.lines()`
    // walk; this loop is O(lines) with zero auxiliary allocation.
    for line in output.lines() {
        let trimmed = line.trim();
        if trimmed == "##purple:engine##" {
            after_engine = true;
            continue;
        }
        if trimmed.starts_with("##purple:") {
            // Runtime sentinel already consumed above; skip it here.
            continue;
        }
        if after_engine {
            if !trimmed.is_empty() && engine_version.is_none() {
                engine_version = Some(trimmed.to_string());
            }
            continue;
        }
        if let Some(c) = try_parse_container_line(trimmed) {
            containers.push(c);
        }
    }

    // Fedora CoreOS, podman-machine and other distros symlink `docker` to
    // `podman`. Detection picks the docker branch but the JSON shape is
    // pure podman (array `Names`, lowercase `Id`). When that happens we
    // relabel the runtime so downstream consumers (MCP runtime field, host
    // detail label, sort/filter by runtime) match reality.
    let runtime = if matches!(runtime, ContainerRuntime::Docker) && looks_like_podman(output) {
        log::debug!(
            "[external] container detection: docker sentinel emitted podman-shaped JSON, relabeling runtime to Podman"
        );
        ContainerRuntime::Podman
    } else {
        runtime
    };

    log::debug!(
        "[external] container listing parsed: runtime={:?} version={:?} containers={}",
        runtime,
        engine_version,
        containers.len()
    );
    Ok(ContainerListing {
        runtime,
        engine_version,
        containers,
    })
}
425
/// Heuristic: does the raw `ps` output look like podman JSON?
///
/// Podman emits `"Names":[` (array) for every row where docker emits
/// `"Names":"` (string). The first JSON-shaped (`{`-prefixed) line is
/// sampled with a cheap substring scan; sentinel, empty and banner
/// lines never start with `{` and are skipped naturally. The
/// pretty-printed `"Names": [` spelling is also accepted so handwritten
/// test fixtures and any intermediate JSON formatter cannot defeat the
/// detector silently. Only consulted when the docker sentinel may have
/// been emitted by a podman shim.
fn looks_like_podman(output: &str) -> bool {
    output
        .lines()
        .map(str::trim)
        .find(|line| line.starts_with('{'))
        .map_or(false, |row| {
            row.contains("\"Names\":[") || row.contains("\"Names\": [")
        })
}
444
445// ---------------------------------------------------------------------------
446// SSH fetch functions
447// ---------------------------------------------------------------------------
448
/// Error from a container listing operation. Preserves the detected runtime
/// even when the `ps` command fails so it can be cached for future calls.
#[derive(Debug)]
pub struct ContainerError {
    /// Runtime detected (or cached) before the failure, if any.
    pub runtime: Option<ContainerRuntime>,
    /// User-facing error message.
    pub message: String,
}
456
457impl std::fmt::Display for ContainerError {
458    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
459        write!(f, "{}", self.message)
460    }
461}
462
463/// Translate SSH stderr into a user-friendly error message.
464fn friendly_container_error(stderr: &str, code: Option<i32>) -> String {
465    let lower = stderr.to_lowercase();
466    if lower.contains("remote host identification has changed")
467        || (lower.contains("host key for") && lower.contains("has changed"))
468    {
469        log::debug!("[external] Host key CHANGED detected; returning HOST_KEY_CHANGED toast");
470        crate::messages::HOST_KEY_CHANGED.to_string()
471    } else if lower.contains("host key verification failed")
472        || lower.contains("no matching host key")
473        || lower.contains("no ed25519 host key is known")
474        || lower.contains("no rsa host key is known")
475        || lower.contains("no ecdsa host key is known")
476        || lower.contains("host key is not known")
477    {
478        log::debug!("[external] Host key UNKNOWN detected; returning HOST_KEY_UNKNOWN toast");
479        crate::messages::HOST_KEY_UNKNOWN.to_string()
480    } else if lower.contains("command not found") {
481        crate::messages::CONTAINER_RUNTIME_NOT_FOUND.to_string()
482    } else if lower.contains("permission denied") || lower.contains("got permission denied") {
483        crate::messages::CONTAINER_PERMISSION_DENIED.to_string()
484    } else if lower.contains("cannot connect to the docker daemon")
485        || lower.contains("cannot connect to podman")
486    {
487        crate::messages::CONTAINER_DAEMON_NOT_RUNNING.to_string()
488    } else if lower.contains("connection refused") {
489        crate::messages::CONTAINER_CONNECTION_REFUSED.to_string()
490    } else if lower.contains("no route to host") || lower.contains("network is unreachable") {
491        crate::messages::CONTAINER_HOST_UNREACHABLE.to_string()
492    } else {
493        crate::messages::container_command_failed(code.unwrap_or(1))
494    }
495}
496
497/// Fetch container list synchronously via SSH.
498/// Follows the `fetch_remote_listing` pattern.
499pub fn fetch_containers(
500    ctx: &SshContext<'_>,
501    cached_runtime: Option<ContainerRuntime>,
502) -> Result<ContainerListing, ContainerError> {
503    let command = container_list_command(cached_runtime);
504    let result = crate::snippet::run_snippet(
505        ctx.alias,
506        ctx.config_path,
507        &command,
508        ctx.askpass,
509        ctx.bw_session,
510        true,
511        ctx.has_tunnel,
512    );
513    let alias = ctx.alias;
514    match result {
515        Ok(r) if r.status.success() => {
516            parse_container_output(&r.stdout, cached_runtime).map_err(|e| {
517                error!("[external] Container list parse failed: alias={alias}: {e}");
518                ContainerError {
519                    runtime: cached_runtime,
520                    message: e,
521                }
522            })
523        }
524        Ok(r) => {
525            let stderr = r.stderr.trim().to_string();
526            let msg = friendly_container_error(&stderr, r.status.code());
527            error!("[external] Container fetch failed: alias={alias}: {msg}");
528            Err(ContainerError {
529                runtime: cached_runtime,
530                message: msg,
531            })
532        }
533        Err(e) => {
534            error!("[external] Container fetch failed: alias={alias}: {e}");
535            Err(ContainerError {
536                runtime: cached_runtime,
537                message: e.to_string(),
538            })
539        }
540    }
541}
542
543/// Spawn a background thread to fetch container listings.
544/// Follows the `spawn_remote_listing` pattern.
545pub fn spawn_container_listing<F>(
546    ctx: OwnedSshContext,
547    cached_runtime: Option<ContainerRuntime>,
548    send: F,
549) where
550    F: FnOnce(String, Result<ContainerListing, ContainerError>) + Send + 'static,
551{
552    std::thread::spawn(move || {
553        let borrowed = SshContext {
554            alias: &ctx.alias,
555            config_path: &ctx.config_path,
556            askpass: ctx.askpass.as_deref(),
557            bw_session: ctx.bw_session.as_deref(),
558            has_tunnel: ctx.has_tunnel,
559        };
560        let result = fetch_containers(&borrowed, cached_runtime);
561        send(ctx.alias, result);
562    });
563}
564
565/// Spawn a background thread to perform a container action (start/stop/restart).
566/// Validates the container ID before executing.
567pub fn spawn_container_action<F>(
568    ctx: OwnedSshContext,
569    runtime: ContainerRuntime,
570    action: ContainerAction,
571    container_id: String,
572    send: F,
573) where
574    F: FnOnce(String, ContainerAction, Result<(), String>) + Send + 'static,
575{
576    std::thread::spawn(move || {
577        if let Err(e) = validate_container_id(&container_id) {
578            log::debug!(
579                "[purple] container action {} blocked on alias={}: invalid container_id: {}",
580                action.as_str(),
581                ctx.alias,
582                e
583            );
584            send(ctx.alias, action, Err(e));
585            return;
586        }
587        let alias = &ctx.alias;
588        info!(
589            "Container action: {} container={container_id} alias={alias}",
590            action.as_str()
591        );
592        let command = container_action_command(runtime, action, &container_id);
593        let result = crate::snippet::run_snippet(
594            alias,
595            &ctx.config_path,
596            &command,
597            ctx.askpass.as_deref(),
598            ctx.bw_session.as_deref(),
599            true,
600            ctx.has_tunnel,
601        );
602        match result {
603            Ok(r) if r.status.success() => send(ctx.alias, action, Ok(())),
604            Ok(r) => {
605                let err = friendly_container_error(r.stderr.trim(), r.status.code());
606                error!(
607                    "[external] Container {} failed: alias={alias} container={container_id}: {err}",
608                    action.as_str()
609                );
610                send(ctx.alias, action, Err(err));
611            }
612            Err(e) => {
613                error!(
614                    "[external] Container {} failed: alias={alias} container={container_id}: {e}",
615                    action.as_str()
616                );
617                send(ctx.alias, action, Err(e.to_string()));
618            }
619        }
620    });
621}
622
623// ---------------------------------------------------------------------------
624// ContainerInspect: subset of `docker inspect` output we surface in the UI
625// ---------------------------------------------------------------------------
626
/// Parsed subset of `docker inspect <id>` (or `podman inspect`). Only the
/// fields purple's container detail panel renders are extracted; the rest
/// of the JSON document is discarded so cache size stays bounded.
#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize)]
pub struct ContainerInspect {
    /// Last exit code (`State.ExitCode`); see `exit_code_meaning`.
    pub exit_code: i32,
    /// True when the container was OOM-killed (docker `OOMKilled` /
    /// older podman `OomKilled`).
    pub oom_killed: bool,
    pub started_at: String,
    pub finished_at: String,
    pub created_at: String,
    /// `Some("healthy" | "unhealthy" | "starting")` when the image defines
    /// a HEALTHCHECK. `None` when no healthcheck is configured.
    pub health: Option<String>,
    pub restart_count: u32,
    pub command: Option<Vec<String>>,
    pub entrypoint: Option<Vec<String>>,
    /// Number of environment variables (the values are not retained).
    pub env_count: usize,
    pub mount_count: usize,
    pub networks: Vec<NetworkInfo>,
    // Audit-relevant fields surfaced in the right-side detail panel.
    pub image_digest: Option<String>,
    /// `None` when no policy is set or the policy is "no".
    pub restart_policy: Option<String>,
    pub user: Option<String>,
    pub privileged: bool,
    pub readonly_rootfs: bool,
    pub apparmor_profile: Option<String>,
    /// Extracted from the `seccomp=` entry of `HostConfig.SecurityOpt`.
    pub seccomp_profile: Option<String>,
    pub cap_add: Vec<String>,
    pub cap_drop: Vec<String>,
    pub mounts: Vec<MountInfo>,
    pub compose_project: Option<String>,
    pub compose_service: Option<String>,
    // Lifecycle / runtime details surfaced in the LIFECYCLE card.
    pub pid: Option<u32>,
    pub stop_signal: Option<String>,
    pub stop_timeout: Option<u32>,
    // App identity from OCI image labels (visible in APP card).
    pub image_version: Option<String>,
    pub image_revision: Option<String>,
    pub image_source: Option<String>,
    pub working_dir: Option<String>,
    pub hostname: Option<String>,
    // Resource constraints (RESOURCES card). 0 / None means unlimited.
    pub memory_limit: Option<u64>,
    pub cpu_limit_nanos: Option<u64>,
    pub pids_limit: Option<i64>,
    pub log_driver: Option<String>,
    // Network mode (NETWORK card): bridge / host / none / container:xyz.
    pub network_mode: Option<String>,
    // Healthcheck definition + recent stats (HEALTH card when present).
    pub health_test: Option<Vec<String>>,
    pub health_interval_ns: Option<u64>,
    pub health_failing_streak: Option<u32>,
}
681
/// Name and IP of one network a container is attached to.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct NetworkInfo {
    pub name: String,
    /// Empty when inspect reports no `IPAddress` for this network.
    pub ip_address: String,
}
687
/// One mount entry surfaced in the container detail panel.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct MountInfo {
    pub source: String,
    pub destination: String,
    /// Derived from inspect's `RW` flag (`read_only = !RW`).
    pub read_only: bool,
}
694
695/// Build the SSH command string for inspecting a single container.
696pub fn container_inspect_command(runtime: ContainerRuntime, container_id: &str) -> String {
697    format!("{} inspect {}", runtime.as_str(), container_id)
698}
699
/// Translate a non-zero docker/podman exit code into a short
/// human-readable hint. Returns `None` for codes without a well-known
/// meaning so the UI can fall back to the bare number. Exit 0 has no
/// entry because the detail panel only annotates failed exits.
/// Sources: docker docs + Linux signal table (fatal signal N => 128 + N).
pub fn exit_code_meaning(code: i32) -> Option<&'static str> {
    match code {
        1 => Some("application error"),
        125 => Some("docker run failed"),
        126 => Some("command not executable"),
        127 => Some("command not found"),
        130 => Some("interrupted (SIGINT)"),
        // 128 + 6: abort() — failed assertions, glibc heap corruption, etc.
        134 => Some("aborted (SIGABRT)"),
        137 => Some("killed (SIGKILL / OOM)"),
        139 => Some("segfault (SIGSEGV)"),
        143 => Some("terminated (SIGTERM)"),
        _ => None,
    }
}
718
719/// Parse `docker inspect <id>` stdout into `ContainerInspect`. The command
720/// always returns a JSON array; we take the first element. Missing fields
721/// degrade to defaults rather than fail so a partial response still
722/// renders something useful.
723pub fn parse_container_inspect(output: &str) -> Result<ContainerInspect, String> {
724    let trimmed = output.trim();
725    if trimmed.is_empty() {
726        return Err(crate::messages::CONTAINER_INSPECT_EMPTY.to_string());
727    }
728    let value: serde_json::Value = serde_json::from_str(trimmed)
729        .map_err(|e| crate::messages::container_inspect_parse_failed(&e.to_string()))?;
730    let entry = value
731        .as_array()
732        .and_then(|a| a.first())
733        .ok_or_else(|| crate::messages::CONTAINER_INSPECT_EMPTY.to_string())?;
734
735    let state = &entry["State"];
736    let config = &entry["Config"];
737    let network_settings = &entry["NetworkSettings"];
738
739    let exit_code = state["ExitCode"].as_i64().unwrap_or(0) as i32;
740    // Podman 5.x and docker both emit `OOMKilled`. Podman 3.x (still the
741    // packaged default on Ubuntu 22.04 LTS) emits `OomKilled`. Try both so
742    // OOM-killed containers surface in the ATTENTION card regardless of
743    // remote runtime version.
744    let oom_killed = state["OOMKilled"]
745        .as_bool()
746        .or_else(|| state["OomKilled"].as_bool())
747        .unwrap_or(false);
748    let started_at = state["StartedAt"].as_str().unwrap_or("").to_string();
749    let finished_at = state["FinishedAt"].as_str().unwrap_or("").to_string();
750    let health = state
751        .get("Health")
752        .and_then(|h| h.get("Status"))
753        .and_then(|s| s.as_str())
754        .map(|s| s.to_string());
755    let restart_count = entry["RestartCount"].as_u64().unwrap_or(0) as u32;
756
757    let command = config["Cmd"].as_array().map(|arr| {
758        arr.iter()
759            .filter_map(|v| v.as_str().map(|s| s.to_string()))
760            .collect()
761    });
762    let entrypoint = config["Entrypoint"].as_array().map(|arr| {
763        arr.iter()
764            .filter_map(|v| v.as_str().map(|s| s.to_string()))
765            .collect()
766    });
767    let env_count = config["Env"].as_array().map(|arr| arr.len()).unwrap_or(0);
768    let mount_count = entry["Mounts"].as_array().map(|arr| arr.len()).unwrap_or(0);
769
770    let networks = network_settings
771        .get("Networks")
772        .and_then(|n| n.as_object())
773        .map(|map| {
774            map.iter()
775                .map(|(name, cfg)| NetworkInfo {
776                    name: name.clone(),
777                    ip_address: cfg
778                        .get("IPAddress")
779                        .and_then(|v| v.as_str())
780                        .unwrap_or("")
781                        .to_string(),
782                })
783                .collect::<Vec<_>>()
784        })
785        .unwrap_or_default();
786
787    let host_config = &entry["HostConfig"];
788
789    let image_digest = entry["Image"]
790        .as_str()
791        .filter(|s| !s.is_empty())
792        .map(|s| s.to_string());
793    let restart_policy = host_config
794        .get("RestartPolicy")
795        .and_then(|p| p.get("Name"))
796        .and_then(|s| s.as_str())
797        .filter(|s| !s.is_empty() && *s != "no")
798        .map(|s| s.to_string());
799    let user = config["User"]
800        .as_str()
801        .filter(|s| !s.is_empty())
802        .map(|s| s.to_string());
803    let privileged = host_config["Privileged"].as_bool().unwrap_or(false);
804    let readonly_rootfs = host_config["ReadonlyRootfs"].as_bool().unwrap_or(false);
805    let apparmor_profile = host_config["AppArmorProfile"]
806        .as_str()
807        .or_else(|| entry["AppArmorProfile"].as_str())
808        .filter(|s| !s.is_empty())
809        .map(|s| s.to_string());
810    let seccomp_profile = host_config["SecurityOpt"].as_array().and_then(|arr| {
811        arr.iter()
812            .filter_map(|v| v.as_str())
813            .find_map(|s| s.strip_prefix("seccomp=").map(|v| v.to_string()))
814    });
815    let cap_add = host_config["CapAdd"]
816        .as_array()
817        .map(|arr| {
818            arr.iter()
819                .filter_map(|v| v.as_str().map(|s| s.to_string()))
820                .collect()
821        })
822        .unwrap_or_default();
823    let cap_drop = host_config["CapDrop"]
824        .as_array()
825        .map(|arr| {
826            arr.iter()
827                .filter_map(|v| v.as_str().map(|s| s.to_string()))
828                .collect()
829        })
830        .unwrap_or_default();
831    let mounts = entry["Mounts"]
832        .as_array()
833        .map(|arr| {
834            arr.iter()
835                .map(|m| MountInfo {
836                    source: m["Source"].as_str().unwrap_or("").to_string(),
837                    destination: m["Destination"].as_str().unwrap_or("").to_string(),
838                    read_only: !m["RW"].as_bool().unwrap_or(true),
839                })
840                .collect()
841        })
842        .unwrap_or_default();
843    let labels = config.get("Labels").and_then(|l| l.as_object());
844    let label = |key: &str| {
845        labels
846            .and_then(|l| l.get(key))
847            .and_then(|v| v.as_str())
848            .filter(|s| !s.is_empty())
849            .map(|s| s.to_string())
850    };
851    let compose_project = label("com.docker.compose.project");
852    let compose_service = label("com.docker.compose.service");
853    let image_version = label("org.opencontainers.image.version");
854    let image_revision = label("org.opencontainers.image.revision");
855    let image_source = label("org.opencontainers.image.source");
856
857    let created_at = entry["Created"].as_str().unwrap_or("").to_string();
858    // State.Pid is `0` when the container is not running. Drop the zero so
859    // the UI does not render a misleading "pid 0" row for exited rows.
860    let pid = state["Pid"].as_u64().filter(|n| *n > 0).map(|n| n as u32);
861    let hostname = config["Hostname"]
862        .as_str()
863        .filter(|s| !s.is_empty())
864        .map(|s| s.to_string());
865    let working_dir = config["WorkingDir"]
866        .as_str()
867        .filter(|s| !s.is_empty())
868        .map(|s| s.to_string());
869    let stop_signal = config["StopSignal"]
870        .as_str()
871        .filter(|s| !s.is_empty())
872        .map(|s| s.to_string());
873    let stop_timeout = config["StopTimeout"].as_u64().map(|n| n as u32);
874
875    let network_mode = host_config["NetworkMode"]
876        .as_str()
877        .filter(|s| !s.is_empty() && *s != "default")
878        .map(|s| s.to_string());
879    // HostConfig.Memory is bytes, 0 = unlimited (drop). Same for NanoCpus.
880    let memory_limit = host_config["Memory"].as_u64().filter(|n| *n > 0);
881    let cpu_limit_nanos = host_config["NanoCpus"].as_u64().filter(|n| *n > 0);
882    // PidsLimit is i64. 0 or -1 means unlimited; drop both.
883    let pids_limit = host_config["PidsLimit"].as_i64().filter(|n| *n > 0);
884    // LogConfig.Type defaults to "json-file" on docker. Always carry it
885    // so the renderer can decide whether to surface "Logs" only when
886    // non-default.
887    let log_driver = host_config
888        .get("LogConfig")
889        .and_then(|l| l.get("Type"))
890        .and_then(|v| v.as_str())
891        .filter(|s| !s.is_empty())
892        .map(|s| s.to_string());
893
894    let healthcheck = config.get("Healthcheck");
895    let health_test = healthcheck
896        .and_then(|h| h.get("Test"))
897        .and_then(|t| t.as_array())
898        .map(|arr| {
899            arr.iter()
900                .filter_map(|v| v.as_str().map(|s| s.to_string()))
901                .collect::<Vec<_>>()
902        })
903        .filter(|v| !v.is_empty());
904    let health_interval_ns = healthcheck
905        .and_then(|h| h.get("Interval"))
906        .and_then(|v| v.as_u64())
907        .filter(|n| *n > 0);
908    let health_failing_streak = state
909        .get("Health")
910        .and_then(|h| h.get("FailingStreak"))
911        .and_then(|v| v.as_u64())
912        .map(|n| n as u32);
913
914    Ok(ContainerInspect {
915        exit_code,
916        oom_killed,
917        started_at,
918        finished_at,
919        created_at,
920        health,
921        restart_count,
922        command,
923        entrypoint,
924        env_count,
925        mount_count,
926        networks,
927        image_digest,
928        restart_policy,
929        user,
930        privileged,
931        readonly_rootfs,
932        apparmor_profile,
933        seccomp_profile,
934        cap_add,
935        cap_drop,
936        mounts,
937        compose_project,
938        compose_service,
939        pid,
940        stop_signal,
941        stop_timeout,
942        image_version,
943        image_revision,
944        image_source,
945        working_dir,
946        hostname,
947        memory_limit,
948        cpu_limit_nanos,
949        pids_limit,
950        log_driver,
951        network_mode,
952        health_test,
953        health_interval_ns,
954        health_failing_streak,
955    })
956}
957
/// Parse a Docker `Up …` status string into a compact uptime label.
///
/// Returns `None` for any non-running state (Exited, Created, Restarting,
/// Paused without an `Up` prefix, empty string). Sub-minute uptimes render
/// as `<1m`; otherwise `1m` / `5m` / `12h` / `5w` / `3mo` / `2y`, following
/// Docker's `units.HumanDuration` vocabulary.
pub fn parse_uptime_from_status(s: &str) -> Option<String> {
    let rest = s.strip_prefix("Up ")?;
    // Drop trailing annotations like "(healthy)" / "(Paused)".
    let rest = rest.split('(').next()?.trim();
    // Docker spells the three "about one unit" cases out in words.
    match rest {
        "Less than a second" => return Some("<1m".to_string()),
        "About a minute" => return Some("1m".to_string()),
        "About an hour" => return Some("1h".to_string()),
        _ => {}
    }
    let mut words = rest.split_whitespace();
    let amount: u64 = words.next()?.parse().ok()?;
    let unit = match words.next()? {
        // Anything under a minute collapses to the same label.
        "second" | "seconds" => return Some("<1m".to_string()),
        "minute" | "minutes" => "m",
        "hour" | "hours" => "h",
        "day" | "days" => "d",
        "week" | "weeks" => "w",
        "month" | "months" => "mo",
        "year" | "years" => "y",
        _ => return None,
    };
    Some(format!("{amount}{unit}"))
}
990
991/// Synchronously fetch + parse `container inspect`. Validates the
992/// container ID before issuing the SSH call.
993pub fn fetch_container_inspect(
994    ctx: &SshContext<'_>,
995    runtime: ContainerRuntime,
996    container_id: &str,
997) -> Result<ContainerInspect, String> {
998    validate_container_id(container_id)?;
999    let command = container_inspect_command(runtime, container_id);
1000    let result = crate::snippet::run_snippet(
1001        ctx.alias,
1002        ctx.config_path,
1003        &command,
1004        ctx.askpass,
1005        ctx.bw_session,
1006        true,
1007        ctx.has_tunnel,
1008    );
1009    match result {
1010        Ok(r) if r.status.success() => parse_container_inspect(&r.stdout),
1011        Ok(r) => Err(crate::messages::container_command_failed(
1012            r.status.code().unwrap_or(1),
1013        )),
1014        Err(e) => Err(e.to_string()),
1015    }
1016}
1017
1018/// Spawn a background thread to run `container inspect`. Mirrors the
1019/// `spawn_container_listing` pattern so the call site looks identical.
1020pub fn spawn_container_inspect_listing<F>(
1021    ctx: OwnedSshContext,
1022    runtime: ContainerRuntime,
1023    container_id: String,
1024    send: F,
1025) where
1026    F: FnOnce(String, String, Result<ContainerInspect, String>) + Send + 'static,
1027{
1028    std::thread::spawn(move || {
1029        let borrowed = SshContext {
1030            alias: &ctx.alias,
1031            config_path: &ctx.config_path,
1032            askpass: ctx.askpass.as_deref(),
1033            bw_session: ctx.bw_session.as_deref(),
1034            has_tunnel: ctx.has_tunnel,
1035        };
1036        let result = fetch_container_inspect(&borrowed, runtime, &container_id);
1037        send(ctx.alias, container_id, result);
1038    });
1039}
1040
1041/// Build the `<runtime> logs --tail <n> <id>` command. The
1042/// `--tail` cap is enforced server-side so the SSH stream stays
1043/// bounded even on a noisy container.
1044pub fn container_logs_command(
1045    runtime: ContainerRuntime,
1046    container_id: &str,
1047    tail: usize,
1048) -> String {
1049    format!("{} logs --tail {} {}", runtime.as_str(), tail, container_id)
1050}
1051
1052/// Synchronously fetch logs and split into lines. Returns the raw
1053/// captured stdout split on `\n` so the renderer does not have to
1054/// re-parse. Empty trailing lines are dropped.
1055pub fn fetch_container_logs(
1056    ctx: &SshContext<'_>,
1057    runtime: ContainerRuntime,
1058    container_id: &str,
1059    tail: usize,
1060) -> Result<Vec<String>, String> {
1061    validate_container_id(container_id)?;
1062    let command = container_logs_command(runtime, container_id, tail);
1063    let result = crate::snippet::run_snippet(
1064        ctx.alias,
1065        ctx.config_path,
1066        &command,
1067        ctx.askpass,
1068        ctx.bw_session,
1069        true,
1070        ctx.has_tunnel,
1071    );
1072    match result {
1073        Ok(r) if r.status.success() => Ok(parse_log_output(&r.stdout, &r.stderr)),
1074        Ok(r) => Err(crate::messages::container_command_failed(
1075            r.status.code().unwrap_or(1),
1076        )),
1077        Err(e) => Err(e.to_string()),
1078    }
1079}
1080
/// Combine stdout (app logs) and stderr (errors) into a single stream:
/// all stdout lines first, then all stderr lines appended after.
/// Many container runtimes split levels across the two streams, and
/// showing both is closer to what `docker logs` prints on a TTY.
/// Trailing blank lines are stripped from the stdout portion before the
/// stderr lines go on (so a newline-terminated stdout block does not
/// leave a phantom empty row between the streams), and again from the
/// combined result so it never ends in blanks.
pub(crate) fn parse_log_output(stdout: &str, stderr: &str) -> Vec<String> {
    let mut merged: Vec<String> = stdout.lines().map(str::to_string).collect();
    while matches!(merged.last(), Some(line) if line.is_empty()) {
        merged.pop();
    }
    merged.extend(stderr.lines().map(str::to_string));
    while matches!(merged.last(), Some(line) if line.is_empty()) {
        merged.pop();
    }
    merged
}
1100
1101/// Spawn a background thread to run `container logs`. Same shape as
1102/// `spawn_container_inspect_listing`. In demo mode the SSH call is
1103/// short-circuited with a deterministic synthetic log stream so the
1104/// logs viewer (and its `/` search) is exercisable without a remote.
1105pub fn spawn_container_logs_fetch<F>(
1106    ctx: OwnedSshContext,
1107    runtime: ContainerRuntime,
1108    container_id: String,
1109    container_name: String,
1110    tail: usize,
1111    send: F,
1112) where
1113    F: FnOnce(String, String, String, Result<Vec<String>, String>) + Send + 'static,
1114{
1115    if crate::demo_flag::is_demo() {
1116        let lines = demo_log_lines(&container_name, tail);
1117        log::debug!(
1118            "[purple] container_logs_fetch: demo short-circuit alias={} id={} lines={}",
1119            ctx.alias,
1120            container_id,
1121            lines.len()
1122        );
1123        send(ctx.alias, container_id, container_name, Ok(lines));
1124        return;
1125    }
1126    std::thread::spawn(move || {
1127        let borrowed = SshContext {
1128            alias: &ctx.alias,
1129            config_path: &ctx.config_path,
1130            askpass: ctx.askpass.as_deref(),
1131            bw_session: ctx.bw_session.as_deref(),
1132            has_tunnel: ctx.has_tunnel,
1133        };
1134        let result = fetch_container_logs(&borrowed, runtime, &container_id, tail);
1135        send(ctx.alias, container_id, container_name, result);
1136    });
1137}
1138
1139/// Generate a deterministic stream of fake log lines for demo mode.
1140/// Mixes INFO / WARN / ERROR / DEBUG with realistic-looking content
1141/// (HTTP requests, DB pings, retries, timeouts) so the user can
1142/// usefully press `/` and find matches under `--demo`. Anchored to
1143/// `demo_flag::now_secs()` so timestamps stay stable across renders.
1144pub(crate) fn demo_log_lines(container_name: &str, tail: usize) -> Vec<String> {
1145    use std::time::{Duration, UNIX_EPOCH};
1146    // Cheap hash to fan out per-container variation without bringing
1147    // `rand` into the binary.
1148    let seed: u32 = container_name
1149        .bytes()
1150        .fold(0u32, |acc, b| acc.wrapping_mul(31).wrapping_add(b as u32));
1151
1152    // Templates rotate every line. Index 0 is the freshest log line.
1153    let templates: &[&str] = &[
1154        "INFO  [{}] handled GET /api/v1/health 200 in 14ms",
1155        "INFO  [{}] handled POST /api/v1/orders 201 in 38ms (user_id={user})",
1156        "DEBUG [{}] cache hit key=session:{user} ttl=3600",
1157        "INFO  [{}] handled GET /api/v1/users/{user} 200 in 11ms",
1158        "WARN  [{}] slow query detected duration=812ms statement=SELECT FROM orders",
1159        "INFO  [{}] connection pool size=12 idle=8 in_use=4",
1160        "DEBUG [{}] flushing metrics batch size=64",
1161        "INFO  [{}] handled GET /api/v1/inventory 200 in 22ms",
1162        "ERROR [{}] upstream timeout after 5000ms target=payments retry=1",
1163        "WARN  [{}] retrying request attempt=2 backoff=250ms",
1164        "INFO  [{}] handled POST /api/v1/login 200 in 31ms",
1165        "DEBUG [{}] gc cycle reclaimed=42MB took=18ms",
1166        "INFO  [{}] heartbeat ok rss=128MB cpu=4%",
1167        "ERROR [{}] failed to acquire lock resource=cache_warmer waiter=3",
1168        "INFO  [{}] handled DELETE /api/v1/sessions/{user} 204 in 9ms",
1169        "WARN  [{}] disk usage at 78% mount=/data threshold=80%",
1170        "INFO  [{}] handled GET /api/v1/search?q=widget 200 in 47ms",
1171        "DEBUG [{}] websocket ping rtt=12ms",
1172    ];
1173
1174    // Always use `demo_flag::now_secs()` even outside demo mode: the
1175    // helper's OnceLock caches the first wallclock value, so repeated
1176    // calls within a process return the same instant. Branching on
1177    // `is_demo()` would let tests cross a second boundary and flake.
1178    let now = crate::demo_flag::now_secs();
1179
1180    // Map each line to a timestamp working backwards from now. One log
1181    // every 3 seconds keeps the time range realistic for a 200-line tail.
1182    let mut lines = Vec::with_capacity(tail);
1183    for i in 0..tail {
1184        let template = templates[(i + seed as usize) % templates.len()];
1185        let user = 1000 + ((seed as usize + i * 7) % 50);
1186        let secs_back = (i as u64) * 3;
1187        let line_time = UNIX_EPOCH + Duration::from_secs(now.saturating_sub(secs_back));
1188        let ts = format_demo_timestamp(line_time);
1189        let body = template
1190            .replace("{}", container_name)
1191            .replace("{user}", &user.to_string());
1192        lines.push(format!("{} {}", ts, body));
1193    }
1194    // Render flush-top with the newest line at the bottom of the
1195    // viewport, matching real `docker logs --tail` output.
1196    lines.reverse();
1197    lines
1198}
1199
1200fn format_demo_timestamp(t: std::time::SystemTime) -> String {
1201    use std::time::UNIX_EPOCH;
1202    let secs = t
1203        .duration_since(UNIX_EPOCH)
1204        .map(|d| d.as_secs())
1205        .unwrap_or(0);
1206    // Fast UTC breakdown without dragging in chrono. Good enough for a
1207    // demo timestamp where leap seconds / DST are irrelevant.
1208    let days_since_epoch = (secs / 86_400) as i64;
1209    let seconds_in_day = (secs % 86_400) as u32;
1210    let h = seconds_in_day / 3600;
1211    let m = (seconds_in_day % 3600) / 60;
1212    let s = seconds_in_day % 60;
1213    let (y, mo, d) = civil_from_days(days_since_epoch);
1214    format!("{:04}-{:02}-{:02} {:02}:{:02}:{:02}", y, mo, d, h, m, s)
1215}
1216
/// Convert days-since-1970-01-01 to (year, month, day).
///
/// Howard Hinnant's `civil_from_days` algorithm — proleptic Gregorian
/// calendar, 1970-01-01 = day 0. Handles negative (pre-epoch) inputs.
fn civil_from_days(days: i64) -> (i32, u32, u32) {
    // Shift the epoch to 0000-03-01 so leap days fall at year end.
    let shifted = days + 719_468;
    // A 400-year era is exactly 146_097 days.
    let era = if shifted >= 0 { shifted } else { shifted - 146_096 } / 146_097;
    let day_of_era = (shifted - era * 146_097) as u64; // [0, 146_096]
    let year_of_era =
        (day_of_era - day_of_era / 1460 + day_of_era / 36_524 - day_of_era / 146_096) / 365;
    let year = year_of_era as i64 + era * 400;
    let day_of_year = day_of_era - (365 * year_of_era + year_of_era / 4 - year_of_era / 100);
    let month_index = (5 * day_of_year + 2) / 153; // March-based month [0, 11]
    let day = (day_of_year - (153 * month_index + 2) / 5 + 1) as u32;
    let month = if month_index < 10 { month_index + 3 } else { month_index - 9 } as u32;
    // January/February belong to the next civil year in the shifted count.
    let year = if month <= 2 { year + 1 } else { year };
    (year as i32, month, day)
}
1232
1233// ---------------------------------------------------------------------------
1234// JSON lines cache
1235// ---------------------------------------------------------------------------
1236
/// A cached container listing for a single host. `engine_version` is the
/// daemon's `Server.Version` captured during the last refresh, surfaced in
/// the host detail panel; `None` means the version sub-call did not return
/// or the cache was written by an older purple build.
#[derive(Debug, Clone)]
pub struct ContainerCacheEntry {
    // When this entry was captured. Presumably Unix seconds (it is fed to
    // relative-time formatting elsewhere) — TODO confirm at the write site.
    pub timestamp: u64,
    // Which runtime (docker/podman) produced the listing on this host.
    pub runtime: ContainerRuntime,
    // Daemon `Server.Version` from the last refresh; see the doc comment.
    pub engine_version: Option<String>,
    // The `ps -a` rows captured for this host, in listing order.
    pub containers: Vec<ContainerInfo>,
}
1248
/// Serde helper for a single JSON line in the cache file. `engine_version`
/// uses `serde(default)` so cache files written before this field existed
/// still deserialize cleanly. One line per host alias; field layout mirrors
/// `ContainerCacheEntry` plus the `alias` key used as the map key on load.
#[derive(Serialize, Deserialize)]
struct CacheLine {
    alias: String,
    timestamp: u64,
    runtime: ContainerRuntime,
    // Omitted from output when `None` to keep old-format lines byte-stable.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    engine_version: Option<String>,
    containers: Vec<ContainerInfo>,
}
1261
// Test-only thread-local override for the cache file path.
// Mirrors `preferences::set_path_override` so unit tests can write
// to a tempdir instead of polluting the real `~/.purple/`.
// Thread-local (not a global) so parallel test threads cannot see each
// other's override; `None` means "no override set on this thread".
#[cfg(test)]
thread_local! {
    static PATH_OVERRIDE: std::cell::RefCell<Option<std::path::PathBuf>> =
        const { std::cell::RefCell::new(None) };
}
1270
/// Point the cache file at `path` for the current test thread.
#[cfg(test)]
pub fn set_path_override(path: std::path::PathBuf) {
    PATH_OVERRIDE.with(|cell| {
        cell.replace(Some(path));
    });
}
1275
/// Remove the current test thread's cache-path override, if any.
#[cfg(test)]
#[allow(dead_code)]
pub fn clear_path_override() {
    PATH_OVERRIDE.with(|cell| {
        cell.replace(None);
    });
}
1281
1282fn cache_path() -> Option<std::path::PathBuf> {
1283    // Tests MUST opt in via `set_path_override` before any code
1284    // path that loads or saves the cache. Falling through to the
1285    // production path lets a forgotten override pollute (and in
1286    // the orphan-prune branch of `reload_hosts`, wipe) the user's
1287    // real `~/.purple/container_cache.jsonl`.
1288    #[cfg(test)]
1289    {
1290        PATH_OVERRIDE.with(|p| p.borrow().clone())
1291    }
1292    #[cfg(not(test))]
1293    {
1294        dirs::home_dir().map(|h| h.join(".purple").join("container_cache.jsonl"))
1295    }
1296}
1297
1298/// Load container cache from `~/.purple/container_cache.jsonl`.
1299/// Malformed lines are silently ignored. Duplicate aliases: last-write-wins.
1300pub fn load_container_cache() -> HashMap<String, ContainerCacheEntry> {
1301    let mut map = HashMap::new();
1302    let Some(path) = cache_path() else {
1303        return map;
1304    };
1305    let Ok(content) = std::fs::read_to_string(&path) else {
1306        return map;
1307    };
1308    for line in content.lines() {
1309        let trimmed = line.trim();
1310        if trimmed.is_empty() {
1311            continue;
1312        }
1313        if let Ok(entry) = serde_json::from_str::<CacheLine>(trimmed) {
1314            map.insert(
1315                entry.alias,
1316                ContainerCacheEntry {
1317                    timestamp: entry.timestamp,
1318                    runtime: entry.runtime,
1319                    engine_version: entry.engine_version,
1320                    containers: entry.containers,
1321                },
1322            );
1323        }
1324    }
1325    map
1326}
1327
1328/// Parse container cache from JSONL content string (for demo/test use).
1329pub fn parse_container_cache_content(content: &str) -> HashMap<String, ContainerCacheEntry> {
1330    let mut map = HashMap::new();
1331    for line in content.lines() {
1332        let trimmed = line.trim();
1333        if trimmed.is_empty() {
1334            continue;
1335        }
1336        if let Ok(entry) = serde_json::from_str::<CacheLine>(trimmed) {
1337            map.insert(
1338                entry.alias,
1339                ContainerCacheEntry {
1340                    timestamp: entry.timestamp,
1341                    runtime: entry.runtime,
1342                    engine_version: entry.engine_version,
1343                    containers: entry.containers,
1344                },
1345            );
1346        }
1347    }
1348    map
1349}
1350
1351/// Save container cache to `~/.purple/container_cache.jsonl` via atomic write.
1352pub fn save_container_cache(cache: &HashMap<String, ContainerCacheEntry>) {
1353    if crate::demo_flag::is_demo() {
1354        return;
1355    }
1356    let Some(path) = cache_path() else {
1357        return;
1358    };
1359    let mut lines = Vec::with_capacity(cache.len());
1360    for (alias, entry) in cache {
1361        let line = CacheLine {
1362            alias: alias.clone(),
1363            timestamp: entry.timestamp,
1364            runtime: entry.runtime,
1365            engine_version: entry.engine_version.clone(),
1366            containers: entry.containers.clone(),
1367        };
1368        if let Ok(s) = serde_json::to_string(&line) {
1369            lines.push(s);
1370        }
1371    }
1372    let content = lines.join("\n");
1373    log::debug!(
1374        "[purple] save_container_cache: {} host entries, {} bytes -> {}",
1375        cache.len(),
1376        content.len(),
1377        path.display()
1378    );
1379    if let Err(e) = crate::fs_util::atomic_write(&path, content.as_bytes()) {
1380        log::warn!(
1381            "[config] Failed to write container cache {}: {e}",
1382            path.display()
1383        );
1384    }
1385}
1386
1387// ---------------------------------------------------------------------------
1388// String truncation
1389// ---------------------------------------------------------------------------
1390
/// Truncate a string to at most `max` characters. Appends ".." if truncated.
///
/// Character-based (not byte-based), so multi-byte UTF-8 is never split.
/// The result is guaranteed to be at most `max` characters long: for
/// `max < 2` a truncated result is simply the first `max` characters,
/// since the ".." suffix alone would not fit.
pub fn truncate_str(s: &str, max: usize) -> String {
    let count = s.chars().count();
    if count <= max {
        return s.to_string();
    }
    if max < 2 {
        // Bug fix: previously this path fell through and returned "..",
        // which is 2 chars — longer than the stated `max` budget.
        return s.chars().take(max).collect();
    }
    // Reserve 2 characters for the ".." suffix.
    let cut = max - 2;
    // Map the character cut-point back to a byte index for slicing.
    let end = s.char_indices().nth(cut).map(|(i, _)| i).unwrap_or(s.len());
    format!("{}..", &s[..end])
}
1402
1403// ---------------------------------------------------------------------------
1404// Relative time
1405// ---------------------------------------------------------------------------
1406
/// Format a duration in seconds as a compact label (`12s`, `5m`,
/// `2h`, `3d`). Used for the in-border staleness badge where width
/// is precious and the surrounding label (`synced`) already says
/// "ago" without the suffix.
pub fn format_uptime_short(seconds: u64) -> String {
    const MINUTE: u64 = 60;
    const HOUR: u64 = 3_600;
    const DAY: u64 = 86_400;
    match seconds {
        s if s < MINUTE => format!("{s}s"),
        s if s < HOUR => format!("{}m", s / MINUTE),
        s if s < DAY => format!("{}h", s / HOUR),
        s => format!("{}d", s / DAY),
    }
}
1422
1423/// Format a Unix timestamp as a human-readable relative time string.
1424/// Honours `demo_flag::now_secs()` when demo mode is active so visual
1425/// regression goldens stay byte-stable across long-running test
1426/// processes (same pattern as `history::format_time_ago`).
1427pub fn format_relative_time(timestamp: u64) -> String {
1428    let now = if crate::demo_flag::is_demo() {
1429        crate::demo_flag::now_secs()
1430    } else {
1431        SystemTime::now()
1432            .duration_since(UNIX_EPOCH)
1433            .unwrap_or_default()
1434            .as_secs()
1435    };
1436    let diff = now.saturating_sub(timestamp);
1437    if diff < 60 {
1438        "just now".to_string()
1439    } else if diff < 3600 {
1440        format!("{}m ago", diff / 60)
1441    } else if diff < 86400 {
1442        format!("{}h ago", diff / 3600)
1443    } else {
1444        format!("{}d ago", diff / 86400)
1445    }
1446}
1447
1448// ---------------------------------------------------------------------------
1449// Tests
1450// ---------------------------------------------------------------------------
1451
1452#[cfg(test)]
1453#[path = "containers_tests.rs"]
1454mod tests;