// purple_ssh/containers.rs
1use std::collections::HashMap;
2use std::time::{SystemTime, UNIX_EPOCH};
3
4use log::{error, info};
5
6use serde::{Deserialize, Serialize};
7
8use crate::ssh_context::{OwnedSshContext, SshContext};
9
10// ---------------------------------------------------------------------------
11// ContainerInfo model
12// ---------------------------------------------------------------------------
13
/// Metadata for a single container (from `docker ps -a` / `podman ps -a`).
///
/// Deserialization is tolerant of both docker and podman JSON shapes.
/// Docker uses `ID` plus scalar `Names`/`Ports`; podman uses `Id` plus
/// `Names` as an array and `Ports` as an array of objects. The custom
/// helpers below coerce both into the docker-shaped scalar fields the
/// rest of purple (UI, cache, MCP) already understands.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ContainerInfo {
    /// Container identifier: `ID` (docker) or `Id` (podman).
    #[serde(rename = "ID", alias = "Id")]
    pub id: String,
    /// Container name(s); a podman name array is comma-joined by the
    /// custom deserializer to match docker's scalar rendering.
    #[serde(rename = "Names", deserialize_with = "deserialize_names_field")]
    pub names: String,
    /// Image reference as reported by `ps`.
    #[serde(rename = "Image")]
    pub image: String,
    /// Lifecycle state string as emitted by the runtime.
    #[serde(rename = "State")]
    pub state: String,
    /// Human-readable status line; empty when the key is absent.
    #[serde(rename = "Status", default)]
    pub status: String,
    // `default` covers the missing-key case directly via Default::default()
    // and bypasses `deserialize_with`. The custom deserializer therefore
    // only runs when `Ports` is present (scalar, array or explicit null).
    #[serde(
        rename = "Ports",
        deserialize_with = "deserialize_ports_field",
        default
    )]
    pub ports: String,
}
43
44/// Accept `Names` as either a scalar string (docker) or an array of
45/// strings (podman). Multiple names join with `,` to match docker's
46/// own comma-joined rendering. Unexpected shapes (number, object,
47/// null) propagate as serde errors; `parse_container_ps` drops the
48/// offending row via `.ok()`, which is the right behaviour for a row
49/// that has lost its identity.
50fn deserialize_names_field<'de, D>(deserializer: D) -> Result<String, D::Error>
51where
52    D: serde::Deserializer<'de>,
53{
54    #[derive(Deserialize)]
55    #[serde(untagged)]
56    enum NamesField {
57        Scalar(String),
58        Array(Vec<String>),
59    }
60    match NamesField::deserialize(deserializer)? {
61        NamesField::Scalar(s) => Ok(s),
62        NamesField::Array(arr) => Ok(arr.join(",")),
63    }
64}
65
66/// Accept `Ports` as either a scalar string (docker) or an array of
67/// port objects (podman). Podman entries are rendered into the same
68/// `host_ip:host_port->container_port/proto` form docker emits, so
69/// downstream UI rendering stays uniform. An explicit JSON null is
70/// tolerated and produces an empty string: podman uses `null` to mean
71/// "no ports published", which is semantically valid and the row must
72/// remain visible.
73fn deserialize_ports_field<'de, D>(deserializer: D) -> Result<String, D::Error>
74where
75    D: serde::Deserializer<'de>,
76{
77    #[derive(Deserialize)]
78    #[serde(untagged)]
79    enum PortsField {
80        Scalar(String),
81        Array(Vec<PodmanPort>),
82    }
83    match Option::<PortsField>::deserialize(deserializer)? {
84        Some(PortsField::Scalar(s)) => Ok(s),
85        Some(PortsField::Array(arr)) => Ok(format_podman_ports(&arr)),
86        None => Ok(String::new()),
87    }
88}
89
/// One published-port entry from podman's `Ports` JSON array. Every
/// field defaults so partially-populated entries still deserialize.
#[derive(Deserialize)]
struct PodmanPort {
    /// Host bind address; empty for wildcard binds.
    #[serde(default)]
    host_ip: String,
    /// Port inside the container.
    #[serde(default)]
    container_port: u32,
    /// Port on the host; `write_podman_port` treats 0 as "no host binding".
    #[serde(default)]
    host_port: u32,
    /// Length of the consecutive port run; defaults to 1 (single port)
    /// via `podman_port_default_range` when podman omits the key.
    #[serde(default = "podman_port_default_range")]
    range: u32,
    /// Transport protocol; the formatter substitutes "tcp" when empty.
    #[serde(default)]
    protocol: String,
}
103
/// Serde default for `PodmanPort::range`: an omitted `range` means a
/// single-port mapping, i.e. a run of length 1.
fn podman_port_default_range() -> u32 {
    1
}
107
108fn format_podman_ports(ports: &[PodmanPort]) -> String {
109    // ~24 chars per typical port entry. Pre-allocating avoids the
110    // intermediate Vec<String> + repeated re-allocations that the prior
111    // map/collect/join chain produced for compose stacks with many
112    // published ports.
113    let mut out = String::with_capacity(ports.len().saturating_mul(24));
114    for (i, p) in ports.iter().enumerate() {
115        if i > 0 {
116            out.push_str(", ");
117        }
118        write_podman_port(p, &mut out);
119    }
120    out
121}
122
123fn write_podman_port(p: &PodmanPort, out: &mut String) {
124    use std::fmt::Write as _;
125    let protocol = if p.protocol.is_empty() {
126        "tcp"
127    } else {
128        p.protocol.as_str()
129    };
130    if p.host_port != 0 {
131        // Podman emits an empty `host_ip` for both IPv4 wildcard and IPv6
132        // wildcard binds. Omit the prefix when unknown rather than
133        // mis-claim IPv4. Concrete addresses (e.g. 127.0.0.1, ::1) render
134        // verbatim with the docker `addr:port->...` form.
135        if !p.host_ip.is_empty() {
136            let _ = write!(out, "{}:", p.host_ip);
137        }
138        if p.range > 1 {
139            let _ = write!(
140                out,
141                "{}-{}->",
142                p.host_port,
143                p.host_port.saturating_add(p.range.saturating_sub(1))
144            );
145        } else {
146            let _ = write!(out, "{}->", p.host_port);
147        }
148    }
149    if p.range > 1 {
150        let _ = write!(
151            out,
152            "{}-{}",
153            p.container_port,
154            p.container_port.saturating_add(p.range.saturating_sub(1))
155        );
156    } else {
157        let _ = write!(out, "{}", p.container_port);
158    }
159    let _ = write!(out, "/{protocol}");
160}
161
162/// Try to parse one NDJSON line into `ContainerInfo`. Returns `None`
163/// for blank/non-JSON lines (MOTD/banner) without logging. JSON-shaped
164/// lines that fail to match the schema log at debug level so missing
165/// containers can be correlated to a concrete parse error rather than
166/// guessed from a shrunken list.
167fn try_parse_container_line(trimmed: &str) -> Option<ContainerInfo> {
168    if trimmed.is_empty() {
169        return None;
170    }
171    match serde_json::from_str(trimmed) {
172        Ok(c) => Some(c),
173        Err(e) if trimmed.starts_with('{') => {
174            log::debug!(
175                "[external] container parse: dropped JSON line: {} (err: {})",
176                &trimmed[..trimmed.len().min(120)],
177                e
178            );
179            None
180        }
181        Err(_) => None,
182    }
183}
184
185/// Parse NDJSON output from `docker ps --format '{{json .}}'` or
186/// `podman ps --format '{{json .}}'`. Used by tests and the public
187/// crate API exposed via `lib.rs`; the live SSH path streams through
188/// `parse_container_output` directly, so the binary build sees this
189/// helper as unused and the lint must be silenced.
190#[allow(dead_code)]
191pub fn parse_container_ps(output: &str) -> Vec<ContainerInfo> {
192    output
193        .lines()
194        .filter_map(|line| try_parse_container_line(line.trim()))
195        .collect()
196}
197
198// ---------------------------------------------------------------------------
199// ContainerRuntime
200// ---------------------------------------------------------------------------
201
202/// Supported container runtimes.
203#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)]
204pub enum ContainerRuntime {
205    Docker,
206    Podman,
207}
208
209impl ContainerRuntime {
210    /// Returns the CLI binary name.
211    pub fn as_str(&self) -> &'static str {
212        match self {
213            ContainerRuntime::Docker => "docker",
214            ContainerRuntime::Podman => "podman",
215        }
216    }
217}
218
219/// Detect runtime from command output by matching the LAST non-empty trimmed
220/// line. Only "docker" or "podman" are accepted. MOTD-resilient.
221/// Currently unused (sentinel-based detection handles this inline) but kept
222/// as a public utility for potential future two-step detection paths.
223#[allow(dead_code)]
224pub fn parse_runtime(output: &str) -> Option<ContainerRuntime> {
225    let last = output
226        .lines()
227        .rev()
228        .map(|l| l.trim())
229        .find(|l| !l.is_empty())?;
230    match last {
231        "docker" => Some(ContainerRuntime::Docker),
232        "podman" => Some(ContainerRuntime::Podman),
233        _ => None,
234    }
235}
236
237// ---------------------------------------------------------------------------
238// ContainerAction
239// ---------------------------------------------------------------------------
240
/// Actions that can be performed on a container.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum ContainerAction {
    Start,
    Stop,
    Restart,
}

impl ContainerAction {
    /// Returns the CLI sub-command string passed to the runtime binary.
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Start => "start",
            Self::Stop => "stop",
            Self::Restart => "restart",
        }
    }
}
259
260/// Build the shell command to perform an action on a container.
261pub fn container_action_command(
262    runtime: ContainerRuntime,
263    action: ContainerAction,
264    container_id: &str,
265) -> String {
266    format!("{} {} {}", runtime.as_str(), action.as_str(), container_id)
267}
268
269// ---------------------------------------------------------------------------
270// Container ID validation
271// ---------------------------------------------------------------------------
272
273/// Validate a container ID or name.
274/// Accepts ASCII alphanumeric, hyphen, underscore, dot.
275/// Rejects empty, non-ASCII, shell metacharacters, colon.
276pub fn validate_container_id(id: &str) -> Result<(), String> {
277    if id.is_empty() {
278        return Err(crate::messages::CONTAINER_ID_EMPTY.to_string());
279    }
280    for c in id.chars() {
281        if !c.is_ascii_alphanumeric() && c != '-' && c != '_' && c != '.' {
282            return Err(crate::messages::container_id_invalid_char(c));
283        }
284    }
285    Ok(())
286}
287
288// ---------------------------------------------------------------------------
289// Combined SSH command + output parsing
290// ---------------------------------------------------------------------------
291
292/// Build the SSH command string for listing containers. Output is the
293/// container NDJSON, then the `##purple:engine##` sentinel, then the
294/// daemon version on its own line. The version subcall is suffixed with
295/// `|| true` so its failure cannot mask a `docker ps` error: the chain
296/// surfaces ps's exit code, while a missing version line just yields
297/// `engine_version: None` downstream.
298///
299/// - `Some(Docker)` / `Some(Podman)`: direct listing for the known runtime.
300/// - `None`: combined detection + listing with sentinel markers in one SSH call.
301pub fn container_list_command(runtime: Option<ContainerRuntime>) -> String {
302    match runtime {
303        Some(ContainerRuntime::Docker) => concat!(
304            "docker ps -a --format '{{json .}}' && ",
305            "echo '##purple:engine##' && ",
306            "{ docker version --format '{{.Server.Version}}' 2>/dev/null || true; }"
307        )
308        .to_string(),
309        Some(ContainerRuntime::Podman) => concat!(
310            "podman ps -a --format '{{json .}}' && ",
311            "echo '##purple:engine##' && ",
312            "{ podman version --format '{{.Server.Version}}' 2>/dev/null || true; }"
313        )
314        .to_string(),
315        None => concat!(
316            "if command -v docker >/dev/null 2>&1; then ",
317            "echo '##purple:docker##' && docker ps -a --format '{{json .}}' && ",
318            "echo '##purple:engine##' && ",
319            "{ docker version --format '{{.Server.Version}}' 2>/dev/null || true; }; ",
320            "elif command -v podman >/dev/null 2>&1; then ",
321            "echo '##purple:podman##' && podman ps -a --format '{{json .}}' && ",
322            "echo '##purple:engine##' && ",
323            "{ podman version --format '{{.Server.Version}}' 2>/dev/null || true; }; ",
324            "else echo '##purple:none##'; fi"
325        )
326        .to_string(),
327    }
328}
329
/// Parsed result of a container listing command. `engine_version` is the
/// daemon's `Server.Version` (best-effort, `None` when the version sub-call
/// failed or the remote runtime predates the engine sentinel).
#[derive(Debug, Clone, PartialEq)]
pub struct ContainerListing {
    /// Runtime that produced the listing (sentinel-detected or caller-provided).
    pub runtime: ContainerRuntime,
    /// Daemon version string, when the version sub-call produced one.
    pub engine_version: Option<String>,
    /// Successfully parsed rows; unparseable rows are dropped during parsing.
    pub containers: Vec<ContainerInfo>,
}
339
/// Parse the stdout of a container listing command.
///
/// When sentinels are present (combined detection run): extract runtime from
/// the sentinel line, parse remaining lines as NDJSON. When `caller_runtime`
/// is provided (subsequent run with known runtime): parse all lines as NDJSON.
/// In both cases, `##purple:engine##` splits the listing from the optional
/// trailing daemon version line.
///
/// # Errors
/// Returns `Err` when the `none` sentinel reports no runtime installed,
/// an unknown sentinel value appears, or no sentinel exists and
/// `caller_runtime` is `None`.
pub fn parse_container_output(
    output: &str,
    caller_runtime: Option<ContainerRuntime>,
) -> Result<ContainerListing, String> {
    // The first non-engine sentinel line decides the runtime; absent any
    // sentinel, fall back to the caller-supplied cached runtime.
    let runtime = match output
        .lines()
        .map(str::trim)
        .find(|l| l.starts_with("##purple:") && (*l != "##purple:engine##"))
    {
        Some("##purple:none##") => {
            return Err(crate::messages::CONTAINER_RUNTIME_MISSING.to_string());
        }
        Some("##purple:docker##") => ContainerRuntime::Docker,
        Some("##purple:podman##") => ContainerRuntime::Podman,
        Some(other) => return Err(crate::messages::container_unknown_sentinel(other)),
        None => match caller_runtime {
            Some(rt) => rt,
            None => return Err("No sentinel found and no runtime provided.".to_string()),
        },
    };

    // Bound the version capture to the first non-empty post-sentinel line.
    // A trailing logout banner or MOTD after `docker version` would
    // otherwise concat into the cached engine_version and surface as
    // "25.0.3\n-- session closed --" in the Runtime field.
    let mut engine_version: Option<String> = None;
    let mut after_engine = false;
    let mut containers: Vec<ContainerInfo> = Vec::new();
    // Stream-parse each NDJSON line during the sentinel sweep so we never
    // build an intermediate copy of the listing block. At 1000 containers
    // that intermediate buffer would cost ~300 KB and an extra `.lines()`
    // walk; this loop is O(lines) with zero auxiliary allocation.
    for line in output.lines() {
        let trimmed = line.trim();
        if trimmed == "##purple:engine##" {
            after_engine = true;
            continue;
        }
        if trimmed.starts_with("##purple:") {
            // Runtime sentinel already consumed above; never feed it to serde.
            continue;
        }
        if after_engine {
            if !trimmed.is_empty() && engine_version.is_none() {
                engine_version = Some(trimmed.to_string());
            }
            continue;
        }
        if let Some(c) = try_parse_container_line(trimmed) {
            containers.push(c);
        }
    }

    // Fedora CoreOS, podman-machine and other distros symlink `docker` to
    // `podman`. Detection picks the docker branch but the JSON shape is
    // pure podman (array `Names`, lowercase `Id`). When that happens we
    // relabel the runtime so downstream consumers (MCP runtime field, host
    // detail label, sort/filter by runtime) match reality.
    let runtime = if matches!(runtime, ContainerRuntime::Docker) && looks_like_podman(output) {
        log::debug!(
            "[external] container detection: docker sentinel emitted podman-shaped JSON, relabeling runtime to Podman"
        );
        ContainerRuntime::Podman
    } else {
        runtime
    };

    log::debug!(
        "[external] container listing parsed: runtime={:?} version={:?} containers={}",
        runtime,
        engine_version,
        containers.len()
    );
    Ok(ContainerListing {
        runtime,
        engine_version,
        containers,
    })
}
425
/// Heuristic: does the raw `ps` output look like podman JSON?
/// Podman emits `"Names":[` (array) and `"Id":` (lowercase d) for every row.
/// Docker emits `"Names":"` (string) and `"ID":` (uppercase D). We sample the
/// first JSON-shaped non-sentinel line. The check is fast (substring scan)
/// and only matters when the docker sentinel was emitted by a podman shim.
/// Accepts both `"Names":[` and `"Names": [` (pretty-printed) so handwritten
/// test fixtures and any intermediate JSON formatter cannot defeat the
/// detector silently.
fn looks_like_podman(output: &str) -> bool {
    output
        .lines()
        .map(str::trim)
        .find(|l| !l.is_empty() && !l.starts_with("##purple:") && l.starts_with('{'))
        .map_or(false, |sample| {
            sample.contains("\"Names\":[") || sample.contains("\"Names\": [")
        })
}
444
445// ---------------------------------------------------------------------------
446// SSH fetch functions
447// ---------------------------------------------------------------------------
448
/// Error from a container listing operation. Preserves the detected runtime
/// even when the `ps` command fails so it can be cached for future calls.
#[derive(Debug)]
pub struct ContainerError {
    /// Runtime known at the time of failure, if any (safe to cache).
    pub runtime: Option<ContainerRuntime>,
    /// User-facing error text; `Display` renders exactly this string.
    pub message: String,
}
456
457impl std::fmt::Display for ContainerError {
458    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
459        write!(f, "{}", self.message)
460    }
461}
462
463/// Translate SSH stderr into a user-friendly error message.
464fn friendly_container_error(stderr: &str, code: Option<i32>) -> String {
465    let lower = stderr.to_lowercase();
466    if lower.contains("remote host identification has changed")
467        || (lower.contains("host key for") && lower.contains("has changed"))
468    {
469        log::debug!("[external] Host key CHANGED detected; returning HOST_KEY_CHANGED toast");
470        crate::messages::HOST_KEY_CHANGED.to_string()
471    } else if lower.contains("host key verification failed")
472        || lower.contains("no matching host key")
473        || lower.contains("no ed25519 host key is known")
474        || lower.contains("no rsa host key is known")
475        || lower.contains("no ecdsa host key is known")
476        || lower.contains("host key is not known")
477    {
478        log::debug!("[external] Host key UNKNOWN detected; returning HOST_KEY_UNKNOWN toast");
479        crate::messages::HOST_KEY_UNKNOWN.to_string()
480    } else if lower.contains("command not found") {
481        crate::messages::CONTAINER_RUNTIME_NOT_FOUND.to_string()
482    } else if lower.contains("permission denied") || lower.contains("got permission denied") {
483        crate::messages::CONTAINER_PERMISSION_DENIED.to_string()
484    } else if lower.contains("cannot connect to the docker daemon")
485        || lower.contains("cannot connect to podman")
486    {
487        crate::messages::CONTAINER_DAEMON_NOT_RUNNING.to_string()
488    } else if lower.contains("connection refused") {
489        crate::messages::CONTAINER_CONNECTION_REFUSED.to_string()
490    } else if lower.contains("no route to host") || lower.contains("network is unreachable") {
491        crate::messages::CONTAINER_HOST_UNREACHABLE.to_string()
492    } else {
493        crate::messages::container_command_failed(code.unwrap_or(1))
494    }
495}
496
497/// Fetch container list synchronously via SSH.
498/// Follows the `fetch_remote_listing` pattern.
499pub fn fetch_containers(
500    ctx: &SshContext<'_>,
501    cached_runtime: Option<ContainerRuntime>,
502) -> Result<ContainerListing, ContainerError> {
503    let command = container_list_command(cached_runtime);
504    let result = crate::snippet::run_snippet(
505        ctx.alias,
506        ctx.config_path,
507        &command,
508        ctx.askpass,
509        ctx.bw_session,
510        true,
511        ctx.has_tunnel,
512    );
513    let alias = ctx.alias;
514    match result {
515        Ok(r) if r.status.success() => {
516            parse_container_output(&r.stdout, cached_runtime).map_err(|e| {
517                error!("[external] Container list parse failed: alias={alias}: {e}");
518                ContainerError {
519                    runtime: cached_runtime,
520                    message: e,
521                }
522            })
523        }
524        Ok(r) => {
525            let stderr = r.stderr.trim().to_string();
526            let msg = friendly_container_error(&stderr, r.status.code());
527            error!("[external] Container fetch failed: alias={alias}: {msg}");
528            Err(ContainerError {
529                runtime: cached_runtime,
530                message: msg,
531            })
532        }
533        Err(e) => {
534            error!("[external] Container fetch failed: alias={alias}: {e}");
535            Err(ContainerError {
536                runtime: cached_runtime,
537                message: e.to_string(),
538            })
539        }
540    }
541}
542
/// Spawn a background thread to fetch container listings.
/// Follows the `spawn_remote_listing` pattern.
///
/// `send` receives the host alias back alongside the result so the
/// caller can route the update to the right host entry.
pub fn spawn_container_listing<F>(
    ctx: OwnedSshContext,
    cached_runtime: Option<ContainerRuntime>,
    send: F,
) where
    F: FnOnce(String, Result<ContainerListing, ContainerError>) + Send + 'static,
{
    std::thread::spawn(move || {
        // Re-borrow the owned context as the `SshContext<'_>` view that
        // `fetch_containers` expects; the borrow ends before `ctx.alias`
        // is moved into `send` below.
        let borrowed = SshContext {
            alias: &ctx.alias,
            config_path: &ctx.config_path,
            askpass: ctx.askpass.as_deref(),
            bw_session: ctx.bw_session.as_deref(),
            has_tunnel: ctx.has_tunnel,
        };
        let result = fetch_containers(&borrowed, cached_runtime);
        send(ctx.alias, result);
    });
}
564
/// Spawn a background thread to perform a container action (start/stop/restart).
/// Validates the container ID before executing.
///
/// `send` is invoked exactly once with the alias, the attempted action
/// and the outcome — including on validation failure.
pub fn spawn_container_action<F>(
    ctx: OwnedSshContext,
    runtime: ContainerRuntime,
    action: ContainerAction,
    container_id: String,
    send: F,
) where
    F: FnOnce(String, ContainerAction, Result<(), String>) + Send + 'static,
{
    std::thread::spawn(move || {
        // Reject shell-unsafe IDs before any command string is built.
        if let Err(e) = validate_container_id(&container_id) {
            send(ctx.alias, action, Err(e));
            return;
        }
        let alias = &ctx.alias;
        info!(
            "Container action: {} container={container_id} alias={alias}",
            action.as_str()
        );
        let command = container_action_command(runtime, action, &container_id);
        let result = crate::snippet::run_snippet(
            alias,
            &ctx.config_path,
            &command,
            ctx.askpass.as_deref(),
            ctx.bw_session.as_deref(),
            true,
            ctx.has_tunnel,
        );
        match result {
            Ok(r) if r.status.success() => send(ctx.alias, action, Ok(())),
            Ok(r) => {
                // Non-zero exit: translate stderr into a friendly message.
                let err = friendly_container_error(r.stderr.trim(), r.status.code());
                error!(
                    "[external] Container {} failed: alias={alias} container={container_id}: {err}",
                    action.as_str()
                );
                send(ctx.alias, action, Err(err));
            }
            Err(e) => {
                // run_snippet itself failed (spawn/IO error), not the remote command.
                error!(
                    "[external] Container {} failed: alias={alias} container={container_id}: {e}",
                    action.as_str()
                );
                send(ctx.alias, action, Err(e.to_string()));
            }
        }
    });
}
616
617// ---------------------------------------------------------------------------
618// ContainerInspect: subset of `docker inspect` output we surface in the UI
619// ---------------------------------------------------------------------------
620
/// Parsed subset of `docker inspect <id>` (or `podman inspect`). Only the
/// fields purple's container detail panel renders are extracted; the rest
/// of the JSON document is discarded so cache size stays bounded.
#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize)]
pub struct ContainerInspect {
    /// `State.ExitCode`; defaults to 0 when the key is absent.
    pub exit_code: i32,
    /// True when the kernel OOM-killed the container
    /// (`OOMKilled` on docker/podman 5.x, `OomKilled` on podman 3.x).
    pub oom_killed: bool,
    /// `State.StartedAt` timestamp string, verbatim from inspect.
    pub started_at: String,
    /// `State.FinishedAt` timestamp string, verbatim from inspect.
    pub finished_at: String,
    /// Creation timestamp string from inspect.
    pub created_at: String,
    /// `Some("healthy" | "unhealthy" | "starting")` when the image defines
    /// a HEALTHCHECK. `None` when no healthcheck is configured.
    pub health: Option<String>,
    /// `RestartCount`; defaults to 0 when absent.
    pub restart_count: u32,
    /// `Config.Cmd` as a list, when present.
    pub command: Option<Vec<String>>,
    /// `Config.Entrypoint` as a list, when present.
    pub entrypoint: Option<Vec<String>>,
    /// Number of entries in `Config.Env` (values themselves not retained).
    pub env_count: usize,
    /// Number of entries in the `Mounts` array.
    pub mount_count: usize,
    /// Attached networks with their IP addresses.
    pub networks: Vec<NetworkInfo>,
    // Audit-relevant fields surfaced in the right-side detail panel.
    pub image_digest: Option<String>,
    pub restart_policy: Option<String>,
    pub user: Option<String>,
    pub privileged: bool,
    pub readonly_rootfs: bool,
    pub apparmor_profile: Option<String>,
    pub seccomp_profile: Option<String>,
    pub cap_add: Vec<String>,
    pub cap_drop: Vec<String>,
    pub mounts: Vec<MountInfo>,
    pub compose_project: Option<String>,
    pub compose_service: Option<String>,
    // Lifecycle / runtime details surfaced in the LIFECYCLE card.
    pub pid: Option<u32>,
    pub stop_signal: Option<String>,
    pub stop_timeout: Option<u32>,
    // App identity from OCI image labels (visible in APP card).
    pub image_version: Option<String>,
    pub image_revision: Option<String>,
    pub image_source: Option<String>,
    pub working_dir: Option<String>,
    pub hostname: Option<String>,
    // Resource constraints (RESOURCES card). 0 / None means unlimited.
    pub memory_limit: Option<u64>,
    pub cpu_limit_nanos: Option<u64>,
    pub pids_limit: Option<i64>,
    pub log_driver: Option<String>,
    // Network mode (NETWORK card): bridge / host / none / container:xyz.
    pub network_mode: Option<String>,
    // Healthcheck definition + recent stats (HEALTH card when present).
    pub health_test: Option<Vec<String>>,
    pub health_interval_ns: Option<u64>,
    pub health_failing_streak: Option<u32>,
}
675
/// One attached network extracted from `NetworkSettings.Networks`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct NetworkInfo {
    /// Network name (the map key in `NetworkSettings.Networks`).
    pub name: String,
    /// Container IP on this network; empty when unassigned.
    pub ip_address: String,
}
681
/// One mount entry extracted from the inspect `Mounts` array.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct MountInfo {
    /// Host-side source path.
    pub source: String,
    /// Mount point inside the container.
    pub destination: String,
    /// True when mounted read-only (inspect reports `RW: false`).
    pub read_only: bool,
}
688
689/// Build the SSH command string for inspecting a single container.
690pub fn container_inspect_command(runtime: ContainerRuntime, container_id: &str) -> String {
691    format!("{} inspect {}", runtime.as_str(), container_id)
692}
693
/// Translate a non-zero docker/podman exit code into a short
/// human-readable hint. Returns `None` for codes without a well-known
/// meaning so the UI can fall back to the bare number. Exit 0 has no
/// entry because the detail panel only annotates failed exits.
/// Sources: docker docs + Linux signal table.
pub fn exit_code_meaning(code: i32) -> Option<&'static str> {
    // Table form keeps the code/meaning pairs visually aligned; the
    // linear scan is irrelevant at this call frequency.
    const MEANINGS: [(i32, &str); 8] = [
        (1, "application error"),
        (125, "docker run failed"),
        (126, "command not executable"),
        (127, "command not found"),
        (130, "interrupted (SIGINT)"),
        (137, "killed (SIGKILL / OOM)"),
        (139, "segfault (SIGSEGV)"),
        (143, "terminated (SIGTERM)"),
    ];
    MEANINGS
        .iter()
        .find(|&&(known, _)| known == code)
        .map(|&(_, meaning)| meaning)
}
712
713/// Parse `docker inspect <id>` stdout into `ContainerInspect`. The command
714/// always returns a JSON array; we take the first element. Missing fields
715/// degrade to defaults rather than fail so a partial response still
716/// renders something useful.
717pub fn parse_container_inspect(output: &str) -> Result<ContainerInspect, String> {
718    let trimmed = output.trim();
719    if trimmed.is_empty() {
720        return Err(crate::messages::CONTAINER_INSPECT_EMPTY.to_string());
721    }
722    let value: serde_json::Value = serde_json::from_str(trimmed)
723        .map_err(|e| crate::messages::container_inspect_parse_failed(&e.to_string()))?;
724    let entry = value
725        .as_array()
726        .and_then(|a| a.first())
727        .ok_or_else(|| crate::messages::CONTAINER_INSPECT_EMPTY.to_string())?;
728
729    let state = &entry["State"];
730    let config = &entry["Config"];
731    let network_settings = &entry["NetworkSettings"];
732
733    let exit_code = state["ExitCode"].as_i64().unwrap_or(0) as i32;
734    // Podman 5.x and docker both emit `OOMKilled`. Podman 3.x (still the
735    // packaged default on Ubuntu 22.04 LTS) emits `OomKilled`. Try both so
736    // OOM-killed containers surface in the ATTENTION card regardless of
737    // remote runtime version.
738    let oom_killed = state["OOMKilled"]
739        .as_bool()
740        .or_else(|| state["OomKilled"].as_bool())
741        .unwrap_or(false);
742    let started_at = state["StartedAt"].as_str().unwrap_or("").to_string();
743    let finished_at = state["FinishedAt"].as_str().unwrap_or("").to_string();
744    let health = state
745        .get("Health")
746        .and_then(|h| h.get("Status"))
747        .and_then(|s| s.as_str())
748        .map(|s| s.to_string());
749    let restart_count = entry["RestartCount"].as_u64().unwrap_or(0) as u32;
750
751    let command = config["Cmd"].as_array().map(|arr| {
752        arr.iter()
753            .filter_map(|v| v.as_str().map(|s| s.to_string()))
754            .collect()
755    });
756    let entrypoint = config["Entrypoint"].as_array().map(|arr| {
757        arr.iter()
758            .filter_map(|v| v.as_str().map(|s| s.to_string()))
759            .collect()
760    });
761    let env_count = config["Env"].as_array().map(|arr| arr.len()).unwrap_or(0);
762    let mount_count = entry["Mounts"].as_array().map(|arr| arr.len()).unwrap_or(0);
763
764    let networks = network_settings
765        .get("Networks")
766        .and_then(|n| n.as_object())
767        .map(|map| {
768            map.iter()
769                .map(|(name, cfg)| NetworkInfo {
770                    name: name.clone(),
771                    ip_address: cfg
772                        .get("IPAddress")
773                        .and_then(|v| v.as_str())
774                        .unwrap_or("")
775                        .to_string(),
776                })
777                .collect::<Vec<_>>()
778        })
779        .unwrap_or_default();
780
781    let host_config = &entry["HostConfig"];
782
783    let image_digest = entry["Image"]
784        .as_str()
785        .filter(|s| !s.is_empty())
786        .map(|s| s.to_string());
787    let restart_policy = host_config
788        .get("RestartPolicy")
789        .and_then(|p| p.get("Name"))
790        .and_then(|s| s.as_str())
791        .filter(|s| !s.is_empty() && *s != "no")
792        .map(|s| s.to_string());
793    let user = config["User"]
794        .as_str()
795        .filter(|s| !s.is_empty())
796        .map(|s| s.to_string());
797    let privileged = host_config["Privileged"].as_bool().unwrap_or(false);
798    let readonly_rootfs = host_config["ReadonlyRootfs"].as_bool().unwrap_or(false);
799    let apparmor_profile = host_config["AppArmorProfile"]
800        .as_str()
801        .or_else(|| entry["AppArmorProfile"].as_str())
802        .filter(|s| !s.is_empty())
803        .map(|s| s.to_string());
804    let seccomp_profile = host_config["SecurityOpt"].as_array().and_then(|arr| {
805        arr.iter()
806            .filter_map(|v| v.as_str())
807            .find_map(|s| s.strip_prefix("seccomp=").map(|v| v.to_string()))
808    });
809    let cap_add = host_config["CapAdd"]
810        .as_array()
811        .map(|arr| {
812            arr.iter()
813                .filter_map(|v| v.as_str().map(|s| s.to_string()))
814                .collect()
815        })
816        .unwrap_or_default();
817    let cap_drop = host_config["CapDrop"]
818        .as_array()
819        .map(|arr| {
820            arr.iter()
821                .filter_map(|v| v.as_str().map(|s| s.to_string()))
822                .collect()
823        })
824        .unwrap_or_default();
825    let mounts = entry["Mounts"]
826        .as_array()
827        .map(|arr| {
828            arr.iter()
829                .map(|m| MountInfo {
830                    source: m["Source"].as_str().unwrap_or("").to_string(),
831                    destination: m["Destination"].as_str().unwrap_or("").to_string(),
832                    read_only: !m["RW"].as_bool().unwrap_or(true),
833                })
834                .collect()
835        })
836        .unwrap_or_default();
837    let labels = config.get("Labels").and_then(|l| l.as_object());
838    let label = |key: &str| {
839        labels
840            .and_then(|l| l.get(key))
841            .and_then(|v| v.as_str())
842            .filter(|s| !s.is_empty())
843            .map(|s| s.to_string())
844    };
845    let compose_project = label("com.docker.compose.project");
846    let compose_service = label("com.docker.compose.service");
847    let image_version = label("org.opencontainers.image.version");
848    let image_revision = label("org.opencontainers.image.revision");
849    let image_source = label("org.opencontainers.image.source");
850
851    let created_at = entry["Created"].as_str().unwrap_or("").to_string();
852    // State.Pid is `0` when the container is not running. Drop the zero so
853    // the UI does not render a misleading "pid 0" row for exited rows.
854    let pid = state["Pid"].as_u64().filter(|n| *n > 0).map(|n| n as u32);
855    let hostname = config["Hostname"]
856        .as_str()
857        .filter(|s| !s.is_empty())
858        .map(|s| s.to_string());
859    let working_dir = config["WorkingDir"]
860        .as_str()
861        .filter(|s| !s.is_empty())
862        .map(|s| s.to_string());
863    let stop_signal = config["StopSignal"]
864        .as_str()
865        .filter(|s| !s.is_empty())
866        .map(|s| s.to_string());
867    let stop_timeout = config["StopTimeout"].as_u64().map(|n| n as u32);
868
869    let network_mode = host_config["NetworkMode"]
870        .as_str()
871        .filter(|s| !s.is_empty() && *s != "default")
872        .map(|s| s.to_string());
873    // HostConfig.Memory is bytes, 0 = unlimited (drop). Same for NanoCpus.
874    let memory_limit = host_config["Memory"].as_u64().filter(|n| *n > 0);
875    let cpu_limit_nanos = host_config["NanoCpus"].as_u64().filter(|n| *n > 0);
876    // PidsLimit is i64. 0 or -1 means unlimited; drop both.
877    let pids_limit = host_config["PidsLimit"].as_i64().filter(|n| *n > 0);
878    // LogConfig.Type defaults to "json-file" on docker. Always carry it
879    // so the renderer can decide whether to surface "Logs" only when
880    // non-default.
881    let log_driver = host_config
882        .get("LogConfig")
883        .and_then(|l| l.get("Type"))
884        .and_then(|v| v.as_str())
885        .filter(|s| !s.is_empty())
886        .map(|s| s.to_string());
887
888    let healthcheck = config.get("Healthcheck");
889    let health_test = healthcheck
890        .and_then(|h| h.get("Test"))
891        .and_then(|t| t.as_array())
892        .map(|arr| {
893            arr.iter()
894                .filter_map(|v| v.as_str().map(|s| s.to_string()))
895                .collect::<Vec<_>>()
896        })
897        .filter(|v| !v.is_empty());
898    let health_interval_ns = healthcheck
899        .and_then(|h| h.get("Interval"))
900        .and_then(|v| v.as_u64())
901        .filter(|n| *n > 0);
902    let health_failing_streak = state
903        .get("Health")
904        .and_then(|h| h.get("FailingStreak"))
905        .and_then(|v| v.as_u64())
906        .map(|n| n as u32);
907
908    Ok(ContainerInspect {
909        exit_code,
910        oom_killed,
911        started_at,
912        finished_at,
913        created_at,
914        health,
915        restart_count,
916        command,
917        entrypoint,
918        env_count,
919        mount_count,
920        networks,
921        image_digest,
922        restart_policy,
923        user,
924        privileged,
925        readonly_rootfs,
926        apparmor_profile,
927        seccomp_profile,
928        cap_add,
929        cap_drop,
930        mounts,
931        compose_project,
932        compose_service,
933        pid,
934        stop_signal,
935        stop_timeout,
936        image_version,
937        image_revision,
938        image_source,
939        working_dir,
940        hostname,
941        memory_limit,
942        cpu_limit_nanos,
943        pids_limit,
944        log_driver,
945        network_mode,
946        health_test,
947        health_interval_ns,
948        health_failing_streak,
949    })
950}
951
/// Turn a Docker `Up …` status string into a compact uptime label.
///
/// Returns `None` for every non-running state (Exited, Created,
/// Restarting, Paused without an `Up ` prefix, empty string). Sub-minute
/// uptimes render as `<1m`; otherwise the label is `<count><suffix>`
/// (`1m`, `5m`, `12h`, `5w`, `3mo`, `2y`). Input wording follows
/// Docker's `units.HumanDuration`; a trailing parenthesised health
/// annotation (e.g. `(healthy)`) is ignored.
pub fn parse_uptime_from_status(s: &str) -> Option<String> {
    let raw = s.strip_prefix("Up ")?;
    // Drop a health suffix like "(healthy)" / "(unhealthy)".
    let body = match raw.split_once('(') {
        Some((head, _)) => head,
        None => raw,
    }
    .trim();
    // Docker spells out the three "about one unit" cases as words.
    match body {
        "Less than a second" => return Some("<1m".to_string()),
        "About a minute" => return Some("1m".to_string()),
        "About an hour" => return Some("1h".to_string()),
        _ => {}
    }
    let mut words = body.split_whitespace();
    let count: u64 = words.next()?.parse().ok()?;
    let suffix = match words.next()? {
        // Anything measured in seconds collapses to the sub-minute label.
        "second" | "seconds" => return Some("<1m".to_string()),
        "minute" | "minutes" => "m",
        "hour" | "hours" => "h",
        "day" | "days" => "d",
        "week" | "weeks" => "w",
        "month" | "months" => "mo",
        "year" | "years" => "y",
        _ => return None,
    };
    Some(format!("{count}{suffix}"))
}
984
985/// Synchronously fetch + parse `container inspect`. Validates the
986/// container ID before issuing the SSH call.
987pub fn fetch_container_inspect(
988    ctx: &SshContext<'_>,
989    runtime: ContainerRuntime,
990    container_id: &str,
991) -> Result<ContainerInspect, String> {
992    validate_container_id(container_id)?;
993    let command = container_inspect_command(runtime, container_id);
994    let result = crate::snippet::run_snippet(
995        ctx.alias,
996        ctx.config_path,
997        &command,
998        ctx.askpass,
999        ctx.bw_session,
1000        true,
1001        ctx.has_tunnel,
1002    );
1003    match result {
1004        Ok(r) if r.status.success() => parse_container_inspect(&r.stdout),
1005        Ok(r) => Err(crate::messages::container_command_failed(
1006            r.status.code().unwrap_or(1),
1007        )),
1008        Err(e) => Err(e.to_string()),
1009    }
1010}
1011
1012/// Spawn a background thread to run `container inspect`. Mirrors the
1013/// `spawn_container_listing` pattern so the call site looks identical.
1014pub fn spawn_container_inspect_listing<F>(
1015    ctx: OwnedSshContext,
1016    runtime: ContainerRuntime,
1017    container_id: String,
1018    send: F,
1019) where
1020    F: FnOnce(String, String, Result<ContainerInspect, String>) + Send + 'static,
1021{
1022    std::thread::spawn(move || {
1023        let borrowed = SshContext {
1024            alias: &ctx.alias,
1025            config_path: &ctx.config_path,
1026            askpass: ctx.askpass.as_deref(),
1027            bw_session: ctx.bw_session.as_deref(),
1028            has_tunnel: ctx.has_tunnel,
1029        };
1030        let result = fetch_container_inspect(&borrowed, runtime, &container_id);
1031        send(ctx.alias, container_id, result);
1032    });
1033}
1034
1035/// Build the `<runtime> logs --tail <n> <id>` command. The
1036/// `--tail` cap is enforced server-side so the SSH stream stays
1037/// bounded even on a noisy container.
1038pub fn container_logs_command(
1039    runtime: ContainerRuntime,
1040    container_id: &str,
1041    tail: usize,
1042) -> String {
1043    format!("{} logs --tail {} {}", runtime.as_str(), tail, container_id)
1044}
1045
1046/// Synchronously fetch logs and split into lines. Returns the raw
1047/// captured stdout split on `\n` so the renderer does not have to
1048/// re-parse. Empty trailing lines are dropped.
1049pub fn fetch_container_logs(
1050    ctx: &SshContext<'_>,
1051    runtime: ContainerRuntime,
1052    container_id: &str,
1053    tail: usize,
1054) -> Result<Vec<String>, String> {
1055    validate_container_id(container_id)?;
1056    let command = container_logs_command(runtime, container_id, tail);
1057    let result = crate::snippet::run_snippet(
1058        ctx.alias,
1059        ctx.config_path,
1060        &command,
1061        ctx.askpass,
1062        ctx.bw_session,
1063        true,
1064        ctx.has_tunnel,
1065    );
1066    match result {
1067        Ok(r) if r.status.success() => Ok(parse_log_output(&r.stdout, &r.stderr)),
1068        Ok(r) => Err(crate::messages::container_command_failed(
1069            r.status.code().unwrap_or(1),
1070        )),
1071        Err(e) => Err(e.to_string()),
1072    }
1073}
1074
/// Combine captured stdout (app logs) and stderr (errors) into one line
/// list: all stdout lines first, then all stderr lines. Many container
/// runtimes split log levels across the two streams, so surfacing both
/// is closer to what `docker logs` prints on a TTY. Trailing blank lines
/// are stripped from the stdout portion before stderr is appended — a
/// stdout block ending in a newline must not leave a phantom empty row
/// between the streams — and again from the combined result.
pub(crate) fn parse_log_output(stdout: &str, stderr: &str) -> Vec<String> {
    let drop_trailing_blanks = |v: &mut Vec<String>| {
        while v.last().map_or(false, |line| line.is_empty()) {
            v.pop();
        }
    };
    let mut merged: Vec<String> = stdout.lines().map(str::to_owned).collect();
    drop_trailing_blanks(&mut merged);
    merged.extend(stderr.lines().map(str::to_owned));
    drop_trailing_blanks(&mut merged);
    merged
}
1094
1095/// Spawn a background thread to run `container logs`. Same shape as
1096/// `spawn_container_inspect_listing`.
1097pub fn spawn_container_logs_fetch<F>(
1098    ctx: OwnedSshContext,
1099    runtime: ContainerRuntime,
1100    container_id: String,
1101    container_name: String,
1102    tail: usize,
1103    send: F,
1104) where
1105    F: FnOnce(String, String, String, Result<Vec<String>, String>) + Send + 'static,
1106{
1107    std::thread::spawn(move || {
1108        let borrowed = SshContext {
1109            alias: &ctx.alias,
1110            config_path: &ctx.config_path,
1111            askpass: ctx.askpass.as_deref(),
1112            bw_session: ctx.bw_session.as_deref(),
1113            has_tunnel: ctx.has_tunnel,
1114        };
1115        let result = fetch_container_logs(&borrowed, runtime, &container_id, tail);
1116        send(ctx.alias, container_id, container_name, result);
1117    });
1118}
1119
1120// ---------------------------------------------------------------------------
1121// JSON lines cache
1122// ---------------------------------------------------------------------------
1123
/// A cached container listing for a single host. `engine_version` is the
/// daemon's `Server.Version` captured during the last refresh, surfaced in
/// the host detail panel; `None` means the version sub-call did not return
/// or the cache was written by an older purple build.
#[derive(Debug, Clone)]
pub struct ContainerCacheEntry {
    // When the listing was captured; rendered relative to "now" by
    // helpers like `format_relative_time` (assumed Unix seconds — the
    // writer is outside this file; confirm at the refresh call site).
    pub timestamp: u64,
    // Which container engine produced the listing (docker vs podman).
    pub runtime: ContainerRuntime,
    // Daemon version string from the last refresh; `None` when absent.
    pub engine_version: Option<String>,
    // The container rows captured during the last refresh.
    pub containers: Vec<ContainerInfo>,
}
1135
/// Serde helper for a single JSON line in the cache file. `engine_version`
/// uses `serde(default)` so cache files written before this field existed
/// still deserialize cleanly.
#[derive(Serialize, Deserialize)]
struct CacheLine {
    // Host alias; becomes the HashMap key when the cache is loaded.
    alias: String,
    // Mirrors `ContainerCacheEntry::timestamp`.
    timestamp: u64,
    // Mirrors `ContainerCacheEntry::runtime`.
    runtime: ContainerRuntime,
    // `default` keeps pre-field cache lines parsing; `skip_serializing_if`
    // omits the key entirely for `None` so written lines stay compact.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    engine_version: Option<String>,
    // Full container rows for the host.
    containers: Vec<ContainerInfo>,
}
1148
// Test-only thread-local override for the cache file path.
// Mirrors `preferences::set_path_override` so unit tests can write
// to a tempdir instead of polluting the real `~/.purple/`.
// Thread-local rather than a process-wide static so tests running in
// parallel threads cannot clobber each other's override.
#[cfg(test)]
thread_local! {
    static PATH_OVERRIDE: std::cell::RefCell<Option<std::path::PathBuf>> =
        const { std::cell::RefCell::new(None) };
}
1157
#[cfg(test)]
pub fn set_path_override(path: std::path::PathBuf) {
    // Route cache reads/writes to `path` for the current test thread.
    PATH_OVERRIDE.with(|cell| {
        cell.replace(Some(path));
    });
}
1162
#[cfg(test)]
#[allow(dead_code)]
pub fn clear_path_override() {
    // `take` resets the cell to its default, i.e. no override (`None`).
    PATH_OVERRIDE.with(|cell| {
        cell.take();
    });
}
1168
1169fn cache_path() -> Option<std::path::PathBuf> {
1170    // Tests MUST opt in via `set_path_override` before any code
1171    // path that loads or saves the cache. Falling through to the
1172    // production path lets a forgotten override pollute (and in
1173    // the orphan-prune branch of `reload_hosts`, wipe) the user's
1174    // real `~/.purple/container_cache.jsonl`.
1175    #[cfg(test)]
1176    {
1177        PATH_OVERRIDE.with(|p| p.borrow().clone())
1178    }
1179    #[cfg(not(test))]
1180    {
1181        dirs::home_dir().map(|h| h.join(".purple").join("container_cache.jsonl"))
1182    }
1183}
1184
1185/// Load container cache from `~/.purple/container_cache.jsonl`.
1186/// Malformed lines are silently ignored. Duplicate aliases: last-write-wins.
1187pub fn load_container_cache() -> HashMap<String, ContainerCacheEntry> {
1188    let mut map = HashMap::new();
1189    let Some(path) = cache_path() else {
1190        return map;
1191    };
1192    let Ok(content) = std::fs::read_to_string(&path) else {
1193        return map;
1194    };
1195    for line in content.lines() {
1196        let trimmed = line.trim();
1197        if trimmed.is_empty() {
1198            continue;
1199        }
1200        if let Ok(entry) = serde_json::from_str::<CacheLine>(trimmed) {
1201            map.insert(
1202                entry.alias,
1203                ContainerCacheEntry {
1204                    timestamp: entry.timestamp,
1205                    runtime: entry.runtime,
1206                    engine_version: entry.engine_version,
1207                    containers: entry.containers,
1208                },
1209            );
1210        }
1211    }
1212    map
1213}
1214
1215/// Parse container cache from JSONL content string (for demo/test use).
1216pub fn parse_container_cache_content(content: &str) -> HashMap<String, ContainerCacheEntry> {
1217    let mut map = HashMap::new();
1218    for line in content.lines() {
1219        let trimmed = line.trim();
1220        if trimmed.is_empty() {
1221            continue;
1222        }
1223        if let Ok(entry) = serde_json::from_str::<CacheLine>(trimmed) {
1224            map.insert(
1225                entry.alias,
1226                ContainerCacheEntry {
1227                    timestamp: entry.timestamp,
1228                    runtime: entry.runtime,
1229                    engine_version: entry.engine_version,
1230                    containers: entry.containers,
1231                },
1232            );
1233        }
1234    }
1235    map
1236}
1237
1238/// Save container cache to `~/.purple/container_cache.jsonl` via atomic write.
1239pub fn save_container_cache(cache: &HashMap<String, ContainerCacheEntry>) {
1240    if crate::demo_flag::is_demo() {
1241        return;
1242    }
1243    let Some(path) = cache_path() else {
1244        return;
1245    };
1246    let mut lines = Vec::with_capacity(cache.len());
1247    for (alias, entry) in cache {
1248        let line = CacheLine {
1249            alias: alias.clone(),
1250            timestamp: entry.timestamp,
1251            runtime: entry.runtime,
1252            engine_version: entry.engine_version.clone(),
1253            containers: entry.containers.clone(),
1254        };
1255        if let Ok(s) = serde_json::to_string(&line) {
1256            lines.push(s);
1257        }
1258    }
1259    let content = lines.join("\n");
1260    log::debug!(
1261        "[purple] save_container_cache: {} host entries, {} bytes -> {}",
1262        cache.len(),
1263        content.len(),
1264        path.display()
1265    );
1266    if let Err(e) = crate::fs_util::atomic_write(&path, content.as_bytes()) {
1267        log::warn!(
1268            "[config] Failed to write container cache {}: {e}",
1269            path.display()
1270        );
1271    }
1272}
1273
1274// ---------------------------------------------------------------------------
1275// String truncation
1276// ---------------------------------------------------------------------------
1277
/// Truncate a string to at most `max` characters, appending ".." when
/// truncation happens (the ".." counts toward `max`).
///
/// Counting and cutting are done per `char` via `char_indices`, so
/// multi-byte UTF-8 text is never sliced mid-character. For `max < 2`
/// there is no room for the ".." marker, so the string is hard-truncated
/// to `max` characters instead (previously this returned ".." and
/// exceeded the budget for `max` of 0 or 1).
pub fn truncate_str(s: &str, max: usize) -> String {
    let count = s.chars().count();
    if count <= max {
        return s.to_string();
    }
    if max < 2 {
        // ".." alone would already blow the budget; keep the documented
        // contract by truncating without a marker.
        return s.chars().take(max).collect();
    }
    // Reserve two character slots for "..": keep `max - 2` chars of `s`.
    let cut = max - 2;
    let end = s.char_indices().nth(cut).map(|(i, _)| i).unwrap_or(s.len());
    format!("{}..", &s[..end])
}
1289
1290// ---------------------------------------------------------------------------
1291// Relative time
1292// ---------------------------------------------------------------------------
1293
/// Format a duration in seconds as a compact label (`12s`, `5m`,
/// `2h`, `3d`). Used for the in-border staleness badge where width
/// is precious and the surrounding label (`synced`) already says
/// "ago" without the suffix.
pub fn format_uptime_short(seconds: u64) -> String {
    const MINUTE: u64 = 60;
    const HOUR: u64 = 60 * MINUTE;
    const DAY: u64 = 24 * HOUR;
    match seconds {
        s if s < MINUTE => format!("{s}s"),
        s if s < HOUR => format!("{}m", s / MINUTE),
        s if s < DAY => format!("{}h", s / HOUR),
        s => format!("{}d", s / DAY),
    }
}
1309
1310/// Format a Unix timestamp as a human-readable relative time string.
1311/// Honours `demo_flag::now_secs()` when demo mode is active so visual
1312/// regression goldens stay byte-stable across long-running test
1313/// processes (same pattern as `history::format_time_ago`).
1314pub fn format_relative_time(timestamp: u64) -> String {
1315    let now = if crate::demo_flag::is_demo() {
1316        crate::demo_flag::now_secs()
1317    } else {
1318        SystemTime::now()
1319            .duration_since(UNIX_EPOCH)
1320            .unwrap_or_default()
1321            .as_secs()
1322    };
1323    let diff = now.saturating_sub(timestamp);
1324    if diff < 60 {
1325        "just now".to_string()
1326    } else if diff < 3600 {
1327        format!("{}m ago", diff / 60)
1328    } else if diff < 86400 {
1329        format!("{}h ago", diff / 3600)
1330    } else {
1331        format!("{}d ago", diff / 86400)
1332    }
1333}
1334
1335// ---------------------------------------------------------------------------
1336// Tests
1337// ---------------------------------------------------------------------------
1338
// Unit tests live in a sibling file to keep this module readable;
// `#[path]` mounts `containers_tests.rs` as the `tests` submodule for
// test builds only.
#[cfg(test)]
#[path = "containers_tests.rs"]
mod tests;