//! Podman backend for sbox: translates a resolved `ExecutionPlan` into
//! `podman` invocations — one-shot `run`, interactive `run -t`, or a
//! reusable `create`/`start`/`exec` session container.
1use std::fs;
2use std::io::IsTerminal;
3use std::path::{Path, PathBuf};
4use std::process::{Command, ExitCode, Stdio};
5
6use crate::error::SboxError;
7use crate::resolve::{
8    ExecutionPlan, ResolvedImageSource, ResolvedMount, ResolvedSecret, ResolvedUser,
9};
10
/// Cloud metadata service hostnames blocked when `network_allow` is active.
/// These resolve to 192.0.2.1 (RFC 5737 TEST-NET-1, non-routable) inside the container.
/// Does NOT block hardcoded-IP connections (e.g. direct TCP to 169.254.169.254) —
/// only hostname lookups are defeated, via injected /etc/hosts entries.
pub(crate) const CLOUD_METADATA_HOSTNAMES: &[&str] = &[
    "metadata.google.internal",   // GCP
    "metadata.internal",          // GCP alias
    "instance-data.ec2.internal", // AWS
    "169.254.169.254",            // hostname-style lookup (not the raw IP)
    "100.100.100.200",            // Alibaba Cloud
];
21
/// Result of probing the host for image-signature verification support
/// (see `inspect_signature_verification_support`).
#[derive(Debug, Clone)]
pub(crate) enum SignatureVerificationSupport {
    /// skopeo is usable and a containers policy that enforces signatures was found.
    Available {
        /// Policy file passed to `skopeo --policy`.
        policy: PathBuf,
    },
    /// Verification cannot be performed on this host.
    Unavailable {
        /// A policy file that was found but does not enforce signatures, if any.
        policy: Option<PathBuf>,
        /// Human-readable explanation surfaced to the user.
        reason: String,
    },
}
32
33pub fn execute(plan: &ExecutionPlan) -> Result<ExitCode, SboxError> {
34    if plan.policy.reuse_container {
35        return execute_via_reusable_session(plan, false);
36    }
37
38    validate_runtime_inputs(plan)?;
39    verify_image_signature(plan)?;
40    let image = resolve_container_image(plan)?;
41    let args = build_run_args(plan, &image)?;
42
43    let mut child = Command::new("podman");
44    child.args(&args);
45    child.current_dir(&plan.workspace.effective_host_dir);
46    child.stdin(Stdio::inherit());
47    child.stdout(Stdio::inherit());
48    child.stderr(Stdio::inherit());
49
50    let status = child
51        .status()
52        .map_err(|source| SboxError::BackendUnavailable {
53            backend: "podman".to_string(),
54            source,
55        })?;
56
57    Ok(status_to_exit_code(status))
58}
59
60pub fn execute_interactive(plan: &ExecutionPlan) -> Result<ExitCode, SboxError> {
61    if plan.policy.reuse_container {
62        return execute_via_reusable_session(plan, true);
63    }
64
65    validate_runtime_inputs(plan)?;
66    verify_image_signature(plan)?;
67    let image = resolve_container_image(plan)?;
68    let tty = std::io::stdin().is_terminal() && std::io::stdout().is_terminal();
69    let args = build_run_args_with_options(plan, &image, tty)?;
70
71    let mut child = Command::new("podman");
72    child.args(&args);
73    child.current_dir(&plan.workspace.effective_host_dir);
74    child.stdin(Stdio::inherit());
75    child.stdout(Stdio::inherit());
76    child.stderr(Stdio::inherit());
77
78    let status = child
79        .status()
80        .map_err(|source| SboxError::BackendUnavailable {
81            backend: "podman".to_string(),
82            source,
83        })?;
84
85    Ok(status_to_exit_code(status))
86}
87
88fn execute_via_reusable_session(
89    plan: &ExecutionPlan,
90    interactive: bool,
91) -> Result<ExitCode, SboxError> {
92    validate_runtime_inputs(plan)?;
93    verify_image_signature(plan)?;
94    let image = resolve_container_image(plan)?;
95    let session_name = plan
96        .policy
97        .reusable_session_name
98        .as_deref()
99        .ok_or_else(|| SboxError::ReusableSandboxSessionsNotImplemented {
100            profile: plan.profile_name.clone(),
101        })?;
102
103    ensure_reusable_container(plan, &image, session_name)?;
104
105    let tty = interactive && std::io::stdin().is_terminal() && std::io::stdout().is_terminal();
106    let mut child = Command::new("podman");
107    child.args(build_exec_args(plan, session_name, tty));
108    child.current_dir(&plan.workspace.effective_host_dir);
109    child.stdin(Stdio::inherit());
110    child.stdout(Stdio::inherit());
111    child.stderr(Stdio::inherit());
112
113    let status = child
114        .status()
115        .map_err(|source| SboxError::BackendUnavailable {
116            backend: "podman".to_string(),
117            source,
118        })?;
119
120    Ok(status_to_exit_code(status))
121}
122
123fn resolve_container_image(plan: &ExecutionPlan) -> Result<String, SboxError> {
124    match &plan.image.source {
125        ResolvedImageSource::Reference(reference) => Ok(reference.clone()),
126        ResolvedImageSource::Build { recipe_path, tag } => {
127            ensure_built_image(recipe_path, tag, &plan.workspace.root)?;
128            Ok(tag.clone())
129        }
130    }
131}
132
133fn verify_image_signature(plan: &ExecutionPlan) -> Result<(), SboxError> {
134    if !plan.image.verify_signature {
135        return Ok(());
136    }
137
138    let reference = match &plan.image.source {
139        ResolvedImageSource::Reference(reference) => reference.clone(),
140        ResolvedImageSource::Build { tag, .. } => {
141            return Err(SboxError::SignatureVerificationUnavailable {
142                image: tag.clone(),
143                reason: "signature verification is not implemented for local build images".into(),
144            });
145        }
146    };
147
148    let policy = match inspect_signature_verification_support()? {
149        SignatureVerificationSupport::Available { policy } => policy,
150        SignatureVerificationSupport::Unavailable { reason, .. } => {
151            return Err(SboxError::SignatureVerificationUnavailable {
152                image: reference,
153                reason,
154            });
155        }
156    };
157
158    run_signature_verification(&reference, &policy)
159}
160
161fn run_signature_verification(reference: &str, policy: &Path) -> Result<(), SboxError> {
162    let status = Command::new("skopeo")
163        .args([
164            "--policy",
165            &policy.display().to_string(),
166            "inspect",
167            "--raw",
168            &format!("docker://{reference}"),
169        ])
170        .stdin(Stdio::null())
171        .stdout(Stdio::null())
172        .stderr(Stdio::null())
173        .status()
174        .map_err(|source| SboxError::BackendUnavailable {
175            backend: "skopeo".to_string(),
176            source,
177        })?;
178
179    if status.success() {
180        Ok(())
181    } else {
182        Err(SboxError::SignatureVerificationFailed {
183            image: reference.to_string(),
184            policy: policy.to_path_buf(),
185            reason: format!("skopeo exited with status {}", status.code().unwrap_or(1)),
186        })
187    }
188}
189
190pub(crate) fn inspect_signature_verification_support()
191-> Result<SignatureVerificationSupport, SboxError> {
192    match Command::new("skopeo")
193        .arg("--version")
194        .stdin(Stdio::null())
195        .stdout(Stdio::null())
196        .stderr(Stdio::null())
197        .status()
198    {
199        Ok(status) if status.success() => {}
200        Ok(_) => {
201            return Ok(SignatureVerificationSupport::Unavailable {
202                policy: None,
203                reason: "skopeo is installed but unusable".into(),
204            });
205        }
206        Err(_) => {
207            return Ok(SignatureVerificationSupport::Unavailable {
208                policy: None,
209                reason: "skopeo is not installed".into(),
210            });
211        }
212    }
213
214    let Some(policy) = resolve_signature_policy_path() else {
215        return Ok(SignatureVerificationSupport::Unavailable {
216            policy: None,
217            reason: "no containers policy file found; configure SBOX_SIGNATURE_POLICY, ~/.config/containers/policy.json, or /etc/containers/policy.json".into(),
218        });
219    };
220
221    if !policy_supports_signature_verification(&policy)? {
222        return Ok(SignatureVerificationSupport::Unavailable {
223            policy: Some(policy.clone()),
224            reason: "policy does not enforce signature verification".to_string(),
225        });
226    }
227
228    Ok(SignatureVerificationSupport::Available { policy })
229}
230
231fn resolve_signature_policy_path() -> Option<PathBuf> {
232    if let Some(path) = std::env::var_os("SBOX_SIGNATURE_POLICY") {
233        let path = PathBuf::from(path);
234        if path.is_file() {
235            return Some(path);
236        }
237    }
238
239    if let Some(home) = crate::platform::home_dir() {
240        let user_policy = home.join(".config/containers/policy.json");
241        if user_policy.is_file() {
242            return Some(user_policy);
243        }
244    }
245
246    // /etc/containers/policy.json is Linux-specific; on other platforms this
247    // path won't exist and the check is harmless.
248    let system_policy = PathBuf::from("/etc/containers/policy.json");
249    system_policy.is_file().then_some(system_policy)
250}
251
252fn policy_supports_signature_verification(path: &Path) -> Result<bool, SboxError> {
253    let content = fs::read_to_string(path).map_err(|source| SboxError::ConfigRead {
254        path: path.to_path_buf(),
255        source,
256    })?;
257    let json: serde_json::Value =
258        serde_json::from_str(&content).map_err(|source| SboxError::ConfigValidation {
259            message: format!("invalid containers policy {}: {source}", path.display()),
260        })?;
261
262    let mut candidates = Vec::new();
263    if let Some(default) = json.get("default") {
264        candidates.push(default);
265    }
266    if let Some(transports) = json.get("transports").and_then(|value| value.as_object()) {
267        for transport_name in ["docker", "docker-daemon"] {
268            if let Some(scopes) = transports
269                .get(transport_name)
270                .and_then(|value| value.as_object())
271            {
272                for requirements in scopes.values() {
273                    candidates.push(requirements);
274                }
275            }
276        }
277    }
278
279    Ok(candidates
280        .into_iter()
281        .any(requirements_enable_signature_verification))
282}
283
284fn requirements_enable_signature_verification(value: &serde_json::Value) -> bool {
285    let Some(requirements) = value.as_array() else {
286        return false;
287    };
288
289    requirements.iter().any(|requirement| {
290        requirement
291            .get("type")
292            .and_then(|value| value.as_str())
293            .is_some_and(|kind| matches!(kind, "signedBy" | "sigstoreSigned"))
294    })
295}
296
/// Build `podman run` arguments for the non-interactive path (no TTY).
pub fn build_run_args(plan: &ExecutionPlan, image: &str) -> Result<Vec<String>, SboxError> {
    build_run_args_with_options(plan, image, false)
}
300
301pub fn build_run_args_with_options(
302    plan: &ExecutionPlan,
303    image: &str,
304    tty: bool,
305) -> Result<Vec<String>, SboxError> {
306    let mut args = vec!["run".to_string(), "--rm".to_string(), "-i".to_string()];
307
308    if tty {
309        args.push("-t".to_string());
310    }
311
312    args.push("--workdir".to_string());
313    args.push(plan.workspace.sandbox_cwd.clone());
314
315    if plan.policy.read_only_rootfs {
316        args.push("--read-only".to_string());
317    }
318
319    if plan.policy.no_new_privileges {
320        args.push("--security-opt".to_string());
321        args.push("no-new-privileges".to_string());
322    }
323
324    for capability in &plan.policy.cap_drop {
325        args.push("--cap-drop".to_string());
326        args.push(capability.clone());
327    }
328
329    for capability in &plan.policy.cap_add {
330        args.push("--cap-add".to_string());
331        args.push(capability.clone());
332    }
333
334    match plan.policy.network.as_str() {
335        "off" => {
336            args.push("--network".to_string());
337            args.push("none".to_string());
338        }
339        "on" => {}
340        other => {
341            args.push("--network".to_string());
342            args.push(other.to_string());
343        }
344    }
345
346    for port in &plan.policy.ports {
347        args.push("--publish".to_string());
348        args.push(port.clone());
349    }
350
351    match &plan.user {
352        ResolvedUser::KeepId => {
353            args.push("--userns".to_string());
354            args.push("keep-id".to_string());
355            // Disable SELinux relabeling; rootless keep-id containers cannot set
356            // xattrs on device nodes (e.g. /dev/null) when SELinux is enforcing.
357            args.push("--security-opt".to_string());
358            args.push("label=disable".to_string());
359        }
360        ResolvedUser::Explicit { uid, gid } => {
361            args.push("--user".to_string());
362            args.push(format!("{uid}:{gid}"));
363        }
364        ResolvedUser::Default => {}
365    }
366
367    for mount in &plan.mounts {
368        append_mount_args(&mut args, mount)?;
369    }
370
371    for cache in &plan.caches {
372        args.push("--mount".to_string());
373        if let Some(source) = &cache.source {
374            if let Some(path) = try_resolve_host_path(source, &plan.workspace.root) {
375                args.push(format!(
376                    "type=bind,src={},target={},relabel=private,readonly={}",
377                    path.display(),
378                    cache.target,
379                    bool_string(cache.read_only)
380                ));
381            } else {
382                args.push(format!(
383                    "type=volume,src={},target={},readonly={}",
384                    source,
385                    cache.target,
386                    bool_string(cache.read_only)
387                ));
388            }
389        } else {
390            args.push(format!(
391                "type=volume,src={},target={},readonly={}",
392                scoped_cache_name(&plan.workspace.root, &cache.name),
393                cache.target,
394                bool_string(cache.read_only)
395            ));
396        }
397    }
398
399    for secret in &plan.secrets {
400        append_secret_args(&mut args, secret, &plan.workspace.root)?;
401    }
402
403    for variable in &plan.environment.variables {
404        args.push("--env".to_string());
405        args.push(format!("{}={}", variable.name, variable.value));
406    }
407
408    if let Some(pull_policy) = &plan.policy.pull_policy {
409        args.push("--pull".to_string());
410        args.push(pull_policy.clone());
411    }
412
413    args.push(image.to_string());
414    args.extend(plan.command.iter().cloned());
415
416    Ok(args)
417}
418
419fn append_mount_args(args: &mut Vec<String>, mount: &ResolvedMount) -> Result<(), SboxError> {
420    match mount.kind.as_str() {
421        "bind" => {
422            let source = mount
423                .source
424                .as_ref()
425                .expect("bind mounts always resolve source");
426            args.push("--mount".to_string());
427            // relabel=private is omitted: we pass --security-opt label=disable for
428            // keep-id containers, and relabeling /dev/null (used for mask mounts) would
429            // fail with lsetxattr under SELinux + rootless Podman regardless.
430            args.push(format!(
431                "type=bind,src={},target={},readonly={}",
432                source.display(),
433                mount.target,
434                bool_string(mount.read_only)
435            ));
436            Ok(())
437        }
438        "tmpfs" => {
439            args.push("--tmpfs".to_string());
440            let spec = if mount.read_only {
441                format!("{}:ro", mount.target)
442            } else {
443                mount.target.clone()
444            };
445            args.push(spec);
446            Ok(())
447        }
448        "mask" => {
449            // Overlay the file with /dev/null so the container sees an empty file.
450            // The host file is untouched; malicious hooks cannot read its contents.
451            // No relabel=private: would trigger lsetxattr on /dev/null under SELinux.
452            args.push("--mount".to_string());
453            args.push(format!(
454                "type=bind,src=/dev/null,target={},readonly=true",
455                mount.target
456            ));
457            Ok(())
458        }
459        other => Err(SboxError::UnsupportedMountType {
460            mount_type: other.to_string(),
461        }),
462    }
463}
464
465fn ensure_reusable_container(
466    plan: &ExecutionPlan,
467    image: &str,
468    session_name: &str,
469) -> Result<(), SboxError> {
470    match inspect_container_state(session_name)? {
471        ContainerState::Running => return Ok(()),
472        ContainerState::Stopped => {
473            let status = Command::new("podman")
474                .args(["start", session_name])
475                .stdin(Stdio::null())
476                .stdout(Stdio::null())
477                .stderr(Stdio::null())
478                .status()
479                .map_err(|source| SboxError::BackendUnavailable {
480                    backend: "podman".to_string(),
481                    source,
482                })?;
483
484            if status.success() {
485                return Ok(());
486            }
487
488            return Err(SboxError::BackendCommandFailed {
489                backend: "podman".to_string(),
490                command: format!("podman start {session_name}"),
491                status: status.code().unwrap_or(1),
492            });
493        }
494        ContainerState::Missing => {}
495    }
496
497    let mut create_args = vec![
498        "create".to_string(),
499        "--name".to_string(),
500        session_name.to_string(),
501        "--workdir".to_string(),
502        plan.workspace.sandbox_cwd.clone(),
503    ];
504    append_container_settings(&mut create_args, plan)?;
505    create_args.push(image.to_string());
506    create_args.push("sleep".to_string());
507    create_args.push("infinity".to_string());
508
509    let create_status = Command::new("podman")
510        .args(&create_args)
511        .current_dir(&plan.workspace.effective_host_dir)
512        .stdin(Stdio::null())
513        .stdout(Stdio::null())
514        .stderr(Stdio::null())
515        .status()
516        .map_err(|source| SboxError::BackendUnavailable {
517            backend: "podman".to_string(),
518            source,
519        })?;
520
521    if !create_status.success() {
522        return Err(SboxError::BackendCommandFailed {
523            backend: "podman".to_string(),
524            command: format!("podman create --name {session_name} ..."),
525            status: create_status.code().unwrap_or(1),
526        });
527    }
528
529    let start_status = Command::new("podman")
530        .args(["start", session_name])
531        .stdin(Stdio::null())
532        .stdout(Stdio::null())
533        .stderr(Stdio::null())
534        .status()
535        .map_err(|source| SboxError::BackendUnavailable {
536            backend: "podman".to_string(),
537            source,
538        })?;
539
540    if start_status.success() {
541        Ok(())
542    } else {
543        Err(SboxError::BackendCommandFailed {
544            backend: "podman".to_string(),
545            command: format!("podman start {session_name}"),
546            status: start_status.code().unwrap_or(1),
547        })
548    }
549}
550
551fn build_exec_args(plan: &ExecutionPlan, session_name: &str, tty: bool) -> Vec<String> {
552    let mut args = vec!["exec".to_string(), "-i".to_string()];
553    if tty {
554        args.push("-t".to_string());
555    }
556
557    args.push("--workdir".to_string());
558    args.push(plan.workspace.sandbox_cwd.clone());
559
560    for variable in &plan.environment.variables {
561        args.push("--env".to_string());
562        args.push(format!("{}={}", variable.name, variable.value));
563    }
564
565    args.push(session_name.to_string());
566    args.extend(plan.command.iter().cloned());
567    args
568}
569
/// Append the shared container configuration flags — security options,
/// network policy, user mapping, mounts, caches, secrets, and environment —
/// used by `podman create` for reusable session containers.
///
/// NOTE(review): this is expected to stay in sync with
/// `build_run_args_with_options`, which emits the same settings for the
/// one-shot `podman run` path.
fn append_container_settings(
    args: &mut Vec<String>,
    plan: &ExecutionPlan,
) -> Result<(), SboxError> {
    if plan.policy.read_only_rootfs {
        args.push("--read-only".to_string());
    }

    if plan.policy.no_new_privileges {
        args.push("--security-opt".to_string());
        args.push("no-new-privileges".to_string());
    }

    for capability in &plan.policy.cap_drop {
        args.push("--cap-drop".to_string());
        args.push(capability.clone());
    }

    for capability in &plan.policy.cap_add {
        args.push("--cap-add".to_string());
        args.push(capability.clone());
    }

    // "on" keeps podman's default network; any other value passes through verbatim.
    match plan.policy.network.as_str() {
        "off" => {
            args.push("--network".to_string());
            args.push("none".to_string());
        }
        "on" => {}
        other => {
            args.push("--network".to_string());
            args.push(other.to_string());
        }
    }

    // Domain allow-listing: break default DNS, inject resolved IPs as /etc/hosts entries.
    // RFC 5737 192.0.2.1 is TEST-NET-1, guaranteed non-routable — DNS queries will time out.
    if !plan.policy.network_allow.is_empty() {
        args.push("--dns".to_string());
        args.push("192.0.2.1".to_string());
        for (hostname, ip) in &plan.policy.network_allow {
            args.push("--add-host".to_string());
            args.push(format!("{hostname}:{ip}"));
        }
    }

    // Block cloud metadata service hostnames whenever the network is on, regardless of
    // whether network_allow is set. /etc/hosts entries take priority over DNS.
    // NOTE: scripts connecting directly to 169.254.169.254 by raw IP bypass this;
    // full IP-level blocking requires host-level firewall rules outside of rootless Podman.
    if plan.policy.network != "off" {
        for hostname in CLOUD_METADATA_HOSTNAMES {
            args.push("--add-host".to_string());
            args.push(format!("{hostname}:192.0.2.1"));
        }
    }

    for port in &plan.policy.ports {
        args.push("--publish".to_string());
        args.push(port.clone());
    }

    match &plan.user {
        ResolvedUser::KeepId => {
            args.push("--userns".to_string());
            args.push("keep-id".to_string());
            // Disable SELinux relabeling; rootless keep-id containers cannot set
            // xattrs on device nodes when SELinux is enforcing.
            args.push("--security-opt".to_string());
            args.push("label=disable".to_string());
        }
        ResolvedUser::Explicit { uid, gid } => {
            args.push("--user".to_string());
            args.push(format!("{uid}:{gid}"));
        }
        ResolvedUser::Default => {}
    }

    for mount in &plan.mounts {
        append_mount_args(args, mount)?;
    }

    // Caches: bind-mount resolvable host paths; otherwise named volumes,
    // scoped per workspace when the cache has no explicit source.
    for cache in &plan.caches {
        args.push("--mount".to_string());
        if let Some(source) = &cache.source {
            if let Some(path) = try_resolve_host_path(source, &plan.workspace.root) {
                args.push(format!(
                    "type=bind,src={},target={},relabel=private,readonly={}",
                    path.display(),
                    cache.target,
                    bool_string(cache.read_only)
                ));
            } else {
                args.push(format!(
                    "type=volume,src={},target={},readonly={}",
                    source,
                    cache.target,
                    bool_string(cache.read_only)
                ));
            }
        } else {
            args.push(format!(
                "type=volume,src={},target={},readonly={}",
                scoped_cache_name(&plan.workspace.root, &cache.name),
                cache.target,
                bool_string(cache.read_only)
            ));
        }
    }

    for secret in &plan.secrets {
        append_secret_args(args, secret, &plan.workspace.root)?;
    }

    for variable in &plan.environment.variables {
        args.push("--env".to_string());
        args.push(format!("{}={}", variable.name, variable.value));
    }

    Ok(())
}
689
/// Observed lifecycle state of a named session container.
enum ContainerState {
    /// No container with that name exists (`podman inspect` exits 125).
    Missing,
    /// The container exists but `.State.Running` is not "true".
    Stopped,
    /// The container exists and is currently running.
    Running,
}
695
696fn inspect_container_state(session_name: &str) -> Result<ContainerState, SboxError> {
697    let output = Command::new("podman")
698        .args(["inspect", "--format", "{{.State.Running}}", session_name])
699        .stdin(Stdio::null())
700        .stdout(Stdio::piped())
701        .stderr(Stdio::null())
702        .output()
703        .map_err(|source| SboxError::BackendUnavailable {
704            backend: "podman".to_string(),
705            source,
706        })?;
707
708    if output.status.success() {
709        let stdout = String::from_utf8_lossy(&output.stdout);
710        if stdout.trim() == "true" {
711            Ok(ContainerState::Running)
712        } else {
713            Ok(ContainerState::Stopped)
714        }
715    } else if output.status.code() == Some(125) {
716        Ok(ContainerState::Missing)
717    } else {
718        Err(SboxError::BackendCommandFailed {
719            backend: "podman".to_string(),
720            command: format!("podman inspect {session_name}"),
721            status: output.status.code().unwrap_or(1),
722        })
723    }
724}
725
726fn validate_runtime_inputs(plan: &ExecutionPlan) -> Result<(), SboxError> {
727    for mount in &plan.mounts {
728        validate_mount_source(mount)?;
729    }
730
731    for secret in &plan.secrets {
732        validate_secret_source(secret, &plan.workspace.root)?;
733    }
734
735    Ok(())
736}
737
738fn validate_mount_source(mount: &ResolvedMount) -> Result<(), SboxError> {
739    if mount.kind != "bind" {
740        // "tmpfs" and "mask" mounts have no host source to validate
741        return Ok(());
742    }
743
744    let source = mount
745        .source
746        .as_ref()
747        .expect("bind mounts always resolve source");
748
749    if source.exists() {
750        return Ok(());
751    }
752
753    if mount.create {
754        // If the path looks like a file (has an extension), create an empty file.
755        // Otherwise create a directory (e.g. node_modules, .cache).
756        if source.extension().is_some() {
757            if let Some(parent) = source.parent() {
758                fs::create_dir_all(parent).ok();
759            }
760            return fs::write(source, b"").map_err(|_| SboxError::HostPathNotFound {
761                kind: "mount source",
762                name: mount.target.clone(),
763                path: source.clone(),
764            });
765        }
766        return fs::create_dir_all(source).map_err(|_| SboxError::HostPathNotFound {
767            kind: "mount source",
768            name: mount.target.clone(),
769            path: source.clone(),
770        });
771    }
772
773    Err(SboxError::HostPathNotFound {
774        kind: "mount source",
775        name: mount.target.clone(),
776        path: source.clone(),
777    })
778}
779
780fn append_secret_args(
781    args: &mut Vec<String>,
782    secret: &ResolvedSecret,
783    workspace_root: &Path,
784) -> Result<(), SboxError> {
785    let path = validate_secret_source(secret, workspace_root)?;
786
787    args.push("--mount".to_string());
788    args.push(format!(
789        "type=bind,src={},target={},relabel=private,readonly=true",
790        path.display(),
791        secret.target
792    ));
793    Ok(())
794}
795
796fn validate_secret_source(
797    secret: &ResolvedSecret,
798    workspace_root: &Path,
799) -> Result<PathBuf, SboxError> {
800    let path = try_resolve_host_path(&secret.source, workspace_root).ok_or_else(|| {
801        SboxError::UnsupportedSecretSource {
802            name: secret.name.clone(),
803            secret_source: secret.source.clone(),
804        }
805    })?;
806
807    if path.exists() {
808        Ok(path)
809    } else {
810        Err(SboxError::HostPathNotFound {
811            kind: "secret source",
812            name: secret.name.clone(),
813            path,
814        })
815    }
816}
817
818fn try_resolve_host_path(input: &str, base: &Path) -> Option<PathBuf> {
819    if input.starts_with("~/") || input == "~" {
820        let mut path = crate::platform::home_dir()?;
821        let remainder = input.strip_prefix("~/").unwrap_or("");
822        if !remainder.is_empty() {
823            path.push(remainder);
824        }
825        return Some(path);
826    }
827
828    let path = Path::new(input);
829    if path.is_absolute() {
830        return Some(path.to_path_buf());
831    }
832
833    if input.starts_with("./") || input.starts_with("../") || input.contains('/') {
834        return Some(base.join(path));
835    }
836
837    None
838}
839
840fn scoped_cache_name(workspace_root: &Path, cache_name: &str) -> String {
841    format!(
842        "sbox-cache-{}-{}",
843        stable_hash(&workspace_root.display().to_string()),
844        sanitize_volume_name(cache_name)
845    )
846}
847
/// Replace every character podman would reject in a volume name with `-`,
/// keeping ASCII alphanumerics plus `_`, `.`, and `-`.
fn sanitize_volume_name(name: &str) -> String {
    name.chars()
        .map(|ch| match ch {
            'a'..='z' | 'A'..='Z' | '0'..='9' | '_' | '.' | '-' => ch,
            _ => '-',
        })
        .collect()
}
859
/// 64-bit FNV-1a over the input bytes, rendered as 16 lowercase hex digits.
/// Deterministic across runs (unlike `DefaultHasher`), so derived volume
/// names stay stable between invocations.
fn stable_hash(input: &str) -> String {
    const FNV_OFFSET_BASIS: u64 = 0xcbf29ce484222325;
    const FNV_PRIME: u64 = 0x100000001b3;

    let digest = input.bytes().fold(FNV_OFFSET_BASIS, |acc, byte| {
        (acc ^ u64::from(byte)).wrapping_mul(FNV_PRIME)
    });
    format!("{digest:016x}")
}
868
/// Render a bool as the literal `"true"`/`"false"` podman expects in mount specs.
fn bool_string(value: bool) -> &'static str {
    match value {
        true => "true",
        false => "false",
    }
}
872
873fn ensure_built_image(
874    recipe_path: &Path,
875    tag: &str,
876    workspace_root: &Path,
877) -> Result<(), SboxError> {
878    let exists_status = Command::new("podman")
879        .args(["image", "exists", tag])
880        .current_dir(workspace_root)
881        .stdin(Stdio::null())
882        .stdout(Stdio::null())
883        .stderr(Stdio::null())
884        .status()
885        .map_err(|source| SboxError::BackendUnavailable {
886            backend: "podman".to_string(),
887            source,
888        })?;
889
890    if exists_status.success() {
891        return Ok(());
892    }
893
894    if exists_status.code() != Some(1) {
895        return Err(SboxError::BackendCommandFailed {
896            backend: "podman".to_string(),
897            command: format!("podman image exists {tag}"),
898            status: exists_status.code().unwrap_or(1),
899        });
900    }
901
902    let build_args = vec![
903        "build".to_string(),
904        "-t".to_string(),
905        tag.to_string(),
906        "-f".to_string(),
907        recipe_path.display().to_string(),
908        workspace_root.display().to_string(),
909    ];
910
911    let build_status = Command::new("podman")
912        .args(&build_args)
913        .current_dir(workspace_root)
914        .stdin(Stdio::inherit())
915        .stdout(Stdio::inherit())
916        .stderr(Stdio::inherit())
917        .status()
918        .map_err(|source| SboxError::BackendUnavailable {
919            backend: "podman".to_string(),
920            source,
921        })?;
922
923    if build_status.success() {
924        Ok(())
925    } else {
926        Err(SboxError::BackendCommandFailed {
927            backend: "podman".to_string(),
928            command: format!(
929                "podman build -t {tag} -f {} {}",
930                recipe_path.display(),
931                workspace_root.display()
932            ),
933            status: build_status.code().unwrap_or(1),
934        })
935    }
936}
937
/// Map a child process status to our own exit code; anything that is not a
/// plain exit code fitting in u8 (signal death, out-of-range codes)
/// collapses to 1.
fn status_to_exit_code(status: std::process::ExitStatus) -> ExitCode {
    status
        .code()
        .and_then(|code| u8::try_from(code).ok())
        .map_or(ExitCode::from(1), ExitCode::from)
}
944
#[cfg(test)]
mod tests {
    use std::fs;
    use std::path::{Path, PathBuf};
    use std::time::{SystemTime, UNIX_EPOCH};

    use crate::config::model::ExecutionMode;
    use crate::error::SboxError;
    use crate::resolve::{
        CwdMapping, EnvVarSource, ExecutionPlan, ModeSource, ProfileSource, ResolvedCache,
        ResolvedEnvVar, ResolvedEnvironment, ResolvedImage, ResolvedImageSource, ResolvedMount,
        ResolvedPolicy, ResolvedSecret, ResolvedUser, ResolvedWorkspace,
    };

    use super::{
        build_run_args, build_run_args_with_options, policy_supports_signature_verification,
        requirements_enable_signature_verification, validate_runtime_inputs,
    };

    /// Create a unique fixture directory containing a `secret.txt` file.
    ///
    /// Returns `(fixture_root, secret_path)`. Uniqueness comes from a
    /// nanosecond timestamp so concurrent test processes do not collide.
    /// NOTE(review): fixtures are never removed; fine for ephemeral CI temp
    /// dirs, but a cleanup pass would be tidier for local runs.
    fn create_temp_fixture() -> (PathBuf, PathBuf) {
        let unique = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("time should move forward")
            .as_nanos();
        let root = std::env::temp_dir().join(format!("sbox-podman-test-{unique}"));
        let secret = root.join("secret.txt");
        fs::create_dir_all(&root).expect("temp fixture directory should be created");
        fs::write(&secret, "token").expect("secret fixture should be written");
        (root, secret)
    }

    /// Build a fully-populated `ExecutionPlan` that exercises the common
    /// podman argument paths: workspace bind mount, named cache, secret,
    /// env var, published port, keep-id user mapping, and network off.
    fn sample_plan() -> ExecutionPlan {
        let (root, secret) = create_temp_fixture();

        ExecutionPlan {
            command: vec!["python".into(), "app.py".into()],
            command_string: "python app.py".into(),
            backend: crate::config::BackendKind::Podman,
            image: ResolvedImage {
                description: "ref:python:3.13-slim".into(),
                source: ResolvedImageSource::Reference("python:3.13-slim".into()),
                trust: crate::resolve::ImageTrust::MutableReference,
                verify_signature: false,
            },
            profile_name: "default".into(),
            profile_source: ProfileSource::DefaultProfile,
            mode: ExecutionMode::Sandbox,
            mode_source: ModeSource::Profile,
            workspace: ResolvedWorkspace {
                root: root.clone(),
                invocation_dir: root.clone(),
                effective_host_dir: root.clone(),
                mount: "/workspace".into(),
                sandbox_cwd: "/workspace".into(),
                cwd_mapping: CwdMapping::InvocationMapped,
            },
            policy: ResolvedPolicy {
                network: "off".into(),
                writable: true,
                ports: vec!["3000:3000".into()],
                no_new_privileges: true,
                read_only_rootfs: false,
                reuse_container: false,
                reusable_session_name: None,
                cap_drop: vec!["all".into()],
                cap_add: Vec::new(),
                pull_policy: None,
                network_allow: Vec::new(),
                network_allow_patterns: Vec::new(),
            },
            environment: ResolvedEnvironment {
                variables: vec![ResolvedEnvVar {
                    name: "APP_ENV".into(),
                    value: "development".into(),
                    source: EnvVarSource::Set,
                }],
                denied: Vec::new(),
            },
            mounts: vec![ResolvedMount {
                kind: "bind".into(),
                source: Some(root.clone()),
                target: "/workspace".into(),
                read_only: false,
                is_workspace: true,
                create: false,
            }],
            caches: vec![ResolvedCache {
                name: "uv-cache".into(),
                target: "/root/.cache/uv".into(),
                source: None,
                read_only: false,
            }],
            secrets: vec![ResolvedSecret {
                name: "token".into(),
                source: secret.display().to_string(),
                target: "/run/secrets/token".into(),
            }],
            user: ResolvedUser::KeepId,
            audit: crate::resolve::ExecutionAudit {
                install_style: false,
                trusted_image_required: false,
                sensitive_pass_through_vars: Vec::new(),
                lockfile: crate::resolve::LockfileAudit {
                    applicable: false,
                    required: false,
                    present: false,
                    expected_files: Vec::new(),
                },
                pre_run: Vec::new(),
            },
        }
    }

    /// The generated `podman run` arguments reflect every policy field in
    /// the sample plan (network, ports, userns, caps, env, image, command).
    #[test]
    fn builds_expected_podman_arguments() {
        let plan = sample_plan();
        let args = build_run_args(&plan, "python:3.13-slim").expect("builder should succeed");
        let joined = args.join(" ");

        assert!(joined.contains("run --rm -i"));
        assert!(joined.contains("--workdir /workspace"));
        assert!(joined.contains("--network none"));
        assert!(joined.contains("--publish 3000:3000"));
        assert!(joined.contains("--userns keep-id"));
        assert!(joined.contains("--cap-drop all"));
        assert!(joined.contains("--env APP_ENV=development"));
        assert!(joined.contains("python:3.13-slim python app.py"));
    }

    /// A secret whose host source path does not exist must be rejected in
    /// preflight with a typed `HostPathNotFound` error.
    #[test]
    fn preflight_fails_when_secret_source_is_missing() {
        let mut plan = sample_plan();
        plan.secrets[0].source = "/tmp/definitely-missing-sbox-secret".into();

        let error =
            validate_runtime_inputs(&plan).expect_err("missing secret should fail preflight");
        assert!(matches!(
            error,
            SboxError::HostPathNotFound {
                kind: "secret source",
                ..
            }
        ));
    }

    /// Interactive mode adds `-t` so podman allocates a pseudo-TTY.
    #[test]
    fn interactive_podman_arguments_enable_tty() {
        let plan = sample_plan();
        let args = build_run_args_with_options(&plan, "python:3.13-slim", true)
            .expect("interactive builder should succeed");

        assert!(args.iter().any(|arg| arg == "-t"));
    }

    /// A `signedBy` requirement entry counts as signature verification.
    #[test]
    fn requirements_detect_signed_by_policy() {
        let requirements = serde_json::json!([
            {
                "type": "signedBy",
                "keyType": "GPGKeys",
                "keyPath": "/tmp/test.pub"
            }
        ]);

        assert!(requirements_enable_signature_verification(&requirements));
    }

    /// A policy whose default is `insecureAcceptAnything` performs no
    /// signature verification.
    #[test]
    fn insecure_accept_anything_policy_is_not_verifying() {
        // Use the platform temp dir (not a hardcoded "/tmp") so the test is
        // portable and consistent with create_temp_fixture().
        let policy_path = write_policy(
            &std::env::temp_dir(),
            r#"{
  "default": [
    { "type": "insecureAcceptAnything" }
  ]
}"#,
        );

        assert!(
            !policy_supports_signature_verification(&policy_path)
                .expect("policy inspection should succeed")
        );
        let _ = fs::remove_file(policy_path);
    }

    /// A policy whose default requires `signedBy` is detected as verifying.
    #[test]
    fn signed_policy_is_detected_as_verifying() {
        // Use the platform temp dir (not a hardcoded "/tmp") so the test is
        // portable and consistent with create_temp_fixture().
        let policy_path = write_policy(
            &std::env::temp_dir(),
            r#"{
  "default": [
    {
      "type": "signedBy",
      "keyType": "GPGKeys",
      "keyPath": "/tmp/test.pub"
    }
  ]
}"#,
        );

        assert!(
            policy_supports_signature_verification(&policy_path)
                .expect("policy inspection should succeed")
        );
        let _ = fs::remove_file(policy_path);
    }

    /// Write `content` to a uniquely-named policy JSON file under `root`
    /// and return its path. Callers are responsible for removing the file.
    fn write_policy(root: &Path, content: &str) -> PathBuf {
        let unique = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("clock should be monotonic enough for tests")
            .as_nanos();
        let path = root.join(format!("sbox-policy-{unique}.json"));
        fs::write(&path, content).expect("policy fixture should be written");
        path
    }
}