// sbox/backend/docker.rs — Docker backend for sbox.
1use std::fs;
2use std::io::IsTerminal;
3use std::path::{Path, PathBuf};
4use std::process::{Command, ExitCode, Stdio};
5
6use crate::backend::podman::CLOUD_METADATA_HOSTNAMES;
7
8use crate::error::SboxError;
9use crate::resolve::{
10    ExecutionPlan, ResolvedImageSource, ResolvedMount, ResolvedSecret, ResolvedUser,
11};
12
13pub fn execute(plan: &ExecutionPlan) -> Result<ExitCode, SboxError> {
14    if plan.policy.reuse_container {
15        return execute_via_reusable_session(plan, false);
16    }
17
18    validate_runtime_inputs(plan)?;
19    let image = resolve_container_image(plan)?;
20    let args = build_run_args(plan, &image)?;
21
22    let mut child = Command::new("docker");
23    child.args(&args);
24    child.current_dir(&plan.workspace.effective_host_dir);
25    child.stdin(Stdio::inherit());
26    child.stdout(Stdio::inherit());
27    child.stderr(Stdio::inherit());
28
29    let status = child
30        .status()
31        .map_err(|source| SboxError::BackendUnavailable {
32            backend: "docker".to_string(),
33            source,
34        })?;
35
36    Ok(status_to_exit_code(status))
37}
38
39pub fn execute_interactive(plan: &ExecutionPlan) -> Result<ExitCode, SboxError> {
40    if plan.policy.reuse_container {
41        return execute_via_reusable_session(plan, true);
42    }
43
44    validate_runtime_inputs(plan)?;
45    let image = resolve_container_image(plan)?;
46    let tty = std::io::stdin().is_terminal() && std::io::stdout().is_terminal();
47    let args = build_run_args_with_options(plan, &image, tty)?;
48
49    let mut child = Command::new("docker");
50    child.args(&args);
51    child.current_dir(&plan.workspace.effective_host_dir);
52    child.stdin(Stdio::inherit());
53    child.stdout(Stdio::inherit());
54    child.stderr(Stdio::inherit());
55
56    let status = child
57        .status()
58        .map_err(|source| SboxError::BackendUnavailable {
59            backend: "docker".to_string(),
60            source,
61        })?;
62
63    Ok(status_to_exit_code(status))
64}
65
66fn execute_via_reusable_session(
67    plan: &ExecutionPlan,
68    interactive: bool,
69) -> Result<ExitCode, SboxError> {
70    validate_runtime_inputs(plan)?;
71    let image = resolve_container_image(plan)?;
72    let session_name = plan
73        .policy
74        .reusable_session_name
75        .as_deref()
76        .ok_or_else(|| SboxError::ReusableSandboxSessionsNotImplemented {
77            profile: plan.profile_name.clone(),
78        })?;
79
80    ensure_reusable_container(plan, &image, session_name)?;
81
82    let tty = interactive && std::io::stdin().is_terminal() && std::io::stdout().is_terminal();
83    let mut child = Command::new("docker");
84    child.args(build_exec_args(plan, session_name, tty));
85    child.current_dir(&plan.workspace.effective_host_dir);
86    child.stdin(Stdio::inherit());
87    child.stdout(Stdio::inherit());
88    child.stderr(Stdio::inherit());
89
90    let status = child
91        .status()
92        .map_err(|source| SboxError::BackendUnavailable {
93            backend: "docker".to_string(),
94            source,
95        })?;
96
97    Ok(status_to_exit_code(status))
98}
99
100fn resolve_container_image(plan: &ExecutionPlan) -> Result<String, SboxError> {
101    match &plan.image.source {
102        ResolvedImageSource::Reference(reference) => Ok(reference.clone()),
103        ResolvedImageSource::Build { recipe_path, tag } => {
104            ensure_built_image(recipe_path, tag, &plan.workspace.root)?;
105            Ok(tag.clone())
106        }
107    }
108}
109
110pub fn build_run_args(plan: &ExecutionPlan, image: &str) -> Result<Vec<String>, SboxError> {
111    build_run_args_with_options(plan, image, false)
112}
113
/// Build the complete `docker run` argument vector for a one-shot run.
///
/// Ordering is significant: every token pushed before the image name is a
/// docker flag, and every token after the image becomes part of the
/// container command — so the image and `plan.command` must stay last.
pub fn build_run_args_with_options(
    plan: &ExecutionPlan,
    image: &str,
    tty: bool,
) -> Result<Vec<String>, SboxError> {
    // --rm: discard the container afterwards; -i: keep stdin open for pipes.
    let mut args = vec!["run".to_string(), "--rm".to_string(), "-i".to_string()];

    if tty {
        args.push("-t".to_string());
    }

    args.push("--workdir".to_string());
    args.push(plan.workspace.sandbox_cwd.clone());

    if plan.policy.read_only_rootfs {
        args.push("--read-only".to_string());
    }

    if plan.policy.no_new_privileges {
        args.push("--security-opt".to_string());
        args.push("no-new-privileges".to_string());
    }

    for capability in &plan.policy.cap_drop {
        args.push("--cap-drop".to_string());
        args.push(capability.clone());
    }

    for capability in &plan.policy.cap_add {
        args.push("--cap-add".to_string());
        args.push(capability.clone());
    }

    // "on" keeps docker's default network; "off" maps to the `none` network;
    // any other value is passed through as a named docker network.
    match plan.policy.network.as_str() {
        "off" => {
            args.push("--network".to_string());
            args.push("none".to_string());
        }
        "on" => {}
        other => {
            args.push("--network".to_string());
            args.push(other.to_string());
        }
    }

    // Allow-list mode: point DNS at a sinkhole (192.0.2.1 is in TEST-NET-1,
    // RFC 5737, and never publicly routable) and inject only the approved
    // hostnames with their pre-resolved addresses via --add-host.
    if !plan.policy.network_allow.is_empty() {
        args.push("--dns".to_string());
        args.push("192.0.2.1".to_string());
        for (hostname, ip) in &plan.policy.network_allow {
            args.push("--add-host".to_string());
            args.push(format!("{hostname}:{ip}"));
        }
    }

    // Whenever any network is available, sinkhole cloud metadata endpoints
    // (credential-exfiltration targets) to the same non-routable address.
    if plan.policy.network != "off" {
        for hostname in CLOUD_METADATA_HOSTNAMES {
            args.push("--add-host".to_string());
            args.push(format!("{hostname}:192.0.2.1"));
        }
    }

    for port in &plan.policy.ports {
        args.push("--publish".to_string());
        args.push(port.clone());
    }

    // Docker has no --userns keep-id; always map to explicit uid:gid using the current process
    // identity so that files written to bind-mounted workspace dirs are owned by the host user
    // rather than root (the default in non-rootless Docker).
    match &plan.user {
        ResolvedUser::KeepId | ResolvedUser::Default => {
            let (uid, gid) = current_uid_gid();
            args.push("--user".to_string());
            args.push(format!("{uid}:{gid}"));
        }
        ResolvedUser::Explicit { uid, gid } => {
            args.push("--user".to_string());
            args.push(format!("{uid}:{gid}"));
        }
    }

    for mount in &plan.mounts {
        append_mount_args(&mut args, mount)?;
    }

    // Caches: a source that resolves to a host path becomes a bind mount;
    // any other explicit source is treated as a named docker volume; with no
    // source at all, a workspace-scoped volume name is generated.
    for cache in &plan.caches {
        args.push("--mount".to_string());
        if let Some(source) = &cache.source {
            if let Some(path) = try_resolve_host_path(source, &plan.workspace.root) {
                args.push(format!(
                    "type=bind,src={},target={},readonly={}",
                    path_to_docker_str(&path),
                    cache.target,
                    bool_string(cache.read_only)
                ));
            } else {
                args.push(format!(
                    "type=volume,src={},target={},readonly={}",
                    source,
                    cache.target,
                    bool_string(cache.read_only)
                ));
            }
        } else {
            args.push(format!(
                "type=volume,src={},target={},readonly={}",
                scoped_cache_name(&plan.workspace.root, &cache.name),
                cache.target,
                bool_string(cache.read_only)
            ));
        }
    }

    for secret in &plan.secrets {
        append_secret_args(&mut args, secret, &plan.workspace.root)?;
    }

    for variable in &plan.environment.variables {
        args.push("--env".to_string());
        args.push(format!("{}={}", variable.name, variable.value));
    }

    if let Some(pull_policy) = &plan.policy.pull_policy {
        args.push("--pull".to_string());
        args.push(pull_policy.clone());
    }

    // Image, then the user command — these must remain the final tokens.
    args.push(image.to_string());
    args.extend(plan.command.iter().cloned());

    Ok(args)
}
246
247fn append_mount_args(args: &mut Vec<String>, mount: &ResolvedMount) -> Result<(), SboxError> {
248    match mount.kind.as_str() {
249        "bind" => {
250            let source = mount
251                .source
252                .as_ref()
253                .expect("bind mounts always resolve source");
254            args.push("--mount".to_string());
255            // Docker does not support relabel=private (Podman/SELinux extension).
256            args.push(format!(
257                "type=bind,src={},target={},readonly={}",
258                path_to_docker_str(source),
259                mount.target,
260                bool_string(mount.read_only)
261            ));
262            Ok(())
263        }
264        "tmpfs" => {
265            args.push("--tmpfs".to_string());
266            let spec = if mount.read_only {
267                format!("{}:ro", mount.target)
268            } else {
269                mount.target.clone()
270            };
271            args.push(spec);
272            Ok(())
273        }
274        "mask" => {
275            args.push("--mount".to_string());
276            args.push(format!(
277                "type=bind,src=/dev/null,target={},readonly=true",
278                mount.target
279            ));
280            Ok(())
281        }
282        other => Err(SboxError::UnsupportedMountType {
283            mount_type: other.to_string(),
284        }),
285    }
286}
287
/// Ensure the named reusable-session container exists and is running.
///
/// Running → no-op. Stopped → `docker start` (preserving in-container
/// state). Missing → `docker create` with the full sandbox settings plus a
/// `sleep infinity` entrypoint (so the container idles and commands are
/// delivered later via `docker exec`), followed by `docker start`.
fn ensure_reusable_container(
    plan: &ExecutionPlan,
    image: &str,
    session_name: &str,
) -> Result<(), SboxError> {
    match inspect_container_state(session_name)? {
        ContainerState::Running => return Ok(()),
        ContainerState::Stopped => {
            // Restart the existing container rather than recreating it, so
            // any state accumulated inside it survives.
            let status = Command::new("docker")
                .args(["start", session_name])
                .stdin(Stdio::null())
                .stdout(Stdio::null())
                .stderr(Stdio::null())
                .status()
                .map_err(|source| SboxError::BackendUnavailable {
                    backend: "docker".to_string(),
                    source,
                })?;

            if status.success() {
                return Ok(());
            }

            return Err(SboxError::BackendCommandFailed {
                backend: "docker".to_string(),
                command: format!("docker start {session_name}"),
                status: status.code().unwrap_or(1),
            });
        }
        ContainerState::Missing => {}
    }

    // Container does not exist yet: create it with the same policy-derived
    // settings used on the one-shot run path.
    let mut create_args = vec![
        "create".to_string(),
        "--name".to_string(),
        session_name.to_string(),
        "--workdir".to_string(),
        plan.workspace.sandbox_cwd.clone(),
    ];
    append_container_settings(&mut create_args, plan)?;
    create_args.push(image.to_string());
    // Idle entrypoint: keeps the container alive between `docker exec` calls.
    create_args.push("sleep".to_string());
    create_args.push("infinity".to_string());

    let create_status = Command::new("docker")
        .args(&create_args)
        .current_dir(&plan.workspace.effective_host_dir)
        .stdin(Stdio::null())
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()
        .map_err(|source| SboxError::BackendUnavailable {
            backend: "docker".to_string(),
            source,
        })?;

    if !create_status.success() {
        return Err(SboxError::BackendCommandFailed {
            backend: "docker".to_string(),
            command: format!("docker create --name {session_name} ..."),
            status: create_status.code().unwrap_or(1),
        });
    }

    let start_status = Command::new("docker")
        .args(["start", session_name])
        .stdin(Stdio::null())
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()
        .map_err(|source| SboxError::BackendUnavailable {
            backend: "docker".to_string(),
            source,
        })?;

    if start_status.success() {
        Ok(())
    } else {
        Err(SboxError::BackendCommandFailed {
            backend: "docker".to_string(),
            command: format!("docker start {session_name}"),
            status: start_status.code().unwrap_or(1),
        })
    }
}
373
374fn build_exec_args(plan: &ExecutionPlan, session_name: &str, tty: bool) -> Vec<String> {
375    let mut args = vec!["exec".to_string(), "-i".to_string()];
376    if tty {
377        args.push("-t".to_string());
378    }
379
380    args.push("--workdir".to_string());
381    args.push(plan.workspace.sandbox_cwd.clone());
382
383    for variable in &plan.environment.variables {
384        args.push("--env".to_string());
385        args.push(format!("{}={}", variable.name, variable.value));
386    }
387
388    args.push(session_name.to_string());
389    args.extend(plan.command.iter().cloned());
390    args
391}
392
393fn append_container_settings(
394    args: &mut Vec<String>,
395    plan: &ExecutionPlan,
396) -> Result<(), SboxError> {
397    if plan.policy.read_only_rootfs {
398        args.push("--read-only".to_string());
399    }
400
401    if plan.policy.no_new_privileges {
402        args.push("--security-opt".to_string());
403        args.push("no-new-privileges".to_string());
404    }
405
406    for capability in &plan.policy.cap_drop {
407        args.push("--cap-drop".to_string());
408        args.push(capability.clone());
409    }
410
411    for capability in &plan.policy.cap_add {
412        args.push("--cap-add".to_string());
413        args.push(capability.clone());
414    }
415
416    match plan.policy.network.as_str() {
417        "off" => {
418            args.push("--network".to_string());
419            args.push("none".to_string());
420        }
421        "on" => {}
422        other => {
423            args.push("--network".to_string());
424            args.push(other.to_string());
425        }
426    }
427
428    if !plan.policy.network_allow.is_empty() {
429        args.push("--dns".to_string());
430        args.push("192.0.2.1".to_string());
431        for (hostname, ip) in &plan.policy.network_allow {
432            args.push("--add-host".to_string());
433            args.push(format!("{hostname}:{ip}"));
434        }
435    }
436
437    for port in &plan.policy.ports {
438        args.push("--publish".to_string());
439        args.push(port.clone());
440    }
441
442    match &plan.user {
443        ResolvedUser::KeepId | ResolvedUser::Default => {
444            let (uid, gid) = current_uid_gid();
445            args.push("--user".to_string());
446            args.push(format!("{uid}:{gid}"));
447        }
448        ResolvedUser::Explicit { uid, gid } => {
449            args.push("--user".to_string());
450            args.push(format!("{uid}:{gid}"));
451        }
452    }
453
454    for mount in &plan.mounts {
455        append_mount_args(args, mount)?;
456    }
457
458    for cache in &plan.caches {
459        args.push("--mount".to_string());
460        if let Some(source) = &cache.source {
461            if let Some(path) = try_resolve_host_path(source, &plan.workspace.root) {
462                args.push(format!(
463                    "type=bind,src={},target={},readonly={}",
464                    path_to_docker_str(&path),
465                    cache.target,
466                    bool_string(cache.read_only)
467                ));
468            } else {
469                args.push(format!(
470                    "type=volume,src={},target={},readonly={}",
471                    source,
472                    cache.target,
473                    bool_string(cache.read_only)
474                ));
475            }
476        } else {
477            args.push(format!(
478                "type=volume,src={},target={},readonly={}",
479                scoped_cache_name(&plan.workspace.root, &cache.name),
480                cache.target,
481                bool_string(cache.read_only)
482            ));
483        }
484    }
485
486    for secret in &plan.secrets {
487        append_secret_args(args, secret, &plan.workspace.root)?;
488    }
489
490    for variable in &plan.environment.variables {
491        args.push("--env".to_string());
492        args.push(format!("{}={}", variable.name, variable.value));
493    }
494
495    Ok(())
496}
497
/// Lifecycle state of the named reusable-session container, derived from
/// the `{{.State}}` column of `docker container ls -a`.
enum ContainerState {
    /// No container with the session name exists.
    Missing,
    /// The container exists but is not running (any non-"running" state).
    Stopped,
    /// The container is running and can accept `docker exec`.
    Running,
}
503
504fn inspect_container_state(session_name: &str) -> Result<ContainerState, SboxError> {
505    // Use `docker container ls -a` to avoid exit-code ambiguity between
506    // "container not found" and "daemon not running".
507    let output = Command::new("docker")
508        .args([
509            "container",
510            "ls",
511            "-a",
512            "--filter",
513            &format!("name=^{session_name}$"),
514            "--format",
515            "{{.State}}",
516        ])
517        .stdin(Stdio::null())
518        .stdout(Stdio::piped())
519        .stderr(Stdio::null())
520        .output()
521        .map_err(|source| SboxError::BackendUnavailable {
522            backend: "docker".to_string(),
523            source,
524        })?;
525
526    let stdout = String::from_utf8_lossy(&output.stdout);
527    let state = stdout.trim();
528    if state.is_empty() {
529        Ok(ContainerState::Missing)
530    } else if state == "running" {
531        Ok(ContainerState::Running)
532    } else {
533        Ok(ContainerState::Stopped)
534    }
535}
536
537fn validate_runtime_inputs(plan: &ExecutionPlan) -> Result<(), SboxError> {
538    for mount in &plan.mounts {
539        validate_mount_source(mount)?;
540    }
541    for secret in &plan.secrets {
542        validate_secret_source(secret, &plan.workspace.root)?;
543    }
544    Ok(())
545}
546
547fn validate_mount_source(mount: &ResolvedMount) -> Result<(), SboxError> {
548    if mount.kind != "bind" {
549        return Ok(());
550    }
551
552    let source = mount
553        .source
554        .as_ref()
555        .expect("bind mounts always resolve source");
556
557    if source.exists() {
558        return Ok(());
559    }
560
561    if mount.create {
562        // If the path looks like a file (has an extension), create an empty file.
563        // Otherwise create a directory (e.g. node_modules, .cache).
564        // Docker bind-mounts a missing source as a directory by default, which
565        // corrupts lockfiles like package-lock.json on first install.
566        if source.extension().is_some() {
567            if let Some(parent) = source.parent() {
568                fs::create_dir_all(parent).ok();
569            }
570            return fs::write(source, b"").map_err(|_| SboxError::HostPathNotFound {
571                kind: "mount source",
572                name: mount.target.clone(),
573                path: source.clone(),
574            });
575        }
576        return fs::create_dir_all(source).map_err(|_| SboxError::HostPathNotFound {
577            kind: "mount source",
578            name: mount.target.clone(),
579            path: source.clone(),
580        });
581    }
582
583    Err(SboxError::HostPathNotFound {
584        kind: "mount source",
585        name: mount.target.clone(),
586        path: source.clone(),
587    })
588}
589
590fn append_secret_args(
591    args: &mut Vec<String>,
592    secret: &ResolvedSecret,
593    workspace_root: &Path,
594) -> Result<(), SboxError> {
595    let path = validate_secret_source(secret, workspace_root)?;
596    args.push("--mount".to_string());
597    // Docker does not support relabel=private.
598    args.push(format!(
599        "type=bind,src={},target={},readonly=true",
600        path_to_docker_str(&path),
601        secret.target
602    ));
603    Ok(())
604}
605
606fn validate_secret_source(
607    secret: &ResolvedSecret,
608    workspace_root: &Path,
609) -> Result<PathBuf, SboxError> {
610    let path = try_resolve_host_path(&secret.source, workspace_root).ok_or_else(|| {
611        SboxError::UnsupportedSecretSource {
612            name: secret.name.clone(),
613            secret_source: secret.source.clone(),
614        }
615    })?;
616
617    if path.exists() {
618        Ok(path)
619    } else {
620        Err(SboxError::HostPathNotFound {
621            kind: "secret source",
622            name: secret.name.clone(),
623            path,
624        })
625    }
626}
627
628fn try_resolve_host_path(input: &str, base: &Path) -> Option<PathBuf> {
629    if input.starts_with("~/") || input == "~" {
630        let mut path = crate::platform::home_dir()?;
631        let remainder = input.strip_prefix("~/").unwrap_or("");
632        if !remainder.is_empty() {
633            path.push(remainder);
634        }
635        return Some(path);
636    }
637
638    let path = Path::new(input);
639    if path.is_absolute() {
640        return Some(path.to_path_buf());
641    }
642
643    if input.starts_with("./") || input.starts_with("../") || input.contains('/') {
644        return Some(base.join(path));
645    }
646
647    None
648}
649
650fn scoped_cache_name(workspace_root: &Path, cache_name: &str) -> String {
651    format!(
652        "sbox-cache-{}-{}",
653        stable_hash(&workspace_root.display().to_string()),
654        sanitize_volume_name(cache_name)
655    )
656}
657
/// Replace every character outside the docker-safe set (ASCII alphanumerics
/// plus `_`, `.`, `-`) with `-` so the name is a valid volume name.
fn sanitize_volume_name(name: &str) -> String {
    name.chars()
        .map(|ch| match ch {
            'a'..='z' | 'A'..='Z' | '0'..='9' | '_' | '.' | '-' => ch,
            _ => '-',
        })
        .collect()
}
669
/// 64-bit FNV-1a hash of `input`, rendered as fixed-width lowercase hex.
/// Used to derive stable, collision-resistant-enough cache volume names.
fn stable_hash(input: &str) -> String {
    const FNV_OFFSET_BASIS: u64 = 0xcbf29ce484222325;
    const FNV_PRIME: u64 = 0x100000001b3;
    let hash = input.as_bytes().iter().fold(FNV_OFFSET_BASIS, |acc, &byte| {
        (acc ^ u64::from(byte)).wrapping_mul(FNV_PRIME)
    });
    format!("{hash:016x}")
}
678
/// Render a bool as the literal `"true"`/`"false"` text that docker mount
/// specs expect for `readonly=`.
fn bool_string(value: bool) -> &'static str {
    match value {
        true => "true",
        false => "false",
    }
}
682
/// Convert a host path to a string suitable for Docker `--mount src=` / `--volume` arguments.
///
/// On Windows, `Path::display()` emits backslashes (`C:\Users\...`), but Docker expects
/// forward slashes (`/c/Users/...` for Docker Desktop with WSL2). On Unix the path is
/// returned unchanged.
///
/// **Known limitation**: UNC paths (`\\server\share\...`) are not supported by Docker
/// bind mounts and are not converted — Docker will reject them with a path error.
/// Only local drive paths (`C:\...`) and extended-length prefixes (`\\?\C:\...`) work.
fn path_to_docker_str(path: &Path) -> String {
    #[cfg(windows)]
    {
        let raw = path.display().to_string();

        // Drop the extended-length prefix \\?\ produced by canonicalize().
        let trimmed = raw.strip_prefix(r"\\?\").unwrap_or(&raw);

        // Drive-letter path: C:\... or bare C: (':' is ASCII, so byte
        // indexing and slicing at position 2 are safe).
        if trimmed.len() >= 2 && trimmed.as_bytes()[1] == b':' {
            let drive = trimmed.chars().next().unwrap_or('c').to_ascii_lowercase();
            // Everything after "C:" is "", "\", or "\rest..."
            let tail = trimmed[2..].replace('\\', "/");
            // Always yield /<drive>/... so Docker sees an absolute path.
            return if tail.starts_with('/') {
                format!("/{drive}{tail}")
            } else {
                format!("/{drive}/{tail}")
            };
        }

        // UNC path \\server\share\... — normalize separators without further
        // translation; Docker generally rejects UNC mounts anyway.
        trimmed.replace('\\', "/")
    }
    #[cfg(not(windows))]
    {
        path.display().to_string()
    }
}
723
/// Return the current process's effective uid and gid.
///
/// - Linux: parsed from `/proc/self/status` (no subprocess, always available).
/// - macOS / other Unix: falls back to spawning `id -u` / `id -g`.
/// - Windows: returns (0, 0) — Docker Desktop on Windows routes bind-mounts through WSL2
///   so the host-user mapping is handled by the Docker Desktop daemon, not by `--user`.
fn current_uid_gid() -> (u32, u32) {
    #[cfg(target_os = "linux")]
    {
        // An unreadable /proc yields an empty string, and parse_proc_id then
        // falls back to 0 (root) for both ids.
        let status = std::fs::read_to_string("/proc/self/status").unwrap_or_default();
        let uid = parse_proc_id(&status, "Uid:");
        let gid = parse_proc_id(&status, "Gid:");
        return (uid, gid);
    }
    #[cfg(all(unix, not(target_os = "linux")))]
    {
        // Warn loudly on fallback failure: uid/gid 0 means container files
        // end up root-owned on the host.
        let uid = run_id_flag("-u").unwrap_or_else(|| {
            eprintln!(
                "sbox: warning: `id -u` failed — cannot determine UID; \
                 container will run as root (uid=0). Install coreutils or run as a real user."
            );
            0
        });
        let gid = run_id_flag("-g").unwrap_or_else(|| {
            eprintln!(
                "sbox: warning: `id -g` failed — cannot determine GID; \
                 container will run as root (gid=0)."
            );
            0
        });
        return (uid, gid);
    }
    #[cfg(windows)]
    {
        (0, 0)
    }
}
761
/// Run `id <flag>` and parse its stdout as a number; `None` on any failure
/// (spawn error, non-numeric output).
#[cfg(all(unix, not(target_os = "linux")))]
fn run_id_flag(flag: &str) -> Option<u32> {
    let output = std::process::Command::new("id").arg(flag).output().ok()?;
    let text = String::from_utf8_lossy(&output.stdout);
    text.trim().parse().ok()
}
770
/// Extract the numeric field following `key` (e.g. "Uid:") from text in
/// `/proc/self/status` format. Returns 0 when the key is absent, the field
/// is missing, or it does not parse.
fn parse_proc_id(status: &str, key: &str) -> u32 {
    let Some(line) = status.lines().find(|candidate| candidate.starts_with(key)) else {
        return 0;
    };
    match line.split_whitespace().nth(1).map(str::parse) {
        Some(Ok(value)) => value,
        _ => 0,
    }
}
779
/// Build the recipe image with `docker build` unless it already exists
/// locally.
///
/// Existence is probed with `docker image inspect`; build output streams to
/// the user's terminal via inherited stdio so progress is visible.
fn ensure_built_image(
    recipe_path: &Path,
    tag: &str,
    workspace_root: &Path,
) -> Result<(), SboxError> {
    // docker image inspect exits 0 if the image exists, non-zero if not.
    // The empty --format suppresses the JSON dump; only the exit code is
    // used. NOTE(review): relies on docker accepting an empty template
    // string here — confirm against the docker CLI version in use.
    let exists_status = Command::new("docker")
        .args(["image", "inspect", "--format", "", tag])
        .current_dir(workspace_root)
        .stdin(Stdio::null())
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()
        .map_err(|source| SboxError::BackendUnavailable {
            backend: "docker".to_string(),
            source,
        })?;

    if exists_status.success() {
        return Ok(());
    }

    // Build with the workspace root as context and the recipe as Dockerfile;
    // paths go through path_to_docker_str for Windows separator handling.
    let build_status = Command::new("docker")
        .args([
            "build",
            "-t",
            tag,
            "-f",
            &path_to_docker_str(recipe_path),
            &path_to_docker_str(workspace_root),
        ])
        .current_dir(workspace_root)
        .stdin(Stdio::inherit())
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .status()
        .map_err(|source| SboxError::BackendUnavailable {
            backend: "docker".to_string(),
            source,
        })?;

    if build_status.success() {
        Ok(())
    } else {
        Err(SboxError::BackendCommandFailed {
            backend: "docker".to_string(),
            command: format!(
                "docker build -t {tag} -f {} {}",
                path_to_docker_str(recipe_path),
                path_to_docker_str(workspace_root)
            ),
            status: build_status.code().unwrap_or(1),
        })
    }
}
835
/// Map a child process's exit status onto our process exit code.
///
/// Codes outside 0-255 and signal terminations (no code at all) collapse
/// to 1.
fn status_to_exit_code(status: std::process::ExitStatus) -> ExitCode {
    let code = status
        .code()
        .and_then(|raw| u8::try_from(raw).ok())
        .unwrap_or(1);
    ExitCode::from(code)
}
842
843#[cfg(test)]
844mod tests {
845    use super::{build_run_args, current_uid_gid};
846    use crate::config::model::ExecutionMode;
847    use crate::resolve::{
848        CwdMapping, ExecutionPlan, ImageTrust, ModeSource, ProfileSource, ResolvedEnvironment,
849        ResolvedImage, ResolvedImageSource, ResolvedPolicy, ResolvedUser, ResolvedWorkspace,
850    };
851    use std::path::PathBuf;
852
    /// Minimal plan fixture: `npm install` on node:22 with network off,
    /// default user, and no mounts, caches, or secrets — individual tests
    /// mutate the fields they care about.
    fn sample_plan() -> ExecutionPlan {
        ExecutionPlan {
            command: vec!["npm".into(), "install".into()],
            command_string: "npm install".into(),
            backend: crate::config::BackendKind::Docker,
            image: ResolvedImage {
                description: "ref:node:22".into(),
                source: ResolvedImageSource::Reference("node:22".into()),
                trust: ImageTrust::MutableReference,
                verify_signature: false,
            },
            profile_name: "install".into(),
            profile_source: ProfileSource::DefaultProfile,
            mode: ExecutionMode::Sandbox,
            mode_source: ModeSource::Profile,
            workspace: ResolvedWorkspace {
                root: PathBuf::from("/project"),
                invocation_dir: PathBuf::from("/project"),
                effective_host_dir: PathBuf::from("/project"),
                mount: "/workspace".into(),
                sandbox_cwd: "/workspace".into(),
                cwd_mapping: CwdMapping::InvocationMapped,
            },
            policy: ResolvedPolicy {
                network: "off".into(),
                writable: true,
                ports: Vec::new(),
                no_new_privileges: true,
                read_only_rootfs: false,
                reuse_container: false,
                reusable_session_name: None,
                cap_drop: Vec::new(),
                cap_add: Vec::new(),
                pull_policy: None,
                network_allow: Vec::new(),
                network_allow_patterns: Vec::new(),
            },
            environment: ResolvedEnvironment {
                variables: Vec::new(),
                denied: Vec::new(),
            },
            mounts: Vec::new(),
            caches: Vec::new(),
            secrets: Vec::new(),
            user: ResolvedUser::Default,
            audit: crate::resolve::ExecutionAudit {
                install_style: false,
                trusted_image_required: false,
                sensitive_pass_through_vars: Vec::new(),
                lockfile: crate::resolve::LockfileAudit {
                    applicable: false,
                    required: false,
                    present: false,
                    expected_files: Vec::new(),
                },
                pre_run: Vec::new(),
            },
        }
    }
912
913    #[test]
914    fn docker_run_args_use_network_none_when_off() {
915        let plan = sample_plan();
916        let args = build_run_args(&plan, "node:22").expect("args should build");
917        let joined = args.join(" ");
918        assert!(joined.contains("--network none"));
919        assert!(!joined.contains("relabel"));
920    }
921
922    #[test]
923    fn docker_run_args_map_keepid_to_explicit_user() {
924        let mut plan = sample_plan();
925        plan.user = ResolvedUser::KeepId;
926        let args = build_run_args(&plan, "node:22").expect("args should build");
927        let joined = args.join(" ");
928        // Should have --user UID:GID, not --userns keep-id
929        assert!(joined.contains("--user"));
930        assert!(!joined.contains("keep-id"));
931    }
932
933    #[test]
934    fn parse_proc_id_extracts_real_uid() {
935        // /proc/self/status has lines like "Uid:\t1000\t1000\t1000\t1000"
936        let fake = "Name:\tfoo\nUid:\t1000\t1000\t1000\t1000\nGid:\t1001\t1001\t1001\t1001\n";
937        assert_eq!(super::parse_proc_id(fake, "Uid:"), 1000);
938        assert_eq!(super::parse_proc_id(fake, "Gid:"), 1001);
939    }
940
941    #[test]
942    fn current_uid_gid_returns_nonzero_for_normal_user() {
943        let (uid, _gid) = current_uid_gid();
944        // In a normal test environment we won't be root
945        // Just verify the function runs without panic and returns plausible values
946        assert!(uid < 100_000);
947    }
948
949    #[test]
950    fn metadata_hostnames_blocked_when_network_is_on() {
951        let mut plan = sample_plan();
952        plan.policy.network = "on".into();
953        let args = build_run_args(&plan, "node:22").expect("args should build");
954        let joined = args.join(" ");
955        // Every cloud metadata hostname must be sinkholes to 192.0.2.1
956        for hostname in crate::backend::podman::CLOUD_METADATA_HOSTNAMES {
957            assert!(
958                joined.contains(&format!("--add-host {hostname}:192.0.2.1")),
959                "expected metadata host {hostname} to be blocked, args: {joined}"
960            );
961        }
962    }
963
964    #[test]
965    fn metadata_hostnames_not_added_when_network_is_off() {
966        let plan = sample_plan(); // network: off by default
967        let args = build_run_args(&plan, "node:22").expect("args should build");
968        let joined = args.join(" ");
969        // With network off there is no point adding --add-host entries
970        assert!(
971            !joined.contains("metadata.google.internal"),
972            "metadata host should not appear when network is off, args: {joined}"
973        );
974    }
975
976    #[test]
977    fn network_allow_breaks_dns_and_injects_resolved_hosts() {
978        let mut plan = sample_plan();
979        plan.policy.network = "on".into();
980        plan.policy.network_allow = vec![("registry.npmjs.org".into(), "104.16.0.0".into())];
981        let args = build_run_args(&plan, "node:22").expect("args should build");
982        let joined = args.join(" ");
983        // DNS must be broken to the black-hole address
984        assert!(
985            joined.contains("--dns 192.0.2.1"),
986            "expected DNS break when network_allow is set, args: {joined}"
987        );
988        // The resolved registry host must be injected
989        assert!(
990            joined.contains("--add-host registry.npmjs.org:104.16.0.0"),
991            "expected registry host injected via --add-host, args: {joined}"
992        );
993    }
994
995    #[test]
996    fn network_on_without_network_allow_still_blocks_metadata_hosts() {
997        // Even with unrestricted network (no allow-list), metadata hosts must be blocked.
998        let mut plan = sample_plan();
999        plan.policy.network = "on".into();
1000        plan.policy.network_allow = vec![];
1001        let args = build_run_args(&plan, "node:22").expect("args should build");
1002        let joined = args.join(" ");
1003        // No DNS break (no allow-list)
1004        assert!(
1005            !joined.contains("--dns 192.0.2.1"),
1006            "DNS should not be broken without allow-list"
1007        );
1008        // But metadata hosts should still be blocked
1009        assert!(
1010            joined.contains("--add-host metadata.google.internal:192.0.2.1"),
1011            "metadata host should be blocked even without network_allow, args: {joined}"
1012        );
1013    }
1014
1015    #[test]
1016    fn denied_env_vars_not_passed_to_container() {
1017        let mut plan = sample_plan();
1018        plan.environment.denied = vec!["NPM_TOKEN".into(), "NODE_AUTH_TOKEN".into()];
1019        plan.policy.network = "on".into();
1020        let args = build_run_args(&plan, "node:22").expect("args should build");
1021        let joined = args.join(" ");
1022        // Denied vars must not appear as -e VAR=... or --env VAR=...
1023        assert!(
1024            !joined.contains("NPM_TOKEN"),
1025            "denied env var NPM_TOKEN should not appear in docker args: {joined}"
1026        );
1027    }
1028
1029    #[test]
1030    fn resolved_user_default_injects_user_flag() {
1031        // ResolvedUser::Default must inject --user UID:GID so bind-mount files are
1032        // owned by the host user rather than root in non-rootless Docker.
1033        let mut plan = sample_plan();
1034        plan.user = ResolvedUser::Default;
1035        let args = build_run_args(&plan, "node:22").expect("args should build");
1036        let joined = args.join(" ");
1037        assert!(
1038            joined.contains("--user"),
1039            "Default user must still inject --user for Docker: {joined}"
1040        );
1041        // Must NOT be keep-id (that is Podman-only)
1042        assert!(
1043            !joined.contains("keep-id"),
1044            "--user must be explicit UID:GID, not keep-id: {joined}"
1045        );
1046    }
1047
1048    // path_to_docker_str is only meaningful on Windows, but the logic is exercised
1049    // below via a cfg-gated test that checks the conversion rules directly.
1050    #[test]
1051    #[cfg(windows)]
1052    fn path_to_docker_str_converts_drive_paths() {
1053        use std::path::Path;
1054        // Standard drive path
1055        assert_eq!(
1056            path_to_docker_str(Path::new(r"C:\Users\foo\project")),
1057            "/c/Users/foo/project"
1058        );
1059        // Drive root only — must end with / so Docker treats it as absolute
1060        assert_eq!(
1061            path_to_docker_str(Path::new(r"C:\")),
1062            "/c/"
1063        );
1064        // Bare drive letter without separator
1065        assert_eq!(
1066            path_to_docker_str(Path::new("C:")),
1067            "/c/"
1068        );
1069        // Extended-length prefix stripped
1070        assert_eq!(
1071            path_to_docker_str(Path::new(r"\\?\C:\foo\bar")),
1072            "/c/foo/bar"
1073        );
1074    }
1075}