//! Docker backend for sbox (sbox/backend/docker.rs).
1use std::fs;
2use std::io::IsTerminal;
3use std::path::{Path, PathBuf};
4use std::process::{Command, ExitCode, Stdio};
5
6use crate::error::SboxError;
7use crate::resolve::{ExecutionPlan, ResolvedImageSource, ResolvedMount, ResolvedSecret, ResolvedUser};
8
9pub fn execute(plan: &ExecutionPlan) -> Result<ExitCode, SboxError> {
10    if plan.policy.reuse_container {
11        return execute_via_reusable_session(plan, false);
12    }
13
14    validate_runtime_inputs(plan)?;
15    let image = resolve_container_image(plan)?;
16    let args = build_run_args(plan, &image)?;
17
18    let mut child = Command::new("docker");
19    child.args(&args);
20    child.current_dir(&plan.workspace.effective_host_dir);
21    child.stdin(Stdio::inherit());
22    child.stdout(Stdio::inherit());
23    child.stderr(Stdio::inherit());
24
25    let status = child
26        .status()
27        .map_err(|source| SboxError::BackendUnavailable {
28            backend: "docker".to_string(),
29            source,
30        })?;
31
32    Ok(status_to_exit_code(status))
33}
34
35pub fn execute_interactive(plan: &ExecutionPlan) -> Result<ExitCode, SboxError> {
36    if plan.policy.reuse_container {
37        return execute_via_reusable_session(plan, true);
38    }
39
40    validate_runtime_inputs(plan)?;
41    let image = resolve_container_image(plan)?;
42    let tty = std::io::stdin().is_terminal() && std::io::stdout().is_terminal();
43    let args = build_run_args_with_options(plan, &image, tty)?;
44
45    let mut child = Command::new("docker");
46    child.args(&args);
47    child.current_dir(&plan.workspace.effective_host_dir);
48    child.stdin(Stdio::inherit());
49    child.stdout(Stdio::inherit());
50    child.stderr(Stdio::inherit());
51
52    let status = child
53        .status()
54        .map_err(|source| SboxError::BackendUnavailable {
55            backend: "docker".to_string(),
56            source,
57        })?;
58
59    Ok(status_to_exit_code(status))
60}
61
62fn execute_via_reusable_session(
63    plan: &ExecutionPlan,
64    interactive: bool,
65) -> Result<ExitCode, SboxError> {
66    validate_runtime_inputs(plan)?;
67    let image = resolve_container_image(plan)?;
68    let session_name = plan
69        .policy
70        .reusable_session_name
71        .as_deref()
72        .ok_or_else(|| SboxError::ReusableSandboxSessionsNotImplemented {
73            profile: plan.profile_name.clone(),
74        })?;
75
76    ensure_reusable_container(plan, &image, session_name)?;
77
78    let tty = interactive && std::io::stdin().is_terminal() && std::io::stdout().is_terminal();
79    let mut child = Command::new("docker");
80    child.args(build_exec_args(plan, session_name, tty));
81    child.current_dir(&plan.workspace.effective_host_dir);
82    child.stdin(Stdio::inherit());
83    child.stdout(Stdio::inherit());
84    child.stderr(Stdio::inherit());
85
86    let status = child
87        .status()
88        .map_err(|source| SboxError::BackendUnavailable {
89            backend: "docker".to_string(),
90            source,
91        })?;
92
93    Ok(status_to_exit_code(status))
94}
95
96fn resolve_container_image(plan: &ExecutionPlan) -> Result<String, SboxError> {
97    match &plan.image.source {
98        ResolvedImageSource::Reference(reference) => Ok(reference.clone()),
99        ResolvedImageSource::Build { recipe_path, tag } => {
100            ensure_built_image(recipe_path, tag, &plan.workspace.root)?;
101            Ok(tag.clone())
102        }
103    }
104}
105
/// Build `docker run` arguments for a non-interactive (no TTY) invocation.
///
/// Thin convenience wrapper over [`build_run_args_with_options`] with
/// `tty = false`.
pub fn build_run_args(plan: &ExecutionPlan, image: &str) -> Result<Vec<String>, SboxError> {
    build_run_args_with_options(plan, image, false)
}
109
110pub fn build_run_args_with_options(
111    plan: &ExecutionPlan,
112    image: &str,
113    tty: bool,
114) -> Result<Vec<String>, SboxError> {
115    let mut args = vec!["run".to_string(), "--rm".to_string(), "-i".to_string()];
116
117    if tty {
118        args.push("-t".to_string());
119    }
120
121    args.push("--workdir".to_string());
122    args.push(plan.workspace.sandbox_cwd.clone());
123
124    if plan.policy.read_only_rootfs {
125        args.push("--read-only".to_string());
126    }
127
128    if plan.policy.no_new_privileges {
129        args.push("--security-opt".to_string());
130        args.push("no-new-privileges".to_string());
131    }
132
133    for capability in &plan.policy.cap_drop {
134        args.push("--cap-drop".to_string());
135        args.push(capability.clone());
136    }
137
138    for capability in &plan.policy.cap_add {
139        args.push("--cap-add".to_string());
140        args.push(capability.clone());
141    }
142
143    match plan.policy.network.as_str() {
144        "off" => {
145            args.push("--network".to_string());
146            args.push("none".to_string());
147        }
148        "on" => {}
149        other => {
150            args.push("--network".to_string());
151            args.push(other.to_string());
152        }
153    }
154
155    if !plan.policy.network_allow.is_empty() {
156        args.push("--dns".to_string());
157        args.push("192.0.2.1".to_string());
158        for (hostname, ip) in &plan.policy.network_allow {
159            args.push("--add-host".to_string());
160            args.push(format!("{hostname}:{ip}"));
161        }
162    }
163
164    for port in &plan.policy.ports {
165        args.push("--publish".to_string());
166        args.push(port.clone());
167    }
168
169    // Docker has no --userns keep-id; map to explicit uid:gid using current process identity.
170    match &plan.user {
171        ResolvedUser::KeepId => {
172            let (uid, gid) = current_uid_gid();
173            args.push("--user".to_string());
174            args.push(format!("{uid}:{gid}"));
175        }
176        ResolvedUser::Explicit { uid, gid } => {
177            args.push("--user".to_string());
178            args.push(format!("{uid}:{gid}"));
179        }
180        ResolvedUser::Default => {}
181    }
182
183    for mount in &plan.mounts {
184        append_mount_args(&mut args, mount)?;
185    }
186
187    for cache in &plan.caches {
188        args.push("--mount".to_string());
189        if let Some(source) = &cache.source {
190            if let Some(path) = try_resolve_host_path(source, &plan.workspace.root) {
191                args.push(format!(
192                    "type=bind,src={},target={},readonly={}",
193                    path.display(),
194                    cache.target,
195                    bool_string(cache.read_only)
196                ));
197            } else {
198                args.push(format!(
199                    "type=volume,src={},target={},readonly={}",
200                    source,
201                    cache.target,
202                    bool_string(cache.read_only)
203                ));
204            }
205        } else {
206            args.push(format!(
207                "type=volume,src={},target={},readonly={}",
208                scoped_cache_name(&plan.workspace.root, &cache.name),
209                cache.target,
210                bool_string(cache.read_only)
211            ));
212        }
213    }
214
215    for secret in &plan.secrets {
216        append_secret_args(&mut args, secret, &plan.workspace.root)?;
217    }
218
219    for variable in &plan.environment.variables {
220        args.push("--env".to_string());
221        args.push(format!("{}={}", variable.name, variable.value));
222    }
223
224    if let Some(pull_policy) = &plan.policy.pull_policy {
225        args.push("--pull".to_string());
226        args.push(pull_policy.clone());
227    }
228
229    args.push(image.to_string());
230    args.extend(plan.command.iter().cloned());
231
232    Ok(args)
233}
234
235fn append_mount_args(args: &mut Vec<String>, mount: &ResolvedMount) -> Result<(), SboxError> {
236    match mount.kind.as_str() {
237        "bind" => {
238            let source = mount
239                .source
240                .as_ref()
241                .expect("bind mounts always resolve source");
242            args.push("--mount".to_string());
243            // Docker does not support relabel=private (Podman/SELinux extension).
244            args.push(format!(
245                "type=bind,src={},target={},readonly={}",
246                source.display(),
247                mount.target,
248                bool_string(mount.read_only)
249            ));
250            Ok(())
251        }
252        "tmpfs" => {
253            args.push("--tmpfs".to_string());
254            let spec = if mount.read_only {
255                format!("{}:ro", mount.target)
256            } else {
257                mount.target.clone()
258            };
259            args.push(spec);
260            Ok(())
261        }
262        "mask" => {
263            args.push("--mount".to_string());
264            args.push(format!(
265                "type=bind,src=/dev/null,target={},readonly=true",
266                mount.target
267            ));
268            Ok(())
269        }
270        other => Err(SboxError::UnsupportedMountType {
271            mount_type: other.to_string(),
272        }),
273    }
274}
275
276fn ensure_reusable_container(
277    plan: &ExecutionPlan,
278    image: &str,
279    session_name: &str,
280) -> Result<(), SboxError> {
281    match inspect_container_state(session_name)? {
282        ContainerState::Running => return Ok(()),
283        ContainerState::Stopped => {
284            let status = Command::new("docker")
285                .args(["start", session_name])
286                .stdin(Stdio::null())
287                .stdout(Stdio::null())
288                .stderr(Stdio::null())
289                .status()
290                .map_err(|source| SboxError::BackendUnavailable {
291                    backend: "docker".to_string(),
292                    source,
293                })?;
294
295            if status.success() {
296                return Ok(());
297            }
298
299            return Err(SboxError::BackendCommandFailed {
300                backend: "docker".to_string(),
301                command: format!("docker start {session_name}"),
302                status: status.code().unwrap_or(1),
303            });
304        }
305        ContainerState::Missing => {}
306    }
307
308    let mut create_args = vec![
309        "create".to_string(),
310        "--name".to_string(),
311        session_name.to_string(),
312        "--workdir".to_string(),
313        plan.workspace.sandbox_cwd.clone(),
314    ];
315    append_container_settings(&mut create_args, plan)?;
316    create_args.push(image.to_string());
317    create_args.push("sleep".to_string());
318    create_args.push("infinity".to_string());
319
320    let create_status = Command::new("docker")
321        .args(&create_args)
322        .current_dir(&plan.workspace.effective_host_dir)
323        .stdin(Stdio::null())
324        .stdout(Stdio::null())
325        .stderr(Stdio::null())
326        .status()
327        .map_err(|source| SboxError::BackendUnavailable {
328            backend: "docker".to_string(),
329            source,
330        })?;
331
332    if !create_status.success() {
333        return Err(SboxError::BackendCommandFailed {
334            backend: "docker".to_string(),
335            command: format!("docker create --name {session_name} ..."),
336            status: create_status.code().unwrap_or(1),
337        });
338    }
339
340    let start_status = Command::new("docker")
341        .args(["start", session_name])
342        .stdin(Stdio::null())
343        .stdout(Stdio::null())
344        .stderr(Stdio::null())
345        .status()
346        .map_err(|source| SboxError::BackendUnavailable {
347            backend: "docker".to_string(),
348            source,
349        })?;
350
351    if start_status.success() {
352        Ok(())
353    } else {
354        Err(SboxError::BackendCommandFailed {
355            backend: "docker".to_string(),
356            command: format!("docker start {session_name}"),
357            status: start_status.code().unwrap_or(1),
358        })
359    }
360}
361
362fn build_exec_args(plan: &ExecutionPlan, session_name: &str, tty: bool) -> Vec<String> {
363    let mut args = vec!["exec".to_string(), "-i".to_string()];
364    if tty {
365        args.push("-t".to_string());
366    }
367
368    args.push("--workdir".to_string());
369    args.push(plan.workspace.sandbox_cwd.clone());
370
371    for variable in &plan.environment.variables {
372        args.push("--env".to_string());
373        args.push(format!("{}={}", variable.name, variable.value));
374    }
375
376    args.push(session_name.to_string());
377    args.extend(plan.command.iter().cloned());
378    args
379}
380
/// Append shared container configuration flags to `args` (used when creating
/// the reusable session container with `docker create`).
///
/// Mirrors the policy handling of `build_run_args_with_options`: rootfs
/// read-only flag, no-new-privileges, capability changes, network policy,
/// published ports, user mapping, mounts, caches, secrets and environment.
fn append_container_settings(
    args: &mut Vec<String>,
    plan: &ExecutionPlan,
) -> Result<(), SboxError> {
    if plan.policy.read_only_rootfs {
        args.push("--read-only".to_string());
    }

    if plan.policy.no_new_privileges {
        args.push("--security-opt".to_string());
        args.push("no-new-privileges".to_string());
    }

    for capability in &plan.policy.cap_drop {
        args.push("--cap-drop".to_string());
        args.push(capability.clone());
    }

    for capability in &plan.policy.cap_add {
        args.push("--cap-add".to_string());
        args.push(capability.clone());
    }

    // "off" -> Docker's `none` network, "on" -> default network, anything
    // else is passed through as a Docker network name.
    match plan.policy.network.as_str() {
        "off" => {
            args.push("--network".to_string());
            args.push("none".to_string());
        }
        "on" => {}
        other => {
            args.push("--network".to_string());
            args.push(other.to_string());
        }
    }

    // Pin allow-listed hosts via --add-host and point DNS at 192.0.2.1
    // (TEST-NET-1, non-routable) so unlisted names should fail to resolve.
    if !plan.policy.network_allow.is_empty() {
        args.push("--dns".to_string());
        args.push("192.0.2.1".to_string());
        for (hostname, ip) in &plan.policy.network_allow {
            args.push("--add-host".to_string());
            args.push(format!("{hostname}:{ip}"));
        }
    }

    for port in &plan.policy.ports {
        args.push("--publish".to_string());
        args.push(port.clone());
    }

    // Docker lacks --userns keep-id; KeepId maps to the current uid:gid.
    match &plan.user {
        ResolvedUser::KeepId => {
            let (uid, gid) = current_uid_gid();
            args.push("--user".to_string());
            args.push(format!("{uid}:{gid}"));
        }
        ResolvedUser::Explicit { uid, gid } => {
            args.push("--user".to_string());
            args.push(format!("{uid}:{gid}"));
        }
        ResolvedUser::Default => {}
    }

    for mount in &plan.mounts {
        append_mount_args(args, mount)?;
    }

    // Caches: bind-mount path-like sources; otherwise use named volumes
    // (workspace-scoped when no explicit source is given).
    for cache in &plan.caches {
        args.push("--mount".to_string());
        if let Some(source) = &cache.source {
            if let Some(path) = try_resolve_host_path(source, &plan.workspace.root) {
                args.push(format!(
                    "type=bind,src={},target={},readonly={}",
                    path.display(),
                    cache.target,
                    bool_string(cache.read_only)
                ));
            } else {
                args.push(format!(
                    "type=volume,src={},target={},readonly={}",
                    source,
                    cache.target,
                    bool_string(cache.read_only)
                ));
            }
        } else {
            args.push(format!(
                "type=volume,src={},target={},readonly={}",
                scoped_cache_name(&plan.workspace.root, &cache.name),
                cache.target,
                bool_string(cache.read_only)
            ));
        }
    }

    for secret in &plan.secrets {
        append_secret_args(args, secret, &plan.workspace.root)?;
    }

    for variable in &plan.environment.variables {
        args.push("--env".to_string());
        args.push(format!("{}={}", variable.name, variable.value));
    }

    Ok(())
}
486
/// State of the reusable session container as reported by
/// `docker container ls -a --format {{.State}}`.
enum ContainerState {
    /// No container with the session name was listed.
    Missing,
    /// Listed, but in a state other than `running`.
    Stopped,
    /// Listed with state `running`.
    Running,
}
492
493fn inspect_container_state(session_name: &str) -> Result<ContainerState, SboxError> {
494    // Use `docker container ls -a` to avoid exit-code ambiguity between
495    // "container not found" and "daemon not running".
496    let output = Command::new("docker")
497        .args([
498            "container",
499            "ls",
500            "-a",
501            "--filter",
502            &format!("name=^{session_name}$"),
503            "--format",
504            "{{.State}}",
505        ])
506        .stdin(Stdio::null())
507        .stdout(Stdio::piped())
508        .stderr(Stdio::null())
509        .output()
510        .map_err(|source| SboxError::BackendUnavailable {
511            backend: "docker".to_string(),
512            source,
513        })?;
514
515    let stdout = String::from_utf8_lossy(&output.stdout);
516    let state = stdout.trim();
517    if state.is_empty() {
518        Ok(ContainerState::Missing)
519    } else if state == "running" {
520        Ok(ContainerState::Running)
521    } else {
522        Ok(ContainerState::Stopped)
523    }
524}
525
526fn validate_runtime_inputs(plan: &ExecutionPlan) -> Result<(), SboxError> {
527    for mount in &plan.mounts {
528        validate_mount_source(mount)?;
529    }
530    for secret in &plan.secrets {
531        validate_secret_source(secret, &plan.workspace.root)?;
532    }
533    Ok(())
534}
535
536fn validate_mount_source(mount: &ResolvedMount) -> Result<(), SboxError> {
537    if mount.kind != "bind" {
538        return Ok(());
539    }
540
541    let source = mount
542        .source
543        .as_ref()
544        .expect("bind mounts always resolve source");
545
546    if source.exists() {
547        return Ok(());
548    }
549
550    if mount.create {
551        return fs::create_dir_all(source).map_err(|_| SboxError::HostPathNotFound {
552            kind: "mount source",
553            name: mount.target.clone(),
554            path: source.clone(),
555        });
556    }
557
558    Err(SboxError::HostPathNotFound {
559        kind: "mount source",
560        name: mount.target.clone(),
561        path: source.clone(),
562    })
563}
564
565fn append_secret_args(
566    args: &mut Vec<String>,
567    secret: &ResolvedSecret,
568    workspace_root: &Path,
569) -> Result<(), SboxError> {
570    let path = validate_secret_source(secret, workspace_root)?;
571    args.push("--mount".to_string());
572    // Docker does not support relabel=private.
573    args.push(format!(
574        "type=bind,src={},target={},readonly=true",
575        path.display(),
576        secret.target
577    ));
578    Ok(())
579}
580
581fn validate_secret_source(
582    secret: &ResolvedSecret,
583    workspace_root: &Path,
584) -> Result<PathBuf, SboxError> {
585    let path = try_resolve_host_path(&secret.source, workspace_root).ok_or_else(|| {
586        SboxError::UnsupportedSecretSource {
587            name: secret.name.clone(),
588            secret_source: secret.source.clone(),
589        }
590    })?;
591
592    if path.exists() {
593        Ok(path)
594    } else {
595        Err(SboxError::HostPathNotFound {
596            kind: "secret source",
597            name: secret.name.clone(),
598            path,
599        })
600    }
601}
602
/// Interpret `input` as a host path if it looks like one.
///
/// `~` / `~/...` expand against `$HOME`; absolute paths pass through;
/// relative inputs containing a separator resolve against `base`. Bare
/// names (no separator) return `None` — they are not treated as paths.
fn try_resolve_host_path(input: &str, base: &Path) -> Option<PathBuf> {
    if input == "~" || input.starts_with("~/") {
        let mut resolved = PathBuf::from(std::env::var_os("HOME")?);
        if let Some(rest) = input.strip_prefix("~/") {
            if !rest.is_empty() {
                resolved.push(rest);
            }
        }
        return Some(resolved);
    }

    let candidate = Path::new(input);
    if candidate.is_absolute() {
        return Some(candidate.to_path_buf());
    }

    let looks_like_path =
        input.starts_with("./") || input.starts_with("../") || input.contains('/');
    looks_like_path.then(|| base.join(candidate))
}
625
626fn scoped_cache_name(workspace_root: &Path, cache_name: &str) -> String {
627    format!(
628        "sbox-cache-{}-{}",
629        stable_hash(&workspace_root.display().to_string()),
630        sanitize_volume_name(cache_name)
631    )
632}
633
/// Replace every character outside `[A-Za-z0-9_.-]` with `-` so the result
/// is usable as a Docker volume name component.
fn sanitize_volume_name(name: &str) -> String {
    let keep = |ch: char| ch.is_ascii_alphanumeric() || matches!(ch, '_' | '.' | '-');
    name.chars().map(|ch| if keep(ch) { ch } else { '-' }).collect()
}
645
/// 64-bit FNV-1a hash rendered as 16 lowercase hex digits.
///
/// Used instead of the std hasher because the result must be stable across
/// runs and builds (it is embedded in Docker volume names).
fn stable_hash(input: &str) -> String {
    const FNV_OFFSET_BASIS: u64 = 0xcbf29ce484222325;
    const FNV_PRIME: u64 = 0x100000001b3;
    let hash = input.as_bytes().iter().fold(FNV_OFFSET_BASIS, |acc, &byte| {
        (acc ^ u64::from(byte)).wrapping_mul(FNV_PRIME)
    });
    format!("{hash:016x}")
}
654
/// Render a bool as the literal `"true"`/`"false"` used in docker
/// `--mount ...,readonly=...` specs.
fn bool_string(value: bool) -> &'static str {
    match value {
        true => "true",
        false => "false",
    }
}
658
659/// Read the current process's effective uid and gid from /proc/self/status.
660/// Falls back to (0, 0) on any read failure (should not happen on Linux).
661fn current_uid_gid() -> (u32, u32) {
662    let status = std::fs::read_to_string("/proc/self/status").unwrap_or_default();
663    let uid = parse_proc_id(&status, "Uid:");
664    let gid = parse_proc_id(&status, "Gid:");
665    (uid, gid)
666}
667
/// Extract the first numeric field after `key` from /proc/self/status text.
///
/// Lines look like `Uid:\t1000\t1000\t1000\t1000`; the first number after
/// the key is the real id. Missing key or unparsable field yields 0.
fn parse_proc_id(status: &str, key: &str) -> u32 {
    for line in status.lines() {
        if !line.starts_with(key) {
            continue;
        }
        return line
            .split_whitespace()
            .nth(1)
            .and_then(|field| field.parse().ok())
            .unwrap_or(0);
    }
    0
}
676
677fn ensure_built_image(recipe_path: &Path, tag: &str, workspace_root: &Path) -> Result<(), SboxError> {
678    // docker image inspect exits 0 if the image exists, non-zero if not.
679    let exists_status = Command::new("docker")
680        .args(["image", "inspect", "--format", "", tag])
681        .current_dir(workspace_root)
682        .stdin(Stdio::null())
683        .stdout(Stdio::null())
684        .stderr(Stdio::null())
685        .status()
686        .map_err(|source| SboxError::BackendUnavailable {
687            backend: "docker".to_string(),
688            source,
689        })?;
690
691    if exists_status.success() {
692        return Ok(());
693    }
694
695    let build_status = Command::new("docker")
696        .args([
697            "build",
698            "-t",
699            tag,
700            "-f",
701            &recipe_path.display().to_string(),
702            &workspace_root.display().to_string(),
703        ])
704        .current_dir(workspace_root)
705        .stdin(Stdio::inherit())
706        .stdout(Stdio::inherit())
707        .stderr(Stdio::inherit())
708        .status()
709        .map_err(|source| SboxError::BackendUnavailable {
710            backend: "docker".to_string(),
711            source,
712        })?;
713
714    if build_status.success() {
715        Ok(())
716    } else {
717        Err(SboxError::BackendCommandFailed {
718            backend: "docker".to_string(),
719            command: format!(
720                "docker build -t {tag} -f {} {}",
721                recipe_path.display(),
722                workspace_root.display()
723            ),
724            status: build_status.code().unwrap_or(1),
725        })
726    }
727}
728
/// Map a child process's exit status onto this process's `ExitCode`.
///
/// Codes outside 0..=255 and signal terminations (no code) collapse to 1.
fn status_to_exit_code(status: std::process::ExitStatus) -> ExitCode {
    let code = status
        .code()
        .and_then(|code| u8::try_from(code).ok())
        .unwrap_or(1);
    ExitCode::from(code)
}
735
#[cfg(test)]
mod tests {
    use super::{build_run_args, current_uid_gid};
    use crate::config::model::ExecutionMode;
    use crate::resolve::{
        CwdMapping, ExecutionPlan, ImageTrust, ModeSource, ProfileSource,
        ResolvedEnvironment, ResolvedImage, ResolvedImageSource, ResolvedPolicy, ResolvedUser,
        ResolvedWorkspace,
    };
    use std::path::PathBuf;

    // Minimal fixture: docker backend, sandbox mode, network "off", default
    // user, and no mounts/caches/secrets. Tests tweak individual fields.
    fn sample_plan() -> ExecutionPlan {
        ExecutionPlan {
            command: vec!["npm".into(), "install".into()],
            command_string: "npm install".into(),
            backend: crate::config::BackendKind::Docker,
            image: ResolvedImage {
                description: "ref:node:22".into(),
                source: ResolvedImageSource::Reference("node:22".into()),
                trust: ImageTrust::MutableReference,
                verify_signature: false,
            },
            profile_name: "install".into(),
            profile_source: ProfileSource::DefaultProfile,
            mode: ExecutionMode::Sandbox,
            mode_source: ModeSource::Profile,
            workspace: ResolvedWorkspace {
                root: PathBuf::from("/project"),
                invocation_dir: PathBuf::from("/project"),
                effective_host_dir: PathBuf::from("/project"),
                mount: "/workspace".into(),
                sandbox_cwd: "/workspace".into(),
                cwd_mapping: CwdMapping::InvocationMapped,
            },
            policy: ResolvedPolicy {
                network: "off".into(),
                writable: true,
                ports: Vec::new(),
                no_new_privileges: true,
                read_only_rootfs: false,
                reuse_container: false,
                reusable_session_name: None,
                cap_drop: Vec::new(),
                cap_add: Vec::new(),
                pull_policy: None,
                network_allow: Vec::new(),
                network_allow_patterns: Vec::new(),
            },
            environment: ResolvedEnvironment {
                variables: Vec::new(),
                denied: Vec::new(),
            },
            mounts: Vec::new(),
            caches: Vec::new(),
            secrets: Vec::new(),
            user: ResolvedUser::Default,
            audit: crate::resolve::ExecutionAudit {
                install_style: false,
                trusted_image_required: false,
                sensitive_pass_through_vars: Vec::new(),
                lockfile: crate::resolve::LockfileAudit {
                    applicable: false,
                    required: false,
                    present: false,
                    expected_files: Vec::new(),
                },
                pre_run: Vec::new(),
            },
        }
    }

    // network "off" must map to `--network none`, and no Podman-only
    // relabel options may leak into the docker args.
    #[test]
    fn docker_run_args_use_network_none_when_off() {
        let plan = sample_plan();
        let args = build_run_args(&plan, "node:22").expect("args should build");
        let joined = args.join(" ");
        assert!(joined.contains("--network none"));
        assert!(!joined.contains("relabel"));
    }

    // KeepId has no direct docker equivalent; it must be translated to an
    // explicit --user uid:gid pair.
    #[test]
    fn docker_run_args_map_keepid_to_explicit_user() {
        let mut plan = sample_plan();
        plan.user = ResolvedUser::KeepId;
        let args = build_run_args(&plan, "node:22").expect("args should build");
        let joined = args.join(" ");
        // Should have --user UID:GID, not --userns keep-id
        assert!(joined.contains("--user"));
        assert!(!joined.contains("keep-id"));
    }

    // Parsing uses a synthetic status blob so the test does not depend on
    // the real /proc filesystem.
    #[test]
    fn parse_proc_id_extracts_real_uid() {
        // /proc/self/status has lines like "Uid:\t1000\t1000\t1000\t1000"
        let fake = "Name:\tfoo\nUid:\t1000\t1000\t1000\t1000\nGid:\t1001\t1001\t1001\t1001\n";
        assert_eq!(super::parse_proc_id(fake, "Uid:"), 1000);
        assert_eq!(super::parse_proc_id(fake, "Gid:"), 1001);
    }

    #[test]
    fn current_uid_gid_returns_nonzero_for_normal_user() {
        let (uid, _gid) = current_uid_gid();
        // In a normal test environment we won't be root
        // Just verify the function runs without panic and returns plausible values
        assert!(uid < 100_000);
    }
}