1use std::fs;
2use std::io::IsTerminal;
3use std::path::{Path, PathBuf};
4use std::process::{Command, ExitCode, Stdio};
5
6use crate::backend::podman::CLOUD_METADATA_HOSTNAMES;
7
8use crate::error::SboxError;
9use crate::resolve::{
10 ExecutionPlan, ResolvedImageSource, ResolvedMount, ResolvedSecret, ResolvedUser,
11};
12
13pub fn execute(plan: &ExecutionPlan) -> Result<ExitCode, SboxError> {
14 if plan.policy.reuse_container {
15 return execute_via_reusable_session(plan, false);
16 }
17
18 validate_runtime_inputs(plan)?;
19 let image = resolve_container_image(plan)?;
20 let args = build_run_args(plan, &image)?;
21
22 let mut child = Command::new("docker");
23 child.args(&args);
24 child.current_dir(&plan.workspace.effective_host_dir);
25 child.stdin(Stdio::inherit());
26 child.stdout(Stdio::inherit());
27 child.stderr(Stdio::inherit());
28
29 let status = child
30 .status()
31 .map_err(|source| SboxError::BackendUnavailable {
32 backend: "docker".to_string(),
33 source,
34 })?;
35
36 Ok(status_to_exit_code(status))
37}
38
39pub fn execute_interactive(plan: &ExecutionPlan) -> Result<ExitCode, SboxError> {
40 if plan.policy.reuse_container {
41 return execute_via_reusable_session(plan, true);
42 }
43
44 validate_runtime_inputs(plan)?;
45 let image = resolve_container_image(plan)?;
46 let tty = std::io::stdin().is_terminal() && std::io::stdout().is_terminal();
47 let args = build_run_args_with_options(plan, &image, tty)?;
48
49 let mut child = Command::new("docker");
50 child.args(&args);
51 child.current_dir(&plan.workspace.effective_host_dir);
52 child.stdin(Stdio::inherit());
53 child.stdout(Stdio::inherit());
54 child.stderr(Stdio::inherit());
55
56 let status = child
57 .status()
58 .map_err(|source| SboxError::BackendUnavailable {
59 backend: "docker".to_string(),
60 source,
61 })?;
62
63 Ok(status_to_exit_code(status))
64}
65
66fn execute_via_reusable_session(
67 plan: &ExecutionPlan,
68 interactive: bool,
69) -> Result<ExitCode, SboxError> {
70 validate_runtime_inputs(plan)?;
71 let image = resolve_container_image(plan)?;
72 let session_name = plan
73 .policy
74 .reusable_session_name
75 .as_deref()
76 .ok_or_else(|| SboxError::ReusableSandboxSessionsNotImplemented {
77 profile: plan.profile_name.clone(),
78 })?;
79
80 ensure_reusable_container(plan, &image, session_name)?;
81
82 let tty = interactive && std::io::stdin().is_terminal() && std::io::stdout().is_terminal();
83 let mut child = Command::new("docker");
84 child.args(build_exec_args(plan, session_name, tty));
85 child.current_dir(&plan.workspace.effective_host_dir);
86 child.stdin(Stdio::inherit());
87 child.stdout(Stdio::inherit());
88 child.stderr(Stdio::inherit());
89
90 let status = child
91 .status()
92 .map_err(|source| SboxError::BackendUnavailable {
93 backend: "docker".to_string(),
94 source,
95 })?;
96
97 Ok(status_to_exit_code(status))
98}
99
100fn resolve_container_image(plan: &ExecutionPlan) -> Result<String, SboxError> {
101 match &plan.image.source {
102 ResolvedImageSource::Reference(reference) => Ok(reference.clone()),
103 ResolvedImageSource::Build { recipe_path, tag } => {
104 ensure_built_image(recipe_path, tag, &plan.workspace.root)?;
105 Ok(tag.clone())
106 }
107 }
108}
109
110pub fn build_run_args(plan: &ExecutionPlan, image: &str) -> Result<Vec<String>, SboxError> {
111 build_run_args_with_options(plan, image, false)
112}
113
/// Builds the complete `docker run` argument vector for a one-shot run.
///
/// Flag groups are emitted in order: lifecycle (`--rm -i`, optional `-t`),
/// workdir, rootfs/privilege hardening, capabilities, network policy,
/// allow-listed hosts, cloud-metadata blackholing, published ports, user
/// mapping, mounts/caches/secrets, environment, pull policy — then the
/// image and finally the command (docker treats everything after the image
/// as the command to run).
pub fn build_run_args_with_options(
    plan: &ExecutionPlan,
    image: &str,
    tty: bool,
) -> Result<Vec<String>, SboxError> {
    // --rm: dispose of the container afterwards; -i: keep stdin open so
    // piped input reaches the command.
    let mut args = vec!["run".to_string(), "--rm".to_string(), "-i".to_string()];

    if tty {
        args.push("-t".to_string());
    }

    args.push("--workdir".to_string());
    args.push(plan.workspace.sandbox_cwd.clone());

    if plan.policy.read_only_rootfs {
        args.push("--read-only".to_string());
    }

    if plan.policy.no_new_privileges {
        args.push("--security-opt".to_string());
        args.push("no-new-privileges".to_string());
    }

    for capability in &plan.policy.cap_drop {
        args.push("--cap-drop".to_string());
        args.push(capability.clone());
    }

    for capability in &plan.policy.cap_add {
        args.push("--cap-add".to_string());
        args.push(capability.clone());
    }

    // "off" disables networking entirely, "on" keeps docker's default
    // network, anything else names a specific docker network.
    match plan.policy.network.as_str() {
        "off" => {
            args.push("--network".to_string());
            args.push("none".to_string());
        }
        "on" => {}
        other => {
            args.push("--network".to_string());
            args.push(other.to_string());
        }
    }

    // With an allow-list, DNS is pointed at a blackhole (192.0.2.1 is
    // TEST-NET-1, non-routable) so only the explicitly injected
    // host -> IP mappings resolve inside the container.
    if !plan.policy.network_allow.is_empty() {
        args.push("--dns".to_string());
        args.push("192.0.2.1".to_string());
        for (hostname, ip) in &plan.policy.network_allow {
            args.push("--add-host".to_string());
            args.push(format!("{hostname}:{ip}"));
        }
    }

    // Blackhole cloud metadata endpoints whenever any networking is
    // enabled, even without an allow-list.
    if plan.policy.network != "off" {
        for hostname in CLOUD_METADATA_HOSTNAMES {
            args.push("--add-host".to_string());
            args.push(format!("{hostname}:192.0.2.1"));
        }
    }

    for port in &plan.policy.ports {
        args.push("--publish".to_string());
        args.push(port.clone());
    }

    // KeepId maps the host's uid/gid into the container; Explicit uses the
    // configured ids; Default leaves the image's user untouched.
    match &plan.user {
        ResolvedUser::KeepId => {
            let (uid, gid) = current_uid_gid();
            args.push("--user".to_string());
            args.push(format!("{uid}:{gid}"));
        }
        ResolvedUser::Explicit { uid, gid } => {
            args.push("--user".to_string());
            args.push(format!("{uid}:{gid}"));
        }
        ResolvedUser::Default => {}
    }

    for mount in &plan.mounts {
        append_mount_args(&mut args, mount)?;
    }

    // Caches: a path-like source becomes a bind mount; a non-path source is
    // used as a named volume; a cache without a source gets an anonymous
    // volume scoped to the workspace root.
    for cache in &plan.caches {
        args.push("--mount".to_string());
        if let Some(source) = &cache.source {
            if let Some(path) = try_resolve_host_path(source, &plan.workspace.root) {
                args.push(format!(
                    "type=bind,src={},target={},readonly={}",
                    path.display(),
                    cache.target,
                    bool_string(cache.read_only)
                ));
            } else {
                args.push(format!(
                    "type=volume,src={},target={},readonly={}",
                    source,
                    cache.target,
                    bool_string(cache.read_only)
                ));
            }
        } else {
            args.push(format!(
                "type=volume,src={},target={},readonly={}",
                scoped_cache_name(&plan.workspace.root, &cache.name),
                cache.target,
                bool_string(cache.read_only)
            ));
        }
    }

    for secret in &plan.secrets {
        append_secret_args(&mut args, secret, &plan.workspace.root)?;
    }

    for variable in &plan.environment.variables {
        args.push("--env".to_string());
        args.push(format!("{}={}", variable.name, variable.value));
    }

    if let Some(pull_policy) = &plan.policy.pull_policy {
        args.push("--pull".to_string());
        args.push(pull_policy.clone());
    }

    args.push(image.to_string());
    args.extend(plan.command.iter().cloned());

    Ok(args)
}
245
246fn append_mount_args(args: &mut Vec<String>, mount: &ResolvedMount) -> Result<(), SboxError> {
247 match mount.kind.as_str() {
248 "bind" => {
249 let source = mount
250 .source
251 .as_ref()
252 .expect("bind mounts always resolve source");
253 args.push("--mount".to_string());
254 args.push(format!(
256 "type=bind,src={},target={},readonly={}",
257 source.display(),
258 mount.target,
259 bool_string(mount.read_only)
260 ));
261 Ok(())
262 }
263 "tmpfs" => {
264 args.push("--tmpfs".to_string());
265 let spec = if mount.read_only {
266 format!("{}:ro", mount.target)
267 } else {
268 mount.target.clone()
269 };
270 args.push(spec);
271 Ok(())
272 }
273 "mask" => {
274 args.push("--mount".to_string());
275 args.push(format!(
276 "type=bind,src=/dev/null,target={},readonly=true",
277 mount.target
278 ));
279 Ok(())
280 }
281 other => Err(SboxError::UnsupportedMountType {
282 mount_type: other.to_string(),
283 }),
284 }
285}
286
/// Makes sure the named reusable session container exists and is running:
/// no-op when already running, `docker start` when stopped, otherwise
/// `docker create` (kept alive via `sleep infinity`) followed by
/// `docker start`.
fn ensure_reusable_container(
    plan: &ExecutionPlan,
    image: &str,
    session_name: &str,
) -> Result<(), SboxError> {
    match inspect_container_state(session_name)? {
        ContainerState::Running => return Ok(()),
        ContainerState::Stopped => {
            // Restart the existing container; its settings were baked in at
            // create time.
            let status = Command::new("docker")
                .args(["start", session_name])
                .stdin(Stdio::null())
                .stdout(Stdio::null())
                .stderr(Stdio::null())
                .status()
                .map_err(|source| SboxError::BackendUnavailable {
                    backend: "docker".to_string(),
                    source,
                })?;

            if status.success() {
                return Ok(());
            }

            return Err(SboxError::BackendCommandFailed {
                backend: "docker".to_string(),
                command: format!("docker start {session_name}"),
                status: status.code().unwrap_or(1),
            });
        }
        ContainerState::Missing => {}
    }

    // Container does not exist yet: create it with the full sandbox policy
    // applied, then start it.
    let mut create_args = vec![
        "create".to_string(),
        "--name".to_string(),
        session_name.to_string(),
        "--workdir".to_string(),
        plan.workspace.sandbox_cwd.clone(),
    ];
    append_container_settings(&mut create_args, plan)?;
    create_args.push(image.to_string());
    // `sleep infinity` keeps the container alive so commands can later be
    // exec'd into it.
    create_args.push("sleep".to_string());
    create_args.push("infinity".to_string());

    let create_status = Command::new("docker")
        .args(&create_args)
        .current_dir(&plan.workspace.effective_host_dir)
        .stdin(Stdio::null())
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()
        .map_err(|source| SboxError::BackendUnavailable {
            backend: "docker".to_string(),
            source,
        })?;

    if !create_status.success() {
        return Err(SboxError::BackendCommandFailed {
            backend: "docker".to_string(),
            command: format!("docker create --name {session_name} ..."),
            status: create_status.code().unwrap_or(1),
        });
    }

    let start_status = Command::new("docker")
        .args(["start", session_name])
        .stdin(Stdio::null())
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()
        .map_err(|source| SboxError::BackendUnavailable {
            backend: "docker".to_string(),
            source,
        })?;

    if start_status.success() {
        Ok(())
    } else {
        Err(SboxError::BackendCommandFailed {
            backend: "docker".to_string(),
            command: format!("docker start {session_name}"),
            status: start_status.code().unwrap_or(1),
        })
    }
}
372
373fn build_exec_args(plan: &ExecutionPlan, session_name: &str, tty: bool) -> Vec<String> {
374 let mut args = vec!["exec".to_string(), "-i".to_string()];
375 if tty {
376 args.push("-t".to_string());
377 }
378
379 args.push("--workdir".to_string());
380 args.push(plan.workspace.sandbox_cwd.clone());
381
382 for variable in &plan.environment.variables {
383 args.push("--env".to_string());
384 args.push(format!("{}={}", variable.name, variable.value));
385 }
386
387 args.push(session_name.to_string());
388 args.extend(plan.command.iter().cloned());
389 args
390}
391
392fn append_container_settings(
393 args: &mut Vec<String>,
394 plan: &ExecutionPlan,
395) -> Result<(), SboxError> {
396 if plan.policy.read_only_rootfs {
397 args.push("--read-only".to_string());
398 }
399
400 if plan.policy.no_new_privileges {
401 args.push("--security-opt".to_string());
402 args.push("no-new-privileges".to_string());
403 }
404
405 for capability in &plan.policy.cap_drop {
406 args.push("--cap-drop".to_string());
407 args.push(capability.clone());
408 }
409
410 for capability in &plan.policy.cap_add {
411 args.push("--cap-add".to_string());
412 args.push(capability.clone());
413 }
414
415 match plan.policy.network.as_str() {
416 "off" => {
417 args.push("--network".to_string());
418 args.push("none".to_string());
419 }
420 "on" => {}
421 other => {
422 args.push("--network".to_string());
423 args.push(other.to_string());
424 }
425 }
426
427 if !plan.policy.network_allow.is_empty() {
428 args.push("--dns".to_string());
429 args.push("192.0.2.1".to_string());
430 for (hostname, ip) in &plan.policy.network_allow {
431 args.push("--add-host".to_string());
432 args.push(format!("{hostname}:{ip}"));
433 }
434 }
435
436 for port in &plan.policy.ports {
437 args.push("--publish".to_string());
438 args.push(port.clone());
439 }
440
441 match &plan.user {
442 ResolvedUser::KeepId => {
443 let (uid, gid) = current_uid_gid();
444 args.push("--user".to_string());
445 args.push(format!("{uid}:{gid}"));
446 }
447 ResolvedUser::Explicit { uid, gid } => {
448 args.push("--user".to_string());
449 args.push(format!("{uid}:{gid}"));
450 }
451 ResolvedUser::Default => {}
452 }
453
454 for mount in &plan.mounts {
455 append_mount_args(args, mount)?;
456 }
457
458 for cache in &plan.caches {
459 args.push("--mount".to_string());
460 if let Some(source) = &cache.source {
461 if let Some(path) = try_resolve_host_path(source, &plan.workspace.root) {
462 args.push(format!(
463 "type=bind,src={},target={},readonly={}",
464 path.display(),
465 cache.target,
466 bool_string(cache.read_only)
467 ));
468 } else {
469 args.push(format!(
470 "type=volume,src={},target={},readonly={}",
471 source,
472 cache.target,
473 bool_string(cache.read_only)
474 ));
475 }
476 } else {
477 args.push(format!(
478 "type=volume,src={},target={},readonly={}",
479 scoped_cache_name(&plan.workspace.root, &cache.name),
480 cache.target,
481 bool_string(cache.read_only)
482 ));
483 }
484 }
485
486 for secret in &plan.secrets {
487 append_secret_args(args, secret, &plan.workspace.root)?;
488 }
489
490 for variable in &plan.environment.variables {
491 args.push("--env".to_string());
492 args.push(format!("{}={}", variable.name, variable.value));
493 }
494
495 Ok(())
496}
497
/// Observed lifecycle state of a reusable session container.
enum ContainerState {
    /// No container with the session name exists.
    Missing,
    /// The container exists but is not currently running.
    Stopped,
    /// The container exists and is running.
    Running,
}
503
504fn inspect_container_state(session_name: &str) -> Result<ContainerState, SboxError> {
505 let output = Command::new("docker")
508 .args([
509 "container",
510 "ls",
511 "-a",
512 "--filter",
513 &format!("name=^{session_name}$"),
514 "--format",
515 "{{.State}}",
516 ])
517 .stdin(Stdio::null())
518 .stdout(Stdio::piped())
519 .stderr(Stdio::null())
520 .output()
521 .map_err(|source| SboxError::BackendUnavailable {
522 backend: "docker".to_string(),
523 source,
524 })?;
525
526 let stdout = String::from_utf8_lossy(&output.stdout);
527 let state = stdout.trim();
528 if state.is_empty() {
529 Ok(ContainerState::Missing)
530 } else if state == "running" {
531 Ok(ContainerState::Running)
532 } else {
533 Ok(ContainerState::Stopped)
534 }
535}
536
537fn validate_runtime_inputs(plan: &ExecutionPlan) -> Result<(), SboxError> {
538 for mount in &plan.mounts {
539 validate_mount_source(mount)?;
540 }
541 for secret in &plan.secrets {
542 validate_secret_source(secret, &plan.workspace.root)?;
543 }
544 Ok(())
545}
546
/// Ensures a bind mount's host source exists, optionally creating it.
///
/// Non-bind mounts need no host path and pass through. When `create` is set
/// and the source is missing, it is materialised first: a path with an
/// extension is treated as a file (an empty file is written), anything else
/// as a directory. NOTE(review): the extension check is a heuristic — a
/// directory named e.g. `conf.d` would be created as a file; confirm this
/// is acceptable.
fn validate_mount_source(mount: &ResolvedMount) -> Result<(), SboxError> {
    if mount.kind != "bind" {
        return Ok(());
    }

    // Resolution guarantees bind mounts always carry a source path.
    let source = mount
        .source
        .as_ref()
        .expect("bind mounts always resolve source");

    if source.exists() {
        return Ok(());
    }

    if mount.create {
        if source.extension().is_some() {
            if let Some(parent) = source.parent() {
                // Best-effort: a failure here surfaces via the write below.
                fs::create_dir_all(parent).ok();
            }
            // Create an empty placeholder file for file-like targets.
            return fs::write(source, b"").map_err(|_| SboxError::HostPathNotFound {
                kind: "mount source",
                name: mount.target.clone(),
                path: source.clone(),
            });
        }
        return fs::create_dir_all(source).map_err(|_| SboxError::HostPathNotFound {
            kind: "mount source",
            name: mount.target.clone(),
            path: source.clone(),
        });
    }

    Err(SboxError::HostPathNotFound {
        kind: "mount source",
        name: mount.target.clone(),
        path: source.clone(),
    })
}
589
590fn append_secret_args(
591 args: &mut Vec<String>,
592 secret: &ResolvedSecret,
593 workspace_root: &Path,
594) -> Result<(), SboxError> {
595 let path = validate_secret_source(secret, workspace_root)?;
596 args.push("--mount".to_string());
597 args.push(format!(
599 "type=bind,src={},target={},readonly=true",
600 path.display(),
601 secret.target
602 ));
603 Ok(())
604}
605
606fn validate_secret_source(
607 secret: &ResolvedSecret,
608 workspace_root: &Path,
609) -> Result<PathBuf, SboxError> {
610 let path = try_resolve_host_path(&secret.source, workspace_root).ok_or_else(|| {
611 SboxError::UnsupportedSecretSource {
612 name: secret.name.clone(),
613 secret_source: secret.source.clone(),
614 }
615 })?;
616
617 if path.exists() {
618 Ok(path)
619 } else {
620 Err(SboxError::HostPathNotFound {
621 kind: "secret source",
622 name: secret.name.clone(),
623 path,
624 })
625 }
626}
627
/// Interprets `input` as a host path when it looks like one.
///
/// `~` and `~/...` expand against `$HOME`; absolute paths pass through;
/// relative inputs containing a `/` (including `./` and `../` forms) are
/// joined onto `base`. Bare names with no separator return `None` so the
/// caller can treat them as non-path identifiers (e.g. volume names).
fn try_resolve_host_path(input: &str, base: &Path) -> Option<PathBuf> {
    if input == "~" || input.starts_with("~/") {
        let home = PathBuf::from(std::env::var_os("HOME")?);
        return Some(match input.strip_prefix("~/") {
            Some(rest) if !rest.is_empty() => home.join(rest),
            _ => home,
        });
    }

    let candidate = Path::new(input);
    if candidate.is_absolute() {
        Some(candidate.to_path_buf())
    } else if input.contains('/') {
        // Covers "./x", "../x", and "a/b" alike.
        Some(base.join(candidate))
    } else {
        None
    }
}
650
651fn scoped_cache_name(workspace_root: &Path, cache_name: &str) -> String {
652 format!(
653 "sbox-cache-{}-{}",
654 stable_hash(&workspace_root.display().to_string()),
655 sanitize_volume_name(cache_name)
656 )
657}
658
/// Replaces every character not allowed in docker volume names (anything
/// outside ASCII alphanumerics, `_`, `.`, `-`) with `-`.
fn sanitize_volume_name(name: &str) -> String {
    name.chars()
        .map(|ch| match ch {
            'a'..='z' | 'A'..='Z' | '0'..='9' | '_' | '.' | '-' => ch,
            _ => '-',
        })
        .collect()
}
670
/// 64-bit FNV-1a hash rendered as 16 lowercase hex digits.
///
/// Stable across runs and platforms (unlike `DefaultHasher`), so derived
/// names such as cache volumes persist between invocations.
fn stable_hash(input: &str) -> String {
    const FNV_OFFSET_BASIS: u64 = 0xcbf29ce484222325;
    const FNV_PRIME: u64 = 0x100000001b3;
    let hash = input
        .bytes()
        .fold(FNV_OFFSET_BASIS, |acc, byte| {
            (acc ^ u64::from(byte)).wrapping_mul(FNV_PRIME)
        });
    format!("{hash:016x}")
}
679
/// Renders a bool as the lowercase literal expected by docker's
/// `--mount ...,readonly=...` specification.
fn bool_string(value: bool) -> &'static str {
    match value {
        true => "true",
        false => "false",
    }
}
683
/// Returns the current process's real uid and gid.
///
/// Parses `/proc/self/status`, so real values are only produced on Linux.
/// NOTE(review): on any failure (non-Linux host, unreadable file) both ids
/// fall back to 0, which maps `KeepId` to `--user 0:0` (root) — confirm
/// this fallback is intended.
fn current_uid_gid() -> (u32, u32) {
    let status = std::fs::read_to_string("/proc/self/status").unwrap_or_default();
    let uid = parse_proc_id(&status, "Uid:");
    let gid = parse_proc_id(&status, "Gid:");
    (uid, gid)
}
692
/// Extracts the real id from a `/proc/*/status` line such as
/// `Uid:\t1000\t1000\t1000\t1000` — i.e. the first numeric field after the
/// key on the first matching line. Returns 0 when the key is absent or the
/// field cannot be parsed.
fn parse_proc_id(status: &str, key: &str) -> u32 {
    for line in status.lines() {
        if !line.starts_with(key) {
            continue;
        }
        // Only the first matching line is consulted.
        if let Some(field) = line.split_whitespace().nth(1) {
            if let Ok(id) = field.parse() {
                return id;
            }
        }
        return 0;
    }
    0
}
701
/// Builds the recipe image if no local image with `tag` already exists.
/// Build output is streamed to the user's terminal.
fn ensure_built_image(
    recipe_path: &Path,
    tag: &str,
    workspace_root: &Path,
) -> Result<(), SboxError> {
    // Cheap existence probe: `image inspect` exits 0 iff the tag exists
    // locally; the empty --format suppresses its JSON output.
    let exists_status = Command::new("docker")
        .args(["image", "inspect", "--format", "", tag])
        .current_dir(workspace_root)
        .stdin(Stdio::null())
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()
        .map_err(|source| SboxError::BackendUnavailable {
            backend: "docker".to_string(),
            source,
        })?;

    if exists_status.success() {
        return Ok(());
    }

    // Build with the recipe as the Dockerfile and the workspace root as the
    // build context, inheriting stdio so progress is visible.
    let build_status = Command::new("docker")
        .args([
            "build",
            "-t",
            tag,
            "-f",
            &recipe_path.display().to_string(),
            &workspace_root.display().to_string(),
        ])
        .current_dir(workspace_root)
        .stdin(Stdio::inherit())
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .status()
        .map_err(|source| SboxError::BackendUnavailable {
            backend: "docker".to_string(),
            source,
        })?;

    if build_status.success() {
        Ok(())
    } else {
        Err(SboxError::BackendCommandFailed {
            backend: "docker".to_string(),
            command: format!(
                "docker build -t {tag} -f {} {}",
                recipe_path.display(),
                workspace_root.display()
            ),
            status: build_status.code().unwrap_or(1),
        })
    }
}
757
/// Converts a child's `ExitStatus` into this process's `ExitCode`.
///
/// Codes outside `0..=255` and signal terminations (no code at all) both
/// collapse to 1.
fn status_to_exit_code(status: std::process::ExitStatus) -> ExitCode {
    let code = status
        .code()
        .and_then(|code| u8::try_from(code).ok())
        .unwrap_or(1);
    ExitCode::from(code)
}
764
#[cfg(test)]
mod tests {
    use super::{build_run_args, current_uid_gid};
    use crate::config::model::ExecutionMode;
    use crate::resolve::{
        CwdMapping, ExecutionPlan, ImageTrust, ModeSource, ProfileSource, ResolvedEnvironment,
        ResolvedImage, ResolvedImageSource, ResolvedPolicy, ResolvedUser, ResolvedWorkspace,
    };
    use std::path::PathBuf;

    /// Baseline fixture: `npm install` in `node:22` with network off and no
    /// mounts, caches, secrets, or explicit user mapping.
    fn sample_plan() -> ExecutionPlan {
        ExecutionPlan {
            command: vec!["npm".into(), "install".into()],
            command_string: "npm install".into(),
            backend: crate::config::BackendKind::Docker,
            image: ResolvedImage {
                description: "ref:node:22".into(),
                source: ResolvedImageSource::Reference("node:22".into()),
                trust: ImageTrust::MutableReference,
                verify_signature: false,
            },
            profile_name: "install".into(),
            profile_source: ProfileSource::DefaultProfile,
            mode: ExecutionMode::Sandbox,
            mode_source: ModeSource::Profile,
            workspace: ResolvedWorkspace {
                root: PathBuf::from("/project"),
                invocation_dir: PathBuf::from("/project"),
                effective_host_dir: PathBuf::from("/project"),
                mount: "/workspace".into(),
                sandbox_cwd: "/workspace".into(),
                cwd_mapping: CwdMapping::InvocationMapped,
            },
            policy: ResolvedPolicy {
                network: "off".into(),
                writable: true,
                ports: Vec::new(),
                no_new_privileges: true,
                read_only_rootfs: false,
                reuse_container: false,
                reusable_session_name: None,
                cap_drop: Vec::new(),
                cap_add: Vec::new(),
                pull_policy: None,
                network_allow: Vec::new(),
                network_allow_patterns: Vec::new(),
            },
            environment: ResolvedEnvironment {
                variables: Vec::new(),
                denied: Vec::new(),
            },
            mounts: Vec::new(),
            caches: Vec::new(),
            secrets: Vec::new(),
            user: ResolvedUser::Default,
            audit: crate::resolve::ExecutionAudit {
                install_style: false,
                trusted_image_required: false,
                sensitive_pass_through_vars: Vec::new(),
                lockfile: crate::resolve::LockfileAudit {
                    applicable: false,
                    required: false,
                    present: false,
                    expected_files: Vec::new(),
                },
                pre_run: Vec::new(),
            },
        }
    }

    #[test]
    fn docker_run_args_use_network_none_when_off() {
        let plan = sample_plan();
        let args = build_run_args(&plan, "node:22").expect("args should build");
        let joined = args.join(" ");
        assert!(joined.contains("--network none"));
        // Docker (unlike podman) never needs SELinux relabel options here.
        assert!(!joined.contains("relabel"));
    }

    #[test]
    fn docker_run_args_map_keepid_to_explicit_user() {
        let mut plan = sample_plan();
        plan.user = ResolvedUser::KeepId;
        let args = build_run_args(&plan, "node:22").expect("args should build");
        let joined = args.join(" ");
        // Docker has no --userns=keep-id; KeepId becomes an explicit --user.
        assert!(joined.contains("--user"));
        assert!(!joined.contains("keep-id"));
    }

    #[test]
    fn parse_proc_id_extracts_real_uid() {
        let fake = "Name:\tfoo\nUid:\t1000\t1000\t1000\t1000\nGid:\t1001\t1001\t1001\t1001\n";
        assert_eq!(super::parse_proc_id(fake, "Uid:"), 1000);
        assert_eq!(super::parse_proc_id(fake, "Gid:"), 1001);
    }

    #[test]
    fn current_uid_gid_parses_into_sane_range() {
        // Renamed from "...returns_nonzero...": the assertion never checked
        // for nonzero (and must not — CI containers often run as root).
        // It only guards against garbage values from /proc parsing.
        let (uid, _gid) = current_uid_gid();
        assert!(uid < 100_000);
    }

    #[test]
    fn metadata_hostnames_blocked_when_network_is_on() {
        let mut plan = sample_plan();
        plan.policy.network = "on".into();
        let args = build_run_args(&plan, "node:22").expect("args should build");
        let joined = args.join(" ");
        for hostname in crate::backend::podman::CLOUD_METADATA_HOSTNAMES {
            assert!(
                joined.contains(&format!("--add-host {hostname}:192.0.2.1")),
                "expected metadata host {hostname} to be blocked, args: {joined}"
            );
        }
    }

    #[test]
    fn metadata_hostnames_not_added_when_network_is_off() {
        let plan = sample_plan();
        let args = build_run_args(&plan, "node:22").expect("args should build");
        let joined = args.join(" ");
        assert!(
            !joined.contains("metadata.google.internal"),
            "metadata host should not appear when network is off, args: {joined}"
        );
    }

    #[test]
    fn network_allow_breaks_dns_and_injects_resolved_hosts() {
        let mut plan = sample_plan();
        plan.policy.network = "on".into();
        plan.policy.network_allow = vec![("registry.npmjs.org".into(), "104.16.0.0".into())];
        let args = build_run_args(&plan, "node:22").expect("args should build");
        let joined = args.join(" ");
        assert!(
            joined.contains("--dns 192.0.2.1"),
            "expected DNS break when network_allow is set, args: {joined}"
        );
        assert!(
            joined.contains("--add-host registry.npmjs.org:104.16.0.0"),
            "expected registry host injected via --add-host, args: {joined}"
        );
    }

    #[test]
    fn network_on_without_network_allow_still_blocks_metadata_hosts() {
        let mut plan = sample_plan();
        plan.policy.network = "on".into();
        plan.policy.network_allow = vec![];
        let args = build_run_args(&plan, "node:22").expect("args should build");
        let joined = args.join(" ");
        assert!(
            !joined.contains("--dns 192.0.2.1"),
            "DNS should not be broken without allow-list"
        );
        assert!(
            joined.contains("--add-host metadata.google.internal:192.0.2.1"),
            "metadata host should be blocked even without network_allow, args: {joined}"
        );
    }

    #[test]
    fn denied_env_vars_not_passed_to_container() {
        // Denied variables are filtered upstream during resolution; this
        // guards against them reappearing in the generated docker args.
        let mut plan = sample_plan();
        plan.environment.denied = vec!["NPM_TOKEN".into(), "NODE_AUTH_TOKEN".into()];
        plan.policy.network = "on".into();
        let args = build_run_args(&plan, "node:22").expect("args should build");
        let joined = args.join(" ");
        assert!(
            !joined.contains("NPM_TOKEN"),
            "denied env var NPM_TOKEN should not appear in docker args: {joined}"
        );
    }
}
950}