pub mod build_lock_manager;
pub mod composer;
pub mod image_manager;
pub mod layers;
pub mod proxy_manager;
pub mod template_engine;
use crate::agent::task_logger::TaskLogger;
use crate::agent::{Agent, LogProcessor};
use crate::context::AppContext;
use crate::context::ContainerEngine;
use crate::context::VolumeMount;
use crate::context::docker_client::DockerClient;
use crate::context::tsk_config;
use crate::docker::proxy_manager::ProxyManager;
use crate::tui::events::{ServerEvent, ServerEventSender};
use bollard::models::{ContainerCreateBody, DeviceMapping, HostConfig};
use bollard::query_parameters::{LogsOptions, RemoveContainerOptions};
use futures_util::stream::StreamExt;
use std::collections::HashMap;
use std::io::Write;
use std::path::PathBuf;
use std::sync::Arc;
/// Root directory inside the container under which each project's repository copy is mounted.
const CONTAINER_WORKSPACE_BASE: &str = "/workspace";
/// Unprivileged user the agent process runs as inside the container.
const CONTAINER_USER: &str = "agent";
/// Seccomp profile (embedded at compile time) permitting the extra syscalls
/// needed for Docker-in-Docker / nested container builds.
const SECCOMP_DIND_PROFILE: &str = include_str!("seccomp_dind.json");
/// In-container path of Podman's image/layer storage; backed by tmpfs or a
/// named volume depending on the chosen `DindStorage` strategy.
const PODMAN_STORAGE_PATH: &str = "/home/agent/.local/share/containers/storage";
/// Backing storage strategy for nested (dind) Podman container storage.
enum DindStorage {
    /// No extra storage needed for this task.
    #[allow(dead_code)]
    None,
    /// Mount a size-limited tmpfs at `PODMAN_STORAGE_PATH`.
    Tmpfs,
    /// Use a named engine volume (the value is the volume name); the caller
    /// removes it after the task completes.
    NamedVolume(String),
}
/// Choose the storage backing for nested (dind) container builds.
///
/// On macOS containers run inside a VM, so tmpfs is always used. On Linux,
/// tmpfs is only used on kernels >= 6.6 (presumably for overlayfs-on-tmpfs
/// support); older or unparseable kernels fall back to a per-task named
/// volume derived from `task_id`.
fn dind_storage_strategy(task_id: &str) -> DindStorage {
    if cfg!(target_os = "macos") {
        return DindStorage::Tmpfs;
    }
    // Parse "major.minor" out of e.g. "6.6.0-generic"; any read or parse
    // failure is treated as "too old" and selects the named-volume path.
    let kernel_at_least_6_6 = std::fs::read_to_string("/proc/sys/kernel/osrelease")
        .ok()
        .and_then(|release| {
            let mut components = release.trim().splitn(3, '.');
            let major: u32 = components.next()?.parse().ok()?;
            let minor: u32 = components.next()?.parse().ok()?;
            // Lexicographic tuple comparison == (major > 6) || (major == 6 && minor >= 6).
            Some((major, minor) >= (6, 6))
        })
        .unwrap_or(false);
    if kernel_at_least_6_6 {
        DindStorage::Tmpfs
    } else {
        DindStorage::NamedVolume(format!("tsk-dind-{task_id}"))
    }
}
/// Report whether the given cgroup-v2 controller (e.g. "memory", "cpu") is
/// delegated to the current user's systemd slice.
///
/// The first readable `cgroup.controllers` file decides. If neither path is
/// readable (non-systemd host, cgroup v1, containerized environment) we
/// optimistically return true so resource limits are still attempted.
fn cgroup_controller_available(controller: &str) -> bool {
    // SAFETY: getuid() has no preconditions and cannot fail.
    let uid = unsafe { libc::getuid() };
    let candidate_paths = [
        format!("/sys/fs/cgroup/user.slice/user-{uid}.slice/user@{uid}.service/cgroup.controllers"),
        format!("/sys/fs/cgroup/user.slice/user-{uid}.slice/cgroup.controllers"),
    ];
    match candidate_paths
        .iter()
        .find_map(|path| std::fs::read_to_string(path).ok())
    {
        Some(contents) => contents.split_whitespace().any(|c| c == controller),
        None => true,
    }
}
/// Environment variables that configure HTTP(S) proxying inside task
/// containers. Both casings are included because tools disagree on which
/// form they honor.
pub(crate) const PROXY_ENV_VARS: &[&str] = &[
    "HTTP_PROXY",
    "HTTPS_PROXY",
    "http_proxy",
    "https_proxy",
    "NO_PROXY",
    "no_proxy",
];
/// Absolute in-container path used as the task's working directory:
/// `<workspace base>/<project>`.
fn container_working_dir(project: &str) -> String {
    let mut dir = String::from(CONTAINER_WORKSPACE_BASE);
    dir.push('/');
    dir.push_str(project);
    dir
}
/// Resolve the effective configuration for `task`.
///
/// Prefers the configuration snapshot serialized onto the task at creation
/// time. If the snapshot is absent, or fails to deserialize (a warning is
/// emitted in that case), falls back to resolving live from the project's
/// on-disk configuration.
pub(crate) fn resolve_config_from_task(
    task: &crate::task::Task,
    ctx: &AppContext,
    event_sender: &Option<crate::tui::events::ServerEventSender>,
) -> crate::context::ResolvedConfig {
    match task
        .resolved_config
        .as_deref()
        .map(serde_json::from_str::<crate::context::ResolvedConfig>)
    {
        Some(Ok(config)) => return config,
        Some(Err(e)) => {
            // Corrupt snapshot: warn, then fall through to live resolution.
            crate::tui::events::emit_or_print(
                event_sender,
                crate::tui::events::ServerEvent::WarningMessage(format!(
                    "Warning: Failed to deserialize resolved_config for task {}: {e}. Falling back to live resolution.",
                    task.id
                )),
            );
        }
        None => {}
    }
    let project_config = tsk_config::load_project_config(&task.repo_root);
    ctx.tsk_config().resolve_config(
        &task.project,
        project_config.as_ref(),
        Some(&task.repo_root),
    )
}
/// Orchestrates the full lifecycle of task containers: configuration,
/// creation, log streaming, and teardown — optionally behind a
/// network-isolating proxy.
pub struct DockerManager {
    // Shared application context (config, environment paths, etc.).
    ctx: AppContext,
    // Abstraction over the container engine API (Docker or Podman).
    client: Arc<dyn DockerClient>,
    // Manages per-task proxy containers/networks for network isolation.
    proxy_manager: ProxyManager,
    // When present, messages go to the TUI instead of stdout/stderr.
    event_sender: Option<ServerEventSender>,
}
impl DockerManager {
/// Build a `DockerManager`, wiring a `ProxyManager` to the same client,
/// environment, engine selection, and event channel.
pub fn new(
    ctx: &AppContext,
    client: Arc<dyn DockerClient>,
    event_sender: Option<ServerEventSender>,
) -> Self {
    let proxy_manager = ProxyManager::new(
        client.clone(),
        ctx.tsk_env(),
        ctx.tsk_config().container_engine.clone(),
        event_sender.clone(),
    );
    Self {
        ctx: ctx.clone(),
        client,
        proxy_manager,
        event_sender,
    }
}
/// Send an event to the TUI channel, or print it when no channel is attached.
fn emit(&self, event: ServerEvent) {
    crate::tui::events::emit_or_print(&self.event_sender, event);
}
/// Hand out a shared handle to the underlying container-engine client.
pub fn client(&self) -> Arc<dyn DockerClient> {
    self.client.clone()
}
/// True when this process is itself running inside a tsk task container
/// behind the tsk proxy (nested execution). Hard-wired to false under
/// `cargo test` so unit tests are insensitive to the ambient environment.
fn is_nested(&self) -> bool {
    if cfg!(test) {
        return false;
    }
    std::env::var("TSK_CONTAINER").is_ok() && Self::has_tsk_proxy_env()
}
/// Whether any of the proxy environment variables points at a tsk proxy
/// container (i.e. its value contains "tsk-proxy").
fn has_tsk_proxy_env() -> bool {
    for var in PROXY_ENV_VARS {
        if let Ok(val) = std::env::var(var) {
            if val.contains("tsk-proxy") {
                return true;
            }
        }
    }
    false
}
/// Assemble proxy-related environment variables for a task container.
///
/// Nested execution forwards the outer proxy's variables verbatim from our
/// own environment. Otherwise a fresh set is derived from `proxy_config`:
/// the standard HTTP(S)_PROXY / NO_PROXY pairs (both casings), JVM proxy
/// flags via JAVA_TOOL_OPTIONS, the proxy host in TSK_PROXY_HOST, and — if
/// the resolved config declares host ports — TSK_HOST_PORTS.
fn build_proxy_env_vars(
    &self,
    resolved: &crate::context::ResolvedConfig,
    proxy_config: &crate::context::ResolvedProxyConfig,
) -> Vec<String> {
    if self.is_nested() {
        // Forward ambient settings instead of synthesizing new ones.
        let mut env = Vec::new();
        for var in PROXY_ENV_VARS
            .iter()
            .copied()
            .chain(["JAVA_TOOL_OPTIONS", "TSK_PROXY_HOST"])
        {
            if let Ok(val) = std::env::var(var) {
                env.push(format!("{var}={val}"));
            }
        }
        return env;
    }
    let proxy_url = proxy_config.proxy_url();
    let proxy_container_name = proxy_config.proxy_container_name();
    // Both casings: tools disagree on which form they read.
    let mut env = vec![
        format!("HTTP_PROXY={proxy_url}"),
        format!("HTTPS_PROXY={proxy_url}"),
        format!("http_proxy={proxy_url}"),
        format!("https_proxy={proxy_url}"),
        format!("NO_PROXY=localhost,127.0.0.1,{proxy_container_name}"),
        format!("no_proxy=localhost,127.0.0.1,{proxy_container_name}"),
    ];
    // JVMs ignore the conventional env vars; configure them explicitly.
    env.push(format!(
        "JAVA_TOOL_OPTIONS=-Dhttp.proxyHost={pcn} -Dhttp.proxyPort=3128 \
         -Dhttps.proxyHost={pcn} -Dhttps.proxyPort=3128 \
         -Dhttp.nonProxyHosts=localhost|127.0.0.1 \
         -Dhttps.nonProxyHosts=localhost|127.0.0.1",
        pcn = proxy_container_name
    ));
    env.push(format!("TSK_PROXY_HOST={proxy_container_name}"));
    if resolved.has_host_ports() {
        env.push(format!("TSK_HOST_PORTS={}", resolved.host_ports_env()));
    }
    env
}
/// Force-remove a container by id, mapping engine errors to strings.
async fn remove_container(&self, container_id: &str) -> Result<(), String> {
    let options = RemoveContainerOptions {
        force: true,
        ..Default::default()
    };
    match self.client.remove_container(container_id, Some(options)).await {
        Ok(()) => Ok(()),
        Err(e) => Err(e.to_string()),
    }
}
/// Compute the bind-mount specifications for a task container.
///
/// Ordering matters to consumers: [0] the copied repository at the project
/// working dir, then agent-provided volumes, then the instructions
/// directory (read-only at /instructions), the task output directory
/// (/output), and finally user-configured bind/named volumes from the
/// resolved config.
///
/// # Panics
/// Panics if the task has no `copied_repo_path` or its path is not valid
/// UTF-8 — both are invariants established before container execution.
fn build_bind_volumes(
    &self,
    task: &crate::task::Task,
    agent: &dyn Agent,
    resolved: &crate::context::ResolvedConfig,
) -> Vec<String> {
    let repo_path = task
        .copied_repo_path
        .as_ref()
        .expect("Task must have copied_repo_path set before container execution");
    let repo_path_str = repo_path
        .to_str()
        .expect("Repository path should be valid UTF-8");
    let working_dir = container_working_dir(&task.project);
    let mut binds = vec![format!("{repo_path_str}:{working_dir}")];
    // Agent-specific mounts (e.g. credential/config directories).
    for (host_path, container_path, options) in agent.volumes() {
        let bind = if options.is_empty() {
            format!("{host_path}:{container_path}")
        } else {
            format!("{host_path}:{container_path}:{options}")
        };
        binds.push(bind);
    }
    // Mount the directory containing the instructions file read-only;
    // canonicalize so relative host paths still resolve, falling back to
    // the raw path if canonicalization fails.
    let instructions_file_path = PathBuf::from(&task.instructions_file);
    if let Some(parent) = instructions_file_path.parent() {
        let abs_parent = parent
            .canonicalize()
            .unwrap_or_else(|_| parent.to_path_buf());
        binds.push(format!("{}:/instructions:ro", abs_parent.to_string_lossy()));
    }
    // The task directory's `output` subdirectory collects artifacts/logs.
    if let Some(task_dir) = repo_path.parent() {
        let output_dir = task_dir.join("output");
        binds.push(format!("{}:/output", output_dir.to_string_lossy()));
    }
    // User-configured volumes. Bind mounts whose host path cannot be
    // expanded are silently skipped.
    for volume in &resolved.volumes {
        match volume {
            VolumeMount::Bind(bind) => {
                if let Ok(host_path) = bind.expanded_host_path() {
                    let bind_str = if bind.readonly {
                        format!("{}:{}:ro", host_path.display(), bind.container)
                    } else {
                        format!("{}:{}", host_path.display(), bind.container)
                    };
                    binds.push(bind_str);
                }
            }
            VolumeMount::Named(named) => {
                // Named volumes are namespaced with a `tsk-` prefix.
                let volume_name = format!("tsk-{}", named.name);
                let bind_str = if named.readonly {
                    format!("{volume_name}:{}:ro", named.container)
                } else {
                    format!("{volume_name}:{}", named.container)
                };
                binds.push(bind_str);
            }
        }
    }
    binds
}
/// Container name for a task: `tsk-<id>`, or `tsk-interactive-<id>` for
/// interactive sessions.
fn build_container_name(&self, task: &crate::task::Task) -> String {
    let prefix = if task.is_interactive {
        "tsk-interactive"
    } else {
        "tsk"
    };
    format!("{prefix}-{}", task.id)
}
/// Translate a task + agent + resolved config into a bollard container
/// create body, also deciding the dind storage strategy.
///
/// Returns the create body together with the chosen `DindStorage` so the
/// caller can clean up any named volume after the run.
fn create_container_config(
    &self,
    image: &str,
    task: &crate::task::Task,
    agent: &dyn Agent,
    network_name: Option<&str>,
    proxy_config: Option<&crate::context::ResolvedProxyConfig>,
    proxy_container_ip: Option<&str>,
) -> (ContainerCreateBody, DindStorage) {
    let resolved = resolve_config_from_task(task, &self.ctx, &self.event_sender);
    let mut binds = self.build_bind_volumes(task, agent, &resolved);
    let instructions_file_path = PathBuf::from(&task.instructions_file);
    let working_dir = container_working_dir(&task.project);
    // Proxy env: derived from the supplied config, or forwarded from our
    // own environment when nested (the default config is only a
    // placeholder — the nested branch ignores its values).
    let mut env_vars = if let Some(pc) = proxy_config {
        self.build_proxy_env_vars(&resolved, pc)
    } else if self.is_nested() {
        let default_pc = crate::context::ResolvedProxyConfig::default();
        self.build_proxy_env_vars(&resolved, &default_pc)
    } else {
        Vec::new()
    };
    env_vars.push("TSK_CONTAINER=1".to_string());
    env_vars.push(format!("TSK_TASK_ID={}", task.id));
    for (key, value) in agent.environment() {
        env_vars.push(format!("{key}={value}"));
    }
    for env_var in &resolved.env {
        env_vars.push(format!("{}={}", env_var.name, env_var.value));
    }
    if task.dind {
        env_vars.push("BUILDAH_ISOLATION=chroot".to_string());
    }
    if task.stack == "python" {
        env_vars.push(format!("PYTHONPATH={working_dir}"));
    }
    // NOTE(review): the host-side instructions path is handed to the agent
    // here; presumably the agent maps it onto the /instructions mount —
    // confirm against the Agent implementations.
    let agent_command = agent.build_command(
        instructions_file_path.to_str().unwrap_or("instructions.md"),
        task.is_interactive,
    );
    let command = if agent_command.is_empty() {
        None
    } else {
        Some(agent_command)
    };
    let container_engine = &self.ctx.tsk_config().container_engine;
    // Expand configured device paths; glob patterns map every match to the
    // same path in-container with rwm cgroup permissions.
    let devices: Option<Vec<DeviceMapping>> = if resolved.devices.is_empty() {
        None
    } else {
        let mut mappings = Vec::new();
        for pattern in &resolved.devices {
            if pattern.contains('*') || pattern.contains('?') || pattern.contains('[') {
                if let Ok(paths) = glob::glob(pattern) {
                    for entry in paths.flatten() {
                        let path_str = entry.to_string_lossy().to_string();
                        mappings.push(DeviceMapping {
                            path_on_host: Some(path_str.clone()),
                            path_in_container: Some(path_str),
                            cgroup_permissions: Some("rwm".to_string()),
                        });
                    }
                }
            } else {
                mappings.push(DeviceMapping {
                    path_on_host: Some(pattern.clone()),
                    path_in_container: Some(pattern.clone()),
                    cgroup_permissions: Some("rwm".to_string()),
                });
            }
        }
        if mappings.is_empty() {
            None
        } else {
            Some(mappings)
        }
    };
    // dind needs a relaxed seccomp/apparmor profile. Podman takes the
    // profile by file path (so it is materialized into the config dir),
    // Docker accepts the JSON inline.
    let security_opt = if task.dind {
        let mut security_opts = if *container_engine == ContainerEngine::Podman {
            let seccomp_path = self.ctx.tsk_env().config_dir().join("seccomp_dind.json");
            if std::fs::write(&seccomp_path, SECCOMP_DIND_PROFILE).is_ok() {
                vec![format!("seccomp={}", seccomp_path.display())]
            } else {
                // Could not write the profile; run without seccomp.
                vec!["seccomp=unconfined".to_string()]
            }
        } else {
            vec![format!("seccomp={SECCOMP_DIND_PROFILE}")]
        };
        security_opts.push("apparmor=unconfined".to_string());
        if *container_engine == ContainerEngine::Podman {
            security_opts.push("label=disable".to_string());
        }
        Some(security_opts)
    } else {
        None
    };
    // Capabilities the agent should never need. SETUID/SETGID are retained
    // for dind or when the config explicitly allows sudo.
    let mut cap_drop = vec![
        "NET_ADMIN".to_string(),
        "SETPCAP".to_string(),
        "SYS_ADMIN".to_string(),
        "SYS_PTRACE".to_string(),
        "DAC_OVERRIDE".to_string(),
        "AUDIT_WRITE".to_string(),
    ];
    if !task.dind && !resolved.sudo {
        cap_drop.push("SETUID".to_string());
        cap_drop.push("SETGID".to_string());
    }
    if network_name.is_some() {
        // On the isolated network, also forbid raw sockets.
        cap_drop.push("NET_RAW".to_string());
    }
    // Nested/dind runs need writable podman storage unless the user already
    // mounted something at the storage path.
    let has_storage_volume = binds
        .iter()
        .any(|b| b.split(':').nth(1) == Some(PODMAN_STORAGE_PATH));
    let needs_podman_storage = (task.dind || self.is_nested()) && !has_storage_volume;
    let dind_storage = if needs_podman_storage {
        let strategy = dind_storage_strategy(&task.id);
        if let DindStorage::NamedVolume(ref name) = strategy {
            binds.push(format!("{name}:{PODMAN_STORAGE_PATH}"));
        }
        strategy
    } else {
        DindStorage::None
    };
    let config = ContainerCreateBody {
        image: Some(image.to_string()),
        user: Some(CONTAINER_USER.to_string()),
        cmd: command,
        host_config: Some(HostConfig {
            binds: Some(binds),
            // Nested containers share the outer container's network stack.
            network_mode: if self.is_nested() {
                Some("host".to_string())
            } else {
                network_name.map(|n| n.to_string())
            },
            // Resource limits are skipped when nested, or under rootless
            // podman without the corresponding delegated cgroup controller
            // (setting them there would fail container creation).
            memory: if self.is_nested()
                || (*container_engine == ContainerEngine::Podman
                    && !cgroup_controller_available("memory"))
            {
                None
            } else {
                Some(resolved.memory_limit_bytes())
            },
            cpu_quota: if self.is_nested()
                || (*container_engine == ContainerEngine::Podman
                    && !cgroup_controller_available("cpu"))
            {
                None
            } else {
                Some(resolved.cpu_quota_microseconds())
            },
            privileged: if resolved.privileged {
                Some(true)
            } else {
                None
            },
            devices,
            cap_drop: Some(cap_drop),
            security_opt,
            // tmpfs-backed podman storage when that strategy was chosen.
            tmpfs: if matches!(dind_storage, DindStorage::Tmpfs) {
                Some(HashMap::from([(
                    PODMAN_STORAGE_PATH.to_string(),
                    "size=40G,mode=1777".to_string(),
                )]))
            } else {
                None
            },
            // Rootless podman: keep the host uid mapping so bind mounts
            // remain writable by the container user.
            userns_mode: if *container_engine == ContainerEngine::Podman {
                Some("keep-id".to_string())
            } else {
                None
            },
            // Pin the proxy's hostname to its IP so name resolution works
            // on the internal network.
            extra_hosts: match (proxy_config, proxy_container_ip) {
                (Some(pc), Some(ip)) => {
                    Some(vec![format!("{}:{}", pc.proxy_container_name(), ip)])
                }
                _ => None,
            },
            ..Default::default()
        }),
        working_dir: Some(working_dir),
        env: Some(env_vars),
        // Interactive sessions attach a TTY with stdio piped through.
        attach_stdin: Some(task.is_interactive),
        attach_stdout: Some(task.is_interactive),
        attach_stderr: Some(task.is_interactive),
        tty: Some(task.is_interactive),
        open_stdin: Some(task.is_interactive),
        ..Default::default()
    };
    (config, dind_storage)
}
/// Run a task to completion in a container built from `docker_image_tag`.
///
/// If the task requests network isolation (and we are not nested), a proxy
/// session is acquired first and the container is attached to its network.
/// The container, the proxy session, and any dind named volume are cleaned
/// up afterwards regardless of the run's outcome (best-effort; cleanup
/// errors are ignored).
///
/// Returns the full captured log transcript and the agent's task result.
pub async fn run_task_container(
    &self,
    docker_image_tag: &str,
    task: &crate::task::Task,
    agent: &dyn Agent,
) -> Result<(String, crate::agent::TaskResult), String> {
    let resolved = resolve_config_from_task(task, &self.ctx, &self.event_sender);
    let proxy_config = resolved.proxy_config();
    let proxy_session = if task.network_isolation && !self.is_nested() {
        // Proxy output goes into the task's agent log; stdout echoing is
        // suppressed when a TUI event channel is attached.
        let suppress_stdout = self.event_sender.is_some();
        let proxy_logger = TaskLogger::from_path(
            &self
                .ctx
                .tsk_env()
                .task_dir(&task.id)
                .join("output")
                .join("agent.log"),
            suppress_stdout,
        );
        match self
            .proxy_manager
            .acquire_proxy(&task.id, &proxy_config, &proxy_logger)
            .await
        {
            Ok(session) => Some(session),
            Err(e) => {
                return Err(format!(
                    "Failed to ensure proxy is running and healthy: {e}. \
                     The task should be retried later when the proxy is available. \
                     Check the status in Docker."
                ));
            }
        }
    } else {
        None
    };
    let (container_id, dind_storage, result) = self
        .run_container_inner(
            docker_image_tag,
            task,
            agent,
            proxy_session.as_ref().map(|s| s.network_name.as_str()),
            proxy_session.as_ref().map(|_| &proxy_config),
            proxy_session.as_ref().and_then(|s| s.proxy_ip.as_deref()),
        )
        .await;
    // Cleanup order: container first, then the proxy session, then any
    // named dind storage volume.
    if let Some(ref id) = container_id {
        let _ = self.remove_container(id).await;
    }
    if let Some(ref session) = proxy_session {
        self.proxy_manager.release_proxy(session).await;
    }
    if let DindStorage::NamedVolume(ref name) = dind_storage {
        let _ = self.client.remove_volume(name).await;
    }
    result
}
/// Create, start, and run a single task container.
///
/// Returns the container id (whenever creation succeeded, even on later
/// failure) and the dind storage choice alongside the run result, so the
/// caller can perform cleanup on every path.
async fn run_container_inner(
    &self,
    docker_image_tag: &str,
    task: &crate::task::Task,
    agent: &dyn Agent,
    network_name: Option<&str>,
    proxy_config: Option<&crate::context::ResolvedProxyConfig>,
    proxy_container_ip: Option<&str>,
) -> (
    Option<String>,
    DindStorage,
    Result<(String, crate::agent::TaskResult), String>,
) {
    let suppress_stdout = self.event_sender.is_some();
    let (config, dind_storage) = self.create_container_config(
        docker_image_tag,
        task,
        agent,
        network_name,
        proxy_config,
        proxy_container_ip,
    );
    let container_name = self.build_container_name(task);
    let options = bollard::query_parameters::CreateContainerOptionsBuilder::default()
        .name(&container_name)
        .build();
    let container_id = match self.client.create_container(Some(options), config).await {
        Ok(id) => id,
        Err(e) => return (None, dind_storage, Err(e)),
    };
    // Stage agent-provided files (tar archives) before start-up.
    for (tar_data, dest_path) in agent.files_to_copy() {
        if let Err(e) = self
            .client
            .upload_to_container(&container_id, &dest_path, tar_data)
            .await
        {
            return (Some(container_id), dind_storage, Err(e));
        }
    }
    if let Err(e) = self.client.start_container(&container_id).await {
        return (Some(container_id), dind_storage, Err(e));
    }
    let result = if task.is_interactive {
        // Interactive: hand the terminal over until the session ends; no
        // log transcript is captured.
        println!("\nStarting interactive session...");
        match self.client.attach_container(&container_id).await {
            Ok(()) => {
                println!("\nInteractive session ended");
                Ok((
                    String::new(),
                    crate::agent::TaskResult {
                        success: true,
                        message: "Interactive session completed".to_string(),
                        cost_usd: None,
                        duration_ms: None,
                    },
                ))
            }
            Err(e) => {
                eprintln!("Interactive session ended with error: {e}");
                Err(e)
            }
        }
    } else {
        // Non-interactive: stream logs through the agent's processor,
        // mirroring into the agent log file when it can be opened.
        let log_file = match self.ctx.tsk_env().open_agent_log(&task.id) {
            Ok(file) => Some(file),
            Err(e) => {
                self.emit(ServerEvent::WarningMessage(format!(
                    "Warning: Failed to open agent log file: {e}"
                )));
                None
            }
        };
        let mut log_processor = agent.create_log_processor(Some(task));
        let output = self
            .stream_container_logs(
                &container_id,
                &mut *log_processor,
                log_file,
                suppress_stdout,
            )
            .await;
        let task_result = log_processor.get_final_result().cloned();
        match output {
            Ok((output, exit_code)) => {
                // A non-zero exit code overrides whatever the processor
                // concluded; missing results get sensible defaults.
                let task_result = match (exit_code, task_result) {
                    (code, Some(mut r)) if code != 0 => {
                        r.success = false;
                        r
                    }
                    (code, None) if code != 0 => crate::agent::TaskResult {
                        success: false,
                        message: format!("Container exited with status {code}"),
                        cost_usd: None,
                        duration_ms: None,
                    },
                    (_, Some(r)) => r,
                    (_, None) => crate::agent::TaskResult {
                        success: true,
                        message: "Task completed".to_string(),
                        cost_usd: None,
                        duration_ms: None,
                    },
                };
                Ok((output, task_result))
            }
            Err(e) => Err(e),
        }
    };
    (Some(container_id), dind_storage, result)
}
/// Follow a container's stdout/stderr until the container exits.
///
/// A spawned producer task pumps log chunks into an mpsc channel while we
/// simultaneously wait on the container. Chunks are accumulated verbatim
/// into the returned transcript and fed line-by-line to `log_processor`.
/// Returns the full transcript and the container's exit code.
async fn stream_container_logs(
    &self,
    container_id: &str,
    log_processor: &mut dyn LogProcessor,
    log_file: Option<std::fs::File>,
    suppress_stdout: bool,
) -> Result<(String, i64), String> {
    let mut log_file = log_file;
    let client_clone = Arc::clone(&self.client);
    let container_id_clone = container_id.to_string();
    let (tx, mut rx) = tokio::sync::mpsc::channel::<String>(100);
    let log_event_sender = self.event_sender.clone();
    // Producer: follow the log stream and forward chunks until the stream
    // ends, errors, or the receiver side is dropped.
    let log_task = tokio::spawn(async move {
        let log_options = LogsOptions {
            stdout: true,
            stderr: true,
            follow: true,
            timestamps: false,
            ..Default::default()
        };
        match client_clone
            .logs_stream(&container_id_clone, Some(log_options))
            .await
        {
            Ok(mut stream) => {
                while let Some(result) = stream.next().await {
                    match result {
                        Ok(log_line) => {
                            // Receiver gone: consumer finished; stop quietly.
                            if tx.send(log_line).await.is_err() {
                                break;
                            }
                        }
                        Err(e) => {
                            crate::tui::events::emit_or_print(
                                &log_event_sender,
                                crate::tui::events::ServerEvent::WarningMessage(format!(
                                    "Error streaming logs: {e}"
                                )),
                            );
                            break;
                        }
                    }
                }
            }
            Err(e) => {
                crate::tui::events::emit_or_print(
                    &log_event_sender,
                    crate::tui::events::ServerEvent::WarningMessage(format!(
                        "Failed to start log streaming: {e}"
                    )),
                );
            }
        }
    });
    let mut all_logs = String::new();
    let mut line_buffer = String::new();
    let docker_client = Arc::clone(&self.client);
    let wait_future = docker_client.wait_container(container_id);
    tokio::pin!(wait_future);
    loop {
        tokio::select! {
            Some(log_chunk) = rx.recv() => {
                all_logs.push_str(&log_chunk);
                line_buffer.push_str(&log_chunk);
                process_complete_lines(&mut line_buffer, log_processor, log_file.as_mut(), suppress_stdout, &self.event_sender);
            }
            exit_code = &mut wait_future => {
                let exit_code = exit_code?;
                // Grace period so trailing chunks still in flight can land
                // in the channel before the final drain; chunks arriving
                // later than this are dropped.
                tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
                while let Ok(log_chunk) = rx.try_recv() {
                    all_logs.push_str(&log_chunk);
                    line_buffer.push_str(&log_chunk);
                    process_complete_lines(&mut line_buffer, log_processor, log_file.as_mut(), suppress_stdout, &self.event_sender);
                }
                // Flush any final partial line by terminating it.
                if !line_buffer.trim().is_empty() {
                    line_buffer.push('\n');
                    process_complete_lines(&mut line_buffer, log_processor, log_file.as_mut(), suppress_stdout, &self.event_sender);
                }
                log_task.abort();
                return Ok((all_logs, exit_code));
            }
        }
    }
}
}
/// Drain every complete (newline-terminated) line from `line_buffer`,
/// feeding each through the log processor; any trailing partial line is
/// left in the buffer for the next call.
///
/// Lines the processor yields are echoed to stdout (unless suppressed) and
/// appended to the agent log file as one JSON object per line. File and
/// serialization failures are reported as warnings, never as errors.
fn process_complete_lines(
    line_buffer: &mut String,
    log_processor: &mut dyn LogProcessor,
    mut log_file: Option<&mut std::fs::File>,
    suppress_stdout: bool,
    event_sender: &Option<ServerEventSender>,
) {
    while let Some(newline_pos) = line_buffer.find('\n') {
        let complete_line = &line_buffer[..newline_pos];
        // Tolerate CRLF output from the container.
        let trimmed = complete_line.trim_end_matches('\r');
        if let Some(log_line) = log_processor.process_line(trimmed) {
            if !suppress_stdout {
                println!("{log_line}");
            }
            if let Some(ref mut file) = log_file {
                match serde_json::to_string(&log_line) {
                    Ok(json) => {
                        if let Err(e) = writeln!(file, "{json}") {
                            crate::tui::events::emit_or_print(
                                event_sender,
                                ServerEvent::WarningMessage(format!(
                                    "Warning: Failed to write to agent log file: {e}"
                                )),
                            );
                        }
                    }
                    Err(e) => {
                        crate::tui::events::emit_or_print(
                            event_sender,
                            ServerEvent::WarningMessage(format!(
                                "Warning: Failed to serialize log line: {e}"
                            )),
                        );
                    }
                }
            }
        }
        // Remove the consumed line including its newline.
        line_buffer.drain(..=newline_pos);
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::context::AppContext;
use crate::context::ResolvedProxyConfig;
use crate::task::{Task, TaskStatus};
use crate::test_utils::TrackedDockerClient;
use std::sync::Arc;
// Proxy container name derived from a default proxy config, shared by tests.
fn default_proxy_container_name() -> String {
    ResolvedProxyConfig::default().proxy_container_name()
}
// Minimal running-task fixture; `is_interactive` toggles TTY/attach behavior.
fn create_test_task(is_interactive: bool) -> Task {
    let repo_path = PathBuf::from("/tmp/test-repo");
    Task {
        id: "test-task-id".to_string(),
        repo_root: repo_path.clone(),
        task_type: "feature".to_string(),
        instructions_file: "/tmp/test-repo/.tsk/tasks/instructions.md".to_string(),
        status: TaskStatus::Running,
        started_at: Some(chrono::Utc::now()),
        branch_name: "tsk/feature/test-task/test-task-id".to_string(),
        copied_repo_path: Some(repo_path),
        is_interactive,
        ..Task::test_default()
    }
}
// Happy path: proxy + task containers are created/started/removed in the
// expected order, env vars are injected, and agent files land in /home/agent.
#[tokio::test]
async fn test_run_task_container_success() {
    let mock_client = Arc::new(TrackedDockerClient::default());
    let ctx = AppContext::builder().build();
    let manager = DockerManager::new(&ctx, mock_client.clone(), None);
    let task = create_test_task(false);
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    let result = manager.run_task_container("tsk/base", &task, &agent).await;
    assert!(result.is_ok());
    let (output, task_result) = result.unwrap();
    assert_eq!(output, "Container logs");
    assert!(task_result.success);
    assert_eq!(task_result.message, "Task completed");
    // Two creates: the proxy container first, then the task container.
    let create_calls = mock_client.create_container_calls.lock().unwrap();
    assert_eq!(create_calls.len(), 2);
    let task_container_config = &create_calls[1].1;
    let actual_cmd = task_container_config.cmd.as_ref().unwrap();
    assert_eq!(actual_cmd.len(), 3);
    assert_eq!(actual_cmd[0], "sh");
    assert_eq!(actual_cmd[1], "-c");
    assert!(actual_cmd[2].contains("claude"));
    assert_eq!(task_container_config.user, Some(CONTAINER_USER.to_string()));
    let pcn = default_proxy_container_name();
    let env = task_container_config.env.as_ref().unwrap();
    assert!(env.contains(&format!("HTTP_PROXY=http://{pcn}:3128")));
    assert!(env.contains(&format!("HTTPS_PROXY=http://{pcn}:3128")));
    assert!(
        env.iter().any(|e| e.starts_with("JAVA_TOOL_OPTIONS=")),
        "JAVA_TOOL_OPTIONS should be set for proxy"
    );
    assert!(env.contains(&"TSK_CONTAINER=1".to_string()));
    assert!(env.contains(&"TSK_TASK_ID=test-task-id".to_string()));
    drop(create_calls);
    // Proxy starts before the task container; removal is reversed.
    let start_calls = mock_client.start_container_calls.lock().unwrap();
    assert_eq!(start_calls.len(), 2);
    assert_eq!(start_calls[0], pcn);
    assert_eq!(start_calls[1], "test-container-id-1");
    let wait_calls = mock_client.wait_container_calls.lock().unwrap();
    assert_eq!(wait_calls.len(), 1);
    assert_eq!(wait_calls[0], "test-container-id-1");
    let remove_calls = mock_client.remove_container_calls.lock().unwrap();
    assert_eq!(remove_calls.len(), 2);
    assert_eq!(remove_calls[0].0, "test-container-id-1");
    assert_eq!(remove_calls[1].0, pcn);
    drop(remove_calls);
    let upload_calls = mock_client.upload_to_container_calls.lock().unwrap();
    for (container_id, dest_path, _tar_data) in upload_calls.iter() {
        assert_eq!(container_id, "test-container-id-1");
        assert_eq!(dest_path, "/home/agent");
    }
}
// Interactive runs attach a TTY, use the `tsk-interactive-` name, and skip
// log streaming.
#[tokio::test]
async fn test_run_task_container_interactive() {
    let mock_client = Arc::new(TrackedDockerClient::default());
    let ctx = AppContext::builder().build();
    let manager = DockerManager::new(&ctx, mock_client.clone(), None);
    let task = create_test_task(true);
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    let result = manager.run_task_container("tsk/base", &task, &agent).await;
    assert!(result.is_ok());
    let (output, task_result) = result.unwrap();
    assert_eq!(output, "");
    assert!(task_result.success);
    assert_eq!(task_result.message, "Interactive session completed");
    let create_calls = mock_client.create_container_calls.lock().unwrap();
    assert_eq!(create_calls.len(), 2);
    let (options, config) = &create_calls[1];
    assert_eq!(
        options.as_ref().unwrap().name,
        Some("tsk-interactive-test-task-id".to_string())
    );
    assert_eq!(config.attach_stdin, Some(true));
    assert_eq!(config.attach_stdout, Some(true));
    assert_eq!(config.attach_stderr, Some(true));
    assert_eq!(config.tty, Some(true));
    assert_eq!(config.open_stdin, Some(true));
    let env = config.env.as_ref().unwrap();
    assert!(env.contains(&"TSK_CONTAINER=1".to_string()));
    assert!(env.contains(&"TSK_TASK_ID=test-task-id".to_string()));
    let start_calls = mock_client.start_container_calls.lock().unwrap();
    assert_eq!(start_calls.len(), 2);
    let remove_calls = mock_client.remove_container_calls.lock().unwrap();
    assert_eq!(remove_calls.len(), 2);
    assert_eq!(remove_calls[0].0, "test-container-id-1");
}
// A non-zero container exit forces success=false and a synthesized message;
// the proxy network is still torn down.
#[tokio::test]
async fn test_run_task_container_non_zero_exit() {
    let mock_client = Arc::new(TrackedDockerClient {
        exit_code: 1,
        ..Default::default()
    });
    let ctx = AppContext::builder().build();
    let manager = DockerManager::new(&ctx, mock_client.clone(), None);
    let task = create_test_task(false);
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    let result = manager.run_task_container("tsk/base", &task, &agent).await;
    assert!(result.is_ok());
    let (output, task_result) = result.unwrap();
    assert!(!task_result.success);
    assert!(
        task_result
            .message
            .contains("Container exited with status 1")
    );
    assert!(output.contains("Container logs"));
    let remove_calls = mock_client.remove_container_calls.lock().unwrap();
    assert_eq!(remove_calls.len(), 2);
    assert_eq!(remove_calls[0].0, "test-container-id-1");
    drop(remove_calls);
    let disconnect_calls = mock_client.disconnect_network_calls.lock().unwrap();
    assert_eq!(disconnect_calls.len(), 1);
    drop(disconnect_calls);
    let remove_network_calls = mock_client.remove_network_calls.lock().unwrap();
    assert_eq!(remove_network_calls.len(), 1);
}
// A proxy/network setup failure aborts the run before any container starts
// and surfaces a retry-oriented error message.
#[tokio::test]
async fn test_run_task_container_network_setup_fails() {
    let mock_client = TrackedDockerClient {
        network_exists: false,
        create_network_error: Some("Docker daemon not running".to_string()),
        ..Default::default()
    };
    let mock_client = Arc::new(mock_client);
    let ctx = AppContext::builder().build();
    let manager = DockerManager::new(&ctx, mock_client.clone(), None);
    let task = create_test_task(false);
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    let result = manager.run_task_container("tsk/base", &task, &agent).await;
    assert!(result.is_err());
    let error_msg = result.unwrap_err();
    assert!(error_msg.contains("Failed to ensure proxy is running and healthy"));
    assert!(
        error_msg.contains("Failed to ensure network exists")
            || error_msg.contains("Failed to create network")
    );
    let start_calls = mock_client.start_container_calls.lock().unwrap();
    assert_eq!(start_calls.len(), 0);
}
// End-to-end check of the generated container configuration: naming, image,
// working dir, command, network wiring, extra_hosts, resource limits, bind
// order, proxy env vars, security options, and network lifecycle calls.
#[tokio::test]
async fn test_container_configuration() {
    let agent_network = "tsk-agent-test-task-id";
    let mock_client = Arc::new(TrackedDockerClient {
        inspect_container_response: serde_json::json!({
            "State": { "Running": true },
            "NetworkSettings": {
                "Networks": {
                    agent_network: { "IPAddress": "172.18.0.2" }
                }
            }
        })
        .to_string(),
        ..Default::default()
    });
    let ctx = AppContext::builder().build();
    let manager = DockerManager::new(&ctx, mock_client.clone(), None);
    let task = create_test_task(false);
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    let _ = manager.run_task_container("tsk/base", &task, &agent).await;
    let create_calls = mock_client.create_container_calls.lock().unwrap();
    assert_eq!(create_calls.len(), 2);
    let pcn = default_proxy_container_name();
    let (proxy_options, _proxy_config) = &create_calls[0];
    assert_eq!(proxy_options.as_ref().unwrap().name, Some(pcn.clone()));
    let (options, config) = &create_calls[1];
    assert!(
        options
            .as_ref()
            .unwrap()
            .name
            .as_ref()
            .unwrap()
            .starts_with("tsk-")
    );
    assert_eq!(
        options.as_ref().unwrap().name,
        Some("tsk-test-task-id".to_string())
    );
    assert_eq!(config.image, Some("tsk/base".to_string()));
    assert_eq!(config.working_dir, Some("/workspace/default".to_string()));
    assert_eq!(config.user, Some(CONTAINER_USER.to_string()));
    let actual_cmd = config.cmd.as_ref().unwrap();
    assert_eq!(actual_cmd.len(), 3);
    assert_eq!(actual_cmd[0], "sh");
    assert_eq!(actual_cmd[1], "-c");
    assert!(actual_cmd[2].contains("claude"));
    assert!(config.entrypoint.is_none());
    let host_config = config.host_config.as_ref().unwrap();
    assert_eq!(
        host_config.network_mode,
        Some("tsk-agent-test-task-id".to_string())
    );
    // The proxy's hostname must be pinned to its internal-network IP.
    let extra_hosts = host_config
        .extra_hosts
        .as_ref()
        .expect("extra_hosts should be set");
    assert_eq!(extra_hosts.len(), 1);
    assert!(extra_hosts[0].contains(&pcn));
    assert!(extra_hosts[0].contains("172.18.0.2"));
    let default_resolved = crate::context::ResolvedConfig::default();
    assert_eq!(
        host_config.memory,
        Some(default_resolved.memory_limit_bytes())
    );
    assert_eq!(
        host_config.cpu_quota,
        Some(default_resolved.cpu_quota_microseconds())
    );
    // Bind order: repo, agent config, instructions, output.
    let binds = host_config.binds.as_ref().unwrap();
    assert_eq!(binds.len(), 4);
    assert!(binds[0].contains("/tmp/test-repo:/workspace/default"));
    assert!(binds[1].contains(":/home/agent/.claude"));
    assert!(binds[2].contains(":/instructions:ro"));
    assert!(binds[3].contains(":/output"));
    let env = config.env.as_ref().unwrap();
    assert!(env.contains(&format!("HTTP_PROXY=http://{pcn}:3128")));
    assert!(env.contains(&format!("HTTPS_PROXY=http://{pcn}:3128")));
    assert!(env.contains(&format!("NO_PROXY=localhost,127.0.0.1,{pcn}")));
    assert!(env.contains(&format!("no_proxy=localhost,127.0.0.1,{pcn}")));
    assert!(
        env.iter().any(|e| e.starts_with("JAVA_TOOL_OPTIONS=")),
        "JAVA_TOOL_OPTIONS should be set for proxy"
    );
    assert!(
        host_config.security_opt.is_none(),
        "security_opt should be None when dind is disabled"
    );
    let cap_drop = host_config.cap_drop.as_ref().unwrap();
    assert!(
        cap_drop.contains(&"SETUID".to_string()),
        "SETUID should be dropped when dind is disabled"
    );
    assert!(
        cap_drop.contains(&"SETGID".to_string()),
        "SETGID should be dropped when dind is disabled"
    );
    drop(create_calls);
    // Network lifecycle: create, connect proxy, disconnect, remove.
    let create_internal_network_calls =
        mock_client.create_internal_network_calls.lock().unwrap();
    assert_eq!(create_internal_network_calls.len(), 1);
    assert_eq!(create_internal_network_calls[0], "tsk-agent-test-task-id");
    drop(create_internal_network_calls);
    let connect_calls = mock_client.connect_network_calls.lock().unwrap();
    assert_eq!(connect_calls.len(), 1);
    assert_eq!(
        connect_calls[0],
        (pcn.clone(), "tsk-agent-test-task-id".to_string())
    );
    drop(connect_calls);
    let disconnect_calls = mock_client.disconnect_network_calls.lock().unwrap();
    assert_eq!(disconnect_calls.len(), 1);
    assert_eq!(
        disconnect_calls[0],
        (pcn, "tsk-agent-test-task-id".to_string())
    );
    drop(disconnect_calls);
    let remove_network_calls = mock_client.remove_network_calls.lock().unwrap();
    assert_eq!(remove_network_calls.len(), 1);
    assert_eq!(remove_network_calls[0], "tsk-agent-test-task-id");
}
#[tokio::test]
async fn test_run_task_container_with_instructions_file() {
let mock_client = Arc::new(TrackedDockerClient::default());
let ctx = AppContext::builder().build();
let manager = DockerManager::new(&ctx, mock_client.clone(), None);
let mut task = create_test_task(false);
task.instructions_file = "/tmp/tsk-test/instructions.txt".to_string();
let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
let result = manager.run_task_container("tsk/base", &task, &agent).await;
assert!(result.is_ok());
let create_calls = mock_client.create_container_calls.lock().unwrap();
assert_eq!(create_calls.len(), 2);
let task_container_config = &create_calls[1].1;
let host_config = task_container_config.host_config.as_ref().unwrap();
let binds = host_config.binds.as_ref().unwrap();
assert_eq!(binds.len(), 4); assert!(binds[2].contains("/tmp/tsk-test:/instructions:ro"));
assert!(binds[3].contains(":/output"));
}
#[tokio::test]
async fn test_relative_path_conversion() {
    // The repository bind source must be an absolute host path mounted at
    // /workspace/default, followed by the three standard mounts.
    let docker = Arc::new(TrackedDockerClient::default());
    let ctx = AppContext::builder().build();
    let manager = DockerManager::new(&ctx, docker.clone(), None);

    let scratch = tempfile::TempDir::new().unwrap();
    let repo_path = scratch.path().join("test-repo");
    let mut task = create_test_task(false);
    task.copied_repo_path = Some(repo_path.clone());

    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    let result = manager.run_task_container("tsk/base", &task, &agent).await;
    assert!(result.is_ok());

    let calls = docker.create_container_calls.lock().unwrap();
    assert_eq!(calls.len(), 2);
    let (_, task_config) = &calls[1];
    let binds = task_config
        .host_config
        .as_ref()
        .unwrap()
        .binds
        .as_ref()
        .unwrap();
    assert_eq!(binds.len(), 4);

    let repo_bind = &binds[0];
    assert!(repo_bind.starts_with('/'));
    assert!(repo_bind.contains("test-repo"));
    assert!(repo_bind.ends_with(":/workspace/default"));
    assert!(binds[1].contains(":/home/agent/.claude"));
    assert!(binds[2].contains(":/instructions:ro"));
    assert!(binds[3].contains(":/output"));
}
#[tokio::test]
async fn test_project_volume_mounts_bind() {
    use crate::context::{BindMount, SharedConfig, TskConfig, VolumeMount};
    use std::collections::HashMap;
    // A bind volume configured for the task's project ("default") must be
    // appended to the task container's binds as "host:container" — writable,
    // so no ":ro" suffix — on top of the four standard mounts.
    let mock_client = Arc::new(TrackedDockerClient::default());
    let mut project_configs = HashMap::new();
    project_configs.insert(
        "default".to_string(),
        SharedConfig {
            volumes: vec![VolumeMount::Bind(BindMount {
                host: "/host/cache".to_string(),
                container: "/container/cache".to_string(),
                readonly: false,
            })],
            ..Default::default()
        },
    );
    let tsk_config = TskConfig {
        project: project_configs,
        ..Default::default()
    };
    let ctx = AppContext::builder().with_tsk_config(tsk_config).build();
    let manager = DockerManager::new(&ctx, mock_client.clone(), None);
    let task = create_test_task(false);
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    let result = manager.run_task_container("tsk/base", &task, &agent).await;
    assert!(result.is_ok());
    let create_calls = mock_client.create_container_calls.lock().unwrap();
    // Guard the [1] index with an explicit length check, consistent with the
    // other run_task_container tests, so a missing call fails clearly.
    assert_eq!(create_calls.len(), 2);
    let task_container_config = &create_calls[1].1;
    let host_config = task_container_config.host_config.as_ref().unwrap();
    let binds = host_config.binds.as_ref().unwrap();
    assert_eq!(binds.len(), 5);
    assert!(
        binds
            .iter()
            .any(|b| b.contains("/host/cache:/container/cache"))
    );
}
#[tokio::test]
async fn test_project_volume_mounts_named() {
    use crate::context::{NamedVolume, SharedConfig, TskConfig, VolumeMount};
    use std::collections::HashMap;
    // A named volume configured for the project must be mounted with a
    // "tsk-" prefix on the volume name ("tsk-build-cache").
    let mock_client = Arc::new(TrackedDockerClient::default());
    let mut project_configs = HashMap::new();
    project_configs.insert(
        "default".to_string(),
        SharedConfig {
            volumes: vec![VolumeMount::Named(NamedVolume {
                name: "build-cache".to_string(),
                container: "/container/cache".to_string(),
                readonly: false,
            })],
            ..Default::default()
        },
    );
    let tsk_config = TskConfig {
        project: project_configs,
        ..Default::default()
    };
    let ctx = AppContext::builder().with_tsk_config(tsk_config).build();
    let manager = DockerManager::new(&ctx, mock_client.clone(), None);
    let task = create_test_task(false);
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    let result = manager.run_task_container("tsk/base", &task, &agent).await;
    assert!(result.is_ok());
    let create_calls = mock_client.create_container_calls.lock().unwrap();
    // Guard the [1] index with an explicit length check, consistent with the
    // other run_task_container tests, so a missing call fails clearly.
    assert_eq!(create_calls.len(), 2);
    let task_container_config = &create_calls[1].1;
    let host_config = task_container_config.host_config.as_ref().unwrap();
    let binds = host_config.binds.as_ref().unwrap();
    assert_eq!(binds.len(), 5);
    assert!(
        binds
            .iter()
            .any(|b| b.contains("tsk-build-cache:/container/cache"))
    );
}
#[tokio::test]
async fn test_project_volume_mounts_readonly() {
    use crate::context::{BindMount, SharedConfig, TskConfig, VolumeMount};
    use std::collections::HashMap;
    // A bind volume with readonly = true must carry the ":ro" suffix in the
    // generated bind string.
    let mock_client = Arc::new(TrackedDockerClient::default());
    let mut project_configs = HashMap::new();
    project_configs.insert(
        "default".to_string(),
        SharedConfig {
            volumes: vec![VolumeMount::Bind(BindMount {
                host: "/etc/ssl/certs".to_string(),
                container: "/etc/ssl/certs".to_string(),
                readonly: true,
            })],
            ..Default::default()
        },
    );
    let tsk_config = TskConfig {
        project: project_configs,
        ..Default::default()
    };
    let ctx = AppContext::builder().with_tsk_config(tsk_config).build();
    let manager = DockerManager::new(&ctx, mock_client.clone(), None);
    let task = create_test_task(false);
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    let result = manager.run_task_container("tsk/base", &task, &agent).await;
    assert!(result.is_ok());
    let create_calls = mock_client.create_container_calls.lock().unwrap();
    // Guard the [1] index with an explicit length check, consistent with the
    // other run_task_container tests, so a missing call fails clearly.
    assert_eq!(create_calls.len(), 2);
    let task_container_config = &create_calls[1].1;
    let host_config = task_container_config.host_config.as_ref().unwrap();
    let binds = host_config.binds.as_ref().unwrap();
    assert_eq!(binds.len(), 5);
    assert!(
        binds
            .iter()
            .any(|b| b.contains("/etc/ssl/certs:/etc/ssl/certs:ro"))
    );
}
#[tokio::test]
async fn test_no_project_volumes_when_project_not_configured() {
    // A task whose project has no entry in the config must get only the four
    // standard binds — no extra project volumes.
    let mock_client = Arc::new(TrackedDockerClient::default());
    let ctx = AppContext::builder().build();
    let manager = DockerManager::new(&ctx, mock_client.clone(), None);
    let mut task = create_test_task(false);
    task.project = "unconfigured-project".to_string();
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    let result = manager.run_task_container("tsk/base", &task, &agent).await;
    assert!(result.is_ok());
    let create_calls = mock_client.create_container_calls.lock().unwrap();
    // Guard the [1] index with an explicit length check, consistent with the
    // other run_task_container tests, so a missing call fails clearly.
    assert_eq!(create_calls.len(), 2);
    let task_container_config = &create_calls[1].1;
    let host_config = task_container_config.host_config.as_ref().unwrap();
    let binds = host_config.binds.as_ref().unwrap();
    assert_eq!(binds.len(), 4);
}
#[tokio::test]
async fn test_project_env_vars() {
    use crate::context::{EnvVar, SharedConfig, TskConfig};
    use std::collections::HashMap;
    // Env vars configured for the task's project must appear verbatim as
    // NAME=value entries in the task container's environment.
    let mock_client = Arc::new(TrackedDockerClient::default());
    let mut project_configs = HashMap::new();
    project_configs.insert(
        "default".to_string(),
        SharedConfig {
            env: vec![
                EnvVar {
                    name: "DATABASE_URL".to_string(),
                    value: "postgres://tsk-proxy:5432/mydb".to_string(),
                },
                EnvVar {
                    name: "DEBUG".to_string(),
                    value: "true".to_string(),
                },
            ],
            ..Default::default()
        },
    );
    let tsk_config = TskConfig {
        project: project_configs,
        ..Default::default()
    };
    let ctx = AppContext::builder().with_tsk_config(tsk_config).build();
    let manager = DockerManager::new(&ctx, mock_client.clone(), None);
    let task = create_test_task(false);
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    let result = manager.run_task_container("tsk/base", &task, &agent).await;
    assert!(result.is_ok());
    let create_calls = mock_client.create_container_calls.lock().unwrap();
    // Guard the [1] index with an explicit length check, consistent with the
    // other run_task_container tests, so a missing call fails clearly.
    assert_eq!(create_calls.len(), 2);
    let task_container_config = &create_calls[1].1;
    let env = task_container_config.env.as_ref().unwrap();
    assert!(
        env.iter()
            .any(|e| e == "DATABASE_URL=postgres://tsk-proxy:5432/mydb")
    );
    assert!(env.iter().any(|e| e == "DEBUG=true"));
}
#[tokio::test]
async fn test_python_stack_sets_pythonpath() {
    // A task with stack = "python" must get PYTHONPATH and working_dir both
    // pointing at the project's workspace directory.
    let mock_client = Arc::new(TrackedDockerClient::default());
    let ctx = AppContext::builder().build();
    let manager = DockerManager::new(&ctx, mock_client.clone(), None);
    let mut task = create_test_task(false);
    task.stack = "python".to_string();
    task.project = "my-python-app".to_string();
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    // Previously the result was discarded with `let _ =`; assert success so a
    // failed run fails here instead of as an index panic below.
    let result = manager.run_task_container("tsk/base", &task, &agent).await;
    assert!(result.is_ok());
    let create_calls = mock_client.create_container_calls.lock().unwrap();
    assert_eq!(create_calls.len(), 2);
    let task_container_config = &create_calls[1].1;
    let env = task_container_config.env.as_ref().unwrap();
    assert!(env.contains(&"PYTHONPATH=/workspace/my-python-app".to_string()));
    assert_eq!(
        task_container_config.working_dir,
        Some("/workspace/my-python-app".to_string())
    );
}
#[tokio::test]
async fn test_non_python_stack_no_pythonpath() {
    // The default (non-python) stack must not receive a PYTHONPATH entry.
    let mock_client = Arc::new(TrackedDockerClient::default());
    let ctx = AppContext::builder().build();
    let manager = DockerManager::new(&ctx, mock_client.clone(), None);
    let task = create_test_task(false);
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    // Previously the result was discarded with `let _ =`; assert success so a
    // failed run fails here instead of as an index panic below.
    let result = manager.run_task_container("tsk/base", &task, &agent).await;
    assert!(result.is_ok());
    let create_calls = mock_client.create_container_calls.lock().unwrap();
    assert_eq!(create_calls.len(), 2);
    let task_container_config = &create_calls[1].1;
    let env = task_container_config.env.as_ref().unwrap();
    assert!(!env.iter().any(|e| e.starts_with("PYTHONPATH=")));
}
#[tokio::test]
async fn test_no_project_env_vars_when_project_not_configured() {
    // A task whose project has no config entry must not inherit another
    // project's env vars (DATABASE_URL / DEBUG from the sibling test).
    let mock_client = Arc::new(TrackedDockerClient::default());
    let ctx = AppContext::builder().build();
    let manager = DockerManager::new(&ctx, mock_client.clone(), None);
    let mut task = create_test_task(false);
    task.project = "unconfigured-project".to_string();
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    let result = manager.run_task_container("tsk/base", &task, &agent).await;
    assert!(result.is_ok());
    let create_calls = mock_client.create_container_calls.lock().unwrap();
    // Guard the [1] index with an explicit length check, consistent with the
    // other run_task_container tests, so a missing call fails clearly.
    assert_eq!(create_calls.len(), 2);
    let task_container_config = &create_calls[1].1;
    let env = task_container_config.env.as_ref().unwrap();
    assert!(
        !env.iter()
            .any(|e| e.starts_with("DATABASE_URL=") || e.starts_with("DEBUG="))
    );
}
#[tokio::test]
async fn test_run_task_container_no_network_isolation() {
    // With network_isolation disabled: a single container is created, no
    // proxy env vars are injected, no custom network is touched, and the
    // NET_RAW capability is not dropped.
    let docker = Arc::new(TrackedDockerClient::default());
    let ctx = AppContext::builder().build();
    let manager = DockerManager::new(&ctx, docker.clone(), None);

    let mut task = create_test_task(false);
    task.network_isolation = false;
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    let result = manager.run_task_container("tsk/base", &task, &agent).await;
    assert!(result.is_ok());

    {
        let calls = docker.create_container_calls.lock().unwrap();
        assert_eq!(calls.len(), 1);
        let (_, config) = &calls[0];

        let env = config.env.as_ref().unwrap();
        for prefix in ["HTTP_PROXY=", "HTTPS_PROXY=", "NO_PROXY=", "no_proxy="] {
            assert!(!env.iter().any(|e| e.starts_with(prefix)));
        }
        assert!(
            !env.iter().any(|e| e.starts_with("JAVA_TOOL_OPTIONS=")),
            "JAVA_TOOL_OPTIONS should NOT be set when proxy is disabled"
        );
        assert!(env.contains(&"TSK_CONTAINER=1".to_string()));
        assert!(env.contains(&"TSK_TASK_ID=test-task-id".to_string()));

        let host_config = config.host_config.as_ref().unwrap();
        assert!(
            host_config.network_mode.is_none(),
            "network_mode should be None when isolation is disabled"
        );
        let cap_drop = host_config.cap_drop.as_ref().unwrap();
        assert!(
            !cap_drop.contains(&"NET_RAW".to_string()),
            "NET_RAW should not be dropped"
        );
        assert!(
            cap_drop.contains(&"NET_ADMIN".to_string()),
            "NET_ADMIN should still be dropped"
        );
    } // guard dropped here before locking the other call logs

    assert_eq!(
        docker.create_internal_network_calls.lock().unwrap().len(),
        0
    );
    assert_eq!(docker.connect_network_calls.lock().unwrap().len(), 0);
    assert_eq!(docker.disconnect_network_calls.lock().unwrap().len(), 0);
    assert_eq!(docker.remove_network_calls.lock().unwrap().len(), 0);
}
#[tokio::test]
async fn test_cleanup_on_container_create_failure() {
    // When create_container fails, the error propagates and cleanup runs:
    // one container removal, one network disconnect, one network removal.
    let docker = Arc::new(TrackedDockerClient {
        create_container_error: Some("out of disk space".to_string()),
        ..Default::default()
    });
    let ctx = AppContext::builder().build();
    let manager = DockerManager::new(&ctx, docker.clone(), None);
    let task = create_test_task(false);
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());

    let err = manager
        .run_task_container("tsk/base", &task, &agent)
        .await
        .unwrap_err();
    assert!(err.contains("out of disk space"));

    assert_eq!(docker.remove_container_calls.lock().unwrap().len(), 1);
    assert_eq!(docker.disconnect_network_calls.lock().unwrap().len(), 1);
    assert_eq!(docker.remove_network_calls.lock().unwrap().len(), 1);
}
#[tokio::test]
async fn test_cleanup_on_start_container_failure() {
    // When start_container fails, both created containers are removed (the
    // first-created one first) and the network is torn down.
    let docker = Arc::new(TrackedDockerClient {
        start_container_error: Some("container runtime error".to_string()),
        ..Default::default()
    });
    let ctx = AppContext::builder().build();
    let manager = DockerManager::new(&ctx, docker.clone(), None);
    let task = create_test_task(false);
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());

    let err = manager
        .run_task_container("tsk/base", &task, &agent)
        .await
        .unwrap_err();
    assert!(err.contains("container runtime error"));

    {
        let removes = docker.remove_container_calls.lock().unwrap();
        assert_eq!(removes.len(), 2);
        assert_eq!(removes[0].0, "test-container-id-1");
    } // guard dropped before locking the network call logs
    assert_eq!(docker.disconnect_network_calls.lock().unwrap().len(), 1);
    assert_eq!(docker.remove_network_calls.lock().unwrap().len(), 1);
}
#[tokio::test]
async fn test_dind_security_relaxations() {
    // A dind task keeps SETUID/SETGID, gets a seccomp profile plus
    // apparmor=unconfined in security_opt, still drops NET_ADMIN/SYS_ADMIN,
    // and sets BUILDAH_ISOLATION=chroot in the environment.
    let mock_client = Arc::new(TrackedDockerClient::default());
    let ctx = AppContext::builder().build();
    let manager = DockerManager::new(&ctx, mock_client.clone(), None);
    let mut task = create_test_task(false);
    task.dind = true;
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    // Previously the result was discarded with `let _ =`; assert success so a
    // failed run fails here instead of as an index panic below.
    let result = manager.run_task_container("tsk/base", &task, &agent).await;
    assert!(result.is_ok());
    let create_calls = mock_client.create_container_calls.lock().unwrap();
    assert_eq!(create_calls.len(), 2);
    let task_config = &create_calls[1].1;
    let host_config = task_config.host_config.as_ref().unwrap();
    let security_opt = host_config
        .security_opt
        .as_ref()
        .expect("security_opt should be Some when dind is enabled");
    assert!(
        security_opt.iter().any(|s| s.starts_with("seccomp=")),
        "Should have seccomp profile"
    );
    assert!(
        security_opt.iter().any(|s| s == "apparmor=unconfined"),
        "Should have apparmor=unconfined"
    );
    let cap_drop = host_config.cap_drop.as_ref().unwrap();
    assert!(
        !cap_drop.contains(&"SETUID".to_string()),
        "SETUID should not be dropped when dind is enabled"
    );
    assert!(
        !cap_drop.contains(&"SETGID".to_string()),
        "SETGID should not be dropped when dind is enabled"
    );
    assert!(cap_drop.contains(&"NET_ADMIN".to_string()));
    assert!(cap_drop.contains(&"SYS_ADMIN".to_string()));
    let env = task_config.env.as_ref().unwrap();
    assert!(
        env.contains(&"BUILDAH_ISOLATION=chroot".to_string()),
        "BUILDAH_ISOLATION=chroot should be set when dind is enabled"
    );
}
#[tokio::test]
async fn test_non_dind_security_defaults() {
    // Without dind: no security_opt relaxations, the full capability set is
    // dropped (including SETUID/SETGID), and BUILDAH_ISOLATION is absent.
    let mock_client = Arc::new(TrackedDockerClient::default());
    let ctx = AppContext::builder().build();
    let manager = DockerManager::new(&ctx, mock_client.clone(), None);
    let task = create_test_task(false);
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    // Previously the result was discarded with `let _ =`; assert success so a
    // failed run fails here instead of as an index panic below.
    let result = manager.run_task_container("tsk/base", &task, &agent).await;
    assert!(result.is_ok());
    let create_calls = mock_client.create_container_calls.lock().unwrap();
    assert_eq!(create_calls.len(), 2);
    let task_config = &create_calls[1].1;
    let host_config = task_config.host_config.as_ref().unwrap();
    assert!(
        host_config.security_opt.is_none(),
        "security_opt should be None when dind is disabled"
    );
    let cap_drop = host_config.cap_drop.as_ref().unwrap();
    assert!(
        cap_drop.contains(&"SETUID".to_string()),
        "SETUID should be dropped when dind is disabled"
    );
    assert!(
        cap_drop.contains(&"SETGID".to_string()),
        "SETGID should be dropped when dind is disabled"
    );
    assert!(cap_drop.contains(&"NET_ADMIN".to_string()));
    assert!(cap_drop.contains(&"SYS_ADMIN".to_string()));
    assert!(cap_drop.contains(&"SYS_PTRACE".to_string()));
    assert!(cap_drop.contains(&"DAC_OVERRIDE".to_string()));
    assert!(cap_drop.contains(&"AUDIT_WRITE".to_string()));
    let env = task_config.env.as_ref().unwrap();
    assert!(
        !env.contains(&"BUILDAH_ISOLATION=chroot".to_string()),
        "BUILDAH_ISOLATION should not be set when dind is disabled"
    );
}
#[tokio::test]
async fn test_sudo_security_relaxations() {
    // A sudo-enabled resolved config keeps SETUID/SETGID available but does
    // NOT get the dind-only security_opt relaxations.
    let mock_client = Arc::new(TrackedDockerClient::default());
    let resolved = crate::context::ResolvedConfig {
        sudo: true,
        ..Default::default()
    };
    let config_json = serde_json::to_string(&resolved).unwrap();
    let ctx = AppContext::builder().build();
    let manager = DockerManager::new(&ctx, mock_client.clone(), None);
    let mut task = create_test_task(false);
    task.resolved_config = Some(config_json);
    let agent = crate::agent::ClaudeAgent::with_tsk_env(ctx.tsk_env());
    // Previously the result was discarded with `let _ =`; assert success so a
    // failed run fails here instead of as an index panic below.
    let result = manager.run_task_container("tsk/base", &task, &agent).await;
    assert!(result.is_ok());
    let create_calls = mock_client.create_container_calls.lock().unwrap();
    assert_eq!(create_calls.len(), 2);
    let task_config = &create_calls[1].1;
    let host_config = task_config.host_config.as_ref().unwrap();
    let cap_drop = host_config.cap_drop.as_ref().unwrap();
    assert!(
        !cap_drop.contains(&"SETUID".to_string()),
        "SETUID should not be dropped when sudo is enabled"
    );
    assert!(
        !cap_drop.contains(&"SETGID".to_string()),
        "SETGID should not be dropped when sudo is enabled"
    );
    assert!(
        host_config.security_opt.is_none(),
        "security_opt should be None when only sudo is enabled (not dind)"
    );
}
}