use anyhow::{Context, Result, bail};
use std::process::Command;
use crate::config::{self, Config, Language, Utility};
/// Render the sandbox Dockerfile from `config`.
///
/// Always emits: a Rust base image plus common apt packages, a Node.js
/// install (Claude Code is an npm package, so Node is installed even when
/// it is not a selected language), the Claude Code install fronted by a
/// permission wrapper, room/room-ralph via cargo, the GitHub CLI, and a
/// non-root `agent` user with the entrypoint script. Every other section
/// is appended only when the matching `Language`/`Utility` is present in
/// `config.environment`. Sections are joined with blank lines.
pub fn generate_dockerfile(config: &Config) -> String {
let languages = &config.environment.languages;
let utilities = &config.environment.utilities;
// Unconditional base layer.
let mut sections = vec![
r#"FROM rust:bookworm
# Base dependencies (always installed)
RUN apt-get update && apt-get install -y \
git openssh-client curl jq tmux pkg-config libssl-dev gosu sqlite3 unzip \
&& rm -rf /var/lib/apt/lists/*"#
.to_string(),
];
// Node: full toolchain (corepack/yarn/pnpm/turbo) when requested as a
// language, otherwise a minimal nodejs install so npm is available.
if languages.contains(&Language::Node) {
sections.push(
r#"# Node.js
RUN curl -fsSL https://deb.nodesource.com/setup_22.x | bash - \
&& apt-get install -y nodejs \
&& rm -rf /var/lib/apt/lists/* \
&& corepack enable && corepack prepare yarn@1.22.22 --activate \
&& corepack prepare pnpm@latest --activate \
&& npm install -g turbo"#
.to_string(),
);
} else {
sections.push(
r#"# Node.js (required for Claude Code)
RUN curl -fsSL https://deb.nodesource.com/setup_22.x | bash - \
&& apt-get install -y nodejs \
&& rm -rf /var/lib/apt/lists/*"#
.to_string(),
);
}
// Optional Python toolchain.
if languages.contains(&Language::Python) {
sections.push(
r#"# Python
RUN apt-get update && apt-get install -y \
python3 python3-pip python3-venv \
&& rm -rf /var/lib/apt/lists/*"#
.to_string(),
);
}
// Claude Code: the real binary is renamed to `<path>-real` so the wrapper
// script (generate_claude_wrapper) can be installed in its place.
sections.push(
r#"# Claude Code
RUN npm install -g @anthropic-ai/claude-code \
&& mv "$(which claude)" "$(which claude)-real"
COPY claude-wrapper.sh /usr/local/bin/claude
RUN chmod +x /usr/local/bin/claude"#
.to_string(),
);
// Optional utilities, each gated on its `Utility` flag.
if utilities.contains(&Utility::Glow) {
sections.push(
r#"# glow (markdown reader)
RUN mkdir -p /etc/apt/keyrings \
&& curl -fsSL https://repo.charm.sh/apt/gpg.key | gpg --dearmor -o /etc/apt/keyrings/charm.gpg \
&& echo "deb [signed-by=/etc/apt/keyrings/charm.gpg] https://repo.charm.sh/apt/ * *" \
> /etc/apt/sources.list.d/charm.list \
&& apt-get update && apt-get install -y glow \
&& rm -rf /var/lib/apt/lists/*"#
.to_string(),
);
}
if utilities.contains(&Utility::Playwright) {
sections.push(
r#"# Playwright (browser automation)
ENV PLAYWRIGHT_BROWSERS_PATH=/ms-playwright
RUN npm install -g playwright@latest \
&& npx playwright install --with-deps chromium \
&& chmod -R 1777 /ms-playwright"#
.to_string(),
);
}
// Ansible is pip-installed below and therefore needs Python; add the
// Python layer here only when it wasn't already selected as a language.
if utilities.contains(&Utility::Ansible) && !languages.contains(&Language::Python) {
sections.push(
r#"# Python (required for Ansible)
RUN apt-get update && apt-get install -y \
python3 python3-pip python3-venv \
&& rm -rf /var/lib/apt/lists/*"#
.to_string(),
);
}
if utilities.contains(&Utility::Just) {
sections.push(
r#"# just (command runner)
RUN curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- --to /usr/local/bin"#
.to_string(),
);
}
if utilities.contains(&Utility::Mise) {
sections.push(
r#"# mise (tool version manager)
RUN curl https://mise.run | sh \
&& mv /root/.local/bin/mise /usr/local/bin/mise"#
.to_string(),
);
}
if utilities.contains(&Utility::Proto) {
sections.push(
r#"# proto (toolchain manager)
RUN curl -fsSL https://moonrepo.dev/install/proto.sh | bash -s -- --yes \
&& mv /root/.proto/bin/proto /usr/local/bin/proto"#
.to_string(),
);
}
if utilities.contains(&Utility::Pulumi) {
sections.push(
r#"# Pulumi (infrastructure as code)
RUN curl -fsSL https://get.pulumi.com | sh \
&& mv /root/.pulumi/bin/* /usr/local/bin/"#
.to_string(),
);
}
// Emitted after the Python fallback section above so pip3 exists.
if utilities.contains(&Utility::Ansible) {
sections.push(
r#"# Ansible (automation)
RUN pip3 install --break-system-packages ansible"#
.to_string(),
);
}
if utilities.contains(&Utility::AwsCli) {
sections.push(
r#"# AWS CLI
RUN curl -fsSL "https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m).zip" -o /tmp/awscliv2.zip \
&& unzip -q /tmp/awscliv2.zip -d /tmp \
&& /tmp/aws/install \
&& rm -rf /tmp/aws /tmp/awscliv2.zip"#
.to_string(),
);
}
if utilities.contains(&Utility::Terraform) {
sections.push(
r#"# Terraform
RUN curl -fsSL https://apt.releases.hashicorp.com/gpg | gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg \
&& echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com bookworm main" \
> /etc/apt/sources.list.d/hashicorp.list \
&& apt-get update && apt-get install -y terraform \
&& rm -rf /var/lib/apt/lists/*"#
.to_string(),
);
}
if utilities.contains(&Utility::Docker) {
sections.push(
r#"# Docker CLI + Compose plugin (Docker-outside-of-Docker via mounted socket)
RUN install -m 0755 -d /etc/apt/keyrings \
&& curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc \
&& chmod a+r /etc/apt/keyrings/docker.asc \
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian bookworm stable" \
> /etc/apt/sources.list.d/docker.list \
&& apt-get update && apt-get install -y docker-ce-cli docker-compose-plugin \
&& rm -rf /var/lib/apt/lists/*"#
.to_string(),
);
}
if utilities.contains(&Utility::Kubectl) {
sections.push(
r#"# kubectl (Kubernetes CLI)
RUN curl -fsSL "https://dl.k8s.io/release/$(curl -fsSL https://dl.k8s.io/release/stable.txt)/bin/linux/$(dpkg --print-architecture)/kubectl" \
-o /usr/local/bin/kubectl \
&& chmod +x /usr/local/bin/kubectl"#
.to_string(),
);
}
if utilities.contains(&Utility::Yq) {
sections.push(
r#"# yq (YAML processor)
RUN curl -fsSL "https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)" \
-o /usr/local/bin/yq \
&& chmod +x /usr/local/bin/yq"#
.to_string(),
);
}
// Lint/format components only; the base image already ships the toolchain.
if languages.contains(&Language::Rust) {
sections.push(
r#"# Rust components
RUN rustup component add clippy rustfmt"#
.to_string(),
);
}
// Unconditional tail: room tooling, GitHub CLI, agent user, entrypoint.
// AGENT_UID is a build arg so the in-container user matches the host UID
// (see generate_compose).
sections.push(
r#"# room + room-ralph (always installed)
RUN cargo install room-cli room-ralph
# GitHub CLI
RUN curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg \
| dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg \
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" \
> /etc/apt/sources.list.d/github-cli.list \
&& apt-get update && apt-get install -y gh \
&& rm -rf /var/lib/apt/lists/*
# Non-root user (UID matches host user to avoid permission issues)
ARG AGENT_UID=1000
RUN useradd -m -s /bin/bash -u $AGENT_UID agent
WORKDIR /workspaces
COPY entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
CMD ["sleep", "infinity"]"#
.to_string(),
);
// Blank line between sections keeps the Dockerfile readable.
sections.join("\n\n")
}
pub fn generate_compose(config: &Config) -> String {
let mut volumes = vec![
" - ./workspaces:/mnt/sandbox-root".to_string(),
" - claude-data:/home/agent/.claude".to_string(),
" - cargo-cache:/usr/local/cargo/registry".to_string(),
" - room-data:/home/agent/.room".to_string(),
];
if config.auth.mount_ssh {
volumes.push(" - ${SSH_AUTH_SOCK:-/dev/null}:/tmp/ssh-agent.sock:ro".to_string());
volumes.push(" - ~/.ssh/known_hosts:/home/agent/.ssh/known_hosts:ro".to_string());
}
if config.environment.utilities.contains(&Utility::Docker) {
volumes.push(" - /var/run/docker.sock:/var/run/docker.sock".to_string());
}
let volumes_str = volumes.join("\n");
let mut env_vars = Vec::new();
if config.auth.mount_ssh {
env_vars.push(" - SSH_AUTH_SOCK=/tmp/ssh-agent.sock".to_string());
}
let env_section = if env_vars.is_empty() {
String::new()
} else {
format!(" environment:\n{}\n", env_vars.join("\n"))
};
let host_uid = std::process::Command::new("id")
.args(["-u"])
.output()
.ok()
.and_then(|o| {
if o.status.success() {
Some(String::from_utf8_lossy(&o.stdout).trim().to_string())
} else {
None
}
})
.unwrap_or_else(|| "1000".to_string());
format!(
r#"services:
sandbox:
build:
context: .
args:
AGENT_UID: "{host_uid}"
container_name: {container}
volumes:
{volumes_str}
env_file: .env
{env_section} restart: unless-stopped
volumes:
claude-data:
cargo-cache:
room-data:
"#,
container = config.project.container_name,
)
}
/// Render the container entrypoint shell script from `config`.
///
/// The script runs as root first (phase 1): symlinks each directory under
/// /mnt/sandbox-root into /workspaces, makes the forwarded SSH-agent
/// socket and ~/.ssh usable by the agent user, grants the agent access to
/// a mounted Docker socket by matching its GID, distributes an optional
/// APP_ENV file to every workspace, fixes ownership of cached/home dirs,
/// seeds git config, ensures ~/.local/bin is on PATH, and starts the room
/// daemon + default room (best-effort). Phase 2 drops privileges with
/// `gosu agent` and execs the container command.
pub fn generate_entrypoint(config: &Config) -> String {
let room_name = &config.room.default;
// NOTE: `{{` / `}}` below are format! escapes for literal shell braces;
// only {room_name} is interpolated.
format!(
r#"#!/bin/bash
set -e
# === Phase 1: Root setup ===
# Symlink each workspace into /workspaces
mkdir -p /workspaces
for dir in /mnt/sandbox-root/*/; do
[ -d "$dir" ] || continue
name=$(basename "$dir")
if [ -L "/workspaces/$name" ]; then
echo "[entrypoint] WARNING: duplicate workspace name '$name' — skipping ${{dir}}"
continue
fi
ln -sfn "$dir" "/workspaces/$name"
done
echo "[entrypoint] Linked workspaces: $(ls /workspaces 2>/dev/null | tr '\n' ' ')"
# SSH agent forwarding: ensure the socket is accessible to the agent user.
# Private keys stay on the host — only the agent socket is forwarded.
if [ -S /tmp/ssh-agent.sock ]; then
chmod 777 /tmp/ssh-agent.sock 2>/dev/null || true
echo "[entrypoint] SSH agent socket available"
fi
# SSH known_hosts: ensure directory and permissions
mkdir -p /home/agent/.ssh
chmod 700 /home/agent/.ssh
chown -R agent:agent /home/agent/.ssh
# Docker socket: match GID so agent can use it
if [ -S /var/run/docker.sock ]; then
DOCKER_SOCK_GID=$(stat -c '%g' /var/run/docker.sock)
if getent group "$DOCKER_SOCK_GID" >/dev/null 2>&1; then
DOCKER_GROUP=$(getent group "$DOCKER_SOCK_GID" | cut -d: -f1)
else
groupadd -g "$DOCKER_SOCK_GID" dockerhost
DOCKER_GROUP=dockerhost
fi
usermod -aG "$DOCKER_GROUP" agent
echo "[entrypoint] Docker socket available (gid=$DOCKER_SOCK_GID, group=$DOCKER_GROUP)"
fi
# Distribute app .env to each workspace if APP_ENV points to a file
if [ -n "$APP_ENV" ] && [ -f "$APP_ENV" ]; then
for dir in /workspaces/*/; do
cp "$APP_ENV" "${{dir}}.env" 2>/dev/null || true
done
echo "[entrypoint] Distributed .env to all workspaces"
fi
# Fix ownership
chown -R agent:agent /home/agent/.claude /home/agent/.room 2>/dev/null || true
chown -R agent:agent /usr/local/cargo/registry /usr/local/cargo/git 2>/dev/null || true
chown agent:agent /workspaces
# Git config
gosu agent git config --global init.defaultBranch main
gosu agent git config --global user.email "agent@sandbox.dev"
gosu agent git config --global user.name "sandbox-agent"
if [ -d /home/agent/.ssh ]; then
gosu agent git config --global core.sshCommand \
"ssh -o StrictHostKeyChecking=accept-new"
fi
# Ensure ~/.local/bin is in PATH
grep -q '.local/bin' /home/agent/.bashrc 2>/dev/null || \
echo 'export PATH="$HOME/.local/bin:$PATH"' >> /home/agent/.bashrc
chown agent:agent /home/agent/.bashrc
# Start room daemon and create default room
gosu agent room daemon 2>/dev/null &
sleep 1
TOKEN=$(gosu agent room join "system" 2>/dev/null \
| python3 -c "import sys,json; print(json.load(sys.stdin)['token'])" 2>/dev/null || true)
if [ -n "$TOKEN" ]; then
gosu agent room create "{room_name}" -t "$TOKEN" 2>/dev/null || true
fi
echo "[entrypoint] Ready."
# === Phase 2: Drop to agent user ===
exec gosu agent "$@"
"#
)
}
/// Shell wrapper installed as `claude` in the image (the real binary is
/// renamed to `claude-real` by the Dockerfile).
///
/// Scans the arguments for `-p`/`--print` (headless mode) and, if found,
/// execs `claude-real` with `--dangerously-skip-permissions` prepended;
/// interactive invocations pass through unchanged.
pub fn generate_claude_wrapper() -> &'static str {
r#"#!/bin/bash
# Wrapper that injects --dangerously-skip-permissions for headless (-p) mode only.
for arg in "$@"; do
if [ "$arg" = "-p" ] || [ "$arg" = "--print" ]; then
exec claude-real --dangerously-skip-permissions "$@"
fi
done
exec claude-real "$@"
"#
}
pub fn write_assets(config: &Config) -> Result<()> {
let dir = config::sandbox_dir();
std::fs::create_dir_all(&dir)?;
std::fs::write(dir.join("Dockerfile"), generate_dockerfile(config))
.context("failed to write Dockerfile")?;
std::fs::write(dir.join("docker-compose.yml"), generate_compose(config))
.context("failed to write docker-compose.yml")?;
std::fs::write(dir.join("entrypoint.sh"), generate_entrypoint(config))
.context("failed to write entrypoint.sh")?;
std::fs::write(dir.join("claude-wrapper.sh"), generate_claude_wrapper())
.context("failed to write claude-wrapper.sh")?;
Ok(())
}
/// Resolve the docker-compose project name from the loaded config,
/// falling back to "room-sandbox" when the config cannot be loaded.
fn compose_project_name() -> String {
    match Config::load() {
        Ok(cfg) => cfg.project.container_name.clone(),
        Err(_) => "room-sandbox".to_string(),
    }
}
/// Run `docker compose` against the generated compose file, with the
/// resolved project name, forwarding `args` verbatim. Errors when the
/// process cannot be spawned or exits non-zero.
fn compose(args: &[&str]) -> Result<()> {
    let compose_file = config::sandbox_dir().join("docker-compose.yml");
    let project = compose_project_name();
    let mut cmd = Command::new("docker");
    cmd.args(["compose", "-p", &project, "-f"])
        .arg(compose_file)
        .args(args);
    let status = cmd
        .status()
        .with_context(|| format!("failed to run docker compose {}", args.join(" ")))?;
    if status.success() {
        Ok(())
    } else {
        bail!("docker compose {} failed", args.join(" "))
    }
}
/// Build the sandbox image (`docker compose build`).
pub fn build() -> Result<()> {
compose(&["build"])
}
/// Start the sandbox container detached (`docker compose up -d`).
pub fn up() -> Result<()> {
compose(&["up", "-d"])
}
/// Stop and remove the sandbox container (`docker compose down`).
pub fn down() -> Result<()> {
compose(&["down"])
}
/// Tear down the sandbox AND delete its named volumes
/// (`docker compose down -v`) — destroys cached state.
pub fn down_with_volumes() -> Result<()> {
compose(&["down", "-v"])
}
/// Follow container logs (`docker compose logs -f`); blocks until
/// interrupted.
pub fn logs() -> Result<()> {
compose(&["logs", "-f"])
}
/// Whether the sandbox container is currently running, per
/// `docker inspect -f '{{.State.Running}}'`. Any failure to query
/// (docker missing, container absent) is reported as not running.
pub fn is_running(config: &Config) -> bool {
    let probe = Command::new("docker")
        .args(["inspect", "-f", "{{.State.Running}}"])
        .arg(&config.project.container_name)
        .output();
    match probe {
        Ok(out) => String::from_utf8_lossy(&out.stdout).trim() == "true",
        Err(_) => false,
    }
}
/// Write per-agent CLAUDE.md role instructions and a room-ralph
/// personality file into the running container, via `docker exec`.
///
/// For each configured agent this creates the Claude project directory
/// (best-effort — mkdir failures are ignored), then streams the two
/// generated files through `sh -c "cat > '<path>'"` on the container's
/// stdin. Errors only when a write process cannot be spawned.
pub fn inject_agent_instructions(config: &Config) -> Result<()> {
let container = &config.project.container_name;
let room = &config.room.default;
for agent in &config.agents {
// Claude encodes the project path into the directory name
// (slashes become dashes), matching /mnt/sandbox-root/<name>.
let project_dir = format!(
"/home/agent/.claude/projects/-mnt-sandbox-root-{}",
agent.name
);
// Best-effort mkdir: ignore failures, the write below will surface
// real problems.
let _ = Command::new("docker")
.args(["exec", "-u", "agent"])
.arg(container)
.args(["mkdir", "-p", &project_dir])
.status();
let instructions = generate_role_instructions(&agent.name, &agent.role, room);
let personality = generate_personality_file(&agent.name, &agent.role);
// Two files per agent: instructions + personality prompt.
for (path, content) in [
(format!("{project_dir}/CLAUDE.md"), &instructions),
(
format!("/home/agent/.room/personality-{}.txt", agent.name),
&personality,
),
] {
// Ensure the parent directory exists inside the container.
if let Some(parent) = std::path::Path::new(&path).parent() {
let _ = Command::new("docker")
.args(["exec", "-u", "agent"])
.arg(container)
.args(["mkdir", "-p", &parent.to_string_lossy()])
.status();
}
// Stream the content through the container's stdin.
// NOTE(review): the path is single-quoted inside `sh -c`, which
// breaks if an agent name ever contains a single quote — confirm
// agent names are restricted upstream.
let mut child = Command::new("docker")
.args(["exec", "-i", "-u", "agent"])
.arg(container)
.args(["sh", "-c", &format!("cat > '{path}'")])
.stdin(std::process::Stdio::piped())
.spawn()
.with_context(|| format!("failed to write {path}"))?;
if let Some(mut stdin) = child.stdin.take() {
use std::io::Write;
stdin.write_all(content.as_bytes())?;
}
child.wait()?;
}
eprintln!(" [{}] {} instructions written", agent.role, agent.name);
}
Ok(())
}
/// Shared taskboard usage section appended to every agent's generated
/// CLAUDE.md, regardless of role.
const TASKBOARD_INSTRUCTIONS: &str = r#"## Taskboard
Use `/taskboard` commands to manage and claim work:
- `/taskboard` — view all tasks and their status
- `/taskboard add <title>` — create a new task
- `/taskboard assign <id> <agent>` — assign a task to an agent
- `/taskboard claim <id>` — claim a task for yourself
- `/taskboard status <id> <status>` — update task status (todo, in_progress, review, done)
- `/taskboard remove <id>` — remove a task
Always check the taskboard before asking for work. Claim tasks before starting.
Update task status as you progress."#;
/// Render the CLAUDE.md instruction document for one agent: a header
/// naming the agent and room, a role-specific section (coder / reviewer /
/// manager), the shared taskboard instructions, and communication rules.
fn generate_role_instructions(name: &str, role: &crate::config::AgentRole, room: &str) -> String {
use crate::config::AgentRole;
// One markdown section per role; the match is exhaustive over AgentRole.
let role_section = match role {
// Implements tasks and opens PRs.
AgentRole::Coder => {
r#"## Role: Coder
You are a **coder** agent. Your primary responsibilities:
- Pick up tasks from the taskboard and implement them
- Write clean, tested, production-quality code
- Create feature branches for your work
- Push changes and create pull requests when work is complete
- Report progress and blockers to the room
### Workflow
1. Check the taskboard (`/taskboard`) for available tasks
2. Claim a task (`/taskboard claim <id>`) before starting work
3. Update status to in_progress (`/taskboard status <id> in_progress`)
4. Implement the task on a feature branch
5. Run tests and ensure CI passes
6. Create a PR and notify the room
7. Update status to review (`/taskboard status <id> review`)
8. Move on to the next task"#
}
// Reviews PRs; explicitly forbidden from writing code.
AgentRole::Reviewer => {
r#"## Role: Reviewer
You are a **reviewer** agent. Your primary responsibilities:
- Review pull requests created by other agents
- Check code quality, correctness, and test coverage
- Leave constructive, specific feedback on PRs
- Approve PRs that meet quality standards
- Flag security issues, bugs, or architectural concerns
### Workflow
1. Check the taskboard for tasks in `review` status
2. Review the associated PR — check logic, edge cases, tests
3. Run the project's linter and test suite on the branch
4. Leave inline comments on specific issues via `gh pr review`
5. Approve or request changes with clear reasoning
6. Notify the room when review is complete
7. Update task status to done (`/taskboard status <id> done`) on approval
### Guidelines
- Do NOT write code or implement features yourself
- Focus on catching bugs, not style preferences
- If a PR is good, approve it quickly — don't block unnecessarily"#
}
// Breaks down goals, manages the taskboard, delegates to coders.
AgentRole::Manager => {
r#"## Role: Manager
You are a **manager/orchestrator** agent. Your primary responsibilities:
- Break down high-level goals into concrete tasks on the taskboard
- Post tasks for agents to pick up (`/taskboard add <title>`)
- Optionally assign tasks, or let agents self-assign
- Track progress and unblock stuck agents
- Coordinate between agents working on related features
- Prioritize work and manage the taskboard
### Workflow
1. Receive goals or feature requests from the human operator
2. Break them into well-defined, independent tasks
3. Post tasks to the taskboard (`/taskboard add <title>`)
4. Let agents claim tasks, or assign directly when coordinating dependent work
5. Monitor the taskboard and room for progress
6. Help resolve blockers and coordinate reviews
7. Request reviews when PRs are ready
### Guidelines
- Do NOT write code yourself — delegate to coders
- Keep tasks small and independently testable
- Ensure agents aren't working on conflicting changes
- Prefer letting agents self-assign — only assign directly when coordinating dependent work
- Escalate to the human operator when decisions are needed"#
}
};
// Assemble header + role section + shared taskboard text + comms rules.
format!(
r#"# Agent Instructions
You are **{name}**, operating in room **{room}**.
{role_section}
{TASKBOARD_INSTRUCTIONS}
## Communication
Use the room to coordinate with other agents and the human operator:
- Send updates when you start/finish tasks
- Ask for help if you're blocked
- Respond to messages directed at you (@{name})
"#
)
}
/// Render the room-ralph personality prompt for one agent role: a
/// role-specific workflow description plus a shared taskboard footer.
/// The agent name is currently unused (the prompt is purely role-based).
fn generate_personality_file(_name: &str, role: &crate::config::AgentRole) -> String {
use crate::config::AgentRole;
// One prompt per role; exhaustive over AgentRole.
let role_prompt = match role {
AgentRole::Coder => {
"You are a software engineer agent. Your workflow:\n\
1. Check the taskboard (`/taskboard`) for available tasks\n\
2. Claim a task and announce your plan in the room\n\
3. Implement on a feature branch — write clean, well-tested code\n\
4. Follow the project's conventions and run the test suite before committing\n\
5. Open a PR and notify the room when ready for review\n\
6. Update task status and pick up the next task\n\n\
Prefer small, focused changes over large refactors. One concern per PR."
}
AgentRole::Reviewer => {
"You are a code review agent. Your workflow:\n\
1. Check the taskboard for tasks in `review` status\n\
2. Review the PR — check correctness, test coverage, and edge cases\n\
3. Run the project's linter and test suite on the branch\n\
4. Leave clear, actionable feedback via `gh pr review`\n\
5. Approve or request changes with specific reasoning\n\
6. Mark task as done on the taskboard when approved\n\n\
You do not write feature code — you read and critique it. \
Flag security issues, performance concerns, and missing tests."
}
AgentRole::Manager => {
"You are a coordination agent. Your workflow:\n\
1. Receive goals or feature requests from the human operator\n\
2. Break them into well-defined, independently testable tasks\n\
3. Post tasks to the taskboard (`/taskboard add <title>`)\n\
4. Let agents self-assign, or assign directly for dependent work\n\
5. Monitor the taskboard and room for progress\n\
6. Help resolve blockers and request reviews when PRs are ready\n\n\
You read code and PRs but do not write code. Escalate to the human operator \
when architectural decisions or priority calls are needed."
}
};
// Shared footer: always prefer the taskboard over asking for work.
format!(
"{role_prompt}\n\n\
Always use `/taskboard` to check for and manage tasks. \
Never ask for work in the room if the taskboard has available tasks.\n"
)
}
/// Start the sandbox container if it is not already running; a no-op
/// when `is_running` reports true.
pub fn ensure_running(config: &Config) -> Result<()> {
    if is_running(config) {
        return Ok(());
    }
    eprintln!("Container not running — starting...");
    up()
}
pub fn exec(config: &Config, user: &str, workdir: &str, args: &[&str]) -> Result<()> {
let status = Command::new("docker")
.args(["exec", "-it", "-u", user, "-w", workdir])
.arg(&config.project.container_name)
.args(args)
.status()
.context("failed to docker exec")?;
if !status.success() {
bail!("docker exec failed with status {}", status);
}
Ok(())
}
/// Run `args` inside the sandbox container as `user` and return the
/// captured stdout, lossily decoded to a `String`.
///
/// NOTE(review): a non-zero exit status is NOT treated as an error —
/// stderr is discarded and stdout (possibly empty) is returned as-is.
/// Presumably callers rely on this best-effort behavior; confirm before
/// tightening. Errors only when `docker` itself cannot be spawned.
pub fn exec_output(config: &Config, user: &str, args: &[&str]) -> Result<String> {
let output = Command::new("docker")
.args(["exec", "-u", user])
.arg(&config.project.container_name)
.args(args)
.output()
.context("failed to docker exec")?;
Ok(String::from_utf8_lossy(&output.stdout).to_string())
}
/// Whether a room-ralph process for agent `name` is alive inside the
/// container, detected via `pgrep -f "room-ralph.*<name>"`. Any failure
/// to query is treated as "not running".
pub fn is_agent_running(config: &Config, name: &str) -> bool {
    let pattern = format!("room-ralph.*{name}");
    let probe = Command::new("docker")
        .args(["exec", "-u", "agent"])
        .arg(&config.project.container_name)
        .args(["pgrep", "-f", &pattern])
        .output();
    matches!(probe, Ok(out) if out.status.success())
}
/// Best-effort termination of agent `name`'s room-ralph process inside
/// the container: SIGTERM first, then SIGKILL after a 2s grace period if
/// the process is still alive. Always returns Ok — failures (no such
/// process, container down) are silently ignored.
pub fn kill_agent(config: &Config, name: &str) -> Result<()> {
let container = &config.project.container_name;
// `kill -- -$PID` targets the process group, falling back to the single
// PID when that fails.
// NOTE(review): group-kill assumes the PID is a process-group leader —
// confirm room-ralph starts in its own group.
let script = format!(
r#"PID=$(pgrep -f "room-ralph.*{name}" | head -1); \
if [ -n "$PID" ]; then \
kill -- -$PID 2>/dev/null || kill $PID 2>/dev/null; \
sleep 2; \
if kill -0 $PID 2>/dev/null; then \
kill -9 -- -$PID 2>/dev/null || kill -9 $PID 2>/dev/null; \
fi; \
fi"#
);
// Exit status deliberately ignored: killing a missing agent is fine.
let _ = Command::new("docker")
.args(["exec", "-u", "agent"])
.arg(container)
.args(["bash", "-c", &script])
.status();
Ok(())
}
/// Ensure the room daemon is running in the container and that the
/// configured default room exists.
///
/// Sequence: start `room daemon` detached, wait 500ms for it to come up,
/// `room join system` to obtain a token, then `room create <room>` with
/// that token. Join/create failures are reported as warnings and do not
/// fail the call ("already exists" is silently accepted); only a failure
/// to spawn docker itself returns an error.
pub fn ensure_room(config: &Config) -> Result<()> {
let container = &config.project.container_name;
let room = &config.room.default;
// Detached daemon start; output discarded (it may already be running).
Command::new("docker")
.args(["exec", "-d", "-u", "agent"])
.arg(container)
.args(["room", "daemon"])
.stdout(std::process::Stdio::null())
.stderr(std::process::Stdio::null())
.status()
.context("failed to start room daemon")?;
// Give the daemon a moment before talking to it.
std::thread::sleep(std::time::Duration::from_millis(500));
let join_output = Command::new("docker")
.args(["exec", "-u", "agent"])
.arg(container)
.args(["room", "join", "system"])
.output()
.context("failed to join room")?;
// Non-fatal: warn and bail out of room creation.
if !join_output.status.success() {
let stderr = String::from_utf8_lossy(&join_output.stderr);
eprintln!("warning: room join failed: {}", stderr.trim());
return Ok(());
}
// `room join` prints JSON containing the auth token.
let stdout = String::from_utf8_lossy(&join_output.stdout);
let token = parse_token(&stdout).context("failed to parse token from room join output")?;
let create_output = Command::new("docker")
.args(["exec", "-u", "agent"])
.arg(container)
.args(["room", "create", room, "-t", &token])
.output()
.context("failed to create room")?;
// A pre-existing room is expected; any other failure is a warning only.
if !create_output.status.success() {
let stderr = String::from_utf8_lossy(&create_output.stderr);
if !stderr.contains("already exists") {
eprintln!("warning: room create failed: {}", stderr.trim());
}
}
Ok(())
}
/// Extract the `token` string field from a JSON object (the output of
/// `room join`). Returns None on parse failure or a missing/non-string
/// field.
fn parse_token(json: &str) -> Option<String> {
    let value: serde_json::Value = serde_json::from_str(json).ok()?;
    let token = value.get("token")?.as_str()?;
    Some(String::from(token))
}
/// Build the argv for launching `room-ralph` for one agent:
/// `room-ralph <room> <name> --personality <file> --allow-all` followed
/// by any caller-supplied extra arguments.
fn ralph_cmd_args(config: &Config, name: &str, ralph_args: &[String]) -> Vec<String> {
    let mut argv: Vec<String> = Vec::new();
    argv.push("room-ralph".to_string());
    argv.push(config.room.default.to_string());
    argv.push(name.to_string());
    argv.push("--personality".to_string());
    // Matches the file written by inject_agent_instructions.
    argv.push(format!("/home/agent/.room/personality-{name}.txt"));
    argv.push("--allow-all".to_string());
    for extra in ralph_args {
        argv.push(extra.clone());
    }
    argv
}
/// Launch each named agent detached (`docker exec -d`) in its own
/// /workspaces/<name> directory, printing per-agent success or failure
/// to stderr. Errors only if a docker process cannot be spawned.
pub fn start_agents_background(
    config: &Config,
    names: &[String],
    ralph_args: &[String],
) -> Result<()> {
    let container = &config.project.container_name;
    for name in names {
        let workdir = format!("/workspaces/{name}");
        let argv = ralph_cmd_args(config, name, ralph_args);
        let mut cmd = Command::new("docker");
        cmd.args(["exec", "-d", "-u", "agent", "-w", &workdir]);
        cmd.arg(container);
        for arg in &argv {
            cmd.arg(arg);
        }
        let status = cmd
            .status()
            .with_context(|| format!("failed to start agent {name}"))?;
        // Role shown for operator feedback; unknown agents default to coder.
        let role = match config.get_agent(name) {
            Some(agent) => agent.role.to_string(),
            None => "coder".to_string(),
        };
        if status.success() {
            eprintln!(" started {name} ({role})");
        } else {
            eprintln!(" failed to start {name}");
        }
    }
    Ok(())
}
/// Launch every named agent attached and interleave their output on
/// stdout, each line prefixed with a color-coded, left-padded agent name.
///
/// For each agent a `docker exec` child is spawned with piped
/// stdout/stderr; two reader threads per child forward lines into a
/// single mpsc channel, which the main thread drains until all senders
/// close, then waits on each child. Errors only when a child fails to
/// spawn.
pub fn start_agents_tailed(config: &Config, names: &[String], ralph_args: &[String]) -> Result<()> {
use std::io::{BufRead, BufReader};
use std::sync::mpsc;
use std::thread;
// ANSI colors assigned round-robin by agent index.
let colors = [
"\x1b[36m", "\x1b[33m", "\x1b[35m", "\x1b[32m", "\x1b[34m", "\x1b[31m", "\x1b[96m", "\x1b[93m", ];
let reset = "\x1b[0m";
let container = &config.project.container_name;
let room = &config.room.default;
// Channel carries (colored-prefix, output-line) pairs from all readers.
let (tx, rx) = mpsc::channel::<(String, String)>();
let mut children = Vec::new();
for (i, name) in names.iter().enumerate() {
let color = colors[i % colors.len()];
// Name left-padded to 12 columns so prefixes align.
let prefix = format!("{color}{:<12}{reset}", name);
let role = config
.get_agent(name)
.map(|a| a.role.to_string())
.unwrap_or_else(|| "coder".to_string());
eprintln!("{prefix} starting in room '{room}' ({role})...");
let workdir = format!("/workspaces/{name}");
let args = ralph_cmd_args(config, name, ralph_args);
let args_ref: Vec<&str> = args.iter().map(|s| s.as_str()).collect();
let mut cmd = Command::new("docker");
cmd.args(["exec", "-u", "agent", "-w", &workdir])
.arg(container)
.args(&args_ref)
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped());
let mut child = cmd
.spawn()
.with_context(|| format!("failed to start agent {name}"))?;
// One forwarding thread per stream; send errors (receiver gone) are
// ignored so the threads just exit.
if let Some(stdout) = child.stdout.take() {
let tx = tx.clone();
let prefix = prefix.clone();
thread::spawn(move || {
let reader = BufReader::new(stdout);
for line in reader.lines().map_while(Result::ok) {
let _ = tx.send((prefix.clone(), line));
}
});
}
if let Some(stderr) = child.stderr.take() {
let tx = tx.clone();
let prefix = prefix.clone();
thread::spawn(move || {
let reader = BufReader::new(stderr);
for line in reader.lines().map_while(Result::ok) {
let _ = tx.send((prefix.clone(), line));
}
});
}
children.push(child);
}
// Drop the original sender so `rx` terminates once every reader thread
// (each holding a clone) finishes.
drop(tx);
for (prefix, line) in rx {
println!("{prefix} {line}");
}
// Reap the children; their exit codes are intentionally ignored.
for mut child in children {
let _ = child.wait();
}
Ok(())
}
pub fn run_claude(config: &Config, name: &str, extra_args: &[String]) -> Result<()> {
let container = &config.project.container_name;
let workdir = format!("/workspaces/{name}");
let status = Command::new("docker")
.args(["exec", "-it", "-u", "agent", "-w", &workdir])
.arg(container)
.args(["claude-real", "--dangerously-skip-permissions"])
.args(extra_args)
.status()
.context("failed to run claude")?;
if !status.success() {
bail!("claude exited with status {}", status);
}
Ok(())
}
pub fn clone_workspace(repo: &str, name: &str) -> Result<()> {
let workspace = config::agent_workspace(name);
if workspace.exists() {
eprintln!(" [skip] {name} — workspace already exists");
return Ok(());
}
eprintln!(" [clone] {name}");
let status = Command::new("git")
.args(["clone", repo])
.arg(&workspace)
.status()
.context("failed to clone repo")?;
if !status.success() {
bail!("git clone failed for agent {name}");
}
Ok(())
}