use super::progress::ProgressReporter;
use super::{
CONTAINER_NAME, DOCKERFILE, DockerClient, DockerError, ENTRYPOINT_SH, HEALTHCHECK_SH,
IMAGE_NAME_DOCKERHUB, IMAGE_NAME_GHCR, IMAGE_TAG_DEFAULT, OPENCODE_CLOUD_BOOTSTRAP_SH,
active_resource_names, remap_image_tag,
};
use bollard::moby::buildkit::v1::StatusResponse as BuildkitStatusResponse;
use bollard::models::BuildInfoAux;
use bollard::query_parameters::{
BuildImageOptions, BuilderVersion, CreateImageOptions, ListImagesOptionsBuilder,
RemoveImageOptionsBuilder, TagImageOptions,
};
use bytes::Bytes;
use flate2::Compression;
use flate2::write::GzEncoder;
use futures_util::StreamExt;
use http_body_util::{Either, Full};
use std::collections::{HashMap, HashSet, VecDeque};
use std::env;
use std::ffi::OsStr;
use std::fs;
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
use tar::Builder as TarBuilder;
use tracing::{debug, warn};
// Default ring-buffer sizes for the build-log tails kept for error reporting.
// Both are overridable via OPENCODE_DOCKER_BUILD_LOG_TAIL /
// OPENCODE_DOCKER_BUILD_ERROR_TAIL (see read_log_buffer_size).
const DEFAULT_BUILD_LOG_BUFFER_SIZE: usize = 20;
const DEFAULT_ERROR_LOG_BUFFER_SIZE: usize = 10;
// Location of the opencode checkout inside the workspace, relative to the repo root.
const LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH: &str = "packages/opencode";
// Directory names skipped when packaging the local opencode tree into the
// build context (VCS data, caches, build outputs).
const LOCAL_OPENCODE_EXCLUDED_DIRS: &[&str] = &[
    ".git",
    ".planning",
    "node_modules",
    "target",
    "dist",
    ".turbo",
    ".cache",
];
// File names skipped anywhere in the packaged tree.
const LOCAL_OPENCODE_EXCLUDED_FILES: &[&str] = &[".DS_Store"];
/// Flags controlling what is packaged into the Docker build context tarball.
#[derive(Debug, Clone, Copy, Default)]
struct BuildContextOptions {
    // When true, the local opencode checkout is copied into the context
    // (used together with the OPENCODE_SOURCE=local build arg).
    include_local_opencode_submodule: bool,
}
/// Resolves the tag actually used for local images by delegating to
/// `remap_image_tag` (which may apply a profile-specific remapping).
fn effective_image_tag(tag: &str) -> String {
    remap_image_tag(tag)
}
/// Collects the IDs of images belonging to the active profile, matched either
/// by the profile's expected repo tags (current and previous tag, on both
/// registries) or by the instance-id label. Returns `None` when no instance id
/// is active, so callers can fall back to plain fragment matching.
fn profile_scoped_image_ids(images: &[bollard::models::ImageSummary]) -> Option<HashSet<String>> {
    let names = active_resource_names();
    let instance_id = names.instance_id.as_deref()?;
    let expected_tags = [
        format!("{IMAGE_NAME_GHCR}:{}", names.image_tag),
        format!("{IMAGE_NAME_DOCKERHUB}:{}", names.image_tag),
        format!("{IMAGE_NAME_GHCR}:{}", names.previous_image_tag),
        format!("{IMAGE_NAME_DOCKERHUB}:{}", names.previous_image_tag),
    ];
    let ids = images
        .iter()
        .filter(|image| {
            let by_tag = image
                .repo_tags
                .iter()
                .any(|tag| expected_tags.contains(tag));
            let by_label = image
                .labels
                .get(super::INSTANCE_LABEL_KEY)
                .is_some_and(|value| value == instance_id);
            by_tag || by_label
        })
        .map(|image| image.id.clone())
        .collect();
    Some(ids)
}
/// Reads a log-buffer size from the environment variable `var_name`.
/// Parsed values are clamped to [5, 500]; a missing or unparsable variable
/// yields `default` unchanged.
fn read_log_buffer_size(var_name: &str, default: usize) -> usize {
    env::var(var_name)
        .ok()
        .and_then(|raw| raw.trim().parse::<usize>().ok())
        .map_or(default, |parsed| parsed.clamp(5, 500))
}
/// Heuristically decides whether a build-output line looks like an error,
/// by case-insensitive substring match against a fixed set of markers.
fn is_error_line(line: &str) -> bool {
    const MARKERS: [&str; 6] = [
        "error",
        "failed",
        "cannot",
        "unable to",
        "not found",
        "permission denied",
    ];
    let lower = line.to_lowercase();
    MARKERS.iter().any(|marker| lower.contains(marker))
}
pub async fn image_exists(
client: &DockerClient,
image: &str,
tag: &str,
) -> Result<bool, DockerError> {
let tag = effective_image_tag(tag);
let full_name = format!("{image}:{tag}");
debug!("Checking if image exists: {}", full_name);
match client.inner().inspect_image(&full_name).await {
Ok(_) => Ok(true),
Err(bollard::errors::Error::DockerResponseServerError {
status_code: 404, ..
}) => Ok(false),
Err(e) => Err(DockerError::from(e)),
}
}
/// Removes local Docker images whose tags/digests/labels match
/// `name_fragment`, returning how many were removed. For the canonical
/// container name, profile-scoped matching is preferred; when no profile
/// instance is active it falls back to plain fragment matching.
pub async fn remove_images_by_name(
    client: &DockerClient,
    name_fragment: &str,
    force: bool,
) -> Result<usize, DockerError> {
    debug!("Removing Docker images matching '{name_fragment}'");
    let images = list_docker_images(client).await?;
    let targets = if name_fragment == CONTAINER_NAME {
        match profile_scoped_image_ids(&images) {
            Some(ids) => ids,
            None => collect_image_ids(&images, name_fragment),
        }
    } else {
        collect_image_ids(&images, name_fragment)
    };
    remove_image_ids(client, targets, force).await
}
/// Lists all images (including intermediates) known to the Docker daemon.
async fn list_docker_images(
    client: &DockerClient,
) -> Result<Vec<bollard::models::ImageSummary>, DockerError> {
    let options = Some(ListImagesOptionsBuilder::new().all(true).build());
    match client.inner().list_images(options).await {
        Ok(images) => Ok(images),
        Err(e) => Err(DockerError::Image(format!("Failed to list images: {e}"))),
    }
}
// OCI annotation keys/values stamped on opencode-cloud images. They let us
// recognize our images even when tags or digests don't contain the name
// fragment (see image_labels_match).
const LABEL_TITLE: &str = "org.opencontainers.image.title";
const LABEL_SOURCE: &str = "org.opencontainers.image.source";
const LABEL_URL: &str = "org.opencontainers.image.url";
const LABEL_TITLE_VALUE: &str = "opencode-cloud-sandbox";
const LABEL_SOURCE_VALUE: &str = "https://github.com/pRizz/opencode-cloud";
const LABEL_URL_VALUE: &str = "https://github.com/pRizz/opencode-cloud";
/// Returns the set of image IDs whose tags, digests, or labels match
/// `name_fragment` (see `image_matches_fragment_or_labels`).
fn collect_image_ids(
    images: &[bollard::models::ImageSummary],
    name_fragment: &str,
) -> HashSet<String> {
    images
        .iter()
        .filter(|image| image_matches_fragment_or_labels(image, name_fragment))
        .map(|image| image.id.clone())
        .collect()
}
/// True when the image's repo tags (ignoring the dangling "<none>:<none>"
/// placeholder) or repo digests contain `name_fragment`, or when its OCI
/// labels identify it as an opencode-cloud image.
fn image_matches_fragment_or_labels(
    image: &bollard::models::ImageSummary,
    name_fragment: &str,
) -> bool {
    let tagged = image
        .repo_tags
        .iter()
        .any(|tag| tag != "<none>:<none>" && tag.contains(name_fragment));
    if tagged {
        return true;
    }
    let digested = image
        .repo_digests
        .iter()
        .any(|digest| digest.contains(name_fragment));
    if digested {
        return true;
    }
    image_labels_match(&image.labels)
}
/// True when any of the opencode-cloud OCI labels is present with its
/// expected value (source, url, or title).
fn image_labels_match(labels: &HashMap<String, String>) -> bool {
    let expected_pairs = [
        (LABEL_SOURCE, LABEL_SOURCE_VALUE),
        (LABEL_URL, LABEL_URL_VALUE),
        (LABEL_TITLE, LABEL_TITLE_VALUE),
    ];
    expected_pairs.iter().any(|(key, expected)| {
        labels
            .get(*key)
            .is_some_and(|value| value.as_str() == *expected)
    })
}
/// Removes each image in `image_ids`, returning the number actually removed.
/// A 404 (already gone, e.g. removed concurrently) is logged and skipped
/// without counting; any other daemon error aborts with `DockerError::Image`.
async fn remove_image_ids(
    client: &DockerClient,
    image_ids: HashSet<String>,
    force: bool,
) -> Result<usize, DockerError> {
    if image_ids.is_empty() {
        return Ok(0);
    }
    let options = RemoveImageOptionsBuilder::new().force(force).build();
    let mut removed_count = 0usize;
    for image_id in image_ids {
        match client
            .inner()
            .remove_image(&image_id, Some(options.clone()), None)
            .await
        {
            Ok(_) => removed_count += 1,
            Err(bollard::errors::Error::DockerResponseServerError {
                status_code: 404,
                ..
            }) => {
                debug!("Docker image already removed: {}", image_id);
            }
            Err(err) => {
                return Err(DockerError::Image(format!(
                    "Failed to remove image {image_id}: {err}"
                )));
            }
        }
    }
    Ok(removed_count)
}
/// Builds the sandbox image with BuildKit and returns the full image name
/// (`IMAGE_NAME_GHCR:tag`).
///
/// `tag` defaults to `IMAGE_TAG_DEFAULT` and is remapped via
/// `effective_image_tag`. When `build_args` contains `OPENCODE_SOURCE=local`
/// (case-insensitive), the local opencode checkout is packaged into the build
/// context. Progress is streamed to `progress`; on failure, the error message
/// includes recent build/BuildKit output collected in `BuildLogState`.
pub async fn build_image(
    client: &DockerClient,
    tag: Option<&str>,
    progress: &mut ProgressReporter,
    no_cache: bool,
    build_args: Option<HashMap<String, String>>,
) -> Result<String, DockerError> {
    let tag = effective_image_tag(tag.unwrap_or(IMAGE_TAG_DEFAULT));
    let full_name = format!("{IMAGE_NAME_GHCR}:{tag}");
    debug!("Building image: {} (no_cache: {})", full_name, no_cache);
    let build_args = build_args.unwrap_or_default();
    // OPENCODE_SOURCE=local switches the Dockerfile to build from the local checkout.
    let include_local_opencode_submodule = build_args
        .get("OPENCODE_SOURCE")
        .is_some_and(|value| value.eq_ignore_ascii_case("local"));
    let context_msg = if include_local_opencode_submodule {
        "Packaging local opencode checkout"
    } else {
        "Preparing build context"
    };
    progress.update_spinner("build", context_msg);
    let context = create_build_context(BuildContextOptions {
        include_local_opencode_submodule,
    })
    .map_err(|e| DockerError::Build(format!("Failed to create build context: {e}")))?;
    // BuildKit wants a unique session id per build; derive one from the clock.
    let session_id = format!(
        "opencode-cloud-build-{}",
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos()
    );
    let options = BuildImageOptions {
        t: Some(full_name.clone()),
        dockerfile: "Dockerfile".to_string(),
        version: BuilderVersion::BuilderBuildKit,
        session: Some(session_id),
        rm: true,
        nocache: no_cache,
        buildargs: Some(build_args),
        platform: String::new(),
        target: String::new(),
        ..Default::default()
    };
    let body: Either<Full<Bytes>, _> = Either::Left(Full::new(Bytes::from(context)));
    progress.update_spinner("build", "Sending build context to Docker");
    let mut stream = client.inner().build_image(options, None, Some(body));
    progress.update_spinner("build", "Waiting for Docker build to start");
    let mut maybe_image_id = None;
    let mut log_state = BuildLogState::new();
    while let Some(result) = stream.next().await {
        // Transport-level stream failure: surface it with collected log context.
        let Ok(info) = result else {
            return Err(handle_stream_error(
                "Build failed",
                result.expect_err("checked error").to_string(),
                &log_state,
                progress,
            ));
        };
        handle_stream_message(&info, progress, &mut log_state);
        // Daemon-reported build error (e.g. a failing RUN step).
        if let Some(error_detail) = &info.error_detail
            && let Some(error_msg) = &error_detail.message
        {
            progress.abandon_all(error_msg);
            let context = format_build_error_with_context(
                error_msg,
                &log_state.recent_logs,
                &log_state.error_logs,
                &log_state.recent_buildkit_logs,
            );
            return Err(DockerError::Build(context));
        }
        if let Some(aux) = info.aux {
            match aux {
                // Classic-builder aux frame carries the resulting image id.
                BuildInfoAux::Default(image_id) => {
                    if let Some(id) = image_id.id {
                        maybe_image_id = Some(id);
                    }
                }
                // BuildKit aux frames carry structured status updates.
                BuildInfoAux::BuildKit(status) => {
                    handle_buildkit_status(&status, progress, &mut log_state);
                }
            }
        }
    }
    let image_id = maybe_image_id.unwrap_or_else(|| "unknown".to_string());
    let finish_msg = format!("Build complete: {image_id}");
    progress.finish("build", &finish_msg);
    Ok(full_name)
}
/// Rolling buffers and BuildKit-vertex bookkeeping collected while a build
/// streams, used both for spinner updates and for error-context reporting.
struct BuildLogState {
    // Tail of classic-builder stream lines.
    recent_logs: VecDeque<String>,
    // Tail of lines that looked like errors (see is_error_line).
    error_logs: VecDeque<String>,
    // Tail of "[vertex] message" BuildKit lines.
    recent_buildkit_logs: VecDeque<String>,
    build_log_buffer_size: usize,
    error_log_buffer_size: usize,
    // Name and digest of the vertex currently shown on the spinner.
    last_buildkit_vertex: Option<String>,
    last_buildkit_vertex_id: Option<String>,
    // Pinned once the "exporting to image" vertex has been observed.
    export_vertex_id: Option<String>,
    export_vertex_name: Option<String>,
    // Accumulated raw log text per vertex digest.
    buildkit_logs_by_vertex_id: HashMap<String, String>,
    // First non-empty name seen for each vertex digest.
    vertex_name_by_vertex_id: HashMap<String, String>,
}
impl BuildLogState {
    /// Creates an empty log state, sizing the ring buffers from the
    /// environment overrides (OPENCODE_DOCKER_BUILD_LOG_TAIL /
    /// OPENCODE_DOCKER_BUILD_ERROR_TAIL) or their defaults.
    fn new() -> Self {
        let build_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_LOG_TAIL",
            DEFAULT_BUILD_LOG_BUFFER_SIZE,
        );
        let error_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_ERROR_TAIL",
            DEFAULT_ERROR_LOG_BUFFER_SIZE,
        );
        Self {
            recent_logs: VecDeque::with_capacity(build_log_buffer_size),
            error_logs: VecDeque::with_capacity(error_log_buffer_size),
            recent_buildkit_logs: VecDeque::with_capacity(build_log_buffer_size),
            build_log_buffer_size,
            error_log_buffer_size,
            last_buildkit_vertex: None,
            last_buildkit_vertex_id: None,
            export_vertex_id: None,
            export_vertex_name: None,
            buildkit_logs_by_vertex_id: HashMap::new(),
            vertex_name_by_vertex_id: HashMap::new(),
        }
    }
}
/// Turns a raw BuildKit step label into a friendlier spinner message.
/// Known "[internal] ..." steps get dedicated wording; other internal steps
/// have their first letter capitalized; non-internal labels are just trimmed.
fn clean_buildkit_label(raw: &str) -> String {
    let trimmed = raw.trim();
    let Some(rest) = trimmed.strip_prefix("[internal] ") else {
        return trimmed.to_string();
    };
    if rest.starts_with("load remote build context") {
        return "Loading remote build context".to_string();
    }
    if let Some(image) = rest.strip_prefix("load metadata for ") {
        return format!("Resolving image {image}");
    }
    if rest.starts_with("load build definition") {
        return "Loading Dockerfile".to_string();
    }
    if rest.starts_with("load build context") {
        return "Loading build context".to_string();
    }
    // Unknown internal step: surface it with an initial capital.
    let mut remaining = rest.chars();
    match remaining.next() {
        None => String::new(),
        Some(first) => first.to_uppercase().to_string() + remaining.as_str(),
    }
}
fn handle_stream_message(
info: &bollard::models::BuildInfo,
progress: &mut ProgressReporter,
state: &mut BuildLogState,
) {
let Some(stream_msg) = info.stream.as_deref() else {
return;
};
let msg = stream_msg.trim();
if msg.is_empty() {
return;
}
if progress.is_plain_output() {
eprint!("{stream_msg}");
} else {
let has_runtime_vertex = state
.last_buildkit_vertex
.as_deref()
.is_some_and(|name| name.starts_with("[runtime "));
let is_internal_msg = msg.contains("[internal]");
if !(has_runtime_vertex && is_internal_msg) {
progress.update_spinner("build", &clean_buildkit_label(stream_msg));
}
}
if state.recent_logs.len() >= state.build_log_buffer_size {
state.recent_logs.pop_front();
}
state.recent_logs.push_back(msg.to_string());
if is_error_line(msg) {
if state.error_logs.len() >= state.error_log_buffer_size {
state.error_logs.pop_front();
}
state.error_logs.push_back(msg.to_string());
}
if msg.starts_with("Step ") {
debug!("Build step: {}", msg);
}
}
/// Processes one BuildKit status frame: appends vertex logs, learns vertex
/// names, pins the "exporting to image" vertex once seen, records log lines
/// into the rolling buffer, and updates the build spinner (or, in plain mode,
/// prints the raw log lines to stderr).
///
/// Fix: the trailing block that re-looked-up the current vertex name and then
/// discarded it (`let _ = name;`) was dead code and has been removed; it had
/// no observable effect.
fn handle_buildkit_status(
    status: &BuildkitStatusResponse,
    progress: &mut ProgressReporter,
    state: &mut BuildLogState,
) {
    let latest_logs = append_buildkit_logs(&mut state.buildkit_logs_by_vertex_id, status);
    update_buildkit_vertex_names(&mut state.vertex_name_by_vertex_id, status);
    update_export_vertex_from_logs(
        &latest_logs,
        &state.vertex_name_by_vertex_id,
        &mut state.export_vertex_id,
        &mut state.export_vertex_name,
    );
    // Prefer the vertex chosen from the status frame; otherwise fall back to
    // the vertex of the newest log line (resolving its name via the map, the
    // previously shown name, or a short-digest placeholder).
    let (vertex_id, vertex_name) = match select_latest_buildkit_vertex(
        status,
        &state.vertex_name_by_vertex_id,
        state.export_vertex_id.as_deref(),
        state.export_vertex_name.as_deref(),
    ) {
        Some((vertex_id, vertex_name)) => (vertex_id, vertex_name),
        None => {
            let Some(log_entry) = latest_logs.last() else {
                return;
            };
            let name = state
                .vertex_name_by_vertex_id
                .get(&log_entry.vertex_id)
                .cloned()
                .or_else(|| state.last_buildkit_vertex.clone())
                .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));
            (log_entry.vertex_id.clone(), name)
        }
    };
    record_buildkit_logs(state, &latest_logs, &vertex_id, &vertex_name);
    state.last_buildkit_vertex_id = Some(vertex_id.clone());
    if state.last_buildkit_vertex.as_deref() != Some(&vertex_name) {
        state.last_buildkit_vertex = Some(vertex_name.clone());
    }
    let display_name = clean_buildkit_label(&vertex_name);
    let message = if progress.is_plain_output() {
        display_name
    } else if let Some(log_entry) = latest_logs
        .iter()
        .rev()
        .find(|entry| entry.vertex_id == vertex_id)
    {
        // Show the newest log line of the active vertex next to its label.
        format!("{display_name} · {}", log_entry.message)
    } else {
        display_name
    };
    progress.update_spinner("build", &message);
    if progress.is_plain_output() {
        for log_entry in latest_logs {
            eprintln!("[{}] {}", log_entry.vertex_id, log_entry.message);
        }
    }
}
/// Converts a build-stream transport error into a `DockerError::Build`,
/// abandoning all progress bars first. Appends a BuildKit hint when the error
/// or recent logs suggest a cache-mount/BuildKit support problem.
fn handle_stream_error(
    prefix: &str,
    error_str: String,
    state: &BuildLogState,
    progress: &mut ProgressReporter,
) -> DockerError {
    progress.abandon_all(prefix);
    let logs_mention_cache_mount = state
        .recent_logs
        .iter()
        .any(|log| log.contains("--mount") && log.contains("cache"));
    let needs_buildkit_hint =
        error_str.contains("mount") || error_str.contains("--mount") || logs_mention_cache_mount;
    let buildkit_hint = if needs_buildkit_hint {
        "\n\nNote: This Dockerfile uses BuildKit cache mounts (--mount=type=cache).\n\
        The build is configured to use BuildKit, but the Docker daemon may not support it.\n\
        Ensure BuildKit is enabled in Docker Desktop settings and the daemon is restarted."
    } else {
        ""
    };
    let base = format_build_error_with_context(
        &error_str,
        &state.recent_logs,
        &state.error_logs,
        &state.recent_buildkit_logs,
    );
    DockerError::Build(format!("{base}{buildkit_hint}"))
}
/// Records the first non-empty name reported for each vertex digest; later
/// (possibly different) names for the same digest are ignored.
fn update_buildkit_vertex_names(
    vertex_name_by_vertex_id: &mut HashMap<String, String>,
    status: &BuildkitStatusResponse,
) {
    for vertex in status.vertexes.iter().filter(|v| !v.name.is_empty()) {
        vertex_name_by_vertex_id
            .entry(vertex.digest.clone())
            .or_insert_with(|| vertex.name.clone());
    }
}
/// Picks the vertex `(digest, name)` to display for a BuildKit status frame.
///
/// Priority order:
/// 1. the pinned export vertex, once "exporting to image" has been seen;
/// 2. the highest-numbered "[runtime N/M]" stage present in this frame;
/// 3. the first named vertex that is not an "[internal]" step;
/// 4. any vertex whose name can be resolved at all.
/// Returns `None` when no vertex in the frame has a resolvable name.
fn select_latest_buildkit_vertex(
    status: &BuildkitStatusResponse,
    vertex_name_by_vertex_id: &HashMap<String, String>,
    export_vertex_id: Option<&str>,
    export_vertex_name: Option<&str>,
) -> Option<(String, String)> {
    // Once exporting has started, always report the export vertex.
    if let Some(export_vertex_id) = export_vertex_id {
        let name = export_vertex_name
            .map(str::to_string)
            .or_else(|| vertex_name_by_vertex_id.get(export_vertex_id).cloned())
            .unwrap_or_else(|| format_vertex_fallback_label(export_vertex_id));
        return Some((export_vertex_id.to_string(), name));
    }
    // best_runtime tracks the highest runtime step seen; fallback is the
    // first non-internal named vertex encountered.
    let mut best_runtime: Option<(u32, String, String)> = None;
    let mut fallback: Option<(String, String)> = None;
    for vertex in &status.vertexes {
        // Resolve the vertex name: inline if present, else from the map.
        let name = if vertex.name.is_empty() {
            vertex_name_by_vertex_id.get(&vertex.digest).cloned()
        } else {
            Some(vertex.name.clone())
        };
        let Some(name) = name else {
            continue;
        };
        if fallback.is_none() && !name.starts_with("[internal]") {
            fallback = Some((vertex.digest.clone(), name.clone()));
        }
        if let Some(step) = parse_runtime_step(&name) {
            match &best_runtime {
                // Keep the existing candidate when its step is >= this one.
                Some((best_step, _, _)) if *best_step >= step => {}
                _ => {
                    best_runtime = Some((step, vertex.digest.clone(), name.clone()));
                }
            }
        }
    }
    if let Some((_, digest, name)) = best_runtime {
        Some((digest, name))
    } else {
        // Last resort: any vertex with a resolvable name, internal or not.
        fallback.or_else(|| {
            status.vertexes.iter().find_map(|vertex| {
                let name = if vertex.name.is_empty() {
                    vertex_name_by_vertex_id.get(&vertex.digest).cloned()
                } else {
                    Some(vertex.name.clone())
                };
                name.map(|resolved| (vertex.digest.clone(), resolved))
            })
        })
    }
}
/// Extracts the step number N from labels like "[runtime N/M] RUN ...".
/// Returns `None` when the marker, the '/', or a parsable number is missing.
fn parse_runtime_step(name: &str) -> Option<u32> {
    let (_, after_marker) = name.split_once("[runtime ")?;
    let (step_text, _) = after_marker.split_once('/')?;
    step_text.trim().parse::<u32>().ok()
}
/// Produces a short placeholder label ("vertex <first-12-chars>") for a
/// vertex whose human-readable name is unknown, dropping a "sha256:" prefix.
fn format_vertex_fallback_label(vertex_id: &str) -> String {
    let digest = vertex_id.strip_prefix("sha256:").unwrap_or(vertex_id);
    let short: String = digest.chars().take(12).collect();
    format!("vertex {short}")
}
/// Pins the export vertex once an "exporting to image" log line appears,
/// scanning the newest lines first. The vertex name is recorded only when it
/// is already known in `vertex_name_by_vertex_id`.
fn update_export_vertex_from_logs(
    latest_logs: &[BuildkitLogEntry],
    vertex_name_by_vertex_id: &HashMap<String, String>,
    export_vertex_id: &mut Option<String>,
    export_vertex_name: &mut Option<String>,
) {
    let export_entry = latest_logs
        .iter()
        .rev()
        .find(|log| log.message.trim_start().starts_with("exporting to image"));
    let Some(entry) = export_entry else {
        return;
    };
    *export_vertex_id = Some(entry.vertex_id.clone());
    if let Some(name) = vertex_name_by_vertex_id.get(&entry.vertex_id) {
        *export_vertex_name = Some(name.clone());
    }
}
/// Appends each non-empty log line to the bounded `recent_buildkit_logs`
/// buffer as "[vertex-name] message", normalizing carriage returns and
/// trailing whitespace. The vertex name is resolved from the name map, the
/// current vertex, or a short-digest placeholder, in that order.
fn record_buildkit_logs(
    state: &mut BuildLogState,
    latest_logs: &[BuildkitLogEntry],
    current_vertex_id: &str,
    current_vertex_name: &str,
) {
    for log_entry in latest_logs {
        let message = log_entry.message.replace('\r', "").trim_end().to_string();
        if message.is_empty() {
            continue;
        }
        let name = match state.vertex_name_by_vertex_id.get(&log_entry.vertex_id) {
            Some(known) => known.clone(),
            None if log_entry.vertex_id == current_vertex_id => current_vertex_name.to_string(),
            None => format_vertex_fallback_label(&log_entry.vertex_id),
        };
        if state.recent_buildkit_logs.len() >= state.build_log_buffer_size {
            state.recent_buildkit_logs.pop_front();
        }
        state
            .recent_buildkit_logs
            .push_back(format!("[{name}] {message}"));
    }
}
/// One decoded BuildKit log line, tagged with the vertex that emitted it.
#[derive(Debug, Clone)]
struct BuildkitLogEntry {
    // Digest of the emitting vertex.
    vertex_id: String,
    // Lossily UTF-8-decoded payload (may still contain carriage returns).
    message: String,
}
/// Decodes each BuildKit log payload (lossily, so bad UTF-8 does not abort
/// the build), appends it to the per-vertex accumulated text, and returns the
/// freshly decoded entries in arrival order.
fn append_buildkit_logs(
    logs: &mut HashMap<String, String>,
    status: &BuildkitStatusResponse,
) -> Vec<BuildkitLogEntry> {
    let mut collected = Vec::with_capacity(status.logs.len());
    for log in &status.logs {
        let vertex_id = log.vertex.clone();
        let message = String::from_utf8_lossy(&log.msg).into_owned();
        logs.entry(vertex_id.clone())
            .or_default()
            .push_str(&message);
        collected.push(BuildkitLogEntry { vertex_id, message });
    }
    collected
}
/// Pulls the sandbox image, preferring GHCR and falling back to Docker Hub.
///
/// When the default tag is remapped for an isolated profile (i.e.
/// `effective_image_tag(IMAGE_TAG_DEFAULT)` differs from the default), the
/// registry is still asked for the canonical default tag, and the pulled
/// image is then retagged locally under the profile-specific tag. Returns the
/// full local image name on success; if both registries fail, both errors are
/// reported in a single `DockerError::Pull`.
pub async fn pull_image(
    client: &DockerClient,
    tag: Option<&str>,
    progress: &mut ProgressReporter,
) -> Result<String, DockerError> {
    let requested_tag = tag.unwrap_or(IMAGE_TAG_DEFAULT);
    let resolved_tag = effective_image_tag(requested_tag);
    // True when the default tag was remapped for an isolated profile.
    let isolated_default_tag =
        requested_tag == IMAGE_TAG_DEFAULT && resolved_tag != IMAGE_TAG_DEFAULT;
    // Registries only know the canonical tag, never the profile-local one.
    let registry_pull_tag = if isolated_default_tag {
        IMAGE_TAG_DEFAULT
    } else {
        requested_tag
    };
    debug!(
        "Attempting to pull from GHCR: {}:{}",
        IMAGE_NAME_GHCR, registry_pull_tag
    );
    let ghcr_err =
        match pull_from_registry(client, IMAGE_NAME_GHCR, registry_pull_tag, progress).await {
            Ok(()) => {
                if isolated_default_tag {
                    retag_local_image(
                        client,
                        &format!("{IMAGE_NAME_GHCR}:{registry_pull_tag}"),
                        &resolved_tag,
                    )
                    .await?;
                }
                let full_name = format!("{IMAGE_NAME_GHCR}:{resolved_tag}");
                return Ok(full_name);
            }
            Err(e) => e,
        };
    warn!(
        "GHCR pull failed: {}. Trying Docker Hub fallback...",
        ghcr_err
    );
    debug!(
        "Attempting to pull from Docker Hub: {}:{}",
        IMAGE_NAME_DOCKERHUB, registry_pull_tag
    );
    match pull_from_registry(client, IMAGE_NAME_DOCKERHUB, registry_pull_tag, progress).await {
        Ok(()) => {
            if isolated_default_tag {
                // Retagging always targets the GHCR name, even for Docker Hub
                // pulls, so the local name is uniform across registries.
                retag_local_image(
                    client,
                    &format!("{IMAGE_NAME_DOCKERHUB}:{registry_pull_tag}"),
                    &resolved_tag,
                )
                .await?;
                return Ok(format!("{IMAGE_NAME_GHCR}:{resolved_tag}"));
            }
            let full_name = format!("{IMAGE_NAME_DOCKERHUB}:{resolved_tag}");
            Ok(full_name)
        }
        Err(dockerhub_err) => Err(DockerError::Pull(format!(
            "Failed to pull from both registries. GHCR: {ghcr_err}. Docker Hub: {dockerhub_err}"
        ))),
    }
}
/// Tags a locally present image (`source_image`) as
/// `IMAGE_NAME_GHCR:target_tag`, wrapping any daemon error in
/// `DockerError::Pull`.
async fn retag_local_image(
    client: &DockerClient,
    source_image: &str,
    target_tag: &str,
) -> Result<(), DockerError> {
    let options = TagImageOptions {
        repo: Some(IMAGE_NAME_GHCR.to_string()),
        tag: Some(target_tag.to_string()),
    };
    match client.inner().tag_image(source_image, Some(options)).await {
        Ok(_) => Ok(()),
        Err(e) => Err(DockerError::Pull(format!(
            "Failed to retag pulled image {source_image} as {IMAGE_NAME_GHCR}:{target_tag}: {e}"
        ))),
    }
}
/// Maximum number of pull attempts per registry before giving up.
const MAX_PULL_RETRIES: usize = 3;
/// Pulls `image:tag` with up to `MAX_PULL_RETRIES` attempts, sleeping with
/// exponential backoff (1s, 2s, ...) between failures. Returns the last
/// error when every attempt fails.
async fn pull_from_registry(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");
    let mut last_error = None;
    for attempt in 1..=MAX_PULL_RETRIES {
        debug!(
            "Pull attempt {}/{} for {}",
            attempt, MAX_PULL_RETRIES, full_name
        );
        let outcome = do_pull(client, image, tag, progress).await;
        let Err(e) = outcome else {
            return Ok(());
        };
        warn!("Pull attempt {} failed: {}", attempt, e);
        last_error = Some(e);
        if attempt < MAX_PULL_RETRIES {
            // Exponential backoff: 1000ms, 2000ms, 4000ms, ...
            let delay_ms = 1000 * (1 << (attempt - 1));
            tokio::time::sleep(std::time::Duration::from_millis(delay_ms)).await;
        }
    }
    Err(last_error.unwrap_or_else(|| {
        DockerError::Pull(format!(
            "Pull failed for {full_name} after {MAX_PULL_RETRIES} attempts"
        ))
    }))
}
/// Performs a single pull of `image:tag`, translating the daemon's progress
/// stream into spinner/progress-bar updates. Returns `Err(DockerError::Pull)`
/// on a stream error or when the daemon reports an error detail.
async fn do_pull(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");
    let options = CreateImageOptions {
        from_image: Some(image.to_string()),
        tag: Some(tag.to_string()),
        platform: String::new(),
        ..Default::default()
    };
    let mut stream = client.inner().create_image(Some(options), None, None);
    progress.add_spinner("pull", &format!("Pulling {full_name}..."));
    while let Some(result) = stream.next().await {
        match result {
            Ok(info) => {
                // Daemon-reported failure (e.g. manifest unknown, auth error).
                if let Some(error_detail) = &info.error_detail
                    && let Some(error_msg) = &error_detail.message
                {
                    progress.abandon_all(error_msg);
                    return Err(DockerError::Pull(error_msg.to_string()));
                }
                if let Some(layer_id) = &info.id {
                    // Per-layer status update.
                    let status = info.status.as_deref().unwrap_or("");
                    match status {
                        "Already exists" => {
                            progress.finish(layer_id, "Already exists");
                        }
                        "Pull complete" => {
                            progress.finish(layer_id, "Pull complete");
                        }
                        "Downloading" | "Extracting" => {
                            // Only show a byte-level bar when a total is known.
                            if let Some(progress_detail) = &info.progress_detail {
                                let current = progress_detail.current.unwrap_or(0) as u64;
                                let total = progress_detail.total.unwrap_or(0) as u64;
                                if total > 0 {
                                    progress.update_layer(layer_id, current, total, status);
                                }
                            }
                        }
                        _ => {
                            progress.update_spinner(layer_id, status);
                        }
                    }
                } else if let Some(status) = &info.status {
                    // Image-level (layer-less) status message.
                    progress.update_spinner("pull", status);
                }
            }
            Err(e) => {
                progress.abandon_all("Pull failed");
                return Err(DockerError::Pull(format!("Pull failed: {e}")));
            }
        }
    }
    progress.finish("pull", &format!("Pull complete: {full_name}"));
    Ok(())
}
/// Builds a multi-section error report: the raw error, error-looking lines
/// not already in the recent tail, the BuildKit tail, the classic-builder
/// tail (or a "no output" note when both tails are empty), and a keyword-based
/// suggestion.
fn format_build_error_with_context(
    error: &str,
    recent_logs: &VecDeque<String>,
    error_logs: &VecDeque<String>,
    recent_buildkit_logs: &VecDeque<String>,
) -> String {
    // Maps error keywords to a remediation hint; empty when nothing matches.
    fn suggestion_for(error: &str) -> &'static str {
        let lower = error.to_lowercase();
        if lower.contains("network") || lower.contains("connection") || lower.contains("timeout") {
            "\n\nSuggestion: Check your network connection and Docker's ability to reach the internet."
        } else if lower.contains("disk") || lower.contains("space") || lower.contains("no space") {
            "\n\nSuggestion: Free up disk space with 'docker system prune' or check available storage."
        } else if lower.contains("permission") || lower.contains("denied") {
            "\n\nSuggestion: Check Docker permissions. You may need to add your user to the 'docker' group."
        } else {
            ""
        }
    }
    let mut message = String::from(error);
    // Only surface error lines that the recent tail does not already show.
    let recent_set: std::collections::HashSet<_> = recent_logs.iter().collect();
    let unique_errors: Vec<_> = error_logs
        .iter()
        .filter(|line| !recent_set.contains(line))
        .collect();
    if !unique_errors.is_empty() {
        message.push_str("\n\nPotential errors detected during build:");
        for line in unique_errors {
            message.push_str("\n ");
            message.push_str(line);
        }
    }
    if !recent_buildkit_logs.is_empty() {
        message.push_str("\n\nRecent BuildKit output:");
        for line in recent_buildkit_logs {
            message.push_str("\n ");
            message.push_str(line);
        }
    }
    if recent_logs.is_empty() {
        if recent_buildkit_logs.is_empty() {
            message.push_str("\n\nNo build output was received from the Docker daemon.");
            message.push_str("\nThis usually means the build failed before any logs were streamed.");
        }
    } else {
        message.push_str("\n\nRecent build output:");
        for line in recent_logs {
            message.push_str("\n ");
            message.push_str(line);
        }
    }
    message.push_str(suggestion_for(error));
    message
}
fn create_build_context(options: BuildContextOptions) -> Result<Vec<u8>, io::Error> {
let repo_root = if options.include_local_opencode_submodule {
Some(workspace_root_for_build_context()?)
} else {
None
};
create_build_context_with_repo_root(options, repo_root.as_deref())
}
/// Resolves the workspace root as two directories above this crate's
/// manifest dir (compile-time CARGO_MANIFEST_DIR), canonicalized.
/// NOTE(review): assumes the crate lives at <root>/packages/core and that the
/// path still exists at runtime — confirm for installed binaries.
fn workspace_root_for_build_context() -> Result<PathBuf, io::Error> {
    Path::new(env!("CARGO_MANIFEST_DIR"))
        .join("../..")
        .canonicalize()
}
/// Assembles the gzipped tar build context: the embedded Dockerfile and all
/// embedded docker asset files, an (initially empty) directory entry for the
/// opencode submodule path, and — when requested — the local opencode
/// checkout copied from `repo_root`.
///
/// Returns the complete gzip bytes. Errors if the local checkout was
/// requested but `repo_root` is `None` or the tree cannot be read.
fn create_build_context_with_repo_root(
    options: BuildContextOptions,
    repo_root: Option<&Path>,
) -> Result<Vec<u8>, io::Error> {
    let mut archive_buffer = Vec::new();
    {
        // tar writer stacked on a gzip encoder writing into archive_buffer.
        let encoder = GzEncoder::new(&mut archive_buffer, Compression::default());
        let mut tar = TarBuilder::new(encoder);
        let dockerfile_bytes = DOCKERFILE.as_bytes();
        append_bytes(&mut tar, "Dockerfile", dockerfile_bytes, 0o644)?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/entrypoint.sh",
            ENTRYPOINT_SH,
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode-cloud-bootstrap.sh",
            OPENCODE_CLOUD_BOOTSTRAP_SH,
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/healthcheck.sh",
            HEALTHCHECK_SH,
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode-broker.service",
            include_bytes!("files/opencode-broker.service"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode.service",
            include_bytes!("files/opencode.service"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/pam/opencode",
            include_bytes!("files/pam/opencode"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode.jsonc",
            include_bytes!("files/opencode.jsonc"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/starship.toml",
            include_bytes!("files/starship.toml"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/bashrc.extra",
            include_bytes!("files/bashrc.extra"),
            0o644,
        )?;
        // Always create the submodule directory entry so the Dockerfile can
        // reference the path even when the local tree is not packaged.
        append_directory(
            &mut tar,
            Path::new(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH),
            0o755,
        )?;
        if options.include_local_opencode_submodule {
            let repo_root = repo_root.ok_or_else(|| {
                io::Error::new(
                    io::ErrorKind::NotFound,
                    "Local opencode build requested but workspace root is unavailable",
                )
            })?;
            append_local_opencode_submodule(&mut tar, repo_root)?;
        }
        // Finalize the tar stream, then flush the gzip trailer.
        tar.finish()?;
        let encoder = tar.into_inner()?;
        encoder.finish()?;
    }
    Ok(archive_buffer)
}
/// Packages the local opencode checkout (`repo_root/packages/opencode`) into
/// the archive under the same relative path. Errors with `NotFound` when the
/// checkout directory does not exist.
fn append_local_opencode_submodule<W: Write>(
    tar: &mut TarBuilder<W>,
    repo_root: &Path,
) -> Result<(), io::Error> {
    let source_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);
    if !source_root.is_dir() {
        let detail = format!(
            "Local opencode submodule path not found: {}",
            source_root.display()
        );
        return Err(io::Error::new(io::ErrorKind::NotFound, detail));
    }
    // The canonical root anchors symlink containment checks during the walk.
    let canonical_source_root = source_root.canonicalize()?;
    append_local_tree_recursive(
        tar,
        &source_root,
        &canonical_source_root,
        Path::new(""),
        Path::new(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH),
    )
}
/// Recursively walks `source_root/relative_path`, appending directories,
/// regular files, and resolvable in-tree symlink targets to the archive under
/// `archive_root`. Entries are visited in file-name order so the archive is
/// deterministic; names excluded by `should_exclude_local_opencode_path` are
/// skipped.
///
/// Fix: `fs::read_dir(&current_path)` had been corrupted to
/// `fs::read_dir(¤t_path)` by an HTML-entity mangle (`&curr` -> `¤`);
/// the intended argument is restored.
fn append_local_tree_recursive<W: Write>(
    tar: &mut TarBuilder<W>,
    source_root: &Path,
    canonical_source_root: &Path,
    relative_path: &Path,
    archive_root: &Path,
) -> Result<(), io::Error> {
    let current_path = source_root.join(relative_path);
    // Sort by file name for a deterministic archive (and stable image layers).
    let mut entries: Vec<_> =
        fs::read_dir(&current_path)?.collect::<Result<Vec<_>, io::Error>>()?;
    entries.sort_by_key(|a| a.file_name());
    for entry in entries {
        let file_name = entry.file_name();
        let entry_relative = if relative_path.as_os_str().is_empty() {
            PathBuf::from(&file_name)
        } else {
            relative_path.join(&file_name)
        };
        if should_exclude_local_opencode_path(&entry_relative) {
            continue;
        }
        let entry_path = entry.path();
        // symlink_metadata: do NOT follow links here; symlinks are handled below.
        let metadata = fs::symlink_metadata(&entry_path)?;
        let archive_path = archive_root.join(&entry_relative);
        if metadata.is_dir() {
            append_directory(tar, &archive_path, mode_from_metadata(&metadata, 0o755))?;
            append_local_tree_recursive(
                tar,
                source_root,
                canonical_source_root,
                &entry_relative,
                archive_root,
            )?;
            continue;
        }
        if metadata.is_file() {
            append_file_from_disk(
                tar,
                &archive_path,
                &entry_path,
                mode_from_metadata(&metadata, 0o644),
            )?;
            continue;
        }
        if metadata.file_type().is_symlink() {
            // Inline the target's contents when the link resolves to a regular
            // file inside the checkout; otherwise skip it (links that escape
            // the tree or dangle are not archived).
            match resolve_local_symlink_target(&entry_path, canonical_source_root)? {
                Some(target_path) => {
                    let target_metadata = fs::metadata(&target_path)?;
                    if target_metadata.is_file() {
                        append_file_from_disk(
                            tar,
                            &archive_path,
                            &target_path,
                            mode_from_metadata(&target_metadata, 0o644),
                        )?;
                    } else {
                        debug!(
                            "Skipping symlink with non-file target in local opencode context: {} -> {}",
                            entry_path.display(),
                            target_path.display()
                        );
                    }
                }
                None => {
                    debug!(
                        "Skipping symlink outside checkout or unresolved in local opencode context: {}",
                        entry_path.display()
                    );
                }
            }
        }
    }
    Ok(())
}
/// Resolves a symlink to its canonical target, returning `Ok(Some(path))`
/// only when the target lies inside `canonical_source_root`. Dangling or
/// unresolvable links yield `Ok(None)`; a failing `read_link` is an error.
fn resolve_local_symlink_target(
    link_path: &Path,
    canonical_source_root: &Path,
) -> Result<Option<PathBuf>, io::Error> {
    let raw_target = fs::read_link(link_path)?;
    // Relative link targets resolve against the link's own directory.
    let resolved = if raw_target.is_absolute() {
        raw_target
    } else {
        let base = link_path.parent().unwrap_or_else(|| Path::new(""));
        base.join(raw_target)
    };
    let Ok(canonical_target) = resolved.canonicalize() else {
        return Ok(None);
    };
    Ok(canonical_target
        .starts_with(canonical_source_root)
        .then_some(canonical_target))
}
/// True when the relative path should be left out of the packaged tree:
/// either its file name is on the excluded-files list (e.g. .DS_Store) or any
/// path component is an excluded directory (e.g. .git, node_modules, target).
fn should_exclude_local_opencode_path(relative_path: &Path) -> bool {
    let name_is_excluded = relative_path.file_name().is_some_and(|name| {
        LOCAL_OPENCODE_EXCLUDED_FILES
            .iter()
            .any(|candidate| name == OsStr::new(candidate))
    });
    if name_is_excluded {
        return true;
    }
    relative_path
        .components()
        .map(|component| component.as_os_str())
        .any(|part| {
            LOCAL_OPENCODE_EXCLUDED_DIRS
                .iter()
                .any(|candidate| part == OsStr::new(candidate))
        })
}
/// Returns the file's permission bits (lower 12 bits of the Unix mode), or
/// `fallback` when the platform reports no bits.
#[cfg(unix)]
fn mode_from_metadata(metadata: &fs::Metadata, fallback: u32) -> u32 {
    use std::os::unix::fs::PermissionsExt;
    match metadata.permissions().mode() & 0o7777 {
        0 => fallback,
        bits => bits,
    }
}
/// Non-Unix platforms expose no mode bits; always use the fallback.
#[cfg(not(unix))]
fn mode_from_metadata(_metadata: &fs::Metadata, fallback: u32) -> u32 {
    fallback
}
/// Appends an empty directory entry at `path` with the given mode.
/// `append_data` fills in the path and checksum (and handles long names).
fn append_directory<W: Write>(
    tar: &mut TarBuilder<W>,
    path: &Path,
    mode: u32,
) -> Result<(), io::Error> {
    let mut header = tar::Header::new_gnu();
    header.set_entry_type(tar::EntryType::Directory);
    header.set_mode(mode);
    header.set_size(0);
    tar.append_data(&mut header, path, io::empty())?;
    Ok(())
}
/// Streams the file at `source_path` into the archive at `archive_path` with
/// the given mode. The entry size comes from the open file's metadata;
/// `append_data` fills in the path and checksum.
fn append_file_from_disk<W: Write>(
    tar: &mut TarBuilder<W>,
    archive_path: &Path,
    source_path: &Path,
    mode: u32,
) -> Result<(), io::Error> {
    let mut file = fs::File::open(source_path)?;
    let file_len = file.metadata()?.len();
    let mut header = tar::Header::new_gnu();
    header.set_mode(mode);
    header.set_size(file_len);
    tar.append_data(&mut header, archive_path, &mut file)?;
    Ok(())
}
/// Appends an in-memory byte slice to the archive at `path` with the given
/// mode. The checksum is set last, after all other header fields.
fn append_bytes<W: Write>(
    tar: &mut TarBuilder<W>,
    path: &str,
    contents: &[u8],
    mode: u32,
) -> Result<(), io::Error> {
    let mut header = tar::Header::new_gnu();
    header.set_path(path)?;
    header.set_mode(mode);
    header.set_size(contents.len() as u64);
    header.set_cksum();
    tar.append(&header, contents)?;
    Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use bollard::models::ImageSummary;
use flate2::read::GzDecoder;
use std::collections::{HashMap, HashSet};
use std::fs;
use std::io::{Cursor, Read};
use tar::Archive;
use tempfile::tempdir;
/// Builds a minimal `ImageSummary` test fixture with the given id, tags,
/// digests, and labels; all other fields use neutral placeholder values.
fn make_image_summary(
    id: &str,
    tags: Vec<&str>,
    digests: Vec<&str>,
    labels: HashMap<String, String>,
) -> ImageSummary {
    ImageSummary {
        id: id.to_string(),
        parent_id: String::new(),
        repo_tags: tags.into_iter().map(|tag| tag.to_string()).collect(),
        repo_digests: digests
            .into_iter()
            .map(|digest| digest.to_string())
            .collect(),
        created: 0,
        size: 0,
        shared_size: -1,
        labels,
        containers: 0,
        manifests: None,
        descriptor: None,
    }
}
/// Decompresses a gzipped tar build context and returns the set of entry
/// paths it contains (lossy UTF-8).
fn archive_entries(context: Vec<u8>) -> HashSet<String> {
    let cursor = Cursor::new(context);
    let decoder = GzDecoder::new(cursor);
    let mut archive = Archive::new(decoder);
    let mut paths = HashSet::new();
    for entry in archive.entries().expect("should read archive entries") {
        let entry = entry.expect("should read entry");
        let path = entry.path().expect("should read entry path");
        paths.insert(path.to_string_lossy().to_string());
    }
    paths
}
fn archive_entry_bytes(context: Vec<u8>, wanted_path: &str) -> Option<Vec<u8>> {
let cursor = Cursor::new(context);
let decoder = GzDecoder::new(cursor);
let mut archive = Archive::new(decoder);
for entry in archive.entries().expect("should read archive entries") {
let mut entry = entry.expect("should read entry");
let path = entry.path().expect("should read entry path");
if path == Path::new(wanted_path) {
let mut bytes = Vec::new();
entry
.read_to_end(&mut bytes)
.expect("should read entry bytes");
return Some(bytes);
}
}
None
}
#[test]
fn create_build_context_succeeds() {
let context =
create_build_context(BuildContextOptions::default()).expect("should create context");
assert!(!context.is_empty(), "context should not be empty");
assert_eq!(context[0], 0x1f, "should be gzip compressed");
assert_eq!(context[1], 0x8b, "should be gzip compressed");
}
#[test]
fn build_context_includes_docker_assets() {
let context =
create_build_context(BuildContextOptions::default()).expect("should create context");
let cursor = Cursor::new(context);
let decoder = GzDecoder::new(cursor);
let mut archive = Archive::new(decoder);
let mut found_entrypoint = false;
let mut found_healthcheck = false;
let mut found_bootstrap_helper = false;
for entry in archive.entries().expect("should read archive entries") {
let entry = entry.expect("should read entry");
let path = entry.path().expect("should read entry path");
if path == std::path::Path::new("packages/core/src/docker/files/entrypoint.sh") {
found_entrypoint = true;
}
if path == std::path::Path::new("packages/core/src/docker/files/healthcheck.sh") {
found_healthcheck = true;
}
if path
== std::path::Path::new(
"packages/core/src/docker/files/opencode-cloud-bootstrap.sh",
)
{
found_bootstrap_helper = true;
}
if found_entrypoint && found_healthcheck && found_bootstrap_helper {
break;
}
}
assert!(
found_entrypoint,
"entrypoint asset should be in the build context"
);
assert!(
found_healthcheck,
"healthcheck asset should be in the build context"
);
assert!(
found_bootstrap_helper,
"bootstrap helper asset should be in the build context"
);
}
#[test]
fn build_context_includes_opencode_placeholder_in_default_mode() {
let context =
create_build_context(BuildContextOptions::default()).expect("should create context");
let entries = archive_entries(context);
assert!(
entries
.iter()
.any(|path| path.trim_end_matches('/') == "packages/opencode"),
"default mode should include an empty packages/opencode placeholder"
);
}
#[test]
fn build_context_local_mode_includes_submodule_and_excludes_heavy_paths() {
let temp = tempdir().expect("should create tempdir");
let repo_root = temp.path();
let submodule_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);
fs::create_dir_all(submodule_root.join("src")).expect("should create src");
fs::create_dir_all(submodule_root.join(".git")).expect("should create .git");
fs::create_dir_all(submodule_root.join("node_modules/pkg"))
.expect("should create node_modules");
fs::create_dir_all(submodule_root.join("target/release")).expect("should create target");
fs::create_dir_all(submodule_root.join("dist")).expect("should create dist");
fs::create_dir_all(submodule_root.join(".turbo")).expect("should create .turbo");
fs::create_dir_all(submodule_root.join(".cache")).expect("should create .cache");
fs::create_dir_all(submodule_root.join(".planning/phases/very-long-planning-phase-name"))
.expect("should create planning");
fs::write(submodule_root.join("package.json"), "{}").expect("should write package.json");
fs::write(submodule_root.join("src/main.ts"), "console.log('ok');")
.expect("should write source");
fs::write(submodule_root.join(".git/config"), "dirty").expect("should write .git file");
fs::write(submodule_root.join("node_modules/pkg/index.js"), "ignored")
.expect("should write node_modules file");
fs::write(submodule_root.join("target/release/app"), "ignored")
.expect("should write target file");
fs::write(submodule_root.join("dist/ui.js"), "ignored").expect("should write dist file");
fs::write(submodule_root.join(".turbo/state.json"), "ignored")
.expect("should write turbo file");
fs::write(submodule_root.join(".cache/cache.bin"), "ignored")
.expect("should write cache file");
fs::write(
submodule_root.join(".planning/phases/very-long-planning-phase-name/phase.md"),
"ignored",
)
.expect("should write planning file");
fs::write(submodule_root.join(".DS_Store"), "ignored").expect("should write ds_store");
let context = create_build_context_with_repo_root(
BuildContextOptions {
include_local_opencode_submodule: true,
},
Some(repo_root),
)
.expect("should create local context");
let entries = archive_entries(context);
assert!(
entries.contains("packages/opencode/package.json"),
"local mode should include submodule files"
);
assert!(
entries.contains("packages/opencode/src/main.ts"),
"local mode should include source files"
);
assert!(
!entries.contains("packages/opencode/.git/config"),
"local mode should exclude .git"
);
assert!(
!entries.contains("packages/opencode/node_modules/pkg/index.js"),
"local mode should exclude node_modules"
);
assert!(
!entries.contains("packages/opencode/target/release/app"),
"local mode should exclude target"
);
assert!(
!entries.contains("packages/opencode/dist/ui.js"),
"local mode should exclude dist"
);
assert!(
!entries.contains("packages/opencode/.turbo/state.json"),
"local mode should exclude .turbo"
);
assert!(
!entries.contains("packages/opencode/.cache/cache.bin"),
"local mode should exclude .cache"
);
assert!(
!entries.contains(
"packages/opencode/.planning/phases/very-long-planning-phase-name/phase.md"
),
"local mode should exclude .planning"
);
assert!(
!entries.contains("packages/opencode/.DS_Store"),
"local mode should exclude .DS_Store files"
);
}
#[test]
fn build_context_local_mode_supports_long_non_excluded_paths() {
let temp = tempdir().expect("should create tempdir");
let repo_root = temp.path();
let submodule_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);
fs::create_dir_all(&submodule_root).expect("should create submodule root");
fs::write(submodule_root.join("package.json"), "{}").expect("should write package.json");
let long_segment = "a".repeat(140);
let long_dir = submodule_root.join("src").join(&long_segment);
fs::create_dir_all(&long_dir).expect("should create long path directory");
fs::write(long_dir.join("main.ts"), "console.log('long path');")
.expect("should write long path file");
let context = create_build_context_with_repo_root(
BuildContextOptions {
include_local_opencode_submodule: true,
},
Some(repo_root),
)
.expect("should create local context with long paths");
let entries = archive_entries(context);
let long_entry = format!("packages/opencode/src/{long_segment}/main.ts");
assert!(
entries.contains(&long_entry),
"long non-excluded path should be archived via GNU long-name handling"
);
}
#[cfg(unix)]
#[test]
fn build_context_local_mode_materializes_symlinked_files() {
use std::os::unix::fs::symlink;
let temp = tempdir().expect("should create tempdir");
let repo_root = temp.path();
let submodule_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);
let fonts_dir = submodule_root.join("packages/ui/src/assets/fonts");
fs::create_dir_all(&fonts_dir).expect("should create fonts dir");
fs::write(submodule_root.join("package.json"), "{}").expect("should write package.json");
fs::write(
fonts_dir.join("BlexMonoNerdFontMono-Regular.woff2"),
b"font-bytes",
)
.expect("should write target font");
symlink(
"BlexMonoNerdFontMono-Regular.woff2",
fonts_dir.join("ibm-plex-mono.woff2"),
)
.expect("should create symlinked font");
let context = create_build_context_with_repo_root(
BuildContextOptions {
include_local_opencode_submodule: true,
},
Some(repo_root),
)
.expect("should create local context with symlink");
let entries = archive_entries(context.clone());
assert!(
entries.contains("packages/opencode/packages/ui/src/assets/fonts/ibm-plex-mono.woff2"),
"local mode should include symlinked asset paths"
);
let alias_bytes = archive_entry_bytes(
context,
"packages/opencode/packages/ui/src/assets/fonts/ibm-plex-mono.woff2",
)
.expect("symlinked asset should contain bytes");
assert_eq!(alias_bytes, b"font-bytes");
}
#[test]
fn default_tag_is_latest() {
assert_eq!(IMAGE_TAG_DEFAULT, "latest");
}
#[test]
fn format_build_error_includes_recent_logs() {
let mut logs = VecDeque::new();
logs.push_back("Step 1/5 : FROM ubuntu:24.04".to_string());
logs.push_back("Step 2/5 : RUN apt-get update".to_string());
logs.push_back("E: Unable to fetch some archives".to_string());
let error_logs = VecDeque::new();
let buildkit_logs = VecDeque::new();
let result = format_build_error_with_context(
"Build failed: exit code 1",
&logs,
&error_logs,
&buildkit_logs,
);
assert!(result.contains("Build failed: exit code 1"));
assert!(result.contains("Recent build output:"));
assert!(result.contains("Step 1/5"));
assert!(result.contains("Unable to fetch"));
}
#[test]
fn format_build_error_handles_empty_logs() {
let logs = VecDeque::new();
let error_logs = VecDeque::new();
let buildkit_logs = VecDeque::new();
let result =
format_build_error_with_context("Stream error", &logs, &error_logs, &buildkit_logs);
assert!(result.contains("Stream error"));
assert!(!result.contains("Recent build output:"));
}
#[test]
fn format_build_error_adds_network_suggestion() {
let logs = VecDeque::new();
let error_logs = VecDeque::new();
let buildkit_logs = VecDeque::new();
let result = format_build_error_with_context(
"connection timeout",
&logs,
&error_logs,
&buildkit_logs,
);
assert!(result.contains("Check your network connection"));
}
#[test]
fn format_build_error_adds_disk_suggestion() {
let logs = VecDeque::new();
let error_logs = VecDeque::new();
let buildkit_logs = VecDeque::new();
let result = format_build_error_with_context(
"no space left on device",
&logs,
&error_logs,
&buildkit_logs,
);
assert!(result.contains("Free up disk space"));
}
#[test]
fn format_build_error_shows_error_lines_separately() {
let mut recent_logs = VecDeque::new();
recent_logs.push_back("Compiling foo v1.0".to_string());
recent_logs.push_back("Successfully installed bar".to_string());
let mut error_logs = VecDeque::new();
error_logs.push_back("error: failed to compile dust".to_string());
error_logs.push_back("error: failed to compile glow".to_string());
let buildkit_logs = VecDeque::new();
let result = format_build_error_with_context(
"Build failed",
&recent_logs,
&error_logs,
&buildkit_logs,
);
assert!(result.contains("Potential errors detected during build:"));
assert!(result.contains("failed to compile dust"));
assert!(result.contains("failed to compile glow"));
}
#[test]
fn is_error_line_detects_errors() {
assert!(is_error_line("error: something failed"));
assert!(is_error_line("Error: build failed"));
assert!(is_error_line("Failed to install package"));
assert!(is_error_line("cannot find module"));
assert!(is_error_line("Unable to locate package"));
assert!(!is_error_line("Compiling foo v1.0"));
assert!(!is_error_line("Successfully installed"));
}
#[test]
fn collect_image_ids_matches_labels() {
let mut labels = HashMap::new();
labels.insert(LABEL_SOURCE.to_string(), LABEL_SOURCE_VALUE.to_string());
let images = vec![
make_image_summary("sha256:opencode", vec![], vec![], labels),
make_image_summary(
"sha256:other",
vec!["busybox:latest"],
vec![],
HashMap::new(),
),
];
let ids = collect_image_ids(&images, "opencode-cloud-sandbox");
assert!(ids.contains("sha256:opencode"));
assert!(!ids.contains("sha256:other"));
}
#[test]
fn clean_buildkit_label_strips_internal_load_remote_context() {
assert_eq!(
clean_buildkit_label("[internal] load remote build context"),
"Loading remote build context"
);
}
#[test]
fn clean_buildkit_label_strips_internal_load_metadata() {
assert_eq!(
clean_buildkit_label("[internal] load metadata for docker.io/library/ubuntu:24.04"),
"Resolving image docker.io/library/ubuntu:24.04"
);
}
#[test]
fn clean_buildkit_label_strips_internal_load_build_definition() {
assert_eq!(
clean_buildkit_label("[internal] load build definition from Dockerfile"),
"Loading Dockerfile"
);
}
#[test]
fn clean_buildkit_label_strips_internal_load_build_context() {
assert_eq!(
clean_buildkit_label("[internal] load build context"),
"Loading build context"
);
}
#[test]
fn clean_buildkit_label_capitalizes_unknown_internal() {
assert_eq!(
clean_buildkit_label("[internal] some unknown thing"),
"Some unknown thing"
);
}
#[test]
fn clean_buildkit_label_preserves_runtime_steps() {
assert_eq!(
clean_buildkit_label("[runtime 1/15] RUN apt-get update"),
"[runtime 1/15] RUN apt-get update"
);
}
#[test]
fn clean_buildkit_label_preserves_plain_text() {
assert_eq!(
clean_buildkit_label("Step 3/10 : COPY . ."),
"Step 3/10 : COPY . ."
);
}
#[test]
fn clean_buildkit_label_trims_whitespace() {
assert_eq!(
clean_buildkit_label(" [internal] load build context "),
"Loading build context"
);
}
}