opencode_cloud_core/docker/image.rs

//! Docker image build and pull operations
//!
//! This module provides functionality to build Docker images from the embedded
//! Dockerfile and pull images from registries with progress feedback.

use super::progress::ProgressReporter;
use super::{
    CONTAINER_NAME, DOCKERFILE, DockerClient, DockerError, ENTRYPOINT_SH, HEALTHCHECK_SH,
    IMAGE_NAME_DOCKERHUB, IMAGE_NAME_GHCR, IMAGE_TAG_DEFAULT, OPENCODE_CLOUD_BOOTSTRAP_SH,
    active_resource_names, remap_image_tag,
};
use bollard::moby::buildkit::v1::StatusResponse as BuildkitStatusResponse;
use bollard::models::BuildInfoAux;
use bollard::query_parameters::{
    BuildImageOptions, BuilderVersion, CreateImageOptions, ListImagesOptionsBuilder,
    RemoveImageOptionsBuilder, TagImageOptions,
};
use bytes::Bytes;
use flate2::Compression;
use flate2::write::GzEncoder;
use futures_util::StreamExt;
use http_body_util::{Either, Full};
use std::collections::{HashMap, HashSet, VecDeque};
use std::env;
use std::ffi::OsStr;
use std::fs;
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
use tar::Builder as TarBuilder;
use tracing::{debug, warn};

/// Default number of recent build log lines to capture for error context
const DEFAULT_BUILD_LOG_BUFFER_SIZE: usize = 20;

/// Default number of error lines to capture separately
const DEFAULT_ERROR_LOG_BUFFER_SIZE: usize = 10;

const LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH: &str = "packages/opencode";
// Keep local source excludes aligned with Dockerfile Build Hygiene Rules and
// the Dockerfile Optimization Checklist in README docs.
const LOCAL_OPENCODE_EXCLUDED_DIRS: &[&str] = &[
    ".git",
    ".planning",
    "node_modules",
    "target",
    "dist",
    ".turbo",
    ".cache",
];
const LOCAL_OPENCODE_EXCLUDED_FILES: &[&str] = &[".DS_Store"];

#[derive(Debug, Clone, Copy, Default)]
struct BuildContextOptions {
    include_local_opencode_submodule: bool,
}

fn effective_image_tag(tag: &str) -> String {
    remap_image_tag(tag)
}

fn profile_scoped_image_ids(images: &[bollard::models::ImageSummary]) -> Option<HashSet<String>> {
    let names = active_resource_names();
    let instance_id = names.instance_id.as_deref()?;
    let expected_tags = [
        format!("{IMAGE_NAME_GHCR}:{}", names.image_tag),
        format!("{IMAGE_NAME_DOCKERHUB}:{}", names.image_tag),
        format!("{IMAGE_NAME_GHCR}:{}", names.previous_image_tag),
        format!("{IMAGE_NAME_DOCKERHUB}:{}", names.previous_image_tag),
    ];

    // In isolated mode, avoid broad "contains name fragment" matching and only remove
    // image tags associated with the active instance.
    let mut ids = HashSet::new();
    for image in images {
        let tag_match = image
            .repo_tags
            .iter()
            .any(|tag| expected_tags.contains(tag));
        let label_match = image
            .labels
            .get(super::INSTANCE_LABEL_KEY)
            .is_some_and(|value| value == instance_id);
        if tag_match || label_match {
            ids.insert(image.id.clone());
        }
    }
    Some(ids)
}

/// Read a log buffer size from env with bounds
fn read_log_buffer_size(var_name: &str, default: usize) -> usize {
    let Ok(value) = env::var(var_name) else {
        return default;
    };
    let Ok(parsed) = value.trim().parse::<usize>() else {
        return default;
    };
    parsed.clamp(5, 500)
}
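
// A quick sketch of the fallback path above: an unset variable yields the
// caller's default. The variable name below is hypothetical and assumed to be
// unset in the test environment; values that do parse are clamped to 5..=500.
#[cfg(test)]
mod read_log_buffer_size_examples {
    use super::read_log_buffer_size;

    #[test]
    fn unset_var_returns_default() {
        assert_eq!(
            read_log_buffer_size("OPENCODE_TEST_HYPOTHETICAL_UNSET_TAIL", 20),
            20
        );
    }
}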

/// Check if a line looks like an error message
fn is_error_line(line: &str) -> bool {
    let lower = line.to_lowercase();
    lower.contains("error")
        || lower.contains("failed")
        || lower.contains("cannot")
        || lower.contains("unable to")
        || lower.contains("not found")
        || lower.contains("permission denied")
}

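// Illustrative check of the heuristic above; the sample strings are invented
// for the example, not taken from real build output.
#[cfg(test)]
mod is_error_line_examples {
    use super::is_error_line;

    #[test]
    fn flags_failure_phrases_case_insensitively() {
        assert!(is_error_line("npm ERR! package not found"));
        assert!(is_error_line("Permission Denied while connecting to the daemon"));
        assert!(!is_error_line("Step 3/12 : RUN bun install"));
    }
}
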
/// Check if an image exists locally
pub async fn image_exists(
    client: &DockerClient,
    image: &str,
    tag: &str,
) -> Result<bool, DockerError> {
    let tag = effective_image_tag(tag);
    let full_name = format!("{image}:{tag}");
    debug!("Checking if image exists: {}", full_name);

    match client.inner().inspect_image(&full_name).await {
        Ok(_) => Ok(true),
        Err(bollard::errors::Error::DockerResponseServerError {
            status_code: 404, ..
        }) => Ok(false),
        Err(e) => Err(DockerError::from(e)),
    }
}

/// Remove all images whose tags, digests, or labels match the provided name fragment
///
/// Returns the number of images removed.
pub async fn remove_images_by_name(
    client: &DockerClient,
    name_fragment: &str,
    force: bool,
) -> Result<usize, DockerError> {
    debug!("Removing Docker images matching '{name_fragment}'");

    let images = list_docker_images(client).await?;

    let image_ids = if name_fragment == CONTAINER_NAME {
        profile_scoped_image_ids(&images)
            .unwrap_or_else(|| collect_image_ids(&images, name_fragment))
    } else {
        collect_image_ids(&images, name_fragment)
    };
    remove_image_ids(client, image_ids, force).await
}

/// List all local Docker images (including intermediate layers).
async fn list_docker_images(
    client: &DockerClient,
) -> Result<Vec<bollard::models::ImageSummary>, DockerError> {
    let list_options = ListImagesOptionsBuilder::new().all(true).build();
    client
        .inner()
        .list_images(Some(list_options))
        .await
        .map_err(|e| DockerError::Image(format!("Failed to list images: {e}")))
}

const LABEL_TITLE: &str = "org.opencontainers.image.title";
const LABEL_SOURCE: &str = "org.opencontainers.image.source";
const LABEL_URL: &str = "org.opencontainers.image.url";

const LABEL_TITLE_VALUE: &str = "opencode-cloud-sandbox";
const LABEL_SOURCE_VALUE: &str = "https://github.com/pRizz/opencode-cloud";
const LABEL_URL_VALUE: &str = "https://github.com/pRizz/opencode-cloud";

/// Collect image IDs that contain the provided name fragment or match opencode labels.
fn collect_image_ids(
    images: &[bollard::models::ImageSummary],
    name_fragment: &str,
) -> HashSet<String> {
    let mut image_ids = HashSet::new();
    for image in images {
        if image_matches_fragment_or_labels(image, name_fragment) {
            image_ids.insert(image.id.clone());
        }
    }
    image_ids
}

fn image_matches_fragment_or_labels(
    image: &bollard::models::ImageSummary,
    name_fragment: &str,
) -> bool {
    let tag_match = image
        .repo_tags
        .iter()
        .any(|tag| tag != "<none>:<none>" && tag.contains(name_fragment));
    let digest_match = image
        .repo_digests
        .iter()
        .any(|digest| digest.contains(name_fragment));
    let label_match = image_labels_match(&image.labels);

    tag_match || digest_match || label_match
}

fn image_labels_match(labels: &HashMap<String, String>) -> bool {
    labels
        .get(LABEL_SOURCE)
        .is_some_and(|value| value == LABEL_SOURCE_VALUE)
        || labels
            .get(LABEL_URL)
            .is_some_and(|value| value == LABEL_URL_VALUE)
        || labels
            .get(LABEL_TITLE)
            .is_some_and(|value| value == LABEL_TITLE_VALUE)
}
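
// A small sketch of the label matching above, reusing this module's OCI label
// constants; the label map is fabricated for the example.
#[cfg(test)]
mod image_labels_match_examples {
    use super::{LABEL_SOURCE, LABEL_SOURCE_VALUE, image_labels_match};
    use std::collections::HashMap;

    #[test]
    fn matches_on_source_label() {
        let mut labels = HashMap::new();
        labels.insert(LABEL_SOURCE.to_string(), LABEL_SOURCE_VALUE.to_string());
        assert!(image_labels_match(&labels));
        assert!(!image_labels_match(&HashMap::new()));
    }
}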

/// Remove image IDs, returning the number removed.
async fn remove_image_ids(
    client: &DockerClient,
    image_ids: HashSet<String>,
    force: bool,
) -> Result<usize, DockerError> {
    if image_ids.is_empty() {
        return Ok(0);
    }

    let remove_options = RemoveImageOptionsBuilder::new().force(force).build();
    let mut removed = 0usize;
    for image_id in image_ids {
        let result = client
            .inner()
            .remove_image(&image_id, Some(remove_options.clone()), None)
            .await;
        match result {
            Ok(_) => removed += 1,
            Err(bollard::errors::Error::DockerResponseServerError {
                status_code: 404, ..
            }) => {
                debug!("Docker image already removed: {}", image_id);
            }
            Err(err) => {
                return Err(DockerError::Image(format!(
                    "Failed to remove image {image_id}: {err}"
                )));
            }
        }
    }

    Ok(removed)
}

/// Build the opencode image from the embedded Dockerfile
///
/// Shows real-time build progress with streaming output.
/// Returns the full image:tag string on success.
///
/// # Arguments
/// * `client` - Docker client
/// * `tag` - Image tag (defaults to IMAGE_TAG_DEFAULT)
/// * `progress` - Progress reporter for build feedback
/// * `no_cache` - If true, build without using Docker layer cache
/// * `build_args` - Optional Docker build args; `OPENCODE_SOURCE=local` packages the local submodule checkout
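///
/// # Example
///
/// A minimal usage sketch (marked `ignore`: it assumes a connected Docker
/// daemon plus caller-provided `client` and `progress` values):
///
/// ```ignore
/// // Build the default tag without extra build args.
/// let image = build_image(&client, None, &mut progress, false, None).await?;
/// println!("built {image}");
/// ```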
pub async fn build_image(
    client: &DockerClient,
    tag: Option<&str>,
    progress: &mut ProgressReporter,
    no_cache: bool,
    build_args: Option<HashMap<String, String>>,
) -> Result<String, DockerError> {
    let tag = effective_image_tag(tag.unwrap_or(IMAGE_TAG_DEFAULT));
    let full_name = format!("{IMAGE_NAME_GHCR}:{tag}");
    debug!("Building image: {} (no_cache: {})", full_name, no_cache);

    let build_args = build_args.unwrap_or_default();
    let include_local_opencode_submodule = build_args
        .get("OPENCODE_SOURCE")
        .is_some_and(|value| value.eq_ignore_ascii_case("local"));

    // Create tar archive containing Dockerfile and (optionally) the local submodule checkout.
    // This can take several seconds for local submodule builds due to recursive tar+gzip.
    let context_msg = if include_local_opencode_submodule {
        "Packaging local opencode checkout"
    } else {
        "Preparing build context"
    };
    progress.update_spinner("build", context_msg);
    let context = create_build_context(BuildContextOptions {
        include_local_opencode_submodule,
    })
    .map_err(|e| DockerError::Build(format!("Failed to create build context: {e}")))?;

    // Set up build options
    // Explicitly use BuildKit builder to support cache mounts (--mount=type=cache)
    // BuildKit requires a unique session ID for each build
    let session_id = format!(
        "opencode-cloud-build-{}",
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos()
    );
    let options = BuildImageOptions {
        t: Some(full_name.clone()),
        dockerfile: "Dockerfile".to_string(),
        version: BuilderVersion::BuilderBuildKit,
        session: Some(session_id),
        rm: true,
        nocache: no_cache,
        buildargs: Some(build_args),
        platform: String::new(),
        target: String::new(),
        ..Default::default()
    };

    // Create build body from context
    let body: Either<Full<Bytes>, _> = Either::Left(Full::new(Bytes::from(context)));

    // Sending the context to Docker and waiting for build initialization can take
    // several seconds, especially for large local-submodule contexts.
    progress.update_spinner("build", "Sending build context to Docker");

    // Start build with streaming output
    let mut stream = client.inner().build_image(options, None, Some(body));

    progress.update_spinner("build", "Waiting for Docker build to start");

    let mut maybe_image_id = None;
    let mut log_state = BuildLogState::new();

    while let Some(result) = stream.next().await {
        // Bind via `match` so the error arm owns the error value; a `let Ok(..) = result else`
        // here could not reuse `result` inside the else branch after the move.
        let info = match result {
            Ok(info) => info,
            Err(err) => {
                return Err(handle_stream_error(
                    "Build failed",
                    err.to_string(),
                    &log_state,
                    progress,
                ));
            }
        };

        handle_stream_message(&info, progress, &mut log_state);

        if let Some(error_detail) = &info.error_detail
            && let Some(error_msg) = &error_detail.message
        {
            progress.abandon_all(error_msg);
            let context = format_build_error_with_context(
                error_msg,
                &log_state.recent_logs,
                &log_state.error_logs,
                &log_state.recent_buildkit_logs,
            );
            return Err(DockerError::Build(context));
        }

        if let Some(aux) = info.aux {
            match aux {
                BuildInfoAux::Default(image_id) => {
                    if let Some(id) = image_id.id {
                        maybe_image_id = Some(id);
                    }
                }
                BuildInfoAux::BuildKit(status) => {
                    handle_buildkit_status(&status, progress, &mut log_state);
                }
            }
        }
    }

    let image_id = maybe_image_id.unwrap_or_else(|| "unknown".to_string());
    let finish_msg = format!("Build complete: {image_id}");
    progress.finish("build", &finish_msg);

    Ok(full_name)
}

struct BuildLogState {
    recent_logs: VecDeque<String>,
    error_logs: VecDeque<String>,
    recent_buildkit_logs: VecDeque<String>,
    build_log_buffer_size: usize,
    error_log_buffer_size: usize,
    last_buildkit_vertex: Option<String>,
    last_buildkit_vertex_id: Option<String>,
    export_vertex_id: Option<String>,
    export_vertex_name: Option<String>,
    buildkit_logs_by_vertex_id: HashMap<String, String>,
    vertex_name_by_vertex_id: HashMap<String, String>,
}

impl BuildLogState {
    fn new() -> Self {
        let build_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_LOG_TAIL",
            DEFAULT_BUILD_LOG_BUFFER_SIZE,
        );
        let error_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_ERROR_TAIL",
            DEFAULT_ERROR_LOG_BUFFER_SIZE,
        );
        Self {
            recent_logs: VecDeque::with_capacity(build_log_buffer_size),
            error_logs: VecDeque::with_capacity(error_log_buffer_size),
            recent_buildkit_logs: VecDeque::with_capacity(build_log_buffer_size),
            build_log_buffer_size,
            error_log_buffer_size,
            last_buildkit_vertex: None,
            last_buildkit_vertex_id: None,
            export_vertex_id: None,
            export_vertex_name: None,
            buildkit_logs_by_vertex_id: HashMap::new(),
            vertex_name_by_vertex_id: HashMap::new(),
        }
    }
}

fn handle_stream_message(
    info: &bollard::models::BuildInfo,
    progress: &mut ProgressReporter,
    state: &mut BuildLogState,
) {
    let Some(stream_msg) = info.stream.as_deref() else {
        return;
    };
    let msg = stream_msg.trim();
    if msg.is_empty() {
        return;
    }

    if progress.is_plain_output() {
        eprint!("{stream_msg}");
    } else {
        let has_runtime_vertex = state
            .last_buildkit_vertex
            .as_deref()
            .is_some_and(|name| name.starts_with("[runtime "));
        let is_internal_msg = msg.contains("[internal]");
        if !(has_runtime_vertex && is_internal_msg) {
            progress.update_spinner("build", stream_msg);
        }
    }

    if state.recent_logs.len() >= state.build_log_buffer_size {
        state.recent_logs.pop_front();
    }
    state.recent_logs.push_back(msg.to_string());

    if is_error_line(msg) {
        if state.error_logs.len() >= state.error_log_buffer_size {
            state.error_logs.pop_front();
        }
        state.error_logs.push_back(msg.to_string());
    }

    if msg.starts_with("Step ") {
        debug!("Build step: {}", msg);
    }
}

fn handle_buildkit_status(
    status: &BuildkitStatusResponse,
    progress: &mut ProgressReporter,
    state: &mut BuildLogState,
) {
    let latest_logs = append_buildkit_logs(&mut state.buildkit_logs_by_vertex_id, status);
    update_buildkit_vertex_names(&mut state.vertex_name_by_vertex_id, status);
    update_export_vertex_from_logs(
        &latest_logs,
        &state.vertex_name_by_vertex_id,
        &mut state.export_vertex_id,
        &mut state.export_vertex_name,
    );
    let (vertex_id, vertex_name) = match select_latest_buildkit_vertex(
        status,
        &state.vertex_name_by_vertex_id,
        state.export_vertex_id.as_deref(),
        state.export_vertex_name.as_deref(),
    ) {
        Some((vertex_id, vertex_name)) => (vertex_id, vertex_name),
        None => {
            let Some(log_entry) = latest_logs.last() else {
                return;
            };
            let name = state
                .vertex_name_by_vertex_id
                .get(&log_entry.vertex_id)
                .cloned()
                .or_else(|| state.last_buildkit_vertex.clone())
                .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));
            (log_entry.vertex_id.clone(), name)
        }
    };
    record_buildkit_logs(state, &latest_logs, &vertex_id, &vertex_name);
    state.last_buildkit_vertex_id = Some(vertex_id.clone());
    if state.last_buildkit_vertex.as_deref() != Some(&vertex_name) {
        state.last_buildkit_vertex = Some(vertex_name.clone());
    }

    let message = if progress.is_plain_output() {
        vertex_name
    } else if let Some(log_entry) = latest_logs
        .iter()
        .rev()
        .find(|entry| entry.vertex_id == vertex_id)
    {
        format!("{vertex_name} · {}", log_entry.message)
    } else {
        vertex_name
    };
    progress.update_spinner("build", &message);

    if progress.is_plain_output() {
        for log_entry in latest_logs {
            eprintln!("[{}] {}", log_entry.vertex_id, log_entry.message);
        }
        return;
    }

    let (Some(current_id), Some(current_name)) = (
        state.last_buildkit_vertex_id.as_ref(),
        state.last_buildkit_vertex.as_ref(),
    ) else {
        return;
    };

    let name = state
        .vertex_name_by_vertex_id
        .get(current_id)
        .unwrap_or(current_name);
    // Keep non-verbose output on the spinner line only.
    let _ = name;
}

fn handle_stream_error(
    prefix: &str,
    error_str: String,
    state: &BuildLogState,
    progress: &mut ProgressReporter,
) -> DockerError {
    progress.abandon_all(prefix);

    let buildkit_hint = if error_str.contains("mount")
        || error_str.contains("--mount")
        || state
            .recent_logs
            .iter()
            .any(|log| log.contains("--mount") && log.contains("cache"))
    {
        "\n\nNote: This Dockerfile uses BuildKit cache mounts (--mount=type=cache).\n\
         The build is configured to use BuildKit, but the Docker daemon may not support it.\n\
         Ensure BuildKit is enabled in Docker Desktop settings and the daemon is restarted."
    } else {
        ""
    };

    let context = format!(
        "{}{}",
        format_build_error_with_context(
            &error_str,
            &state.recent_logs,
            &state.error_logs,
            &state.recent_buildkit_logs,
        ),
        buildkit_hint
    );
    DockerError::Build(context)
}

fn update_buildkit_vertex_names(
    vertex_name_by_vertex_id: &mut HashMap<String, String>,
    status: &BuildkitStatusResponse,
) {
    for vertex in &status.vertexes {
        if vertex.name.is_empty() {
            continue;
        }
        vertex_name_by_vertex_id
            .entry(vertex.digest.clone())
            .or_insert_with(|| vertex.name.clone());
    }
}

fn select_latest_buildkit_vertex(
    status: &BuildkitStatusResponse,
    vertex_name_by_vertex_id: &HashMap<String, String>,
    export_vertex_id: Option<&str>,
    export_vertex_name: Option<&str>,
) -> Option<(String, String)> {
    if let Some(export_vertex_id) = export_vertex_id {
        let name = export_vertex_name
            .map(str::to_string)
            .or_else(|| vertex_name_by_vertex_id.get(export_vertex_id).cloned())
            .unwrap_or_else(|| format_vertex_fallback_label(export_vertex_id));
        return Some((export_vertex_id.to_string(), name));
    }

    let mut best_runtime: Option<(u32, String, String)> = None;
    let mut fallback: Option<(String, String)> = None;

    for vertex in &status.vertexes {
        let name = if vertex.name.is_empty() {
            vertex_name_by_vertex_id.get(&vertex.digest).cloned()
        } else {
            Some(vertex.name.clone())
        };

        let Some(name) = name else {
            continue;
        };

        if fallback.is_none() && !name.starts_with("[internal]") {
            fallback = Some((vertex.digest.clone(), name.clone()));
        }

        if let Some(step) = parse_runtime_step(&name) {
            match &best_runtime {
                Some((best_step, _, _)) if *best_step >= step => {}
                _ => {
                    best_runtime = Some((step, vertex.digest.clone(), name.clone()));
                }
            }
        }
    }

    if let Some((_, digest, name)) = best_runtime {
        Some((digest, name))
    } else {
        fallback.or_else(|| {
            status.vertexes.iter().find_map(|vertex| {
                let name = if vertex.name.is_empty() {
                    vertex_name_by_vertex_id.get(&vertex.digest).cloned()
                } else {
                    Some(vertex.name.clone())
                };
                name.map(|resolved| (vertex.digest.clone(), resolved))
            })
        })
    }
}

fn parse_runtime_step(name: &str) -> Option<u32> {
    let prefix = "[runtime ";
    let start = name.find(prefix)? + prefix.len();
    let rest = &name[start..];
    let end = rest.find('/')?;
    rest[..end].trim().parse::<u32>().ok()
}
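
// Illustrative parse of a BuildKit vertex name; the sample mirrors the
// "[runtime N/M] ..." convention this file keys on, but is invented here.
#[cfg(test)]
mod parse_runtime_step_examples {
    use super::parse_runtime_step;

    #[test]
    fn extracts_step_number() {
        assert_eq!(parse_runtime_step("[runtime 7/24] RUN bun install"), Some(7));
        assert_eq!(parse_runtime_step("[internal] load build context"), None);
    }
}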

fn format_vertex_fallback_label(vertex_id: &str) -> String {
    let short = vertex_id
        .strip_prefix("sha256:")
        .unwrap_or(vertex_id)
        .chars()
        .take(12)
        .collect::<String>();
    format!("vertex {short}")
}
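
// A quick sketch of the fallback label: digests are shortened to 12 characters,
// matching Docker's usual short-ID width. The digest below is made up.
#[cfg(test)]
mod format_vertex_fallback_label_examples {
    use super::format_vertex_fallback_label;

    #[test]
    fn shortens_sha256_digests() {
        assert_eq!(
            format_vertex_fallback_label("sha256:0123456789abcdef0123456789abcdef"),
            "vertex 0123456789ab"
        );
    }
}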

fn update_export_vertex_from_logs(
    latest_logs: &[BuildkitLogEntry],
    vertex_name_by_vertex_id: &HashMap<String, String>,
    export_vertex_id: &mut Option<String>,
    export_vertex_name: &mut Option<String>,
) {
    if let Some(entry) = latest_logs
        .iter()
        .rev()
        .find(|log| log.message.trim_start().starts_with("exporting to image"))
    {
        *export_vertex_id = Some(entry.vertex_id.clone());
        if let Some(name) = vertex_name_by_vertex_id.get(&entry.vertex_id) {
            *export_vertex_name = Some(name.clone());
        }
    }
}

fn record_buildkit_logs(
    state: &mut BuildLogState,
    latest_logs: &[BuildkitLogEntry],
    current_vertex_id: &str,
    current_vertex_name: &str,
) {
    for log_entry in latest_logs {
        let name = state
            .vertex_name_by_vertex_id
            .get(&log_entry.vertex_id)
            .cloned()
            .or_else(|| {
                if log_entry.vertex_id == current_vertex_id {
                    Some(current_vertex_name.to_string())
                } else {
                    None
                }
            })
            .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));

        let message = log_entry.message.replace('\r', "").trim_end().to_string();
        if message.is_empty() {
            continue;
        }

        if state.recent_buildkit_logs.len() >= state.build_log_buffer_size {
            state.recent_buildkit_logs.pop_front();
        }
        state
            .recent_buildkit_logs
            .push_back(format!("[{name}] {message}"));
    }
}

#[derive(Debug, Clone)]
struct BuildkitLogEntry {
    vertex_id: String,
    message: String,
}

fn append_buildkit_logs(
    logs: &mut HashMap<String, String>,
    status: &BuildkitStatusResponse,
) -> Vec<BuildkitLogEntry> {
    let mut latest: Vec<BuildkitLogEntry> = Vec::new();

    for log in &status.logs {
        let vertex_id = log.vertex.clone();
        let message = String::from_utf8_lossy(&log.msg).to_string();
        let entry = logs.entry(vertex_id.clone()).or_default();
        entry.push_str(&message);
        latest.push(BuildkitLogEntry { vertex_id, message });
    }

    latest
}

/// Pull the opencode image from a registry with automatic fallback
///
/// Tries GHCR first, then falls back to Docker Hub on failure.
/// Returns the full image:tag string on success.
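///
/// # Example
///
/// A minimal usage sketch (marked `ignore`: it assumes a connected Docker
/// daemon plus caller-provided `client` and `progress` values):
///
/// ```ignore
/// // Pull the default tag, preferring GHCR with a Docker Hub fallback.
/// let image = pull_image(&client, None, &mut progress).await?;
/// println!("pulled {image}");
/// ```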
pub async fn pull_image(
    client: &DockerClient,
    tag: Option<&str>,
    progress: &mut ProgressReporter,
) -> Result<String, DockerError> {
    let requested_tag = tag.unwrap_or(IMAGE_TAG_DEFAULT);
    let resolved_tag = effective_image_tag(requested_tag);
    let isolated_default_tag =
        requested_tag == IMAGE_TAG_DEFAULT && resolved_tag != IMAGE_TAG_DEFAULT;
    let registry_pull_tag = if isolated_default_tag {
        IMAGE_TAG_DEFAULT
    } else {
        requested_tag
    };

    // Try GHCR first
    debug!(
        "Attempting to pull from GHCR: {}:{}",
        IMAGE_NAME_GHCR, registry_pull_tag
    );
    let ghcr_err =
        match pull_from_registry(client, IMAGE_NAME_GHCR, registry_pull_tag, progress).await {
            Ok(()) => {
                if isolated_default_tag {
                    retag_local_image(
                        client,
                        &format!("{IMAGE_NAME_GHCR}:{registry_pull_tag}"),
                        &resolved_tag,
                    )
                    .await?;
                }
                let full_name = format!("{IMAGE_NAME_GHCR}:{resolved_tag}");
                return Ok(full_name);
            }
            Err(e) => e,
        };

    warn!(
        "GHCR pull failed: {}. Trying Docker Hub fallback...",
        ghcr_err
    );

    // Try Docker Hub as fallback
    debug!(
        "Attempting to pull from Docker Hub: {}:{}",
        IMAGE_NAME_DOCKERHUB, registry_pull_tag
    );
    match pull_from_registry(client, IMAGE_NAME_DOCKERHUB, registry_pull_tag, progress).await {
        Ok(()) => {
            if isolated_default_tag {
                retag_local_image(
                    client,
                    &format!("{IMAGE_NAME_DOCKERHUB}:{registry_pull_tag}"),
                    &resolved_tag,
                )
                .await?;
                return Ok(format!("{IMAGE_NAME_GHCR}:{resolved_tag}"));
            }
            let full_name = format!("{IMAGE_NAME_DOCKERHUB}:{resolved_tag}");
            Ok(full_name)
        }
        Err(dockerhub_err) => Err(DockerError::Pull(format!(
            "Failed to pull from both registries. GHCR: {ghcr_err}. Docker Hub: {dockerhub_err}"
        ))),
    }
}

async fn retag_local_image(
    client: &DockerClient,
    source_image: &str,
    target_tag: &str,
) -> Result<(), DockerError> {
    let options = TagImageOptions {
        repo: Some(IMAGE_NAME_GHCR.to_string()),
        tag: Some(target_tag.to_string()),
    };
    client
        .inner()
        .tag_image(source_image, Some(options))
        .await
        .map_err(|e| {
            DockerError::Pull(format!(
                "Failed to retag pulled image {source_image} as {IMAGE_NAME_GHCR}:{target_tag}: {e}"
            ))
        })?;
    Ok(())
}

/// Maximum number of retry attempts for pull operations
const MAX_PULL_RETRIES: usize = 3;

/// Pull from a specific registry with retry logic
async fn pull_from_registry(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");

    // Manual retry loop since async closures can't capture mutable references
    let mut last_error = None;
    for attempt in 1..=MAX_PULL_RETRIES {
        debug!(
            "Pull attempt {}/{} for {}",
            attempt, MAX_PULL_RETRIES, full_name
        );

        match do_pull(client, image, tag, progress).await {
            Ok(()) => return Ok(()),
            Err(e) => {
                warn!("Pull attempt {} failed: {}", attempt, e);
                last_error = Some(e);

                if attempt < MAX_PULL_RETRIES {
                    // Exponential backoff between attempts: 1s, then 2s (no sleep after the last attempt)
                    let delay_ms = 1000 * (1 << (attempt - 1));
                    tokio::time::sleep(std::time::Duration::from_millis(delay_ms)).await;
                }
            }
        }
    }

    Err(last_error.unwrap_or_else(|| {
        DockerError::Pull(format!(
            "Pull failed for {full_name} after {MAX_PULL_RETRIES} attempts"
        ))
    }))
}
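
// A worked check of the backoff arithmetic above: delays double per attempt
// and no sleep follows the final attempt. Values are in milliseconds.
#[cfg(test)]
mod pull_backoff_examples {
    #[test]
    fn delay_doubles_per_attempt() {
        let delays: Vec<u64> = (1..super::MAX_PULL_RETRIES)
            .map(|attempt| 1000 * (1u64 << (attempt - 1)))
            .collect();
        assert_eq!(delays, vec![1000, 2000]);
    }
}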

/// Perform the actual pull operation
async fn do_pull(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");

    let options = CreateImageOptions {
        from_image: Some(image.to_string()),
        tag: Some(tag.to_string()),
        platform: String::new(),
        ..Default::default()
    };

    let mut stream = client.inner().create_image(Some(options), None, None);

    // Add main spinner for overall progress
    progress.add_spinner("pull", &format!("Pulling {full_name}..."));

    while let Some(result) = stream.next().await {
        match result {
            Ok(info) => {
                // Handle errors from the stream
                if let Some(error_detail) = &info.error_detail
                    && let Some(error_msg) = &error_detail.message
                {
                    progress.abandon_all(error_msg);
                    return Err(DockerError::Pull(error_msg.to_string()));
                }

                // Handle layer progress
                if let Some(layer_id) = &info.id {
                    let status = info.status.as_deref().unwrap_or("");

                    match status {
                        "Already exists" => {
                            progress.finish(layer_id, "Already exists");
                        }
                        "Pull complete" => {
                            progress.finish(layer_id, "Pull complete");
                        }
                        "Downloading" | "Extracting" => {
                            if let Some(progress_detail) = &info.progress_detail {
                                let current = progress_detail.current.unwrap_or(0) as u64;
                                let total = progress_detail.total.unwrap_or(0) as u64;

                                if total > 0 {
                                    progress.update_layer(layer_id, current, total, status);
                                }
                            }
                        }
                        _ => {
                            // Other statuses (Waiting, Verifying, etc.)
                            progress.update_spinner(layer_id, status);
                        }
                    }
                } else if let Some(status) = &info.status {
                    // Overall status messages (no layer id)
                    progress.update_spinner("pull", status);
                }
            }
            Err(e) => {
                progress.abandon_all("Pull failed");
                return Err(DockerError::Pull(format!("Pull failed: {e}")));
            }
        }
    }

    progress.finish("pull", &format!("Pull complete: {full_name}"));
    Ok(())
}

/// Format a build error with recent log context for actionable debugging
fn format_build_error_with_context(
    error: &str,
    recent_logs: &VecDeque<String>,
    error_logs: &VecDeque<String>,
    recent_buildkit_logs: &VecDeque<String>,
) -> String {
    let mut message = String::new();

    // Add main error message
    message.push_str(error);

    // Add captured error lines if they differ from recent logs
    // (these are error-like lines that may have scrolled off)
    if !error_logs.is_empty() {
        // Check if error_logs contains lines not in recent_logs
        let recent_set: std::collections::HashSet<_> = recent_logs.iter().collect();
        let unique_errors: Vec<_> = error_logs
            .iter()
            .filter(|line| !recent_set.contains(line))
            .collect();

        if !unique_errors.is_empty() {
            message.push_str("\n\nPotential errors detected during build:");
            for line in unique_errors {
                message.push_str("\n  ");
                message.push_str(line);
            }
        }
    }

    // Add recent BuildKit log context if available
    if !recent_buildkit_logs.is_empty() {
        message.push_str("\n\nRecent BuildKit output:");
        for line in recent_buildkit_logs {
            message.push_str("\n  ");
            message.push_str(line);
        }
    }

    // Add recent log context if available
    if !recent_logs.is_empty() {
        message.push_str("\n\nRecent build output:");
        for line in recent_logs {
            message.push_str("\n  ");
            message.push_str(line);
        }
    } else if recent_buildkit_logs.is_empty() {
        message.push_str("\n\nNo build output was received from the Docker daemon.");
        message.push_str("\nThis usually means the build failed before any logs were streamed.");
    }

    // Add actionable suggestions based on common error patterns
    let error_lower = error.to_lowercase();
    if error_lower.contains("network")
        || error_lower.contains("connection")
        || error_lower.contains("timeout")
    {
        message.push_str("\n\nSuggestion: Check your network connection and Docker's ability to reach the internet.");
    } else if error_lower.contains("disk")
        || error_lower.contains("space")
        || error_lower.contains("no space")
    {
        message.push_str("\n\nSuggestion: Free up disk space with 'docker system prune' or check available storage.");
    } else if error_lower.contains("permission") || error_lower.contains("denied") {
        message.push_str("\n\nSuggestion: Check Docker permissions. You may need to add your user to the 'docker' group.");
    }

    message
}
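
// A small sketch of the suggestion matching above; the error string is
// fabricated and the log buffers are left empty to keep the example focused.
#[cfg(test)]
mod format_build_error_examples {
    use super::format_build_error_with_context;
    use std::collections::VecDeque;

    #[test]
    fn network_errors_get_a_network_suggestion() {
        let empty = VecDeque::new();
        let message =
            format_build_error_with_context("connection reset by peer", &empty, &empty, &empty);
        assert!(message.contains("Check your network connection"));
        assert!(message.contains("No build output was received"));
    }
}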

/// Create a gzipped tar archive containing the Dockerfile and its embedded
/// build assets, plus (optionally) the local opencode submodule checkout.
fn create_build_context(options: BuildContextOptions) -> Result<Vec<u8>, io::Error> {
    let repo_root = if options.include_local_opencode_submodule {
        Some(workspace_root_for_build_context()?)
    } else {
        None
    };
    create_build_context_with_repo_root(options, repo_root.as_deref())
}

fn workspace_root_for_build_context() -> Result<PathBuf, io::Error> {
    Path::new(env!("CARGO_MANIFEST_DIR"))
        .join("../..")
        .canonicalize()
}

fn create_build_context_with_repo_root(
    options: BuildContextOptions,
    repo_root: Option<&Path>,
) -> Result<Vec<u8>, io::Error> {
    let mut archive_buffer = Vec::new();

    {
        let encoder = GzEncoder::new(&mut archive_buffer, Compression::default());
        let mut tar = TarBuilder::new(encoder);

        // Add Dockerfile to archive
        let dockerfile_bytes = DOCKERFILE.as_bytes();
        append_bytes(&mut tar, "Dockerfile", dockerfile_bytes, 0o644)?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/entrypoint.sh",
            ENTRYPOINT_SH,
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode-cloud-bootstrap.sh",
            OPENCODE_CLOUD_BOOTSTRAP_SH,
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/healthcheck.sh",
            HEALTHCHECK_SH,
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode-broker.service",
            include_bytes!("files/opencode-broker.service"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode.service",
            include_bytes!("files/opencode.service"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/pam/opencode",
            include_bytes!("files/pam/opencode"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode.jsonc",
            include_bytes!("files/opencode.jsonc"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/starship.toml",
            include_bytes!("files/starship.toml"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/bashrc.extra",
            include_bytes!("files/bashrc.extra"),
            0o644,
        )?;

        // Dockerfile always references this path with COPY. Keep an empty directory present
        // even in remote mode so default builds stay lightweight.
        append_directory(
            &mut tar,
            Path::new(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH),
            0o755,
        )?;
        if options.include_local_opencode_submodule {
            let repo_root = repo_root.ok_or_else(|| {
                io::Error::new(
                    io::ErrorKind::NotFound,
                    "Local opencode build requested but workspace root is unavailable",
                )
            })?;
            append_local_opencode_submodule(&mut tar, repo_root)?;
        }

        tar.finish()?;

        // Finish gzip encoding
        let encoder = tar.into_inner()?;
        encoder.finish()?;
    }

    Ok(archive_buffer)
}

fn append_local_opencode_submodule<W: Write>(
    tar: &mut TarBuilder<W>,
    repo_root: &Path,
) -> Result<(), io::Error> {
    let source_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);
    if !source_root.is_dir() {
        return Err(io::Error::new(
            io::ErrorKind::NotFound,
            format!(
                "Local opencode submodule path not found: {}",
                source_root.display()
            ),
        ));
    }
    let canonical_source_root = source_root.canonicalize()?;

    append_local_tree_recursive(
        tar,
        &source_root,
        &canonical_source_root,
        Path::new(""),
        Path::new(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH),
    )
}

fn append_local_tree_recursive<W: Write>(
    tar: &mut TarBuilder<W>,
    source_root: &Path,
    canonical_source_root: &Path,
    relative_path: &Path,
    archive_root: &Path,
) -> Result<(), io::Error> {
    let current_path = source_root.join(relative_path);
    let mut entries: Vec<_> =
        fs::read_dir(&current_path)?.collect::<Result<Vec<_>, io::Error>>()?;
    entries.sort_by_key(|a| a.file_name());

    for entry in entries {
        let file_name = entry.file_name();
        let entry_relative = if relative_path.as_os_str().is_empty() {
            PathBuf::from(&file_name)
        } else {
            relative_path.join(&file_name)
        };

        if should_exclude_local_opencode_path(&entry_relative) {
            continue;
        }

        let entry_path = entry.path();
        let metadata = fs::symlink_metadata(&entry_path)?;
        let archive_path = archive_root.join(&entry_relative);

        if metadata.is_dir() {
            append_directory(tar, &archive_path, mode_from_metadata(&metadata, 0o755))?;
            append_local_tree_recursive(
                tar,
                source_root,
                canonical_source_root,
                &entry_relative,
                archive_root,
            )?;
            continue;
        }

        if metadata.is_file() {
            append_file_from_disk(
                tar,
                &archive_path,
                &entry_path,
                mode_from_metadata(&metadata, 0o644),
            )?;
            continue;
        }

        if metadata.file_type().is_symlink() {
            // Some opencode assets (for example UI fonts) are symlinks. Materialize symlinked files
            // into the archive when they stay inside the checkout; skip links outside the tree.
            match resolve_local_symlink_target(&entry_path, canonical_source_root)? {
                Some(target_path) => {
                    let target_metadata = fs::metadata(&target_path)?;
                    if target_metadata.is_file() {
                        append_file_from_disk(
                            tar,
                            &archive_path,
                            &target_path,
                            mode_from_metadata(&target_metadata, 0o644),
                        )?;
                    } else {
                        debug!(
                            "Skipping symlink with non-file target in local opencode context: {} -> {}",
                            entry_path.display(),
                            target_path.display()
                        );
                    }
                }
                None => {
                    debug!(
                        "Skipping symlink outside checkout or unresolved in local opencode context: {}",
                        entry_path.display()
                    );
                }
            }
        }
    }

    Ok(())
}

fn resolve_local_symlink_target(
    link_path: &Path,
    canonical_source_root: &Path,
) -> Result<Option<PathBuf>, io::Error> {
    let link_target = fs::read_link(link_path)?;
    let resolved = if link_target.is_absolute() {
        link_target
    } else {
        link_path
            .parent()
            .unwrap_or_else(|| Path::new(""))
            .join(link_target)
    };

    // Broken links are ignored in local dev mode instead of failing the entire build context.
    let canonical_target = match resolved.canonicalize() {
        Ok(path) => path,
        Err(_) => return Ok(None),
    };
    if canonical_target.starts_with(canonical_source_root) {
        Ok(Some(canonical_target))
    } else {
        Ok(None)
    }
}

fn should_exclude_local_opencode_path(relative_path: &Path) -> bool {
    if relative_path.file_name().is_some_and(|name| {
        LOCAL_OPENCODE_EXCLUDED_FILES
            .iter()
            .any(|candidate| name == OsStr::new(candidate))
    }) {
        return true;
    }

    relative_path.components().any(|component| {
        let part = component.as_os_str();
        LOCAL_OPENCODE_EXCLUDED_DIRS
            .iter()
            .any(|candidate| part == OsStr::new(candidate))
    })
}
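
// Illustrative checks of the exclusion rules above, using entries from the
// real exclude lists; the relative paths themselves are invented.
#[cfg(test)]
mod should_exclude_examples {
    use super::should_exclude_local_opencode_path;
    use std::path::Path;

    #[test]
    fn excludes_heavy_dirs_and_ds_store() {
        assert!(should_exclude_local_opencode_path(Path::new(
            "node_modules/pkg/index.js"
        )));
        assert!(should_exclude_local_opencode_path(Path::new("docs/.DS_Store")));
        assert!(!should_exclude_local_opencode_path(Path::new("src/main.ts")));
    }
}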

#[cfg(unix)]
fn mode_from_metadata(metadata: &fs::Metadata, fallback: u32) -> u32 {
    use std::os::unix::fs::PermissionsExt;
    let mode = metadata.permissions().mode() & 0o7777;
    if mode == 0 { fallback } else { mode }
}

#[cfg(not(unix))]
fn mode_from_metadata(_metadata: &fs::Metadata, fallback: u32) -> u32 {
    fallback
}

fn append_directory<W: Write>(
    tar: &mut TarBuilder<W>,
    path: &Path,
    mode: u32,
) -> Result<(), io::Error> {
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_mode(mode);
    header.set_entry_type(tar::EntryType::Directory);
    // Use append_data path handling so GNU long-name extensions are emitted when needed.
    // Direct set_path() is brittle for deep/long local dev paths.
    tar.append_data(&mut header, path, io::empty())?;
    Ok(())
}

fn append_file_from_disk<W: Write>(
    tar: &mut TarBuilder<W>,
    archive_path: &Path,
    source_path: &Path,
    mode: u32,
) -> Result<(), io::Error> {
    let mut file = fs::File::open(source_path)?;
    let metadata = file.metadata()?;
    let mut header = tar::Header::new_gnu();
    header.set_size(metadata.len());
    header.set_mode(mode);
    // Use append_data path handling so GNU long-name extensions are emitted when needed.
    // Direct set_path() is brittle for deep/long local dev paths.
    tar.append_data(&mut header, archive_path, &mut file)?;
    Ok(())
}

fn append_bytes<W: Write>(
    tar: &mut TarBuilder<W>,
    path: &str,
    contents: &[u8],
    mode: u32,
) -> Result<(), io::Error> {
    let mut header = tar::Header::new_gnu();
    header.set_path(path)?;
    header.set_size(contents.len() as u64);
    header.set_mode(mode);
    header.set_cksum();

    tar.append(&header, contents)?;
    Ok(())
}
1332
1333#[cfg(test)]
1334mod tests {
1335    use super::*;
1336    use bollard::models::ImageSummary;
1337    use flate2::read::GzDecoder;
1338    use std::collections::{HashMap, HashSet};
1339    use std::fs;
1340    use std::io::{Cursor, Read};
1341    use tar::Archive;
1342    use tempfile::tempdir;
1343
1344    fn make_image_summary(
1345        id: &str,
1346        tags: Vec<&str>,
1347        digests: Vec<&str>,
1348        labels: HashMap<String, String>,
1349    ) -> ImageSummary {
1350        ImageSummary {
1351            id: id.to_string(),
1352            parent_id: String::new(),
1353            repo_tags: tags.into_iter().map(|tag| tag.to_string()).collect(),
1354            repo_digests: digests
1355                .into_iter()
1356                .map(|digest| digest.to_string())
1357                .collect(),
1358            created: 0,
1359            size: 0,
1360            shared_size: -1,
1361            labels,
1362            containers: 0,
1363            manifests: None,
1364            descriptor: None,
1365        }
1366    }
1367
1368    fn archive_entries(context: Vec<u8>) -> HashSet<String> {
1369        let cursor = Cursor::new(context);
1370        let decoder = GzDecoder::new(cursor);
1371        let mut archive = Archive::new(decoder);
1372        let mut paths = HashSet::new();
1373        for entry in archive.entries().expect("should read archive entries") {
1374            let entry = entry.expect("should read entry");
1375            let path = entry.path().expect("should read entry path");
1376            paths.insert(path.to_string_lossy().to_string());
1377        }
1378        paths
1379    }
1380
1381    fn archive_entry_bytes(context: Vec<u8>, wanted_path: &str) -> Option<Vec<u8>> {
1382        let cursor = Cursor::new(context);
1383        let decoder = GzDecoder::new(cursor);
1384        let mut archive = Archive::new(decoder);
1385        for entry in archive.entries().expect("should read archive entries") {
1386            let mut entry = entry.expect("should read entry");
1387            let path = entry.path().expect("should read entry path");
1388            if path == Path::new(wanted_path) {
1389                let mut bytes = Vec::new();
1390                entry
1391                    .read_to_end(&mut bytes)
1392                    .expect("should read entry bytes");
1393                return Some(bytes);
1394            }
1395        }
1396        None
1397    }
1398
1399    #[test]
1400    fn create_build_context_succeeds() {
1401        let context =
1402            create_build_context(BuildContextOptions::default()).expect("should create context");
1403        assert!(!context.is_empty(), "context should not be empty");
1404
1405        // Verify it's gzip-compressed (gzip magic bytes)
1406        assert_eq!(context[0], 0x1f, "should be gzip compressed");
1407        assert_eq!(context[1], 0x8b, "should be gzip compressed");
1408    }
1409
1410    #[test]
1411    fn build_context_includes_docker_assets() {
1412        let context =
1413            create_build_context(BuildContextOptions::default()).expect("should create context");
1414        let cursor = Cursor::new(context);
1415        let decoder = GzDecoder::new(cursor);
1416        let mut archive = Archive::new(decoder);
1417        let mut found_entrypoint = false;
1418        let mut found_healthcheck = false;
1419        let mut found_bootstrap_helper = false;
1420
1421        for entry in archive.entries().expect("should read archive entries") {
1422            let entry = entry.expect("should read entry");
1423            let path = entry.path().expect("should read entry path");
1424            if path == std::path::Path::new("packages/core/src/docker/files/entrypoint.sh") {
1425                found_entrypoint = true;
1426            }
1427            if path == std::path::Path::new("packages/core/src/docker/files/healthcheck.sh") {
1428                found_healthcheck = true;
1429            }
1430            if path
1431                == std::path::Path::new(
1432                    "packages/core/src/docker/files/opencode-cloud-bootstrap.sh",
1433                )
1434            {
1435                found_bootstrap_helper = true;
1436            }
1437            if found_entrypoint && found_healthcheck && found_bootstrap_helper {
1438                break;
1439            }
1440        }
1441
1442        assert!(
1443            found_entrypoint,
1444            "entrypoint asset should be in the build context"
1445        );
1446        assert!(
1447            found_healthcheck,
1448            "healthcheck asset should be in the build context"
1449        );
1450        assert!(
1451            found_bootstrap_helper,
1452            "bootstrap helper asset should be in the build context"
1453        );
1454    }
1455
1456    #[test]
1457    fn build_context_includes_opencode_placeholder_in_default_mode() {
1458        let context =
1459            create_build_context(BuildContextOptions::default()).expect("should create context");
1460        let entries = archive_entries(context);
1461        assert!(
1462            entries
1463                .iter()
1464                .any(|path| path.trim_end_matches('/') == "packages/opencode"),
1465            "default mode should include an empty packages/opencode placeholder"
1466        );
1467    }
1468
1469    #[test]
    fn build_context_local_mode_includes_submodule_and_excludes_heavy_paths() {
        let temp = tempdir().expect("should create tempdir");
        let repo_root = temp.path();

        let submodule_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);
        fs::create_dir_all(submodule_root.join("src")).expect("should create src");
        fs::create_dir_all(submodule_root.join(".git")).expect("should create .git");
        fs::create_dir_all(submodule_root.join("node_modules/pkg"))
            .expect("should create node_modules");
        fs::create_dir_all(submodule_root.join("target/release")).expect("should create target");
        fs::create_dir_all(submodule_root.join("dist")).expect("should create dist");
        fs::create_dir_all(submodule_root.join(".turbo")).expect("should create .turbo");
        fs::create_dir_all(submodule_root.join(".cache")).expect("should create .cache");
        fs::create_dir_all(submodule_root.join(".planning/phases/very-long-planning-phase-name"))
            .expect("should create planning");

        fs::write(submodule_root.join("package.json"), "{}").expect("should write package.json");
        fs::write(submodule_root.join("src/main.ts"), "console.log('ok');")
            .expect("should write source");
        fs::write(submodule_root.join(".git/config"), "dirty").expect("should write .git file");
        fs::write(submodule_root.join("node_modules/pkg/index.js"), "ignored")
            .expect("should write node_modules file");
        fs::write(submodule_root.join("target/release/app"), "ignored")
            .expect("should write target file");
        fs::write(submodule_root.join("dist/ui.js"), "ignored").expect("should write dist file");
        fs::write(submodule_root.join(".turbo/state.json"), "ignored")
            .expect("should write turbo file");
        fs::write(submodule_root.join(".cache/cache.bin"), "ignored")
            .expect("should write cache file");
        fs::write(
            submodule_root.join(".planning/phases/very-long-planning-phase-name/phase.md"),
            "ignored",
        )
        .expect("should write planning file");
        fs::write(submodule_root.join(".DS_Store"), "ignored").expect("should write ds_store");

        let context = create_build_context_with_repo_root(
            BuildContextOptions {
                include_local_opencode_submodule: true,
            },
            Some(repo_root),
        )
        .expect("should create local context");
        let entries = archive_entries(context);

        assert!(
            entries.contains("packages/opencode/package.json"),
            "local mode should include submodule files"
        );
        assert!(
            entries.contains("packages/opencode/src/main.ts"),
            "local mode should include source files"
        );
        assert!(
            !entries.contains("packages/opencode/.git/config"),
            "local mode should exclude .git"
        );
        assert!(
            !entries.contains("packages/opencode/node_modules/pkg/index.js"),
            "local mode should exclude node_modules"
        );
        assert!(
            !entries.contains("packages/opencode/target/release/app"),
            "local mode should exclude target"
        );
        assert!(
            !entries.contains("packages/opencode/dist/ui.js"),
            "local mode should exclude dist"
        );
        assert!(
            !entries.contains("packages/opencode/.turbo/state.json"),
            "local mode should exclude .turbo"
        );
        assert!(
            !entries.contains("packages/opencode/.cache/cache.bin"),
            "local mode should exclude .cache"
        );
        assert!(
            !entries.contains(
                "packages/opencode/.planning/phases/very-long-planning-phase-name/phase.md"
            ),
            "local mode should exclude .planning"
        );
        assert!(
            !entries.contains("packages/opencode/.DS_Store"),
            "local mode should exclude .DS_Store files"
        );
    }

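    // Plain ustar headers cap entry names at 100 bytes; the tar writer falls
    // back to GNU long-name extension records for longer paths, which this
    // test pins down for deeply nested local sources.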
    #[test]
    fn build_context_local_mode_supports_long_non_excluded_paths() {
        let temp = tempdir().expect("should create tempdir");
        let repo_root = temp.path();
        let submodule_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);

        fs::create_dir_all(&submodule_root).expect("should create submodule root");
        fs::write(submodule_root.join("package.json"), "{}").expect("should write package.json");

        let long_segment = "a".repeat(140);
        let long_dir = submodule_root.join("src").join(&long_segment);
        fs::create_dir_all(&long_dir).expect("should create long path directory");
        fs::write(long_dir.join("main.ts"), "console.log('long path');")
            .expect("should write long path file");

        let context = create_build_context_with_repo_root(
            BuildContextOptions {
                include_local_opencode_submodule: true,
            },
            Some(repo_root),
        )
        .expect("should create local context with long paths");
        let entries = archive_entries(context);
        let long_entry = format!("packages/opencode/src/{long_segment}/main.ts");
        assert!(
            entries.contains(&long_entry),
            "long non-excluded path should be archived via GNU long-name handling"
        );
    }

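    // "Materializes" here means the link target's bytes are archived under
    // the link's own path, so consumers of the context see a regular file
    // rather than a symlink entry.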
    #[cfg(unix)]
    #[test]
    fn build_context_local_mode_materializes_symlinked_files() {
        use std::os::unix::fs::symlink;

        let temp = tempdir().expect("should create tempdir");
        let repo_root = temp.path();
        let submodule_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);
        let fonts_dir = submodule_root.join("packages/ui/src/assets/fonts");
        fs::create_dir_all(&fonts_dir).expect("should create fonts dir");
        fs::write(submodule_root.join("package.json"), "{}").expect("should write package.json");
        fs::write(
            fonts_dir.join("BlexMonoNerdFontMono-Regular.woff2"),
            b"font-bytes",
        )
        .expect("should write target font");
        symlink(
            "BlexMonoNerdFontMono-Regular.woff2",
            fonts_dir.join("ibm-plex-mono.woff2"),
        )
        .expect("should create symlinked font");

        let context = create_build_context_with_repo_root(
            BuildContextOptions {
                include_local_opencode_submodule: true,
            },
            Some(repo_root),
        )
        .expect("should create local context with symlink");
        let entries = archive_entries(context.clone());

        assert!(
            entries.contains("packages/opencode/packages/ui/src/assets/fonts/ibm-plex-mono.woff2"),
            "local mode should include symlinked asset paths"
        );
        let alias_bytes = archive_entry_bytes(
            context,
            "packages/opencode/packages/ui/src/assets/fonts/ibm-plex-mono.woff2",
        )
        .expect("symlinked asset should contain bytes");
        assert_eq!(alias_bytes, b"font-bytes");
    }

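    // Pin the published default tag so an accidental change to the constant
    // is caught here.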
    #[test]
    fn default_tag_is_latest() {
        assert_eq!(IMAGE_TAG_DEFAULT, "latest");
    }

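    // The format_build_error_with_context tests below cover its contract: the
    // base message is preserved verbatim, recent output and error-looking
    // lines are appended only when present, and known failure signatures
    // (network, disk space) earn a targeted suggestion.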
    #[test]
    fn format_build_error_includes_recent_logs() {
        let mut logs = VecDeque::new();
        logs.push_back("Step 1/5 : FROM ubuntu:24.04".to_string());
        logs.push_back("Step 2/5 : RUN apt-get update".to_string());
        logs.push_back("E: Unable to fetch some archives".to_string());
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();

        let result = format_build_error_with_context(
            "Build failed: exit code 1",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Build failed: exit code 1"));
        assert!(result.contains("Recent build output:"));
        assert!(result.contains("Step 1/5"));
        assert!(result.contains("Unable to fetch"));
    }

    #[test]
    fn format_build_error_handles_empty_logs() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result =
            format_build_error_with_context("Stream error", &logs, &error_logs, &buildkit_logs);

        assert!(result.contains("Stream error"));
        assert!(!result.contains("Recent build output:"));
    }

    #[test]
    fn format_build_error_adds_network_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "connection timeout",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Check your network connection"));
    }

    #[test]
    fn format_build_error_adds_disk_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "no space left on device",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Free up disk space"));
    }

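    // Error-looking lines get their own section even when the rolling log
    // buffer holds only benign output.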
    #[test]
    fn format_build_error_shows_error_lines_separately() {
        let mut recent_logs = VecDeque::new();
        recent_logs.push_back("Compiling foo v1.0".to_string());
        recent_logs.push_back("Successfully installed bar".to_string());

        let mut error_logs = VecDeque::new();
        error_logs.push_back("error: failed to compile dust".to_string());
        error_logs.push_back("error: failed to compile glow".to_string());

        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "Build failed",
            &recent_logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Potential errors detected during build:"));
        assert!(result.contains("failed to compile dust"));
        assert!(result.contains("failed to compile glow"));
    }

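    // is_error_line is a heuristic over common failure markers; these cases
    // pin the phrases it must flag without tripping on ordinary progress
    // output.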
    #[test]
    fn is_error_line_detects_errors() {
        assert!(is_error_line("error: something failed"));
        assert!(is_error_line("Error: build failed"));
        assert!(is_error_line("Failed to install package"));
        assert!(is_error_line("cannot find module"));
        assert!(is_error_line("Unable to locate package"));
        assert!(!is_error_line("Compiling foo v1.0"));
        assert!(!is_error_line("Successfully installed"));
    }

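    // An image carrying the opencode source label is collected even when it
    // has no repo tags at all, while an unlabeled, unrelated image is left
    // alone.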
    #[test]
    fn collect_image_ids_matches_labels() {
        let mut labels = HashMap::new();
        labels.insert(LABEL_SOURCE.to_string(), LABEL_SOURCE_VALUE.to_string());

        let images = vec![
            make_image_summary("sha256:opencode", vec![], vec![], labels),
            make_image_summary(
                "sha256:other",
                vec!["busybox:latest"],
                vec![],
                HashMap::new(),
            ),
        ];

        let ids = collect_image_ids(&images, "opencode-cloud-sandbox");
        assert!(ids.contains("sha256:opencode"));
        assert!(!ids.contains("sha256:other"));
    }
}