opencode_cloud_core/docker/image.rs

//! Docker image build and pull operations
//!
//! This module provides functionality to build Docker images from the embedded
//! Dockerfile and pull images from registries with progress feedback.

use super::progress::ProgressReporter;
use super::{
    CONTAINER_NAME, DOCKERFILE, DockerClient, DockerError, ENTRYPOINT_SH, HEALTHCHECK_SH,
    IMAGE_NAME_DOCKERHUB, IMAGE_NAME_GHCR, IMAGE_TAG_DEFAULT, OPENCODE_CLOUD_BOOTSTRAP_SH,
    active_resource_names, remap_image_tag,
};
use bollard::moby::buildkit::v1::StatusResponse as BuildkitStatusResponse;
use bollard::models::BuildInfoAux;
use bollard::query_parameters::{
    BuildImageOptions, BuilderVersion, CreateImageOptions, ListImagesOptionsBuilder,
    RemoveImageOptionsBuilder, TagImageOptions,
};
use bytes::Bytes;
use flate2::Compression;
use flate2::write::GzEncoder;
use futures_util::StreamExt;
use http_body_util::{Either, Full};
use std::collections::{HashMap, HashSet, VecDeque};
use std::env;
use std::ffi::OsStr;
use std::fs;
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
use tar::Builder as TarBuilder;
use tracing::{debug, warn};

/// Default number of recent build log lines to capture for error context
const DEFAULT_BUILD_LOG_BUFFER_SIZE: usize = 20;

/// Default number of error lines to capture separately
const DEFAULT_ERROR_LOG_BUFFER_SIZE: usize = 10;

const LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH: &str = "packages/opencode";
// Keep local source excludes aligned with Dockerfile Build Hygiene Rules and
// the Dockerfile Optimization Checklist in README docs.
const LOCAL_OPENCODE_EXCLUDED_DIRS: &[&str] = &[
    ".git",
    ".planning",
    "node_modules",
    "target",
    "dist",
    ".turbo",
    ".cache",
];
const LOCAL_OPENCODE_EXCLUDED_FILES: &[&str] = &[".DS_Store"];

#[derive(Debug, Clone, Copy, Default)]
struct BuildContextOptions {
    include_local_opencode_submodule: bool,
}

fn effective_image_tag(tag: &str) -> String {
    remap_image_tag(tag)
}

fn profile_scoped_image_ids(images: &[bollard::models::ImageSummary]) -> Option<HashSet<String>> {
    let names = active_resource_names();
    let instance_id = names.instance_id.as_deref()?;
    let expected_tags = [
        format!("{IMAGE_NAME_GHCR}:{}", names.image_tag),
        format!("{IMAGE_NAME_DOCKERHUB}:{}", names.image_tag),
        format!("{IMAGE_NAME_GHCR}:{}", names.previous_image_tag),
        format!("{IMAGE_NAME_DOCKERHUB}:{}", names.previous_image_tag),
    ];

    // In isolated mode, avoid broad "contains name fragment" matching and only remove
    // image tags associated with the active instance.
    let mut ids = HashSet::new();
    for image in images {
        let tag_match = image
            .repo_tags
            .iter()
            .any(|tag| expected_tags.contains(tag));
        let label_match = image
            .labels
            .get(super::INSTANCE_LABEL_KEY)
            .is_some_and(|value| value == instance_id);
        if tag_match || label_match {
            ids.insert(image.id.clone());
        }
    }
    Some(ids)
}

/// Read a log buffer size from env with bounds
fn read_log_buffer_size(var_name: &str, default: usize) -> usize {
    let Ok(value) = env::var(var_name) else {
        return default;
    };
    let Ok(parsed) = value.trim().parse::<usize>() else {
        return default;
    };
    parsed.clamp(5, 500)
}
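
// Illustrative sketch (commentary, not module code): values outside [5, 500]
// are clamped, and unset or unparsable values fall back to the default.
//
//     env::set_var("OPENCODE_DOCKER_BUILD_LOG_TAIL", "100");
//     assert_eq!(read_log_buffer_size("OPENCODE_DOCKER_BUILD_LOG_TAIL", 20), 100);
//     env::set_var("OPENCODE_DOCKER_BUILD_LOG_TAIL", "9999");
//     assert_eq!(read_log_buffer_size("OPENCODE_DOCKER_BUILD_LOG_TAIL", 20), 500); // clamped
//     env::set_var("OPENCODE_DOCKER_BUILD_LOG_TAIL", "abc");
//     assert_eq!(read_log_buffer_size("OPENCODE_DOCKER_BUILD_LOG_TAIL", 20), 20); // default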

/// Check if a line looks like an error message
fn is_error_line(line: &str) -> bool {
    let lower = line.to_lowercase();
    lower.contains("error")
        || lower.contains("failed")
        || lower.contains("cannot")
        || lower.contains("unable to")
        || lower.contains("not found")
        || lower.contains("permission denied")
}
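
// Illustrative sketch: sample classifications under the substring heuristic above.
//
//     assert!(is_error_line("ERROR: failed to solve: process did not complete"));
//     assert!(is_error_line("npm ERR! EACCES: permission denied"));
//     assert!(!is_error_line("Step 5/20 : RUN bun install"));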

/// Check if an image exists locally
pub async fn image_exists(
    client: &DockerClient,
    image: &str,
    tag: &str,
) -> Result<bool, DockerError> {
    let tag = effective_image_tag(tag);
    let full_name = format!("{image}:{tag}");
    debug!("Checking if image exists: {}", full_name);

    match client.inner().inspect_image(&full_name).await {
        Ok(_) => Ok(true),
        Err(bollard::errors::Error::DockerResponseServerError {
            status_code: 404, ..
        }) => Ok(false),
        Err(e) => Err(DockerError::from(e)),
    }
}
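
// Hedged usage sketch: probe for the default image before choosing between
// pull_image() and build_image(). Assumes an already-connected `client`.
//
//     if !image_exists(&client, IMAGE_NAME_GHCR, IMAGE_TAG_DEFAULT).await? {
//         // image is missing locally; pull or build it
//     }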

/// Remove all images whose tags, digests, or labels match the provided name fragment
///
/// Returns the number of images removed.
pub async fn remove_images_by_name(
    client: &DockerClient,
    name_fragment: &str,
    force: bool,
) -> Result<usize, DockerError> {
    debug!("Removing Docker images matching '{name_fragment}'");

    let images = list_docker_images(client).await?;

    let image_ids = if name_fragment == CONTAINER_NAME {
        profile_scoped_image_ids(&images)
            .unwrap_or_else(|| collect_image_ids(&images, name_fragment))
    } else {
        collect_image_ids(&images, name_fragment)
    };
    remove_image_ids(client, image_ids, force).await
}

/// List all local Docker images (including intermediate layers).
async fn list_docker_images(
    client: &DockerClient,
) -> Result<Vec<bollard::models::ImageSummary>, DockerError> {
    let list_options = ListImagesOptionsBuilder::new().all(true).build();
    client
        .inner()
        .list_images(Some(list_options))
        .await
        .map_err(|e| DockerError::Image(format!("Failed to list images: {e}")))
}

const LABEL_TITLE: &str = "org.opencontainers.image.title";
const LABEL_SOURCE: &str = "org.opencontainers.image.source";
const LABEL_URL: &str = "org.opencontainers.image.url";

const LABEL_TITLE_VALUE: &str = "opencode-cloud-sandbox";
const LABEL_SOURCE_VALUE: &str = "https://github.com/pRizz/opencode-cloud";
const LABEL_URL_VALUE: &str = "https://github.com/pRizz/opencode-cloud";

/// Collect image IDs that contain the provided name fragment or match opencode labels.
fn collect_image_ids(
    images: &[bollard::models::ImageSummary],
    name_fragment: &str,
) -> HashSet<String> {
    let mut image_ids = HashSet::new();
    for image in images {
        if image_matches_fragment_or_labels(image, name_fragment) {
            image_ids.insert(image.id.clone());
        }
    }
    image_ids
}

fn image_matches_fragment_or_labels(
    image: &bollard::models::ImageSummary,
    name_fragment: &str,
) -> bool {
    let tag_match = image
        .repo_tags
        .iter()
        .any(|tag| tag != "<none>:<none>" && tag.contains(name_fragment));
    let digest_match = image
        .repo_digests
        .iter()
        .any(|digest| digest.contains(name_fragment));
    let label_match = image_labels_match(&image.labels);

    tag_match || digest_match || label_match
}

fn image_labels_match(labels: &HashMap<String, String>) -> bool {
    labels
        .get(LABEL_SOURCE)
        .is_some_and(|value| value == LABEL_SOURCE_VALUE)
        || labels
            .get(LABEL_URL)
            .is_some_and(|value| value == LABEL_URL_VALUE)
        || labels
            .get(LABEL_TITLE)
            .is_some_and(|value| value == LABEL_TITLE_VALUE)
}
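
// Illustrative sketch: an image carrying the opencode-cloud OCI source label is
// matched even when none of its tags contain a recognizable name fragment.
//
//     let mut labels = HashMap::new();
//     labels.insert(LABEL_SOURCE.to_string(), LABEL_SOURCE_VALUE.to_string());
//     assert!(image_labels_match(&labels));
//     assert!(!image_labels_match(&HashMap::new()));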

/// Remove image IDs, returning the number removed.
async fn remove_image_ids(
    client: &DockerClient,
    image_ids: HashSet<String>,
    force: bool,
) -> Result<usize, DockerError> {
    if image_ids.is_empty() {
        return Ok(0);
    }

    let remove_options = RemoveImageOptionsBuilder::new().force(force).build();
    let mut removed = 0usize;
    for image_id in image_ids {
        let result = client
            .inner()
            .remove_image(&image_id, Some(remove_options.clone()), None)
            .await;
        match result {
            Ok(_) => removed += 1,
            Err(bollard::errors::Error::DockerResponseServerError {
                status_code: 404, ..
            }) => {
                debug!("Docker image already removed: {}", image_id);
            }
            Err(err) => {
                return Err(DockerError::Image(format!(
                    "Failed to remove image {image_id}: {err}"
                )));
            }
        }
    }

    Ok(removed)
}

/// Build the opencode image from the embedded Dockerfile
///
/// Shows real-time build progress with streaming output.
/// Returns the full image:tag string on success.
///
/// # Arguments
/// * `client` - Docker client
/// * `tag` - Image tag (defaults to IMAGE_TAG_DEFAULT)
/// * `progress` - Progress reporter for build feedback
/// * `no_cache` - If true, build without using Docker layer cache
/// * `build_args` - Optional Docker build args (`OPENCODE_SOURCE=local` builds from the local submodule)
pub async fn build_image(
    client: &DockerClient,
    tag: Option<&str>,
    progress: &mut ProgressReporter,
    no_cache: bool,
    build_args: Option<HashMap<String, String>>,
) -> Result<String, DockerError> {
    let tag = effective_image_tag(tag.unwrap_or(IMAGE_TAG_DEFAULT));
    let full_name = format!("{IMAGE_NAME_GHCR}:{tag}");
    debug!("Building image: {} (no_cache: {})", full_name, no_cache);

    let build_args = build_args.unwrap_or_default();
    let include_local_opencode_submodule = build_args
        .get("OPENCODE_SOURCE")
        .is_some_and(|value| value.eq_ignore_ascii_case("local"));
    // Create a tar archive containing the Dockerfile and its supporting build assets
    let context = create_build_context(BuildContextOptions {
        include_local_opencode_submodule,
    })
    .map_err(|e| DockerError::Build(format!("Failed to create build context: {e}")))?;

    // Set up build options
    // Explicitly use BuildKit builder to support cache mounts (--mount=type=cache)
    // BuildKit requires a unique session ID for each build
    let session_id = format!(
        "opencode-cloud-build-{}",
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos()
    );
    let options = BuildImageOptions {
        t: Some(full_name.clone()),
        dockerfile: "Dockerfile".to_string(),
        version: BuilderVersion::BuilderBuildKit,
        session: Some(session_id),
        rm: true,
        nocache: no_cache,
        buildargs: Some(build_args),
        platform: String::new(),
        target: String::new(),
        ..Default::default()
    };

    // Create build body from context
    let body: Either<Full<Bytes>, _> = Either::Left(Full::new(Bytes::from(context)));

    // Start build with streaming output
    let mut stream = client.inner().build_image(options, None, Some(body));

    // Add main build spinner (context prefix like "Building image" is set by caller)
    progress.add_spinner("build", "Initializing...");

    let mut maybe_image_id = None;
    let mut log_state = BuildLogState::new();

    while let Some(result) = stream.next().await {
        let info = match result {
            Ok(info) => info,
            Err(err) => {
                return Err(handle_stream_error(
                    "Build failed",
                    err.to_string(),
                    &log_state,
                    progress,
                ));
            }
        };

        handle_stream_message(&info, progress, &mut log_state);

        if let Some(error_detail) = &info.error_detail
            && let Some(error_msg) = &error_detail.message
        {
            progress.abandon_all(error_msg);
            let context = format_build_error_with_context(
                error_msg,
                &log_state.recent_logs,
                &log_state.error_logs,
                &log_state.recent_buildkit_logs,
            );
            return Err(DockerError::Build(context));
        }

        if let Some(aux) = info.aux {
            match aux {
                BuildInfoAux::Default(image_id) => {
                    if let Some(id) = image_id.id {
                        maybe_image_id = Some(id);
                    }
                }
                BuildInfoAux::BuildKit(status) => {
                    handle_buildkit_status(&status, progress, &mut log_state);
                }
            }
        }
    }

    let image_id = maybe_image_id.unwrap_or_else(|| "unknown".to_string());
    let finish_msg = format!("Build complete: {image_id}");
    progress.finish("build", &finish_msg);

    Ok(full_name)
}
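
// Hedged usage sketch (commentary, not module code): requesting a build from the
// local submodule. `OPENCODE_SOURCE=local` is the build arg this function itself
// inspects above; how `client` and `progress` are obtained is left to the caller
// (see DockerClient and super::progress::ProgressReporter).
//
//     let mut args = HashMap::new();
//     args.insert("OPENCODE_SOURCE".to_string(), "local".to_string());
//     let full_name = build_image(&client, None, &mut progress, false, Some(args)).await?;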

struct BuildLogState {
    recent_logs: VecDeque<String>,
    error_logs: VecDeque<String>,
    recent_buildkit_logs: VecDeque<String>,
    build_log_buffer_size: usize,
    error_log_buffer_size: usize,
    last_buildkit_vertex: Option<String>,
    last_buildkit_vertex_id: Option<String>,
    export_vertex_id: Option<String>,
    export_vertex_name: Option<String>,
    buildkit_logs_by_vertex_id: HashMap<String, String>,
    vertex_name_by_vertex_id: HashMap<String, String>,
}

impl BuildLogState {
    fn new() -> Self {
        let build_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_LOG_TAIL",
            DEFAULT_BUILD_LOG_BUFFER_SIZE,
        );
        let error_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_ERROR_TAIL",
            DEFAULT_ERROR_LOG_BUFFER_SIZE,
        );
        Self {
            recent_logs: VecDeque::with_capacity(build_log_buffer_size),
            error_logs: VecDeque::with_capacity(error_log_buffer_size),
            recent_buildkit_logs: VecDeque::with_capacity(build_log_buffer_size),
            build_log_buffer_size,
            error_log_buffer_size,
            last_buildkit_vertex: None,
            last_buildkit_vertex_id: None,
            export_vertex_id: None,
            export_vertex_name: None,
            buildkit_logs_by_vertex_id: HashMap::new(),
            vertex_name_by_vertex_id: HashMap::new(),
        }
    }
}

fn handle_stream_message(
    info: &bollard::models::BuildInfo,
    progress: &mut ProgressReporter,
    state: &mut BuildLogState,
) {
    let Some(stream_msg) = info.stream.as_deref() else {
        return;
    };
    let msg = stream_msg.trim();
    if msg.is_empty() {
        return;
    }

    if progress.is_plain_output() {
        eprint!("{stream_msg}");
    } else {
        let has_runtime_vertex = state
            .last_buildkit_vertex
            .as_deref()
            .is_some_and(|name| name.starts_with("[runtime "));
        let is_internal_msg = msg.contains("[internal]");
        if !(has_runtime_vertex && is_internal_msg) {
            progress.update_spinner("build", stream_msg);
        }
    }

    if state.recent_logs.len() >= state.build_log_buffer_size {
        state.recent_logs.pop_front();
    }
    state.recent_logs.push_back(msg.to_string());

    if is_error_line(msg) {
        if state.error_logs.len() >= state.error_log_buffer_size {
            state.error_logs.pop_front();
        }
        state.error_logs.push_back(msg.to_string());
    }

    if msg.starts_with("Step ") {
        debug!("Build step: {}", msg);
    }
}

fn handle_buildkit_status(
    status: &BuildkitStatusResponse,
    progress: &mut ProgressReporter,
    state: &mut BuildLogState,
) {
    let latest_logs = append_buildkit_logs(&mut state.buildkit_logs_by_vertex_id, status);
    update_buildkit_vertex_names(&mut state.vertex_name_by_vertex_id, status);
    update_export_vertex_from_logs(
        &latest_logs,
        &state.vertex_name_by_vertex_id,
        &mut state.export_vertex_id,
        &mut state.export_vertex_name,
    );
    let (vertex_id, vertex_name) = match select_latest_buildkit_vertex(
        status,
        &state.vertex_name_by_vertex_id,
        state.export_vertex_id.as_deref(),
        state.export_vertex_name.as_deref(),
    ) {
        Some((vertex_id, vertex_name)) => (vertex_id, vertex_name),
        None => {
            let Some(log_entry) = latest_logs.last() else {
                return;
            };
            let name = state
                .vertex_name_by_vertex_id
                .get(&log_entry.vertex_id)
                .cloned()
                .or_else(|| state.last_buildkit_vertex.clone())
                .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));
            (log_entry.vertex_id.clone(), name)
        }
    };
    record_buildkit_logs(state, &latest_logs, &vertex_id, &vertex_name);
    state.last_buildkit_vertex_id = Some(vertex_id.clone());
    if state.last_buildkit_vertex.as_deref() != Some(&vertex_name) {
        state.last_buildkit_vertex = Some(vertex_name.clone());
    }

    let message = if progress.is_plain_output() {
        vertex_name
    } else if let Some(log_entry) = latest_logs
        .iter()
        .rev()
        .find(|entry| entry.vertex_id == vertex_id)
    {
        format!("{vertex_name} · {}", log_entry.message)
    } else {
        vertex_name
    };
    progress.update_spinner("build", &message);

    if progress.is_plain_output() {
        for log_entry in latest_logs {
            eprintln!("[{}] {}", log_entry.vertex_id, log_entry.message);
        }
        return;
    }

    let (Some(current_id), Some(current_name)) = (
        state.last_buildkit_vertex_id.as_ref(),
        state.last_buildkit_vertex.as_ref(),
    ) else {
        return;
    };

    let name = state
        .vertex_name_by_vertex_id
        .get(current_id)
        .unwrap_or(current_name);
    // Keep non-verbose output on the spinner line only.
    let _ = name;
}

fn handle_stream_error(
    prefix: &str,
    error_str: String,
    state: &BuildLogState,
    progress: &mut ProgressReporter,
) -> DockerError {
    progress.abandon_all(prefix);

    let buildkit_hint = if error_str.contains("mount")
        || error_str.contains("--mount")
        || state
            .recent_logs
            .iter()
            .any(|log| log.contains("--mount") && log.contains("cache"))
    {
        "\n\nNote: This Dockerfile uses BuildKit cache mounts (--mount=type=cache).\n\
         The build is configured to use BuildKit, but the Docker daemon may not support it.\n\
         Ensure BuildKit is enabled in Docker Desktop settings and the daemon is restarted."
    } else {
        ""
    };

    let context = format!(
        "{}{}",
        format_build_error_with_context(
            &error_str,
            &state.recent_logs,
            &state.error_logs,
            &state.recent_buildkit_logs,
        ),
        buildkit_hint
    );
    DockerError::Build(context)
}

fn update_buildkit_vertex_names(
    vertex_name_by_vertex_id: &mut HashMap<String, String>,
    status: &BuildkitStatusResponse,
) {
    for vertex in &status.vertexes {
        if vertex.name.is_empty() {
            continue;
        }
        vertex_name_by_vertex_id
            .entry(vertex.digest.clone())
            .or_insert_with(|| vertex.name.clone());
    }
}

fn select_latest_buildkit_vertex(
    status: &BuildkitStatusResponse,
    vertex_name_by_vertex_id: &HashMap<String, String>,
    export_vertex_id: Option<&str>,
    export_vertex_name: Option<&str>,
) -> Option<(String, String)> {
    if let Some(export_vertex_id) = export_vertex_id {
        let name = export_vertex_name
            .map(str::to_string)
            .or_else(|| vertex_name_by_vertex_id.get(export_vertex_id).cloned())
            .unwrap_or_else(|| format_vertex_fallback_label(export_vertex_id));
        return Some((export_vertex_id.to_string(), name));
    }

    let mut best_runtime: Option<(u32, String, String)> = None;
    let mut fallback: Option<(String, String)> = None;

    for vertex in &status.vertexes {
        let name = if vertex.name.is_empty() {
            vertex_name_by_vertex_id.get(&vertex.digest).cloned()
        } else {
            Some(vertex.name.clone())
        };

        let Some(name) = name else {
            continue;
        };

        if fallback.is_none() && !name.starts_with("[internal]") {
            fallback = Some((vertex.digest.clone(), name.clone()));
        }

        if let Some(step) = parse_runtime_step(&name) {
            match &best_runtime {
                Some((best_step, _, _)) if *best_step >= step => {}
                _ => {
                    best_runtime = Some((step, vertex.digest.clone(), name.clone()));
                }
            }
        }
    }

    if let Some((_, digest, name)) = best_runtime {
        Some((digest, name))
    } else {
        fallback.or_else(|| {
            status.vertexes.iter().find_map(|vertex| {
                let name = if vertex.name.is_empty() {
                    vertex_name_by_vertex_id.get(&vertex.digest).cloned()
                } else {
                    Some(vertex.name.clone())
                };
                name.map(|resolved| (vertex.digest.clone(), resolved))
            })
        })
    }
}

fn parse_runtime_step(name: &str) -> Option<u32> {
    let prefix = "[runtime ";
    let start = name.find(prefix)? + prefix.len();
    let rest = &name[start..];
    let end = rest.find('/')?;
    rest[..end].trim().parse::<u32>().ok()
}
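
// Worked example: for a BuildKit vertex named "[runtime  3/12] RUN bun install",
// `find` locates "[runtime ", the slice up to '/' is " 3" (hence the trim), and
// the parsed step is Some(3). Names without the prefix or a '/' yield None.
//
//     assert_eq!(parse_runtime_step("[runtime  3/12] RUN bun install"), Some(3));
//     assert_eq!(parse_runtime_step("[internal] load build context"), None);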

fn format_vertex_fallback_label(vertex_id: &str) -> String {
    let short = vertex_id
        .strip_prefix("sha256:")
        .unwrap_or(vertex_id)
        .chars()
        .take(12)
        .collect::<String>();
    format!("vertex {short}")
}
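
// Worked example: digests are shortened to 12 characters for display.
//
//     assert_eq!(
//         format_vertex_fallback_label(
//             "sha256:9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"
//         ),
//         "vertex 9f86d081884c"
//     );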

fn update_export_vertex_from_logs(
    latest_logs: &[BuildkitLogEntry],
    vertex_name_by_vertex_id: &HashMap<String, String>,
    export_vertex_id: &mut Option<String>,
    export_vertex_name: &mut Option<String>,
) {
    if let Some(entry) = latest_logs
        .iter()
        .rev()
        .find(|log| log.message.trim_start().starts_with("exporting to image"))
    {
        *export_vertex_id = Some(entry.vertex_id.clone());
        if let Some(name) = vertex_name_by_vertex_id.get(&entry.vertex_id) {
            *export_vertex_name = Some(name.clone());
        }
    }
}

fn record_buildkit_logs(
    state: &mut BuildLogState,
    latest_logs: &[BuildkitLogEntry],
    current_vertex_id: &str,
    current_vertex_name: &str,
) {
    for log_entry in latest_logs {
        let name = state
            .vertex_name_by_vertex_id
            .get(&log_entry.vertex_id)
            .cloned()
            .or_else(|| {
                if log_entry.vertex_id == current_vertex_id {
                    Some(current_vertex_name.to_string())
                } else {
                    None
                }
            })
            .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));

        let message = log_entry.message.replace('\r', "").trim_end().to_string();
        if message.is_empty() {
            continue;
        }

        if state.recent_buildkit_logs.len() >= state.build_log_buffer_size {
            state.recent_buildkit_logs.pop_front();
        }
        state
            .recent_buildkit_logs
            .push_back(format!("[{name}] {message}"));
    }
}

#[derive(Debug, Clone)]
struct BuildkitLogEntry {
    vertex_id: String,
    message: String,
}

fn append_buildkit_logs(
    logs: &mut HashMap<String, String>,
    status: &BuildkitStatusResponse,
) -> Vec<BuildkitLogEntry> {
    let mut latest: Vec<BuildkitLogEntry> = Vec::new();

    for log in &status.logs {
        let vertex_id = log.vertex.clone();
        let message = String::from_utf8_lossy(&log.msg).to_string();
        let entry = logs.entry(vertex_id.clone()).or_default();
        entry.push_str(&message);
        latest.push(BuildkitLogEntry { vertex_id, message });
    }

    latest
}

/// Pull the opencode image from a registry with automatic fallback
///
/// Tries GHCR first, falls back to Docker Hub on failure.
/// Returns the full image:tag string on success.
pub async fn pull_image(
    client: &DockerClient,
    tag: Option<&str>,
    progress: &mut ProgressReporter,
) -> Result<String, DockerError> {
    let requested_tag = tag.unwrap_or(IMAGE_TAG_DEFAULT);
    let resolved_tag = effective_image_tag(requested_tag);
    let isolated_default_tag =
        requested_tag == IMAGE_TAG_DEFAULT && resolved_tag != IMAGE_TAG_DEFAULT;
    let registry_pull_tag = if isolated_default_tag {
        IMAGE_TAG_DEFAULT
    } else {
        requested_tag
    };

    // Try GHCR first
    debug!(
        "Attempting to pull from GHCR: {}:{}",
        IMAGE_NAME_GHCR, registry_pull_tag
    );
    let ghcr_err =
        match pull_from_registry(client, IMAGE_NAME_GHCR, registry_pull_tag, progress).await {
            Ok(()) => {
                if isolated_default_tag {
                    retag_local_image(
                        client,
                        &format!("{IMAGE_NAME_GHCR}:{registry_pull_tag}"),
                        &resolved_tag,
                    )
                    .await?;
                }
                let full_name = format!("{IMAGE_NAME_GHCR}:{resolved_tag}");
                return Ok(full_name);
            }
            Err(e) => e,
        };

    warn!(
        "GHCR pull failed: {}. Trying Docker Hub fallback...",
        ghcr_err
    );

    // Try Docker Hub as fallback
    debug!(
        "Attempting to pull from Docker Hub: {}:{}",
        IMAGE_NAME_DOCKERHUB, registry_pull_tag
    );
    match pull_from_registry(client, IMAGE_NAME_DOCKERHUB, registry_pull_tag, progress).await {
        Ok(()) => {
            if isolated_default_tag {
                retag_local_image(
                    client,
                    &format!("{IMAGE_NAME_DOCKERHUB}:{registry_pull_tag}"),
                    &resolved_tag,
                )
                .await?;
                return Ok(format!("{IMAGE_NAME_GHCR}:{resolved_tag}"));
            }
            let full_name = format!("{IMAGE_NAME_DOCKERHUB}:{resolved_tag}");
            Ok(full_name)
        }
        Err(dockerhub_err) => Err(DockerError::Pull(format!(
            "Failed to pull from both registries. GHCR: {ghcr_err}. Docker Hub: {dockerhub_err}"
        ))),
    }
}
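
// Hedged usage sketch: callers typically pass `None` for the tag and let this
// function handle the GHCR -> Docker Hub fallback. The returned string is the
// GHCR or Docker Hub image:tag name of whichever registry succeeded, except in
// isolated mode, where the image is retagged locally and the GHCR name is
// returned either way.
//
//     let full_name = pull_image(&client, None, &mut progress).await?;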

async fn retag_local_image(
    client: &DockerClient,
    source_image: &str,
    target_tag: &str,
) -> Result<(), DockerError> {
    let options = TagImageOptions {
        repo: Some(IMAGE_NAME_GHCR.to_string()),
        tag: Some(target_tag.to_string()),
    };
    client
        .inner()
        .tag_image(source_image, Some(options))
        .await
        .map_err(|e| {
            DockerError::Pull(format!(
                "Failed to retag pulled image {source_image} as {IMAGE_NAME_GHCR}:{target_tag}: {e}"
            ))
        })?;
    Ok(())
}

/// Maximum number of retry attempts for pull operations
const MAX_PULL_RETRIES: usize = 3;

/// Pull from a specific registry with retry logic
async fn pull_from_registry(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");

    // Manual retry loop since async closures can't capture mutable references
    let mut last_error = None;
    for attempt in 1..=MAX_PULL_RETRIES {
        debug!(
            "Pull attempt {}/{} for {}",
            attempt, MAX_PULL_RETRIES, full_name
        );

        match do_pull(client, image, tag, progress).await {
            Ok(()) => return Ok(()),
            Err(e) => {
                warn!("Pull attempt {} failed: {}", attempt, e);
                last_error = Some(e);

                if attempt < MAX_PULL_RETRIES {
                    // Exponential backoff between attempts: 1s then 2s (no sleep after the final attempt)
                    let delay_ms = 1000 * (1 << (attempt - 1));
                    tokio::time::sleep(std::time::Duration::from_millis(delay_ms)).await;
                }
            }
        }
    }

    Err(last_error.unwrap_or_else(|| {
        DockerError::Pull(format!(
            "Pull failed for {full_name} after {MAX_PULL_RETRIES} attempts"
        ))
    }))
}
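
// Worked schedule for MAX_PULL_RETRIES = 3: delay_ms = 1000 * 2^(attempt - 1),
// so a failed attempt 1 sleeps 1000 ms before attempt 2, a failed attempt 2
// sleeps 2000 ms before attempt 3, and attempt 3 returns the last error
// without sleeping.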

/// Perform the actual pull operation
async fn do_pull(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");

    let options = CreateImageOptions {
        from_image: Some(image.to_string()),
        tag: Some(tag.to_string()),
        platform: String::new(),
        ..Default::default()
    };

    let mut stream = client.inner().create_image(Some(options), None, None);

    // Add main spinner for overall progress
    progress.add_spinner("pull", &format!("Pulling {full_name}..."));

    while let Some(result) = stream.next().await {
        match result {
            Ok(info) => {
                // Handle errors from the stream
                if let Some(error_detail) = &info.error_detail
                    && let Some(error_msg) = &error_detail.message
                {
                    progress.abandon_all(error_msg);
                    return Err(DockerError::Pull(error_msg.to_string()));
                }

                // Handle layer progress
                if let Some(layer_id) = &info.id {
                    let status = info.status.as_deref().unwrap_or("");

                    match status {
                        "Already exists" => {
                            progress.finish(layer_id, "Already exists");
                        }
                        "Pull complete" => {
                            progress.finish(layer_id, "Pull complete");
                        }
                        "Downloading" | "Extracting" => {
                            if let Some(progress_detail) = &info.progress_detail {
                                let current = progress_detail.current.unwrap_or(0) as u64;
                                let total = progress_detail.total.unwrap_or(0) as u64;

                                if total > 0 {
                                    progress.update_layer(layer_id, current, total, status);
                                }
                            }
                        }
                        _ => {
                            // Other statuses (Waiting, Verifying, etc.)
                            progress.update_spinner(layer_id, status);
                        }
                    }
                } else if let Some(status) = &info.status {
                    // Overall status messages (no layer id)
                    progress.update_spinner("pull", status);
                }
            }
            Err(e) => {
                progress.abandon_all("Pull failed");
                return Err(DockerError::Pull(format!("Pull failed: {e}")));
            }
        }
    }

    progress.finish("pull", &format!("Pull complete: {full_name}"));
    Ok(())
}

/// Format a build error with recent log context for actionable debugging
fn format_build_error_with_context(
    error: &str,
    recent_logs: &VecDeque<String>,
    error_logs: &VecDeque<String>,
    recent_buildkit_logs: &VecDeque<String>,
) -> String {
    let mut message = String::new();

    // Add main error message
    message.push_str(error);

    // Add captured error lines if they differ from recent logs
    // (these are error-like lines that may have scrolled off)
    if !error_logs.is_empty() {
        // Check if error_logs contains lines not in recent_logs
        let recent_set: std::collections::HashSet<_> = recent_logs.iter().collect();
        let unique_errors: Vec<_> = error_logs
            .iter()
            .filter(|line| !recent_set.contains(line))
            .collect();

        if !unique_errors.is_empty() {
            message.push_str("\n\nPotential errors detected during build:");
            for line in unique_errors {
                message.push_str("\n  ");
                message.push_str(line);
            }
        }
    }

    // Add recent BuildKit log context if available
    if !recent_buildkit_logs.is_empty() {
        message.push_str("\n\nRecent BuildKit output:");
        for line in recent_buildkit_logs {
            message.push_str("\n  ");
            message.push_str(line);
        }
    }

    // Add recent log context if available
    if !recent_logs.is_empty() {
        message.push_str("\n\nRecent build output:");
        for line in recent_logs {
            message.push_str("\n  ");
            message.push_str(line);
        }
    } else if recent_buildkit_logs.is_empty() {
        message.push_str("\n\nNo build output was received from the Docker daemon.");
        message.push_str("\nThis usually means the build failed before any logs were streamed.");
    }

    // Add actionable suggestions based on common error patterns
    let error_lower = error.to_lowercase();
    if error_lower.contains("network")
        || error_lower.contains("connection")
        || error_lower.contains("timeout")
    {
        message.push_str("\n\nSuggestion: Check your network connection and Docker's ability to reach the internet.");
    } else if error_lower.contains("disk")
        || error_lower.contains("space")
        || error_lower.contains("no space")
    {
        message.push_str("\n\nSuggestion: Free up disk space with 'docker system prune' or check available storage.");
    } else if error_lower.contains("permission") || error_lower.contains("denied") {
        message.push_str("\n\nSuggestion: Check Docker permissions. You may need to add your user to the 'docker' group.");
    }

    message
}

/// Create a gzipped tar archive containing the Dockerfile and its supporting build assets.
fn create_build_context(options: BuildContextOptions) -> Result<Vec<u8>, io::Error> {
    let repo_root = if options.include_local_opencode_submodule {
        Some(workspace_root_for_build_context()?)
    } else {
        None
    };
    create_build_context_with_repo_root(options, repo_root.as_deref())
}

fn workspace_root_for_build_context() -> Result<PathBuf, io::Error> {
    Path::new(env!("CARGO_MANIFEST_DIR"))
        .join("../..")
        .canonicalize()
}

fn create_build_context_with_repo_root(
    options: BuildContextOptions,
    repo_root: Option<&Path>,
) -> Result<Vec<u8>, io::Error> {
    let mut archive_buffer = Vec::new();

    {
        let encoder = GzEncoder::new(&mut archive_buffer, Compression::default());
        let mut tar = TarBuilder::new(encoder);

        // Add Dockerfile to archive
        let dockerfile_bytes = DOCKERFILE.as_bytes();
        append_bytes(&mut tar, "Dockerfile", dockerfile_bytes, 0o644)?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/entrypoint.sh",
            ENTRYPOINT_SH,
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode-cloud-bootstrap.sh",
            OPENCODE_CLOUD_BOOTSTRAP_SH,
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/healthcheck.sh",
            HEALTHCHECK_SH,
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode-broker.service",
            include_bytes!("files/opencode-broker.service"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode.service",
            include_bytes!("files/opencode.service"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/pam/opencode",
            include_bytes!("files/pam/opencode"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode.jsonc",
            include_bytes!("files/opencode.jsonc"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/starship.toml",
            include_bytes!("files/starship.toml"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/bashrc.extra",
            include_bytes!("files/bashrc.extra"),
            0o644,
        )?;

        // Dockerfile always references this path with COPY. Keep an empty directory present
        // even in remote mode so default builds stay lightweight.
        append_directory(
            &mut tar,
            Path::new(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH),
            0o755,
        )?;
        if options.include_local_opencode_submodule {
            let repo_root = repo_root.ok_or_else(|| {
                io::Error::new(
                    io::ErrorKind::NotFound,
                    "Local opencode build requested but workspace root is unavailable",
                )
            })?;
            append_local_opencode_submodule(&mut tar, repo_root)?;
        }

        tar.finish()?;

        // Finish gzip encoding
        let encoder = tar.into_inner()?;
        encoder.finish()?;
    }

    Ok(archive_buffer)
}

fn append_local_opencode_submodule<W: Write>(
    tar: &mut TarBuilder<W>,
    repo_root: &Path,
) -> Result<(), io::Error> {
    let source_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);
    if !source_root.is_dir() {
        return Err(io::Error::new(
            io::ErrorKind::NotFound,
            format!(
                "Local opencode submodule path not found: {}",
                source_root.display()
            ),
        ));
    }
    let canonical_source_root = source_root.canonicalize()?;

    append_local_tree_recursive(
        tar,
        &source_root,
        &canonical_source_root,
        Path::new(""),
        Path::new(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH),
    )
}

fn append_local_tree_recursive<W: Write>(
    tar: &mut TarBuilder<W>,
    source_root: &Path,
    canonical_source_root: &Path,
    relative_path: &Path,
    archive_root: &Path,
) -> Result<(), io::Error> {
    let current_path = source_root.join(relative_path);
    let mut entries: Vec<_> =
        fs::read_dir(&current_path)?.collect::<Result<Vec<_>, io::Error>>()?;
    entries.sort_by_key(|a| a.file_name());

    for entry in entries {
        let file_name = entry.file_name();
        let entry_relative = if relative_path.as_os_str().is_empty() {
            PathBuf::from(&file_name)
        } else {
            relative_path.join(&file_name)
        };

        if should_exclude_local_opencode_path(&entry_relative) {
            continue;
        }

        let entry_path = entry.path();
        let metadata = fs::symlink_metadata(&entry_path)?;
        let archive_path = archive_root.join(&entry_relative);

        if metadata.is_dir() {
            append_directory(tar, &archive_path, mode_from_metadata(&metadata, 0o755))?;
            append_local_tree_recursive(
                tar,
                source_root,
                canonical_source_root,
                &entry_relative,
                archive_root,
            )?;
            continue;
        }

        if metadata.is_file() {
            append_file_from_disk(
                tar,
                &archive_path,
                &entry_path,
                mode_from_metadata(&metadata, 0o644),
            )?;
            continue;
        }

        if metadata.file_type().is_symlink() {
            // Some opencode assets (for example UI fonts) are symlinks. Materialize symlinked files
            // into the archive when they stay inside the checkout; skip links outside the tree.
            match resolve_local_symlink_target(&entry_path, canonical_source_root)? {
                Some(target_path) => {
                    let target_metadata = fs::metadata(&target_path)?;
                    if target_metadata.is_file() {
                        append_file_from_disk(
                            tar,
                            &archive_path,
                            &target_path,
                            mode_from_metadata(&target_metadata, 0o644),
                        )?;
                    } else {
                        debug!(
                            "Skipping symlink with non-file target in local opencode context: {} -> {}",
                            entry_path.display(),
                            target_path.display()
                        );
                    }
                }
                None => {
                    debug!(
                        "Skipping symlink outside checkout or unresolved in local opencode context: {}",
                        entry_path.display()
                    );
                }
            }
        }
    }

    Ok(())
}

fn resolve_local_symlink_target(
    link_path: &Path,
    canonical_source_root: &Path,
) -> Result<Option<PathBuf>, io::Error> {
    let link_target = fs::read_link(link_path)?;
    let resolved = if link_target.is_absolute() {
        link_target
    } else {
        link_path
            .parent()
            .unwrap_or_else(|| Path::new(""))
            .join(link_target)
    };

    // Broken links are ignored in local dev mode instead of failing the entire build context.
    let canonical_target = match resolved.canonicalize() {
        Ok(path) => path,
        Err(_) => return Ok(None),
    };
    if canonical_target.starts_with(canonical_source_root) {
        Ok(Some(canonical_target))
    } else {
        Ok(None)
    }
}

fn should_exclude_local_opencode_path(relative_path: &Path) -> bool {
    if relative_path.file_name().is_some_and(|name| {
        LOCAL_OPENCODE_EXCLUDED_FILES
            .iter()
            .any(|candidate| name == OsStr::new(candidate))
    }) {
        return true;
    }

    relative_path.components().any(|component| {
        let part = component.as_os_str();
        LOCAL_OPENCODE_EXCLUDED_DIRS
            .iter()
            .any(|candidate| part == OsStr::new(candidate))
    })
}
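
// Illustrative sketch: exclusion is by path component for directories and by
// file name for files, so matches are caught anywhere in the tree.
//
//     assert!(should_exclude_local_opencode_path(Path::new("node_modules/pkg/index.js")));
//     assert!(should_exclude_local_opencode_path(Path::new("packages/ui/.DS_Store")));
//     assert!(!should_exclude_local_opencode_path(Path::new("src/main.ts")));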
1262
1263#[cfg(unix)]
1264fn mode_from_metadata(metadata: &fs::Metadata, fallback: u32) -> u32 {
1265    use std::os::unix::fs::PermissionsExt;
1266    let mode = metadata.permissions().mode() & 0o7777;
1267    if mode == 0 { fallback } else { mode }
1268}
1269
1270#[cfg(not(unix))]
1271fn mode_from_metadata(_metadata: &fs::Metadata, fallback: u32) -> u32 {
1272    fallback
1273}
1274
1275fn append_directory<W: Write>(
1276    tar: &mut TarBuilder<W>,
1277    path: &Path,
1278    mode: u32,
1279) -> Result<(), io::Error> {
1280    let mut header = tar::Header::new_gnu();
1281    header.set_size(0);
1282    header.set_mode(mode);
1283    header.set_entry_type(tar::EntryType::Directory);
1284    // Use append_data path handling so GNU long-name extensions are emitted when needed.
1285    // Direct set_path() is brittle for deep/long local dev paths.
1286    tar.append_data(&mut header, path, io::empty())?;
1287    Ok(())
1288}
1289
1290fn append_file_from_disk<W: Write>(
1291    tar: &mut TarBuilder<W>,
1292    archive_path: &Path,
1293    source_path: &Path,
1294    mode: u32,
1295) -> Result<(), io::Error> {
1296    let mut file = fs::File::open(source_path)?;
1297    let metadata = file.metadata()?;
1298    let mut header = tar::Header::new_gnu();
1299    header.set_size(metadata.len());
1300    header.set_mode(mode);
1301    // Use append_data path handling so GNU long-name extensions are emitted when needed.
1302    // Direct set_path() is brittle for deep/long local dev paths.
1303    tar.append_data(&mut header, archive_path, &mut file)?;
1304    Ok(())
1305}
1306
1307fn append_bytes<W: Write>(
1308    tar: &mut TarBuilder<W>,
1309    path: &str,
1310    contents: &[u8],
1311    mode: u32,
1312) -> Result<(), io::Error> {
1313    let mut header = tar::Header::new_gnu();
1314    header.set_path(path)?;
1315    header.set_size(contents.len() as u64);
1316    header.set_mode(mode);
1317    header.set_cksum();
1318
1319    tar.append(&header, contents)?;
1320    Ok(())
1321}
1322
1323#[cfg(test)]
1324mod tests {
1325    use super::*;
1326    use bollard::models::ImageSummary;
1327    use flate2::read::GzDecoder;
1328    use std::collections::{HashMap, HashSet};
1329    use std::fs;
1330    use std::io::{Cursor, Read};
1331    use tar::Archive;
1332    use tempfile::tempdir;
1333
1334    fn make_image_summary(
1335        id: &str,
1336        tags: Vec<&str>,
1337        digests: Vec<&str>,
1338        labels: HashMap<String, String>,
1339    ) -> ImageSummary {
1340        ImageSummary {
1341            id: id.to_string(),
1342            parent_id: String::new(),
1343            repo_tags: tags.into_iter().map(|tag| tag.to_string()).collect(),
1344            repo_digests: digests
1345                .into_iter()
1346                .map(|digest| digest.to_string())
1347                .collect(),
1348            created: 0,
1349            size: 0,
1350            shared_size: -1,
1351            labels,
1352            containers: 0,
1353            manifests: None,
1354            descriptor: None,
1355        }
1356    }
1357
1358    fn archive_entries(context: Vec<u8>) -> HashSet<String> {
1359        let cursor = Cursor::new(context);
1360        let decoder = GzDecoder::new(cursor);
1361        let mut archive = Archive::new(decoder);
1362        let mut paths = HashSet::new();
1363        for entry in archive.entries().expect("should read archive entries") {
1364            let entry = entry.expect("should read entry");
1365            let path = entry.path().expect("should read entry path");
1366            paths.insert(path.to_string_lossy().to_string());
1367        }
1368        paths
1369    }
1370
1371    fn archive_entry_bytes(context: Vec<u8>, wanted_path: &str) -> Option<Vec<u8>> {
1372        let cursor = Cursor::new(context);
1373        let decoder = GzDecoder::new(cursor);
1374        let mut archive = Archive::new(decoder);
1375        for entry in archive.entries().expect("should read archive entries") {
1376            let mut entry = entry.expect("should read entry");
1377            let path = entry.path().expect("should read entry path");
1378            if path == Path::new(wanted_path) {
1379                let mut bytes = Vec::new();
1380                entry
1381                    .read_to_end(&mut bytes)
1382                    .expect("should read entry bytes");
1383                return Some(bytes);
1384            }
1385        }
1386        None
1387    }
1388
1389    #[test]
1390    fn create_build_context_succeeds() {
1391        let context =
1392            create_build_context(BuildContextOptions::default()).expect("should create context");
1393        assert!(!context.is_empty(), "context should not be empty");
1394
1395        // Verify it's gzip-compressed (gzip magic bytes)
1396        assert_eq!(context[0], 0x1f, "should be gzip compressed");
1397        assert_eq!(context[1], 0x8b, "should be gzip compressed");
1398    }
1399
1400    #[test]
1401    fn build_context_includes_docker_assets() {
1402        let context =
1403            create_build_context(BuildContextOptions::default()).expect("should create context");
1404        let cursor = Cursor::new(context);
1405        let decoder = GzDecoder::new(cursor);
1406        let mut archive = Archive::new(decoder);
1407        let mut found_entrypoint = false;
1408        let mut found_healthcheck = false;
1409        let mut found_bootstrap_helper = false;
1410
1411        for entry in archive.entries().expect("should read archive entries") {
1412            let entry = entry.expect("should read entry");
1413            let path = entry.path().expect("should read entry path");
1414            if path == std::path::Path::new("packages/core/src/docker/files/entrypoint.sh") {
1415                found_entrypoint = true;
1416            }
1417            if path == std::path::Path::new("packages/core/src/docker/files/healthcheck.sh") {
1418                found_healthcheck = true;
1419            }
1420            if path
1421                == std::path::Path::new(
1422                    "packages/core/src/docker/files/opencode-cloud-bootstrap.sh",
1423                )
1424            {
1425                found_bootstrap_helper = true;
1426            }
1427            if found_entrypoint && found_healthcheck && found_bootstrap_helper {
1428                break;
1429            }
1430        }
1431
1432        assert!(
1433            found_entrypoint,
1434            "entrypoint asset should be in the build context"
1435        );
1436        assert!(
1437            found_healthcheck,
1438            "healthcheck asset should be in the build context"
1439        );
1440        assert!(
1441            found_bootstrap_helper,
1442            "bootstrap helper asset should be in the build context"
1443        );
1444    }
1445
1446    #[test]
1447    fn build_context_includes_opencode_placeholder_in_default_mode() {
1448        let context =
1449            create_build_context(BuildContextOptions::default()).expect("should create context");
1450        let entries = archive_entries(context);
1451        assert!(
1452            entries
1453                .iter()
1454                .any(|path| path.trim_end_matches('/') == "packages/opencode"),
1455            "default mode should include an empty packages/opencode placeholder"
1456        );
1457    }
1458
1459    #[test]
1460    fn build_context_local_mode_includes_submodule_and_excludes_heavy_paths() {
1461        let temp = tempdir().expect("should create tempdir");
1462        let repo_root = temp.path();
1463
1464        let submodule_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);
1465        fs::create_dir_all(submodule_root.join("src")).expect("should create src");
1466        fs::create_dir_all(submodule_root.join(".git")).expect("should create .git");
1467        fs::create_dir_all(submodule_root.join("node_modules/pkg"))
1468            .expect("should create node_modules");
        fs::create_dir_all(submodule_root.join("target/release")).expect("should create target");
        fs::create_dir_all(submodule_root.join("dist")).expect("should create dist");
        fs::create_dir_all(submodule_root.join(".turbo")).expect("should create .turbo");
        fs::create_dir_all(submodule_root.join(".cache")).expect("should create .cache");
        fs::create_dir_all(submodule_root.join(".planning/phases/very-long-planning-phase-name"))
            .expect("should create planning");

        fs::write(submodule_root.join("package.json"), "{}").expect("should write package.json");
        fs::write(submodule_root.join("src/main.ts"), "console.log('ok');")
            .expect("should write source");
        fs::write(submodule_root.join(".git/config"), "dirty").expect("should write .git file");
        fs::write(submodule_root.join("node_modules/pkg/index.js"), "ignored")
            .expect("should write node_modules file");
        fs::write(submodule_root.join("target/release/app"), "ignored")
            .expect("should write target file");
        fs::write(submodule_root.join("dist/ui.js"), "ignored").expect("should write dist file");
        fs::write(submodule_root.join(".turbo/state.json"), "ignored")
            .expect("should write turbo file");
        fs::write(submodule_root.join(".cache/cache.bin"), "ignored")
            .expect("should write cache file");
        fs::write(
            submodule_root.join(".planning/phases/very-long-planning-phase-name/phase.md"),
            "ignored",
        )
        .expect("should write planning file");
        fs::write(submodule_root.join(".DS_Store"), "ignored").expect("should write ds_store");

        let context = create_build_context_with_repo_root(
            BuildContextOptions {
                include_local_opencode_submodule: true,
            },
            Some(repo_root),
        )
        .expect("should create local context");
        let entries = archive_entries(context);

        assert!(
            entries.contains("packages/opencode/package.json"),
            "local mode should include submodule files"
        );
        assert!(
            entries.contains("packages/opencode/src/main.ts"),
            "local mode should include source files"
        );
        assert!(
            !entries.contains("packages/opencode/.git/config"),
            "local mode should exclude .git"
        );
        assert!(
            !entries.contains("packages/opencode/node_modules/pkg/index.js"),
            "local mode should exclude node_modules"
        );
        assert!(
            !entries.contains("packages/opencode/target/release/app"),
            "local mode should exclude target"
        );
        assert!(
            !entries.contains("packages/opencode/dist/ui.js"),
            "local mode should exclude dist"
        );
        assert!(
            !entries.contains("packages/opencode/.turbo/state.json"),
            "local mode should exclude .turbo"
        );
        assert!(
            !entries.contains("packages/opencode/.cache/cache.bin"),
            "local mode should exclude .cache"
        );
        assert!(
            !entries.contains(
                "packages/opencode/.planning/phases/very-long-planning-phase-name/phase.md"
            ),
            "local mode should exclude .planning"
        );
        assert!(
            !entries.contains("packages/opencode/.DS_Store"),
            "local mode should exclude .DS_Store files"
        );
    }

    #[test]
    fn build_context_local_mode_supports_long_non_excluded_paths() {
        let temp = tempdir().expect("should create tempdir");
        let repo_root = temp.path();
        let submodule_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);

        fs::create_dir_all(&submodule_root).expect("should create submodule root");
        fs::write(submodule_root.join("package.json"), "{}").expect("should write package.json");

        // A 140-character segment exceeds the 100-byte name field of the
        // classic ustar header, so archiving this path requires the GNU
        // long-name extension that this test exercises.
        let long_segment = "a".repeat(140);
        let long_dir = submodule_root.join("src").join(&long_segment);
        fs::create_dir_all(&long_dir).expect("should create long path directory");
        fs::write(long_dir.join("main.ts"), "console.log('long path');")
            .expect("should write long path file");

        let context = create_build_context_with_repo_root(
            BuildContextOptions {
                include_local_opencode_submodule: true,
            },
            Some(repo_root),
        )
        .expect("should create local context with long paths");
        let entries = archive_entries(context);
        let long_entry = format!("packages/opencode/src/{long_segment}/main.ts");
        assert!(
            entries.contains(&long_entry),
            "long non-excluded path should be archived via GNU long-name handling"
        );
    }

    #[cfg(unix)]
    #[test]
    fn build_context_local_mode_materializes_symlinked_files() {
        use std::os::unix::fs::symlink;

        let temp = tempdir().expect("should create tempdir");
        let repo_root = temp.path();
        let submodule_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);
        let fonts_dir = submodule_root.join("packages/ui/src/assets/fonts");
        fs::create_dir_all(&fonts_dir).expect("should create fonts dir");
        fs::write(submodule_root.join("package.json"), "{}").expect("should write package.json");
        fs::write(
            fonts_dir.join("BlexMonoNerdFontMono-Regular.woff2"),
            b"font-bytes",
        )
        .expect("should write target font");
        symlink(
            "BlexMonoNerdFontMono-Regular.woff2",
            fonts_dir.join("ibm-plex-mono.woff2"),
        )
        .expect("should create symlinked font");

        let context = create_build_context_with_repo_root(
            BuildContextOptions {
                include_local_opencode_submodule: true,
            },
            Some(repo_root),
        )
        .expect("should create local context with symlink");
        let entries = archive_entries(context.clone());

        assert!(
            entries.contains("packages/opencode/packages/ui/src/assets/fonts/ibm-plex-mono.woff2"),
            "local mode should include symlinked asset paths"
        );
        let alias_bytes = archive_entry_bytes(
            context,
            "packages/opencode/packages/ui/src/assets/fonts/ibm-plex-mono.woff2",
        )
        .expect("symlinked asset should contain bytes");
        assert_eq!(alias_bytes, b"font-bytes");
    }

    #[test]
    fn default_tag_is_latest() {
        assert_eq!(IMAGE_TAG_DEFAULT, "latest");
    }

    #[test]
    fn format_build_error_includes_recent_logs() {
        let mut logs = VecDeque::new();
        logs.push_back("Step 1/5 : FROM ubuntu:24.04".to_string());
        logs.push_back("Step 2/5 : RUN apt-get update".to_string());
        logs.push_back("E: Unable to fetch some archives".to_string());
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();

        let result = format_build_error_with_context(
            "Build failed: exit code 1",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Build failed: exit code 1"));
        assert!(result.contains("Recent build output:"));
        assert!(result.contains("Step 1/5"));
        assert!(result.contains("Unable to fetch"));
    }
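
    // For orientation, the assertions above imply a failure message of
    // roughly this shape (layout is illustrative; only the quoted
    // substrings are actually guaranteed by the test):
    //
    //     Build failed: exit code 1
    //
    //     Recent build output:
    //       Step 1/5 : FROM ubuntu:24.04
    //       Step 2/5 : RUN apt-get update
    //       E: Unable to fetch some archives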

    #[test]
    fn format_build_error_handles_empty_logs() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result =
            format_build_error_with_context("Stream error", &logs, &error_logs, &buildkit_logs);

        assert!(result.contains("Stream error"));
        assert!(!result.contains("Recent build output:"));
    }

    #[test]
    fn format_build_error_adds_network_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "connection timeout",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Check your network connection"));
    }

    #[test]
    fn format_build_error_adds_disk_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "no space left on device",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Free up disk space"));
    }
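
    // Taken together, the two tests above pin the keyword-to-hint mapping:
    // connection/timeout wording yields a networking hint, and
    // "no space left on device" yields a disk-space hint. Any further
    // trigger phrases are implementation details not asserted here.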

    #[test]
    fn format_build_error_shows_error_lines_separately() {
        let mut recent_logs = VecDeque::new();
        recent_logs.push_back("Compiling foo v1.0".to_string());
        recent_logs.push_back("Successfully installed bar".to_string());

        let mut error_logs = VecDeque::new();
        error_logs.push_back("error: failed to compile dust".to_string());
        error_logs.push_back("error: failed to compile glow".to_string());

        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "Build failed",
            &recent_logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Potential errors detected during build:"));
        assert!(result.contains("failed to compile dust"));
        assert!(result.contains("failed to compile glow"));
    }

    #[test]
    fn is_error_line_detects_errors() {
        assert!(is_error_line("error: something failed"));
        assert!(is_error_line("Error: build failed"));
        assert!(is_error_line("Failed to install package"));
        assert!(is_error_line("cannot find module"));
        assert!(is_error_line("Unable to locate package"));
        assert!(!is_error_line("Compiling foo v1.0"));
        assert!(!is_error_line("Successfully installed"));
    }
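
    // The cases above pin `is_error_line` down to a case-insensitive
    // substring heuristic. A minimal sketch consistent with these
    // assertions (not necessarily the actual implementation defined
    // earlier in this module) would be:
    //
    //     fn is_error_line(line: &str) -> bool {
    //         let lower = line.to_lowercase();
    //         ["error", "failed", "cannot", "unable to"]
    //             .iter()
    //             .any(|needle| lower.contains(needle))
    //     }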

    #[test]
    fn collect_image_ids_matches_labels() {
        let mut labels = HashMap::new();
        labels.insert(LABEL_SOURCE.to_string(), LABEL_SOURCE_VALUE.to_string());

        let images = vec![
            make_image_summary("sha256:opencode", vec![], vec![], labels),
            make_image_summary(
                "sha256:other",
                vec!["busybox:latest"],
                vec![],
                HashMap::new(),
            ),
        ];

        let ids = collect_image_ids(&images, "opencode-cloud-sandbox");
        assert!(ids.contains("sha256:opencode"));
        assert!(!ids.contains("sha256:other"));
    }
}