use super::progress::ProgressReporter;
use super::{
    CONTAINER_NAME, DOCKERFILE, DockerClient, DockerError, ENTRYPOINT_SH, HEALTHCHECK_SH,
    IMAGE_NAME_DOCKERHUB, IMAGE_NAME_GHCR, IMAGE_TAG_DEFAULT, OPENCODE_CLOUD_BOOTSTRAP_SH,
    active_resource_names, remap_image_tag,
};
use bollard::moby::buildkit::v1::StatusResponse as BuildkitStatusResponse;
use bollard::models::BuildInfoAux;
use bollard::query_parameters::{
    BuildImageOptions, BuilderVersion, CreateImageOptions, ListImagesOptionsBuilder,
    RemoveImageOptionsBuilder, TagImageOptions,
};
use bytes::Bytes;
use flate2::Compression;
use flate2::write::GzEncoder;
use futures_util::StreamExt;
use http_body_util::{Either, Full};
use std::collections::{HashMap, HashSet, VecDeque};
use std::env;
use std::ffi::OsStr;
use std::fs;
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
use tar::Builder as TarBuilder;
use tracing::{debug, warn};

const DEFAULT_BUILD_LOG_BUFFER_SIZE: usize = 20;

const DEFAULT_ERROR_LOG_BUFFER_SIZE: usize = 10;

const LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH: &str = "packages/opencode";
const LOCAL_OPENCODE_EXCLUDED_DIRS: &[&str] = &[
    ".git",
    ".planning",
    "node_modules",
    "target",
    "dist",
    ".turbo",
    ".cache",
];
const LOCAL_OPENCODE_EXCLUDED_FILES: &[&str] = &[".DS_Store"];

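/// Options controlling how the Docker build context archive is assembled.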
#[derive(Debug, Clone, Copy, Default)]
struct BuildContextOptions {
    include_local_opencode_submodule: bool,
}

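/// Applies any profile-specific remapping to the requested image tag.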
fn effective_image_tag(tag: &str) -> String {
    remap_image_tag(tag)
}

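/// Returns the IDs of images belonging to the active profile, matched by the
/// profile's expected tags or its instance label. Returns `None` when no
/// instance id is configured, so callers can fall back to name matching.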
fn profile_scoped_image_ids(images: &[bollard::models::ImageSummary]) -> Option<HashSet<String>> {
    let names = active_resource_names();
    let instance_id = names.instance_id.as_deref()?;
    let expected_tags = [
        format!("{IMAGE_NAME_GHCR}:{}", names.image_tag),
        format!("{IMAGE_NAME_DOCKERHUB}:{}", names.image_tag),
        format!("{IMAGE_NAME_GHCR}:{}", names.previous_image_tag),
        format!("{IMAGE_NAME_DOCKERHUB}:{}", names.previous_image_tag),
    ];

    let mut ids = HashSet::new();
    for image in images {
        let tag_match = image
            .repo_tags
            .iter()
            .any(|tag| expected_tags.contains(tag));
        let label_match = image
            .labels
            .get(super::INSTANCE_LABEL_KEY)
            .is_some_and(|value| value == instance_id);
        if tag_match || label_match {
            ids.insert(image.id.clone());
        }
    }
    Some(ids)
}

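/// Reads a log buffer size from `var_name`, falling back to `default` when the
/// variable is unset or unparsable, and clamping the result to 5..=500.
/// For example, `OPENCODE_DOCKER_BUILD_LOG_TAIL=50` keeps the last 50 lines.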
fn read_log_buffer_size(var_name: &str, default: usize) -> usize {
    let Ok(value) = env::var(var_name) else {
        return default;
    };
    let Ok(parsed) = value.trim().parse::<usize>() else {
        return default;
    };
    parsed.clamp(5, 500)
}

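/// Heuristically flags build output lines that look like errors.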
fn is_error_line(line: &str) -> bool {
    let lower = line.to_lowercase();
    lower.contains("error")
        || lower.contains("failed")
        || lower.contains("cannot")
        || lower.contains("unable to")
        || lower.contains("not found")
        || lower.contains("permission denied")
}

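/// Returns whether `image:tag` exists in the local Docker daemon.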
pub async fn image_exists(
    client: &DockerClient,
    image: &str,
    tag: &str,
) -> Result<bool, DockerError> {
    let tag = effective_image_tag(tag);
    let full_name = format!("{image}:{tag}");
    debug!("Checking if image exists: {}", full_name);

    match client.inner().inspect_image(&full_name).await {
        Ok(_) => Ok(true),
        Err(bollard::errors::Error::DockerResponseServerError {
            status_code: 404, ..
        }) => Ok(false),
        Err(e) => Err(DockerError::from(e)),
    }
}

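/// Removes local images whose tags, digests, or labels match `name_fragment`,
/// returning the number of images removed. When the fragment is the sandbox
/// container name, matching is scoped to the active profile where possible.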
pub async fn remove_images_by_name(
    client: &DockerClient,
    name_fragment: &str,
    force: bool,
) -> Result<usize, DockerError> {
    debug!("Removing Docker images matching '{name_fragment}'");

    let images = list_docker_images(client).await?;

    let image_ids = if name_fragment == CONTAINER_NAME {
        profile_scoped_image_ids(&images)
            .unwrap_or_else(|| collect_image_ids(&images, name_fragment))
    } else {
        collect_image_ids(&images, name_fragment)
    };
    remove_image_ids(client, image_ids, force).await
}

async fn list_docker_images(
    client: &DockerClient,
) -> Result<Vec<bollard::models::ImageSummary>, DockerError> {
    let list_options = ListImagesOptionsBuilder::new().all(true).build();
    client
        .inner()
        .list_images(Some(list_options))
        .await
        .map_err(|e| DockerError::Image(format!("Failed to list images: {e}")))
}

const LABEL_TITLE: &str = "org.opencontainers.image.title";
const LABEL_SOURCE: &str = "org.opencontainers.image.source";
const LABEL_URL: &str = "org.opencontainers.image.url";

const LABEL_TITLE_VALUE: &str = "opencode-cloud-sandbox";
const LABEL_SOURCE_VALUE: &str = "https://github.com/pRizz/opencode-cloud";
const LABEL_URL_VALUE: &str = "https://github.com/pRizz/opencode-cloud";

fn collect_image_ids(
    images: &[bollard::models::ImageSummary],
    name_fragment: &str,
) -> HashSet<String> {
    let mut image_ids = HashSet::new();
    for image in images {
        if image_matches_fragment_or_labels(image, name_fragment) {
            image_ids.insert(image.id.clone());
        }
    }
    image_ids
}

fn image_matches_fragment_or_labels(
    image: &bollard::models::ImageSummary,
    name_fragment: &str,
) -> bool {
    let tag_match = image
        .repo_tags
        .iter()
        .any(|tag| tag != "<none>:<none>" && tag.contains(name_fragment));
    let digest_match = image
        .repo_digests
        .iter()
        .any(|digest| digest.contains(name_fragment));
    let label_match = image_labels_match(&image.labels);

    tag_match || digest_match || label_match
}

fn image_labels_match(labels: &HashMap<String, String>) -> bool {
    labels
        .get(LABEL_SOURCE)
        .is_some_and(|value| value == LABEL_SOURCE_VALUE)
        || labels
            .get(LABEL_URL)
            .is_some_and(|value| value == LABEL_URL_VALUE)
        || labels
            .get(LABEL_TITLE)
            .is_some_and(|value| value == LABEL_TITLE_VALUE)
}

async fn remove_image_ids(
    client: &DockerClient,
    image_ids: HashSet<String>,
    force: bool,
) -> Result<usize, DockerError> {
    if image_ids.is_empty() {
        return Ok(0);
    }

    let remove_options = RemoveImageOptionsBuilder::new().force(force).build();
    let mut removed = 0usize;
    for image_id in image_ids {
        let result = client
            .inner()
            .remove_image(&image_id, Some(remove_options.clone()), None)
            .await;
        match result {
            Ok(_) => removed += 1,
            Err(bollard::errors::Error::DockerResponseServerError {
                status_code: 404, ..
            }) => {
                debug!("Docker image already removed: {}", image_id);
            }
            Err(err) => {
                return Err(DockerError::Image(format!(
                    "Failed to remove image {image_id}: {err}"
                )));
            }
        }
    }

    Ok(removed)
}

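/// Builds the sandbox image from the in-memory build context, streaming
/// BuildKit progress through `progress`.
///
/// `tag` defaults to `IMAGE_TAG_DEFAULT` and is remapped for the active
/// profile; `no_cache` disables layer caching; `build_args` are forwarded to
/// the Dockerfile (setting `OPENCODE_SOURCE=local` packages the local
/// opencode checkout into the context). Returns the full image name on
/// success.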
pub async fn build_image(
    client: &DockerClient,
    tag: Option<&str>,
    progress: &mut ProgressReporter,
    no_cache: bool,
    build_args: Option<HashMap<String, String>>,
) -> Result<String, DockerError> {
    let tag = effective_image_tag(tag.unwrap_or(IMAGE_TAG_DEFAULT));
    let full_name = format!("{IMAGE_NAME_GHCR}:{tag}");
    debug!("Building image: {} (no_cache: {})", full_name, no_cache);

    let build_args = build_args.unwrap_or_default();
    let include_local_opencode_submodule = build_args
        .get("OPENCODE_SOURCE")
        .is_some_and(|value| value.eq_ignore_ascii_case("local"));

    let context_msg = if include_local_opencode_submodule {
        "Packaging local opencode checkout"
    } else {
        "Preparing build context"
    };
    progress.update_spinner("build", context_msg);
    let context = create_build_context(BuildContextOptions {
        include_local_opencode_submodule,
    })
    .map_err(|e| DockerError::Build(format!("Failed to create build context: {e}")))?;

    let session_id = format!(
        "opencode-cloud-build-{}",
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos()
    );
    let options = BuildImageOptions {
        t: Some(full_name.clone()),
        dockerfile: "Dockerfile".to_string(),
        version: BuilderVersion::BuilderBuildKit,
        session: Some(session_id),
        rm: true,
        nocache: no_cache,
        buildargs: Some(build_args),
        platform: String::new(),
        target: String::new(),
        ..Default::default()
    };

    let body: Either<Full<Bytes>, _> = Either::Left(Full::new(Bytes::from(context)));

    progress.update_spinner("build", "Sending build context to Docker");

    let mut stream = client.inner().build_image(options, None, Some(body));

    progress.update_spinner("build", "Waiting for Docker build to start");

    let mut maybe_image_id = None;
    let mut log_state = BuildLogState::new();

    while let Some(result) = stream.next().await {
        // Match instead of let-else: the scrutinee is moved by the pattern,
        // so the error must be bound here to be usable in the failure arm.
        let info = match result {
            Ok(info) => info,
            Err(err) => {
                return Err(handle_stream_error(
                    "Build failed",
                    err.to_string(),
                    &log_state,
                    progress,
                ));
            }
        };

        handle_stream_message(&info, progress, &mut log_state);

        if let Some(error_detail) = &info.error_detail
            && let Some(error_msg) = &error_detail.message
        {
            progress.abandon_all(error_msg);
            let context = format_build_error_with_context(
                error_msg,
                &log_state.recent_logs,
                &log_state.error_logs,
                &log_state.recent_buildkit_logs,
            );
            return Err(DockerError::Build(context));
        }

        if let Some(aux) = info.aux {
            match aux {
                BuildInfoAux::Default(image_id) => {
                    if let Some(id) = image_id.id {
                        maybe_image_id = Some(id);
                    }
                }
                BuildInfoAux::BuildKit(status) => {
                    handle_buildkit_status(&status, progress, &mut log_state);
                }
            }
        }
    }

    let image_id = maybe_image_id.unwrap_or_else(|| "unknown".to_string());
    let finish_msg = format!("Build complete: {image_id}");
    progress.finish("build", &finish_msg);

    Ok(full_name)
}

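/// Rolling buffers and BuildKit vertex bookkeeping accumulated while streaming
/// build output, used to assemble error context when a build fails.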
struct BuildLogState {
    recent_logs: VecDeque<String>,
    error_logs: VecDeque<String>,
    recent_buildkit_logs: VecDeque<String>,
    build_log_buffer_size: usize,
    error_log_buffer_size: usize,
    last_buildkit_vertex: Option<String>,
    last_buildkit_vertex_id: Option<String>,
    export_vertex_id: Option<String>,
    export_vertex_name: Option<String>,
    buildkit_logs_by_vertex_id: HashMap<String, String>,
    vertex_name_by_vertex_id: HashMap<String, String>,
}

impl BuildLogState {
    fn new() -> Self {
        let build_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_LOG_TAIL",
            DEFAULT_BUILD_LOG_BUFFER_SIZE,
        );
        let error_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_ERROR_TAIL",
            DEFAULT_ERROR_LOG_BUFFER_SIZE,
        );
        Self {
            recent_logs: VecDeque::with_capacity(build_log_buffer_size),
            error_logs: VecDeque::with_capacity(error_log_buffer_size),
            recent_buildkit_logs: VecDeque::with_capacity(build_log_buffer_size),
            build_log_buffer_size,
            error_log_buffer_size,
            last_buildkit_vertex: None,
            last_buildkit_vertex_id: None,
            export_vertex_id: None,
            export_vertex_name: None,
            buildkit_logs_by_vertex_id: HashMap::new(),
            vertex_name_by_vertex_id: HashMap::new(),
        }
    }
}

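/// Turns raw BuildKit vertex labels into friendlier spinner text, e.g.
/// "[internal] load build definition from Dockerfile" becomes
/// "Loading Dockerfile"; non-internal labels pass through trimmed.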
fn clean_buildkit_label(raw: &str) -> String {
    let trimmed = raw.trim();
    let Some(rest) = trimmed.strip_prefix("[internal] ") else {
        return trimmed.to_string();
    };
    if rest.starts_with("load remote build context") {
        "Loading remote build context".to_string()
    } else if let Some(image) = rest.strip_prefix("load metadata for ") {
        format!("Resolving image {image}")
    } else if rest.starts_with("load build definition") {
        "Loading Dockerfile".to_string()
    } else if rest.starts_with("load build context") {
        "Loading build context".to_string()
    } else {
        let mut chars = rest.chars();
        match chars.next() {
            None => String::new(),
            Some(c) => c.to_uppercase().to_string() + chars.as_str(),
        }
    }
}

fn handle_stream_message(
    info: &bollard::models::BuildInfo,
    progress: &mut ProgressReporter,
    state: &mut BuildLogState,
) {
    let Some(stream_msg) = info.stream.as_deref() else {
        return;
    };
    let msg = stream_msg.trim();
    if msg.is_empty() {
        return;
    }

    if progress.is_plain_output() {
        eprint!("{stream_msg}");
    } else {
        let has_runtime_vertex = state
            .last_buildkit_vertex
            .as_deref()
            .is_some_and(|name| name.starts_with("[runtime "));
        let is_internal_msg = msg.contains("[internal]");
        if !(has_runtime_vertex && is_internal_msg) {
            progress.update_spinner("build", &clean_buildkit_label(stream_msg));
        }
    }

    if state.recent_logs.len() >= state.build_log_buffer_size {
        state.recent_logs.pop_front();
    }
    state.recent_logs.push_back(msg.to_string());

    if is_error_line(msg) {
        if state.error_logs.len() >= state.error_log_buffer_size {
            state.error_logs.pop_front();
        }
        state.error_logs.push_back(msg.to_string());
    }

    if msg.starts_with("Step ") {
        debug!("Build step: {}", msg);
    }
}

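/// Processes one BuildKit status update: records per-vertex logs, tracks the
/// most relevant vertex (export step, else highest runtime step), and updates
/// the build spinner accordingly.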
fn handle_buildkit_status(
    status: &BuildkitStatusResponse,
    progress: &mut ProgressReporter,
    state: &mut BuildLogState,
) {
    let latest_logs = append_buildkit_logs(&mut state.buildkit_logs_by_vertex_id, status);
    update_buildkit_vertex_names(&mut state.vertex_name_by_vertex_id, status);
    update_export_vertex_from_logs(
        &latest_logs,
        &state.vertex_name_by_vertex_id,
        &mut state.export_vertex_id,
        &mut state.export_vertex_name,
    );
    let (vertex_id, vertex_name) = match select_latest_buildkit_vertex(
        status,
        &state.vertex_name_by_vertex_id,
        state.export_vertex_id.as_deref(),
        state.export_vertex_name.as_deref(),
    ) {
        Some((vertex_id, vertex_name)) => (vertex_id, vertex_name),
        None => {
            let Some(log_entry) = latest_logs.last() else {
                return;
            };
            let name = state
                .vertex_name_by_vertex_id
                .get(&log_entry.vertex_id)
                .cloned()
                .or_else(|| state.last_buildkit_vertex.clone())
                .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));
            (log_entry.vertex_id.clone(), name)
        }
    };
    record_buildkit_logs(state, &latest_logs, &vertex_id, &vertex_name);
    state.last_buildkit_vertex_id = Some(vertex_id.clone());
    if state.last_buildkit_vertex.as_deref() != Some(&vertex_name) {
        state.last_buildkit_vertex = Some(vertex_name.clone());
    }

    let display_name = clean_buildkit_label(&vertex_name);
    let message = if progress.is_plain_output() {
        display_name
    } else if let Some(log_entry) = latest_logs
        .iter()
        .rev()
        .find(|entry| entry.vertex_id == vertex_id)
    {
        format!("{display_name} · {}", log_entry.message)
    } else {
        display_name
    };
    progress.update_spinner("build", &message);

    if progress.is_plain_output() {
        for log_entry in latest_logs {
            eprintln!("[{}] {}", log_entry.vertex_id, log_entry.message);
        }
    }
}

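/// Abandons progress output and assembles a build error with recent log
/// context, adding a BuildKit hint when cache-mount failures are suspected.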
fn handle_stream_error(
    prefix: &str,
    error_str: String,
    state: &BuildLogState,
    progress: &mut ProgressReporter,
) -> DockerError {
    progress.abandon_all(prefix);

    let buildkit_hint = if error_str.contains("mount")
        || error_str.contains("--mount")
        || state
            .recent_logs
            .iter()
            .any(|log| log.contains("--mount") && log.contains("cache"))
    {
        "\n\nNote: This Dockerfile uses BuildKit cache mounts (--mount=type=cache).\n\
         The build is configured to use BuildKit, but the Docker daemon may not support it.\n\
         Ensure BuildKit is enabled in Docker Desktop settings and the daemon is restarted."
    } else {
        ""
    };

    let context = format!(
        "{}{}",
        format_build_error_with_context(
            &error_str,
            &state.recent_logs,
            &state.error_logs,
            &state.recent_buildkit_logs,
        ),
        buildkit_hint
    );
    DockerError::Build(context)
}

fn update_buildkit_vertex_names(
    vertex_name_by_vertex_id: &mut HashMap<String, String>,
    status: &BuildkitStatusResponse,
) {
    for vertex in &status.vertexes {
        if vertex.name.is_empty() {
            continue;
        }
        vertex_name_by_vertex_id
            .entry(vertex.digest.clone())
            .or_insert_with(|| vertex.name.clone());
    }
}

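/// Chooses the vertex to display: the export vertex if one has been seen,
/// otherwise the runtime stage with the highest step number, otherwise the
/// first named non-internal vertex (falling back to any named vertex).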
fn select_latest_buildkit_vertex(
    status: &BuildkitStatusResponse,
    vertex_name_by_vertex_id: &HashMap<String, String>,
    export_vertex_id: Option<&str>,
    export_vertex_name: Option<&str>,
) -> Option<(String, String)> {
    if let Some(export_vertex_id) = export_vertex_id {
        let name = export_vertex_name
            .map(str::to_string)
            .or_else(|| vertex_name_by_vertex_id.get(export_vertex_id).cloned())
            .unwrap_or_else(|| format_vertex_fallback_label(export_vertex_id));
        return Some((export_vertex_id.to_string(), name));
    }

    let mut best_runtime: Option<(u32, String, String)> = None;
    let mut fallback: Option<(String, String)> = None;

    for vertex in &status.vertexes {
        let name = if vertex.name.is_empty() {
            vertex_name_by_vertex_id.get(&vertex.digest).cloned()
        } else {
            Some(vertex.name.clone())
        };

        let Some(name) = name else {
            continue;
        };

        if fallback.is_none() && !name.starts_with("[internal]") {
            fallback = Some((vertex.digest.clone(), name.clone()));
        }

        if let Some(step) = parse_runtime_step(&name) {
            match &best_runtime {
                Some((best_step, _, _)) if *best_step >= step => {}
                _ => {
                    best_runtime = Some((step, vertex.digest.clone(), name.clone()));
                }
            }
        }
    }

    if let Some((_, digest, name)) = best_runtime {
        Some((digest, name))
    } else {
        fallback.or_else(|| {
            status.vertexes.iter().find_map(|vertex| {
                let name = if vertex.name.is_empty() {
                    vertex_name_by_vertex_id.get(&vertex.digest).cloned()
                } else {
                    Some(vertex.name.clone())
                };
                name.map(|resolved| (vertex.digest.clone(), resolved))
            })
        })
    }
}

fn parse_runtime_step(name: &str) -> Option<u32> {
    let prefix = "[runtime ";
    let start = name.find(prefix)? + prefix.len();
    let rest = &name[start..];
    let end = rest.find('/')?;
    rest[..end].trim().parse::<u32>().ok()
}

fn format_vertex_fallback_label(vertex_id: &str) -> String {
    let short = vertex_id
        .strip_prefix("sha256:")
        .unwrap_or(vertex_id)
        .chars()
        .take(12)
        .collect::<String>();
    format!("vertex {short}")
}

fn update_export_vertex_from_logs(
    latest_logs: &[BuildkitLogEntry],
    vertex_name_by_vertex_id: &HashMap<String, String>,
    export_vertex_id: &mut Option<String>,
    export_vertex_name: &mut Option<String>,
) {
    if let Some(entry) = latest_logs
        .iter()
        .rev()
        .find(|log| log.message.trim_start().starts_with("exporting to image"))
    {
        *export_vertex_id = Some(entry.vertex_id.clone());
        if let Some(name) = vertex_name_by_vertex_id.get(&entry.vertex_id) {
            *export_vertex_name = Some(name.clone());
        }
    }
}

fn record_buildkit_logs(
    state: &mut BuildLogState,
    latest_logs: &[BuildkitLogEntry],
    current_vertex_id: &str,
    current_vertex_name: &str,
) {
    for log_entry in latest_logs {
        let name = state
            .vertex_name_by_vertex_id
            .get(&log_entry.vertex_id)
            .cloned()
            .or_else(|| {
                if log_entry.vertex_id == current_vertex_id {
                    Some(current_vertex_name.to_string())
                } else {
                    None
                }
            })
            .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));

        let message = log_entry.message.replace('\r', "").trim_end().to_string();
        if message.is_empty() {
            continue;
        }

        if state.recent_buildkit_logs.len() >= state.build_log_buffer_size {
            state.recent_buildkit_logs.pop_front();
        }
        state
            .recent_buildkit_logs
            .push_back(format!("[{name}] {message}"));
    }
}

#[derive(Debug, Clone)]
struct BuildkitLogEntry {
    vertex_id: String,
    message: String,
}

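/// Appends raw BuildKit log chunks to the per-vertex log map and returns the
/// chunks received in this status update, in arrival order.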
fn append_buildkit_logs(
    logs: &mut HashMap<String, String>,
    status: &BuildkitStatusResponse,
) -> Vec<BuildkitLogEntry> {
    let mut latest: Vec<BuildkitLogEntry> = Vec::new();

    for log in &status.logs {
        let vertex_id = log.vertex.clone();
        let message = String::from_utf8_lossy(&log.msg).to_string();
        let entry = logs.entry(vertex_id.clone()).or_default();
        entry.push_str(&message);
        latest.push(BuildkitLogEntry { vertex_id, message });
    }

    latest
}

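/// Pulls the sandbox image, preferring GHCR and falling back to Docker Hub.
/// When the default tag is remapped for an isolated profile, the image is
/// pulled under the registry's default tag and then retagged locally.
/// Returns the full name of the image now available locally.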
pub async fn pull_image(
    client: &DockerClient,
    tag: Option<&str>,
    progress: &mut ProgressReporter,
) -> Result<String, DockerError> {
    let requested_tag = tag.unwrap_or(IMAGE_TAG_DEFAULT);
    let resolved_tag = effective_image_tag(requested_tag);
    let isolated_default_tag =
        requested_tag == IMAGE_TAG_DEFAULT && resolved_tag != IMAGE_TAG_DEFAULT;
    let registry_pull_tag = if isolated_default_tag {
        IMAGE_TAG_DEFAULT
    } else {
        requested_tag
    };

    debug!(
        "Attempting to pull from GHCR: {}:{}",
        IMAGE_NAME_GHCR, registry_pull_tag
    );
    let ghcr_err =
        match pull_from_registry(client, IMAGE_NAME_GHCR, registry_pull_tag, progress).await {
            Ok(()) => {
                if isolated_default_tag {
                    retag_local_image(
                        client,
                        &format!("{IMAGE_NAME_GHCR}:{registry_pull_tag}"),
                        &resolved_tag,
                    )
                    .await?;
                }
                let full_name = format!("{IMAGE_NAME_GHCR}:{resolved_tag}");
                return Ok(full_name);
            }
            Err(e) => e,
        };

    warn!(
        "GHCR pull failed: {}. Trying Docker Hub fallback...",
        ghcr_err
    );

    debug!(
        "Attempting to pull from Docker Hub: {}:{}",
        IMAGE_NAME_DOCKERHUB, registry_pull_tag
    );
    match pull_from_registry(client, IMAGE_NAME_DOCKERHUB, registry_pull_tag, progress).await {
        Ok(()) => {
            if isolated_default_tag {
                retag_local_image(
                    client,
                    &format!("{IMAGE_NAME_DOCKERHUB}:{registry_pull_tag}"),
                    &resolved_tag,
                )
                .await?;
                return Ok(format!("{IMAGE_NAME_GHCR}:{resolved_tag}"));
            }
            let full_name = format!("{IMAGE_NAME_DOCKERHUB}:{resolved_tag}");
            Ok(full_name)
        }
        Err(dockerhub_err) => Err(DockerError::Pull(format!(
            "Failed to pull from both registries. GHCR: {ghcr_err}. Docker Hub: {dockerhub_err}"
        ))),
    }
}

async fn retag_local_image(
    client: &DockerClient,
    source_image: &str,
    target_tag: &str,
) -> Result<(), DockerError> {
    let options = TagImageOptions {
        repo: Some(IMAGE_NAME_GHCR.to_string()),
        tag: Some(target_tag.to_string()),
    };
    client
        .inner()
        .tag_image(source_image, Some(options))
        .await
        .map_err(|e| {
            DockerError::Pull(format!(
                "Failed to retag pulled image {source_image} as {IMAGE_NAME_GHCR}:{target_tag}: {e}"
            ))
        })?;
    Ok(())
}

const MAX_PULL_RETRIES: usize = 3;

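/// Pulls `image:tag`, retrying up to `MAX_PULL_RETRIES` times with
/// exponential backoff (1s, 2s, 4s, ...).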
async fn pull_from_registry(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");

    let mut last_error = None;
    for attempt in 1..=MAX_PULL_RETRIES {
        debug!(
            "Pull attempt {}/{} for {}",
            attempt, MAX_PULL_RETRIES, full_name
        );

        match do_pull(client, image, tag, progress).await {
            Ok(()) => return Ok(()),
            Err(e) => {
                warn!("Pull attempt {} failed: {}", attempt, e);
                last_error = Some(e);

                if attempt < MAX_PULL_RETRIES {
                    let delay_ms = 1000 * (1 << (attempt - 1));
                    tokio::time::sleep(std::time::Duration::from_millis(delay_ms)).await;
                }
            }
        }
    }

    Err(last_error.unwrap_or_else(|| {
        DockerError::Pull(format!(
            "Pull failed for {full_name} after {MAX_PULL_RETRIES} attempts"
        ))
    }))
}

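/// Performs a single pull attempt, streaming per-layer progress to the
/// reporter and surfacing any error reported by the daemon.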
async fn do_pull(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");

    let options = CreateImageOptions {
        from_image: Some(image.to_string()),
        tag: Some(tag.to_string()),
        platform: String::new(),
        ..Default::default()
    };

    let mut stream = client.inner().create_image(Some(options), None, None);

    progress.add_spinner("pull", &format!("Pulling {full_name}..."));

    while let Some(result) = stream.next().await {
        match result {
            Ok(info) => {
                if let Some(error_detail) = &info.error_detail
                    && let Some(error_msg) = &error_detail.message
                {
                    progress.abandon_all(error_msg);
                    return Err(DockerError::Pull(error_msg.to_string()));
                }

                if let Some(layer_id) = &info.id {
                    let status = info.status.as_deref().unwrap_or("");

                    match status {
                        "Already exists" => {
                            progress.finish(layer_id, "Already exists");
                        }
                        "Pull complete" => {
                            progress.finish(layer_id, "Pull complete");
                        }
                        "Downloading" | "Extracting" => {
                            if let Some(progress_detail) = &info.progress_detail {
                                let current = progress_detail.current.unwrap_or(0) as u64;
                                let total = progress_detail.total.unwrap_or(0) as u64;

                                if total > 0 {
                                    progress.update_layer(layer_id, current, total, status);
                                }
                            }
                        }
                        _ => {
                            progress.update_spinner(layer_id, status);
                        }
                    }
                } else if let Some(status) = &info.status {
                    progress.update_spinner("pull", status);
                }
            }
            Err(e) => {
                progress.abandon_all("Pull failed");
                return Err(DockerError::Pull(format!("Pull failed: {e}")));
            }
        }
    }

    progress.finish("pull", &format!("Pull complete: {full_name}"));
    Ok(())
}

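/// Formats a build error together with deduplicated error lines, recent
/// BuildKit output, and recent build output, plus a targeted suggestion for
/// common failure classes (network, disk space, permissions).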
fn format_build_error_with_context(
    error: &str,
    recent_logs: &VecDeque<String>,
    error_logs: &VecDeque<String>,
    recent_buildkit_logs: &VecDeque<String>,
) -> String {
    let mut message = String::new();

    message.push_str(error);

    if !error_logs.is_empty() {
        let recent_set: std::collections::HashSet<_> = recent_logs.iter().collect();
        let unique_errors: Vec<_> = error_logs
            .iter()
            .filter(|line| !recent_set.contains(line))
            .collect();

        if !unique_errors.is_empty() {
            message.push_str("\n\nPotential errors detected during build:");
            for line in unique_errors {
                message.push_str("\n  ");
                message.push_str(line);
            }
        }
    }

    if !recent_buildkit_logs.is_empty() {
        message.push_str("\n\nRecent BuildKit output:");
        for line in recent_buildkit_logs {
            message.push_str("\n  ");
            message.push_str(line);
        }
    }

    if !recent_logs.is_empty() {
        message.push_str("\n\nRecent build output:");
        for line in recent_logs {
            message.push_str("\n  ");
            message.push_str(line);
        }
    } else if recent_buildkit_logs.is_empty() {
        message.push_str("\n\nNo build output was received from the Docker daemon.");
        message.push_str("\nThis usually means the build failed before any logs were streamed.");
    }

    let error_lower = error.to_lowercase();
    if error_lower.contains("network")
        || error_lower.contains("connection")
        || error_lower.contains("timeout")
    {
        message.push_str(
            "\n\nSuggestion: Check your network connection and Docker's ability to reach the internet.",
        );
    } else if error_lower.contains("disk")
        || error_lower.contains("space")
        || error_lower.contains("no space")
    {
        message.push_str(
            "\n\nSuggestion: Free up disk space with 'docker system prune' or check available storage.",
        );
    } else if error_lower.contains("permission") || error_lower.contains("denied") {
        message.push_str(
            "\n\nSuggestion: Check Docker permissions. You may need to add your user to the 'docker' group.",
        );
    }

    message
}

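/// Creates the gzip-compressed tar build context from assets embedded in the
/// binary, optionally packaging the local opencode checkout when a local
/// source build was requested.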
fn create_build_context(options: BuildContextOptions) -> Result<Vec<u8>, io::Error> {
    let repo_root = if options.include_local_opencode_submodule {
        Some(workspace_root_for_build_context()?)
    } else {
        None
    };
    create_build_context_with_repo_root(options, repo_root.as_deref())
}

fn workspace_root_for_build_context() -> Result<PathBuf, io::Error> {
    Path::new(env!("CARGO_MANIFEST_DIR"))
        .join("../..")
        .canonicalize()
}

fn create_build_context_with_repo_root(
    options: BuildContextOptions,
    repo_root: Option<&Path>,
) -> Result<Vec<u8>, io::Error> {
    let mut archive_buffer = Vec::new();

    {
        let encoder = GzEncoder::new(&mut archive_buffer, Compression::default());
        let mut tar = TarBuilder::new(encoder);

        let dockerfile_bytes = DOCKERFILE.as_bytes();
        append_bytes(&mut tar, "Dockerfile", dockerfile_bytes, 0o644)?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/entrypoint.sh",
            ENTRYPOINT_SH,
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode-cloud-bootstrap.sh",
            OPENCODE_CLOUD_BOOTSTRAP_SH,
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/healthcheck.sh",
            HEALTHCHECK_SH,
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode-broker.service",
            include_bytes!("files/opencode-broker.service"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode.service",
            include_bytes!("files/opencode.service"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/pam/opencode",
            include_bytes!("files/pam/opencode"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode.jsonc",
            include_bytes!("files/opencode.jsonc"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/starship.toml",
            include_bytes!("files/starship.toml"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/bashrc.extra",
            include_bytes!("files/bashrc.extra"),
            0o644,
        )?;

        // Always add the packages/opencode directory entry so the Dockerfile
        // can reference it even when the local checkout is not packaged.
        append_directory(
            &mut tar,
            Path::new(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH),
            0o755,
        )?;
        if options.include_local_opencode_submodule {
            let repo_root = repo_root.ok_or_else(|| {
                io::Error::new(
                    io::ErrorKind::NotFound,
                    "Local opencode build requested but workspace root is unavailable",
                )
            })?;
            append_local_opencode_submodule(&mut tar, repo_root)?;
        }

        tar.finish()?;

        let encoder = tar.into_inner()?;
        encoder.finish()?;
    }

    Ok(archive_buffer)
}

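/// Archives the local opencode checkout under packages/opencode, skipping
/// excluded directories/files and materializing in-tree symlinked files.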
fn append_local_opencode_submodule<W: Write>(
    tar: &mut TarBuilder<W>,
    repo_root: &Path,
) -> Result<(), io::Error> {
    let source_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);
    if !source_root.is_dir() {
        return Err(io::Error::new(
            io::ErrorKind::NotFound,
            format!(
                "Local opencode submodule path not found: {}",
                source_root.display()
            ),
        ));
    }
    let canonical_source_root = source_root.canonicalize()?;

    append_local_tree_recursive(
        tar,
        &source_root,
        &canonical_source_root,
        Path::new(""),
        Path::new(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH),
    )
}

fn append_local_tree_recursive<W: Write>(
    tar: &mut TarBuilder<W>,
    source_root: &Path,
    canonical_source_root: &Path,
    relative_path: &Path,
    archive_root: &Path,
) -> Result<(), io::Error> {
    let current_path = source_root.join(relative_path);
    let mut entries: Vec<_> =
        fs::read_dir(&current_path)?.collect::<Result<Vec<_>, io::Error>>()?;
    entries.sort_by_key(|a| a.file_name());

    for entry in entries {
        let file_name = entry.file_name();
        let entry_relative = if relative_path.as_os_str().is_empty() {
            PathBuf::from(&file_name)
        } else {
            relative_path.join(&file_name)
        };

        if should_exclude_local_opencode_path(&entry_relative) {
            continue;
        }

        let entry_path = entry.path();
        let metadata = fs::symlink_metadata(&entry_path)?;
        let archive_path = archive_root.join(&entry_relative);

        if metadata.is_dir() {
            append_directory(tar, &archive_path, mode_from_metadata(&metadata, 0o755))?;
            append_local_tree_recursive(
                tar,
                source_root,
                canonical_source_root,
                &entry_relative,
                archive_root,
            )?;
            continue;
        }

        if metadata.is_file() {
            append_file_from_disk(
                tar,
                &archive_path,
                &entry_path,
                mode_from_metadata(&metadata, 0o644),
            )?;
            continue;
        }

        if metadata.file_type().is_symlink() {
            match resolve_local_symlink_target(&entry_path, canonical_source_root)? {
                Some(target_path) => {
                    let target_metadata = fs::metadata(&target_path)?;
                    if target_metadata.is_file() {
                        append_file_from_disk(
                            tar,
                            &archive_path,
                            &target_path,
                            mode_from_metadata(&target_metadata, 0o644),
                        )?;
                    } else {
                        debug!(
                            "Skipping symlink with non-file target in local opencode context: {} -> {}",
                            entry_path.display(),
                            target_path.display()
                        );
                    }
                }
                None => {
                    debug!(
                        "Skipping symlink outside checkout or unresolved in local opencode context: {}",
                        entry_path.display()
                    );
                }
            }
        }
    }

    Ok(())
}

fn resolve_local_symlink_target(
    link_path: &Path,
    canonical_source_root: &Path,
) -> Result<Option<PathBuf>, io::Error> {
    let link_target = fs::read_link(link_path)?;
    let resolved = if link_target.is_absolute() {
        link_target
    } else {
        link_path
            .parent()
            .unwrap_or_else(|| Path::new(""))
            .join(link_target)
    };

    let canonical_target = match resolved.canonicalize() {
        Ok(path) => path,
        Err(_) => return Ok(None),
    };
    if canonical_target.starts_with(canonical_source_root) {
        Ok(Some(canonical_target))
    } else {
        Ok(None)
    }
}

fn should_exclude_local_opencode_path(relative_path: &Path) -> bool {
    if relative_path.file_name().is_some_and(|name| {
        LOCAL_OPENCODE_EXCLUDED_FILES
            .iter()
            .any(|candidate| name == OsStr::new(candidate))
    }) {
        return true;
    }

    relative_path.components().any(|component| {
        let part = component.as_os_str();
        LOCAL_OPENCODE_EXCLUDED_DIRS
            .iter()
            .any(|candidate| part == OsStr::new(candidate))
    })
}

#[cfg(unix)]
fn mode_from_metadata(metadata: &fs::Metadata, fallback: u32) -> u32 {
    use std::os::unix::fs::PermissionsExt;
    let mode = metadata.permissions().mode() & 0o7777;
    if mode == 0 { fallback } else { mode }
}

#[cfg(not(unix))]
fn mode_from_metadata(_metadata: &fs::Metadata, fallback: u32) -> u32 {
    fallback
}

fn append_directory<W: Write>(
    tar: &mut TarBuilder<W>,
    path: &Path,
    mode: u32,
) -> Result<(), io::Error> {
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_mode(mode);
    header.set_entry_type(tar::EntryType::Directory);
    tar.append_data(&mut header, path, io::empty())?;
    Ok(())
}

fn append_file_from_disk<W: Write>(
    tar: &mut TarBuilder<W>,
    archive_path: &Path,
    source_path: &Path,
    mode: u32,
) -> Result<(), io::Error> {
    let mut file = fs::File::open(source_path)?;
    let metadata = file.metadata()?;
    let mut header = tar::Header::new_gnu();
    header.set_size(metadata.len());
    header.set_mode(mode);
    tar.append_data(&mut header, archive_path, &mut file)?;
    Ok(())
}

fn append_bytes<W: Write>(
    tar: &mut TarBuilder<W>,
    path: &str,
    contents: &[u8],
    mode: u32,
) -> Result<(), io::Error> {
    let mut header = tar::Header::new_gnu();
    header.set_path(path)?;
    header.set_size(contents.len() as u64);
    header.set_mode(mode);
    header.set_cksum();

    tar.append(&header, contents)?;
    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;
    use bollard::models::ImageSummary;
    use flate2::read::GzDecoder;
    use std::collections::{HashMap, HashSet};
    use std::fs;
    use std::io::{Cursor, Read};
    use tar::Archive;
    use tempfile::tempdir;

    fn make_image_summary(
        id: &str,
        tags: Vec<&str>,
        digests: Vec<&str>,
        labels: HashMap<String, String>,
    ) -> ImageSummary {
        ImageSummary {
            id: id.to_string(),
            parent_id: String::new(),
            repo_tags: tags.into_iter().map(|tag| tag.to_string()).collect(),
            repo_digests: digests
                .into_iter()
                .map(|digest| digest.to_string())
                .collect(),
            created: 0,
            size: 0,
            shared_size: -1,
            labels,
            containers: 0,
            manifests: None,
            descriptor: None,
        }
    }

    fn archive_entries(context: Vec<u8>) -> HashSet<String> {
        let cursor = Cursor::new(context);
        let decoder = GzDecoder::new(cursor);
        let mut archive = Archive::new(decoder);
        let mut paths = HashSet::new();
        for entry in archive.entries().expect("should read archive entries") {
            let entry = entry.expect("should read entry");
            let path = entry.path().expect("should read entry path");
            paths.insert(path.to_string_lossy().to_string());
        }
        paths
    }

    fn archive_entry_bytes(context: Vec<u8>, wanted_path: &str) -> Option<Vec<u8>> {
        let cursor = Cursor::new(context);
        let decoder = GzDecoder::new(cursor);
        let mut archive = Archive::new(decoder);
        for entry in archive.entries().expect("should read archive entries") {
            let mut entry = entry.expect("should read entry");
            let path = entry.path().expect("should read entry path");
            if path == Path::new(wanted_path) {
                let mut bytes = Vec::new();
                entry
                    .read_to_end(&mut bytes)
                    .expect("should read entry bytes");
                return Some(bytes);
            }
        }
        None
    }

    #[test]
    fn create_build_context_succeeds() {
        let context =
            create_build_context(BuildContextOptions::default()).expect("should create context");
        assert!(!context.is_empty(), "context should not be empty");

        assert_eq!(context[0], 0x1f, "should be gzip compressed");
        assert_eq!(context[1], 0x8b, "should be gzip compressed");
    }

    #[test]
    fn build_context_includes_docker_assets() {
        let context =
            create_build_context(BuildContextOptions::default()).expect("should create context");
        let cursor = Cursor::new(context);
        let decoder = GzDecoder::new(cursor);
        let mut archive = Archive::new(decoder);
        let mut found_entrypoint = false;
        let mut found_healthcheck = false;
        let mut found_bootstrap_helper = false;

        for entry in archive.entries().expect("should read archive entries") {
            let entry = entry.expect("should read entry");
            let path = entry.path().expect("should read entry path");
            if path == std::path::Path::new("packages/core/src/docker/files/entrypoint.sh") {
                found_entrypoint = true;
            }
            if path == std::path::Path::new("packages/core/src/docker/files/healthcheck.sh") {
                found_healthcheck = true;
            }
            if path
                == std::path::Path::new(
                    "packages/core/src/docker/files/opencode-cloud-bootstrap.sh",
                )
            {
                found_bootstrap_helper = true;
            }
            if found_entrypoint && found_healthcheck && found_bootstrap_helper {
                break;
            }
        }

        assert!(
            found_entrypoint,
            "entrypoint asset should be in the build context"
        );
        assert!(
            found_healthcheck,
            "healthcheck asset should be in the build context"
        );
        assert!(
            found_bootstrap_helper,
            "bootstrap helper asset should be in the build context"
        );
    }

    #[test]
    fn build_context_includes_opencode_placeholder_in_default_mode() {
        let context =
            create_build_context(BuildContextOptions::default()).expect("should create context");
        let entries = archive_entries(context);
        assert!(
            entries
                .iter()
                .any(|path| path.trim_end_matches('/') == "packages/opencode"),
            "default mode should include an empty packages/opencode placeholder"
        );
    }

    #[test]
    fn build_context_local_mode_includes_submodule_and_excludes_heavy_paths() {
        let temp = tempdir().expect("should create tempdir");
        let repo_root = temp.path();

        let submodule_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);
        fs::create_dir_all(submodule_root.join("src")).expect("should create src");
        fs::create_dir_all(submodule_root.join(".git")).expect("should create .git");
        fs::create_dir_all(submodule_root.join("node_modules/pkg"))
            .expect("should create node_modules");
        fs::create_dir_all(submodule_root.join("target/release")).expect("should create target");
        fs::create_dir_all(submodule_root.join("dist")).expect("should create dist");
        fs::create_dir_all(submodule_root.join(".turbo")).expect("should create .turbo");
        fs::create_dir_all(submodule_root.join(".cache")).expect("should create .cache");
        fs::create_dir_all(submodule_root.join(".planning/phases/very-long-planning-phase-name"))
            .expect("should create planning");

        fs::write(submodule_root.join("package.json"), "{}").expect("should write package.json");
        fs::write(submodule_root.join("src/main.ts"), "console.log('ok');")
            .expect("should write source");
        fs::write(submodule_root.join(".git/config"), "dirty").expect("should write .git file");
        fs::write(submodule_root.join("node_modules/pkg/index.js"), "ignored")
            .expect("should write node_modules file");
        fs::write(submodule_root.join("target/release/app"), "ignored")
            .expect("should write target file");
        fs::write(submodule_root.join("dist/ui.js"), "ignored").expect("should write dist file");
        fs::write(submodule_root.join(".turbo/state.json"), "ignored")
            .expect("should write turbo file");
        fs::write(submodule_root.join(".cache/cache.bin"), "ignored")
            .expect("should write cache file");
        fs::write(
            submodule_root.join(".planning/phases/very-long-planning-phase-name/phase.md"),
            "ignored",
        )
        .expect("should write planning file");
        fs::write(submodule_root.join(".DS_Store"), "ignored").expect("should write ds_store");

        let context = create_build_context_with_repo_root(
            BuildContextOptions {
                include_local_opencode_submodule: true,
            },
            Some(repo_root),
        )
        .expect("should create local context");
        let entries = archive_entries(context);

        assert!(
            entries.contains("packages/opencode/package.json"),
            "local mode should include submodule files"
        );
        assert!(
            entries.contains("packages/opencode/src/main.ts"),
            "local mode should include source files"
        );
        assert!(
            !entries.contains("packages/opencode/.git/config"),
            "local mode should exclude .git"
        );
        assert!(
            !entries.contains("packages/opencode/node_modules/pkg/index.js"),
            "local mode should exclude node_modules"
        );
        assert!(
            !entries.contains("packages/opencode/target/release/app"),
            "local mode should exclude target"
        );
        assert!(
            !entries.contains("packages/opencode/dist/ui.js"),
            "local mode should exclude dist"
        );
        assert!(
            !entries.contains("packages/opencode/.turbo/state.json"),
            "local mode should exclude .turbo"
        );
        assert!(
            !entries.contains("packages/opencode/.cache/cache.bin"),
            "local mode should exclude .cache"
        );
        assert!(
            !entries.contains(
                "packages/opencode/.planning/phases/very-long-planning-phase-name/phase.md"
            ),
            "local mode should exclude .planning"
        );
        assert!(
            !entries.contains("packages/opencode/.DS_Store"),
            "local mode should exclude .DS_Store files"
        );
    }

    #[test]
    fn build_context_local_mode_supports_long_non_excluded_paths() {
        let temp = tempdir().expect("should create tempdir");
        let repo_root = temp.path();
        let submodule_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);

        fs::create_dir_all(&submodule_root).expect("should create submodule root");
        fs::write(submodule_root.join("package.json"), "{}").expect("should write package.json");

        let long_segment = "a".repeat(140);
        let long_dir = submodule_root.join("src").join(&long_segment);
        fs::create_dir_all(&long_dir).expect("should create long path directory");
        fs::write(long_dir.join("main.ts"), "console.log('long path');")
            .expect("should write long path file");

        let context = create_build_context_with_repo_root(
            BuildContextOptions {
                include_local_opencode_submodule: true,
            },
            Some(repo_root),
        )
        .expect("should create local context with long paths");
        let entries = archive_entries(context);
        let long_entry = format!("packages/opencode/src/{long_segment}/main.ts");
        assert!(
            entries.contains(&long_entry),
            "long non-excluded path should be archived via GNU long-name handling"
        );
    }

    #[cfg(unix)]
    #[test]
    fn build_context_local_mode_materializes_symlinked_files() {
        use std::os::unix::fs::symlink;

        let temp = tempdir().expect("should create tempdir");
        let repo_root = temp.path();
        let submodule_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);
        let fonts_dir = submodule_root.join("packages/ui/src/assets/fonts");
        fs::create_dir_all(&fonts_dir).expect("should create fonts dir");
        fs::write(submodule_root.join("package.json"), "{}").expect("should write package.json");
        fs::write(
            fonts_dir.join("BlexMonoNerdFontMono-Regular.woff2"),
            b"font-bytes",
        )
        .expect("should write target font");
        symlink(
            "BlexMonoNerdFontMono-Regular.woff2",
            fonts_dir.join("ibm-plex-mono.woff2"),
        )
        .expect("should create symlinked font");

        let context = create_build_context_with_repo_root(
            BuildContextOptions {
                include_local_opencode_submodule: true,
            },
            Some(repo_root),
        )
        .expect("should create local context with symlink");
        let entries = archive_entries(context.clone());

        assert!(
            entries.contains("packages/opencode/packages/ui/src/assets/fonts/ibm-plex-mono.woff2"),
            "local mode should include symlinked asset paths"
        );
        let alias_bytes = archive_entry_bytes(
            context,
            "packages/opencode/packages/ui/src/assets/fonts/ibm-plex-mono.woff2",
        )
        .expect("symlinked asset should contain bytes");
        assert_eq!(alias_bytes, b"font-bytes");
    }

    #[test]
    fn default_tag_is_latest() {
        assert_eq!(IMAGE_TAG_DEFAULT, "latest");
    }

    #[test]
    fn format_build_error_includes_recent_logs() {
        let mut logs = VecDeque::new();
        logs.push_back("Step 1/5 : FROM ubuntu:24.04".to_string());
        logs.push_back("Step 2/5 : RUN apt-get update".to_string());
        logs.push_back("E: Unable to fetch some archives".to_string());
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();

        let result = format_build_error_with_context(
            "Build failed: exit code 1",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Build failed: exit code 1"));
        assert!(result.contains("Recent build output:"));
        assert!(result.contains("Step 1/5"));
        assert!(result.contains("Unable to fetch"));
    }

    #[test]
    fn format_build_error_handles_empty_logs() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result =
            format_build_error_with_context("Stream error", &logs, &error_logs, &buildkit_logs);

        assert!(result.contains("Stream error"));
        assert!(!result.contains("Recent build output:"));
    }

    #[test]
    fn format_build_error_adds_network_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "connection timeout",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Check your network connection"));
    }

    #[test]
    fn format_build_error_adds_disk_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "no space left on device",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Free up disk space"));
    }

    #[test]
    fn format_build_error_shows_error_lines_separately() {
        let mut recent_logs = VecDeque::new();
        recent_logs.push_back("Compiling foo v1.0".to_string());
        recent_logs.push_back("Successfully installed bar".to_string());

        let mut error_logs = VecDeque::new();
        error_logs.push_back("error: failed to compile dust".to_string());
        error_logs.push_back("error: failed to compile glow".to_string());

        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "Build failed",
            &recent_logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Potential errors detected during build:"));
        assert!(result.contains("failed to compile dust"));
        assert!(result.contains("failed to compile glow"));
    }

    #[test]
    fn is_error_line_detects_errors() {
        assert!(is_error_line("error: something failed"));
        assert!(is_error_line("Error: build failed"));
        assert!(is_error_line("Failed to install package"));
        assert!(is_error_line("cannot find module"));
        assert!(is_error_line("Unable to locate package"));
        assert!(!is_error_line("Compiling foo v1.0"));
        assert!(!is_error_line("Successfully installed"));
    }

    #[test]
    fn collect_image_ids_matches_labels() {
        let mut labels = HashMap::new();
        labels.insert(LABEL_SOURCE.to_string(), LABEL_SOURCE_VALUE.to_string());

        let images = vec![
            make_image_summary("sha256:opencode", vec![], vec![], labels),
            make_image_summary(
                "sha256:other",
                vec!["busybox:latest"],
                vec![],
                HashMap::new(),
            ),
        ];

        let ids = collect_image_ids(&images, "opencode-cloud-sandbox");
        assert!(ids.contains("sha256:opencode"));
        assert!(!ids.contains("sha256:other"));
    }

    #[test]
    fn clean_buildkit_label_strips_internal_load_remote_context() {
        assert_eq!(
            clean_buildkit_label("[internal] load remote build context"),
            "Loading remote build context"
        );
    }

    #[test]
    fn clean_buildkit_label_strips_internal_load_metadata() {
        assert_eq!(
            clean_buildkit_label("[internal] load metadata for docker.io/library/ubuntu:24.04"),
            "Resolving image docker.io/library/ubuntu:24.04"
        );
    }

    #[test]
    fn clean_buildkit_label_strips_internal_load_build_definition() {
        assert_eq!(
            clean_buildkit_label("[internal] load build definition from Dockerfile"),
            "Loading Dockerfile"
        );
    }

    #[test]
    fn clean_buildkit_label_strips_internal_load_build_context() {
        assert_eq!(
            clean_buildkit_label("[internal] load build context"),
            "Loading build context"
        );
    }

    #[test]
    fn clean_buildkit_label_capitalizes_unknown_internal() {
        assert_eq!(
            clean_buildkit_label("[internal] some unknown thing"),
            "Some unknown thing"
        );
    }

    #[test]
    fn clean_buildkit_label_preserves_runtime_steps() {
        assert_eq!(
            clean_buildkit_label("[runtime 1/15] RUN apt-get update"),
            "[runtime 1/15] RUN apt-get update"
        );
    }

    #[test]
    fn clean_buildkit_label_preserves_plain_text() {
        assert_eq!(
            clean_buildkit_label("Step 3/10 : COPY . ."),
            "Step 3/10 : COPY . ."
        );
    }

    #[test]
    fn clean_buildkit_label_trims_whitespace() {
        assert_eq!(
            clean_buildkit_label(" [internal] load build context "),
            "Loading build context"
        );
    }
}