use super::progress::ProgressReporter;
use super::{
    CONTAINER_NAME, DOCKERFILE, DockerClient, DockerError, IMAGE_NAME_DOCKERHUB, IMAGE_NAME_GHCR,
    IMAGE_TAG_DEFAULT, active_resource_names, remap_image_tag,
};
use bollard::moby::buildkit::v1::StatusResponse as BuildkitStatusResponse;
use bollard::models::BuildInfoAux;
use bollard::query_parameters::{
    BuildImageOptions, BuilderVersion, CreateImageOptions, ListImagesOptionsBuilder,
    RemoveImageOptionsBuilder, TagImageOptions,
};
use bytes::Bytes;
use flate2::Compression;
use flate2::write::GzEncoder;
use futures_util::StreamExt;
use http_body_util::{Either, Full};
use std::collections::{HashMap, HashSet, VecDeque};
use std::env;
use std::ffi::OsStr;
use std::fs;
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
use tar::Builder as TarBuilder;
use tracing::{debug, warn};

const DEFAULT_BUILD_LOG_BUFFER_SIZE: usize = 20;

const DEFAULT_ERROR_LOG_BUFFER_SIZE: usize = 10;

const LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH: &str = "packages/opencode";
const LOCAL_OPENCODE_EXCLUDED_DIRS: &[&str] = &[
    ".git",
    ".planning",
    "node_modules",
    "target",
    "dist",
    ".turbo",
    ".cache",
];
const LOCAL_OPENCODE_EXCLUDED_FILES: &[&str] = &[".DS_Store"];

#[derive(Debug, Clone, Copy, Default)]
struct BuildContextOptions {
    include_local_opencode_submodule: bool,
}

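/// Applies the parent module's `remap_image_tag` to the requested tag before
/// any image operation, so profile-specific tag remapping is honored.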
fn effective_image_tag(tag: &str) -> String {
    remap_image_tag(tag)
}

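/// Collects the IDs of images belonging to the active profile, matching on
/// either the profile's current/previous image tags or its instance label.
/// Returns `None` when no instance ID is active, so callers can fall back to
/// fragment-based matching.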
fn profile_scoped_image_ids(images: &[bollard::models::ImageSummary]) -> Option<HashSet<String>> {
    let names = active_resource_names();
    let instance_id = names.instance_id.as_deref()?;
    let expected_tags = [
        format!("{IMAGE_NAME_GHCR}:{}", names.image_tag),
        format!("{IMAGE_NAME_DOCKERHUB}:{}", names.image_tag),
        format!("{IMAGE_NAME_GHCR}:{}", names.previous_image_tag),
        format!("{IMAGE_NAME_DOCKERHUB}:{}", names.previous_image_tag),
    ];

    let mut ids = HashSet::new();
    for image in images {
        let tag_match = image
            .repo_tags
            .iter()
            .any(|tag| expected_tags.contains(tag));
        let label_match = image
            .labels
            .get(super::INSTANCE_LABEL_KEY)
            .is_some_and(|value| value == instance_id);
        if tag_match || label_match {
            ids.insert(image.id.clone());
        }
    }
    Some(ids)
}

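/// Reads a log buffer size from the named environment variable, falling back
/// to `default` when the variable is unset or unparsable. Parsed values are
/// clamped to 5..=500, so e.g. `OPENCODE_DOCKER_BUILD_LOG_TAIL=1000` yields
/// 500, while an empty or non-numeric value yields the default.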
fn read_log_buffer_size(var_name: &str, default: usize) -> usize {
    let Ok(value) = env::var(var_name) else {
        return default;
    };
    let Ok(parsed) = value.trim().parse::<usize>() else {
        return default;
    };
    parsed.clamp(5, 500)
}

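/// Heuristically flags a build log line as a potential error by checking for
/// common failure keywords, case-insensitively. For example,
/// `"Failed to install package"` matches while `"Compiling foo v1.0"` does not.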
fn is_error_line(line: &str) -> bool {
    let lower = line.to_lowercase();
    lower.contains("error")
        || lower.contains("failed")
        || lower.contains("cannot")
        || lower.contains("unable to")
        || lower.contains("not found")
        || lower.contains("permission denied")
}

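/// Checks whether `image:tag` (after profile-aware tag remapping) exists in
/// the local Docker daemon. A 404 from the inspect call maps to `Ok(false)`;
/// any other error is propagated.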
pub async fn image_exists(
    client: &DockerClient,
    image: &str,
    tag: &str,
) -> Result<bool, DockerError> {
    let tag = effective_image_tag(tag);
    let full_name = format!("{image}:{tag}");
    debug!("Checking if image exists: {}", full_name);

    match client.inner().inspect_image(&full_name).await {
        Ok(_) => Ok(true),
        Err(bollard::errors::Error::DockerResponseServerError {
            status_code: 404, ..
        }) => Ok(false),
        Err(e) => Err(DockerError::from(e)),
    }
}

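/// Removes all images whose tags, digests, or labels match `name_fragment`,
/// returning how many were removed. When the fragment is the sandbox
/// container name, removal is scoped to the active profile's images where
/// possible so other profiles' images are left untouched.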
pub async fn remove_images_by_name(
    client: &DockerClient,
    name_fragment: &str,
    force: bool,
) -> Result<usize, DockerError> {
    debug!("Removing Docker images matching '{name_fragment}'");

    let images = list_docker_images(client).await?;

    let image_ids = if name_fragment == CONTAINER_NAME {
        profile_scoped_image_ids(&images)
            .unwrap_or_else(|| collect_image_ids(&images, name_fragment))
    } else {
        collect_image_ids(&images, name_fragment)
    };
    remove_image_ids(client, image_ids, force).await
}

async fn list_docker_images(
    client: &DockerClient,
) -> Result<Vec<bollard::models::ImageSummary>, DockerError> {
    let list_options = ListImagesOptionsBuilder::new().all(true).build();
    client
        .inner()
        .list_images(Some(list_options))
        .await
        .map_err(|e| DockerError::Image(format!("Failed to list images: {e}")))
}

const LABEL_TITLE: &str = "org.opencontainers.image.title";
const LABEL_SOURCE: &str = "org.opencontainers.image.source";
const LABEL_URL: &str = "org.opencontainers.image.url";

const LABEL_TITLE_VALUE: &str = "opencode-cloud-sandbox";
const LABEL_SOURCE_VALUE: &str = "https://github.com/pRizz/opencode-cloud";
const LABEL_URL_VALUE: &str = "https://github.com/pRizz/opencode-cloud";

fn collect_image_ids(
    images: &[bollard::models::ImageSummary],
    name_fragment: &str,
) -> HashSet<String> {
    let mut image_ids = HashSet::new();
    for image in images {
        if image_matches_fragment_or_labels(image, name_fragment) {
            image_ids.insert(image.id.clone());
        }
    }
    image_ids
}

fn image_matches_fragment_or_labels(
    image: &bollard::models::ImageSummary,
    name_fragment: &str,
) -> bool {
    let tag_match = image
        .repo_tags
        .iter()
        .any(|tag| tag != "<none>:<none>" && tag.contains(name_fragment));
    let digest_match = image
        .repo_digests
        .iter()
        .any(|digest| digest.contains(name_fragment));
    let label_match = image_labels_match(&image.labels);

    tag_match || digest_match || label_match
}

fn image_labels_match(labels: &HashMap<String, String>) -> bool {
    labels
        .get(LABEL_SOURCE)
        .is_some_and(|value| value == LABEL_SOURCE_VALUE)
        || labels
            .get(LABEL_URL)
            .is_some_and(|value| value == LABEL_URL_VALUE)
        || labels
            .get(LABEL_TITLE)
            .is_some_and(|value| value == LABEL_TITLE_VALUE)
}

async fn remove_image_ids(
    client: &DockerClient,
    image_ids: HashSet<String>,
    force: bool,
) -> Result<usize, DockerError> {
    if image_ids.is_empty() {
        return Ok(0);
    }

    let remove_options = RemoveImageOptionsBuilder::new().force(force).build();
    let mut removed = 0usize;
    for image_id in image_ids {
        let result = client
            .inner()
            .remove_image(&image_id, Some(remove_options.clone()), None)
            .await;
        match result {
            Ok(_) => removed += 1,
            Err(bollard::errors::Error::DockerResponseServerError {
                status_code: 404, ..
            }) => {
                debug!("Docker image already removed: {}", image_id);
            }
            Err(err) => {
                return Err(DockerError::Image(format!(
                    "Failed to remove image {image_id}: {err}"
                )));
            }
        }
    }

    Ok(removed)
}

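/// Builds the sandbox image from the embedded Dockerfile and assets using the
/// daemon's BuildKit builder, streaming progress to `progress` and returning
/// the fully qualified image name on success. Passing the build arg
/// `OPENCODE_SOURCE=local` additionally packs the local opencode submodule
/// into the build context.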
pub async fn build_image(
    client: &DockerClient,
    tag: Option<&str>,
    progress: &mut ProgressReporter,
    no_cache: bool,
    build_args: Option<HashMap<String, String>>,
) -> Result<String, DockerError> {
    let tag = effective_image_tag(tag.unwrap_or(IMAGE_TAG_DEFAULT));
    let full_name = format!("{IMAGE_NAME_GHCR}:{tag}");
    debug!("Building image: {} (no_cache: {})", full_name, no_cache);

    let build_args = build_args.unwrap_or_default();
    let include_local_opencode_submodule = build_args
        .get("OPENCODE_SOURCE")
        .is_some_and(|value| value.eq_ignore_ascii_case("local"));

    let context = create_build_context(BuildContextOptions {
        include_local_opencode_submodule,
    })
    .map_err(|e| DockerError::Build(format!("Failed to create build context: {e}")))?;

    let session_id = format!(
        "opencode-cloud-build-{}",
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos()
    );
    let options = BuildImageOptions {
        t: Some(full_name.clone()),
        dockerfile: "Dockerfile".to_string(),
        version: BuilderVersion::BuilderBuildKit,
        session: Some(session_id),
        rm: true,
        nocache: no_cache,
        buildargs: Some(build_args),
        platform: String::new(),
        target: String::new(),
        ..Default::default()
    };

    let body: Either<Full<Bytes>, _> = Either::Left(Full::new(Bytes::from(context)));

    let mut stream = client.inner().build_image(options, None, Some(body));

    progress.add_spinner("build", "Initializing...");

    let mut maybe_image_id = None;
    let mut log_state = BuildLogState::new();

    while let Some(result) = stream.next().await {
        let info = match result {
            Ok(info) => info,
            Err(err) => {
                return Err(handle_stream_error(
                    "Build failed",
                    err.to_string(),
                    &log_state,
                    progress,
                ));
            }
        };

        handle_stream_message(&info, progress, &mut log_state);

        if let Some(error_detail) = &info.error_detail
            && let Some(error_msg) = &error_detail.message
        {
            progress.abandon_all(error_msg);
            let context = format_build_error_with_context(
                error_msg,
                &log_state.recent_logs,
                &log_state.error_logs,
                &log_state.recent_buildkit_logs,
            );
            return Err(DockerError::Build(context));
        }

        if let Some(aux) = info.aux {
            match aux {
                BuildInfoAux::Default(image_id) => {
                    if let Some(id) = image_id.id {
                        maybe_image_id = Some(id);
                    }
                }
                BuildInfoAux::BuildKit(status) => {
                    handle_buildkit_status(&status, progress, &mut log_state);
                }
            }
        }
    }

    let image_id = maybe_image_id.unwrap_or_else(|| "unknown".to_string());
    let finish_msg = format!("Build complete: {image_id}");
    progress.finish("build", &finish_msg);

    Ok(full_name)
}

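/// Rolling build-log state kept while streaming a build: bounded tails of raw
/// and BuildKit output for error reporting, plus bookkeeping that maps
/// BuildKit vertex digests to human-readable step names.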
struct BuildLogState {
    recent_logs: VecDeque<String>,
    error_logs: VecDeque<String>,
    recent_buildkit_logs: VecDeque<String>,
    build_log_buffer_size: usize,
    error_log_buffer_size: usize,
    last_buildkit_vertex: Option<String>,
    last_buildkit_vertex_id: Option<String>,
    export_vertex_id: Option<String>,
    export_vertex_name: Option<String>,
    buildkit_logs_by_vertex_id: HashMap<String, String>,
    vertex_name_by_vertex_id: HashMap<String, String>,
}

impl BuildLogState {
    fn new() -> Self {
        let build_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_LOG_TAIL",
            DEFAULT_BUILD_LOG_BUFFER_SIZE,
        );
        let error_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_ERROR_TAIL",
            DEFAULT_ERROR_LOG_BUFFER_SIZE,
        );
        Self {
            recent_logs: VecDeque::with_capacity(build_log_buffer_size),
            error_logs: VecDeque::with_capacity(error_log_buffer_size),
            recent_buildkit_logs: VecDeque::with_capacity(build_log_buffer_size),
            build_log_buffer_size,
            error_log_buffer_size,
            last_buildkit_vertex: None,
            last_buildkit_vertex_id: None,
            export_vertex_id: None,
            export_vertex_name: None,
            buildkit_logs_by_vertex_id: HashMap::new(),
            vertex_name_by_vertex_id: HashMap::new(),
        }
    }
}

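/// Handles a classic (non-BuildKit) stream message: echoes it in plain-output
/// mode, updates the build spinner otherwise, and appends it to the bounded
/// recent-log and error-log tails used for failure reporting.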
fn handle_stream_message(
    info: &bollard::models::BuildInfo,
    progress: &mut ProgressReporter,
    state: &mut BuildLogState,
) {
    let Some(stream_msg) = info.stream.as_deref() else {
        return;
    };
    let msg = stream_msg.trim();
    if msg.is_empty() {
        return;
    }

    if progress.is_plain_output() {
        eprint!("{stream_msg}");
    } else {
        let has_runtime_vertex = state
            .last_buildkit_vertex
            .as_deref()
            .is_some_and(|name| name.starts_with("[runtime "));
        let is_internal_msg = msg.contains("[internal]");
        if !(has_runtime_vertex && is_internal_msg) {
            progress.update_spinner("build", stream_msg);
        }
    }

    if state.recent_logs.len() >= state.build_log_buffer_size {
        state.recent_logs.pop_front();
    }
    state.recent_logs.push_back(msg.to_string());

    if is_error_line(msg) {
        if state.error_logs.len() >= state.error_log_buffer_size {
            state.error_logs.pop_front();
        }
        state.error_logs.push_back(msg.to_string());
    }

    if msg.starts_with("Step ") {
        debug!("Build step: {}", msg);
    }
}

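/// Handles one BuildKit status frame: accumulates per-vertex logs, refreshes
/// the digest-to-name map, picks the most relevant vertex (export vertex
/// first, then the highest `[runtime N/M]` step, then a non-internal
/// fallback), and updates the spinner or plain output accordingly.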
fn handle_buildkit_status(
    status: &BuildkitStatusResponse,
    progress: &mut ProgressReporter,
    state: &mut BuildLogState,
) {
    let latest_logs = append_buildkit_logs(&mut state.buildkit_logs_by_vertex_id, status);
    update_buildkit_vertex_names(&mut state.vertex_name_by_vertex_id, status);
    update_export_vertex_from_logs(
        &latest_logs,
        &state.vertex_name_by_vertex_id,
        &mut state.export_vertex_id,
        &mut state.export_vertex_name,
    );
    let (vertex_id, vertex_name) = match select_latest_buildkit_vertex(
        status,
        &state.vertex_name_by_vertex_id,
        state.export_vertex_id.as_deref(),
        state.export_vertex_name.as_deref(),
    ) {
        Some((vertex_id, vertex_name)) => (vertex_id, vertex_name),
        None => {
            let Some(log_entry) = latest_logs.last() else {
                return;
            };
            let name = state
                .vertex_name_by_vertex_id
                .get(&log_entry.vertex_id)
                .cloned()
                .or_else(|| state.last_buildkit_vertex.clone())
                .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));
            (log_entry.vertex_id.clone(), name)
        }
    };
    record_buildkit_logs(state, &latest_logs, &vertex_id, &vertex_name);
    state.last_buildkit_vertex_id = Some(vertex_id.clone());
    if state.last_buildkit_vertex.as_deref() != Some(&vertex_name) {
        state.last_buildkit_vertex = Some(vertex_name.clone());
    }

    let message = if progress.is_plain_output() {
        vertex_name
    } else if let Some(log_entry) = latest_logs
        .iter()
        .rev()
        .find(|entry| entry.vertex_id == vertex_id)
    {
        format!("{vertex_name} · {}", log_entry.message)
    } else {
        vertex_name
    };
    progress.update_spinner("build", &message);

    if progress.is_plain_output() {
        for log_entry in latest_logs {
            eprintln!("[{}] {}", log_entry.vertex_id, log_entry.message);
        }
        return;
    }

    let (Some(current_id), Some(current_name)) = (
        state.last_buildkit_vertex_id.as_ref(),
        state.last_buildkit_vertex.as_ref(),
    ) else {
        return;
    };

    let name = state
        .vertex_name_by_vertex_id
        .get(current_id)
        .unwrap_or(current_name);
    let _ = name;
}

fn handle_stream_error(
    prefix: &str,
    error_str: String,
    state: &BuildLogState,
    progress: &mut ProgressReporter,
) -> DockerError {
    progress.abandon_all(prefix);

    let buildkit_hint = if error_str.contains("mount")
        || error_str.contains("--mount")
        || state
            .recent_logs
            .iter()
            .any(|log| log.contains("--mount") && log.contains("cache"))
    {
        "\n\nNote: This Dockerfile uses BuildKit cache mounts (--mount=type=cache).\n\
         The build is configured to use BuildKit, but the Docker daemon may not support it.\n\
         Ensure BuildKit is enabled in Docker Desktop settings and the daemon is restarted."
    } else {
        ""
    };

    let context = format!(
        "{}{}",
        format_build_error_with_context(
            &error_str,
            &state.recent_logs,
            &state.error_logs,
            &state.recent_buildkit_logs,
        ),
        buildkit_hint
    );
    DockerError::Build(context)
}

fn update_buildkit_vertex_names(
    vertex_name_by_vertex_id: &mut HashMap<String, String>,
    status: &BuildkitStatusResponse,
) {
    for vertex in &status.vertexes {
        if vertex.name.is_empty() {
            continue;
        }
        vertex_name_by_vertex_id
            .entry(vertex.digest.clone())
            .or_insert_with(|| vertex.name.clone());
    }
}

fn select_latest_buildkit_vertex(
    status: &BuildkitStatusResponse,
    vertex_name_by_vertex_id: &HashMap<String, String>,
    export_vertex_id: Option<&str>,
    export_vertex_name: Option<&str>,
) -> Option<(String, String)> {
    if let Some(export_vertex_id) = export_vertex_id {
        let name = export_vertex_name
            .map(str::to_string)
            .or_else(|| vertex_name_by_vertex_id.get(export_vertex_id).cloned())
            .unwrap_or_else(|| format_vertex_fallback_label(export_vertex_id));
        return Some((export_vertex_id.to_string(), name));
    }

    let mut best_runtime: Option<(u32, String, String)> = None;
    let mut fallback: Option<(String, String)> = None;

    for vertex in &status.vertexes {
        let name = if vertex.name.is_empty() {
            vertex_name_by_vertex_id.get(&vertex.digest).cloned()
        } else {
            Some(vertex.name.clone())
        };

        let Some(name) = name else {
            continue;
        };

        if fallback.is_none() && !name.starts_with("[internal]") {
            fallback = Some((vertex.digest.clone(), name.clone()));
        }

        if let Some(step) = parse_runtime_step(&name) {
            match &best_runtime {
                Some((best_step, _, _)) if *best_step >= step => {}
                _ => {
                    best_runtime = Some((step, vertex.digest.clone(), name.clone()));
                }
            }
        }
    }

    if let Some((_, digest, name)) = best_runtime {
        Some((digest, name))
    } else {
        fallback.or_else(|| {
            status.vertexes.iter().find_map(|vertex| {
                let name = if vertex.name.is_empty() {
                    vertex_name_by_vertex_id.get(&vertex.digest).cloned()
                } else {
                    Some(vertex.name.clone())
                };
                name.map(|resolved| (vertex.digest.clone(), resolved))
            })
        })
    }
}

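/// Extracts the step number from a `[runtime N/M]` vertex name. For example,
/// `parse_runtime_step("[runtime 3/12] RUN cargo build")` returns `Some(3)`,
/// while names without the prefix (e.g. `"[internal] load build context"`)
/// return `None`.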
fn parse_runtime_step(name: &str) -> Option<u32> {
    let prefix = "[runtime ";
    let start = name.find(prefix)? + prefix.len();
    let rest = &name[start..];
    let end = rest.find('/')?;
    rest[..end].trim().parse::<u32>().ok()
}

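/// Builds a short display label for a vertex with no known name, e.g.
/// `"sha256:abcdef0123456789..."` becomes `"vertex abcdef012345"`.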
fn format_vertex_fallback_label(vertex_id: &str) -> String {
    let short = vertex_id
        .strip_prefix("sha256:")
        .unwrap_or(vertex_id)
        .chars()
        .take(12)
        .collect::<String>();
    format!("vertex {short}")
}

fn update_export_vertex_from_logs(
    latest_logs: &[BuildkitLogEntry],
    vertex_name_by_vertex_id: &HashMap<String, String>,
    export_vertex_id: &mut Option<String>,
    export_vertex_name: &mut Option<String>,
) {
    if let Some(entry) = latest_logs
        .iter()
        .rev()
        .find(|log| log.message.trim_start().starts_with("exporting to image"))
    {
        *export_vertex_id = Some(entry.vertex_id.clone());
        if let Some(name) = vertex_name_by_vertex_id.get(&entry.vertex_id) {
            *export_vertex_name = Some(name.clone());
        }
    }
}

fn record_buildkit_logs(
    state: &mut BuildLogState,
    latest_logs: &[BuildkitLogEntry],
    current_vertex_id: &str,
    current_vertex_name: &str,
) {
    for log_entry in latest_logs {
        let name = state
            .vertex_name_by_vertex_id
            .get(&log_entry.vertex_id)
            .cloned()
            .or_else(|| {
                if log_entry.vertex_id == current_vertex_id {
                    Some(current_vertex_name.to_string())
                } else {
                    None
                }
            })
            .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));

        let message = log_entry.message.replace('\r', "").trim_end().to_string();
        if message.is_empty() {
            continue;
        }

        if state.recent_buildkit_logs.len() >= state.build_log_buffer_size {
            state.recent_buildkit_logs.pop_front();
        }
        state
            .recent_buildkit_logs
            .push_back(format!("[{name}] {message}"));
    }
}

#[derive(Debug, Clone)]
struct BuildkitLogEntry {
    vertex_id: String,
    message: String,
}

fn append_buildkit_logs(
    logs: &mut HashMap<String, String>,
    status: &BuildkitStatusResponse,
) -> Vec<BuildkitLogEntry> {
    let mut latest: Vec<BuildkitLogEntry> = Vec::new();

    for log in &status.logs {
        let vertex_id = log.vertex.clone();
        let message = String::from_utf8_lossy(&log.msg).to_string();
        let entry = logs.entry(vertex_id.clone()).or_default();
        entry.push_str(&message);
        latest.push(BuildkitLogEntry { vertex_id, message });
    }

    latest
}

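/// Pulls the sandbox image, preferring GHCR and falling back to Docker Hub.
/// When an isolated profile remaps the default tag, the image is pulled under
/// the registry's default tag and then retagged locally with the
/// profile-specific tag. Returns the fully qualified local image name.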
pub async fn pull_image(
    client: &DockerClient,
    tag: Option<&str>,
    progress: &mut ProgressReporter,
) -> Result<String, DockerError> {
    let requested_tag = tag.unwrap_or(IMAGE_TAG_DEFAULT);
    let resolved_tag = effective_image_tag(requested_tag);
    let isolated_default_tag =
        requested_tag == IMAGE_TAG_DEFAULT && resolved_tag != IMAGE_TAG_DEFAULT;
    let registry_pull_tag = if isolated_default_tag {
        IMAGE_TAG_DEFAULT
    } else {
        requested_tag
    };

    debug!(
        "Attempting to pull from GHCR: {}:{}",
        IMAGE_NAME_GHCR, registry_pull_tag
    );
    let ghcr_err =
        match pull_from_registry(client, IMAGE_NAME_GHCR, registry_pull_tag, progress).await {
            Ok(()) => {
                if isolated_default_tag {
                    retag_local_image(
                        client,
                        &format!("{IMAGE_NAME_GHCR}:{registry_pull_tag}"),
                        &resolved_tag,
                    )
                    .await?;
                }
                let full_name = format!("{IMAGE_NAME_GHCR}:{resolved_tag}");
                return Ok(full_name);
            }
            Err(e) => e,
        };

    warn!(
        "GHCR pull failed: {}. Trying Docker Hub fallback...",
        ghcr_err
    );

    debug!(
        "Attempting to pull from Docker Hub: {}:{}",
        IMAGE_NAME_DOCKERHUB, registry_pull_tag
    );
    match pull_from_registry(client, IMAGE_NAME_DOCKERHUB, registry_pull_tag, progress).await {
        Ok(()) => {
            if isolated_default_tag {
                retag_local_image(
                    client,
                    &format!("{IMAGE_NAME_DOCKERHUB}:{registry_pull_tag}"),
                    &resolved_tag,
                )
                .await?;
                return Ok(format!("{IMAGE_NAME_GHCR}:{resolved_tag}"));
            }
            let full_name = format!("{IMAGE_NAME_DOCKERHUB}:{resolved_tag}");
            Ok(full_name)
        }
        Err(dockerhub_err) => Err(DockerError::Pull(format!(
            "Failed to pull from both registries. GHCR: {ghcr_err}. Docker Hub: {dockerhub_err}"
        ))),
    }
}

async fn retag_local_image(
    client: &DockerClient,
    source_image: &str,
    target_tag: &str,
) -> Result<(), DockerError> {
    let options = TagImageOptions {
        repo: Some(IMAGE_NAME_GHCR.to_string()),
        tag: Some(target_tag.to_string()),
    };
    client
        .inner()
        .tag_image(source_image, Some(options))
        .await
        .map_err(|e| {
            DockerError::Pull(format!(
                "Failed to retag pulled image {source_image} as {IMAGE_NAME_GHCR}:{target_tag}: {e}"
            ))
        })?;
    Ok(())
}

const MAX_PULL_RETRIES: usize = 3;

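/// Pulls `image:tag` from a single registry with up to `MAX_PULL_RETRIES`
/// attempts and exponential backoff between failures (1s after the first
/// failed attempt, 2s after the second; the third attempt is final).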
async fn pull_from_registry(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");

    let mut last_error = None;
    for attempt in 1..=MAX_PULL_RETRIES {
        debug!(
            "Pull attempt {}/{} for {}",
            attempt, MAX_PULL_RETRIES, full_name
        );

        match do_pull(client, image, tag, progress).await {
            Ok(()) => return Ok(()),
            Err(e) => {
                warn!("Pull attempt {} failed: {}", attempt, e);
                last_error = Some(e);

                if attempt < MAX_PULL_RETRIES {
                    let delay_ms = 1000 * (1 << (attempt - 1));
                    tokio::time::sleep(std::time::Duration::from_millis(delay_ms)).await;
                }
            }
        }
    }

    Err(last_error.unwrap_or_else(|| {
        DockerError::Pull(format!(
            "Pull failed for {full_name} after {MAX_PULL_RETRIES} attempts"
        ))
    }))
}

async fn do_pull(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");

    let options = CreateImageOptions {
        from_image: Some(image.to_string()),
        tag: Some(tag.to_string()),
        platform: String::new(),
        ..Default::default()
    };

    let mut stream = client.inner().create_image(Some(options), None, None);

    progress.add_spinner("pull", &format!("Pulling {full_name}..."));

    while let Some(result) = stream.next().await {
        match result {
            Ok(info) => {
                if let Some(error_detail) = &info.error_detail
                    && let Some(error_msg) = &error_detail.message
                {
                    progress.abandon_all(error_msg);
                    return Err(DockerError::Pull(error_msg.to_string()));
                }

                if let Some(layer_id) = &info.id {
                    let status = info.status.as_deref().unwrap_or("");

                    match status {
                        "Already exists" => {
                            progress.finish(layer_id, "Already exists");
                        }
                        "Pull complete" => {
                            progress.finish(layer_id, "Pull complete");
                        }
                        "Downloading" | "Extracting" => {
                            if let Some(progress_detail) = &info.progress_detail {
                                let current = progress_detail.current.unwrap_or(0) as u64;
                                let total = progress_detail.total.unwrap_or(0) as u64;

                                if total > 0 {
                                    progress.update_layer(layer_id, current, total, status);
                                }
                            }
                        }
                        _ => {
                            progress.update_spinner(layer_id, status);
                        }
                    }
                } else if let Some(status) = &info.status {
                    progress.update_spinner("pull", status);
                }
            }
            Err(e) => {
                progress.abandon_all("Pull failed");
                return Err(DockerError::Pull(format!("Pull failed: {e}")));
            }
        }
    }

    progress.finish("pull", &format!("Pull complete: {full_name}"));
    Ok(())
}

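/// Formats a build failure message with context: the raw error, any distinct
/// error-looking lines, the recent BuildKit and classic build output tails,
/// and a keyword-based suggestion (network, disk space, or permissions) when
/// one applies.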
fn format_build_error_with_context(
    error: &str,
    recent_logs: &VecDeque<String>,
    error_logs: &VecDeque<String>,
    recent_buildkit_logs: &VecDeque<String>,
) -> String {
    let mut message = String::new();

    message.push_str(error);

    if !error_logs.is_empty() {
        let recent_set: std::collections::HashSet<_> = recent_logs.iter().collect();
        let unique_errors: Vec<_> = error_logs
            .iter()
            .filter(|line| !recent_set.contains(line))
            .collect();

        if !unique_errors.is_empty() {
            message.push_str("\n\nPotential errors detected during build:");
            for line in unique_errors {
                message.push_str("\n ");
                message.push_str(line);
            }
        }
    }

    if !recent_buildkit_logs.is_empty() {
        message.push_str("\n\nRecent BuildKit output:");
        for line in recent_buildkit_logs {
            message.push_str("\n ");
            message.push_str(line);
        }
    }

    if !recent_logs.is_empty() {
        message.push_str("\n\nRecent build output:");
        for line in recent_logs {
            message.push_str("\n ");
            message.push_str(line);
        }
    } else if recent_buildkit_logs.is_empty() {
        message.push_str("\n\nNo build output was received from the Docker daemon.");
        message.push_str("\nThis usually means the build failed before any logs were streamed.");
    }

    let error_lower = error.to_lowercase();
    if error_lower.contains("network")
        || error_lower.contains("connection")
        || error_lower.contains("timeout")
    {
        message.push_str("\n\nSuggestion: Check your network connection and Docker's ability to reach the internet.");
    } else if error_lower.contains("disk")
        || error_lower.contains("space")
        || error_lower.contains("no space")
    {
        message.push_str("\n\nSuggestion: Free up disk space with 'docker system prune' or check available storage.");
    } else if error_lower.contains("permission") || error_lower.contains("denied") {
        message.push_str("\n\nSuggestion: Check Docker permissions. You may need to add your user to the 'docker' group.");
    }

    message
}

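/// Creates the gzipped tar build context in memory from the embedded
/// Dockerfile and asset files. When the options request it, the local
/// opencode submodule is materialized into the context as well, which
/// requires resolving the workspace root first.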
fn create_build_context(options: BuildContextOptions) -> Result<Vec<u8>, io::Error> {
    let repo_root = if options.include_local_opencode_submodule {
        Some(workspace_root_for_build_context()?)
    } else {
        None
    };
    create_build_context_with_repo_root(options, repo_root.as_deref())
}

fn workspace_root_for_build_context() -> Result<PathBuf, io::Error> {
    Path::new(env!("CARGO_MANIFEST_DIR"))
        .join("../..")
        .canonicalize()
}

fn create_build_context_with_repo_root(
    options: BuildContextOptions,
    repo_root: Option<&Path>,
) -> Result<Vec<u8>, io::Error> {
    let mut archive_buffer = Vec::new();

    {
        let encoder = GzEncoder::new(&mut archive_buffer, Compression::default());
        let mut tar = TarBuilder::new(encoder);

        let dockerfile_bytes = DOCKERFILE.as_bytes();
        append_bytes(&mut tar, "Dockerfile", dockerfile_bytes, 0o644)?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/entrypoint.sh",
            include_bytes!("files/entrypoint.sh"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode-cloud-bootstrap.sh",
            include_bytes!("files/opencode-cloud-bootstrap.sh"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/healthcheck.sh",
            include_bytes!("files/healthcheck.sh"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode-broker.service",
            include_bytes!("files/opencode-broker.service"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode.service",
            include_bytes!("files/opencode.service"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/pam/opencode",
            include_bytes!("files/pam/opencode"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode.jsonc",
            include_bytes!("files/opencode.jsonc"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/starship.toml",
            include_bytes!("files/starship.toml"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/bashrc.extra",
            include_bytes!("files/bashrc.extra"),
            0o644,
        )?;

        append_directory(
            &mut tar,
            Path::new(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH),
            0o755,
        )?;
        if options.include_local_opencode_submodule {
            let repo_root = repo_root.ok_or_else(|| {
                io::Error::new(
                    io::ErrorKind::NotFound,
                    "Local opencode build requested but workspace root is unavailable",
                )
            })?;
            append_local_opencode_submodule(&mut tar, repo_root)?;
        }

        tar.finish()?;

        let encoder = tar.into_inner()?;
        encoder.finish()?;
    }

    Ok(archive_buffer)
}

fn append_local_opencode_submodule<W: Write>(
    tar: &mut TarBuilder<W>,
    repo_root: &Path,
) -> Result<(), io::Error> {
    let source_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);
    if !source_root.is_dir() {
        return Err(io::Error::new(
            io::ErrorKind::NotFound,
            format!(
                "Local opencode submodule path not found: {}",
                source_root.display()
            ),
        ));
    }
    let canonical_source_root = source_root.canonicalize()?;

    append_local_tree_recursive(
        tar,
        &source_root,
        &canonical_source_root,
        Path::new(""),
        Path::new(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH),
    )
}

fn append_local_tree_recursive<W: Write>(
    tar: &mut TarBuilder<W>,
    source_root: &Path,
    canonical_source_root: &Path,
    relative_path: &Path,
    archive_root: &Path,
) -> Result<(), io::Error> {
    let current_path = source_root.join(relative_path);
    let mut entries: Vec<_> =
        fs::read_dir(&current_path)?.collect::<Result<Vec<_>, io::Error>>()?;
    entries.sort_by_key(|a| a.file_name());

    for entry in entries {
        let file_name = entry.file_name();
        let entry_relative = if relative_path.as_os_str().is_empty() {
            PathBuf::from(&file_name)
        } else {
            relative_path.join(&file_name)
        };

        if should_exclude_local_opencode_path(&entry_relative) {
            continue;
        }

        let entry_path = entry.path();
        let metadata = fs::symlink_metadata(&entry_path)?;
        let archive_path = archive_root.join(&entry_relative);

        if metadata.is_dir() {
            append_directory(tar, &archive_path, mode_from_metadata(&metadata, 0o755))?;
            append_local_tree_recursive(
                tar,
                source_root,
                canonical_source_root,
                &entry_relative,
                archive_root,
            )?;
            continue;
        }

        if metadata.is_file() {
            append_file_from_disk(
                tar,
                &archive_path,
                &entry_path,
                mode_from_metadata(&metadata, 0o644),
            )?;
            continue;
        }

        if metadata.file_type().is_symlink() {
            match resolve_local_symlink_target(&entry_path, canonical_source_root)? {
                Some(target_path) => {
                    let target_metadata = fs::metadata(&target_path)?;
                    if target_metadata.is_file() {
                        append_file_from_disk(
                            tar,
                            &archive_path,
                            &target_path,
                            mode_from_metadata(&target_metadata, 0o644),
                        )?;
                    } else {
                        debug!(
                            "Skipping symlink with non-file target in local opencode context: {} -> {}",
                            entry_path.display(),
                            target_path.display()
                        );
                    }
                }
                None => {
                    debug!(
                        "Skipping symlink outside checkout or unresolved in local opencode context: {}",
                        entry_path.display()
                    );
                }
            }
        }
    }

    Ok(())
}

1218
1219fn resolve_local_symlink_target(
1220 link_path: &Path,
1221 canonical_source_root: &Path,
1222) -> Result<Option<PathBuf>, io::Error> {
1223 let link_target = fs::read_link(link_path)?;
1224 let resolved = if link_target.is_absolute() {
1225 link_target
1226 } else {
1227 link_path
1228 .parent()
1229 .unwrap_or_else(|| Path::new(""))
1230 .join(link_target)
1231 };
1232
1233 let canonical_target = match resolved.canonicalize() {
1235 Ok(path) => path,
1236 Err(_) => return Ok(None),
1237 };
1238 if canonical_target.starts_with(canonical_source_root) {
1239 Ok(Some(canonical_target))
1240 } else {
1241 Ok(None)
1242 }
1243}
1244
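/// Returns true when a path inside the submodule should be left out of the
/// build context, either because its file name is on the excluded-files list
/// (e.g. `.DS_Store`) or because any path component is an excluded directory;
/// for example `node_modules/pkg/index.js` is excluded via its first component.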
fn should_exclude_local_opencode_path(relative_path: &Path) -> bool {
    if relative_path.file_name().is_some_and(|name| {
        LOCAL_OPENCODE_EXCLUDED_FILES
            .iter()
            .any(|candidate| name == OsStr::new(candidate))
    }) {
        return true;
    }

    relative_path.components().any(|component| {
        let part = component.as_os_str();
        LOCAL_OPENCODE_EXCLUDED_DIRS
            .iter()
            .any(|candidate| part == OsStr::new(candidate))
    })
}

#[cfg(unix)]
fn mode_from_metadata(metadata: &fs::Metadata, fallback: u32) -> u32 {
    use std::os::unix::fs::PermissionsExt;
    let mode = metadata.permissions().mode() & 0o7777;
    if mode == 0 { fallback } else { mode }
}

#[cfg(not(unix))]
fn mode_from_metadata(_metadata: &fs::Metadata, fallback: u32) -> u32 {
    fallback
}

fn append_directory<W: Write>(
    tar: &mut TarBuilder<W>,
    path: &Path,
    mode: u32,
) -> Result<(), io::Error> {
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_mode(mode);
    header.set_entry_type(tar::EntryType::Directory);
    tar.append_data(&mut header, path, io::empty())?;
    Ok(())
}

fn append_file_from_disk<W: Write>(
    tar: &mut TarBuilder<W>,
    archive_path: &Path,
    source_path: &Path,
    mode: u32,
) -> Result<(), io::Error> {
    let mut file = fs::File::open(source_path)?;
    let metadata = file.metadata()?;
    let mut header = tar::Header::new_gnu();
    header.set_size(metadata.len());
    header.set_mode(mode);
    tar.append_data(&mut header, archive_path, &mut file)?;
    Ok(())
}

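/// Appends an in-memory byte slice to the archive under a fixed path. Unlike
/// the directory and file helpers above, this writes the path directly into
/// the tar header via `set_path`, which suits the short embedded asset paths
/// used here; long paths go through `append_data`, which the tests exercise
/// for GNU long-name handling.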
fn append_bytes<W: Write>(
    tar: &mut TarBuilder<W>,
    path: &str,
    contents: &[u8],
    mode: u32,
) -> Result<(), io::Error> {
    let mut header = tar::Header::new_gnu();
    header.set_path(path)?;
    header.set_size(contents.len() as u64);
    header.set_mode(mode);
    header.set_cksum();

    tar.append(&header, contents)?;
    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;
    use bollard::models::ImageSummary;
    use flate2::read::GzDecoder;
    use std::collections::{HashMap, HashSet};
    use std::fs;
    use std::io::{Cursor, Read};
    use tar::Archive;
    use tempfile::tempdir;

    fn make_image_summary(
        id: &str,
        tags: Vec<&str>,
        digests: Vec<&str>,
        labels: HashMap<String, String>,
    ) -> ImageSummary {
        ImageSummary {
            id: id.to_string(),
            parent_id: String::new(),
            repo_tags: tags.into_iter().map(|tag| tag.to_string()).collect(),
            repo_digests: digests
                .into_iter()
                .map(|digest| digest.to_string())
                .collect(),
            created: 0,
            size: 0,
            shared_size: -1,
            labels,
            containers: 0,
            manifests: None,
            descriptor: None,
        }
    }

    fn archive_entries(context: Vec<u8>) -> HashSet<String> {
        let cursor = Cursor::new(context);
        let decoder = GzDecoder::new(cursor);
        let mut archive = Archive::new(decoder);
        let mut paths = HashSet::new();
        for entry in archive.entries().expect("should read archive entries") {
            let entry = entry.expect("should read entry");
            let path = entry.path().expect("should read entry path");
            paths.insert(path.to_string_lossy().to_string());
        }
        paths
    }

    fn archive_entry_bytes(context: Vec<u8>, wanted_path: &str) -> Option<Vec<u8>> {
        let cursor = Cursor::new(context);
        let decoder = GzDecoder::new(cursor);
        let mut archive = Archive::new(decoder);
        for entry in archive.entries().expect("should read archive entries") {
            let mut entry = entry.expect("should read entry");
            let path = entry.path().expect("should read entry path");
            if path == Path::new(wanted_path) {
                let mut bytes = Vec::new();
                entry
                    .read_to_end(&mut bytes)
                    .expect("should read entry bytes");
                return Some(bytes);
            }
        }
        None
    }

    #[test]
    fn create_build_context_succeeds() {
        let context =
            create_build_context(BuildContextOptions::default()).expect("should create context");
        assert!(!context.is_empty(), "context should not be empty");

        assert_eq!(context[0], 0x1f, "should be gzip compressed");
        assert_eq!(context[1], 0x8b, "should be gzip compressed");
    }

    #[test]
    fn build_context_includes_docker_assets() {
        let context =
            create_build_context(BuildContextOptions::default()).expect("should create context");
        let cursor = Cursor::new(context);
        let decoder = GzDecoder::new(cursor);
        let mut archive = Archive::new(decoder);
        let mut found_entrypoint = false;
        let mut found_healthcheck = false;
        let mut found_bootstrap_helper = false;

        for entry in archive.entries().expect("should read archive entries") {
            let entry = entry.expect("should read entry");
            let path = entry.path().expect("should read entry path");
            if path == std::path::Path::new("packages/core/src/docker/files/entrypoint.sh") {
                found_entrypoint = true;
            }
            if path == std::path::Path::new("packages/core/src/docker/files/healthcheck.sh") {
                found_healthcheck = true;
            }
            if path
                == std::path::Path::new(
                    "packages/core/src/docker/files/opencode-cloud-bootstrap.sh",
                )
            {
                found_bootstrap_helper = true;
            }
            if found_entrypoint && found_healthcheck && found_bootstrap_helper {
                break;
            }
        }

        assert!(
            found_entrypoint,
            "entrypoint asset should be in the build context"
        );
        assert!(
            found_healthcheck,
            "healthcheck asset should be in the build context"
        );
        assert!(
            found_bootstrap_helper,
            "bootstrap helper asset should be in the build context"
        );
    }

    #[test]
    fn build_context_includes_opencode_placeholder_in_default_mode() {
        let context =
            create_build_context(BuildContextOptions::default()).expect("should create context");
        let entries = archive_entries(context);
        assert!(
            entries
                .iter()
                .any(|path| path.trim_end_matches('/') == "packages/opencode"),
            "default mode should include an empty packages/opencode placeholder"
        );
    }

    #[test]
    fn build_context_local_mode_includes_submodule_and_excludes_heavy_paths() {
        let temp = tempdir().expect("should create tempdir");
        let repo_root = temp.path();

        let submodule_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);
        fs::create_dir_all(submodule_root.join("src")).expect("should create src");
        fs::create_dir_all(submodule_root.join(".git")).expect("should create .git");
        fs::create_dir_all(submodule_root.join("node_modules/pkg"))
            .expect("should create node_modules");
        fs::create_dir_all(submodule_root.join("target/release")).expect("should create target");
        fs::create_dir_all(submodule_root.join("dist")).expect("should create dist");
        fs::create_dir_all(submodule_root.join(".turbo")).expect("should create .turbo");
        fs::create_dir_all(submodule_root.join(".cache")).expect("should create .cache");
        fs::create_dir_all(submodule_root.join(".planning/phases/very-long-planning-phase-name"))
            .expect("should create planning");

        fs::write(submodule_root.join("package.json"), "{}").expect("should write package.json");
        fs::write(submodule_root.join("src/main.ts"), "console.log('ok');")
            .expect("should write source");
        fs::write(submodule_root.join(".git/config"), "dirty").expect("should write .git file");
        fs::write(submodule_root.join("node_modules/pkg/index.js"), "ignored")
            .expect("should write node_modules file");
        fs::write(submodule_root.join("target/release/app"), "ignored")
            .expect("should write target file");
        fs::write(submodule_root.join("dist/ui.js"), "ignored").expect("should write dist file");
        fs::write(submodule_root.join(".turbo/state.json"), "ignored")
            .expect("should write turbo file");
        fs::write(submodule_root.join(".cache/cache.bin"), "ignored")
            .expect("should write cache file");
        fs::write(
            submodule_root.join(".planning/phases/very-long-planning-phase-name/phase.md"),
            "ignored",
        )
        .expect("should write planning file");
        fs::write(submodule_root.join(".DS_Store"), "ignored").expect("should write ds_store");

        let context = create_build_context_with_repo_root(
            BuildContextOptions {
                include_local_opencode_submodule: true,
            },
            Some(repo_root),
        )
        .expect("should create local context");
        let entries = archive_entries(context);

        assert!(
            entries.contains("packages/opencode/package.json"),
            "local mode should include submodule files"
        );
        assert!(
            entries.contains("packages/opencode/src/main.ts"),
            "local mode should include source files"
        );
        assert!(
            !entries.contains("packages/opencode/.git/config"),
            "local mode should exclude .git"
        );
        assert!(
            !entries.contains("packages/opencode/node_modules/pkg/index.js"),
            "local mode should exclude node_modules"
        );
        assert!(
            !entries.contains("packages/opencode/target/release/app"),
            "local mode should exclude target"
        );
        assert!(
            !entries.contains("packages/opencode/dist/ui.js"),
            "local mode should exclude dist"
        );
        assert!(
            !entries.contains("packages/opencode/.turbo/state.json"),
            "local mode should exclude .turbo"
        );
        assert!(
            !entries.contains("packages/opencode/.cache/cache.bin"),
            "local mode should exclude .cache"
        );
        assert!(
            !entries.contains(
                "packages/opencode/.planning/phases/very-long-planning-phase-name/phase.md"
            ),
            "local mode should exclude .planning"
        );
        assert!(
            !entries.contains("packages/opencode/.DS_Store"),
            "local mode should exclude .DS_Store files"
        );
    }

    #[test]
    fn build_context_local_mode_supports_long_non_excluded_paths() {
        let temp = tempdir().expect("should create tempdir");
        let repo_root = temp.path();
        let submodule_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);

        fs::create_dir_all(&submodule_root).expect("should create submodule root");
        fs::write(submodule_root.join("package.json"), "{}").expect("should write package.json");

        let long_segment = "a".repeat(140);
        let long_dir = submodule_root.join("src").join(&long_segment);
        fs::create_dir_all(&long_dir).expect("should create long path directory");
        fs::write(long_dir.join("main.ts"), "console.log('long path');")
            .expect("should write long path file");

        let context = create_build_context_with_repo_root(
            BuildContextOptions {
                include_local_opencode_submodule: true,
            },
            Some(repo_root),
        )
        .expect("should create local context with long paths");
        let entries = archive_entries(context);
        let long_entry = format!("packages/opencode/src/{long_segment}/main.ts");
        assert!(
            entries.contains(&long_entry),
            "long non-excluded path should be archived via GNU long-name handling"
        );
    }

    #[cfg(unix)]
    #[test]
    fn build_context_local_mode_materializes_symlinked_files() {
        use std::os::unix::fs::symlink;

        let temp = tempdir().expect("should create tempdir");
        let repo_root = temp.path();
        let submodule_root = repo_root.join(LOCAL_OPENCODE_SUBMODULE_RELATIVE_PATH);
        let fonts_dir = submodule_root.join("packages/ui/src/assets/fonts");
        fs::create_dir_all(&fonts_dir).expect("should create fonts dir");
        fs::write(submodule_root.join("package.json"), "{}").expect("should write package.json");
        fs::write(
            fonts_dir.join("BlexMonoNerdFontMono-Regular.woff2"),
            b"font-bytes",
        )
        .expect("should write target font");
        symlink(
            "BlexMonoNerdFontMono-Regular.woff2",
            fonts_dir.join("ibm-plex-mono.woff2"),
        )
        .expect("should create symlinked font");

        let context = create_build_context_with_repo_root(
            BuildContextOptions {
                include_local_opencode_submodule: true,
            },
            Some(repo_root),
        )
        .expect("should create local context with symlink");
        let entries = archive_entries(context.clone());

        assert!(
            entries.contains("packages/opencode/packages/ui/src/assets/fonts/ibm-plex-mono.woff2"),
            "local mode should include symlinked asset paths"
        );
        let alias_bytes = archive_entry_bytes(
            context,
            "packages/opencode/packages/ui/src/assets/fonts/ibm-plex-mono.woff2",
        )
        .expect("symlinked asset should contain bytes");
        assert_eq!(alias_bytes, b"font-bytes");
    }

    #[test]
    fn default_tag_is_latest() {
        assert_eq!(IMAGE_TAG_DEFAULT, "latest");
    }

    #[test]
    fn format_build_error_includes_recent_logs() {
        let mut logs = VecDeque::new();
        logs.push_back("Step 1/5 : FROM ubuntu:24.04".to_string());
        logs.push_back("Step 2/5 : RUN apt-get update".to_string());
        logs.push_back("E: Unable to fetch some archives".to_string());
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();

        let result = format_build_error_with_context(
            "Build failed: exit code 1",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Build failed: exit code 1"));
        assert!(result.contains("Recent build output:"));
        assert!(result.contains("Step 1/5"));
        assert!(result.contains("Unable to fetch"));
    }

    #[test]
    fn format_build_error_handles_empty_logs() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result =
            format_build_error_with_context("Stream error", &logs, &error_logs, &buildkit_logs);

        assert!(result.contains("Stream error"));
        assert!(!result.contains("Recent build output:"));
    }

    #[test]
    fn format_build_error_adds_network_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "connection timeout",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Check your network connection"));
    }

    #[test]
    fn format_build_error_adds_disk_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "no space left on device",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Free up disk space"));
    }

    #[test]
    fn format_build_error_shows_error_lines_separately() {
        let mut recent_logs = VecDeque::new();
        recent_logs.push_back("Compiling foo v1.0".to_string());
        recent_logs.push_back("Successfully installed bar".to_string());

        let mut error_logs = VecDeque::new();
        error_logs.push_back("error: failed to compile dust".to_string());
        error_logs.push_back("error: failed to compile glow".to_string());

        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "Build failed",
            &recent_logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Potential errors detected during build:"));
        assert!(result.contains("failed to compile dust"));
        assert!(result.contains("failed to compile glow"));
    }

    #[test]
    fn is_error_line_detects_errors() {
        assert!(is_error_line("error: something failed"));
        assert!(is_error_line("Error: build failed"));
        assert!(is_error_line("Failed to install package"));
        assert!(is_error_line("cannot find module"));
        assert!(is_error_line("Unable to locate package"));
        assert!(!is_error_line("Compiling foo v1.0"));
        assert!(!is_error_line("Successfully installed"));
    }

    #[test]
    fn collect_image_ids_matches_labels() {
        let mut labels = HashMap::new();
        labels.insert(LABEL_SOURCE.to_string(), LABEL_SOURCE_VALUE.to_string());

        let images = vec![
            make_image_summary("sha256:opencode", vec![], vec![], labels),
            make_image_summary(
                "sha256:other",
                vec!["busybox:latest"],
                vec![],
                HashMap::new(),
            ),
        ];

        let ids = collect_image_ids(&images, "opencode-cloud-sandbox");
        assert!(ids.contains("sha256:opencode"));
        assert!(!ids.contains("sha256:other"));
    }
}
1743}