use super::progress::ProgressReporter;
use super::{
    DOCKERFILE, DockerClient, DockerError, IMAGE_NAME_DOCKERHUB, IMAGE_NAME_GHCR, IMAGE_TAG_DEFAULT,
};
use bollard::moby::buildkit::v1::StatusResponse as BuildkitStatusResponse;
use bollard::models::BuildInfoAux;
use bollard::query_parameters::{
    BuildImageOptions, BuilderVersion, CreateImageOptions, ListImagesOptionsBuilder,
    RemoveImageOptionsBuilder,
};
use bytes::Bytes;
use flate2::Compression;
use flate2::write::GzEncoder;
use futures_util::StreamExt;
use http_body_util::{Either, Full};
use std::collections::{HashMap, HashSet, VecDeque};
use std::env;
use std::io::Write;
use std::time::{SystemTime, UNIX_EPOCH};
use tar::Builder as TarBuilder;
use tracing::{debug, warn};

const DEFAULT_BUILD_LOG_BUFFER_SIZE: usize = 20;

const DEFAULT_ERROR_LOG_BUFFER_SIZE: usize = 10;

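/// Read a log-buffer size from the given environment variable, falling back to
/// `default` when it is unset or unparsable, and clamping the result to 5..=500.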
fn read_log_buffer_size(var_name: &str, default: usize) -> usize {
    let Ok(value) = env::var(var_name) else {
        return default;
    };
    let Ok(parsed) = value.trim().parse::<usize>() else {
        return default;
    };
    parsed.clamp(5, 500)
}

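/// Heuristic check for build output lines that look like errors.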
fn is_error_line(line: &str) -> bool {
    let lower = line.to_lowercase();
    lower.contains("error")
        || lower.contains("failed")
        || lower.contains("cannot")
        || lower.contains("unable to")
        || lower.contains("not found")
        || lower.contains("permission denied")
}

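/// Check whether `image:tag` is present in the local Docker daemon.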
pub async fn image_exists(
    client: &DockerClient,
    image: &str,
    tag: &str,
) -> Result<bool, DockerError> {
    let full_name = format!("{image}:{tag}");
    debug!("Checking if image exists: {}", full_name);

    match client.inner().inspect_image(&full_name).await {
        Ok(_) => Ok(true),
        Err(bollard::errors::Error::DockerResponseServerError {
            status_code: 404, ..
        }) => Ok(false),
        Err(e) => Err(DockerError::from(e)),
    }
}

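/// Remove local images whose tags or digests contain `name_fragment`, or whose
/// OCI labels identify the opencode-cloud sandbox image. Returns the count removed.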
pub async fn remove_images_by_name(
    client: &DockerClient,
    name_fragment: &str,
    force: bool,
) -> Result<usize, DockerError> {
    debug!("Removing Docker images matching '{name_fragment}'");

    let images = list_docker_images(client).await?;

    let image_ids = collect_image_ids(&images, name_fragment);
    remove_image_ids(client, image_ids, force).await
}

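/// List all images known to the Docker daemon, including intermediate ones.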
async fn list_docker_images(
    client: &DockerClient,
) -> Result<Vec<bollard::models::ImageSummary>, DockerError> {
    let list_options = ListImagesOptionsBuilder::new().all(true).build();
    client
        .inner()
        .list_images(Some(list_options))
        .await
        .map_err(|e| DockerError::Image(format!("Failed to list images: {e}")))
}

const LABEL_TITLE: &str = "org.opencontainers.image.title";
const LABEL_SOURCE: &str = "org.opencontainers.image.source";
const LABEL_URL: &str = "org.opencontainers.image.url";

const LABEL_TITLE_VALUE: &str = "opencode-cloud-sandbox";
const LABEL_SOURCE_VALUE: &str = "https://github.com/pRizz/opencode-cloud";
const LABEL_URL_VALUE: &str = "https://github.com/pRizz/opencode-cloud";

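/// Collect the IDs of images matching `name_fragment` or the sandbox OCI labels.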
fn collect_image_ids(
    images: &[bollard::models::ImageSummary],
    name_fragment: &str,
) -> HashSet<String> {
    let mut image_ids = HashSet::new();
    for image in images {
        if image_matches_fragment_or_labels(image, name_fragment) {
            image_ids.insert(image.id.clone());
        }
    }
    image_ids
}

fn image_matches_fragment_or_labels(
    image: &bollard::models::ImageSummary,
    name_fragment: &str,
) -> bool {
    let tag_match = image
        .repo_tags
        .iter()
        .any(|tag| tag != "<none>:<none>" && tag.contains(name_fragment));
    let digest_match = image
        .repo_digests
        .iter()
        .any(|digest| digest.contains(name_fragment));
    let label_match = image_labels_match(&image.labels);

    tag_match || digest_match || label_match
}

fn image_labels_match(labels: &HashMap<String, String>) -> bool {
    labels
        .get(LABEL_SOURCE)
        .is_some_and(|value| value == LABEL_SOURCE_VALUE)
        || labels
            .get(LABEL_URL)
            .is_some_and(|value| value == LABEL_URL_VALUE)
        || labels
            .get(LABEL_TITLE)
            .is_some_and(|value| value == LABEL_TITLE_VALUE)
}

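/// Remove each image by ID, counting a 404 (already removed) as a no-op rather
/// than an error. Returns the number of images actually removed.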
async fn remove_image_ids(
    client: &DockerClient,
    image_ids: HashSet<String>,
    force: bool,
) -> Result<usize, DockerError> {
    if image_ids.is_empty() {
        return Ok(0);
    }

    let remove_options = RemoveImageOptionsBuilder::new().force(force).build();
    let mut removed = 0usize;
    for image_id in image_ids {
        let result = client
            .inner()
            .remove_image(&image_id, Some(remove_options.clone()), None)
            .await;
        match result {
            Ok(_) => removed += 1,
            Err(bollard::errors::Error::DockerResponseServerError {
                status_code: 404, ..
            }) => {
                debug!("Docker image already removed: {}", image_id);
            }
            Err(err) => {
                return Err(DockerError::Image(format!(
                    "Failed to remove image {image_id}: {err}"
                )));
            }
        }
    }

    Ok(removed)
}

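/// Build the sandbox image from the embedded Dockerfile via BuildKit, reporting
/// progress through `progress`. Returns the full image name that was tagged.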
pub async fn build_image(
    client: &DockerClient,
    tag: Option<&str>,
    progress: &mut ProgressReporter,
    no_cache: bool,
    build_args: Option<HashMap<String, String>>,
) -> Result<String, DockerError> {
    let tag = tag.unwrap_or(IMAGE_TAG_DEFAULT);
    let full_name = format!("{IMAGE_NAME_GHCR}:{tag}");
    debug!("Building image: {} (no_cache: {})", full_name, no_cache);

    let context = create_build_context()
        .map_err(|e| DockerError::Build(format!("Failed to create build context: {e}")))?;

    let session_id = format!(
        "opencode-cloud-build-{}",
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos()
    );
    let build_args = build_args.unwrap_or_default();
    let options = BuildImageOptions {
        t: Some(full_name.clone()),
        dockerfile: "Dockerfile".to_string(),
        version: BuilderVersion::BuilderBuildKit,
        session: Some(session_id),
        rm: true,
        nocache: no_cache,
        buildargs: Some(build_args),
        platform: String::new(),
        target: String::new(),
        ..Default::default()
    };

    let body: Either<Full<Bytes>, _> = Either::Left(Full::new(Bytes::from(context)));

    let mut stream = client.inner().build_image(options, None, Some(body));

    progress.add_spinner("build", "Initializing...");

    let mut maybe_image_id = None;
    let mut log_state = BuildLogState::new();

    while let Some(result) = stream.next().await {
        let Ok(info) = result else {
            return Err(handle_stream_error(
                "Build failed",
                result.expect_err("checked error").to_string(),
                &log_state,
                progress,
            ));
        };

        handle_stream_message(&info, progress, &mut log_state);

        if let Some(error_detail) = &info.error_detail
            && let Some(error_msg) = &error_detail.message
        {
            progress.abandon_all(error_msg);
            let context = format_build_error_with_context(
                error_msg,
                &log_state.recent_logs,
                &log_state.error_logs,
                &log_state.recent_buildkit_logs,
            );
            return Err(DockerError::Build(context));
        }

        if let Some(aux) = info.aux {
            match aux {
                BuildInfoAux::Default(image_id) => {
                    if let Some(id) = image_id.id {
                        maybe_image_id = Some(id);
                    }
                }
                BuildInfoAux::BuildKit(status) => {
                    handle_buildkit_status(&status, progress, &mut log_state);
                }
            }
        }
    }

    let image_id = maybe_image_id.unwrap_or_else(|| "unknown".to_string());
    let finish_msg = format!("Build complete: {image_id}");
    progress.finish("build", &finish_msg);

    Ok(full_name)
}

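/// Rolling log buffers and BuildKit vertex bookkeeping accumulated while the
/// build stream is consumed, used to attach context to build errors.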
struct BuildLogState {
    recent_logs: VecDeque<String>,
    error_logs: VecDeque<String>,
    recent_buildkit_logs: VecDeque<String>,
    build_log_buffer_size: usize,
    error_log_buffer_size: usize,
    last_buildkit_vertex: Option<String>,
    last_buildkit_vertex_id: Option<String>,
    export_vertex_id: Option<String>,
    export_vertex_name: Option<String>,
    buildkit_logs_by_vertex_id: HashMap<String, String>,
    vertex_name_by_vertex_id: HashMap<String, String>,
}

impl BuildLogState {
    fn new() -> Self {
        let build_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_LOG_TAIL",
            DEFAULT_BUILD_LOG_BUFFER_SIZE,
        );
        let error_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_ERROR_TAIL",
            DEFAULT_ERROR_LOG_BUFFER_SIZE,
        );
        Self {
            recent_logs: VecDeque::with_capacity(build_log_buffer_size),
            error_logs: VecDeque::with_capacity(error_log_buffer_size),
            recent_buildkit_logs: VecDeque::with_capacity(build_log_buffer_size),
            build_log_buffer_size,
            error_log_buffer_size,
            last_buildkit_vertex: None,
            last_buildkit_vertex_id: None,
            export_vertex_id: None,
            export_vertex_name: None,
            buildkit_logs_by_vertex_id: HashMap::new(),
            vertex_name_by_vertex_id: HashMap::new(),
        }
    }
}

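/// Handle a plain (non-BuildKit) stream message: echo it in plain-output mode,
/// update the build spinner, and record it in the rolling log buffers.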
fn handle_stream_message(
    info: &bollard::models::BuildInfo,
    progress: &mut ProgressReporter,
    state: &mut BuildLogState,
) {
    let Some(stream_msg) = info.stream.as_deref() else {
        return;
    };
    let msg = stream_msg.trim();
    if msg.is_empty() {
        return;
    }

    if progress.is_plain_output() {
        eprint!("{stream_msg}");
    } else {
        let has_runtime_vertex = state
            .last_buildkit_vertex
            .as_deref()
            .is_some_and(|name| name.starts_with("[runtime "));
        let is_internal_msg = msg.contains("[internal]");
        if !(has_runtime_vertex && is_internal_msg) {
            progress.update_spinner("build", stream_msg);
        }
    }

    if state.recent_logs.len() >= state.build_log_buffer_size {
        state.recent_logs.pop_front();
    }
    state.recent_logs.push_back(msg.to_string());

    if is_error_line(msg) {
        if state.error_logs.len() >= state.error_log_buffer_size {
            state.error_logs.pop_front();
        }
        state.error_logs.push_back(msg.to_string());
    }

    if msg.starts_with("Step ") {
        debug!("Build step: {}", msg);
    }
}

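/// Fold a BuildKit status update into the log state and surface the most
/// relevant vertex and its latest log line on the build spinner.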
fn handle_buildkit_status(
    status: &BuildkitStatusResponse,
    progress: &mut ProgressReporter,
    state: &mut BuildLogState,
) {
    let latest_logs = append_buildkit_logs(&mut state.buildkit_logs_by_vertex_id, status);
    update_buildkit_vertex_names(&mut state.vertex_name_by_vertex_id, status);
    update_export_vertex_from_logs(
        &latest_logs,
        &state.vertex_name_by_vertex_id,
        &mut state.export_vertex_id,
        &mut state.export_vertex_name,
    );
    let (vertex_id, vertex_name) = match select_latest_buildkit_vertex(
        status,
        &state.vertex_name_by_vertex_id,
        state.export_vertex_id.as_deref(),
        state.export_vertex_name.as_deref(),
    ) {
        Some((vertex_id, vertex_name)) => (vertex_id, vertex_name),
        None => {
            let Some(log_entry) = latest_logs.last() else {
                return;
            };
            let name = state
                .vertex_name_by_vertex_id
                .get(&log_entry.vertex_id)
                .cloned()
                .or_else(|| state.last_buildkit_vertex.clone())
                .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));
            (log_entry.vertex_id.clone(), name)
        }
    };
    record_buildkit_logs(state, &latest_logs, &vertex_id, &vertex_name);
    state.last_buildkit_vertex_id = Some(vertex_id.clone());
    if state.last_buildkit_vertex.as_deref() != Some(&vertex_name) {
        state.last_buildkit_vertex = Some(vertex_name.clone());
    }

    let message = if progress.is_plain_output() {
        vertex_name
    } else if let Some(log_entry) = latest_logs
        .iter()
        .rev()
        .find(|entry| entry.vertex_id == vertex_id)
    {
        format!("{vertex_name} · {}", log_entry.message)
    } else {
        vertex_name
    };
    progress.update_spinner("build", &message);

    if progress.is_plain_output() {
        for log_entry in latest_logs {
            eprintln!("[{}] {}", log_entry.vertex_id, log_entry.message);
        }
        return;
    }

    let (Some(current_id), Some(current_name)) = (
        state.last_buildkit_vertex_id.as_ref(),
        state.last_buildkit_vertex.as_ref(),
    ) else {
        return;
    };

    let name = state
        .vertex_name_by_vertex_id
        .get(current_id)
        .unwrap_or(current_name);
    let _ = name;
}

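/// Convert a failed build stream into a `DockerError::Build` carrying recent
/// log context, plus a BuildKit hint when cache mounts look involved.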
fn handle_stream_error(
    prefix: &str,
    error_str: String,
    state: &BuildLogState,
    progress: &mut ProgressReporter,
) -> DockerError {
    progress.abandon_all(prefix);

    let buildkit_hint = if error_str.contains("mount")
        || error_str.contains("--mount")
        || state
            .recent_logs
            .iter()
            .any(|log| log.contains("--mount") && log.contains("cache"))
    {
        "\n\nNote: This Dockerfile uses BuildKit cache mounts (--mount=type=cache).\n\
         The build is configured to use BuildKit, but the Docker daemon may not support it.\n\
         Ensure BuildKit is enabled in Docker Desktop settings and the daemon is restarted."
    } else {
        ""
    };

    let context = format!(
        "{}{}",
        format_build_error_with_context(
            &error_str,
            &state.recent_logs,
            &state.error_logs,
            &state.recent_buildkit_logs,
        ),
        buildkit_hint
    );
    DockerError::Build(context)
}

fn update_buildkit_vertex_names(
    vertex_name_by_vertex_id: &mut HashMap<String, String>,
    status: &BuildkitStatusResponse,
) {
    for vertex in &status.vertexes {
        if vertex.name.is_empty() {
            continue;
        }
        vertex_name_by_vertex_id
            .entry(vertex.digest.clone())
            .or_insert_with(|| vertex.name.clone());
    }
}

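/// Choose the vertex to display: the export vertex when known, otherwise the
/// highest-numbered `[runtime N/M]` step, otherwise the first named non-internal
/// vertex (or any named vertex as a last resort).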
fn select_latest_buildkit_vertex(
    status: &BuildkitStatusResponse,
    vertex_name_by_vertex_id: &HashMap<String, String>,
    export_vertex_id: Option<&str>,
    export_vertex_name: Option<&str>,
) -> Option<(String, String)> {
    if let Some(export_vertex_id) = export_vertex_id {
        let name = export_vertex_name
            .map(str::to_string)
            .or_else(|| vertex_name_by_vertex_id.get(export_vertex_id).cloned())
            .unwrap_or_else(|| format_vertex_fallback_label(export_vertex_id));
        return Some((export_vertex_id.to_string(), name));
    }

    let mut best_runtime: Option<(u32, String, String)> = None;
    let mut fallback: Option<(String, String)> = None;

    for vertex in &status.vertexes {
        let name = if vertex.name.is_empty() {
            vertex_name_by_vertex_id.get(&vertex.digest).cloned()
        } else {
            Some(vertex.name.clone())
        };

        let Some(name) = name else {
            continue;
        };

        if fallback.is_none() && !name.starts_with("[internal]") {
            fallback = Some((vertex.digest.clone(), name.clone()));
        }

        if let Some(step) = parse_runtime_step(&name) {
            match &best_runtime {
                Some((best_step, _, _)) if *best_step >= step => {}
                _ => {
                    best_runtime = Some((step, vertex.digest.clone(), name.clone()));
                }
            }
        }
    }

    if let Some((_, digest, name)) = best_runtime {
        Some((digest, name))
    } else {
        fallback.or_else(|| {
            status.vertexes.iter().find_map(|vertex| {
                let name = if vertex.name.is_empty() {
                    vertex_name_by_vertex_id.get(&vertex.digest).cloned()
                } else {
                    Some(vertex.name.clone())
                };
                name.map(|resolved| (vertex.digest.clone(), resolved))
            })
        })
    }
}

fn parse_runtime_step(name: &str) -> Option<u32> {
    let prefix = "[runtime ";
    let start = name.find(prefix)? + prefix.len();
    let rest = &name[start..];
    let end = rest.find('/')?;
    rest[..end].trim().parse::<u32>().ok()
}

fn format_vertex_fallback_label(vertex_id: &str) -> String {
    let short = vertex_id
        .strip_prefix("sha256:")
        .unwrap_or(vertex_id)
        .chars()
        .take(12)
        .collect::<String>();
    format!("vertex {short}")
}

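/// Remember the vertex that logged "exporting to image" so later updates keep
/// showing the export step.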
fn update_export_vertex_from_logs(
    latest_logs: &[BuildkitLogEntry],
    vertex_name_by_vertex_id: &HashMap<String, String>,
    export_vertex_id: &mut Option<String>,
    export_vertex_name: &mut Option<String>,
) {
    if let Some(entry) = latest_logs
        .iter()
        .rev()
        .find(|log| log.message.trim_start().starts_with("exporting to image"))
    {
        *export_vertex_id = Some(entry.vertex_id.clone());
        if let Some(name) = vertex_name_by_vertex_id.get(&entry.vertex_id) {
            *export_vertex_name = Some(name.clone());
        }
    }
}

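/// Append the latest BuildKit log lines, prefixed with their vertex name, to
/// the rolling BuildKit log buffer.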
fn record_buildkit_logs(
    state: &mut BuildLogState,
    latest_logs: &[BuildkitLogEntry],
    current_vertex_id: &str,
    current_vertex_name: &str,
) {
    for log_entry in latest_logs {
        let name = state
            .vertex_name_by_vertex_id
            .get(&log_entry.vertex_id)
            .cloned()
            .or_else(|| {
                if log_entry.vertex_id == current_vertex_id {
                    Some(current_vertex_name.to_string())
                } else {
                    None
                }
            })
            .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));

        let message = log_entry.message.replace('\r', "").trim_end().to_string();
        if message.is_empty() {
            continue;
        }

        if state.recent_buildkit_logs.len() >= state.build_log_buffer_size {
            state.recent_buildkit_logs.pop_front();
        }
        state
            .recent_buildkit_logs
            .push_back(format!("[{name}] {message}"));
    }
}

#[derive(Debug, Clone)]
struct BuildkitLogEntry {
    vertex_id: String,
    message: String,
}

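/// Accumulate raw BuildKit log chunks per vertex and return the entries seen in
/// this status update.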
fn append_buildkit_logs(
    logs: &mut HashMap<String, String>,
    status: &BuildkitStatusResponse,
) -> Vec<BuildkitLogEntry> {
    let mut latest: Vec<BuildkitLogEntry> = Vec::new();

    for log in &status.logs {
        let vertex_id = log.vertex.clone();
        let message = String::from_utf8_lossy(&log.msg).to_string();
        let entry = logs.entry(vertex_id.clone()).or_default();
        entry.push_str(&message);
        latest.push(BuildkitLogEntry { vertex_id, message });
    }

    latest
}

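/// Pull the sandbox image, trying GHCR first and falling back to Docker Hub.
/// Returns the full name of the image that was pulled.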
pub async fn pull_image(
    client: &DockerClient,
    tag: Option<&str>,
    progress: &mut ProgressReporter,
) -> Result<String, DockerError> {
    let tag = tag.unwrap_or(IMAGE_TAG_DEFAULT);

    debug!("Attempting to pull from GHCR: {}:{}", IMAGE_NAME_GHCR, tag);
    let ghcr_err = match pull_from_registry(client, IMAGE_NAME_GHCR, tag, progress).await {
        Ok(()) => {
            let full_name = format!("{IMAGE_NAME_GHCR}:{tag}");
            return Ok(full_name);
        }
        Err(e) => e,
    };

    warn!(
        "GHCR pull failed: {}. Trying Docker Hub fallback...",
        ghcr_err
    );

    debug!(
        "Attempting to pull from Docker Hub: {}:{}",
        IMAGE_NAME_DOCKERHUB, tag
    );
    match pull_from_registry(client, IMAGE_NAME_DOCKERHUB, tag, progress).await {
        Ok(()) => {
            let full_name = format!("{IMAGE_NAME_DOCKERHUB}:{tag}");
            Ok(full_name)
        }
        Err(dockerhub_err) => Err(DockerError::Pull(format!(
            "Failed to pull from both registries. GHCR: {ghcr_err}. Docker Hub: {dockerhub_err}"
        ))),
    }
}

const MAX_PULL_RETRIES: usize = 3;

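/// Pull `image:tag` from a single registry, retrying with exponential backoff.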
async fn pull_from_registry(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");

    let mut last_error = None;
    for attempt in 1..=MAX_PULL_RETRIES {
        debug!(
            "Pull attempt {}/{} for {}",
            attempt, MAX_PULL_RETRIES, full_name
        );

        match do_pull(client, image, tag, progress).await {
            Ok(()) => return Ok(()),
            Err(e) => {
                warn!("Pull attempt {} failed: {}", attempt, e);
                last_error = Some(e);

                if attempt < MAX_PULL_RETRIES {
                    let delay_ms = 1000 * (1 << (attempt - 1));
                    tokio::time::sleep(std::time::Duration::from_millis(delay_ms)).await;
                }
            }
        }
    }

    Err(last_error.unwrap_or_else(|| {
        DockerError::Pull(format!(
            "Pull failed for {full_name} after {MAX_PULL_RETRIES} attempts"
        ))
    }))
}

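/// Perform one pull attempt, translating layer status updates into progress
/// bars and spinners.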
async fn do_pull(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");

    let options = CreateImageOptions {
        from_image: Some(image.to_string()),
        tag: Some(tag.to_string()),
        platform: String::new(),
        ..Default::default()
    };

    let mut stream = client.inner().create_image(Some(options), None, None);

    progress.add_spinner("pull", &format!("Pulling {full_name}..."));

    while let Some(result) = stream.next().await {
        match result {
            Ok(info) => {
                if let Some(error_detail) = &info.error_detail
                    && let Some(error_msg) = &error_detail.message
                {
                    progress.abandon_all(error_msg);
                    return Err(DockerError::Pull(error_msg.to_string()));
                }

                if let Some(layer_id) = &info.id {
                    let status = info.status.as_deref().unwrap_or("");

                    match status {
                        "Already exists" => {
                            progress.finish(layer_id, "Already exists");
                        }
                        "Pull complete" => {
                            progress.finish(layer_id, "Pull complete");
                        }
                        "Downloading" | "Extracting" => {
                            if let Some(progress_detail) = &info.progress_detail {
                                let current = progress_detail.current.unwrap_or(0) as u64;
                                let total = progress_detail.total.unwrap_or(0) as u64;

                                if total > 0 {
                                    progress.update_layer(layer_id, current, total, status);
                                }
                            }
                        }
                        _ => {
                            progress.update_spinner(layer_id, status);
                        }
                    }
                } else if let Some(status) = &info.status {
                    progress.update_spinner("pull", status);
                }
            }
            Err(e) => {
                progress.abandon_all("Pull failed");
                return Err(DockerError::Pull(format!("Pull failed: {e}")));
            }
        }
    }

    progress.finish("pull", &format!("Pull complete: {full_name}"));
    Ok(())
}

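/// Build a human-readable error message from the failure plus recent build,
/// error, and BuildKit log lines, with a suggestion for common failure classes.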
fn format_build_error_with_context(
    error: &str,
    recent_logs: &VecDeque<String>,
    error_logs: &VecDeque<String>,
    recent_buildkit_logs: &VecDeque<String>,
) -> String {
    let mut message = String::new();

    message.push_str(error);

    if !error_logs.is_empty() {
        let recent_set: std::collections::HashSet<_> = recent_logs.iter().collect();
        let unique_errors: Vec<_> = error_logs
            .iter()
            .filter(|line| !recent_set.contains(line))
            .collect();

        if !unique_errors.is_empty() {
            message.push_str("\n\nPotential errors detected during build:");
            for line in unique_errors {
                message.push_str("\n ");
                message.push_str(line);
            }
        }
    }

    if !recent_buildkit_logs.is_empty() {
        message.push_str("\n\nRecent BuildKit output:");
        for line in recent_buildkit_logs {
            message.push_str("\n ");
            message.push_str(line);
        }
    }

    if !recent_logs.is_empty() {
        message.push_str("\n\nRecent build output:");
        for line in recent_logs {
            message.push_str("\n ");
            message.push_str(line);
        }
    } else if recent_buildkit_logs.is_empty() {
        message.push_str("\n\nNo build output was received from the Docker daemon.");
        message.push_str("\nThis usually means the build failed before any logs were streamed.");
    }

    let error_lower = error.to_lowercase();
    if error_lower.contains("network")
        || error_lower.contains("connection")
        || error_lower.contains("timeout")
    {
        message.push_str("\n\nSuggestion: Check your network connection and Docker's ability to reach the internet.");
    } else if error_lower.contains("disk")
        || error_lower.contains("space")
        || error_lower.contains("no space")
    {
        message.push_str("\n\nSuggestion: Free up disk space with 'docker system prune' or check available storage.");
    } else if error_lower.contains("permission") || error_lower.contains("denied") {
        message.push_str("\n\nSuggestion: Check Docker permissions. You may need to add your user to the 'docker' group.");
    }

    message
}

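/// Create a gzipped tar build context containing the embedded Dockerfile and
/// the supporting files it copies into the image.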
fn create_build_context() -> Result<Vec<u8>, std::io::Error> {
    let mut archive_buffer = Vec::new();

    {
        let encoder = GzEncoder::new(&mut archive_buffer, Compression::default());
        let mut tar = TarBuilder::new(encoder);

        let dockerfile_bytes = DOCKERFILE.as_bytes();
        append_bytes(&mut tar, "Dockerfile", dockerfile_bytes, 0o644)?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/entrypoint.sh",
            include_bytes!("files/entrypoint.sh"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/healthcheck.sh",
            include_bytes!("files/healthcheck.sh"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode-broker.service",
            include_bytes!("files/opencode-broker.service"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode.service",
            include_bytes!("files/opencode.service"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/pam/opencode",
            include_bytes!("files/pam/opencode"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/opencode.jsonc",
            include_bytes!("files/opencode.jsonc"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/starship.toml",
            include_bytes!("files/starship.toml"),
            0o644,
        )?;
        append_bytes(
            &mut tar,
            "packages/core/src/docker/files/bashrc.extra",
            include_bytes!("files/bashrc.extra"),
            0o644,
        )?;
        tar.finish()?;

        let encoder = tar.into_inner()?;
        encoder.finish()?;
    }

    Ok(archive_buffer)
}

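/// Append an in-memory file to the tar archive with the given path and mode.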
fn append_bytes<W: Write>(
    tar: &mut TarBuilder<W>,
    path: &str,
    contents: &[u8],
    mode: u32,
) -> Result<(), std::io::Error> {
    let mut header = tar::Header::new_gnu();
    header.set_path(path)?;
    header.set_size(contents.len() as u64);
    header.set_mode(mode);
    header.set_cksum();

    tar.append(&header, contents)?;
    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;
    use bollard::models::ImageSummary;
    use flate2::read::GzDecoder;
    use std::collections::HashMap;
    use std::io::Cursor;
    use tar::Archive;

    fn make_image_summary(
        id: &str,
        tags: Vec<&str>,
        digests: Vec<&str>,
        labels: HashMap<String, String>,
    ) -> ImageSummary {
        ImageSummary {
            id: id.to_string(),
            parent_id: String::new(),
            repo_tags: tags.into_iter().map(|tag| tag.to_string()).collect(),
            repo_digests: digests
                .into_iter()
                .map(|digest| digest.to_string())
                .collect(),
            created: 0,
            size: 0,
            shared_size: -1,
            labels,
            containers: 0,
            manifests: None,
            descriptor: None,
        }
    }

    #[test]
    fn create_build_context_succeeds() {
        let context = create_build_context().expect("should create context");
        assert!(!context.is_empty(), "context should not be empty");

        assert_eq!(context[0], 0x1f, "should be gzip compressed");
        assert_eq!(context[1], 0x8b, "should be gzip compressed");
    }

    #[test]
    fn build_context_includes_docker_assets() {
        let context = create_build_context().expect("should create context");
        let cursor = Cursor::new(context);
        let decoder = GzDecoder::new(cursor);
        let mut archive = Archive::new(decoder);
        let mut found_entrypoint = false;
        let mut found_healthcheck = false;

        for entry in archive.entries().expect("should read archive entries") {
            let entry = entry.expect("should read entry");
            let path = entry.path().expect("should read entry path");
            if path == std::path::Path::new("packages/core/src/docker/files/entrypoint.sh") {
                found_entrypoint = true;
            }
            if path == std::path::Path::new("packages/core/src/docker/files/healthcheck.sh") {
                found_healthcheck = true;
            }
            if found_entrypoint && found_healthcheck {
                break;
            }
        }

        assert!(
            found_entrypoint,
            "entrypoint asset should be in the build context"
        );
        assert!(
            found_healthcheck,
            "healthcheck asset should be in the build context"
        );
    }

    #[test]
    fn default_tag_is_latest() {
        assert_eq!(IMAGE_TAG_DEFAULT, "latest");
    }

    #[test]
    fn format_build_error_includes_recent_logs() {
        let mut logs = VecDeque::new();
        logs.push_back("Step 1/5 : FROM ubuntu:24.04".to_string());
        logs.push_back("Step 2/5 : RUN apt-get update".to_string());
        logs.push_back("E: Unable to fetch some archives".to_string());
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();

        let result = format_build_error_with_context(
            "Build failed: exit code 1",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Build failed: exit code 1"));
        assert!(result.contains("Recent build output:"));
        assert!(result.contains("Step 1/5"));
        assert!(result.contains("Unable to fetch"));
    }

    #[test]
    fn format_build_error_handles_empty_logs() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result =
            format_build_error_with_context("Stream error", &logs, &error_logs, &buildkit_logs);

        assert!(result.contains("Stream error"));
        assert!(!result.contains("Recent build output:"));
    }

    #[test]
    fn format_build_error_adds_network_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "connection timeout",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Check your network connection"));
    }

    #[test]
    fn format_build_error_adds_disk_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "no space left on device",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Free up disk space"));
    }

    #[test]
    fn format_build_error_shows_error_lines_separately() {
        let mut recent_logs = VecDeque::new();
        recent_logs.push_back("Compiling foo v1.0".to_string());
        recent_logs.push_back("Successfully installed bar".to_string());

        let mut error_logs = VecDeque::new();
        error_logs.push_back("error: failed to compile dust".to_string());
        error_logs.push_back("error: failed to compile glow".to_string());

        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "Build failed",
            &recent_logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Potential errors detected during build:"));
        assert!(result.contains("failed to compile dust"));
        assert!(result.contains("failed to compile glow"));
    }

    #[test]
    fn is_error_line_detects_errors() {
        assert!(is_error_line("error: something failed"));
        assert!(is_error_line("Error: build failed"));
        assert!(is_error_line("Failed to install package"));
        assert!(is_error_line("cannot find module"));
        assert!(is_error_line("Unable to locate package"));
        assert!(!is_error_line("Compiling foo v1.0"));
        assert!(!is_error_line("Successfully installed"));
    }

    #[test]
    fn collect_image_ids_matches_labels() {
        let mut labels = HashMap::new();
        labels.insert(LABEL_SOURCE.to_string(), LABEL_SOURCE_VALUE.to_string());

        let images = vec![
            make_image_summary("sha256:opencode", vec![], vec![], labels),
            make_image_summary(
                "sha256:other",
                vec!["busybox:latest"],
                vec![],
                HashMap::new(),
            ),
        ];

        let ids = collect_image_ids(&images, "opencode-cloud-sandbox");
        assert!(ids.contains("sha256:opencode"));
        assert!(!ids.contains("sha256:other"));
    }
}