opencode_cloud_core/docker/image.rs

//! Docker image build and pull operations
//!
//! This module provides functionality to build Docker images from the embedded
//! Dockerfile and pull images from registries with progress feedback.
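//!
//! Typical usage (sketch; how `DockerClient` and `ProgressReporter` are constructed
//! is assumed here and lives elsewhere in the crate):
//!
//! ```ignore
//! async fn ensure_image(
//!     client: &DockerClient,
//!     progress: &mut ProgressReporter,
//! ) -> Result<String, DockerError> {
//!     // Reuse a local image when present; otherwise pull with registry fallback.
//!     if image_exists(client, IMAGE_NAME_GHCR, IMAGE_TAG_DEFAULT).await? {
//!         return Ok(format!("{IMAGE_NAME_GHCR}:{IMAGE_TAG_DEFAULT}"));
//!     }
//!     pull_image(client, None, progress).await
//! }
//! ```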

use super::progress::ProgressReporter;
use super::{
    DOCKERFILE, DockerClient, DockerError, IMAGE_NAME_DOCKERHUB, IMAGE_NAME_GHCR, IMAGE_TAG_DEFAULT,
};
use bollard::moby::buildkit::v1::StatusResponse as BuildkitStatusResponse;
use bollard::models::BuildInfoAux;
use bollard::query_parameters::{
    BuildImageOptions, BuilderVersion, CreateImageOptions, ListImagesOptionsBuilder,
    RemoveImageOptionsBuilder,
};
use bytes::Bytes;
use flate2::Compression;
use flate2::write::GzEncoder;
use futures_util::StreamExt;
use http_body_util::{Either, Full};
use std::collections::{HashMap, HashSet, VecDeque};
use std::env;
use std::time::{SystemTime, UNIX_EPOCH};
use tar::Builder as TarBuilder;
use tracing::{debug, warn};

/// Default number of recent build log lines to capture for error context
const DEFAULT_BUILD_LOG_BUFFER_SIZE: usize = 20;

/// Default number of error lines to capture separately
const DEFAULT_ERROR_LOG_BUFFER_SIZE: usize = 10;

/// Read a log buffer size from env with bounds
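///
/// Values are clamped to the 5..=500 range; for example, `OPENCODE_DOCKER_BUILD_LOG_TAIL=1000`
/// yields 500, while unset or non-numeric values fall back to `default`.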
fn read_log_buffer_size(var_name: &str, default: usize) -> usize {
    let Ok(value) = env::var(var_name) else {
        return default;
    };
    let Ok(parsed) = value.trim().parse::<usize>() else {
        return default;
    };
    parsed.clamp(5, 500)
}

/// Check if a line looks like an error message
fn is_error_line(line: &str) -> bool {
    let lower = line.to_lowercase();
    lower.contains("error")
        || lower.contains("failed")
        || lower.contains("cannot")
        || lower.contains("unable to")
        || lower.contains("not found")
        || lower.contains("permission denied")
}

/// Check if an image exists locally
pub async fn image_exists(
    client: &DockerClient,
    image: &str,
    tag: &str,
) -> Result<bool, DockerError> {
    let full_name = format!("{image}:{tag}");
    debug!("Checking if image exists: {}", full_name);

    match client.inner().inspect_image(&full_name).await {
        Ok(_) => Ok(true),
        Err(bollard::errors::Error::DockerResponseServerError {
            status_code: 404, ..
        }) => Ok(false),
        Err(e) => Err(DockerError::from(e)),
    }
}

/// Remove all images whose tags or digests contain the provided name fragment
///
/// Returns the number of image references removed.
pub async fn remove_images_by_name(
    client: &DockerClient,
    name_fragment: &str,
    force: bool,
) -> Result<usize, DockerError> {
    debug!("Removing Docker images matching '{name_fragment}'");

    let images = list_docker_images(client).await?;

    let references = collect_image_references(&images, name_fragment);
    remove_image_references(client, references, force).await
}

/// List all local Docker images (including intermediate layers).
async fn list_docker_images(
    client: &DockerClient,
) -> Result<Vec<bollard::models::ImageSummary>, DockerError> {
    let list_options = ListImagesOptionsBuilder::new().all(true).build();
    client
        .inner()
        .list_images(Some(list_options))
        .await
        .map_err(|e| DockerError::Image(format!("Failed to list images: {e}")))
}

/// Collect tags and digests that contain the provided name fragment.
fn collect_image_references(
    images: &[bollard::models::ImageSummary],
    name_fragment: &str,
) -> HashSet<String> {
    let mut references = HashSet::new();
    for image in images {
        for tag in &image.repo_tags {
            if tag != "<none>:<none>" && tag.contains(name_fragment) {
                references.insert(tag.to_string());
            }
        }

        for digest in &image.repo_digests {
            if digest.contains(name_fragment) {
                references.insert(digest.to_string());
            }
        }
    }
    references
}

/// Remove image references (tags/digests), returning the number removed.
async fn remove_image_references(
    client: &DockerClient,
    references: HashSet<String>,
    force: bool,
) -> Result<usize, DockerError> {
    if references.is_empty() {
        return Ok(0);
    }

    let remove_options = RemoveImageOptionsBuilder::new().force(force).build();
    let mut removed = 0usize;
    for reference in references {
        client
            .inner()
            .remove_image(&reference, Some(remove_options.clone()), None)
            .await
            .map_err(|e| DockerError::Image(format!("Failed to remove image {reference}: {e}")))?;
        removed += 1;
    }

    Ok(removed)
}

/// Build the opencode image from embedded Dockerfile
///
/// Shows real-time build progress with streaming output.
/// Returns the full image:tag string on success.
///
/// # Arguments
/// * `client` - Docker client
/// * `tag` - Image tag (defaults to IMAGE_TAG_DEFAULT)
/// * `progress` - Progress reporter for build feedback
/// * `no_cache` - If true, build without using Docker layer cache
/// * `build_args` - Optional build arguments passed to the Docker build
pub async fn build_image(
    client: &DockerClient,
    tag: Option<&str>,
    progress: &mut ProgressReporter,
    no_cache: bool,
    build_args: Option<HashMap<String, String>>,
) -> Result<String, DockerError> {
    let tag = tag.unwrap_or(IMAGE_TAG_DEFAULT);
    let full_name = format!("{IMAGE_NAME_GHCR}:{tag}");
    debug!("Building image: {} (no_cache: {})", full_name, no_cache);

    // Create tar archive containing Dockerfile
    let context = create_build_context()
        .map_err(|e| DockerError::Build(format!("Failed to create build context: {e}")))?;

    // Set up build options
    // Explicitly use BuildKit builder to support cache mounts (--mount=type=cache)
    // BuildKit requires a unique session ID for each build
    let session_id = format!(
        "opencode-cloud-build-{}",
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos()
    );
    let build_args = build_args.unwrap_or_default();
    let options = BuildImageOptions {
        t: Some(full_name.clone()),
        dockerfile: "Dockerfile".to_string(),
        version: BuilderVersion::BuilderBuildKit,
        session: Some(session_id),
        rm: true,
        nocache: no_cache,
        buildargs: Some(build_args),
        platform: String::new(),
        target: String::new(),
        ..Default::default()
    };

    // Create build body from context
    let body: Either<Full<Bytes>, _> = Either::Left(Full::new(Bytes::from(context)));

    // Start build with streaming output
    let mut stream = client.inner().build_image(options, None, Some(body));

    // Add main build spinner (context prefix like "Building image" is set by caller)
    progress.add_spinner("build", "Initializing...");

    let mut maybe_image_id = None;
    let mut log_state = BuildLogState::new();

    while let Some(result) = stream.next().await {
        let Ok(info) = result else {
            return Err(handle_stream_error(
                "Build failed",
                result.expect_err("checked error").to_string(),
                &log_state,
                progress,
            ));
        };

        handle_stream_message(&info, progress, &mut log_state);

        if let Some(error_detail) = &info.error_detail
            && let Some(error_msg) = &error_detail.message
        {
            progress.abandon_all(error_msg);
            let context = format_build_error_with_context(
                error_msg,
                &log_state.recent_logs,
                &log_state.error_logs,
                &log_state.recent_buildkit_logs,
            );
            return Err(DockerError::Build(context));
        }

        if let Some(aux) = info.aux {
            match aux {
                BuildInfoAux::Default(image_id) => {
                    if let Some(id) = image_id.id {
                        maybe_image_id = Some(id);
                    }
                }
                BuildInfoAux::BuildKit(status) => {
                    handle_buildkit_status(&status, progress, &mut log_state);
                }
            }
        }
    }

    let image_id = maybe_image_id.unwrap_or_else(|| "unknown".to_string());
    let finish_msg = format!("Build complete: {image_id}");
    progress.finish("build", &finish_msg);

    Ok(full_name)
}

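/// Rolling log buffers and BuildKit bookkeeping collected during a build,
/// used to attach recent output to error messages and drive the spinner label.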
struct BuildLogState {
    recent_logs: VecDeque<String>,
    error_logs: VecDeque<String>,
    recent_buildkit_logs: VecDeque<String>,
    build_log_buffer_size: usize,
    error_log_buffer_size: usize,
    last_buildkit_vertex: Option<String>,
    last_buildkit_vertex_id: Option<String>,
    export_vertex_id: Option<String>,
    export_vertex_name: Option<String>,
    buildkit_logs_by_vertex_id: HashMap<String, String>,
    vertex_name_by_vertex_id: HashMap<String, String>,
}

impl BuildLogState {
    fn new() -> Self {
        let build_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_LOG_TAIL",
            DEFAULT_BUILD_LOG_BUFFER_SIZE,
        );
        let error_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_ERROR_TAIL",
            DEFAULT_ERROR_LOG_BUFFER_SIZE,
        );
        Self {
            recent_logs: VecDeque::with_capacity(build_log_buffer_size),
            error_logs: VecDeque::with_capacity(error_log_buffer_size),
            recent_buildkit_logs: VecDeque::with_capacity(build_log_buffer_size),
            build_log_buffer_size,
            error_log_buffer_size,
            last_buildkit_vertex: None,
            last_buildkit_vertex_id: None,
            export_vertex_id: None,
            export_vertex_name: None,
            buildkit_logs_by_vertex_id: HashMap::new(),
            vertex_name_by_vertex_id: HashMap::new(),
        }
    }
}

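/// Forward a classic (non-BuildKit) build stream message to the progress reporter
/// and record it in the rolling buffers used for error context.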
fn handle_stream_message(
    info: &bollard::models::BuildInfo,
    progress: &mut ProgressReporter,
    state: &mut BuildLogState,
) {
    let Some(stream_msg) = info.stream.as_deref() else {
        return;
    };
    let msg = stream_msg.trim();
    if msg.is_empty() {
        return;
    }

    if progress.is_plain_output() {
        eprint!("{stream_msg}");
    } else {
        let has_runtime_vertex = state
            .last_buildkit_vertex
            .as_deref()
            .is_some_and(|name| name.starts_with("[runtime "));
        let is_internal_msg = msg.contains("[internal]");
        if !(has_runtime_vertex && is_internal_msg) {
            progress.update_spinner("build", stream_msg);
        }
    }

    if state.recent_logs.len() >= state.build_log_buffer_size {
        state.recent_logs.pop_front();
    }
    state.recent_logs.push_back(msg.to_string());

    if is_error_line(msg) {
        if state.error_logs.len() >= state.error_log_buffer_size {
            state.error_logs.pop_front();
        }
        state.error_logs.push_back(msg.to_string());
    }

    if msg.starts_with("Step ") {
        debug!("Build step: {}", msg);
    }
}

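/// Process a BuildKit status update: accumulate per-vertex logs, refresh the
/// vertex-id -> name mapping, and drive the build spinner with the most relevant
/// vertex (the export step once seen, otherwise the latest runtime step).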
fn handle_buildkit_status(
    status: &BuildkitStatusResponse,
    progress: &mut ProgressReporter,
    state: &mut BuildLogState,
) {
    let latest_logs = append_buildkit_logs(&mut state.buildkit_logs_by_vertex_id, status);
    update_buildkit_vertex_names(&mut state.vertex_name_by_vertex_id, status);
    update_export_vertex_from_logs(
        &latest_logs,
        &state.vertex_name_by_vertex_id,
        &mut state.export_vertex_id,
        &mut state.export_vertex_name,
    );
    let (vertex_id, vertex_name) = match select_latest_buildkit_vertex(
        status,
        &state.vertex_name_by_vertex_id,
        state.export_vertex_id.as_deref(),
        state.export_vertex_name.as_deref(),
    ) {
        Some((vertex_id, vertex_name)) => (vertex_id, vertex_name),
        None => {
            let Some(log_entry) = latest_logs.last() else {
                return;
            };
            let name = state
                .vertex_name_by_vertex_id
                .get(&log_entry.vertex_id)
                .cloned()
                .or_else(|| state.last_buildkit_vertex.clone())
                .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));
            (log_entry.vertex_id.clone(), name)
        }
    };
    record_buildkit_logs(state, &latest_logs, &vertex_id, &vertex_name);
    state.last_buildkit_vertex_id = Some(vertex_id.clone());
    if state.last_buildkit_vertex.as_deref() != Some(&vertex_name) {
        state.last_buildkit_vertex = Some(vertex_name.clone());
    }

    let message = if progress.is_plain_output() {
        vertex_name
    } else if let Some(log_entry) = latest_logs
        .iter()
        .rev()
        .find(|entry| entry.vertex_id == vertex_id)
    {
        format!("{vertex_name} · {}", log_entry.message)
    } else {
        vertex_name
    };
    progress.update_spinner("build", &message);

    if progress.is_plain_output() {
        for log_entry in latest_logs {
            eprintln!("[{}] {}", log_entry.vertex_id, log_entry.message);
        }
        return;
    }

    let (Some(current_id), Some(current_name)) = (
        state.last_buildkit_vertex_id.as_ref(),
        state.last_buildkit_vertex.as_ref(),
    ) else {
        return;
    };

    let name = state
        .vertex_name_by_vertex_id
        .get(current_id)
        .unwrap_or(current_name);
    // Keep non-verbose output on the spinner line only.
    let _ = name;
}

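/// Convert a failed build stream into a `DockerError::Build` carrying recent log
/// context, appending a BuildKit hint when cache mounts appear to be involved.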
fn handle_stream_error(
    prefix: &str,
    error_str: String,
    state: &BuildLogState,
    progress: &mut ProgressReporter,
) -> DockerError {
    progress.abandon_all(prefix);

    let buildkit_hint = if error_str.contains("mount")
        || error_str.contains("--mount")
        || state
            .recent_logs
            .iter()
            .any(|log| log.contains("--mount") && log.contains("cache"))
    {
        "\n\nNote: This Dockerfile uses BuildKit cache mounts (--mount=type=cache).\n\
         The build is configured to use BuildKit, but the Docker daemon may not support it.\n\
         Ensure BuildKit is enabled in Docker Desktop settings and the daemon is restarted."
    } else {
        ""
    };

    let context = format!(
        "{}{}",
        format_build_error_with_context(
            &error_str,
            &state.recent_logs,
            &state.error_logs,
            &state.recent_buildkit_logs,
        ),
        buildkit_hint
    );
    DockerError::Build(context)
}

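/// Record the first non-empty name seen for each vertex digest in a BuildKit status update.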
fn update_buildkit_vertex_names(
    vertex_name_by_vertex_id: &mut HashMap<String, String>,
    status: &BuildkitStatusResponse,
) {
    for vertex in &status.vertexes {
        if vertex.name.is_empty() {
            continue;
        }
        vertex_name_by_vertex_id
            .entry(vertex.digest.clone())
            .or_insert_with(|| vertex.name.clone());
    }
}

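/// Choose the vertex to surface in progress output: the export vertex when known,
/// otherwise the highest-numbered "[runtime N/M]" step, otherwise the first named
/// non-internal vertex (falling back to any named vertex).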
fn select_latest_buildkit_vertex(
    status: &BuildkitStatusResponse,
    vertex_name_by_vertex_id: &HashMap<String, String>,
    export_vertex_id: Option<&str>,
    export_vertex_name: Option<&str>,
) -> Option<(String, String)> {
    if let Some(export_vertex_id) = export_vertex_id {
        let name = export_vertex_name
            .map(str::to_string)
            .or_else(|| vertex_name_by_vertex_id.get(export_vertex_id).cloned())
            .unwrap_or_else(|| format_vertex_fallback_label(export_vertex_id));
        return Some((export_vertex_id.to_string(), name));
    }

    let mut best_runtime: Option<(u32, String, String)> = None;
    let mut fallback: Option<(String, String)> = None;

    for vertex in &status.vertexes {
        let name = if vertex.name.is_empty() {
            vertex_name_by_vertex_id.get(&vertex.digest).cloned()
        } else {
            Some(vertex.name.clone())
        };

        let Some(name) = name else {
            continue;
        };

        if fallback.is_none() && !name.starts_with("[internal]") {
            fallback = Some((vertex.digest.clone(), name.clone()));
        }

        if let Some(step) = parse_runtime_step(&name) {
            match &best_runtime {
                Some((best_step, _, _)) if *best_step >= step => {}
                _ => {
                    best_runtime = Some((step, vertex.digest.clone(), name.clone()));
                }
            }
        }
    }

    if let Some((_, digest, name)) = best_runtime {
        Some((digest, name))
    } else {
        fallback.or_else(|| {
            status.vertexes.iter().find_map(|vertex| {
                let name = if vertex.name.is_empty() {
                    vertex_name_by_vertex_id.get(&vertex.digest).cloned()
                } else {
                    Some(vertex.name.clone())
                };
                name.map(|resolved| (vertex.digest.clone(), resolved))
            })
        })
    }
}

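/// Extract the step number from a BuildKit vertex name, e.g.
/// `"[runtime 7/24] RUN cargo build"` yields `Some(7)`; names without a
/// "[runtime " prefix (such as "[internal] ..." vertices) yield `None`.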
fn parse_runtime_step(name: &str) -> Option<u32> {
    let prefix = "[runtime ";
    let start = name.find(prefix)? + prefix.len();
    let rest = &name[start..];
    let end = rest.find('/')?;
    rest[..end].trim().parse::<u32>().ok()
}

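/// Build a short human-readable label from a vertex digest, e.g.
/// `"sha256:0123456789abcdef..."` becomes `"vertex 0123456789ab"`.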
fn format_vertex_fallback_label(vertex_id: &str) -> String {
    let short = vertex_id
        .strip_prefix("sha256:")
        .unwrap_or(vertex_id)
        .chars()
        .take(12)
        .collect::<String>();
    format!("vertex {short}")
}

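/// Latch onto the vertex whose log reports "exporting to image" so the final
/// export phase stays pinned in progress output.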
fn update_export_vertex_from_logs(
    latest_logs: &[BuildkitLogEntry],
    vertex_name_by_vertex_id: &HashMap<String, String>,
    export_vertex_id: &mut Option<String>,
    export_vertex_name: &mut Option<String>,
) {
    if let Some(entry) = latest_logs
        .iter()
        .rev()
        .find(|log| log.message.trim_start().starts_with("exporting to image"))
    {
        *export_vertex_id = Some(entry.vertex_id.clone());
        if let Some(name) = vertex_name_by_vertex_id.get(&entry.vertex_id) {
            *export_vertex_name = Some(name.clone());
        }
    }
}

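/// Append the latest BuildKit log lines to the rolling buffer used for error
/// context, prefixing each with its vertex name (or a short fallback label).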
fn record_buildkit_logs(
    state: &mut BuildLogState,
    latest_logs: &[BuildkitLogEntry],
    current_vertex_id: &str,
    current_vertex_name: &str,
) {
    for log_entry in latest_logs {
        let name = state
            .vertex_name_by_vertex_id
            .get(&log_entry.vertex_id)
            .cloned()
            .or_else(|| {
                if log_entry.vertex_id == current_vertex_id {
                    Some(current_vertex_name.to_string())
                } else {
                    None
                }
            })
            .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));

        let message = log_entry.message.replace('\r', "").trim_end().to_string();
        if message.is_empty() {
            continue;
        }

        if state.recent_buildkit_logs.len() >= state.build_log_buffer_size {
            state.recent_buildkit_logs.pop_front();
        }
        state
            .recent_buildkit_logs
            .push_back(format!("[{name}] {message}"));
    }
}

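/// A decoded BuildKit log message paired with the digest of the vertex that emitted it.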
#[derive(Debug, Clone)]
struct BuildkitLogEntry {
    vertex_id: String,
    message: String,
}

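/// Append newly received log chunks to the per-vertex accumulators and return
/// the entries from this status update in arrival order.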
fn append_buildkit_logs(
    logs: &mut HashMap<String, String>,
    status: &BuildkitStatusResponse,
) -> Vec<BuildkitLogEntry> {
    let mut latest: Vec<BuildkitLogEntry> = Vec::new();

    for log in &status.logs {
        let vertex_id = log.vertex.clone();
        let message = String::from_utf8_lossy(&log.msg).to_string();
        let entry = logs.entry(vertex_id.clone()).or_default();
        entry.push_str(&message);
        latest.push(BuildkitLogEntry { vertex_id, message });
    }

    latest
}

/// Pull the opencode image from registry with automatic fallback
///
/// Tries GHCR first, falls back to Docker Hub on failure.
/// Returns the full image:tag string on success.
pub async fn pull_image(
    client: &DockerClient,
    tag: Option<&str>,
    progress: &mut ProgressReporter,
) -> Result<String, DockerError> {
    let tag = tag.unwrap_or(IMAGE_TAG_DEFAULT);

    // Try GHCR first
    debug!("Attempting to pull from GHCR: {}:{}", IMAGE_NAME_GHCR, tag);
    let ghcr_err = match pull_from_registry(client, IMAGE_NAME_GHCR, tag, progress).await {
        Ok(()) => {
            let full_name = format!("{IMAGE_NAME_GHCR}:{tag}");
            return Ok(full_name);
        }
        Err(e) => e,
    };

    warn!(
        "GHCR pull failed: {}. Trying Docker Hub fallback...",
        ghcr_err
    );

    // Try Docker Hub as fallback
    debug!(
        "Attempting to pull from Docker Hub: {}:{}",
        IMAGE_NAME_DOCKERHUB, tag
    );
    match pull_from_registry(client, IMAGE_NAME_DOCKERHUB, tag, progress).await {
        Ok(()) => {
            let full_name = format!("{IMAGE_NAME_DOCKERHUB}:{tag}");
            Ok(full_name)
        }
        Err(dockerhub_err) => Err(DockerError::Pull(format!(
            "Failed to pull from both registries. GHCR: {ghcr_err}. Docker Hub: {dockerhub_err}"
        ))),
    }
}

/// Maximum number of retry attempts for pull operations
const MAX_PULL_RETRIES: usize = 3;

/// Pull from a specific registry with retry logic
async fn pull_from_registry(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");

    // Manual retry loop since async closures can't capture mutable references
    let mut last_error = None;
    for attempt in 1..=MAX_PULL_RETRIES {
        debug!(
            "Pull attempt {}/{} for {}",
            attempt, MAX_PULL_RETRIES, full_name
        );

        match do_pull(client, image, tag, progress).await {
            Ok(()) => return Ok(()),
            Err(e) => {
                warn!("Pull attempt {} failed: {}", attempt, e);
                last_error = Some(e);

                if attempt < MAX_PULL_RETRIES {
                    // Exponential backoff between attempts: 1s, then 2s (no sleep follows the final attempt)
                    let delay_ms = 1000 * (1 << (attempt - 1));
                    tokio::time::sleep(std::time::Duration::from_millis(delay_ms)).await;
                }
            }
        }
    }

    Err(last_error.unwrap_or_else(|| {
        DockerError::Pull(format!(
            "Pull failed for {full_name} after {MAX_PULL_RETRIES} attempts"
        ))
    }))
}

/// Perform the actual pull operation
async fn do_pull(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");

    let options = CreateImageOptions {
        from_image: Some(image.to_string()),
        tag: Some(tag.to_string()),
        platform: String::new(),
        ..Default::default()
    };

    let mut stream = client.inner().create_image(Some(options), None, None);

    // Add main spinner for overall progress
    progress.add_spinner("pull", &format!("Pulling {full_name}..."));

    while let Some(result) = stream.next().await {
        match result {
            Ok(info) => {
                // Handle errors from the stream
                if let Some(error_detail) = &info.error_detail
                    && let Some(error_msg) = &error_detail.message
                {
                    progress.abandon_all(error_msg);
                    return Err(DockerError::Pull(error_msg.to_string()));
                }

                // Handle layer progress
                if let Some(layer_id) = &info.id {
                    let status = info.status.as_deref().unwrap_or("");

                    match status {
                        "Already exists" => {
                            progress.finish(layer_id, "Already exists");
                        }
                        "Pull complete" => {
                            progress.finish(layer_id, "Pull complete");
                        }
                        "Downloading" | "Extracting" => {
                            if let Some(progress_detail) = &info.progress_detail {
                                let current = progress_detail.current.unwrap_or(0) as u64;
                                let total = progress_detail.total.unwrap_or(0) as u64;

                                if total > 0 {
                                    progress.update_layer(layer_id, current, total, status);
                                }
                            }
                        }
                        _ => {
                            // Other statuses (Waiting, Verifying, etc.)
                            progress.update_spinner(layer_id, status);
                        }
                    }
                } else if let Some(status) = &info.status {
                    // Overall status messages (no layer id)
                    progress.update_spinner("pull", status);
                }
            }
            Err(e) => {
                progress.abandon_all("Pull failed");
                return Err(DockerError::Pull(format!("Pull failed: {e}")));
            }
        }
    }

    progress.finish("pull", &format!("Pull complete: {full_name}"));
    Ok(())
}

/// Format a build error with recent log context for actionable debugging
fn format_build_error_with_context(
    error: &str,
    recent_logs: &VecDeque<String>,
    error_logs: &VecDeque<String>,
    recent_buildkit_logs: &VecDeque<String>,
) -> String {
    let mut message = String::new();

    // Add main error message
    message.push_str(error);

    // Add captured error lines if they differ from recent logs
    // (these are error-like lines that may have scrolled off)
    if !error_logs.is_empty() {
        // Check if error_logs contains lines not in recent_logs
        let recent_set: std::collections::HashSet<_> = recent_logs.iter().collect();
        let unique_errors: Vec<_> = error_logs
            .iter()
            .filter(|line| !recent_set.contains(line))
            .collect();

        if !unique_errors.is_empty() {
            message.push_str("\n\nPotential errors detected during build:");
            for line in unique_errors {
                message.push_str("\n  ");
                message.push_str(line);
            }
        }
    }

    // Add recent BuildKit log context if available
    if !recent_buildkit_logs.is_empty() {
        message.push_str("\n\nRecent BuildKit output:");
        for line in recent_buildkit_logs {
            message.push_str("\n  ");
            message.push_str(line);
        }
    }

    // Add recent log context if available
    if !recent_logs.is_empty() {
        message.push_str("\n\nRecent build output:");
        for line in recent_logs {
            message.push_str("\n  ");
            message.push_str(line);
        }
    } else if recent_buildkit_logs.is_empty() {
        message.push_str("\n\nNo build output was received from the Docker daemon.");
        message.push_str("\nThis usually means the build failed before any logs were streamed.");
    }

    // Add actionable suggestions based on common error patterns
    let error_lower = error.to_lowercase();
    if error_lower.contains("network")
        || error_lower.contains("connection")
        || error_lower.contains("timeout")
    {
        message.push_str("\n\nSuggestion: Check your network connection and Docker's ability to reach the internet.");
    } else if error_lower.contains("disk")
        || error_lower.contains("space")
        || error_lower.contains("no space")
    {
        message.push_str("\n\nSuggestion: Free up disk space with 'docker system prune' or check available storage.");
    } else if error_lower.contains("permission") || error_lower.contains("denied") {
        message.push_str("\n\nSuggestion: Check Docker permissions. You may need to add your user to the 'docker' group.");
    }

    message
}

/// Create a gzipped tar archive containing the Dockerfile
fn create_build_context() -> Result<Vec<u8>, std::io::Error> {
    let mut archive_buffer = Vec::new();

    {
        let encoder = GzEncoder::new(&mut archive_buffer, Compression::default());
        let mut tar = TarBuilder::new(encoder);

        // Add Dockerfile to archive
        let dockerfile_bytes = DOCKERFILE.as_bytes();
        let mut header = tar::Header::new_gnu();
        header.set_path("Dockerfile")?;
        header.set_size(dockerfile_bytes.len() as u64);
        header.set_mode(0o644);
        header.set_cksum();

        tar.append(&header, dockerfile_bytes)?;
        tar.finish()?;

        // Finish gzip encoding
        let encoder = tar.into_inner()?;
        encoder.finish()?;
    }

    Ok(archive_buffer)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn create_build_context_succeeds() {
        let context = create_build_context().expect("should create context");
        assert!(!context.is_empty(), "context should not be empty");

        // Verify it's gzip-compressed (gzip magic bytes)
        assert_eq!(context[0], 0x1f, "should be gzip compressed");
        assert_eq!(context[1], 0x8b, "should be gzip compressed");
    }

    #[test]
    fn default_tag_is_latest() {
        assert_eq!(IMAGE_TAG_DEFAULT, "latest");
    }

    #[test]
    fn format_build_error_includes_recent_logs() {
        let mut logs = VecDeque::new();
        logs.push_back("Step 1/5 : FROM ubuntu:24.04".to_string());
        logs.push_back("Step 2/5 : RUN apt-get update".to_string());
        logs.push_back("E: Unable to fetch some archives".to_string());
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();

        let result = format_build_error_with_context(
            "Build failed: exit code 1",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Build failed: exit code 1"));
        assert!(result.contains("Recent build output:"));
        assert!(result.contains("Step 1/5"));
        assert!(result.contains("Unable to fetch"));
    }

    #[test]
    fn format_build_error_handles_empty_logs() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result =
            format_build_error_with_context("Stream error", &logs, &error_logs, &buildkit_logs);

        assert!(result.contains("Stream error"));
        assert!(!result.contains("Recent build output:"));
    }

    #[test]
    fn format_build_error_adds_network_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "connection timeout",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Check your network connection"));
    }

    #[test]
    fn format_build_error_adds_disk_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "no space left on device",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Free up disk space"));
    }

    #[test]
    fn format_build_error_shows_error_lines_separately() {
        let mut recent_logs = VecDeque::new();
        recent_logs.push_back("Compiling foo v1.0".to_string());
        recent_logs.push_back("Successfully installed bar".to_string());

        let mut error_logs = VecDeque::new();
        error_logs.push_back("error: failed to compile dust".to_string());
        error_logs.push_back("error: failed to compile glow".to_string());

        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "Build failed",
            &recent_logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Potential errors detected during build:"));
        assert!(result.contains("failed to compile dust"));
        assert!(result.contains("failed to compile glow"));
    }

    #[test]
    fn is_error_line_detects_errors() {
        assert!(is_error_line("error: something failed"));
        assert!(is_error_line("Error: build failed"));
        assert!(is_error_line("Failed to install package"));
        assert!(is_error_line("cannot find module"));
        assert!(is_error_line("Unable to locate package"));
        assert!(!is_error_line("Compiling foo v1.0"));
        assert!(!is_error_line("Successfully installed"));
    }
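
    // Additional sketches covering the pure BuildKit helpers above; inputs are illustrative.
    #[test]
    fn parse_runtime_step_extracts_step_number() {
        assert_eq!(parse_runtime_step("[runtime 7/24] RUN cargo build"), Some(7));
        assert_eq!(parse_runtime_step("[internal] load build definition"), None);
    }

    #[test]
    fn format_vertex_fallback_label_shortens_digest() {
        assert_eq!(
            format_vertex_fallback_label("sha256:0123456789abcdef0123456789abcdef"),
            "vertex 0123456789ab"
        );
    }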
}