Skip to main content

opencode_cloud_core/docker/
image.rs

1//! Docker image build and pull operations
2//!
3//! This module provides functionality to build Docker images from the embedded
4//! Dockerfile and pull images from registries with progress feedback.
5
6use super::progress::ProgressReporter;
7use super::{
8    DOCKERFILE, DockerClient, DockerError, IMAGE_NAME_DOCKERHUB, IMAGE_NAME_GHCR, IMAGE_TAG_DEFAULT,
9};
10use bollard::image::{BuildImageOptions, BuilderVersion, CreateImageOptions};
11use bollard::moby::buildkit::v1::StatusResponse as BuildkitStatusResponse;
12use bollard::models::BuildInfoAux;
13use bytes::Bytes;
14use flate2::Compression;
15use flate2::write::GzEncoder;
16use futures_util::StreamExt;
17use std::collections::{HashMap, VecDeque};
18use std::env;
19use std::time::{SystemTime, UNIX_EPOCH};
20use tar::Builder as TarBuilder;
21use tracing::{debug, warn};
22
/// Default number of recent build log lines to capture for error context.
///
/// Overridable via the `OPENCODE_DOCKER_BUILD_LOG_TAIL` env var (parsed and
/// clamped to 5..=500 by `read_log_buffer_size`).
const DEFAULT_BUILD_LOG_BUFFER_SIZE: usize = 20;

/// Default number of error lines to capture separately.
///
/// Overridable via the `OPENCODE_DOCKER_BUILD_ERROR_TAIL` env var (parsed and
/// clamped to 5..=500 by `read_log_buffer_size`).
const DEFAULT_ERROR_LOG_BUFFER_SIZE: usize = 10;
28
/// Read a log buffer size from the environment, clamped to a sane range.
///
/// Returns `default` (unclamped) when the variable is unset or does not
/// parse as a `usize`; parsed values are clamped to 5..=500.
fn read_log_buffer_size(var_name: &str, default: usize) -> usize {
    match env::var(var_name) {
        Ok(raw) => match raw.trim().parse::<usize>() {
            Ok(parsed) => parsed.clamp(5, 500),
            Err(_) => default,
        },
        Err(_) => default,
    }
}
39
/// Heuristically decide whether a build log line looks like an error.
fn is_error_line(line: &str) -> bool {
    // Case-insensitive substring markers commonly seen in failing builds.
    const ERROR_MARKERS: [&str; 6] = [
        "error",
        "failed",
        "cannot",
        "unable to",
        "not found",
        "permission denied",
    ];
    let lowered = line.to_lowercase();
    ERROR_MARKERS.iter().any(|marker| lowered.contains(marker))
}
50
51/// Check if an image exists locally
52pub async fn image_exists(
53    client: &DockerClient,
54    image: &str,
55    tag: &str,
56) -> Result<bool, DockerError> {
57    let full_name = format!("{image}:{tag}");
58    debug!("Checking if image exists: {}", full_name);
59
60    match client.inner().inspect_image(&full_name).await {
61        Ok(_) => Ok(true),
62        Err(bollard::errors::Error::DockerResponseServerError {
63            status_code: 404, ..
64        }) => Ok(false),
65        Err(e) => Err(DockerError::from(e)),
66    }
67}
68
69/// Build the opencode image from embedded Dockerfile
70///
71/// Shows real-time build progress with streaming output.
72/// Returns the full image:tag string on success.
73///
74/// # Arguments
75/// * `client` - Docker client
76/// * `tag` - Image tag (defaults to IMAGE_TAG_DEFAULT)
77/// * `progress` - Progress reporter for build feedback
78/// * `no_cache` - If true, build without using Docker layer cache
79pub async fn build_image(
80    client: &DockerClient,
81    tag: Option<&str>,
82    progress: &mut ProgressReporter,
83    no_cache: bool,
84    build_args: Option<HashMap<String, String>>,
85) -> Result<String, DockerError> {
86    let tag = tag.unwrap_or(IMAGE_TAG_DEFAULT);
87    let full_name = format!("{IMAGE_NAME_GHCR}:{tag}");
88    debug!("Building image: {} (no_cache: {})", full_name, no_cache);
89
90    // Create tar archive containing Dockerfile
91    let context = create_build_context()
92        .map_err(|e| DockerError::Build(format!("Failed to create build context: {e}")))?;
93
94    // Set up build options
95    // Explicitly use BuildKit builder to support cache mounts (--mount=type=cache)
96    // BuildKit requires a unique session ID for each build
97    let session_id = format!(
98        "opencode-cloud-build-{}",
99        SystemTime::now()
100            .duration_since(UNIX_EPOCH)
101            .unwrap_or_default()
102            .as_nanos()
103    );
104    let build_args = build_args.unwrap_or_default();
105    let options = BuildImageOptions {
106        t: full_name.clone(),
107        dockerfile: "Dockerfile".to_string(),
108        version: BuilderVersion::BuilderBuildKit,
109        session: Some(session_id),
110        rm: true,
111        nocache: no_cache,
112        buildargs: build_args,
113        ..Default::default()
114    };
115
116    // Create build body from context
117    let body = Bytes::from(context);
118
119    // Start build with streaming output
120    let mut stream = client.inner().build_image(options, None, Some(body));
121
122    // Add main build spinner (context prefix like "Building image" is set by caller)
123    progress.add_spinner("build", "Initializing...");
124
125    let mut maybe_image_id = None;
126    let mut log_state = BuildLogState::new();
127
128    while let Some(result) = stream.next().await {
129        let Ok(info) = result else {
130            return Err(handle_stream_error(
131                "Build failed",
132                result.expect_err("checked error").to_string(),
133                &log_state,
134                progress,
135            ));
136        };
137
138        handle_stream_message(&info, progress, &mut log_state);
139
140        if let Some(error_msg) = info.error {
141            progress.abandon_all(&error_msg);
142            let context = format_build_error_with_context(
143                &error_msg,
144                &log_state.recent_logs,
145                &log_state.error_logs,
146                &log_state.recent_buildkit_logs,
147            );
148            return Err(DockerError::Build(context));
149        }
150
151        if let Some(aux) = info.aux {
152            match aux {
153                BuildInfoAux::Default(image_id) => {
154                    if let Some(id) = image_id.id {
155                        maybe_image_id = Some(id);
156                    }
157                }
158                BuildInfoAux::BuildKit(status) => {
159                    handle_buildkit_status(&status, progress, &mut log_state);
160                }
161            }
162        }
163    }
164
165    let image_id = maybe_image_id.unwrap_or_else(|| "unknown".to_string());
166    let finish_msg = format!("Build complete: {image_id}");
167    progress.finish("build", &finish_msg);
168
169    Ok(full_name)
170}
171
/// Rolling state captured while streaming Docker build output.
///
/// Used to drive spinner updates and to assemble actionable error context
/// when a build fails.
struct BuildLogState {
    /// Tail of classic build-stream lines, bounded by `build_log_buffer_size`.
    recent_logs: VecDeque<String>,
    /// Error-looking lines (per `is_error_line`), kept separately so they
    /// survive even after scrolling out of `recent_logs`.
    error_logs: VecDeque<String>,
    /// Tail of BuildKit log lines, each prefixed with its vertex name.
    recent_buildkit_logs: VecDeque<String>,
    /// Capacity bound for `recent_logs` / `recent_buildkit_logs` (env-overridable; see `new`).
    build_log_buffer_size: usize,
    /// Capacity bound for `error_logs` (env-overridable; see `new`).
    error_log_buffer_size: usize,
    /// Name of the vertex most recently surfaced on the spinner.
    last_buildkit_vertex: Option<String>,
    /// Digest of the vertex most recently surfaced on the spinner.
    last_buildkit_vertex_id: Option<String>,
    /// Digest of the vertex detected as the image-export step
    /// (its log starts with "exporting to image").
    export_vertex_id: Option<String>,
    /// Resolved name of the export vertex, when known.
    export_vertex_name: Option<String>,
    /// Accumulated raw log text per vertex digest.
    buildkit_logs_by_vertex_id: HashMap<String, String>,
    /// Vertex digest -> human-readable vertex name.
    vertex_name_by_vertex_id: HashMap<String, String>,
}
185
186impl BuildLogState {
187    fn new() -> Self {
188        let build_log_buffer_size = read_log_buffer_size(
189            "OPENCODE_DOCKER_BUILD_LOG_TAIL",
190            DEFAULT_BUILD_LOG_BUFFER_SIZE,
191        );
192        let error_log_buffer_size = read_log_buffer_size(
193            "OPENCODE_DOCKER_BUILD_ERROR_TAIL",
194            DEFAULT_ERROR_LOG_BUFFER_SIZE,
195        );
196        Self {
197            recent_logs: VecDeque::with_capacity(build_log_buffer_size),
198            error_logs: VecDeque::with_capacity(error_log_buffer_size),
199            recent_buildkit_logs: VecDeque::with_capacity(build_log_buffer_size),
200            build_log_buffer_size,
201            error_log_buffer_size,
202            last_buildkit_vertex: None,
203            last_buildkit_vertex_id: None,
204            export_vertex_id: None,
205            export_vertex_name: None,
206            buildkit_logs_by_vertex_id: HashMap::new(),
207            vertex_name_by_vertex_id: HashMap::new(),
208        }
209    }
210}
211
212fn handle_stream_message(
213    info: &bollard::models::BuildInfo,
214    progress: &mut ProgressReporter,
215    state: &mut BuildLogState,
216) {
217    let Some(stream_msg) = info.stream.as_deref() else {
218        return;
219    };
220    let msg = stream_msg.trim();
221    if msg.is_empty() {
222        return;
223    }
224
225    if progress.is_plain_output() {
226        eprint!("{stream_msg}");
227    } else {
228        let has_runtime_vertex = state
229            .last_buildkit_vertex
230            .as_deref()
231            .is_some_and(|name| name.starts_with("[runtime "));
232        let is_internal_msg = msg.contains("[internal]");
233        if !(has_runtime_vertex && is_internal_msg) {
234            progress.update_spinner("build", stream_msg);
235        }
236    }
237
238    if state.recent_logs.len() >= state.build_log_buffer_size {
239        state.recent_logs.pop_front();
240    }
241    state.recent_logs.push_back(msg.to_string());
242
243    if is_error_line(msg) {
244        if state.error_logs.len() >= state.error_log_buffer_size {
245            state.error_logs.pop_front();
246        }
247        state.error_logs.push_back(msg.to_string());
248    }
249
250    if msg.starts_with("Step ") {
251        debug!("Build step: {}", msg);
252    }
253}
254
255fn handle_buildkit_status(
256    status: &BuildkitStatusResponse,
257    progress: &mut ProgressReporter,
258    state: &mut BuildLogState,
259) {
260    let latest_logs = append_buildkit_logs(&mut state.buildkit_logs_by_vertex_id, status);
261    update_buildkit_vertex_names(&mut state.vertex_name_by_vertex_id, status);
262    update_export_vertex_from_logs(
263        &latest_logs,
264        &state.vertex_name_by_vertex_id,
265        &mut state.export_vertex_id,
266        &mut state.export_vertex_name,
267    );
268    let (vertex_id, vertex_name) = match select_latest_buildkit_vertex(
269        status,
270        &state.vertex_name_by_vertex_id,
271        state.export_vertex_id.as_deref(),
272        state.export_vertex_name.as_deref(),
273    ) {
274        Some((vertex_id, vertex_name)) => (vertex_id, vertex_name),
275        None => {
276            let Some(log_entry) = latest_logs.last() else {
277                return;
278            };
279            let name = state
280                .vertex_name_by_vertex_id
281                .get(&log_entry.vertex_id)
282                .cloned()
283                .or_else(|| state.last_buildkit_vertex.clone())
284                .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));
285            (log_entry.vertex_id.clone(), name)
286        }
287    };
288    record_buildkit_logs(state, &latest_logs, &vertex_id, &vertex_name);
289    state.last_buildkit_vertex_id = Some(vertex_id.clone());
290    if state.last_buildkit_vertex.as_deref() != Some(&vertex_name) {
291        state.last_buildkit_vertex = Some(vertex_name.clone());
292    }
293
294    let message = if progress.is_plain_output() {
295        vertex_name
296    } else if let Some(log_entry) = latest_logs
297        .iter()
298        .rev()
299        .find(|entry| entry.vertex_id == vertex_id)
300    {
301        format!("{vertex_name} ยท {}", log_entry.message)
302    } else {
303        vertex_name
304    };
305    progress.update_spinner("build", &message);
306
307    if progress.is_plain_output() {
308        for log_entry in latest_logs {
309            eprintln!("[{}] {}", log_entry.vertex_id, log_entry.message);
310        }
311        return;
312    }
313
314    let (Some(current_id), Some(current_name)) = (
315        state.last_buildkit_vertex_id.as_ref(),
316        state.last_buildkit_vertex.as_ref(),
317    ) else {
318        return;
319    };
320
321    let name = state
322        .vertex_name_by_vertex_id
323        .get(current_id)
324        .unwrap_or(current_name);
325    // Keep non-verbose output on the spinner line only.
326    let _ = name;
327}
328
329fn handle_stream_error(
330    prefix: &str,
331    error_str: String,
332    state: &BuildLogState,
333    progress: &mut ProgressReporter,
334) -> DockerError {
335    progress.abandon_all(prefix);
336
337    let buildkit_hint = if error_str.contains("mount")
338        || error_str.contains("--mount")
339        || state
340            .recent_logs
341            .iter()
342            .any(|log| log.contains("--mount") && log.contains("cache"))
343    {
344        "\n\nNote: This Dockerfile uses BuildKit cache mounts (--mount=type=cache).\n\
345         The build is configured to use BuildKit, but the Docker daemon may not support it.\n\
346         Ensure BuildKit is enabled in Docker Desktop settings and the daemon is restarted."
347    } else {
348        ""
349    };
350
351    let context = format!(
352        "{}{}",
353        format_build_error_with_context(
354            &error_str,
355            &state.recent_logs,
356            &state.error_logs,
357            &state.recent_buildkit_logs,
358        ),
359        buildkit_hint
360    );
361    DockerError::Build(context)
362}
363
364fn update_buildkit_vertex_names(
365    vertex_name_by_vertex_id: &mut HashMap<String, String>,
366    status: &BuildkitStatusResponse,
367) {
368    for vertex in &status.vertexes {
369        if vertex.name.is_empty() {
370            continue;
371        }
372        vertex_name_by_vertex_id
373            .entry(vertex.digest.clone())
374            .or_insert_with(|| vertex.name.clone());
375    }
376}
377
/// Choose the most relevant BuildKit vertex to display on the spinner.
///
/// Precedence: the export vertex (once detected), then the highest-numbered
/// "[runtime N/M]" step, then the first non-"[internal]" vertex, then any
/// vertex with a resolvable name. Returns `None` when no vertex in this
/// status frame has a known name.
fn select_latest_buildkit_vertex(
    status: &BuildkitStatusResponse,
    vertex_name_by_vertex_id: &HashMap<String, String>,
    export_vertex_id: Option<&str>,
    export_vertex_name: Option<&str>,
) -> Option<(String, String)> {
    // Once the export vertex is known, always pin the display to it.
    if let Some(export_vertex_id) = export_vertex_id {
        let name = export_vertex_name
            .map(str::to_string)
            .or_else(|| vertex_name_by_vertex_id.get(export_vertex_id).cloned())
            .unwrap_or_else(|| format_vertex_fallback_label(export_vertex_id));
        return Some((export_vertex_id.to_string(), name));
    }

    // Best "[runtime N/M]" candidate seen so far (highest step number wins).
    let mut best_runtime: Option<(u32, String, String)> = None;
    // First non-internal vertex, used when no runtime step is present.
    let mut fallback: Option<(String, String)> = None;

    for vertex in &status.vertexes {
        // Resolve the name from this frame, or from a previous frame's record.
        let name = if vertex.name.is_empty() {
            vertex_name_by_vertex_id.get(&vertex.digest).cloned()
        } else {
            Some(vertex.name.clone())
        };

        let Some(name) = name else {
            continue;
        };

        if fallback.is_none() && !name.starts_with("[internal]") {
            fallback = Some((vertex.digest.clone(), name.clone()));
        }

        if let Some(step) = parse_runtime_step(&name) {
            match &best_runtime {
                // Keep the current candidate when its step number is >= this one.
                Some((best_step, _, _)) if *best_step >= step => {}
                _ => {
                    best_runtime = Some((step, vertex.digest.clone(), name.clone()));
                }
            }
        }
    }

    if let Some((_, digest, name)) = best_runtime {
        Some((digest, name))
    } else {
        // Last resort: any vertex whose name can be resolved at all.
        fallback.or_else(|| {
            status.vertexes.iter().find_map(|vertex| {
                let name = if vertex.name.is_empty() {
                    vertex_name_by_vertex_id.get(&vertex.digest).cloned()
                } else {
                    Some(vertex.name.clone())
                };
                name.map(|resolved| (vertex.digest.clone(), resolved))
            })
        })
    }
}
435
/// Extract the step number N from a vertex name like "[runtime N/M] ...".
///
/// Returns `None` when the marker or the step number is absent.
fn parse_runtime_step(name: &str) -> Option<u32> {
    let (_, after_marker) = name.split_once("[runtime ")?;
    let (step, _) = after_marker.split_once('/')?;
    step.trim().parse::<u32>().ok()
}
443
/// Build a short human-readable label for a vertex whose name is unknown.
///
/// Drops a leading "sha256:" and keeps the first 12 characters of the digest.
fn format_vertex_fallback_label(vertex_id: &str) -> String {
    let digest = vertex_id.strip_prefix("sha256:").unwrap_or(vertex_id);
    let short: String = digest.chars().take(12).collect();
    format!("vertex {short}")
}
453
454fn update_export_vertex_from_logs(
455    latest_logs: &[BuildkitLogEntry],
456    vertex_name_by_vertex_id: &HashMap<String, String>,
457    export_vertex_id: &mut Option<String>,
458    export_vertex_name: &mut Option<String>,
459) {
460    if let Some(entry) = latest_logs
461        .iter()
462        .rev()
463        .find(|log| log.message.trim_start().starts_with("exporting to image"))
464    {
465        *export_vertex_id = Some(entry.vertex_id.clone());
466        if let Some(name) = vertex_name_by_vertex_id.get(&entry.vertex_id) {
467            *export_vertex_name = Some(name.clone());
468        }
469    }
470}
471
472fn record_buildkit_logs(
473    state: &mut BuildLogState,
474    latest_logs: &[BuildkitLogEntry],
475    current_vertex_id: &str,
476    current_vertex_name: &str,
477) {
478    for log_entry in latest_logs {
479        let name = state
480            .vertex_name_by_vertex_id
481            .get(&log_entry.vertex_id)
482            .cloned()
483            .or_else(|| {
484                if log_entry.vertex_id == current_vertex_id {
485                    Some(current_vertex_name.to_string())
486                } else {
487                    None
488                }
489            })
490            .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));
491
492        let message = log_entry.message.replace('\r', "").trim_end().to_string();
493        if message.is_empty() {
494            continue;
495        }
496
497        if state.recent_buildkit_logs.len() >= state.build_log_buffer_size {
498            state.recent_buildkit_logs.pop_front();
499        }
500        state
501            .recent_buildkit_logs
502            .push_back(format!("[{name}] {message}"));
503    }
504}
505
/// A single BuildKit log fragment paired with the vertex that produced it.
#[derive(Debug, Clone)]
struct BuildkitLogEntry {
    /// Digest of the producing vertex (as reported by the daemon).
    vertex_id: String,
    /// Log text, decoded lossily from the daemon's raw byte payload.
    message: String,
}
511
512fn append_buildkit_logs(
513    logs: &mut HashMap<String, String>,
514    status: &BuildkitStatusResponse,
515) -> Vec<BuildkitLogEntry> {
516    let mut latest: Vec<BuildkitLogEntry> = Vec::new();
517
518    for log in &status.logs {
519        let vertex_id = log.vertex.clone();
520        let message = String::from_utf8_lossy(&log.msg).to_string();
521        let entry = logs.entry(vertex_id.clone()).or_default();
522        entry.push_str(&message);
523        latest.push(BuildkitLogEntry { vertex_id, message });
524    }
525
526    latest
527}
528
529/// Pull the opencode image from registry with automatic fallback
530///
531/// Tries GHCR first, falls back to Docker Hub on failure.
532/// Returns the full image:tag string on success.
533pub async fn pull_image(
534    client: &DockerClient,
535    tag: Option<&str>,
536    progress: &mut ProgressReporter,
537) -> Result<String, DockerError> {
538    let tag = tag.unwrap_or(IMAGE_TAG_DEFAULT);
539
540    // Try GHCR first
541    debug!("Attempting to pull from GHCR: {}:{}", IMAGE_NAME_GHCR, tag);
542    let ghcr_err = match pull_from_registry(client, IMAGE_NAME_GHCR, tag, progress).await {
543        Ok(()) => {
544            let full_name = format!("{IMAGE_NAME_GHCR}:{tag}");
545            return Ok(full_name);
546        }
547        Err(e) => e,
548    };
549
550    warn!(
551        "GHCR pull failed: {}. Trying Docker Hub fallback...",
552        ghcr_err
553    );
554
555    // Try Docker Hub as fallback
556    debug!(
557        "Attempting to pull from Docker Hub: {}:{}",
558        IMAGE_NAME_DOCKERHUB, tag
559    );
560    match pull_from_registry(client, IMAGE_NAME_DOCKERHUB, tag, progress).await {
561        Ok(()) => {
562            let full_name = format!("{IMAGE_NAME_DOCKERHUB}:{tag}");
563            Ok(full_name)
564        }
565        Err(dockerhub_err) => Err(DockerError::Pull(format!(
566            "Failed to pull from both registries. GHCR: {ghcr_err}. Docker Hub: {dockerhub_err}"
567        ))),
568    }
569}
570
/// Maximum number of retry attempts for pull operations.
///
/// Consumed by `pull_from_registry`, which backs off 1s/2s/4s between attempts.
const MAX_PULL_RETRIES: usize = 3;
573
574/// Pull from a specific registry with retry logic
575async fn pull_from_registry(
576    client: &DockerClient,
577    image: &str,
578    tag: &str,
579    progress: &mut ProgressReporter,
580) -> Result<(), DockerError> {
581    let full_name = format!("{image}:{tag}");
582
583    // Manual retry loop since async closures can't capture mutable references
584    let mut last_error = None;
585    for attempt in 1..=MAX_PULL_RETRIES {
586        debug!(
587            "Pull attempt {}/{} for {}",
588            attempt, MAX_PULL_RETRIES, full_name
589        );
590
591        match do_pull(client, image, tag, progress).await {
592            Ok(()) => return Ok(()),
593            Err(e) => {
594                warn!("Pull attempt {} failed: {}", attempt, e);
595                last_error = Some(e);
596
597                if attempt < MAX_PULL_RETRIES {
598                    // Exponential backoff: 1s, 2s, 4s
599                    let delay_ms = 1000 * (1 << (attempt - 1));
600                    tokio::time::sleep(std::time::Duration::from_millis(delay_ms)).await;
601                }
602            }
603        }
604    }
605
606    Err(last_error.unwrap_or_else(|| {
607        DockerError::Pull(format!(
608            "Pull failed for {full_name} after {MAX_PULL_RETRIES} attempts"
609        ))
610    }))
611}
612
/// Perform the actual pull operation
///
/// Streams `create_image` progress from the daemon, forwarding per-layer
/// download/extract status to the progress reporter.
///
/// # Errors
/// Returns `DockerError::Pull` when the stream yields an error item or fails.
async fn do_pull(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");

    let options = CreateImageOptions {
        from_image: image,
        tag,
        ..Default::default()
    };

    let mut stream = client.inner().create_image(Some(options), None, None);

    // Add main spinner for overall progress
    progress.add_spinner("pull", &format!("Pulling {full_name}..."));

    while let Some(result) = stream.next().await {
        match result {
            Ok(info) => {
                // Handle errors from the stream
                if let Some(error_msg) = info.error {
                    progress.abandon_all(&error_msg);
                    return Err(DockerError::Pull(error_msg));
                }

                // Handle layer progress
                if let Some(layer_id) = &info.id {
                    let status = info.status.as_deref().unwrap_or("");

                    match status {
                        "Already exists" => {
                            progress.finish(layer_id, "Already exists");
                        }
                        "Pull complete" => {
                            progress.finish(layer_id, "Pull complete");
                        }
                        "Downloading" | "Extracting" => {
                            if let Some(progress_detail) = &info.progress_detail {
                                // NOTE(review): `as u64` wraps for negative values;
                                // presumably the daemon only reports non-negative
                                // byte counts — confirm against bollard's types.
                                let current = progress_detail.current.unwrap_or(0) as u64;
                                let total = progress_detail.total.unwrap_or(0) as u64;

                                // Only render a progress bar when a total is known.
                                if total > 0 {
                                    progress.update_layer(layer_id, current, total, status);
                                }
                            }
                        }
                        _ => {
                            // Other statuses (Waiting, Verifying, etc.)
                            progress.update_spinner(layer_id, status);
                        }
                    }
                } else if let Some(status) = &info.status {
                    // Overall status messages (no layer id)
                    progress.update_spinner("pull", status);
                }
            }
            Err(e) => {
                progress.abandon_all("Pull failed");
                return Err(DockerError::Pull(format!("Pull failed: {e}")));
            }
        }
    }

    progress.finish("pull", &format!("Pull complete: {full_name}"));
    Ok(())
}
683
/// Format a build error with recent log context for actionable debugging.
///
/// Output order: the error itself; error-like lines not already in the
/// recent tail; recent BuildKit output; recent classic output (or a "no
/// output" note when both tails are empty); a pattern-based suggestion.
fn format_build_error_with_context(
    error: &str,
    recent_logs: &VecDeque<String>,
    error_logs: &VecDeque<String>,
    recent_buildkit_logs: &VecDeque<String>,
) -> String {
    let mut message = String::from(error);

    // Error-like lines that may have scrolled out of the recent-log window.
    if !error_logs.is_empty() {
        let recent_set: std::collections::HashSet<_> = recent_logs.iter().collect();
        let unique_errors: Vec<_> = error_logs
            .iter()
            .filter(|line| !recent_set.contains(line))
            .collect();

        if !unique_errors.is_empty() {
            message.push_str("\n\nPotential errors detected during build:");
            for line in unique_errors {
                message.push_str("\n  ");
                message.push_str(line);
            }
        }
    }

    if !recent_buildkit_logs.is_empty() {
        message.push_str("\n\nRecent BuildKit output:");
        for line in recent_buildkit_logs {
            message.push_str("\n  ");
            message.push_str(line);
        }
    }

    if !recent_logs.is_empty() {
        message.push_str("\n\nRecent build output:");
        for line in recent_logs {
            message.push_str("\n  ");
            message.push_str(line);
        }
    } else if recent_buildkit_logs.is_empty() {
        message.push_str("\n\nNo build output was received from the Docker daemon.");
        message.push_str("\nThis usually means the build failed before any logs were streamed.");
    }

    // Pattern-matched suggestions for common failure classes.
    let error_lower = error.to_lowercase();
    let suggestion = if ["network", "connection", "timeout"]
        .iter()
        .any(|k| error_lower.contains(k))
    {
        Some("\n\nSuggestion: Check your network connection and Docker's ability to reach the internet.")
    } else if ["disk", "space", "no space"]
        .iter()
        .any(|k| error_lower.contains(k))
    {
        Some("\n\nSuggestion: Free up disk space with 'docker system prune' or check available storage.")
    } else if error_lower.contains("permission") || error_lower.contains("denied") {
        Some("\n\nSuggestion: Check Docker permissions. You may need to add your user to the 'docker' group.")
    } else {
        None
    };
    if let Some(text) = suggestion {
        message.push_str(text);
    }

    message
}
754
755/// Create a gzipped tar archive containing the Dockerfile
756fn create_build_context() -> Result<Vec<u8>, std::io::Error> {
757    let mut archive_buffer = Vec::new();
758
759    {
760        let encoder = GzEncoder::new(&mut archive_buffer, Compression::default());
761        let mut tar = TarBuilder::new(encoder);
762
763        // Add Dockerfile to archive
764        let dockerfile_bytes = DOCKERFILE.as_bytes();
765        let mut header = tar::Header::new_gnu();
766        header.set_path("Dockerfile")?;
767        header.set_size(dockerfile_bytes.len() as u64);
768        header.set_mode(0o644);
769        header.set_cksum();
770
771        tar.append(&header, dockerfile_bytes)?;
772        tar.finish()?;
773
774        // Finish gzip encoding
775        let encoder = tar.into_inner()?;
776        encoder.finish()?;
777    }
778
779    Ok(archive_buffer)
780}
781
#[cfg(test)]
mod tests {
    use super::*;

    // The build context must be a non-empty gzip stream.
    #[test]
    fn create_build_context_succeeds() {
        let context = create_build_context().expect("should create context");
        assert!(!context.is_empty(), "context should not be empty");

        // Verify it's gzip-compressed (gzip magic bytes)
        assert_eq!(context[0], 0x1f, "should be gzip compressed");
        assert_eq!(context[1], 0x8b, "should be gzip compressed");
    }

    #[test]
    fn default_tag_is_latest() {
        assert_eq!(IMAGE_TAG_DEFAULT, "latest");
    }

    // Recent-log tail should appear verbatim in the formatted error.
    #[test]
    fn format_build_error_includes_recent_logs() {
        let mut logs = VecDeque::new();
        logs.push_back("Step 1/5 : FROM ubuntu:22.04".to_string());
        logs.push_back("Step 2/5 : RUN apt-get update".to_string());
        logs.push_back("E: Unable to fetch some archives".to_string());
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();

        let result = format_build_error_with_context(
            "Build failed: exit code 1",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Build failed: exit code 1"));
        assert!(result.contains("Recent build output:"));
        assert!(result.contains("Step 1/5"));
        assert!(result.contains("Unable to fetch"));
    }

    // With no captured logs the "Recent build output" section must be absent.
    #[test]
    fn format_build_error_handles_empty_logs() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result =
            format_build_error_with_context("Stream error", &logs, &error_logs, &buildkit_logs);

        assert!(result.contains("Stream error"));
        assert!(!result.contains("Recent build output:"));
    }

    #[test]
    fn format_build_error_adds_network_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "connection timeout",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Check your network connection"));
    }

    #[test]
    fn format_build_error_adds_disk_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "no space left on device",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Free up disk space"));
    }

    // Error-like lines absent from the recent tail get their own section.
    #[test]
    fn format_build_error_shows_error_lines_separately() {
        let mut recent_logs = VecDeque::new();
        recent_logs.push_back("Compiling foo v1.0".to_string());
        recent_logs.push_back("Successfully installed bar".to_string());

        let mut error_logs = VecDeque::new();
        error_logs.push_back("error: failed to compile dust".to_string());
        error_logs.push_back("error: failed to compile glow".to_string());

        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "Build failed",
            &recent_logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Potential errors detected during build:"));
        assert!(result.contains("failed to compile dust"));
        assert!(result.contains("failed to compile glow"));
    }

    // Matching is case-insensitive; non-error lines must not match.
    #[test]
    fn is_error_line_detects_errors() {
        assert!(is_error_line("error: something failed"));
        assert!(is_error_line("Error: build failed"));
        assert!(is_error_line("Failed to install package"));
        assert!(is_error_line("cannot find module"));
        assert!(is_error_line("Unable to locate package"));
        assert!(!is_error_line("Compiling foo v1.0"));
        assert!(!is_error_line("Successfully installed"));
    }
}