opencode_cloud_core/docker/image.rs

//! Docker image build and pull operations
//!
//! This module provides functionality to build Docker images from the embedded
//! Dockerfile and pull images from registries with progress feedback.

use super::progress::ProgressReporter;
use super::{
    DOCKERFILE, DockerClient, DockerError, IMAGE_NAME_DOCKERHUB, IMAGE_NAME_GHCR, IMAGE_TAG_DEFAULT,
};
use bollard::image::{BuildImageOptions, BuilderVersion, CreateImageOptions};
use bollard::moby::buildkit::v1::StatusResponse as BuildkitStatusResponse;
use bollard::models::BuildInfoAux;
use bytes::Bytes;
use flate2::Compression;
use flate2::write::GzEncoder;
use futures_util::StreamExt;
use std::collections::{HashMap, VecDeque};
use std::env;
use std::time::{SystemTime, UNIX_EPOCH};
use tar::Builder as TarBuilder;
use tracing::{debug, warn};

/// Default number of recent build log lines to capture for error context
const DEFAULT_BUILD_LOG_BUFFER_SIZE: usize = 20;

/// Default number of error lines to capture separately
const DEFAULT_ERROR_LOG_BUFFER_SIZE: usize = 10;

/// Read a log buffer size from env with bounds
fn read_log_buffer_size(var_name: &str, default: usize) -> usize {
    let Ok(value) = env::var(var_name) else {
        return default;
    };
    let Ok(parsed) = value.trim().parse::<usize>() else {
        return default;
    };
    parsed.clamp(5, 500)
}

/// Check if a line looks like an error message
fn is_error_line(line: &str) -> bool {
    let lower = line.to_lowercase();
    lower.contains("error")
        || lower.contains("failed")
        || lower.contains("cannot")
        || lower.contains("unable to")
        || lower.contains("not found")
        || lower.contains("permission denied")
}

/// Check if an image exists locally
pub async fn image_exists(
    client: &DockerClient,
    image: &str,
    tag: &str,
) -> Result<bool, DockerError> {
    let full_name = format!("{image}:{tag}");
    debug!("Checking if image exists: {}", full_name);

    match client.inner().inspect_image(&full_name).await {
        Ok(_) => Ok(true),
        Err(bollard::errors::Error::DockerResponseServerError {
            status_code: 404, ..
        }) => Ok(false),
        Err(e) => Err(DockerError::from(e)),
    }
}

/// Build the opencode image from embedded Dockerfile
///
/// Shows real-time build progress with streaming output.
/// Returns the full image:tag string on success.
///
/// # Arguments
/// * `client` - Docker client
/// * `tag` - Image tag (defaults to IMAGE_TAG_DEFAULT)
/// * `progress` - Progress reporter for build feedback
/// * `no_cache` - If true, build without using Docker layer cache
pub async fn build_image(
    client: &DockerClient,
    tag: Option<&str>,
    progress: &mut ProgressReporter,
    no_cache: bool,
) -> Result<String, DockerError> {
    let tag = tag.unwrap_or(IMAGE_TAG_DEFAULT);
    let full_name = format!("{IMAGE_NAME_GHCR}:{tag}");
    debug!("Building image: {} (no_cache: {})", full_name, no_cache);

    // Create tar archive containing Dockerfile
    let context = create_build_context()
        .map_err(|e| DockerError::Build(format!("Failed to create build context: {e}")))?;

    // Set up build options
    // Explicitly use BuildKit builder to support cache mounts (--mount=type=cache)
    // BuildKit requires a unique session ID for each build
    let session_id = format!(
        "opencode-cloud-build-{}",
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos()
    );
    let options = BuildImageOptions {
        t: full_name.clone(),
        dockerfile: "Dockerfile".to_string(),
        version: BuilderVersion::BuilderBuildKit,
        session: Some(session_id),
        rm: true,
        nocache: no_cache,
        ..Default::default()
    };

    // Create build body from context
    let body = Bytes::from(context);

    // Start build with streaming output
    let mut stream = client.inner().build_image(options, None, Some(body));

    // Add main build spinner (context prefix like "Building image" is set by caller)
    progress.add_spinner("build", "Initializing...");

    let mut maybe_image_id = None;
    let mut log_state = BuildLogState::new();

    while let Some(result) = stream.next().await {
        let Ok(info) = result else {
            return Err(handle_stream_error(
                "Build failed",
                result.expect_err("checked error").to_string(),
                &log_state,
                progress,
            ));
        };

        handle_stream_message(&info, progress, &mut log_state);

        if let Some(error_msg) = info.error {
            progress.abandon_all(&error_msg);
            let context = format_build_error_with_context(
                &error_msg,
                &log_state.recent_logs,
                &log_state.error_logs,
                &log_state.recent_buildkit_logs,
            );
            return Err(DockerError::Build(context));
        }

        if let Some(aux) = info.aux {
            match aux {
                BuildInfoAux::Default(image_id) => {
                    if let Some(id) = image_id.id {
                        maybe_image_id = Some(id);
                    }
                }
                BuildInfoAux::BuildKit(status) => {
                    handle_buildkit_status(&status, progress, &mut log_state);
                }
            }
        }
    }

    let image_id = maybe_image_id.unwrap_or_else(|| "unknown".to_string());
    let finish_msg = format!("Build complete: {image_id}");
    progress.finish("build", &finish_msg);

    Ok(full_name)
}

struct BuildLogState {
    recent_logs: VecDeque<String>,
    error_logs: VecDeque<String>,
    recent_buildkit_logs: VecDeque<String>,
    build_log_buffer_size: usize,
    error_log_buffer_size: usize,
    last_buildkit_vertex: Option<String>,
    last_buildkit_vertex_id: Option<String>,
    buildkit_logs_by_vertex_id: HashMap<String, String>,
    vertex_name_by_vertex_id: HashMap<String, String>,
}

impl BuildLogState {
    fn new() -> Self {
        let build_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_LOG_TAIL",
            DEFAULT_BUILD_LOG_BUFFER_SIZE,
        );
        let error_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_ERROR_TAIL",
            DEFAULT_ERROR_LOG_BUFFER_SIZE,
        );
        Self {
            recent_logs: VecDeque::with_capacity(build_log_buffer_size),
            error_logs: VecDeque::with_capacity(error_log_buffer_size),
            recent_buildkit_logs: VecDeque::with_capacity(build_log_buffer_size),
            build_log_buffer_size,
            error_log_buffer_size,
            last_buildkit_vertex: None,
            last_buildkit_vertex_id: None,
            buildkit_logs_by_vertex_id: HashMap::new(),
            vertex_name_by_vertex_id: HashMap::new(),
        }
    }
}

fn handle_stream_message(
    info: &bollard::models::BuildInfo,
    progress: &mut ProgressReporter,
    state: &mut BuildLogState,
) {
    let Some(stream_msg) = info.stream.as_deref() else {
        return;
    };
    let msg = stream_msg.trim();
    if msg.is_empty() {
        return;
    }

    if progress.is_plain_output() {
        eprint!("{stream_msg}");
    } else {
        let has_runtime_vertex = state
            .last_buildkit_vertex
            .as_deref()
            .is_some_and(|name| name.starts_with("[runtime "));
        let is_internal_msg = msg.contains("[internal]");
        if !(has_runtime_vertex && is_internal_msg) {
            progress.update_spinner("build", stream_msg);
        }
    }

    if state.recent_logs.len() >= state.build_log_buffer_size {
        state.recent_logs.pop_front();
    }
    state.recent_logs.push_back(msg.to_string());

    if is_error_line(msg) {
        if state.error_logs.len() >= state.error_log_buffer_size {
            state.error_logs.pop_front();
        }
        state.error_logs.push_back(msg.to_string());
    }

    if msg.starts_with("Step ") {
        debug!("Build step: {}", msg);
    }
}

fn handle_buildkit_status(
    status: &BuildkitStatusResponse,
    progress: &mut ProgressReporter,
    state: &mut BuildLogState,
) {
    let latest_logs = append_buildkit_logs(&mut state.buildkit_logs_by_vertex_id, status);
    update_buildkit_vertex_names(&mut state.vertex_name_by_vertex_id, status);
    let (vertex_id, vertex_name) =
        match select_latest_buildkit_vertex(status, &state.vertex_name_by_vertex_id) {
            Some((vertex_id, vertex_name)) => (vertex_id, vertex_name),
            None => {
                let Some(log_entry) = latest_logs.last() else {
                    return;
                };
                let name = state
                    .vertex_name_by_vertex_id
                    .get(&log_entry.vertex_id)
                    .cloned()
                    .or_else(|| state.last_buildkit_vertex.clone())
                    .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));
                (log_entry.vertex_id.clone(), name)
            }
        };
    record_buildkit_logs(state, &latest_logs, &vertex_id, &vertex_name);
    state.last_buildkit_vertex_id = Some(vertex_id);
    if state.last_buildkit_vertex.as_deref() != Some(&vertex_name) {
        state.last_buildkit_vertex = Some(vertex_name.clone());
    }

    let message = if progress.is_plain_output() {
        vertex_name
    } else if let Some(log_entry) = latest_logs.last() {
        format!("{vertex_name} · {}", log_entry.message)
    } else {
        vertex_name
    };
    progress.update_spinner("build", &message);

    if progress.is_plain_output() {
        for log_entry in latest_logs {
            eprintln!("[{}] {}", log_entry.vertex_id, log_entry.message);
        }
        return;
    }

    let (Some(current_id), Some(current_name)) = (
        state.last_buildkit_vertex_id.as_ref(),
        state.last_buildkit_vertex.as_ref(),
    ) else {
        return;
    };

    let name = state
        .vertex_name_by_vertex_id
        .get(current_id)
        .unwrap_or(current_name);
    // Keep non-verbose output on the spinner line only.
    let _ = name;
}

fn handle_stream_error(
    prefix: &str,
    error_str: String,
    state: &BuildLogState,
    progress: &mut ProgressReporter,
) -> DockerError {
    progress.abandon_all(prefix);

    let buildkit_hint = if error_str.contains("mount")
        || error_str.contains("--mount")
        || state
            .recent_logs
            .iter()
            .any(|log| log.contains("--mount") && log.contains("cache"))
    {
        "\n\nNote: This Dockerfile uses BuildKit cache mounts (--mount=type=cache).\n\
         The build is configured to use BuildKit, but the Docker daemon may not support it.\n\
         Ensure BuildKit is enabled in Docker Desktop settings and the daemon is restarted."
    } else {
        ""
    };

    let context = format!(
        "{}{}",
        format_build_error_with_context(
            &error_str,
            &state.recent_logs,
            &state.error_logs,
            &state.recent_buildkit_logs,
        ),
        buildkit_hint
    );
    DockerError::Build(context)
}

fn update_buildkit_vertex_names(
    vertex_name_by_vertex_id: &mut HashMap<String, String>,
    status: &BuildkitStatusResponse,
) {
    for vertex in &status.vertexes {
        if vertex.name.is_empty() {
            continue;
        }
        vertex_name_by_vertex_id
            .entry(vertex.digest.clone())
            .or_insert_with(|| vertex.name.clone());
    }
}

fn select_latest_buildkit_vertex(
    status: &BuildkitStatusResponse,
    vertex_name_by_vertex_id: &HashMap<String, String>,
) -> Option<(String, String)> {
    let mut best_runtime: Option<(u32, String, String)> = None;
    let mut fallback: Option<(String, String)> = None;

    for vertex in &status.vertexes {
        let name = if vertex.name.is_empty() {
            vertex_name_by_vertex_id.get(&vertex.digest).cloned()
        } else {
            Some(vertex.name.clone())
        };

        let Some(name) = name else {
            continue;
        };

        if fallback.is_none() && !name.starts_with("[internal]") {
            fallback = Some((vertex.digest.clone(), name.clone()));
        }

        if let Some(step) = parse_runtime_step(&name) {
            match &best_runtime {
                Some((best_step, _, _)) if *best_step >= step => {}
                _ => {
                    best_runtime = Some((step, vertex.digest.clone(), name.clone()));
                }
            }
        }
    }

    if let Some((_, digest, name)) = best_runtime {
        Some((digest, name))
    } else {
        fallback.or_else(|| {
            status.vertexes.iter().find_map(|vertex| {
                let name = if vertex.name.is_empty() {
                    vertex_name_by_vertex_id.get(&vertex.digest).cloned()
                } else {
                    Some(vertex.name.clone())
                };
                name.map(|resolved| (vertex.digest.clone(), resolved))
            })
        })
    }
}

fn parse_runtime_step(name: &str) -> Option<u32> {
    let prefix = "[runtime ";
    let start = name.find(prefix)? + prefix.len();
    let rest = &name[start..];
    let end = rest.find('/')?;
    rest[..end].trim().parse::<u32>().ok()
}

fn format_vertex_fallback_label(vertex_id: &str) -> String {
    let short = vertex_id
        .strip_prefix("sha256:")
        .unwrap_or(vertex_id)
        .chars()
        .take(12)
        .collect::<String>();
    format!("vertex {short}")
}

fn record_buildkit_logs(
    state: &mut BuildLogState,
    latest_logs: &[BuildkitLogEntry],
    current_vertex_id: &str,
    current_vertex_name: &str,
) {
    for log_entry in latest_logs {
        let name = state
            .vertex_name_by_vertex_id
            .get(&log_entry.vertex_id)
            .cloned()
            .or_else(|| {
                if log_entry.vertex_id == current_vertex_id {
                    Some(current_vertex_name.to_string())
                } else {
                    None
                }
            })
            .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));

        let message = log_entry.message.replace('\r', "").trim_end().to_string();
        if message.is_empty() {
            continue;
        }

        if state.recent_buildkit_logs.len() >= state.build_log_buffer_size {
            state.recent_buildkit_logs.pop_front();
        }
        state
            .recent_buildkit_logs
            .push_back(format!("[{name}] {message}"));
    }
}

#[derive(Debug, Clone)]
struct BuildkitLogEntry {
    vertex_id: String,
    message: String,
}

fn append_buildkit_logs(
    logs: &mut HashMap<String, String>,
    status: &BuildkitStatusResponse,
) -> Vec<BuildkitLogEntry> {
    let mut latest: Vec<BuildkitLogEntry> = Vec::new();

    for log in &status.logs {
        let vertex_id = log.vertex.clone();
        let message = String::from_utf8_lossy(&log.msg).to_string();
        let entry = logs.entry(vertex_id.clone()).or_default();
        entry.push_str(&message);
        latest.push(BuildkitLogEntry { vertex_id, message });
    }

    latest
}

/// Pull the opencode image from registry with automatic fallback
///
/// Tries GHCR first, falls back to Docker Hub on failure.
/// Returns the full image:tag string on success.
pub async fn pull_image(
    client: &DockerClient,
    tag: Option<&str>,
    progress: &mut ProgressReporter,
) -> Result<String, DockerError> {
    let tag = tag.unwrap_or(IMAGE_TAG_DEFAULT);

    // Try GHCR first
    debug!("Attempting to pull from GHCR: {}:{}", IMAGE_NAME_GHCR, tag);
    let ghcr_err = match pull_from_registry(client, IMAGE_NAME_GHCR, tag, progress).await {
        Ok(()) => {
            let full_name = format!("{IMAGE_NAME_GHCR}:{tag}");
            return Ok(full_name);
        }
        Err(e) => e,
    };

    warn!(
        "GHCR pull failed: {}. Trying Docker Hub fallback...",
        ghcr_err
    );

    // Try Docker Hub as fallback
    debug!(
        "Attempting to pull from Docker Hub: {}:{}",
        IMAGE_NAME_DOCKERHUB, tag
    );
    match pull_from_registry(client, IMAGE_NAME_DOCKERHUB, tag, progress).await {
        Ok(()) => {
            let full_name = format!("{IMAGE_NAME_DOCKERHUB}:{tag}");
            Ok(full_name)
        }
        Err(dockerhub_err) => Err(DockerError::Pull(format!(
            "Failed to pull from both registries. GHCR: {ghcr_err}. Docker Hub: {dockerhub_err}"
        ))),
    }
}

/// Maximum number of retry attempts for pull operations
const MAX_PULL_RETRIES: usize = 3;

/// Pull from a specific registry with retry logic
async fn pull_from_registry(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");

    // Manual retry loop since async closures can't capture mutable references
    let mut last_error = None;
    for attempt in 1..=MAX_PULL_RETRIES {
        debug!(
            "Pull attempt {}/{} for {}",
            attempt, MAX_PULL_RETRIES, full_name
        );

        match do_pull(client, image, tag, progress).await {
            Ok(()) => return Ok(()),
            Err(e) => {
                warn!("Pull attempt {} failed: {}", attempt, e);
                last_error = Some(e);

                if attempt < MAX_PULL_RETRIES {
                    // Exponential backoff between attempts: 1s, then 2s
                    let delay_ms = 1000 * (1 << (attempt - 1));
                    tokio::time::sleep(std::time::Duration::from_millis(delay_ms)).await;
                }
            }
        }
    }

    Err(last_error.unwrap_or_else(|| {
        DockerError::Pull(format!(
            "Pull failed for {full_name} after {MAX_PULL_RETRIES} attempts"
        ))
    }))
}

/// Perform the actual pull operation
async fn do_pull(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");

    let options = CreateImageOptions {
        from_image: image,
        tag,
        ..Default::default()
    };

    let mut stream = client.inner().create_image(Some(options), None, None);

    // Add main spinner for overall progress
    progress.add_spinner("pull", &format!("Pulling {full_name}..."));

    while let Some(result) = stream.next().await {
        match result {
            Ok(info) => {
                // Handle errors from the stream
                if let Some(error_msg) = info.error {
                    progress.abandon_all(&error_msg);
                    return Err(DockerError::Pull(error_msg));
                }

                // Handle layer progress
                if let Some(layer_id) = &info.id {
                    let status = info.status.as_deref().unwrap_or("");

                    match status {
                        "Already exists" => {
                            progress.finish(layer_id, "Already exists");
                        }
                        "Pull complete" => {
                            progress.finish(layer_id, "Pull complete");
                        }
                        "Downloading" | "Extracting" => {
                            if let Some(progress_detail) = &info.progress_detail {
                                let current = progress_detail.current.unwrap_or(0) as u64;
                                let total = progress_detail.total.unwrap_or(0) as u64;

                                if total > 0 {
                                    progress.update_layer(layer_id, current, total, status);
                                }
                            }
                        }
                        _ => {
                            // Other statuses (Waiting, Verifying, etc.)
                            progress.update_spinner(layer_id, status);
                        }
                    }
                } else if let Some(status) = &info.status {
                    // Overall status messages (no layer id)
                    progress.update_spinner("pull", status);
                }
            }
            Err(e) => {
                progress.abandon_all("Pull failed");
                return Err(DockerError::Pull(format!("Pull failed: {e}")));
            }
        }
    }

    progress.finish("pull", &format!("Pull complete: {full_name}"));
    Ok(())
}

/// Format a build error with recent log context for actionable debugging
fn format_build_error_with_context(
    error: &str,
    recent_logs: &VecDeque<String>,
    error_logs: &VecDeque<String>,
    recent_buildkit_logs: &VecDeque<String>,
) -> String {
    let mut message = String::new();

    // Add main error message
    message.push_str(error);

    // Add captured error lines if they differ from recent logs
    // (these are error-like lines that may have scrolled off)
    if !error_logs.is_empty() {
        // Check if error_logs contains lines not in recent_logs
        let recent_set: std::collections::HashSet<_> = recent_logs.iter().collect();
        let unique_errors: Vec<_> = error_logs
            .iter()
            .filter(|line| !recent_set.contains(line))
            .collect();

        if !unique_errors.is_empty() {
            message.push_str("\n\nPotential errors detected during build:");
            for line in unique_errors {
                message.push_str("\n  ");
                message.push_str(line);
            }
        }
    }

    // Add recent BuildKit log context if available
    if !recent_buildkit_logs.is_empty() {
        message.push_str("\n\nRecent BuildKit output:");
        for line in recent_buildkit_logs {
            message.push_str("\n  ");
            message.push_str(line);
        }
    }

    // Add recent log context if available
    if !recent_logs.is_empty() {
        message.push_str("\n\nRecent build output:");
        for line in recent_logs {
            message.push_str("\n  ");
            message.push_str(line);
        }
    } else if recent_buildkit_logs.is_empty() {
        message.push_str("\n\nNo build output was received from the Docker daemon.");
        message.push_str("\nThis usually means the build failed before any logs were streamed.");
    }

    // Add actionable suggestions based on common error patterns
    let error_lower = error.to_lowercase();
    if error_lower.contains("network")
        || error_lower.contains("connection")
        || error_lower.contains("timeout")
    {
        message.push_str("\n\nSuggestion: Check your network connection and Docker's ability to reach the internet.");
    } else if error_lower.contains("disk")
        || error_lower.contains("space")
        || error_lower.contains("no space")
    {
        message.push_str("\n\nSuggestion: Free up disk space with 'docker system prune' or check available storage.");
    } else if error_lower.contains("permission") || error_lower.contains("denied") {
        message.push_str("\n\nSuggestion: Check Docker permissions. You may need to add your user to the 'docker' group.");
    }

    message
}

/// Create a gzipped tar archive containing the Dockerfile
fn create_build_context() -> Result<Vec<u8>, std::io::Error> {
    let mut archive_buffer = Vec::new();

    {
        let encoder = GzEncoder::new(&mut archive_buffer, Compression::default());
        let mut tar = TarBuilder::new(encoder);

        // Add Dockerfile to archive
        let dockerfile_bytes = DOCKERFILE.as_bytes();
        let mut header = tar::Header::new_gnu();
        header.set_path("Dockerfile")?;
        header.set_size(dockerfile_bytes.len() as u64);
        header.set_mode(0o644);
        header.set_cksum();

        tar.append(&header, dockerfile_bytes)?;
        tar.finish()?;

        // Finish gzip encoding
        let encoder = tar.into_inner()?;
        encoder.finish()?;
    }

    Ok(archive_buffer)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn create_build_context_succeeds() {
        let context = create_build_context().expect("should create context");
        assert!(!context.is_empty(), "context should not be empty");

        // Verify it's gzip-compressed (gzip magic bytes)
        assert_eq!(context[0], 0x1f, "should be gzip compressed");
        assert_eq!(context[1], 0x8b, "should be gzip compressed");
    }
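
    // Added as an illustrative check (not part of the original suite): decode the gzipped
    // tar produced by create_build_context and confirm it holds a "Dockerfile" entry whose
    // contents match the embedded DOCKERFILE constant.
    #[test]
    fn build_context_contains_dockerfile() {
        use flate2::read::GzDecoder;
        use std::io::Read;
        use tar::Archive;

        let context = create_build_context().expect("should create context");
        let mut archive = Archive::new(GzDecoder::new(context.as_slice()));

        let mut found = false;
        for entry in archive.entries().expect("should list archive entries") {
            let mut entry = entry.expect("should read archive entry");
            let path = entry.path().expect("entry should have a path").into_owned();
            if path.to_str() == Some("Dockerfile") {
                let mut contents = String::new();
                entry
                    .read_to_string(&mut contents)
                    .expect("should read Dockerfile contents");
                assert_eq!(contents, DOCKERFILE);
                found = true;
            }
        }
        assert!(found, "archive should contain a Dockerfile entry");
    }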

    #[test]
    fn default_tag_is_latest() {
        assert_eq!(IMAGE_TAG_DEFAULT, "latest");
    }

    #[test]
    fn format_build_error_includes_recent_logs() {
        let mut logs = VecDeque::new();
        logs.push_back("Step 1/5 : FROM ubuntu:22.04".to_string());
        logs.push_back("Step 2/5 : RUN apt-get update".to_string());
        logs.push_back("E: Unable to fetch some archives".to_string());
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();

        let result = format_build_error_with_context(
            "Build failed: exit code 1",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Build failed: exit code 1"));
        assert!(result.contains("Recent build output:"));
        assert!(result.contains("Step 1/5"));
        assert!(result.contains("Unable to fetch"));
    }

    #[test]
    fn format_build_error_handles_empty_logs() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result =
            format_build_error_with_context("Stream error", &logs, &error_logs, &buildkit_logs);

        assert!(result.contains("Stream error"));
        assert!(!result.contains("Recent build output:"));
    }

    #[test]
    fn format_build_error_adds_network_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "connection timeout",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Check your network connection"));
    }

    #[test]
    fn format_build_error_adds_disk_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "no space left on device",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Free up disk space"));
    }

    #[test]
    fn format_build_error_shows_error_lines_separately() {
        let mut recent_logs = VecDeque::new();
        recent_logs.push_back("Compiling foo v1.0".to_string());
        recent_logs.push_back("Successfully installed bar".to_string());

        let mut error_logs = VecDeque::new();
        error_logs.push_back("error: failed to compile dust".to_string());
        error_logs.push_back("error: failed to compile glow".to_string());

        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "Build failed",
            &recent_logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Potential errors detected during build:"));
        assert!(result.contains("failed to compile dust"));
        assert!(result.contains("failed to compile glow"));
    }

    #[test]
    fn is_error_line_detects_errors() {
        assert!(is_error_line("error: something failed"));
        assert!(is_error_line("Error: build failed"));
        assert!(is_error_line("Failed to install package"));
        assert!(is_error_line("cannot find module"));
        assert!(is_error_line("Unable to locate package"));
        assert!(!is_error_line("Compiling foo v1.0"));
        assert!(!is_error_line("Successfully installed"));
    }
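
    // Added as illustrative checks (not part of the original suite) for the BuildKit display
    // helpers and the env-driven buffer sizing. The env var name below is hypothetical and
    // assumed to be unset, so only the default fallback path is exercised.
    #[test]
    fn parse_runtime_step_extracts_step_number() {
        assert_eq!(parse_runtime_step("[runtime 3/42] RUN cargo build"), Some(3));
        assert_eq!(parse_runtime_step("[internal] load build definition"), None);
        assert_eq!(parse_runtime_step("[runtime x/5] RUN true"), None);
    }

    #[test]
    fn format_vertex_fallback_label_shortens_digest() {
        assert_eq!(
            format_vertex_fallback_label("sha256:0123456789abcdef0123"),
            "vertex 0123456789ab"
        );
        assert_eq!(format_vertex_fallback_label("abc"), "vertex abc");
    }

    #[test]
    fn read_log_buffer_size_falls_back_to_default() {
        // Hypothetical variable name, assumed not to be set in the test environment.
        let size = read_log_buffer_size("OPENCODE_DOCKER_TEST_UNSET_BUFFER_VAR", 20);
        assert_eq!(size, 20);
    }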
}