// opencode_cloud_core/docker/image.rs
1//! Docker image build and pull operations
2//!
3//! This module provides functionality to build Docker images from the embedded
4//! Dockerfile and pull images from registries with progress feedback.
5
6use super::progress::ProgressReporter;
7use super::{
8    DOCKERFILE, DockerClient, DockerError, IMAGE_NAME_DOCKERHUB, IMAGE_NAME_GHCR, IMAGE_TAG_DEFAULT,
9};
10use bollard::image::{BuildImageOptions, BuilderVersion, CreateImageOptions};
11use bollard::moby::buildkit::v1::StatusResponse as BuildkitStatusResponse;
12use bollard::models::BuildInfoAux;
13use bytes::Bytes;
14use flate2::Compression;
15use flate2::write::GzEncoder;
16use futures_util::StreamExt;
17use std::collections::{HashMap, VecDeque};
18use std::env;
19use std::time::{SystemTime, UNIX_EPOCH};
20use tar::Builder as TarBuilder;
21use tracing::{debug, warn};
22
/// Default number of recent build log lines kept for error context
const DEFAULT_BUILD_LOG_BUFFER_SIZE: usize = 20;

/// Default number of error-like lines kept in a separate buffer
const DEFAULT_ERROR_LOG_BUFFER_SIZE: usize = 10;

/// Read a log buffer size from the environment, clamped to [5, 500].
///
/// Falls back to `default` when the variable is unset or does not parse
/// as an unsigned integer.
fn read_log_buffer_size(var_name: &str, default: usize) -> usize {
    env::var(var_name)
        .ok()
        .and_then(|raw| raw.trim().parse::<usize>().ok())
        .map_or(default, |n| n.clamp(5, 500))
}
39
/// Heuristically classify a build log line as error-like.
///
/// Matching is case-insensitive against a fixed set of substrings.
fn is_error_line(line: &str) -> bool {
    const MARKERS: [&str; 6] = [
        "error",
        "failed",
        "cannot",
        "unable to",
        "not found",
        "permission denied",
    ];
    let lower = line.to_lowercase();
    MARKERS.iter().any(|marker| lower.contains(marker))
}
50
51/// Check if an image exists locally
52pub async fn image_exists(
53    client: &DockerClient,
54    image: &str,
55    tag: &str,
56) -> Result<bool, DockerError> {
57    let full_name = format!("{image}:{tag}");
58    debug!("Checking if image exists: {}", full_name);
59
60    match client.inner().inspect_image(&full_name).await {
61        Ok(_) => Ok(true),
62        Err(bollard::errors::Error::DockerResponseServerError {
63            status_code: 404, ..
64        }) => Ok(false),
65        Err(e) => Err(DockerError::from(e)),
66    }
67}
68
69/// Build the opencode image from embedded Dockerfile
70///
71/// Shows real-time build progress with streaming output.
72/// Returns the full image:tag string on success.
73///
74/// # Arguments
75/// * `client` - Docker client
76/// * `tag` - Image tag (defaults to IMAGE_TAG_DEFAULT)
77/// * `progress` - Progress reporter for build feedback
78/// * `no_cache` - If true, build without using Docker layer cache
79pub async fn build_image(
80    client: &DockerClient,
81    tag: Option<&str>,
82    progress: &mut ProgressReporter,
83    no_cache: bool,
84    build_args: Option<HashMap<String, String>>,
85) -> Result<String, DockerError> {
86    let tag = tag.unwrap_or(IMAGE_TAG_DEFAULT);
87    let full_name = format!("{IMAGE_NAME_GHCR}:{tag}");
88    debug!("Building image: {} (no_cache: {})", full_name, no_cache);
89
90    // Create tar archive containing Dockerfile
91    let context = create_build_context()
92        .map_err(|e| DockerError::Build(format!("Failed to create build context: {e}")))?;
93
94    // Set up build options
95    // Explicitly use BuildKit builder to support cache mounts (--mount=type=cache)
96    // BuildKit requires a unique session ID for each build
97    let session_id = format!(
98        "opencode-cloud-build-{}",
99        SystemTime::now()
100            .duration_since(UNIX_EPOCH)
101            .unwrap_or_default()
102            .as_nanos()
103    );
104    let build_args = build_args.unwrap_or_default();
105    let options = BuildImageOptions {
106        t: full_name.clone(),
107        dockerfile: "Dockerfile".to_string(),
108        version: BuilderVersion::BuilderBuildKit,
109        session: Some(session_id),
110        rm: true,
111        nocache: no_cache,
112        buildargs: build_args,
113        ..Default::default()
114    };
115
116    // Create build body from context
117    let body = Bytes::from(context);
118
119    // Start build with streaming output
120    let mut stream = client.inner().build_image(options, None, Some(body));
121
122    // Add main build spinner (context prefix like "Building image" is set by caller)
123    progress.add_spinner("build", "Initializing...");
124
125    let mut maybe_image_id = None;
126    let mut log_state = BuildLogState::new();
127
128    while let Some(result) = stream.next().await {
129        let Ok(info) = result else {
130            return Err(handle_stream_error(
131                "Build failed",
132                result.expect_err("checked error").to_string(),
133                &log_state,
134                progress,
135            ));
136        };
137
138        handle_stream_message(&info, progress, &mut log_state);
139
140        if let Some(error_msg) = info.error {
141            progress.abandon_all(&error_msg);
142            let context = format_build_error_with_context(
143                &error_msg,
144                &log_state.recent_logs,
145                &log_state.error_logs,
146                &log_state.recent_buildkit_logs,
147            );
148            return Err(DockerError::Build(context));
149        }
150
151        if let Some(aux) = info.aux {
152            match aux {
153                BuildInfoAux::Default(image_id) => {
154                    if let Some(id) = image_id.id {
155                        maybe_image_id = Some(id);
156                    }
157                }
158                BuildInfoAux::BuildKit(status) => {
159                    handle_buildkit_status(&status, progress, &mut log_state);
160                }
161            }
162        }
163    }
164
165    let image_id = maybe_image_id.unwrap_or_else(|| "unknown".to_string());
166    let finish_msg = format!("Build complete: {image_id}");
167    progress.finish("build", &finish_msg);
168
169    Ok(full_name)
170}
171
/// Rolling buffers and vertex bookkeeping captured while a build streams,
/// used to attach recent output to error messages.
struct BuildLogState {
    // Most recent classic-stream log lines (bounded ring).
    recent_logs: VecDeque<String>,
    // Error-like lines kept separately so they survive scroll-off.
    error_logs: VecDeque<String>,
    // Most recent BuildKit log lines, each prefixed with its vertex name.
    recent_buildkit_logs: VecDeque<String>,
    // Capacity bound for `recent_logs` and `recent_buildkit_logs`.
    build_log_buffer_size: usize,
    // Capacity bound for `error_logs`.
    error_log_buffer_size: usize,
    // Name of the vertex most recently surfaced on the spinner.
    last_buildkit_vertex: Option<String>,
    // Digest of the vertex most recently surfaced on the spinner.
    last_buildkit_vertex_id: Option<String>,
    // Accumulated raw log text per vertex digest.
    buildkit_logs_by_vertex_id: HashMap<String, String>,
    // First non-empty name observed for each vertex digest.
    vertex_name_by_vertex_id: HashMap<String, String>,
}
183
impl BuildLogState {
    /// Create empty, bounded buffers.
    ///
    /// Sizes may be overridden via the `OPENCODE_DOCKER_BUILD_LOG_TAIL` and
    /// `OPENCODE_DOCKER_BUILD_ERROR_TAIL` environment variables (clamped by
    /// `read_log_buffer_size`).
    fn new() -> Self {
        let build_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_LOG_TAIL",
            DEFAULT_BUILD_LOG_BUFFER_SIZE,
        );
        let error_log_buffer_size = read_log_buffer_size(
            "OPENCODE_DOCKER_BUILD_ERROR_TAIL",
            DEFAULT_ERROR_LOG_BUFFER_SIZE,
        );
        Self {
            recent_logs: VecDeque::with_capacity(build_log_buffer_size),
            error_logs: VecDeque::with_capacity(error_log_buffer_size),
            recent_buildkit_logs: VecDeque::with_capacity(build_log_buffer_size),
            build_log_buffer_size,
            error_log_buffer_size,
            last_buildkit_vertex: None,
            last_buildkit_vertex_id: None,
            buildkit_logs_by_vertex_id: HashMap::new(),
            vertex_name_by_vertex_id: HashMap::new(),
        }
    }
}
207
/// Process a classic (non-BuildKit) build stream message.
///
/// Updates the spinner (or echoes to stderr in plain mode) and records the
/// line in the rolling log buffers for later error context.
fn handle_stream_message(
    info: &bollard::models::BuildInfo,
    progress: &mut ProgressReporter,
    state: &mut BuildLogState,
) {
    let Some(stream_msg) = info.stream.as_deref() else {
        return;
    };
    let msg = stream_msg.trim();
    if msg.is_empty() {
        return;
    }

    if progress.is_plain_output() {
        // Plain mode: echo the raw (untrimmed) message to stderr.
        eprint!("{stream_msg}");
    } else {
        // Avoid clobbering a "[runtime N/M]" spinner line with noisy
        // "[internal]" messages from BuildKit housekeeping steps.
        let has_runtime_vertex = state
            .last_buildkit_vertex
            .as_deref()
            .is_some_and(|name| name.starts_with("[runtime "));
        let is_internal_msg = msg.contains("[internal]");
        if !(has_runtime_vertex && is_internal_msg) {
            progress.update_spinner("build", stream_msg);
        }
    }

    // Bounded ring buffer of recent output lines.
    if state.recent_logs.len() >= state.build_log_buffer_size {
        state.recent_logs.pop_front();
    }
    state.recent_logs.push_back(msg.to_string());

    // Error-like lines are kept separately so they survive scroll-off.
    if is_error_line(msg) {
        if state.error_logs.len() >= state.error_log_buffer_size {
            state.error_logs.pop_front();
        }
        state.error_logs.push_back(msg.to_string());
    }

    if msg.starts_with("Step ") {
        debug!("Build step: {}", msg);
    }
}
250
251fn handle_buildkit_status(
252    status: &BuildkitStatusResponse,
253    progress: &mut ProgressReporter,
254    state: &mut BuildLogState,
255) {
256    let latest_logs = append_buildkit_logs(&mut state.buildkit_logs_by_vertex_id, status);
257    update_buildkit_vertex_names(&mut state.vertex_name_by_vertex_id, status);
258    let (vertex_id, vertex_name) =
259        match select_latest_buildkit_vertex(status, &state.vertex_name_by_vertex_id) {
260            Some((vertex_id, vertex_name)) => (vertex_id, vertex_name),
261            None => {
262                let Some(log_entry) = latest_logs.last() else {
263                    return;
264                };
265                let name = state
266                    .vertex_name_by_vertex_id
267                    .get(&log_entry.vertex_id)
268                    .cloned()
269                    .or_else(|| state.last_buildkit_vertex.clone())
270                    .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));
271                (log_entry.vertex_id.clone(), name)
272            }
273        };
274    record_buildkit_logs(state, &latest_logs, &vertex_id, &vertex_name);
275    state.last_buildkit_vertex_id = Some(vertex_id);
276    if state.last_buildkit_vertex.as_deref() != Some(&vertex_name) {
277        state.last_buildkit_vertex = Some(vertex_name.clone());
278    }
279
280    let message = if progress.is_plain_output() {
281        vertex_name
282    } else if let Some(log_entry) = latest_logs.last() {
283        format!("{vertex_name} ยท {}", log_entry.message)
284    } else {
285        vertex_name
286    };
287    progress.update_spinner("build", &message);
288
289    if progress.is_plain_output() {
290        for log_entry in latest_logs {
291            eprintln!("[{}] {}", log_entry.vertex_id, log_entry.message);
292        }
293        return;
294    }
295
296    let (Some(current_id), Some(current_name)) = (
297        state.last_buildkit_vertex_id.as_ref(),
298        state.last_buildkit_vertex.as_ref(),
299    ) else {
300        return;
301    };
302
303    let name = state
304        .vertex_name_by_vertex_id
305        .get(current_id)
306        .unwrap_or(current_name);
307    // Keep non-verbose output on the spinner line only.
308    let _ = name;
309}
310
/// Convert a transport-level build stream error into a `DockerError::Build`
/// carrying recent log context and, when relevant, a BuildKit hint.
fn handle_stream_error(
    prefix: &str,
    error_str: String,
    state: &BuildLogState,
    progress: &mut ProgressReporter,
) -> DockerError {
    progress.abandon_all(prefix);

    // Heuristic: surface the BuildKit hint when the failure or recent logs
    // look cache-mount related.
    let buildkit_hint = if error_str.contains("mount")
        || error_str.contains("--mount")
        || state
            .recent_logs
            .iter()
            .any(|log| log.contains("--mount") && log.contains("cache"))
    {
        "\n\nNote: This Dockerfile uses BuildKit cache mounts (--mount=type=cache).\n\
         The build is configured to use BuildKit, but the Docker daemon may not support it.\n\
         Ensure BuildKit is enabled in Docker Desktop settings and the daemon is restarted."
    } else {
        ""
    };

    let context = format!(
        "{}{}",
        format_build_error_with_context(
            &error_str,
            &state.recent_logs,
            &state.error_logs,
            &state.recent_buildkit_logs,
        ),
        buildkit_hint
    );
    DockerError::Build(context)
}
345
346fn update_buildkit_vertex_names(
347    vertex_name_by_vertex_id: &mut HashMap<String, String>,
348    status: &BuildkitStatusResponse,
349) {
350    for vertex in &status.vertexes {
351        if vertex.name.is_empty() {
352            continue;
353        }
354        vertex_name_by_vertex_id
355            .entry(vertex.digest.clone())
356            .or_insert_with(|| vertex.name.clone());
357    }
358}
359
/// Choose which BuildKit vertex to display for a status update.
///
/// Preference order:
/// 1. the highest-numbered `[runtime N/M]` step in this update,
/// 2. the first vertex whose name does not start with `[internal]`,
/// 3. any vertex with a resolvable name.
///
/// Returns `(digest, name)`, or `None` if no vertex name can be resolved.
fn select_latest_buildkit_vertex(
    status: &BuildkitStatusResponse,
    vertex_name_by_vertex_id: &HashMap<String, String>,
) -> Option<(String, String)> {
    // Best `[runtime N/M]` candidate seen so far: (step, digest, name).
    let mut best_runtime: Option<(u32, String, String)> = None;
    // First non-internal vertex, used when no runtime step is present.
    let mut fallback: Option<(String, String)> = None;

    for vertex in &status.vertexes {
        // Resolve a display name: this update's name first, then the cache.
        let name = if vertex.name.is_empty() {
            vertex_name_by_vertex_id.get(&vertex.digest).cloned()
        } else {
            Some(vertex.name.clone())
        };

        let Some(name) = name else {
            continue;
        };

        if fallback.is_none() && !name.starts_with("[internal]") {
            fallback = Some((vertex.digest.clone(), name.clone()));
        }

        // Track the furthest-along runtime step.
        if let Some(step) = parse_runtime_step(&name) {
            match &best_runtime {
                Some((best_step, _, _)) if *best_step >= step => {}
                _ => {
                    best_runtime = Some((step, vertex.digest.clone(), name.clone()));
                }
            }
        }
    }

    if let Some((_, digest, name)) = best_runtime {
        Some((digest, name))
    } else {
        // No runtime step: use the non-internal fallback, or failing that,
        // any vertex whose name resolves.
        fallback.or_else(|| {
            status.vertexes.iter().find_map(|vertex| {
                let name = if vertex.name.is_empty() {
                    vertex_name_by_vertex_id.get(&vertex.digest).cloned()
                } else {
                    Some(vertex.name.clone())
                };
                name.map(|resolved| (vertex.digest.clone(), resolved))
            })
        })
    }
}
407
/// Extract `N` from a vertex name containing `[runtime N/M]`.
///
/// Returns `None` when the marker or the step number is absent/unparsable.
fn parse_runtime_step(name: &str) -> Option<u32> {
    let (_, after_marker) = name.split_once("[runtime ")?;
    let (step_text, _) = after_marker.split_once('/')?;
    step_text.trim().parse::<u32>().ok()
}
415
/// Build a short human-readable label from a vertex digest.
///
/// Drops a leading `sha256:` and keeps at most the first 12 characters.
fn format_vertex_fallback_label(vertex_id: &str) -> String {
    let bare = vertex_id.strip_prefix("sha256:").unwrap_or(vertex_id);
    let short: String = bare.chars().take(12).collect();
    format!("vertex {short}")
}
425
/// Append newly-received BuildKit log lines to the rolling buffer.
///
/// Each stored line is prefixed with the best-known vertex name so error
/// context reads as `[name] message`.
fn record_buildkit_logs(
    state: &mut BuildLogState,
    latest_logs: &[BuildkitLogEntry],
    current_vertex_id: &str,
    current_vertex_name: &str,
) {
    for log_entry in latest_logs {
        // Resolve a display name: cached name, then the currently-selected
        // vertex, then a shortened-digest fallback label.
        let name = state
            .vertex_name_by_vertex_id
            .get(&log_entry.vertex_id)
            .cloned()
            .or_else(|| {
                if log_entry.vertex_id == current_vertex_id {
                    Some(current_vertex_name.to_string())
                } else {
                    None
                }
            })
            .unwrap_or_else(|| format_vertex_fallback_label(&log_entry.vertex_id));

        // Strip carriage returns (progress-bar redraws) and trailing space.
        let message = log_entry.message.replace('\r', "").trim_end().to_string();
        if message.is_empty() {
            continue;
        }

        // Bounded ring buffer, sharing the classic-stream size limit.
        if state.recent_buildkit_logs.len() >= state.build_log_buffer_size {
            state.recent_buildkit_logs.pop_front();
        }
        state
            .recent_buildkit_logs
            .push_back(format!("[{name}] {message}"));
    }
}
459
/// A single log line emitted by a BuildKit vertex.
#[derive(Debug, Clone)]
struct BuildkitLogEntry {
    // Digest of the vertex that produced the line.
    vertex_id: String,
    // Raw message text (may contain carriage returns).
    message: String,
}
465
466fn append_buildkit_logs(
467    logs: &mut HashMap<String, String>,
468    status: &BuildkitStatusResponse,
469) -> Vec<BuildkitLogEntry> {
470    let mut latest: Vec<BuildkitLogEntry> = Vec::new();
471
472    for log in &status.logs {
473        let vertex_id = log.vertex.clone();
474        let message = String::from_utf8_lossy(&log.msg).to_string();
475        let entry = logs.entry(vertex_id.clone()).or_default();
476        entry.push_str(&message);
477        latest.push(BuildkitLogEntry { vertex_id, message });
478    }
479
480    latest
481}
482
483/// Pull the opencode image from registry with automatic fallback
484///
485/// Tries GHCR first, falls back to Docker Hub on failure.
486/// Returns the full image:tag string on success.
487pub async fn pull_image(
488    client: &DockerClient,
489    tag: Option<&str>,
490    progress: &mut ProgressReporter,
491) -> Result<String, DockerError> {
492    let tag = tag.unwrap_or(IMAGE_TAG_DEFAULT);
493
494    // Try GHCR first
495    debug!("Attempting to pull from GHCR: {}:{}", IMAGE_NAME_GHCR, tag);
496    let ghcr_err = match pull_from_registry(client, IMAGE_NAME_GHCR, tag, progress).await {
497        Ok(()) => {
498            let full_name = format!("{IMAGE_NAME_GHCR}:{tag}");
499            return Ok(full_name);
500        }
501        Err(e) => e,
502    };
503
504    warn!(
505        "GHCR pull failed: {}. Trying Docker Hub fallback...",
506        ghcr_err
507    );
508
509    // Try Docker Hub as fallback
510    debug!(
511        "Attempting to pull from Docker Hub: {}:{}",
512        IMAGE_NAME_DOCKERHUB, tag
513    );
514    match pull_from_registry(client, IMAGE_NAME_DOCKERHUB, tag, progress).await {
515        Ok(()) => {
516            let full_name = format!("{IMAGE_NAME_DOCKERHUB}:{tag}");
517            Ok(full_name)
518        }
519        Err(dockerhub_err) => Err(DockerError::Pull(format!(
520            "Failed to pull from both registries. GHCR: {ghcr_err}. Docker Hub: {dockerhub_err}"
521        ))),
522    }
523}
524
/// Maximum number of retry attempts for pull operations
const MAX_PULL_RETRIES: usize = 3;

/// Pull from a specific registry with retry logic
///
/// Retries up to `MAX_PULL_RETRIES` times with exponential backoff
/// (1s, 2s, 4s) between attempts; returns the last error on exhaustion.
async fn pull_from_registry(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");

    // Manual retry loop since async closures can't capture mutable references
    let mut last_error = None;
    for attempt in 1..=MAX_PULL_RETRIES {
        debug!(
            "Pull attempt {}/{} for {}",
            attempt, MAX_PULL_RETRIES, full_name
        );

        match do_pull(client, image, tag, progress).await {
            Ok(()) => return Ok(()),
            Err(e) => {
                warn!("Pull attempt {} failed: {}", attempt, e);
                last_error = Some(e);

                // No sleep after the final attempt.
                if attempt < MAX_PULL_RETRIES {
                    // Exponential backoff: 1s, 2s, 4s
                    let delay_ms = 1000 * (1 << (attempt - 1));
                    tokio::time::sleep(std::time::Duration::from_millis(delay_ms)).await;
                }
            }
        }
    }

    // `last_error` is always Some after the loop; the fallback message is
    // defensive only.
    Err(last_error.unwrap_or_else(|| {
        DockerError::Pull(format!(
            "Pull failed for {full_name} after {MAX_PULL_RETRIES} attempts"
        ))
    }))
}
566
/// Perform the actual pull operation
///
/// Streams layer-by-layer progress from the daemon into the progress
/// reporter and surfaces in-stream errors as `DockerError::Pull`.
async fn do_pull(
    client: &DockerClient,
    image: &str,
    tag: &str,
    progress: &mut ProgressReporter,
) -> Result<(), DockerError> {
    let full_name = format!("{image}:{tag}");

    let options = CreateImageOptions {
        from_image: image,
        tag,
        ..Default::default()
    };

    let mut stream = client.inner().create_image(Some(options), None, None);

    // Add main spinner for overall progress
    progress.add_spinner("pull", &format!("Pulling {full_name}..."));

    while let Some(result) = stream.next().await {
        match result {
            Ok(info) => {
                // Handle errors from the stream
                if let Some(error_msg) = info.error {
                    progress.abandon_all(&error_msg);
                    return Err(DockerError::Pull(error_msg));
                }

                // Handle layer progress
                if let Some(layer_id) = &info.id {
                    let status = info.status.as_deref().unwrap_or("");

                    match status {
                        "Already exists" => {
                            progress.finish(layer_id, "Already exists");
                        }
                        "Pull complete" => {
                            progress.finish(layer_id, "Pull complete");
                        }
                        "Downloading" | "Extracting" => {
                            // Only sized layers get a progress bar; unsized
                            // updates are skipped rather than shown as 0/0.
                            // NOTE(review): these casts assume the daemon
                            // reports non-negative sizes — confirm.
                            if let Some(progress_detail) = &info.progress_detail {
                                let current = progress_detail.current.unwrap_or(0) as u64;
                                let total = progress_detail.total.unwrap_or(0) as u64;

                                if total > 0 {
                                    progress.update_layer(layer_id, current, total, status);
                                }
                            }
                        }
                        _ => {
                            // Other statuses (Waiting, Verifying, etc.)
                            progress.update_spinner(layer_id, status);
                        }
                    }
                } else if let Some(status) = &info.status {
                    // Overall status messages (no layer id)
                    progress.update_spinner("pull", status);
                }
            }
            Err(e) => {
                progress.abandon_all("Pull failed");
                return Err(DockerError::Pull(format!("Pull failed: {e}")));
            }
        }
    }

    progress.finish("pull", &format!("Pull complete: {full_name}"));
    Ok(())
}
637
/// Format a build error with recent log context for actionable debugging.
///
/// Appends, in order: error-like lines that scrolled out of the recent-log
/// window, recent BuildKit output, recent classic output (or a "no output"
/// note), and a suggestion matched against common error patterns.
fn format_build_error_with_context(
    error: &str,
    recent_logs: &VecDeque<String>,
    error_logs: &VecDeque<String>,
    recent_buildkit_logs: &VecDeque<String>,
) -> String {
    // Appends `heading` followed by each line indented two spaces.
    fn append_section<'a>(
        out: &mut String,
        heading: &str,
        lines: impl Iterator<Item = &'a String>,
    ) {
        out.push_str(heading);
        for line in lines {
            out.push_str("\n  ");
            out.push_str(line);
        }
    }

    let mut message = String::from(error);

    // Error-like lines not already present in the recent-log window.
    let recent_set: std::collections::HashSet<&String> = recent_logs.iter().collect();
    let unique_errors: Vec<&String> = error_logs
        .iter()
        .filter(|line| !recent_set.contains(*line))
        .collect();
    if !unique_errors.is_empty() {
        append_section(
            &mut message,
            "\n\nPotential errors detected during build:",
            unique_errors.into_iter(),
        );
    }

    if !recent_buildkit_logs.is_empty() {
        append_section(
            &mut message,
            "\n\nRecent BuildKit output:",
            recent_buildkit_logs.iter(),
        );
    }

    if !recent_logs.is_empty() {
        append_section(&mut message, "\n\nRecent build output:", recent_logs.iter());
    } else if recent_buildkit_logs.is_empty() {
        message.push_str("\n\nNo build output was received from the Docker daemon.");
        message.push_str("\nThis usually means the build failed before any logs were streamed.");
    }

    // Actionable suggestion keyed off common error patterns.
    let error_lower = error.to_lowercase();
    let suggestion = if error_lower.contains("network")
        || error_lower.contains("connection")
        || error_lower.contains("timeout")
    {
        Some("\n\nSuggestion: Check your network connection and Docker's ability to reach the internet.")
    } else if error_lower.contains("disk")
        || error_lower.contains("space")
        || error_lower.contains("no space")
    {
        Some("\n\nSuggestion: Free up disk space with 'docker system prune' or check available storage.")
    } else if error_lower.contains("permission") || error_lower.contains("denied") {
        Some("\n\nSuggestion: Check Docker permissions. You may need to add your user to the 'docker' group.")
    } else {
        None
    };
    if let Some(text) = suggestion {
        message.push_str(text);
    }

    message
}
708
709/// Create a gzipped tar archive containing the Dockerfile
710fn create_build_context() -> Result<Vec<u8>, std::io::Error> {
711    let mut archive_buffer = Vec::new();
712
713    {
714        let encoder = GzEncoder::new(&mut archive_buffer, Compression::default());
715        let mut tar = TarBuilder::new(encoder);
716
717        // Add Dockerfile to archive
718        let dockerfile_bytes = DOCKERFILE.as_bytes();
719        let mut header = tar::Header::new_gnu();
720        header.set_path("Dockerfile")?;
721        header.set_size(dockerfile_bytes.len() as u64);
722        header.set_mode(0o644);
723        header.set_cksum();
724
725        tar.append(&header, dockerfile_bytes)?;
726        tar.finish()?;
727
728        // Finish gzip encoding
729        let encoder = tar.into_inner()?;
730        encoder.finish()?;
731    }
732
733    Ok(archive_buffer)
734}
735
#[cfg(test)]
mod tests {
    //! Unit tests for the pure helpers; no Docker daemon is required.
    use super::*;

    // The build context should be a non-empty gzip stream.
    #[test]
    fn create_build_context_succeeds() {
        let context = create_build_context().expect("should create context");
        assert!(!context.is_empty(), "context should not be empty");

        // Verify it's gzip-compressed (gzip magic bytes)
        assert_eq!(context[0], 0x1f, "should be gzip compressed");
        assert_eq!(context[1], 0x8b, "should be gzip compressed");
    }

    #[test]
    fn default_tag_is_latest() {
        assert_eq!(IMAGE_TAG_DEFAULT, "latest");
    }

    #[test]
    fn format_build_error_includes_recent_logs() {
        let mut logs = VecDeque::new();
        logs.push_back("Step 1/5 : FROM ubuntu:22.04".to_string());
        logs.push_back("Step 2/5 : RUN apt-get update".to_string());
        logs.push_back("E: Unable to fetch some archives".to_string());
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();

        let result = format_build_error_with_context(
            "Build failed: exit code 1",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Build failed: exit code 1"));
        assert!(result.contains("Recent build output:"));
        assert!(result.contains("Step 1/5"));
        assert!(result.contains("Unable to fetch"));
    }

    // With no captured logs at all, no "Recent build output" section appears.
    #[test]
    fn format_build_error_handles_empty_logs() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result =
            format_build_error_with_context("Stream error", &logs, &error_logs, &buildkit_logs);

        assert!(result.contains("Stream error"));
        assert!(!result.contains("Recent build output:"));
    }

    #[test]
    fn format_build_error_adds_network_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "connection timeout",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Check your network connection"));
    }

    #[test]
    fn format_build_error_adds_disk_suggestion() {
        let logs = VecDeque::new();
        let error_logs = VecDeque::new();
        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "no space left on device",
            &logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Free up disk space"));
    }

    // Error-like lines absent from the recent window get their own section.
    #[test]
    fn format_build_error_shows_error_lines_separately() {
        let mut recent_logs = VecDeque::new();
        recent_logs.push_back("Compiling foo v1.0".to_string());
        recent_logs.push_back("Successfully installed bar".to_string());

        let mut error_logs = VecDeque::new();
        error_logs.push_back("error: failed to compile dust".to_string());
        error_logs.push_back("error: failed to compile glow".to_string());

        let buildkit_logs = VecDeque::new();
        let result = format_build_error_with_context(
            "Build failed",
            &recent_logs,
            &error_logs,
            &buildkit_logs,
        );

        assert!(result.contains("Potential errors detected during build:"));
        assert!(result.contains("failed to compile dust"));
        assert!(result.contains("failed to compile glow"));
    }

    #[test]
    fn is_error_line_detects_errors() {
        assert!(is_error_line("error: something failed"));
        assert!(is_error_line("Error: build failed"));
        assert!(is_error_line("Failed to install package"));
        assert!(is_error_line("cannot find module"));
        assert!(is_error_line("Unable to locate package"));
        assert!(!is_error_line("Compiling foo v1.0"));
        assert!(!is_error_line("Successfully installed"));
    }
}