Skip to main content

zlayer_builder/
builder.rs

1//! `ImageBuilder` - High-level API for building container images
2//!
3//! This module provides the [`ImageBuilder`] type which orchestrates the full
4//! container image build process, from Dockerfile parsing through buildah
5//! execution to final image creation.
6//!
7//! # Example
8//!
9//! ```no_run
10//! use zlayer_builder::{ImageBuilder, Runtime};
11//!
12//! #[tokio::main]
13//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
14//!     // Build from a Dockerfile
15//!     let image = ImageBuilder::new("./my-app").await?
16//!         .tag("myapp:latest")
17//!         .tag("myapp:v1.0.0")
18//!         .build()
19//!         .await?;
20//!
21//!     println!("Built image: {}", image.image_id);
22//!     Ok(())
23//! }
24//! ```
25//!
26//! # Using Runtime Templates
27//!
28//! ```no_run
29//! use zlayer_builder::{ImageBuilder, Runtime};
30//!
31//! #[tokio::main]
32//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
33//!     // Build using a runtime template (no Dockerfile needed)
34//!     let image = ImageBuilder::new("./my-node-app").await?
35//!         .runtime(Runtime::Node20)
36//!         .tag("myapp:latest")
37//!         .build()
38//!         .await?;
39//!
40//!     println!("Built image: {}", image.image_id);
41//!     Ok(())
42//! }
43//! ```
44//!
45//! # Multi-stage Builds with Target
46//!
47//! ```no_run
48//! use zlayer_builder::ImageBuilder;
49//!
50//! #[tokio::main]
51//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
52//!     // Build only up to a specific stage
53//!     let image = ImageBuilder::new("./my-app").await?
54//!         .target("builder")
55//!         .tag("myapp:builder")
56//!         .build()
57//!         .await?;
58//!
59//!     println!("Built intermediate image: {}", image.image_id);
60//!     Ok(())
61//! }
62//! ```
63//!
64//! # With TUI Progress Updates
65//!
66//! ```no_run
67//! use zlayer_builder::{ImageBuilder, BuildEvent};
68//! use std::sync::mpsc;
69//!
70//! #[tokio::main]
71//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
72//!     let (tx, rx) = mpsc::channel::<BuildEvent>();
73//!
74//!     // Start TUI in another thread
75//!     std::thread::spawn(move || {
76//!         // Process events from rx...
77//!         while let Ok(event) = rx.recv() {
78//!             println!("Event: {:?}", event);
79//!         }
80//!     });
81//!
82//!     let image = ImageBuilder::new("./my-app").await?
83//!         .tag("myapp:latest")
84//!         .with_events(tx)
85//!         .build()
86//!         .await?;
87//!
88//!     Ok(())
89//! }
90//! ```
91//!
92//! # With Cache Backend (requires `cache` feature)
93//!
94//! ```no_run,ignore
95//! use zlayer_builder::ImageBuilder;
96//!
97//! #[tokio::main]
98//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
99//!     let image = ImageBuilder::new("./my-app").await?
100//!         .with_cache_dir("/var/cache/zlayer")  // Use persistent disk cache
101//!         .tag("myapp:latest")
102//!         .build()
103//!         .await?;
104//!
105//!     println!("Built image: {}", image.image_id);
106//!     Ok(())
107//! }
108//! ```
109
110use std::collections::HashMap;
111use std::path::{Path, PathBuf};
112use std::sync::mpsc;
113use std::sync::Arc;
114
115use tokio::fs;
116use tracing::{debug, info, instrument};
117
118use crate::backend::BuildBackend;
119use crate::buildah::BuildahExecutor;
120use crate::dockerfile::{Dockerfile, RunMount};
121use crate::error::{BuildError, Result};
122use crate::templates::{get_template, Runtime};
123use crate::tui::BuildEvent;
124
125#[cfg(feature = "cache")]
126use zlayer_registry::cache::BlobCacheBackend;
127
128#[cfg(feature = "local-registry")]
129use zlayer_registry::LocalRegistry;
130
/// Output from parsing a `ZImagefile` - either a Dockerfile for container builds
/// or a WASM build result for WebAssembly builds.
///
/// Most `ZImagefile` modes (runtime, single-stage, multi-stage) produce a
/// [`Dockerfile`] IR that is then built with buildah. WASM mode produces
/// a compiled artifact directly, bypassing the container build pipeline.
#[derive(Debug)]
pub enum BuildOutput {
    /// Standard container build - produces a Dockerfile to be built with buildah.
    Dockerfile(Dockerfile),
    /// WASM component build - already built, produces artifact path.
    ///
    /// Unlike the `Dockerfile` variant, no further buildah execution is
    /// required once this variant is produced.
    WasmArtifact {
        /// Path to the compiled WASM binary.
        wasm_path: PathBuf,
        /// Path to the OCI artifact directory (if exported).
        oci_path: Option<PathBuf>,
        /// Source language used.
        language: String,
        /// Whether optimization was applied.
        optimized: bool,
        /// Size of the output file in bytes.
        size: u64,
    },
}
155
/// Configuration for the layer cache backend.
///
/// This enum specifies which cache backend to use for storing and retrieving
/// cached layers during builds. The cache feature must be enabled for this
/// to be available.
///
/// The default backend is [`CacheBackendConfig::Memory`].
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// // Use persistent disk cache
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_config(CacheBackendConfig::Persistent {
///         path: "/var/cache/zlayer".into(),
///     })
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "cache")]
#[derive(Debug, Clone, Default)]
pub enum CacheBackendConfig {
    /// In-memory cache (cleared when process exits).
    ///
    /// Useful for CI/CD environments where persistence isn't needed
    /// but you want to avoid re-downloading base image layers within
    /// a single build session.
    #[default]
    Memory,

    /// Persistent disk-based cache using redb.
    ///
    /// Requires the `cache-persistent` feature. Layers are stored on disk
    /// and persist across builds, significantly speeding up repeated builds.
    #[cfg(feature = "cache-persistent")]
    Persistent {
        /// Path to the cache directory or database file.
        /// If a directory, `blob_cache.redb` will be created inside it.
        path: PathBuf,
    },

    /// S3-compatible object storage backend.
    ///
    /// Requires the `cache-s3` feature. Useful for distributed build systems
    /// where multiple build machines need to share a cache.
    #[cfg(feature = "cache-s3")]
    S3 {
        /// S3 bucket name
        bucket: String,
        /// AWS region (optional, uses SDK default if not set)
        region: Option<String>,
        /// Custom endpoint URL (for S3-compatible services like R2, B2, `MinIO`)
        endpoint: Option<String>,
        /// Key prefix for cached blobs (default: "zlayer/layers/")
        prefix: Option<String>,
    },
}
215
/// Built image information returned after a successful build
#[derive(Debug, Clone)]
pub struct BuiltImage {
    /// Image ID (sha256:...)
    pub image_id: String,
    /// Applied tags
    pub tags: Vec<String>,
    /// Number of layers in the final image
    pub layer_count: usize,
    /// Total size in bytes (0 if not computed)
    pub size: u64,
    /// Build duration in milliseconds
    pub build_time_ms: u64,
    /// Whether this image is a manifest list (multi-arch).
    /// `false` for single-platform images.
    pub is_manifest: bool,
}
232
/// Credentials used to authenticate against a container registry.
#[derive(Debug, Clone)]
pub struct RegistryAuth {
    /// Registry username
    pub username: String,
    /// Registry password or token
    pub password: String,
}

impl RegistryAuth {
    /// Build a credential pair from anything convertible into a `String`
    /// (e.g. `&str`, `String`).
    pub fn new(username: impl Into<String>, password: impl Into<String>) -> Self {
        let username = username.into();
        let password = password.into();
        Self { username, password }
    }
}
251
/// Build options for customizing the image build process
///
/// Note: several of the buildah-oriented cache fields (`layers`,
/// `cache_from`, `cache_to`, `cache_ttl`) are currently recorded but
/// reserved for future use; see the individual field docs.
#[derive(Debug, Clone)]
#[allow(clippy::struct_excessive_bools)]
pub struct BuildOptions {
    /// Dockerfile path (default: Dockerfile in context)
    pub dockerfile: Option<PathBuf>,
    /// `ZImagefile` path (alternative to Dockerfile)
    pub zimagefile: Option<PathBuf>,
    /// Use runtime template instead of Dockerfile
    pub runtime: Option<Runtime>,
    /// Build arguments (ARG values)
    pub build_args: HashMap<String, String>,
    /// Target stage for multi-stage builds
    pub target: Option<String>,
    /// Image tags to apply
    pub tags: Vec<String>,
    /// Disable layer caching
    /// (tracked but not fully enforced yet — see [`ImageBuilder::no_cache`])
    pub no_cache: bool,
    /// Push to registry after build
    pub push: bool,
    /// Registry auth (if pushing)
    pub registry_auth: Option<RegistryAuth>,
    /// Squash all layers into one
    pub squash: bool,
    /// Image format (oci or docker)
    pub format: Option<String>,
    /// Enable buildah layer caching (--layers flag for `buildah build`).
    /// Default: true
    ///
    /// Note: `ZLayer` uses manual container creation (`buildah from`, `buildah run`,
    /// `buildah commit`) rather than `buildah build`, so this flag is reserved
    /// for future use when/if we switch to `buildah build` (bud) command.
    pub layers: bool,
    /// Registry to pull cache from (--cache-from for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-from=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-from natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_from: Option<String>,
    /// Registry to push cache to (--cache-to for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-to=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-to natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_to: Option<String>,
    /// Maximum cache age (--cache-ttl for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-ttl=<duration>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement cache TTL support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-ttl natively
    /// 2. Implementing custom cache expiration logic for our layer caching system
    pub cache_ttl: Option<std::time::Duration>,
    /// Cache backend configuration (requires `cache` feature).
    ///
    /// When configured, the builder will store layer data in the specified
    /// cache backend for faster subsequent builds. This is separate from
    /// buildah's native caching and operates at the `ZLayer` level.
    ///
    /// # Integration Points
    ///
    /// The cache backend is used at several points during the build:
    ///
    /// 1. **Before instruction execution**: Check if a cached layer exists
    ///    for the (`instruction_hash`, `base_layer`) tuple
    /// 2. **After instruction execution**: Store the resulting layer data
    ///    in the cache for future builds
    /// 3. **Base image layers**: Cache pulled base image layers to avoid
    ///    re-downloading from registries
    ///
    /// TODO: Wire up cache lookups in the build loop once layer digests
    /// are properly computed and tracked.
    #[cfg(feature = "cache")]
    pub cache_backend_config: Option<CacheBackendConfig>,
    /// Default OCI/WASM-compatible registry to check for images before falling
    /// back to Docker Hub qualification.
    ///
    /// When set, the builder will probe this registry for short image names
    /// before qualifying them to `docker.io`. For example, if set to
    /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
    /// the builder will check `git.example.com:5000/myapp:latest` first.
    pub default_registry: Option<String>,
    /// Default cache mounts injected into all RUN instructions.
    /// These are merged with any step-level cache mounts (deduped by target path).
    pub default_cache_mounts: Vec<RunMount>,
    /// Number of retries for failed RUN steps (0 = no retries, default)
    pub retries: u32,
    /// Target platform for the build (e.g., "linux/amd64", "linux/arm64").
    /// When set, `buildah from` pulls the platform-specific image variant.
    pub platform: Option<String>,
    /// SHA-256 hash of the source Dockerfile/ZImagefile content.
    ///
    /// When set, the sandbox builder can skip a rebuild if the cached image
    /// was produced from identical source content (content-based invalidation).
    pub source_hash: Option<String>,
}
358
impl Default for BuildOptions {
    /// Baseline options: no source-file overrides, no build args, no tags,
    /// nothing pushed, and buildah layer caching enabled (`layers: true`).
    fn default() -> Self {
        Self {
            dockerfile: None,
            zimagefile: None,
            runtime: None,
            build_args: HashMap::new(),
            target: None,
            tags: Vec::new(),
            no_cache: false,
            push: false,
            registry_auth: None,
            squash: false,
            format: None,
            // Matches the documented "Default: true" on `BuildOptions::layers`.
            layers: true,
            cache_from: None,
            cache_to: None,
            cache_ttl: None,
            #[cfg(feature = "cache")]
            cache_backend_config: None,
            default_registry: None,
            default_cache_mounts: Vec::new(),
            retries: 0,
            platform: None,
            source_hash: None,
        }
    }
}
387
/// Image builder - orchestrates the full build process
///
/// `ImageBuilder` provides a fluent API for configuring and executing
/// container image builds using buildah as the backend.
///
/// # Build Process
///
/// 1. Parse Dockerfile (or use runtime template)
/// 2. Resolve target stages if specified
/// 3. Build each stage sequentially:
///    - Create working container from base image
///    - Execute each instruction
///    - Commit intermediate stages for COPY --from
/// 4. Commit final image with tags
/// 5. Push to registry if configured
/// 6. Clean up intermediate containers
///
/// # Cache Backend Integration (requires `cache` feature)
///
/// When a cache backend is configured, the builder can store and retrieve
/// cached layer data to speed up subsequent builds:
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
///
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_dir("/var/cache/zlayer")
///     .tag("myapp:latest");
/// ```
pub struct ImageBuilder {
    /// Build context directory
    context: PathBuf,
    /// Build options
    options: BuildOptions,
    /// Buildah executor (kept for backwards compatibility).
    /// Only consulted when `backend` is `None`.
    #[allow(dead_code)]
    executor: BuildahExecutor,
    /// Event sender for TUI updates
    event_tx: Option<mpsc::Sender<BuildEvent>>,
    /// Pluggable build backend (buildah, sandbox, etc.).
    ///
    /// When set, the `build()` method delegates to this backend instead of
    /// using the inline buildah logic. Set automatically by `new()` via
    /// `detect_backend()`, or explicitly via `with_backend()`.
    backend: Option<Arc<dyn BuildBackend>>,
    /// Cache backend for layer caching (requires `cache` feature).
    ///
    /// When set, the builder will attempt to retrieve cached layers before
    /// executing instructions, and store results in the cache after execution.
    ///
    /// TODO: Implement cache lookups in the build loop. Currently the backend
    /// is stored but not actively used during builds. Integration points:
    /// - Check cache before executing RUN instructions
    /// - Store layer data after successful instruction execution
    /// - Cache base image layers pulled from registries
    #[cfg(feature = "cache")]
    cache_backend: Option<Arc<Box<dyn BlobCacheBackend>>>,
    /// Local OCI registry for checking cached images before remote pulls.
    #[cfg(feature = "local-registry")]
    local_registry: Option<LocalRegistry>,
}
449
450impl ImageBuilder {
    /// Create a new `ImageBuilder` with the given context directory
    ///
    /// The context directory should contain the Dockerfile (unless using
    /// a runtime template) and any files that will be copied into the image.
    ///
    /// # Arguments
    ///
    /// * `context` - Path to the build context directory
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - The context directory does not exist
    /// - Buildah is not installed or not accessible
    ///
    /// # Example
    ///
    /// ```no_run
    /// use zlayer_builder::ImageBuilder;
    ///
    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
    /// let builder = ImageBuilder::new("./my-project").await?;
    /// # Ok(())
    /// # }
    /// ```
    #[instrument(skip_all, fields(context = %context.as_ref().display()))]
    pub async fn new(context: impl AsRef<Path>) -> Result<Self> {
        let context = context.as_ref().to_path_buf();

        // Verify context exists; a missing directory is surfaced as
        // `BuildError::ContextRead` wrapping a NotFound io::Error.
        if !context.exists() {
            return Err(BuildError::ContextRead {
                path: context,
                source: std::io::Error::new(
                    std::io::ErrorKind::NotFound,
                    "Build context directory not found",
                ),
            });
        }

        // Detect the best available build backend for this platform.
        // Detection failure is non-fatal: `backend` stays `None` and
        // `build()` falls back to the inline buildah logic.
        let backend = crate::backend::detect_backend().await.ok();

        // Initialize buildah executor.
        // On macOS, if buildah is not found we fall back to a default executor
        // (the backend will handle the actual build dispatch).
        let executor = match BuildahExecutor::new_async().await {
            Ok(exec) => exec,
            #[cfg(target_os = "macos")]
            Err(_) => {
                info!("Buildah not found on macOS; backend will handle build dispatch");
                BuildahExecutor::default()
            }
            // On all other platforms a missing buildah is a hard error.
            #[cfg(not(target_os = "macos"))]
            Err(e) => return Err(e),
        };

        debug!("Created ImageBuilder for context: {}", context.display());

        Ok(Self {
            context,
            options: BuildOptions::default(),
            executor,
            event_tx: None,
            backend,
            #[cfg(feature = "cache")]
            cache_backend: None,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        })
    }
522
523    /// Create an `ImageBuilder` with a custom buildah executor
524    ///
525    /// This is useful for testing or when you need to configure
526    /// the executor with specific storage options. The executor is
527    /// wrapped in a [`BuildahBackend`] so the build dispatches through
528    /// the [`BuildBackend`] trait.
529    ///
530    /// # Errors
531    ///
532    /// Returns an error if the context directory does not exist.
533    pub fn with_executor(context: impl AsRef<Path>, executor: BuildahExecutor) -> Result<Self> {
534        let context = context.as_ref().to_path_buf();
535
536        if !context.exists() {
537            return Err(BuildError::ContextRead {
538                path: context,
539                source: std::io::Error::new(
540                    std::io::ErrorKind::NotFound,
541                    "Build context directory not found",
542                ),
543            });
544        }
545
546        let backend: Arc<dyn BuildBackend> = Arc::new(
547            crate::backend::BuildahBackend::with_executor(executor.clone()),
548        );
549
550        Ok(Self {
551            context,
552            options: BuildOptions::default(),
553            executor,
554            event_tx: None,
555            backend: Some(backend),
556            #[cfg(feature = "cache")]
557            cache_backend: None,
558            #[cfg(feature = "local-registry")]
559            local_registry: None,
560        })
561    }
562
563    /// Create an `ImageBuilder` with an explicit [`BuildBackend`].
564    ///
565    /// The backend is used for all build, push, tag, and manifest
566    /// operations. The internal `BuildahExecutor` is set to the default
567    /// (it is only used if no backend is set).
568    ///
569    /// # Errors
570    ///
571    /// Returns an error if the context directory does not exist.
572    pub fn with_backend(context: impl AsRef<Path>, backend: Arc<dyn BuildBackend>) -> Result<Self> {
573        let context = context.as_ref().to_path_buf();
574
575        if !context.exists() {
576            return Err(BuildError::ContextRead {
577                path: context,
578                source: std::io::Error::new(
579                    std::io::ErrorKind::NotFound,
580                    "Build context directory not found",
581                ),
582            });
583        }
584
585        Ok(Self {
586            context,
587            options: BuildOptions::default(),
588            executor: BuildahExecutor::default(),
589            event_tx: None,
590            backend: Some(backend),
591            #[cfg(feature = "cache")]
592            cache_backend: None,
593            #[cfg(feature = "local-registry")]
594            local_registry: None,
595        })
596    }
597
598    /// Set a custom Dockerfile path
599    ///
600    /// By default, the builder looks for a file named `Dockerfile` in the
601    /// context directory. Use this method to specify a different path.
602    ///
603    /// # Example
604    ///
605    /// ```no_run
606    /// # use zlayer_builder::ImageBuilder;
607    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
608    /// let builder = ImageBuilder::new("./my-project").await?
609    ///     .dockerfile("./my-project/Dockerfile.prod");
610    /// # Ok(())
611    /// # }
612    /// ```
613    #[must_use]
614    pub fn dockerfile(mut self, path: impl AsRef<Path>) -> Self {
615        self.options.dockerfile = Some(path.as_ref().to_path_buf());
616        self
617    }
618
619    /// Set a custom `ZImagefile` path
620    ///
621    /// `ZImagefiles` are a YAML-based alternative to Dockerfiles. When set,
622    /// the builder will parse the `ZImagefile` and convert it to the internal
623    /// Dockerfile IR for execution.
624    ///
625    /// # Example
626    ///
627    /// ```no_run
628    /// # use zlayer_builder::ImageBuilder;
629    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
630    /// let builder = ImageBuilder::new("./my-project").await?
631    ///     .zimagefile("./my-project/ZImagefile");
632    /// # Ok(())
633    /// # }
634    /// ```
635    #[must_use]
636    pub fn zimagefile(mut self, path: impl AsRef<Path>) -> Self {
637        self.options.zimagefile = Some(path.as_ref().to_path_buf());
638        self
639    }
640
641    /// Use a runtime template instead of a Dockerfile
642    ///
643    /// Runtime templates provide pre-built Dockerfiles for common
644    /// development environments. When set, the Dockerfile option is ignored.
645    ///
646    /// # Example
647    ///
648    /// ```no_run
649    /// use zlayer_builder::{ImageBuilder, Runtime};
650    ///
651    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
652    /// let builder = ImageBuilder::new("./my-node-app").await?
653    ///     .runtime(Runtime::Node20);
654    /// # Ok(())
655    /// # }
656    /// ```
657    #[must_use]
658    pub fn runtime(mut self, runtime: Runtime) -> Self {
659        self.options.runtime = Some(runtime);
660        self
661    }
662
663    /// Add a build argument
664    ///
665    /// Build arguments are passed to the Dockerfile and can be referenced
666    /// using the `ARG` instruction.
667    ///
668    /// # Example
669    ///
670    /// ```no_run
671    /// # use zlayer_builder::ImageBuilder;
672    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
673    /// let builder = ImageBuilder::new("./my-project").await?
674    ///     .build_arg("VERSION", "1.0.0")
675    ///     .build_arg("DEBUG", "false");
676    /// # Ok(())
677    /// # }
678    /// ```
679    #[must_use]
680    pub fn build_arg(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
681        self.options.build_args.insert(key.into(), value.into());
682        self
683    }
684
685    /// Set multiple build arguments at once
686    #[must_use]
687    pub fn build_args(mut self, args: HashMap<String, String>) -> Self {
688        self.options.build_args.extend(args);
689        self
690    }
691
692    /// Set the target stage for multi-stage builds
693    ///
694    /// When building a multi-stage Dockerfile, you can stop at a specific
695    /// stage instead of building all stages.
696    ///
697    /// # Example
698    ///
699    /// ```no_run
700    /// # use zlayer_builder::ImageBuilder;
701    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
702    /// // Dockerfile:
703    /// // FROM node:20 AS builder
704    /// // ...
705    /// // FROM node:20-slim AS runtime
706    /// // ...
707    ///
708    /// let builder = ImageBuilder::new("./my-project").await?
709    ///     .target("builder")
710    ///     .tag("myapp:builder");
711    /// # Ok(())
712    /// # }
713    /// ```
714    #[must_use]
715    pub fn target(mut self, stage: impl Into<String>) -> Self {
716        self.options.target = Some(stage.into());
717        self
718    }
719
720    /// Add an image tag
721    ///
722    /// Tags are applied to the final image. You can add multiple tags.
723    /// The first tag is used as the primary image name during commit.
724    ///
725    /// # Example
726    ///
727    /// ```no_run
728    /// # use zlayer_builder::ImageBuilder;
729    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
730    /// let builder = ImageBuilder::new("./my-project").await?
731    ///     .tag("myapp:latest")
732    ///     .tag("myapp:v1.0.0")
733    ///     .tag("registry.example.com/myapp:v1.0.0");
734    /// # Ok(())
735    /// # }
736    /// ```
737    #[must_use]
738    pub fn tag(mut self, tag: impl Into<String>) -> Self {
739        self.options.tags.push(tag.into());
740        self
741    }
742
743    /// Disable layer caching
744    ///
745    /// When enabled, all layers are rebuilt from scratch even if
746    /// they could be served from cache.
747    ///
748    /// Note: Currently this flag is tracked but not fully implemented in the
749    /// build process. `ZLayer` uses manual container creation (`buildah from`,
750    /// `buildah run`, `buildah commit`) which doesn't have built-in caching
751    /// like `buildah build` does. Future work could implement layer-level
752    /// caching by checking instruction hashes against previously built layers.
753    #[must_use]
754    pub fn no_cache(mut self) -> Self {
755        self.options.no_cache = true;
756        self
757    }
758
759    /// Enable or disable layer caching
760    ///
761    /// This controls the `--layers` flag for buildah. When enabled (default),
762    /// buildah can cache and reuse intermediate layers.
763    ///
764    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
765    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
766    /// flag is reserved for future use when/if we switch to `buildah build`.
767    ///
768    /// # Example
769    ///
770    /// ```no_run
771    /// # use zlayer_builder::ImageBuilder;
772    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
773    /// let builder = ImageBuilder::new("./my-project").await?
774    ///     .layers(false)  // Disable layer caching
775    ///     .tag("myapp:latest");
776    /// # Ok(())
777    /// # }
778    /// ```
779    #[must_use]
780    pub fn layers(mut self, enable: bool) -> Self {
781        self.options.layers = enable;
782        self
783    }
784
785    /// Set registry to pull cache from
786    ///
787    /// This corresponds to buildah's `--cache-from` flag, which allows
788    /// pulling cached layers from a remote registry to speed up builds.
789    ///
790    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
791    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
792    /// option is reserved for future implementation.
793    ///
794    /// TODO: Implement remote cache support. This would require either:
795    /// 1. Switching to `buildah build` command which supports --cache-from natively
796    /// 2. Implementing custom layer caching with registry pull for intermediate layers
797    ///
798    /// # Example
799    ///
800    /// ```no_run
801    /// # use zlayer_builder::ImageBuilder;
802    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
803    /// let builder = ImageBuilder::new("./my-project").await?
804    ///     .cache_from("registry.example.com/myapp:cache")
805    ///     .tag("myapp:latest");
806    /// # Ok(())
807    /// # }
808    /// ```
809    #[must_use]
810    pub fn cache_from(mut self, registry: impl Into<String>) -> Self {
811        self.options.cache_from = Some(registry.into());
812        self
813    }
814
815    /// Set registry to push cache to
816    ///
817    /// This corresponds to buildah's `--cache-to` flag, which allows
818    /// pushing cached layers to a remote registry for future builds to use.
819    ///
820    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
821    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
822    /// option is reserved for future implementation.
823    ///
824    /// TODO: Implement remote cache support. This would require either:
825    /// 1. Switching to `buildah build` command which supports --cache-to natively
826    /// 2. Implementing custom layer caching with registry push for intermediate layers
827    ///
828    /// # Example
829    ///
830    /// ```no_run
831    /// # use zlayer_builder::ImageBuilder;
832    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
833    /// let builder = ImageBuilder::new("./my-project").await?
834    ///     .cache_to("registry.example.com/myapp:cache")
835    ///     .tag("myapp:latest");
836    /// # Ok(())
837    /// # }
838    /// ```
839    #[must_use]
840    pub fn cache_to(mut self, registry: impl Into<String>) -> Self {
841        self.options.cache_to = Some(registry.into());
842        self
843    }
844
845    /// Set maximum cache age
846    ///
847    /// This corresponds to buildah's `--cache-ttl` flag, which sets the
848    /// maximum age for cached layers before they are considered stale.
849    ///
850    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
851    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
852    /// option is reserved for future implementation.
853    ///
854    /// TODO: Implement cache TTL support. This would require either:
855    /// 1. Switching to `buildah build` command which supports --cache-ttl natively
856    /// 2. Implementing custom cache expiration logic for our layer caching system
857    ///
858    /// # Example
859    ///
860    /// ```no_run
861    /// # use zlayer_builder::ImageBuilder;
862    /// # use std::time::Duration;
863    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
864    /// let builder = ImageBuilder::new("./my-project").await?
865    ///     .cache_ttl(Duration::from_secs(3600 * 24))  // 24 hours
866    ///     .tag("myapp:latest");
867    /// # Ok(())
868    /// # }
869    /// ```
870    #[must_use]
871    pub fn cache_ttl(mut self, ttl: std::time::Duration) -> Self {
872        self.options.cache_ttl = Some(ttl);
873        self
874    }
875
876    /// Push the image to a registry after building
877    ///
878    /// # Arguments
879    ///
880    /// * `auth` - Registry authentication credentials
881    ///
882    /// # Example
883    ///
884    /// ```no_run
885    /// use zlayer_builder::{ImageBuilder, RegistryAuth};
886    ///
887    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
888    /// let builder = ImageBuilder::new("./my-project").await?
889    ///     .tag("registry.example.com/myapp:v1.0.0")
890    ///     .push(RegistryAuth::new("user", "password"));
891    /// # Ok(())
892    /// # }
893    /// ```
894    #[must_use]
895    pub fn push(mut self, auth: RegistryAuth) -> Self {
896        self.options.push = true;
897        self.options.registry_auth = Some(auth);
898        self
899    }
900
901    /// Enable pushing without authentication
902    ///
903    /// Use this for registries that don't require authentication
904    /// (e.g., local registries, insecure registries).
905    #[must_use]
906    pub fn push_without_auth(mut self) -> Self {
907        self.options.push = true;
908        self.options.registry_auth = None;
909        self
910    }
911
912    /// Set a default OCI/WASM-compatible registry to check for images.
913    ///
914    /// When set, the builder will probe this registry for short image names
915    /// before qualifying them to `docker.io`. For example, if set to
916    /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
917    /// the builder will check `git.example.com:5000/myapp:latest` first.
918    #[must_use]
919    pub fn default_registry(mut self, registry: impl Into<String>) -> Self {
920        self.options.default_registry = Some(registry.into());
921        self
922    }
923
924    /// Set a local OCI registry for image resolution.
925    ///
926    /// When set, the builder checks the local registry for cached images
927    /// before pulling from remote registries.
928    #[cfg(feature = "local-registry")]
929    #[must_use]
930    pub fn with_local_registry(mut self, registry: LocalRegistry) -> Self {
931        self.local_registry = Some(registry);
932        self
933    }
934
935    /// Squash all layers into a single layer
936    ///
937    /// This reduces image size but loses layer caching benefits.
938    #[must_use]
939    pub fn squash(mut self) -> Self {
940        self.options.squash = true;
941        self
942    }
943
944    /// Set the image format
945    ///
946    /// Valid values are "oci" (default) or "docker".
947    #[must_use]
948    pub fn format(mut self, format: impl Into<String>) -> Self {
949        self.options.format = Some(format.into());
950        self
951    }
952
953    /// Set default cache mounts to inject into all RUN instructions
954    #[must_use]
955    pub fn default_cache_mounts(mut self, mounts: Vec<RunMount>) -> Self {
956        self.options.default_cache_mounts = mounts;
957        self
958    }
959
960    /// Set the number of retries for failed RUN steps
961    #[must_use]
962    pub fn retries(mut self, retries: u32) -> Self {
963        self.options.retries = retries;
964        self
965    }
966
967    /// Set the target platform for cross-architecture builds.
968    #[must_use]
969    pub fn platform(mut self, platform: impl Into<String>) -> Self {
970        self.options.platform = Some(platform.into());
971        self
972    }
973
974    /// Set a pre-computed source hash for content-based cache invalidation.
975    ///
976    /// When set, the sandbox builder can skip a full rebuild if the cached
977    /// image was produced from identical source content.
978    #[must_use]
979    pub fn source_hash(mut self, hash: impl Into<String>) -> Self {
980        self.options.source_hash = Some(hash.into());
981        self
982    }
983
984    /// Set an event sender for TUI progress updates
985    ///
986    /// Events will be sent as the build progresses, allowing you to
987    /// display a progress UI or log build status.
988    ///
989    /// # Example
990    ///
991    /// ```no_run
992    /// use zlayer_builder::{ImageBuilder, BuildEvent};
993    /// use std::sync::mpsc;
994    ///
995    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
996    /// let (tx, rx) = mpsc::channel::<BuildEvent>();
997    ///
998    /// let builder = ImageBuilder::new("./my-project").await?
999    ///     .tag("myapp:latest")
1000    ///     .with_events(tx);
1001    /// # Ok(())
1002    /// # }
1003    /// ```
1004    #[must_use]
1005    pub fn with_events(mut self, tx: mpsc::Sender<BuildEvent>) -> Self {
1006        self.event_tx = Some(tx);
1007        self
1008    }
1009
1010    /// Configure a persistent disk cache backend for layer caching.
1011    ///
1012    /// When configured, the builder will store layer data on disk at the
1013    /// specified path. This cache persists across builds and significantly
1014    /// speeds up repeated builds of similar images.
1015    ///
1016    /// Requires the `cache-persistent` feature to be enabled.
1017    ///
1018    /// # Arguments
1019    ///
1020    /// * `path` - Path to the cache directory. If a directory, creates
1021    ///   `blob_cache.redb` inside it. If a file path, uses it directly.
1022    ///
1023    /// # Example
1024    ///
1025    /// ```no_run,ignore
1026    /// use zlayer_builder::ImageBuilder;
1027    ///
1028    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1029    /// let builder = ImageBuilder::new("./my-project").await?
1030    ///     .with_cache_dir("/var/cache/zlayer")
1031    ///     .tag("myapp:latest");
1032    /// # Ok(())
1033    /// # }
1034    /// ```
1035    ///
1036    /// # Integration Status
1037    ///
1038    /// TODO: The cache backend is currently stored but not actively used
1039    /// during builds. Future work will wire up:
1040    /// - Cache lookups before executing RUN instructions
1041    /// - Storing layer data after successful execution
1042    /// - Caching base image layers from registry pulls
1043    #[cfg(feature = "cache-persistent")]
1044    #[must_use]
1045    pub fn with_cache_dir(mut self, path: impl AsRef<Path>) -> Self {
1046        self.options.cache_backend_config = Some(CacheBackendConfig::Persistent {
1047            path: path.as_ref().to_path_buf(),
1048        });
1049        debug!(
1050            "Configured persistent cache at: {}",
1051            path.as_ref().display()
1052        );
1053        self
1054    }
1055
1056    /// Configure an in-memory cache backend for layer caching.
1057    ///
1058    /// The in-memory cache is cleared when the process exits, but can
1059    /// speed up builds within a single session by caching intermediate
1060    /// layers and avoiding redundant operations.
1061    ///
1062    /// Requires the `cache` feature to be enabled.
1063    ///
1064    /// # Example
1065    ///
1066    /// ```no_run,ignore
1067    /// use zlayer_builder::ImageBuilder;
1068    ///
1069    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1070    /// let builder = ImageBuilder::new("./my-project").await?
1071    ///     .with_memory_cache()
1072    ///     .tag("myapp:latest");
1073    /// # Ok(())
1074    /// # }
1075    /// ```
1076    ///
1077    /// # Integration Status
1078    ///
1079    /// TODO: The cache backend is currently stored but not actively used
1080    /// during builds. See `with_cache_dir` for integration status details.
1081    #[cfg(feature = "cache")]
1082    #[must_use]
1083    pub fn with_memory_cache(mut self) -> Self {
1084        self.options.cache_backend_config = Some(CacheBackendConfig::Memory);
1085        debug!("Configured in-memory cache");
1086        self
1087    }
1088
1089    /// Configure an S3-compatible storage backend for layer caching.
1090    ///
1091    /// This is useful for distributed build systems where multiple build
1092    /// machines need to share a layer cache. Supports AWS S3, Cloudflare R2,
1093    /// Backblaze B2, `MinIO`, and other S3-compatible services.
1094    ///
1095    /// Requires the `cache-s3` feature to be enabled.
1096    ///
1097    /// # Arguments
1098    ///
1099    /// * `bucket` - S3 bucket name
1100    /// * `region` - AWS region (optional, uses SDK default if not set)
1101    ///
1102    /// # Example
1103    ///
1104    /// ```no_run,ignore
1105    /// use zlayer_builder::ImageBuilder;
1106    ///
1107    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1108    /// let builder = ImageBuilder::new("./my-project").await?
1109    ///     .with_s3_cache("my-build-cache", Some("us-west-2"))
1110    ///     .tag("myapp:latest");
1111    /// # Ok(())
1112    /// # }
1113    /// ```
1114    ///
1115    /// # Integration Status
1116    ///
1117    /// TODO: The cache backend is currently stored but not actively used
1118    /// during builds. See `with_cache_dir` for integration status details.
1119    #[cfg(feature = "cache-s3")]
1120    #[must_use]
1121    pub fn with_s3_cache(mut self, bucket: impl Into<String>, region: Option<String>) -> Self {
1122        self.options.cache_backend_config = Some(CacheBackendConfig::S3 {
1123            bucket: bucket.into(),
1124            region,
1125            endpoint: None,
1126            prefix: None,
1127        });
1128        debug!("Configured S3 cache");
1129        self
1130    }
1131
1132    /// Configure an S3-compatible storage backend with custom endpoint.
1133    ///
1134    /// Use this method for S3-compatible services that require a custom
1135    /// endpoint URL (e.g., Cloudflare R2, `MinIO`, local development).
1136    ///
1137    /// Requires the `cache-s3` feature to be enabled.
1138    ///
1139    /// # Arguments
1140    ///
1141    /// * `bucket` - S3 bucket name
1142    /// * `endpoint` - Custom endpoint URL
1143    /// * `region` - Region (required for some S3-compatible services)
1144    ///
1145    /// # Example
1146    ///
1147    /// ```no_run,ignore
1148    /// use zlayer_builder::ImageBuilder;
1149    ///
1150    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1151    /// // Cloudflare R2
1152    /// let builder = ImageBuilder::new("./my-project").await?
1153    ///     .with_s3_cache_endpoint(
1154    ///         "my-bucket",
1155    ///         "https://accountid.r2.cloudflarestorage.com",
1156    ///         Some("auto".to_string()),
1157    ///     )
1158    ///     .tag("myapp:latest");
1159    /// # Ok(())
1160    /// # }
1161    /// ```
1162    #[cfg(feature = "cache-s3")]
1163    #[must_use]
1164    pub fn with_s3_cache_endpoint(
1165        mut self,
1166        bucket: impl Into<String>,
1167        endpoint: impl Into<String>,
1168        region: Option<String>,
1169    ) -> Self {
1170        self.options.cache_backend_config = Some(CacheBackendConfig::S3 {
1171            bucket: bucket.into(),
1172            region,
1173            endpoint: Some(endpoint.into()),
1174            prefix: None,
1175        });
1176        debug!("Configured S3 cache with custom endpoint");
1177        self
1178    }
1179
1180    /// Configure a custom cache backend configuration.
1181    ///
1182    /// This is the most flexible way to configure the cache backend,
1183    /// allowing full control over all cache settings.
1184    ///
1185    /// Requires the `cache` feature to be enabled.
1186    ///
1187    /// # Example
1188    ///
1189    /// ```no_run,ignore
1190    /// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
1191    ///
1192    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1193    /// let builder = ImageBuilder::new("./my-project").await?
1194    ///     .with_cache_config(CacheBackendConfig::Memory)
1195    ///     .tag("myapp:latest");
1196    /// # Ok(())
1197    /// # }
1198    /// ```
1199    #[cfg(feature = "cache")]
1200    #[must_use]
1201    pub fn with_cache_config(mut self, config: CacheBackendConfig) -> Self {
1202        self.options.cache_backend_config = Some(config);
1203        debug!("Configured custom cache backend");
1204        self
1205    }
1206
1207    /// Set an already-initialized cache backend directly.
1208    ///
1209    /// This is useful when you have a pre-configured cache backend instance
1210    /// that you want to share across multiple builders or when you need
1211    /// fine-grained control over cache initialization.
1212    ///
1213    /// Requires the `cache` feature to be enabled.
1214    ///
1215    /// # Example
1216    ///
1217    /// ```no_run,ignore
1218    /// use zlayer_builder::ImageBuilder;
1219    /// use zlayer_registry::cache::BlobCache;
1220    /// use std::sync::Arc;
1221    ///
1222    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1223    /// let cache = Arc::new(Box::new(BlobCache::new()?) as Box<dyn zlayer_registry::cache::BlobCacheBackend>);
1224    ///
1225    /// let builder = ImageBuilder::new("./my-project").await?
1226    ///     .with_cache_backend(cache)
1227    ///     .tag("myapp:latest");
1228    /// # Ok(())
1229    /// # }
1230    /// ```
1231    #[cfg(feature = "cache")]
1232    #[must_use]
1233    pub fn with_cache_backend(mut self, backend: Arc<Box<dyn BlobCacheBackend>>) -> Self {
1234        self.cache_backend = Some(backend);
1235        debug!("Configured pre-initialized cache backend");
1236        self
1237    }
1238
    /// Run the build
    ///
    /// This executes the complete build process:
    /// 1. Parse Dockerfile or load runtime template
    /// 2. Build all required stages
    /// 3. Commit and tag the final image
    /// 4. Push to registry if configured
    /// 5. Clean up intermediate containers
    ///
    /// WASM builds short-circuit: when the resolved build output is a WASM
    /// artifact, the result is reported directly and no container backend runs.
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Dockerfile parsing fails
    /// - A buildah command fails
    /// - Target stage is not found
    /// - Registry push fails
    ///
    /// # Panics
    ///
    /// Panics if an instruction output is missing after all retry attempts (internal invariant).
    #[instrument(skip(self), fields(context = %self.context.display()))]
    pub async fn build(self) -> Result<BuiltImage> {
        let start_time = std::time::Instant::now();

        info!("Starting build in context: {}", self.context.display());

        // 1. Get build output (Dockerfile IR or WASM artifact)
        let build_output = self.get_build_output().await?;

        // If this is a WASM build, return early with the artifact info.
        if let BuildOutput::WasmArtifact {
            wasm_path,
            oci_path: _,
            language,
            optimized,
            size,
        } = build_output
        {
            // Millisecond build times fit u64 for any realistic build length.
            #[allow(clippy::cast_possible_truncation)]
            let build_time_ms = start_time.elapsed().as_millis() as u64;

            self.send_event(BuildEvent::BuildComplete {
                image_id: wasm_path.display().to_string(),
            });

            info!(
                "WASM build completed in {}ms: {} ({}, {} bytes, optimized={})",
                build_time_ms,
                wasm_path.display(),
                language,
                size,
                optimized
            );

            // A WASM artifact has no container layers; reported as one "layer".
            return Ok(BuiltImage {
                image_id: format!("wasm:{}", wasm_path.display()),
                tags: self.options.tags.clone(),
                layer_count: 1,
                size,
                build_time_ms,
                is_manifest: false,
            });
        }

        // Extract the Dockerfile from the BuildOutput.
        let BuildOutput::Dockerfile(dockerfile) = build_output else {
            unreachable!("WasmArtifact case handled above");
        };
        debug!("Parsed Dockerfile with {} stages", dockerfile.stages.len());

        // Delegate the build to the backend.
        // NOTE(review): this reuses the `BuildahNotFound` error variant for a
        // missing backend — confirm no caller matches on it expecting a
        // literal "buildah binary not found" condition.
        let backend = self
            .backend
            .as_ref()
            .ok_or_else(|| BuildError::BuildahNotFound {
                message: "No build backend configured".into(),
            })?;

        info!("Delegating build to {} backend", backend.name());
        backend
            .build_image(
                &self.context,
                &dockerfile,
                &self.options,
                self.event_tx.clone(),
            )
            .await
    }
1327
    /// Resolve the build input for this context into parsed build output.
    ///
    /// Detection order:
    /// 1. If `runtime` is set -> use template string -> parse as Dockerfile
    /// 2. If `zimagefile` is explicitly set -> read & parse `ZImagefile` -> convert
    /// 3. If a file called `ZImagefile` exists in the context dir -> same as (2)
    /// 4. Fall back to reading a Dockerfile (from `dockerfile` option or default)
    ///
    /// Returns [`BuildOutput::Dockerfile`] for container builds or
    /// [`BuildOutput::WasmArtifact`] for WASM builds.
    ///
    /// # Errors
    ///
    /// Fails with [`BuildError::ContextRead`] when the selected file cannot be
    /// read, or with a parse error when its contents are invalid.
    async fn get_build_output(&self) -> Result<BuildOutput> {
        // (a) Runtime template takes highest priority.
        if let Some(runtime) = &self.options.runtime {
            debug!("Using runtime template: {}", runtime);
            let content = get_template(*runtime);
            return Ok(BuildOutput::Dockerfile(Dockerfile::parse(content)?));
        }

        // (b) Explicit ZImagefile path.
        if let Some(ref zimage_path) = self.options.zimagefile {
            debug!("Reading ZImagefile: {}", zimage_path.display());
            let content =
                fs::read_to_string(zimage_path)
                    .await
                    .map_err(|e| BuildError::ContextRead {
                        path: zimage_path.clone(),
                        source: e,
                    })?;
            let zimage = crate::zimage::parse_zimagefile(&content)?;
            return self.handle_zimage(&zimage).await;
        }

        // (c) Auto-detect ZImagefile in context directory.
        // `exists()` then read is racy in theory; the error mapping on the
        // read covers the file disappearing in between.
        let auto_zimage_path = self.context.join("ZImagefile");
        if auto_zimage_path.exists() {
            debug!(
                "Found ZImagefile in context: {}",
                auto_zimage_path.display()
            );
            let content = fs::read_to_string(&auto_zimage_path).await.map_err(|e| {
                BuildError::ContextRead {
                    path: auto_zimage_path,
                    source: e,
                }
            })?;
            let zimage = crate::zimage::parse_zimagefile(&content)?;
            return self.handle_zimage(&zimage).await;
        }

        // (d) Fall back to Dockerfile.
        let dockerfile_path = self
            .options
            .dockerfile
            .clone()
            .unwrap_or_else(|| self.context.join("Dockerfile"));

        debug!("Reading Dockerfile: {}", dockerfile_path.display());

        let content =
            fs::read_to_string(&dockerfile_path)
                .await
                .map_err(|e| BuildError::ContextRead {
                    path: dockerfile_path,
                    source: e,
                })?;

        Ok(BuildOutput::Dockerfile(Dockerfile::parse(&content)?))
    }
1394
1395    /// Convert a parsed [`ZImage`] into a [`BuildOutput`].
1396    ///
1397    /// Handles all four `ZImage` modes:
1398    /// - **Runtime** mode: delegates to the template system -> [`BuildOutput::Dockerfile`]
1399    /// - **Single-stage / Multi-stage**: converts via [`zimage_to_dockerfile`] -> [`BuildOutput::Dockerfile`]
1400    /// - **WASM** mode: builds a WASM component -> [`BuildOutput::WasmArtifact`]
1401    ///
1402    /// Any `build:` directives are resolved first by spawning nested builds.
1403    async fn handle_zimage(&self, zimage: &crate::zimage::ZImage) -> Result<BuildOutput> {
1404        // Runtime mode: delegate to template system.
1405        if let Some(ref runtime_name) = zimage.runtime {
1406            let rt = Runtime::from_name(runtime_name).ok_or_else(|| {
1407                BuildError::zimagefile_validation(format!(
1408                    "unknown runtime '{runtime_name}' in ZImagefile"
1409                ))
1410            })?;
1411            let content = get_template(rt);
1412            return Ok(BuildOutput::Dockerfile(Dockerfile::parse(content)?));
1413        }
1414
1415        // WASM mode: build a WASM component.
1416        if let Some(ref wasm_config) = zimage.wasm {
1417            return self.handle_wasm_build(wasm_config).await;
1418        }
1419
1420        // Resolve any `build:` directives to concrete base image tags.
1421        let resolved = self.resolve_build_directives(zimage).await?;
1422
1423        // Single-stage or multi-stage: convert to Dockerfile IR directly.
1424        Ok(BuildOutput::Dockerfile(
1425            crate::zimage::zimage_to_dockerfile(&resolved)?,
1426        ))
1427    }
1428
    /// Build a WASM component from the `ZImagefile` wasm configuration.
    ///
    /// Converts [`ZWasmConfig`](crate::zimage::ZWasmConfig) into a
    /// [`WasmBuildConfig`](crate::wasm_builder::WasmBuildConfig) and invokes
    /// the WASM builder pipeline.
    ///
    /// # Errors
    ///
    /// Fails when the configured language name is unknown, or when the
    /// underlying WASM build fails.
    async fn handle_wasm_build(
        &self,
        wasm_config: &crate::zimage::ZWasmConfig,
    ) -> Result<BuildOutput> {
        use crate::wasm_builder::{build_wasm, WasiTarget, WasmBuildConfig, WasmLanguage};

        info!("ZImagefile specifies WASM mode, running WASM build");

        // Convert target string to WasiTarget enum.
        // NOTE(review): any string other than "preview1" silently selects
        // Preview2 — confirm misspelled targets shouldn't be a hard error.
        let target = match wasm_config.target.as_str() {
            "preview1" => WasiTarget::Preview1,
            _ => WasiTarget::Preview2,
        };

        // Resolve language: parse from string or leave as None for auto-detection.
        let language = wasm_config
            .language
            .as_deref()
            .and_then(WasmLanguage::from_name);

        // A language was specified but didn't parse: reject rather than
        // falling back to auto-detection, which could silently do the wrong thing.
        if let Some(ref lang_str) = wasm_config.language {
            if language.is_none() {
                return Err(BuildError::zimagefile_validation(format!(
                    "unknown WASM language '{lang_str}'. Supported: rust, go, python, \
                     typescript, assemblyscript, c, zig"
                )));
            }
        }

        // Build the WasmBuildConfig. Optimization level defaults to
        // size-focused "Oz" when unspecified.
        let mut config = WasmBuildConfig {
            language,
            target,
            optimize: wasm_config.optimize,
            opt_level: wasm_config
                .opt_level
                .clone()
                .unwrap_or_else(|| "Oz".to_string()),
            wit_path: wasm_config.wit.as_ref().map(PathBuf::from),
            output_path: wasm_config.output.as_ref().map(PathBuf::from),
            world: wasm_config.world.clone(),
            features: wasm_config.features.clone(),
            build_args: wasm_config.build_args.clone(),
            pre_build: Vec::new(),
            post_build: Vec::new(),
            adapter: wasm_config.adapter.as_ref().map(PathBuf::from),
        };

        // Convert ZCommand pre/post build steps to Vec<Vec<String>>.
        for cmd in &wasm_config.pre_build {
            config.pre_build.push(zcommand_to_args(cmd));
        }
        for cmd in &wasm_config.post_build {
            config.post_build.push(zcommand_to_args(cmd));
        }

        // Build the WASM component.
        let result = build_wasm(&self.context, config).await?;

        let language_name = result.language.name().to_string();
        let wasm_path = result.wasm_path;
        let size = result.size;

        info!(
            "WASM build complete: {} ({} bytes, optimized={})",
            wasm_path.display(),
            size,
            wasm_config.optimize
        );

        Ok(BuildOutput::WasmArtifact {
            wasm_path,
            // No OCI wrapping is performed here; downstream consumers see the
            // raw component path.
            oci_path: None,
            language: language_name,
            optimized: wasm_config.optimize,
            size,
        })
    }
1512
1513    /// Resolve `build:` directives in a `ZImage` by running nested builds.
1514    ///
1515    /// For each `build:` directive (top-level or per-stage), this method:
1516    /// 1. Determines the build context directory
1517    /// 2. Auto-detects the build file (`ZImagefile` > Dockerfile) unless specified
1518    /// 3. Spawns a nested `ImageBuilder` to build the context
1519    /// 4. Tags the result and replaces `build` with `base`
1520    async fn resolve_build_directives(
1521        &self,
1522        zimage: &crate::zimage::ZImage,
1523    ) -> Result<crate::zimage::ZImage> {
1524        let mut resolved = zimage.clone();
1525
1526        // Resolve top-level `build:` directive.
1527        if let Some(ref build_ctx) = resolved.build {
1528            let tag = self.run_nested_build(build_ctx, "toplevel").await?;
1529            resolved.base = Some(tag);
1530            resolved.build = None;
1531        }
1532
1533        // Resolve per-stage `build:` directives.
1534        if let Some(ref mut stages) = resolved.stages {
1535            for (name, stage) in stages.iter_mut() {
1536                if let Some(ref build_ctx) = stage.build {
1537                    let tag = self.run_nested_build(build_ctx, name).await?;
1538                    stage.base = Some(tag);
1539                    stage.build = None;
1540                }
1541            }
1542        }
1543
1544        Ok(resolved)
1545    }
1546
1547    /// Run a nested build from a `build:` directive and return the resulting image tag.
1548    fn run_nested_build<'a>(
1549        &'a self,
1550        build_ctx: &'a crate::zimage::types::ZBuildContext,
1551        stage_name: &'a str,
1552    ) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<String>> + Send + 'a>> {
1553        Box::pin(self.run_nested_build_inner(build_ctx, stage_name))
1554    }
1555
    /// Execute one nested build for a `build:` directive and return the tag
    /// applied to its result.
    ///
    /// Steps: resolve the context directory, pick the build file (explicit
    /// `file:` if given, otherwise the nested builder's own auto-detection),
    /// apply build args and the parent's default registry, then run the build.
    ///
    /// # Errors
    ///
    /// Fails with [`BuildError::ContextRead`] when the context directory does
    /// not exist, or propagates any error from the nested build itself.
    async fn run_nested_build_inner(
        &self,
        build_ctx: &crate::zimage::types::ZBuildContext,
        stage_name: &str,
    ) -> Result<String> {
        let context_dir = build_ctx.context_dir(&self.context);

        if !context_dir.exists() {
            return Err(BuildError::ContextRead {
                path: context_dir,
                source: std::io::Error::new(
                    std::io::ErrorKind::NotFound,
                    format!(
                        "build context directory not found for build directive in '{stage_name}'"
                    ),
                ),
            });
        }

        info!(
            "Building nested image for '{}' from context: {}",
            stage_name,
            context_dir.display()
        );

        // Create a tag for the nested build result.
        // NOTE(review): second-granularity timestamp — two nested builds of
        // the same stage within one second would reuse the tag; confirm
        // acceptable.
        let tag = format!(
            "zlayer-build-dep-{}:{}",
            stage_name,
            chrono_lite_timestamp()
        );

        // Create nested builder.
        let mut nested = ImageBuilder::new(&context_dir).await?;
        nested = nested.tag(&tag);

        // Apply explicit build file if specified: YAML extensions or a name
        // beginning with "ZImagefile" are treated as ZImagefiles, everything
        // else as a Dockerfile.
        // NOTE(review): `starts_with` won't match paths like
        // "subdir/ZImagefile" — TODO confirm `file()` is always a bare name.
        if let Some(file) = build_ctx.file() {
            let file_path = context_dir.join(file);
            if std::path::Path::new(file).extension().is_some_and(|ext| {
                ext.eq_ignore_ascii_case("yml") || ext.eq_ignore_ascii_case("yaml")
            }) || file.starts_with("ZImagefile")
            {
                nested = nested.zimagefile(file_path);
            } else {
                nested = nested.dockerfile(file_path);
            }
        }

        // Apply build args.
        for (key, value) in build_ctx.args() {
            nested = nested.build_arg(&key, &value);
        }

        // Propagate default registry if set.
        if let Some(ref reg) = self.options.default_registry {
            nested = nested.default_registry(reg.clone());
        }

        // Run the nested build.
        let result = nested.build().await?;
        info!(
            "Nested build for '{}' completed: {}",
            stage_name, result.image_id
        );

        Ok(tag)
    }
1624
1625    /// Send an event to the TUI (if configured)
1626    fn send_event(&self, event: BuildEvent) {
1627        if let Some(tx) = &self.event_tx {
1628            // Ignore send errors - the receiver may have been dropped
1629            let _ = tx.send(event);
1630        }
1631    }
1632}
1633
/// Generate a timestamp-based name component: seconds since the Unix epoch,
/// as a decimal string.
///
/// Used to make nested-build tags unique. A clock set before the epoch
/// degrades to `"0"` instead of panicking.
///
/// NOTE(review): second granularity means two calls within the same second
/// produce the same value — callers relying on uniqueness should confirm
/// this is acceptable.
fn chrono_lite_timestamp() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
        .to_string()
}
1642
1643/// Convert a [`ZCommand`](crate::zimage::ZCommand) into a vector of string arguments
1644/// suitable for passing to [`WasmBuildConfig`](crate::wasm_builder::WasmBuildConfig)
1645/// pre/post build command lists.
1646fn zcommand_to_args(cmd: &crate::zimage::ZCommand) -> Vec<String> {
1647    match cmd {
1648        crate::zimage::ZCommand::Shell(s) => {
1649            vec!["/bin/sh".to_string(), "-c".to_string(), s.clone()]
1650        }
1651        crate::zimage::ZCommand::Exec(args) => args.clone(),
1652    }
1653}
1654
#[cfg(test)]
mod tests {
    use super::*;

    /// Construct a minimal `ImageBuilder` by hand, bypassing the async
    /// `ImageBuilder::new` constructor so unit tests stay synchronous.
    fn create_test_builder() -> ImageBuilder {
        ImageBuilder {
            context: PathBuf::from("/tmp/test"),
            options: BuildOptions::default(),
            executor: BuildahExecutor::with_path("/usr/bin/buildah"),
            event_tx: None,
            backend: None,
            #[cfg(feature = "cache")]
            cache_backend: None,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        }
    }

    #[test]
    fn test_registry_auth_new() {
        let credentials = RegistryAuth::new("user", "pass");
        assert_eq!(credentials.username, "user");
        assert_eq!(credentials.password, "pass");
    }

    #[test]
    fn test_build_options_default() {
        let defaults = BuildOptions::default();

        // No build sources or selectors are set out of the box.
        assert!(defaults.dockerfile.is_none());
        assert!(defaults.zimagefile.is_none());
        assert!(defaults.runtime.is_none());
        assert!(defaults.build_args.is_empty());
        assert!(defaults.target.is_none());
        assert!(defaults.tags.is_empty());

        // Boolean flags default to off...
        assert!(!defaults.no_cache);
        assert!(!defaults.push);
        assert!(!defaults.squash);
        // ...except layer caching, which is enabled by default.
        assert!(defaults.layers);

        // Cache endpoints and TTL stay unset until explicitly configured.
        assert!(defaults.cache_from.is_none());
        assert!(defaults.cache_to.is_none());
        assert!(defaults.cache_ttl.is_none());
        // Cache backend config exists only with the `cache` feature.
        #[cfg(feature = "cache")]
        assert!(defaults.cache_backend_config.is_none());
    }

    // Builder method chaining tests

    #[test]
    fn test_builder_chaining() {
        let configured = create_test_builder()
            .dockerfile("./Dockerfile.test")
            .runtime(Runtime::Node20)
            .build_arg("VERSION", "1.0")
            .target("builder")
            .tag("myapp:latest")
            .tag("myapp:v1")
            .no_cache()
            .squash()
            .format("oci");

        let opts = &configured.options;
        assert_eq!(opts.dockerfile, Some(PathBuf::from("./Dockerfile.test")));
        assert_eq!(opts.runtime, Some(Runtime::Node20));
        assert_eq!(opts.build_args.get("VERSION"), Some(&"1.0".to_string()));
        assert_eq!(opts.target, Some("builder".to_string()));
        assert_eq!(opts.tags.len(), 2);
        assert!(opts.no_cache);
        assert!(opts.squash);
        assert_eq!(opts.format, Some("oci".to_string()));
    }

    #[test]
    fn test_builder_push_with_auth() {
        let builder = create_test_builder().push(RegistryAuth::new("user", "pass"));

        assert!(builder.options.push);
        let auth = builder
            .options
            .registry_auth
            .expect("push() should record the credentials");
        assert_eq!(auth.username, "user");
        assert_eq!(auth.password, "pass");
    }

    #[test]
    fn test_builder_push_without_auth() {
        let builder = create_test_builder().push_without_auth();

        assert!(builder.options.push);
        assert!(builder.options.registry_auth.is_none());
    }

    #[test]
    fn test_builder_layers() {
        let builder = create_test_builder();
        assert!(builder.options.layers, "layer caching should default to on");

        // Toggle off, then back on, via the builder method.
        let builder = builder.layers(false);
        assert!(!builder.options.layers);

        let builder = builder.layers(true);
        assert!(builder.options.layers);
    }

    #[test]
    fn test_builder_cache_from() {
        let builder = create_test_builder();
        assert!(builder.options.cache_from.is_none());

        let builder = builder.cache_from("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_from.as_deref(),
            Some("registry.example.com/myapp:cache")
        );
    }

    #[test]
    fn test_builder_cache_to() {
        let builder = create_test_builder();
        assert!(builder.options.cache_to.is_none());

        let builder = builder.cache_to("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_to.as_deref(),
            Some("registry.example.com/myapp:cache")
        );
    }

    #[test]
    fn test_builder_cache_ttl() {
        use std::time::Duration;

        let builder = create_test_builder();
        assert!(builder.options.cache_ttl.is_none());

        let one_hour = Duration::from_secs(3600);
        let builder = builder.cache_ttl(one_hour);
        assert_eq!(builder.options.cache_ttl, Some(one_hour));
    }

    #[test]
    fn test_builder_cache_options_chaining() {
        use std::time::Duration;

        let builder = create_test_builder()
            .layers(true)
            .cache_from("registry.example.com/cache:input")
            .cache_to("registry.example.com/cache:output")
            .cache_ttl(Duration::from_secs(7200))
            .no_cache();

        assert!(builder.options.layers);
        assert_eq!(
            builder.options.cache_from.as_deref(),
            Some("registry.example.com/cache:input")
        );
        assert_eq!(
            builder.options.cache_to.as_deref(),
            Some("registry.example.com/cache:output")
        );
        assert_eq!(builder.options.cache_ttl, Some(Duration::from_secs(7200)));
        assert!(builder.options.no_cache);
    }

    #[test]
    fn test_chrono_lite_timestamp() {
        let seconds: u64 = chrono_lite_timestamp()
            .parse()
            .expect("Should be a valid u64");
        // Should be reasonably recent (after Nov 2023).
        assert!(seconds > 1_700_000_000);
    }
}