// zlayer_builder/builder.rs
1//! `ImageBuilder` - High-level API for building container images
2//!
3//! This module provides the [`ImageBuilder`] type which orchestrates the full
4//! container image build process, from Dockerfile parsing through buildah
5//! execution to final image creation.
6//!
7//! # Example
8//!
9//! ```no_run
//! use zlayer_builder::ImageBuilder;
11//!
12//! #[tokio::main]
13//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
14//!     // Build from a Dockerfile
15//!     let image = ImageBuilder::new("./my-app").await?
16//!         .tag("myapp:latest")
17//!         .tag("myapp:v1.0.0")
18//!         .build()
19//!         .await?;
20//!
21//!     println!("Built image: {}", image.image_id);
22//!     Ok(())
23//! }
24//! ```
25//!
26//! # Using Runtime Templates
27//!
28//! ```no_run
29//! use zlayer_builder::{ImageBuilder, Runtime};
30//!
31//! #[tokio::main]
32//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
33//!     // Build using a runtime template (no Dockerfile needed)
34//!     let image = ImageBuilder::new("./my-node-app").await?
35//!         .runtime(Runtime::Node20)
36//!         .tag("myapp:latest")
37//!         .build()
38//!         .await?;
39//!
40//!     println!("Built image: {}", image.image_id);
41//!     Ok(())
42//! }
43//! ```
44//!
45//! # Multi-stage Builds with Target
46//!
47//! ```no_run
48//! use zlayer_builder::ImageBuilder;
49//!
50//! #[tokio::main]
51//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
52//!     // Build only up to a specific stage
53//!     let image = ImageBuilder::new("./my-app").await?
54//!         .target("builder")
55//!         .tag("myapp:builder")
56//!         .build()
57//!         .await?;
58//!
59//!     println!("Built intermediate image: {}", image.image_id);
60//!     Ok(())
61//! }
62//! ```
63//!
64//! # With TUI Progress Updates
65//!
66//! ```no_run
67//! use zlayer_builder::{ImageBuilder, BuildEvent};
68//! use std::sync::mpsc;
69//!
70//! #[tokio::main]
71//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
72//!     let (tx, rx) = mpsc::channel::<BuildEvent>();
73//!
74//!     // Start TUI in another thread
75//!     std::thread::spawn(move || {
76//!         // Process events from rx...
77//!         while let Ok(event) = rx.recv() {
78//!             println!("Event: {:?}", event);
79//!         }
80//!     });
81//!
//!     let _image = ImageBuilder::new("./my-app").await?
83//!         .tag("myapp:latest")
84//!         .with_events(tx)
85//!         .build()
86//!         .await?;
87//!
88//!     Ok(())
89//! }
90//! ```
91//!
92//! # With Cache Backend (requires `cache` feature)
93//!
94//! ```no_run,ignore
95//! use zlayer_builder::ImageBuilder;
96//!
97//! #[tokio::main]
98//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
99//!     let image = ImageBuilder::new("./my-app").await?
100//!         .with_cache_dir("/var/cache/zlayer")  // Use persistent disk cache
101//!         .tag("myapp:latest")
102//!         .build()
103//!         .await?;
104//!
105//!     println!("Built image: {}", image.image_id);
106//!     Ok(())
107//! }
108//! ```
109
110use std::collections::HashMap;
111use std::path::{Path, PathBuf};
112use std::sync::mpsc;
113use std::sync::Arc;
114
115use tokio::fs;
116#[cfg(feature = "local-registry")]
117use tracing::warn;
118use tracing::{debug, info, instrument};
119
120use crate::backend::BuildBackend;
121#[cfg(feature = "local-registry")]
122use crate::buildah::BuildahCommand;
123use crate::buildah::BuildahExecutor;
124use crate::dockerfile::{Dockerfile, RunMount};
125use crate::error::{BuildError, Result};
126use crate::templates::{get_template, Runtime};
127use crate::tui::BuildEvent;
128
129#[cfg(feature = "cache")]
130use zlayer_registry::cache::BlobCacheBackend;
131
132#[cfg(feature = "local-registry")]
133use zlayer_registry::LocalRegistry;
134
135#[cfg(feature = "local-registry")]
136use zlayer_registry::import_image;
137
/// Output from parsing a `ZImagefile` - either a Dockerfile for container builds
/// or a WASM build result for WebAssembly builds.
///
/// Most `ZImagefile` modes (runtime, single-stage, multi-stage) produce a
/// [`Dockerfile`] IR that is then built with buildah. WASM mode produces
/// a compiled artifact directly, bypassing the container build pipeline.
#[derive(Debug)]
pub enum BuildOutput {
    /// Standard container build - produces a Dockerfile to be built with buildah.
    Dockerfile(Dockerfile),
    /// WASM component build - already built, produces artifact path.
    WasmArtifact {
        /// Path to the compiled WASM binary.
        wasm_path: PathBuf,
        /// Path to the OCI artifact directory (if exported).
        oci_path: Option<PathBuf>,
        /// Source language used.
        language: String,
        /// Whether optimization was applied.
        optimized: bool,
        /// Size of the output file in bytes.
        /// NOTE(review): presumably the size of `wasm_path` — confirm at the
        /// call site that constructs this variant.
        size: u64,
    },
}
162
/// Configuration for the layer cache backend.
///
/// This enum specifies which cache backend to use for storing and retrieving
/// cached layers during builds. The cache feature must be enabled for this
/// to be available.
///
/// The active variant is selected via [`BuildOptions::cache_backend_config`].
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// // Use persistent disk cache
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_config(CacheBackendConfig::Persistent {
///         path: "/var/cache/zlayer".into(),
///     })
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "cache")]
#[derive(Debug, Clone, Default)]
pub enum CacheBackendConfig {
    /// In-memory cache (cleared when process exits). This is the default.
    ///
    /// Useful for CI/CD environments where persistence isn't needed
    /// but you want to avoid re-downloading base image layers within
    /// a single build session.
    #[default]
    Memory,

    /// Persistent disk-based cache using redb.
    ///
    /// Requires the `cache-persistent` feature. Layers are stored on disk
    /// and persist across builds, significantly speeding up repeated builds.
    #[cfg(feature = "cache-persistent")]
    Persistent {
        /// Path to the cache directory or database file.
        /// If a directory, `blob_cache.redb` will be created inside it.
        path: PathBuf,
    },

    /// S3-compatible object storage backend.
    ///
    /// Requires the `cache-s3` feature. Useful for distributed build systems
    /// where multiple build machines need to share a cache.
    #[cfg(feature = "cache-s3")]
    S3 {
        /// S3 bucket name
        bucket: String,
        /// AWS region (optional, uses SDK default if not set)
        region: Option<String>,
        /// Custom endpoint URL (for S3-compatible services like R2, B2, `MinIO`)
        endpoint: Option<String>,
        /// Key prefix for cached blobs (default: "zlayer/layers/")
        prefix: Option<String>,
    },
}
222
/// Built image information returned after a successful build
///
/// Produced by the builder's `build()` flow (see the module-level examples,
/// where the result exposes `image_id`).
#[derive(Debug, Clone)]
pub struct BuiltImage {
    /// Image ID (sha256:...)
    pub image_id: String,
    /// Applied tags
    pub tags: Vec<String>,
    /// Number of layers in the final image
    pub layer_count: usize,
    /// Total size in bytes (0 if not computed)
    pub size: u64,
    /// Build duration in milliseconds
    pub build_time_ms: u64,
    /// Whether this image is a manifest list (multi-arch).
    pub is_manifest: bool,
}
239
/// Registry authentication credentials
///
/// A plain username/password (or token) pair handed to a registry
/// when pushing images.
#[derive(Debug, Clone)]
pub struct RegistryAuth {
    /// Registry username
    pub username: String,
    /// Registry password or token
    pub password: String,
}

impl RegistryAuth {
    /// Build a credential pair from anything convertible into `String`.
    pub fn new(username: impl Into<String>, password: impl Into<String>) -> Self {
        let username = username.into();
        let password = password.into();
        Self { username, password }
    }
}
258
/// Strategy for pulling the base image before building.
///
/// Controls the `--pull` flag passed to `buildah from`. The default is
/// [`PullBaseMode::Newer`], matching the behaviour users expect from
/// modern build tools: fast when nothing has changed, correct when the
/// upstream base image has been republished.
///
/// Stored in [`BuildOptions::pull`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum PullBaseMode {
    /// Pull only if the registry has a newer version (`--pull=newer`).
    /// Default behaviour.
    #[default]
    Newer,
    /// Always pull, even if a local copy exists (`--pull=always`).
    Always,
    /// Never pull — use whatever is in local storage (no `--pull` flag passed).
    Never,
}
276
/// Build options for customizing the image build process
///
/// Typically created via [`BuildOptions::default`] and then mutated through
/// the fluent setter methods on the image builder.
#[derive(Debug, Clone)]
#[allow(clippy::struct_excessive_bools)]
pub struct BuildOptions {
    /// Dockerfile path (default: Dockerfile in context)
    pub dockerfile: Option<PathBuf>,
    /// `ZImagefile` path (alternative to Dockerfile)
    pub zimagefile: Option<PathBuf>,
    /// Use runtime template instead of Dockerfile
    pub runtime: Option<Runtime>,
    /// Build arguments (ARG values)
    pub build_args: HashMap<String, String>,
    /// Target stage for multi-stage builds
    pub target: Option<String>,
    /// Image tags to apply (first tag is the primary name at commit time)
    pub tags: Vec<String>,
    /// Disable layer caching
    pub no_cache: bool,
    /// Push to registry after build
    pub push: bool,
    /// Registry auth (if pushing)
    pub registry_auth: Option<RegistryAuth>,
    /// Squash all layers into one
    pub squash: bool,
    /// Image format (oci or docker)
    pub format: Option<String>,
    /// Enable buildah layer caching (--layers flag for `buildah build`).
    /// Default: true
    ///
    /// Note: `ZLayer` uses manual container creation (`buildah from`, `buildah run`,
    /// `buildah commit`) rather than `buildah build`, so this flag is reserved
    /// for future use when/if we switch to `buildah build` (bud) command.
    pub layers: bool,
    /// Registry to pull cache from (--cache-from for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-from=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-from natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_from: Option<String>,
    /// Registry to push cache to (--cache-to for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-to=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-to natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_to: Option<String>,
    /// Maximum cache age (--cache-ttl for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-ttl=<duration>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement cache TTL support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-ttl natively
    /// 2. Implementing custom cache expiration logic for our layer caching system
    pub cache_ttl: Option<std::time::Duration>,
    /// Cache backend configuration (requires `cache` feature).
    ///
    /// When configured, the builder will store layer data in the specified
    /// cache backend for faster subsequent builds. This is separate from
    /// buildah's native caching and operates at the `ZLayer` level.
    ///
    /// # Integration Points
    ///
    /// The cache backend is used at several points during the build:
    ///
    /// 1. **Before instruction execution**: Check if a cached layer exists
    ///    for the (`instruction_hash`, `base_layer`) tuple
    /// 2. **After instruction execution**: Store the resulting layer data
    ///    in the cache for future builds
    /// 3. **Base image layers**: Cache pulled base image layers to avoid
    ///    re-downloading from registries
    ///
    /// TODO: Wire up cache lookups in the build loop once layer digests
    /// are properly computed and tracked.
    #[cfg(feature = "cache")]
    pub cache_backend_config: Option<CacheBackendConfig>,
    /// Default OCI/WASM-compatible registry to check for images before falling
    /// back to Docker Hub qualification.
    ///
    /// When set, the builder will probe this registry for short image names
    /// before qualifying them to `docker.io`. For example, if set to
    /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
    /// the builder will check `git.example.com:5000/myapp:latest` first.
    pub default_registry: Option<String>,
    /// Default cache mounts injected into all RUN instructions.
    /// These are merged with any step-level cache mounts (deduped by target path).
    pub default_cache_mounts: Vec<RunMount>,
    /// Number of retries for failed RUN steps (0 = no retries, default)
    pub retries: u32,
    /// Target platform for the build (e.g., "linux/amd64", "linux/arm64").
    /// When set, `buildah from` pulls the platform-specific image variant.
    pub platform: Option<String>,
    /// SHA-256 hash of the source Dockerfile/ZImagefile content.
    ///
    /// When set, the sandbox builder can skip a rebuild if the cached image
    /// was produced from identical source content (content-based invalidation).
    pub source_hash: Option<String>,
    /// How to handle base-image pulling during `buildah from`.
    ///
    /// Default: [`PullBaseMode::Newer`] — only pull if the registry has a
    /// newer version. Set to [`PullBaseMode::Always`] for CI builds that
    /// must always refresh, or [`PullBaseMode::Never`] for offline builds.
    pub pull: PullBaseMode,
}
389
390impl Default for BuildOptions {
391    fn default() -> Self {
392        Self {
393            dockerfile: None,
394            zimagefile: None,
395            runtime: None,
396            build_args: HashMap::new(),
397            target: None,
398            tags: Vec::new(),
399            no_cache: false,
400            push: false,
401            registry_auth: None,
402            squash: false,
403            format: None,
404            layers: true,
405            cache_from: None,
406            cache_to: None,
407            cache_ttl: None,
408            #[cfg(feature = "cache")]
409            cache_backend_config: None,
410            default_registry: None,
411            default_cache_mounts: Vec::new(),
412            retries: 0,
413            platform: None,
414            source_hash: None,
415            pull: PullBaseMode::default(),
416        }
417    }
418}
419
/// Image builder - orchestrates the full build process
///
/// `ImageBuilder` provides a fluent API for configuring and executing
/// container image builds using buildah as the backend. Construct via
/// `ImageBuilder::new`, `with_executor`, or `with_backend`.
///
/// # Build Process
///
/// 1. Parse Dockerfile (or use runtime template)
/// 2. Resolve target stages if specified
/// 3. Build each stage sequentially:
///    - Create working container from base image
///    - Execute each instruction
///    - Commit intermediate stages for COPY --from
/// 4. Commit final image with tags
/// 5. Push to registry if configured
/// 6. Clean up intermediate containers
///
/// # Cache Backend Integration (requires `cache` feature)
///
/// When a cache backend is configured, the builder can store and retrieve
/// cached layer data to speed up subsequent builds:
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
///
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_dir("/var/cache/zlayer")
///     .tag("myapp:latest");
/// ```
pub struct ImageBuilder {
    /// Build context directory
    context: PathBuf,
    /// Build options (populated by the fluent setter methods)
    options: BuildOptions,
    /// Buildah executor (kept for backwards compatibility)
    #[allow(dead_code)]
    executor: BuildahExecutor,
    /// Event sender for TUI updates
    event_tx: Option<mpsc::Sender<BuildEvent>>,
    /// Pluggable build backend (buildah, sandbox, etc.).
    ///
    /// When set, the `build()` method delegates to this backend instead of
    /// using the inline buildah logic. Set automatically by `new()` via
    /// `detect_backend()`, or explicitly via `with_backend()`.
    backend: Option<Arc<dyn BuildBackend>>,
    /// Cache backend for layer caching (requires `cache` feature).
    ///
    /// When set, the builder will attempt to retrieve cached layers before
    /// executing instructions, and store results in the cache after execution.
    ///
    /// TODO: Implement cache lookups in the build loop. Currently the backend
    /// is stored but not actively used during builds. Integration points:
    /// - Check cache before executing RUN instructions
    /// - Store layer data after successful instruction execution
    /// - Cache base image layers pulled from registries
    #[cfg(feature = "cache")]
    cache_backend: Option<Arc<Box<dyn BlobCacheBackend>>>,
    /// Local OCI registry for checking cached images before remote pulls.
    #[cfg(feature = "local-registry")]
    local_registry: Option<LocalRegistry>,
}
481
482impl ImageBuilder {
483    /// Create a new `ImageBuilder` with the given context directory
484    ///
485    /// The context directory should contain the Dockerfile (unless using
486    /// a runtime template) and any files that will be copied into the image.
487    ///
488    /// # Arguments
489    ///
490    /// * `context` - Path to the build context directory
491    ///
492    /// # Errors
493    ///
494    /// Returns an error if:
495    /// - The context directory does not exist
496    /// - Buildah is not installed or not accessible
497    ///
498    /// # Example
499    ///
500    /// ```no_run
501    /// use zlayer_builder::ImageBuilder;
502    ///
503    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
504    /// let builder = ImageBuilder::new("./my-project").await?;
505    /// # Ok(())
506    /// # }
507    /// ```
508    #[instrument(skip_all, fields(context = %context.as_ref().display()))]
509    pub async fn new(context: impl AsRef<Path>) -> Result<Self> {
510        let context = context.as_ref().to_path_buf();
511
512        // Verify context exists
513        if !context.exists() {
514            return Err(BuildError::ContextRead {
515                path: context,
516                source: std::io::Error::new(
517                    std::io::ErrorKind::NotFound,
518                    "Build context directory not found",
519                ),
520            });
521        }
522
523        // Detect the best available build backend for this platform.
524        let backend = crate::backend::detect_backend().await.ok();
525
526        // Initialize buildah executor.
527        // On macOS, if buildah is not found we fall back to a default executor
528        // (the backend will handle the actual build dispatch).
529        let executor = match BuildahExecutor::new_async().await {
530            Ok(exec) => exec,
531            #[cfg(target_os = "macos")]
532            Err(_) => {
533                info!("Buildah not found on macOS; backend will handle build dispatch");
534                BuildahExecutor::default()
535            }
536            #[cfg(not(target_os = "macos"))]
537            Err(e) => return Err(e),
538        };
539
540        debug!("Created ImageBuilder for context: {}", context.display());
541
542        Ok(Self {
543            context,
544            options: BuildOptions::default(),
545            executor,
546            event_tx: None,
547            backend,
548            #[cfg(feature = "cache")]
549            cache_backend: None,
550            #[cfg(feature = "local-registry")]
551            local_registry: None,
552        })
553    }
554
555    /// Create an `ImageBuilder` with a custom buildah executor
556    ///
557    /// This is useful for testing or when you need to configure
558    /// the executor with specific storage options. The executor is
559    /// wrapped in a [`BuildahBackend`] so the build dispatches through
560    /// the [`BuildBackend`] trait.
561    ///
562    /// # Errors
563    ///
564    /// Returns an error if the context directory does not exist.
565    pub fn with_executor(context: impl AsRef<Path>, executor: BuildahExecutor) -> Result<Self> {
566        let context = context.as_ref().to_path_buf();
567
568        if !context.exists() {
569            return Err(BuildError::ContextRead {
570                path: context,
571                source: std::io::Error::new(
572                    std::io::ErrorKind::NotFound,
573                    "Build context directory not found",
574                ),
575            });
576        }
577
578        let backend: Arc<dyn BuildBackend> = Arc::new(
579            crate::backend::BuildahBackend::with_executor(executor.clone()),
580        );
581
582        Ok(Self {
583            context,
584            options: BuildOptions::default(),
585            executor,
586            event_tx: None,
587            backend: Some(backend),
588            #[cfg(feature = "cache")]
589            cache_backend: None,
590            #[cfg(feature = "local-registry")]
591            local_registry: None,
592        })
593    }
594
595    /// Create an `ImageBuilder` with an explicit [`BuildBackend`].
596    ///
597    /// The backend is used for all build, push, tag, and manifest
598    /// operations. The internal `BuildahExecutor` is set to the default
599    /// (it is only used if no backend is set).
600    ///
601    /// # Errors
602    ///
603    /// Returns an error if the context directory does not exist.
604    pub fn with_backend(context: impl AsRef<Path>, backend: Arc<dyn BuildBackend>) -> Result<Self> {
605        let context = context.as_ref().to_path_buf();
606
607        if !context.exists() {
608            return Err(BuildError::ContextRead {
609                path: context,
610                source: std::io::Error::new(
611                    std::io::ErrorKind::NotFound,
612                    "Build context directory not found",
613                ),
614            });
615        }
616
617        Ok(Self {
618            context,
619            options: BuildOptions::default(),
620            executor: BuildahExecutor::default(),
621            event_tx: None,
622            backend: Some(backend),
623            #[cfg(feature = "cache")]
624            cache_backend: None,
625            #[cfg(feature = "local-registry")]
626            local_registry: None,
627        })
628    }
629
630    /// Set a custom Dockerfile path
631    ///
632    /// By default, the builder looks for a file named `Dockerfile` in the
633    /// context directory. Use this method to specify a different path.
634    ///
635    /// # Example
636    ///
637    /// ```no_run
638    /// # use zlayer_builder::ImageBuilder;
639    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
640    /// let builder = ImageBuilder::new("./my-project").await?
641    ///     .dockerfile("./my-project/Dockerfile.prod");
642    /// # Ok(())
643    /// # }
644    /// ```
645    #[must_use]
646    pub fn dockerfile(mut self, path: impl AsRef<Path>) -> Self {
647        self.options.dockerfile = Some(path.as_ref().to_path_buf());
648        self
649    }
650
651    /// Set a custom `ZImagefile` path
652    ///
653    /// `ZImagefiles` are a YAML-based alternative to Dockerfiles. When set,
654    /// the builder will parse the `ZImagefile` and convert it to the internal
655    /// Dockerfile IR for execution.
656    ///
657    /// # Example
658    ///
659    /// ```no_run
660    /// # use zlayer_builder::ImageBuilder;
661    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
662    /// let builder = ImageBuilder::new("./my-project").await?
663    ///     .zimagefile("./my-project/ZImagefile");
664    /// # Ok(())
665    /// # }
666    /// ```
667    #[must_use]
668    pub fn zimagefile(mut self, path: impl AsRef<Path>) -> Self {
669        self.options.zimagefile = Some(path.as_ref().to_path_buf());
670        self
671    }
672
673    /// Use a runtime template instead of a Dockerfile
674    ///
675    /// Runtime templates provide pre-built Dockerfiles for common
676    /// development environments. When set, the Dockerfile option is ignored.
677    ///
678    /// # Example
679    ///
680    /// ```no_run
681    /// use zlayer_builder::{ImageBuilder, Runtime};
682    ///
683    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
684    /// let builder = ImageBuilder::new("./my-node-app").await?
685    ///     .runtime(Runtime::Node20);
686    /// # Ok(())
687    /// # }
688    /// ```
689    #[must_use]
690    pub fn runtime(mut self, runtime: Runtime) -> Self {
691        self.options.runtime = Some(runtime);
692        self
693    }
694
695    /// Add a build argument
696    ///
697    /// Build arguments are passed to the Dockerfile and can be referenced
698    /// using the `ARG` instruction.
699    ///
700    /// # Example
701    ///
702    /// ```no_run
703    /// # use zlayer_builder::ImageBuilder;
704    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
705    /// let builder = ImageBuilder::new("./my-project").await?
706    ///     .build_arg("VERSION", "1.0.0")
707    ///     .build_arg("DEBUG", "false");
708    /// # Ok(())
709    /// # }
710    /// ```
711    #[must_use]
712    pub fn build_arg(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
713        self.options.build_args.insert(key.into(), value.into());
714        self
715    }
716
717    /// Set multiple build arguments at once
718    #[must_use]
719    pub fn build_args(mut self, args: HashMap<String, String>) -> Self {
720        self.options.build_args.extend(args);
721        self
722    }
723
724    /// Set the target stage for multi-stage builds
725    ///
726    /// When building a multi-stage Dockerfile, you can stop at a specific
727    /// stage instead of building all stages.
728    ///
729    /// # Example
730    ///
731    /// ```no_run
732    /// # use zlayer_builder::ImageBuilder;
733    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
734    /// // Dockerfile:
735    /// // FROM node:20 AS builder
736    /// // ...
737    /// // FROM node:20-slim AS runtime
738    /// // ...
739    ///
740    /// let builder = ImageBuilder::new("./my-project").await?
741    ///     .target("builder")
742    ///     .tag("myapp:builder");
743    /// # Ok(())
744    /// # }
745    /// ```
746    #[must_use]
747    pub fn target(mut self, stage: impl Into<String>) -> Self {
748        self.options.target = Some(stage.into());
749        self
750    }
751
752    /// Add an image tag
753    ///
754    /// Tags are applied to the final image. You can add multiple tags.
755    /// The first tag is used as the primary image name during commit.
756    ///
757    /// # Example
758    ///
759    /// ```no_run
760    /// # use zlayer_builder::ImageBuilder;
761    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
762    /// let builder = ImageBuilder::new("./my-project").await?
763    ///     .tag("myapp:latest")
764    ///     .tag("myapp:v1.0.0")
765    ///     .tag("registry.example.com/myapp:v1.0.0");
766    /// # Ok(())
767    /// # }
768    /// ```
769    #[must_use]
770    pub fn tag(mut self, tag: impl Into<String>) -> Self {
771        self.options.tags.push(tag.into());
772        self
773    }
774
775    /// Disable layer caching
776    ///
777    /// When enabled, all layers are rebuilt from scratch even if
778    /// they could be served from cache.
779    ///
780    /// Note: Currently this flag is tracked but not fully implemented in the
781    /// build process. `ZLayer` uses manual container creation (`buildah from`,
782    /// `buildah run`, `buildah commit`) which doesn't have built-in caching
783    /// like `buildah build` does. Future work could implement layer-level
784    /// caching by checking instruction hashes against previously built layers.
785    #[must_use]
786    pub fn no_cache(mut self) -> Self {
787        self.options.no_cache = true;
788        self
789    }
790
791    /// Set the base-image pull strategy for the build.
792    ///
793    /// By default, `buildah from` is invoked with `--pull=newer`, so an
794    /// up-to-date local base image is reused but a newer one on the
795    /// registry will be fetched. Pass [`PullBaseMode::Always`] to force a
796    /// fresh pull on every build, or [`PullBaseMode::Never`] to stay fully
797    /// offline.
798    #[must_use]
799    pub fn pull(mut self, mode: PullBaseMode) -> Self {
800        self.options.pull = mode;
801        self
802    }
803
804    /// Enable or disable layer caching
805    ///
806    /// This controls the `--layers` flag for buildah. When enabled (default),
807    /// buildah can cache and reuse intermediate layers.
808    ///
809    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
810    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
811    /// flag is reserved for future use when/if we switch to `buildah build`.
812    ///
813    /// # Example
814    ///
815    /// ```no_run
816    /// # use zlayer_builder::ImageBuilder;
817    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
818    /// let builder = ImageBuilder::new("./my-project").await?
819    ///     .layers(false)  // Disable layer caching
820    ///     .tag("myapp:latest");
821    /// # Ok(())
822    /// # }
823    /// ```
824    #[must_use]
825    pub fn layers(mut self, enable: bool) -> Self {
826        self.options.layers = enable;
827        self
828    }
829
830    /// Set registry to pull cache from
831    ///
832    /// This corresponds to buildah's `--cache-from` flag, which allows
833    /// pulling cached layers from a remote registry to speed up builds.
834    ///
835    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
836    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
837    /// option is reserved for future implementation.
838    ///
839    /// TODO: Implement remote cache support. This would require either:
840    /// 1. Switching to `buildah build` command which supports --cache-from natively
841    /// 2. Implementing custom layer caching with registry pull for intermediate layers
842    ///
843    /// # Example
844    ///
845    /// ```no_run
846    /// # use zlayer_builder::ImageBuilder;
847    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
848    /// let builder = ImageBuilder::new("./my-project").await?
849    ///     .cache_from("registry.example.com/myapp:cache")
850    ///     .tag("myapp:latest");
851    /// # Ok(())
852    /// # }
853    /// ```
854    #[must_use]
855    pub fn cache_from(mut self, registry: impl Into<String>) -> Self {
856        self.options.cache_from = Some(registry.into());
857        self
858    }
859
860    /// Set registry to push cache to
861    ///
862    /// This corresponds to buildah's `--cache-to` flag, which allows
863    /// pushing cached layers to a remote registry for future builds to use.
864    ///
865    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
866    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
867    /// option is reserved for future implementation.
868    ///
869    /// TODO: Implement remote cache support. This would require either:
870    /// 1. Switching to `buildah build` command which supports --cache-to natively
871    /// 2. Implementing custom layer caching with registry push for intermediate layers
872    ///
873    /// # Example
874    ///
875    /// ```no_run
876    /// # use zlayer_builder::ImageBuilder;
877    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
878    /// let builder = ImageBuilder::new("./my-project").await?
879    ///     .cache_to("registry.example.com/myapp:cache")
880    ///     .tag("myapp:latest");
881    /// # Ok(())
882    /// # }
883    /// ```
884    #[must_use]
885    pub fn cache_to(mut self, registry: impl Into<String>) -> Self {
886        self.options.cache_to = Some(registry.into());
887        self
888    }
889
890    /// Set maximum cache age
891    ///
892    /// This corresponds to buildah's `--cache-ttl` flag, which sets the
893    /// maximum age for cached layers before they are considered stale.
894    ///
895    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
896    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
897    /// option is reserved for future implementation.
898    ///
899    /// TODO: Implement cache TTL support. This would require either:
900    /// 1. Switching to `buildah build` command which supports --cache-ttl natively
901    /// 2. Implementing custom cache expiration logic for our layer caching system
902    ///
903    /// # Example
904    ///
905    /// ```no_run
906    /// # use zlayer_builder::ImageBuilder;
907    /// # use std::time::Duration;
908    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
909    /// let builder = ImageBuilder::new("./my-project").await?
910    ///     .cache_ttl(Duration::from_secs(3600 * 24))  // 24 hours
911    ///     .tag("myapp:latest");
912    /// # Ok(())
913    /// # }
914    /// ```
915    #[must_use]
916    pub fn cache_ttl(mut self, ttl: std::time::Duration) -> Self {
917        self.options.cache_ttl = Some(ttl);
918        self
919    }
920
921    /// Push the image to a registry after building
922    ///
923    /// # Arguments
924    ///
925    /// * `auth` - Registry authentication credentials
926    ///
927    /// # Example
928    ///
929    /// ```no_run
930    /// use zlayer_builder::{ImageBuilder, RegistryAuth};
931    ///
932    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
933    /// let builder = ImageBuilder::new("./my-project").await?
934    ///     .tag("registry.example.com/myapp:v1.0.0")
935    ///     .push(RegistryAuth::new("user", "password"));
936    /// # Ok(())
937    /// # }
938    /// ```
939    #[must_use]
940    pub fn push(mut self, auth: RegistryAuth) -> Self {
941        self.options.push = true;
942        self.options.registry_auth = Some(auth);
943        self
944    }
945
946    /// Enable pushing without authentication
947    ///
948    /// Use this for registries that don't require authentication
949    /// (e.g., local registries, insecure registries).
950    #[must_use]
951    pub fn push_without_auth(mut self) -> Self {
952        self.options.push = true;
953        self.options.registry_auth = None;
954        self
955    }
956
957    /// Set a default OCI/WASM-compatible registry to check for images.
958    ///
959    /// When set, the builder will probe this registry for short image names
960    /// before qualifying them to `docker.io`. For example, if set to
961    /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
962    /// the builder will check `git.example.com:5000/myapp:latest` first.
963    #[must_use]
964    pub fn default_registry(mut self, registry: impl Into<String>) -> Self {
965        self.options.default_registry = Some(registry.into());
966        self
967    }
968
969    /// Set a local OCI registry for image resolution.
970    ///
971    /// When set, the builder checks the local registry for cached images
972    /// before pulling from remote registries.
973    #[cfg(feature = "local-registry")]
974    #[must_use]
975    pub fn with_local_registry(mut self, registry: LocalRegistry) -> Self {
976        self.local_registry = Some(registry);
977        self
978    }
979
980    /// Squash all layers into a single layer
981    ///
982    /// This reduces image size but loses layer caching benefits.
983    #[must_use]
984    pub fn squash(mut self) -> Self {
985        self.options.squash = true;
986        self
987    }
988
989    /// Set the image format
990    ///
991    /// Valid values are "oci" (default) or "docker".
992    #[must_use]
993    pub fn format(mut self, format: impl Into<String>) -> Self {
994        self.options.format = Some(format.into());
995        self
996    }
997
998    /// Set default cache mounts to inject into all RUN instructions
999    #[must_use]
1000    pub fn default_cache_mounts(mut self, mounts: Vec<RunMount>) -> Self {
1001        self.options.default_cache_mounts = mounts;
1002        self
1003    }
1004
1005    /// Set the number of retries for failed RUN steps
1006    #[must_use]
1007    pub fn retries(mut self, retries: u32) -> Self {
1008        self.options.retries = retries;
1009        self
1010    }
1011
1012    /// Set the target platform for cross-architecture builds.
1013    #[must_use]
1014    pub fn platform(mut self, platform: impl Into<String>) -> Self {
1015        self.options.platform = Some(platform.into());
1016        self
1017    }
1018
1019    /// Set a pre-computed source hash for content-based cache invalidation.
1020    ///
1021    /// When set, the sandbox builder can skip a full rebuild if the cached
1022    /// image was produced from identical source content.
1023    #[must_use]
1024    pub fn source_hash(mut self, hash: impl Into<String>) -> Self {
1025        self.options.source_hash = Some(hash.into());
1026        self
1027    }
1028
1029    /// Set an event sender for TUI progress updates
1030    ///
1031    /// Events will be sent as the build progresses, allowing you to
1032    /// display a progress UI or log build status.
1033    ///
1034    /// # Example
1035    ///
1036    /// ```no_run
1037    /// use zlayer_builder::{ImageBuilder, BuildEvent};
1038    /// use std::sync::mpsc;
1039    ///
1040    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1041    /// let (tx, rx) = mpsc::channel::<BuildEvent>();
1042    ///
1043    /// let builder = ImageBuilder::new("./my-project").await?
1044    ///     .tag("myapp:latest")
1045    ///     .with_events(tx);
1046    /// # Ok(())
1047    /// # }
1048    /// ```
1049    #[must_use]
1050    pub fn with_events(mut self, tx: mpsc::Sender<BuildEvent>) -> Self {
1051        self.event_tx = Some(tx);
1052        self
1053    }
1054
1055    /// Configure a persistent disk cache backend for layer caching.
1056    ///
1057    /// When configured, the builder will store layer data on disk at the
1058    /// specified path. This cache persists across builds and significantly
1059    /// speeds up repeated builds of similar images.
1060    ///
1061    /// Requires the `cache-persistent` feature to be enabled.
1062    ///
1063    /// # Arguments
1064    ///
1065    /// * `path` - Path to the cache directory. If a directory, creates
1066    ///   `blob_cache.redb` inside it. If a file path, uses it directly.
1067    ///
1068    /// # Example
1069    ///
1070    /// ```no_run,ignore
1071    /// use zlayer_builder::ImageBuilder;
1072    ///
1073    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1074    /// let builder = ImageBuilder::new("./my-project").await?
1075    ///     .with_cache_dir("/var/cache/zlayer")
1076    ///     .tag("myapp:latest");
1077    /// # Ok(())
1078    /// # }
1079    /// ```
1080    ///
1081    /// # Integration Status
1082    ///
1083    /// TODO: The cache backend is currently stored but not actively used
1084    /// during builds. Future work will wire up:
1085    /// - Cache lookups before executing RUN instructions
1086    /// - Storing layer data after successful execution
1087    /// - Caching base image layers from registry pulls
1088    #[cfg(feature = "cache-persistent")]
1089    #[must_use]
1090    pub fn with_cache_dir(mut self, path: impl AsRef<Path>) -> Self {
1091        self.options.cache_backend_config = Some(CacheBackendConfig::Persistent {
1092            path: path.as_ref().to_path_buf(),
1093        });
1094        debug!(
1095            "Configured persistent cache at: {}",
1096            path.as_ref().display()
1097        );
1098        self
1099    }
1100
1101    /// Configure an in-memory cache backend for layer caching.
1102    ///
1103    /// The in-memory cache is cleared when the process exits, but can
1104    /// speed up builds within a single session by caching intermediate
1105    /// layers and avoiding redundant operations.
1106    ///
1107    /// Requires the `cache` feature to be enabled.
1108    ///
1109    /// # Example
1110    ///
1111    /// ```no_run,ignore
1112    /// use zlayer_builder::ImageBuilder;
1113    ///
1114    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1115    /// let builder = ImageBuilder::new("./my-project").await?
1116    ///     .with_memory_cache()
1117    ///     .tag("myapp:latest");
1118    /// # Ok(())
1119    /// # }
1120    /// ```
1121    ///
1122    /// # Integration Status
1123    ///
1124    /// TODO: The cache backend is currently stored but not actively used
1125    /// during builds. See `with_cache_dir` for integration status details.
1126    #[cfg(feature = "cache")]
1127    #[must_use]
1128    pub fn with_memory_cache(mut self) -> Self {
1129        self.options.cache_backend_config = Some(CacheBackendConfig::Memory);
1130        debug!("Configured in-memory cache");
1131        self
1132    }
1133
1134    /// Configure an S3-compatible storage backend for layer caching.
1135    ///
1136    /// This is useful for distributed build systems where multiple build
1137    /// machines need to share a layer cache. Supports AWS S3, Cloudflare R2,
1138    /// Backblaze B2, `MinIO`, and other S3-compatible services.
1139    ///
1140    /// Requires the `cache-s3` feature to be enabled.
1141    ///
1142    /// # Arguments
1143    ///
1144    /// * `bucket` - S3 bucket name
1145    /// * `region` - AWS region (optional, uses SDK default if not set)
1146    ///
1147    /// # Example
1148    ///
1149    /// ```no_run,ignore
1150    /// use zlayer_builder::ImageBuilder;
1151    ///
1152    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1153    /// let builder = ImageBuilder::new("./my-project").await?
1154    ///     .with_s3_cache("my-build-cache", Some("us-west-2"))
1155    ///     .tag("myapp:latest");
1156    /// # Ok(())
1157    /// # }
1158    /// ```
1159    ///
1160    /// # Integration Status
1161    ///
1162    /// TODO: The cache backend is currently stored but not actively used
1163    /// during builds. See `with_cache_dir` for integration status details.
1164    #[cfg(feature = "cache-s3")]
1165    #[must_use]
1166    pub fn with_s3_cache(mut self, bucket: impl Into<String>, region: Option<String>) -> Self {
1167        self.options.cache_backend_config = Some(CacheBackendConfig::S3 {
1168            bucket: bucket.into(),
1169            region,
1170            endpoint: None,
1171            prefix: None,
1172        });
1173        debug!("Configured S3 cache");
1174        self
1175    }
1176
1177    /// Configure an S3-compatible storage backend with custom endpoint.
1178    ///
1179    /// Use this method for S3-compatible services that require a custom
1180    /// endpoint URL (e.g., Cloudflare R2, `MinIO`, local development).
1181    ///
1182    /// Requires the `cache-s3` feature to be enabled.
1183    ///
1184    /// # Arguments
1185    ///
1186    /// * `bucket` - S3 bucket name
1187    /// * `endpoint` - Custom endpoint URL
1188    /// * `region` - Region (required for some S3-compatible services)
1189    ///
1190    /// # Example
1191    ///
1192    /// ```no_run,ignore
1193    /// use zlayer_builder::ImageBuilder;
1194    ///
1195    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1196    /// // Cloudflare R2
1197    /// let builder = ImageBuilder::new("./my-project").await?
1198    ///     .with_s3_cache_endpoint(
1199    ///         "my-bucket",
1200    ///         "https://accountid.r2.cloudflarestorage.com",
1201    ///         Some("auto".to_string()),
1202    ///     )
1203    ///     .tag("myapp:latest");
1204    /// # Ok(())
1205    /// # }
1206    /// ```
1207    #[cfg(feature = "cache-s3")]
1208    #[must_use]
1209    pub fn with_s3_cache_endpoint(
1210        mut self,
1211        bucket: impl Into<String>,
1212        endpoint: impl Into<String>,
1213        region: Option<String>,
1214    ) -> Self {
1215        self.options.cache_backend_config = Some(CacheBackendConfig::S3 {
1216            bucket: bucket.into(),
1217            region,
1218            endpoint: Some(endpoint.into()),
1219            prefix: None,
1220        });
1221        debug!("Configured S3 cache with custom endpoint");
1222        self
1223    }
1224
1225    /// Configure a custom cache backend configuration.
1226    ///
1227    /// This is the most flexible way to configure the cache backend,
1228    /// allowing full control over all cache settings.
1229    ///
1230    /// Requires the `cache` feature to be enabled.
1231    ///
1232    /// # Example
1233    ///
1234    /// ```no_run,ignore
1235    /// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
1236    ///
1237    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1238    /// let builder = ImageBuilder::new("./my-project").await?
1239    ///     .with_cache_config(CacheBackendConfig::Memory)
1240    ///     .tag("myapp:latest");
1241    /// # Ok(())
1242    /// # }
1243    /// ```
1244    #[cfg(feature = "cache")]
1245    #[must_use]
1246    pub fn with_cache_config(mut self, config: CacheBackendConfig) -> Self {
1247        self.options.cache_backend_config = Some(config);
1248        debug!("Configured custom cache backend");
1249        self
1250    }
1251
1252    /// Set an already-initialized cache backend directly.
1253    ///
1254    /// This is useful when you have a pre-configured cache backend instance
1255    /// that you want to share across multiple builders or when you need
1256    /// fine-grained control over cache initialization.
1257    ///
1258    /// Requires the `cache` feature to be enabled.
1259    ///
1260    /// # Example
1261    ///
1262    /// ```no_run,ignore
1263    /// use zlayer_builder::ImageBuilder;
1264    /// use zlayer_registry::cache::BlobCache;
1265    /// use std::sync::Arc;
1266    ///
1267    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1268    /// let cache = Arc::new(Box::new(BlobCache::new()?) as Box<dyn zlayer_registry::cache::BlobCacheBackend>);
1269    ///
1270    /// let builder = ImageBuilder::new("./my-project").await?
1271    ///     .with_cache_backend(cache)
1272    ///     .tag("myapp:latest");
1273    /// # Ok(())
1274    /// # }
1275    /// ```
1276    #[cfg(feature = "cache")]
1277    #[must_use]
1278    pub fn with_cache_backend(mut self, backend: Arc<Box<dyn BlobCacheBackend>>) -> Self {
1279        self.cache_backend = Some(backend);
1280        debug!("Configured pre-initialized cache backend");
1281        self
1282    }
1283
    /// Run the build
    ///
    /// This executes the complete build process:
    /// 1. Parse Dockerfile or load runtime template
    /// 2. Build all required stages
    /// 3. Commit and tag the final image
    /// 4. Push to registry if configured
    /// 5. Clean up intermediate containers
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Dockerfile parsing fails
    /// - A buildah command fails
    /// - Target stage is not found
    /// - Registry push fails
    ///
    /// # Panics
    ///
    /// Panics if an instruction output is missing after all retry attempts (internal invariant).
    #[instrument(skip(self), fields(context = %self.context.display()))]
    #[allow(clippy::too_many_lines)]
    pub async fn build(self) -> Result<BuiltImage> {
        let start_time = std::time::Instant::now();

        info!("Starting build in context: {}", self.context.display());

        // 1. Get build output (Dockerfile IR or WASM artifact)
        let build_output = self.get_build_output().await?;

        // If this is a WASM build, return early with the artifact info.
        if let BuildOutput::WasmArtifact {
            wasm_path,
            oci_path: _,
            language,
            optimized,
            size,
        } = build_output
        {
            #[allow(clippy::cast_possible_truncation)]
            let build_time_ms = start_time.elapsed().as_millis() as u64;

            self.send_event(BuildEvent::BuildComplete {
                image_id: wasm_path.display().to_string(),
            });

            info!(
                "WASM build completed in {}ms: {} ({}, {} bytes, optimized={})",
                build_time_ms,
                wasm_path.display(),
                language,
                size,
                optimized
            );

            // A WASM artifact has no OCI layers; report a single pseudo-layer
            // and encode the artifact path into the image id.
            return Ok(BuiltImage {
                image_id: format!("wasm:{}", wasm_path.display()),
                tags: self.options.tags.clone(),
                layer_count: 1,
                size,
                build_time_ms,
                is_manifest: false,
            });
        }

        // Extract the Dockerfile from the BuildOutput.
        let BuildOutput::Dockerfile(dockerfile) = build_output else {
            unreachable!("WasmArtifact case handled above");
        };
        debug!("Parsed Dockerfile with {} stages", dockerfile.stages.len());

        // Delegate the build to the backend.
        let backend = self
            .backend
            .as_ref()
            .ok_or_else(|| BuildError::BuildahNotFound {
                message: "No build backend configured".into(),
            })?;

        info!("Delegating build to {} backend", backend.name());
        let built = backend
            .build_image(
                &self.context,
                &dockerfile,
                &self.options,
                self.event_tx.clone(),
            )
            .await?;

        // Import the built image into ZLayer's local registry and blob cache
        // so the runtime can find it without pulling from a remote registry.
        //
        // A user who wired up a local registry clearly wants built images to
        // live there — if the import fails (almost always EACCES on the
        // registry dir for an unprivileged user), bail with the registry path
        // in the message instead of silently producing a build that the
        // daemon can't find.
        #[cfg(feature = "local-registry")]
        if let Some(ref registry) = self.local_registry {
            if !built.tags.is_empty() {
                // pid + elapsed nanos keeps the temp file name unique across
                // concurrent builds in the same process tree.
                let tmp_path = std::env::temp_dir().join(format!(
                    "zlayer-build-{}-{}.tar",
                    std::process::id(),
                    start_time.elapsed().as_nanos()
                ));

                // Export the image from buildah's store to an OCI archive.
                // Safe to index: guarded by the is_empty() check above.
                let export_tag = &built.tags[0];
                let dest = format!("oci-archive:{}", tmp_path.display());
                let push_cmd = BuildahCommand::push_to(export_tag, &dest);

                self.executor
                    .execute_checked(&push_cmd)
                    .await
                    .map_err(|e| BuildError::RegistryError {
                        message: format!(
                            "failed to export image to OCI archive for local registry \
                             import at {}: {e}",
                            registry.root().display()
                        ),
                    })?;

                // Resolve the blob cache backend (if available).
                // Arc<Box<dyn Trait>> -> &dyn Trait: one as_ref for the Arc,
                // one more for the Box.
                let blob_cache: Option<&dyn zlayer_registry::cache::BlobCacheBackend> =
                    self.cache_backend.as_ref().map(|arc| arc.as_ref().as_ref());

                // Wrap the per-tag import loop in an async block so cleanup
                // below runs whether or not any import failed.
                let import_result = async {
                    for tag in &built.tags {
                        let info =
                            import_image(registry, &tmp_path, Some(tag.as_str()), blob_cache)
                                .await
                                .map_err(|e| BuildError::RegistryError {
                                    message: format!(
                                        "failed to import '{tag}' into local registry at {}: {e}",
                                        registry.root().display()
                                    ),
                                })?;
                        info!(
                            tag = %tag,
                            digest = %info.digest,
                            "Imported into local registry"
                        );
                    }
                    Ok::<(), BuildError>(())
                }
                .await;

                // Clean up the temporary archive regardless of whether the
                // import succeeded (best-effort; warn on failure).
                if let Err(e) = fs::remove_file(&tmp_path).await {
                    warn!(path = %tmp_path.display(), error = %e, "Failed to remove temp OCI archive");
                }

                import_result?;
            }
        }

        Ok(built)
    }
1443
    /// Determine what to build from the configured inputs.
    ///
    /// Detection order:
    /// 1. If `runtime` is set -> use template string -> parse as Dockerfile
    /// 2. If `zimagefile` is explicitly set -> read & parse `ZImagefile` -> convert
    /// 3. If a file called `ZImagefile` exists in the context dir -> same as (2)
    /// 4. Fall back to reading a Dockerfile (from `dockerfile` option or default)
    ///
    /// Returns [`BuildOutput::Dockerfile`] for container builds or
    /// [`BuildOutput::WasmArtifact`] for WASM builds.
    ///
    /// # Errors
    ///
    /// Returns `BuildError::ContextRead` when a ZImagefile/Dockerfile cannot
    /// be read, or a parse/validation error from the respective parser.
    async fn get_build_output(&self) -> Result<BuildOutput> {
        // (a) Runtime template takes highest priority.
        if let Some(runtime) = &self.options.runtime {
            debug!("Using runtime template: {}", runtime);
            let content = get_template(*runtime);
            return Ok(BuildOutput::Dockerfile(Dockerfile::parse(content)?));
        }

        // (b) Explicit ZImagefile path.
        if let Some(ref zimage_path) = self.options.zimagefile {
            debug!("Reading ZImagefile: {}", zimage_path.display());
            let content =
                fs::read_to_string(zimage_path)
                    .await
                    .map_err(|e| BuildError::ContextRead {
                        path: zimage_path.clone(),
                        source: e,
                    })?;
            let zimage = crate::zimage::parse_zimagefile(&content)?;
            return self.handle_zimage(&zimage).await;
        }

        // (c) Auto-detect ZImagefile in context directory.
        let auto_zimage_path = self.context.join("ZImagefile");
        // NOTE(review): `Path::exists` is a blocking stat inside an async fn;
        // cheap, but tokio's `fs::try_exists` would avoid it — confirm before
        // changing since a racing delete is already handled by the read error.
        if auto_zimage_path.exists() {
            debug!(
                "Found ZImagefile in context: {}",
                auto_zimage_path.display()
            );
            let content = fs::read_to_string(&auto_zimage_path).await.map_err(|e| {
                BuildError::ContextRead {
                    path: auto_zimage_path,
                    source: e,
                }
            })?;
            let zimage = crate::zimage::parse_zimagefile(&content)?;
            return self.handle_zimage(&zimage).await;
        }

        // (d) Fall back to Dockerfile.
        let dockerfile_path = self
            .options
            .dockerfile
            .clone()
            .unwrap_or_else(|| self.context.join("Dockerfile"));

        debug!("Reading Dockerfile: {}", dockerfile_path.display());

        let content =
            fs::read_to_string(&dockerfile_path)
                .await
                .map_err(|e| BuildError::ContextRead {
                    path: dockerfile_path,
                    source: e,
                })?;

        Ok(BuildOutput::Dockerfile(Dockerfile::parse(&content)?))
    }
1510
1511    /// Convert a parsed [`ZImage`] into a [`BuildOutput`].
1512    ///
1513    /// Handles all four `ZImage` modes:
1514    /// - **Runtime** mode: delegates to the template system -> [`BuildOutput::Dockerfile`]
1515    /// - **Single-stage / Multi-stage**: converts via [`zimage_to_dockerfile`] -> [`BuildOutput::Dockerfile`]
1516    /// - **WASM** mode: builds a WASM component -> [`BuildOutput::WasmArtifact`]
1517    ///
1518    /// Any `build:` directives are resolved first by spawning nested builds.
1519    async fn handle_zimage(&self, zimage: &crate::zimage::ZImage) -> Result<BuildOutput> {
1520        // Runtime mode: delegate to template system.
1521        if let Some(ref runtime_name) = zimage.runtime {
1522            let rt = Runtime::from_name(runtime_name).ok_or_else(|| {
1523                BuildError::zimagefile_validation(format!(
1524                    "unknown runtime '{runtime_name}' in ZImagefile"
1525                ))
1526            })?;
1527            let content = get_template(rt);
1528            return Ok(BuildOutput::Dockerfile(Dockerfile::parse(content)?));
1529        }
1530
1531        // WASM mode: build a WASM component.
1532        if let Some(ref wasm_config) = zimage.wasm {
1533            return self.handle_wasm_build(wasm_config).await;
1534        }
1535
1536        // Resolve any `build:` directives to concrete base image tags.
1537        let resolved = self.resolve_build_directives(zimage).await?;
1538
1539        // Single-stage or multi-stage: convert to Dockerfile IR directly.
1540        Ok(BuildOutput::Dockerfile(
1541            crate::zimage::zimage_to_dockerfile(&resolved)?,
1542        ))
1543    }
1544
1545    /// Build a WASM component from the `ZImagefile` wasm configuration.
1546    ///
1547    /// Converts [`ZWasmConfig`](crate::zimage::ZWasmConfig) into a
1548    /// [`WasmBuildConfig`](crate::wasm_builder::WasmBuildConfig) and invokes
1549    /// the WASM builder pipeline.
1550    async fn handle_wasm_build(
1551        &self,
1552        wasm_config: &crate::zimage::ZWasmConfig,
1553    ) -> Result<BuildOutput> {
1554        use crate::wasm_builder::{build_wasm, WasiTarget, WasmBuildConfig, WasmLanguage};
1555
1556        info!("ZImagefile specifies WASM mode, running WASM build");
1557
1558        // Convert target string to WasiTarget enum.
1559        let target = match wasm_config.target.as_str() {
1560            "preview1" => WasiTarget::Preview1,
1561            _ => WasiTarget::Preview2,
1562        };
1563
1564        // Resolve language: parse from string or leave as None for auto-detection.
1565        let language = wasm_config
1566            .language
1567            .as_deref()
1568            .and_then(WasmLanguage::from_name);
1569
1570        if let Some(ref lang_str) = wasm_config.language {
1571            if language.is_none() {
1572                return Err(BuildError::zimagefile_validation(format!(
1573                    "unknown WASM language '{lang_str}'. Supported: rust, go, python, \
1574                     typescript, assemblyscript, c, zig"
1575                )));
1576            }
1577        }
1578
1579        // Build the WasmBuildConfig.
1580        let mut config = WasmBuildConfig {
1581            language,
1582            target,
1583            optimize: wasm_config.optimize,
1584            opt_level: wasm_config
1585                .opt_level
1586                .clone()
1587                .unwrap_or_else(|| "Oz".to_string()),
1588            wit_path: wasm_config.wit.as_ref().map(PathBuf::from),
1589            output_path: wasm_config.output.as_ref().map(PathBuf::from),
1590            world: wasm_config.world.clone(),
1591            features: wasm_config.features.clone(),
1592            build_args: wasm_config.build_args.clone(),
1593            pre_build: Vec::new(),
1594            post_build: Vec::new(),
1595            adapter: wasm_config.adapter.as_ref().map(PathBuf::from),
1596        };
1597
1598        // Convert ZCommand pre/post build steps to Vec<Vec<String>>.
1599        for cmd in &wasm_config.pre_build {
1600            config.pre_build.push(zcommand_to_args(cmd));
1601        }
1602        for cmd in &wasm_config.post_build {
1603            config.post_build.push(zcommand_to_args(cmd));
1604        }
1605
1606        // Build the WASM component.
1607        let result = build_wasm(&self.context, config).await?;
1608
1609        let language_name = result.language.name().to_string();
1610        let wasm_path = result.wasm_path;
1611        let size = result.size;
1612
1613        info!(
1614            "WASM build complete: {} ({} bytes, optimized={})",
1615            wasm_path.display(),
1616            size,
1617            wasm_config.optimize
1618        );
1619
1620        Ok(BuildOutput::WasmArtifact {
1621            wasm_path,
1622            oci_path: None,
1623            language: language_name,
1624            optimized: wasm_config.optimize,
1625            size,
1626        })
1627    }
1628
1629    /// Resolve `build:` directives in a `ZImage` by running nested builds.
1630    ///
1631    /// For each `build:` directive (top-level or per-stage), this method:
1632    /// 1. Determines the build context directory
1633    /// 2. Auto-detects the build file (`ZImagefile` > Dockerfile) unless specified
1634    /// 3. Spawns a nested `ImageBuilder` to build the context
1635    /// 4. Tags the result and replaces `build` with `base`
1636    async fn resolve_build_directives(
1637        &self,
1638        zimage: &crate::zimage::ZImage,
1639    ) -> Result<crate::zimage::ZImage> {
1640        let mut resolved = zimage.clone();
1641
1642        // Resolve top-level `build:` directive.
1643        if let Some(ref build_ctx) = resolved.build {
1644            let tag = self.run_nested_build(build_ctx, "toplevel").await?;
1645            resolved.base = Some(tag);
1646            resolved.build = None;
1647        }
1648
1649        // Resolve per-stage `build:` directives.
1650        if let Some(ref mut stages) = resolved.stages {
1651            for (name, stage) in stages.iter_mut() {
1652                if let Some(ref build_ctx) = stage.build {
1653                    let tag = self.run_nested_build(build_ctx, name).await?;
1654                    stage.base = Some(tag);
1655                    stage.build = None;
1656                }
1657            }
1658        }
1659
1660        Ok(resolved)
1661    }
1662
1663    /// Run a nested build from a `build:` directive and return the resulting image tag.
1664    fn run_nested_build<'a>(
1665        &'a self,
1666        build_ctx: &'a crate::zimage::types::ZBuildContext,
1667        stage_name: &'a str,
1668    ) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<String>> + Send + 'a>> {
1669        Box::pin(self.run_nested_build_inner(build_ctx, stage_name))
1670    }
1671
1672    async fn run_nested_build_inner(
1673        &self,
1674        build_ctx: &crate::zimage::types::ZBuildContext,
1675        stage_name: &str,
1676    ) -> Result<String> {
1677        let context_dir = build_ctx.context_dir(&self.context);
1678
1679        if !context_dir.exists() {
1680            return Err(BuildError::ContextRead {
1681                path: context_dir,
1682                source: std::io::Error::new(
1683                    std::io::ErrorKind::NotFound,
1684                    format!(
1685                        "build context directory not found for build directive in '{stage_name}'"
1686                    ),
1687                ),
1688            });
1689        }
1690
1691        info!(
1692            "Building nested image for '{}' from context: {}",
1693            stage_name,
1694            context_dir.display()
1695        );
1696
1697        // Create a tag for the nested build result.
1698        let tag = format!(
1699            "zlayer-build-dep-{}:{}",
1700            stage_name,
1701            chrono_lite_timestamp()
1702        );
1703
1704        // Create nested builder.
1705        let mut nested = ImageBuilder::new(&context_dir).await?;
1706        nested = nested.tag(&tag);
1707
1708        // Apply explicit build file if specified.
1709        if let Some(file) = build_ctx.file() {
1710            let file_path = context_dir.join(file);
1711            if std::path::Path::new(file).extension().is_some_and(|ext| {
1712                ext.eq_ignore_ascii_case("yml") || ext.eq_ignore_ascii_case("yaml")
1713            }) || file.starts_with("ZImagefile")
1714            {
1715                nested = nested.zimagefile(file_path);
1716            } else {
1717                nested = nested.dockerfile(file_path);
1718            }
1719        }
1720
1721        // Apply build args.
1722        for (key, value) in build_ctx.args() {
1723            nested = nested.build_arg(&key, &value);
1724        }
1725
1726        // Propagate default registry if set.
1727        if let Some(ref reg) = self.options.default_registry {
1728            nested = nested.default_registry(reg.clone());
1729        }
1730
1731        // Run the nested build.
1732        let result = nested.build().await?;
1733        info!(
1734            "Nested build for '{}' completed: {}",
1735            stage_name, result.image_id
1736        );
1737
1738        Ok(tag)
1739    }
1740
1741    /// Send an event to the TUI (if configured)
1742    fn send_event(&self, event: BuildEvent) {
1743        if let Some(tx) = &self.event_tx {
1744            // Ignore send errors - the receiver may have been dropped
1745            let _ = tx.send(event);
1746        }
1747    }
1748}
1749
/// Generate a timestamp-based name component: the number of seconds since
/// the Unix epoch, rendered as a decimal string.
///
/// If the system clock reports a time before the epoch, this falls back to
/// `Duration::default()` and returns `"0"` instead of panicking.
fn chrono_lite_timestamp() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
        // `.to_string()` is the idiomatic form of `format!("{}", x)`
        // (clippy: useless_format).
        .to_string()
}
1758
1759/// Convert a [`ZCommand`](crate::zimage::ZCommand) into a vector of string arguments
1760/// suitable for passing to [`WasmBuildConfig`](crate::wasm_builder::WasmBuildConfig)
1761/// pre/post build command lists.
1762fn zcommand_to_args(cmd: &crate::zimage::ZCommand) -> Vec<String> {
1763    match cmd {
1764        crate::zimage::ZCommand::Shell(s) => {
1765            vec!["/bin/sh".to_string(), "-c".to_string(), s.clone()]
1766        }
1767        crate::zimage::ZCommand::Exec(args) => args.clone(),
1768    }
1769}
1770
#[cfg(test)]
mod tests {
    use super::*;

    // ---- RegistryAuth ----------------------------------------------------

    #[test]
    fn test_registry_auth_new() {
        let auth = RegistryAuth::new("user", "pass");
        assert_eq!(auth.username, "user");
        assert_eq!(auth.password, "pass");
    }

    // ---- BuildOptions defaults -------------------------------------------

    #[test]
    fn test_build_options_default() {
        let defaults = BuildOptions::default();

        // Input selection starts unset.
        assert!(defaults.dockerfile.is_none());
        assert!(defaults.zimagefile.is_none());
        assert!(defaults.runtime.is_none());
        assert!(defaults.target.is_none());

        // No args, tags, or flags out of the box.
        assert!(defaults.build_args.is_empty());
        assert!(defaults.tags.is_empty());
        assert!(!defaults.no_cache);
        assert!(!defaults.push);
        assert!(!defaults.squash);

        // Cache-related fields: layer caching defaults on, everything else unset.
        assert!(defaults.layers);
        assert!(defaults.cache_from.is_none());
        assert!(defaults.cache_to.is_none());
        assert!(defaults.cache_ttl.is_none());
        // Cache backend config only exists with the `cache` feature.
        #[cfg(feature = "cache")]
        assert!(defaults.cache_backend_config.is_none());
    }

    /// Construct a minimal `ImageBuilder` by hand, bypassing the async
    /// `new()` path so unit tests need no real environment.
    fn create_test_builder() -> ImageBuilder {
        ImageBuilder {
            context: PathBuf::from("/tmp/test"),
            options: BuildOptions::default(),
            executor: BuildahExecutor::with_path("/usr/bin/buildah"),
            event_tx: None,
            backend: None,
            #[cfg(feature = "cache")]
            cache_backend: None,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        }
    }

    // ---- Builder method chaining -----------------------------------------

    #[test]
    fn test_builder_chaining() {
        let configured = create_test_builder()
            .dockerfile("./Dockerfile.test")
            .runtime(Runtime::Node20)
            .build_arg("VERSION", "1.0")
            .target("builder")
            .tag("myapp:latest")
            .tag("myapp:v1")
            .no_cache()
            .squash()
            .format("oci");

        let opts = &configured.options;
        assert_eq!(opts.dockerfile, Some(PathBuf::from("./Dockerfile.test")));
        assert_eq!(opts.runtime, Some(Runtime::Node20));
        assert_eq!(opts.build_args.get("VERSION"), Some(&"1.0".to_string()));
        assert_eq!(opts.target, Some("builder".to_string()));
        assert_eq!(opts.tags.len(), 2);
        assert!(opts.no_cache);
        assert!(opts.squash);
        assert_eq!(opts.format, Some("oci".to_string()));
    }

    #[test]
    fn test_builder_push_with_auth() {
        let configured = create_test_builder().push(RegistryAuth::new("user", "pass"));

        assert!(configured.options.push);
        assert!(configured.options.registry_auth.is_some());
        let stored = configured.options.registry_auth.unwrap();
        assert_eq!(stored.username, "user");
        assert_eq!(stored.password, "pass");
    }

    #[test]
    fn test_builder_push_without_auth() {
        let configured = create_test_builder().push_without_auth();

        assert!(configured.options.push);
        assert!(configured.options.registry_auth.is_none());
    }

    #[test]
    fn test_builder_layers() {
        // Default: layer caching is enabled.
        let builder = create_test_builder();
        assert!(builder.options.layers);

        // Toggle off, then back on (shadowing instead of `mut`).
        let builder = builder.layers(false);
        assert!(!builder.options.layers);

        let builder = builder.layers(true);
        assert!(builder.options.layers);
    }

    #[test]
    fn test_builder_cache_from() {
        let builder = create_test_builder();
        assert!(builder.options.cache_from.is_none());

        let builder = builder.cache_from("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_from,
            Some("registry.example.com/myapp:cache".to_string())
        );
    }

    #[test]
    fn test_builder_cache_to() {
        let builder = create_test_builder();
        assert!(builder.options.cache_to.is_none());

        let builder = builder.cache_to("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_to,
            Some("registry.example.com/myapp:cache".to_string())
        );
    }

    #[test]
    fn test_builder_cache_ttl() {
        use std::time::Duration;

        let builder = create_test_builder();
        assert!(builder.options.cache_ttl.is_none());

        let builder = builder.cache_ttl(Duration::from_secs(3600));
        assert_eq!(builder.options.cache_ttl, Some(Duration::from_secs(3600)));
    }

    #[test]
    fn test_builder_cache_options_chaining() {
        use std::time::Duration;

        let configured = create_test_builder()
            .layers(true)
            .cache_from("registry.example.com/cache:input")
            .cache_to("registry.example.com/cache:output")
            .cache_ttl(Duration::from_secs(7200))
            .no_cache();

        let opts = &configured.options;
        assert!(opts.layers);
        assert_eq!(
            opts.cache_from,
            Some("registry.example.com/cache:input".to_string())
        );
        assert_eq!(
            opts.cache_to,
            Some("registry.example.com/cache:output".to_string())
        );
        assert_eq!(opts.cache_ttl, Some(Duration::from_secs(7200)));
        assert!(opts.no_cache);
    }

    #[test]
    fn test_chrono_lite_timestamp() {
        let stamp = chrono_lite_timestamp();
        // Must parse as a plain decimal number of seconds.
        let parsed: u64 = stamp.parse().expect("Should be a valid u64");
        // Sanity: later than ~Nov 2023 (1.7 billion seconds since epoch).
        assert!(parsed > 1_700_000_000);
    }
}