// zlayer_builder/builder.rs
1//! `ImageBuilder` - High-level API for building container images
2//!
3//! This module provides the [`ImageBuilder`] type which orchestrates the full
4//! container image build process, from Dockerfile parsing through buildah
5//! execution to final image creation.
6//!
7//! # Example
8//!
9//! ```no_run
10//! use zlayer_builder::{ImageBuilder, Runtime};
11//!
12//! #[tokio::main]
13//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
14//!     // Build from a Dockerfile
15//!     let image = ImageBuilder::new("./my-app").await?
16//!         .tag("myapp:latest")
17//!         .tag("myapp:v1.0.0")
18//!         .build()
19//!         .await?;
20//!
21//!     println!("Built image: {}", image.image_id);
22//!     Ok(())
23//! }
24//! ```
25//!
26//! # Using Runtime Templates
27//!
28//! ```no_run
29//! use zlayer_builder::{ImageBuilder, Runtime};
30//!
31//! #[tokio::main]
32//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
33//!     // Build using a runtime template (no Dockerfile needed)
34//!     let image = ImageBuilder::new("./my-node-app").await?
35//!         .runtime(Runtime::Node20)
36//!         .tag("myapp:latest")
37//!         .build()
38//!         .await?;
39//!
40//!     println!("Built image: {}", image.image_id);
41//!     Ok(())
42//! }
43//! ```
44//!
45//! # Multi-stage Builds with Target
46//!
47//! ```no_run
48//! use zlayer_builder::ImageBuilder;
49//!
50//! #[tokio::main]
51//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
52//!     // Build only up to a specific stage
53//!     let image = ImageBuilder::new("./my-app").await?
54//!         .target("builder")
55//!         .tag("myapp:builder")
56//!         .build()
57//!         .await?;
58//!
59//!     println!("Built intermediate image: {}", image.image_id);
60//!     Ok(())
61//! }
62//! ```
63//!
64//! # With TUI Progress Updates
65//!
66//! ```no_run
67//! use zlayer_builder::{ImageBuilder, BuildEvent};
68//! use std::sync::mpsc;
69//!
70//! #[tokio::main]
71//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
72//!     let (tx, rx) = mpsc::channel::<BuildEvent>();
73//!
74//!     // Start TUI in another thread
75//!     std::thread::spawn(move || {
76//!         // Process events from rx...
77//!         while let Ok(event) = rx.recv() {
78//!             println!("Event: {:?}", event);
79//!         }
80//!     });
81//!
82//!     let image = ImageBuilder::new("./my-app").await?
83//!         .tag("myapp:latest")
84//!         .with_events(tx)
85//!         .build()
86//!         .await?;
87//!
88//!     Ok(())
89//! }
90//! ```
91//!
92//! # With Cache Backend (requires `cache` feature)
93//!
94//! ```no_run,ignore
95//! use zlayer_builder::ImageBuilder;
96//!
97//! #[tokio::main]
98//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
99//!     let image = ImageBuilder::new("./my-app").await?
100//!         .with_cache_dir("/var/cache/zlayer")  // Use persistent disk cache
101//!         .tag("myapp:latest")
102//!         .build()
103//!         .await?;
104//!
105//!     println!("Built image: {}", image.image_id);
106//!     Ok(())
107//! }
108//! ```
109
110use std::collections::HashMap;
111use std::path::{Path, PathBuf};
112use std::sync::mpsc;
113use std::sync::Arc;
114
115use tokio::fs;
116use tracing::{debug, info, instrument};
117
118use crate::backend::BuildBackend;
119use crate::buildah::BuildahExecutor;
120use crate::dockerfile::{Dockerfile, RunMount};
121use crate::error::{BuildError, Result};
122use crate::templates::{get_template, Runtime};
123use crate::tui::BuildEvent;
124
125#[cfg(feature = "cache")]
126use zlayer_registry::cache::BlobCacheBackend;
127
128#[cfg(feature = "local-registry")]
129use zlayer_registry::LocalRegistry;
130
131/// Output from parsing a `ZImagefile` - either a Dockerfile for container builds
132/// or a WASM build result for WebAssembly builds.
133///
134/// Most `ZImagefile` modes (runtime, single-stage, multi-stage) produce a
135/// [`Dockerfile`] IR that is then built with buildah. WASM mode produces
136/// a compiled artifact directly, bypassing the container build pipeline.
137#[derive(Debug)]
138pub enum BuildOutput {
139    /// Standard container build - produces a Dockerfile to be built with buildah.
140    Dockerfile(Dockerfile),
141    /// WASM component build - already built, produces artifact path.
142    WasmArtifact {
143        /// Path to the compiled WASM binary.
144        wasm_path: PathBuf,
145        /// Path to the OCI artifact directory (if exported).
146        oci_path: Option<PathBuf>,
147        /// Source language used.
148        language: String,
149        /// Whether optimization was applied.
150        optimized: bool,
151        /// Size of the output file in bytes.
152        size: u64,
153    },
154}
155
/// Selects which backend stores cached layers between builds.
///
/// Determines where layer data is stored and retrieved during builds.
/// Only available when the `cache` feature is enabled.
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// // Use persistent disk cache
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_config(CacheBackendConfig::Persistent {
///         path: "/var/cache/zlayer".into(),
///     })
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "cache")]
#[derive(Debug, Clone, Default)]
pub enum CacheBackendConfig {
    /// In-memory cache; its contents vanish when the process exits.
    ///
    /// A good fit for CI/CD runs where persistence is unnecessary but
    /// re-downloading base image layers within a single build session
    /// should still be avoided.
    #[default]
    Memory,

    /// Disk-backed persistent cache built on redb.
    ///
    /// Needs the `cache-persistent` feature. Cached layers survive across
    /// builds, which makes repeated builds significantly faster.
    #[cfg(feature = "cache-persistent")]
    Persistent {
        /// Cache directory or database file path.
        /// When a directory is given, `blob_cache.redb` is created inside it.
        path: PathBuf,
    },

    /// S3-compatible object-storage cache.
    ///
    /// Needs the `cache-s3` feature. Intended for distributed build systems
    /// where several build machines share one cache.
    #[cfg(feature = "cache-s3")]
    S3 {
        /// S3 bucket name
        bucket: String,
        /// AWS region (optional, uses SDK default if not set)
        region: Option<String>,
        /// Custom endpoint URL (for S3-compatible services like R2, B2, `MinIO`)
        endpoint: Option<String>,
        /// Key prefix for cached blobs (default: "zlayer/layers/")
        prefix: Option<String>,
    },
}
215
/// Summary of a successfully completed image build.
#[derive(Debug, Clone)]
pub struct BuiltImage {
    /// Content-addressed image identifier (`sha256:...`).
    pub image_id: String,
    /// All tags that were applied to the image.
    pub tags: Vec<String>,
    /// How many layers the final image contains.
    pub layer_count: usize,
    /// Total image size in bytes; 0 when the size was not computed.
    pub size: u64,
    /// Wall-clock build duration, in milliseconds.
    pub build_time_ms: u64,
    /// True when the result is a multi-arch manifest list rather than a
    /// single-platform image.
    pub is_manifest: bool,
}
232
/// Registry authentication credentials
///
/// `Debug` is implemented manually so the password/token is redacted:
/// deriving `Debug` on a credentials struct would print the secret verbatim
/// whenever the value is formatted with `{:?}` (logs, error chains, etc.).
#[derive(Clone)]
pub struct RegistryAuth {
    /// Registry username
    pub username: String,
    /// Registry password or token
    pub password: String,
}

impl std::fmt::Debug for RegistryAuth {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Never expose the secret through Debug output.
        f.debug_struct("RegistryAuth")
            .field("username", &self.username)
            .field("password", &"<redacted>")
            .finish()
    }
}

impl RegistryAuth {
    /// Create new registry authentication
    ///
    /// # Arguments
    ///
    /// * `username` - Registry username
    /// * `password` - Registry password or token
    pub fn new(username: impl Into<String>, password: impl Into<String>) -> Self {
        Self {
            username: username.into(),
            password: password.into(),
        }
    }
}
251
/// Base-image pull policy applied when the working container is created.
///
/// Determines the `--pull` flag handed to `buildah from`. The default,
/// [`PullBaseMode::Newer`], mirrors what users expect from modern build
/// tools: reuse an up-to-date local copy, but fetch when the registry has
/// republished the base image.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum PullBaseMode {
    /// Fetch only when the registry copy is newer (`--pull=newer`).
    /// This is the default.
    #[default]
    Newer,
    /// Unconditionally fetch, even when a local copy exists (`--pull=always`).
    Always,
    /// Never contact the registry; rely solely on local storage
    /// (no `--pull` flag is passed).
    Never,
}
269
/// Build options for customizing the image build process
#[derive(Debug, Clone)]
#[allow(clippy::struct_excessive_bools)]
pub struct BuildOptions {
    /// Dockerfile path (default: Dockerfile in context)
    pub dockerfile: Option<PathBuf>,
    /// `ZImagefile` path (alternative to Dockerfile)
    pub zimagefile: Option<PathBuf>,
    /// Use runtime template instead of Dockerfile
    pub runtime: Option<Runtime>,
    /// Build arguments (ARG values)
    pub build_args: HashMap<String, String>,
    /// Target stage for multi-stage builds
    pub target: Option<String>,
    /// Image tags to apply
    pub tags: Vec<String>,
    /// Disable layer caching.
    /// See [`ImageBuilder::no_cache`]: currently tracked but not fully
    /// enforced by the build process.
    pub no_cache: bool,
    /// Push to registry after build
    pub push: bool,
    /// Registry auth (if pushing)
    pub registry_auth: Option<RegistryAuth>,
    /// Squash all layers into one
    pub squash: bool,
    /// Image format (oci or docker)
    pub format: Option<String>,
    /// Enable buildah layer caching (--layers flag for `buildah build`).
    /// Default: true
    ///
    /// Note: `ZLayer` uses manual container creation (`buildah from`, `buildah run`,
    /// `buildah commit`) rather than `buildah build`, so this flag is reserved
    /// for future use when/if we switch to `buildah build` (bud) command.
    pub layers: bool,
    /// Registry to pull cache from (--cache-from for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-from=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-from natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_from: Option<String>,
    /// Registry to push cache to (--cache-to for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-to=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-to natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_to: Option<String>,
    /// Maximum cache age (--cache-ttl for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-ttl=<duration>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement cache TTL support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-ttl natively
    /// 2. Implementing custom cache expiration logic for our layer caching system
    pub cache_ttl: Option<std::time::Duration>,
    /// Cache backend configuration (requires `cache` feature).
    ///
    /// When configured, the builder will store layer data in the specified
    /// cache backend for faster subsequent builds. This is separate from
    /// buildah's native caching and operates at the `ZLayer` level.
    ///
    /// # Integration Points
    ///
    /// The cache backend is used at several points during the build:
    ///
    /// 1. **Before instruction execution**: Check if a cached layer exists
    ///    for the (`instruction_hash`, `base_layer`) tuple
    /// 2. **After instruction execution**: Store the resulting layer data
    ///    in the cache for future builds
    /// 3. **Base image layers**: Cache pulled base image layers to avoid
    ///    re-downloading from registries
    ///
    /// TODO: Wire up cache lookups in the build loop once layer digests
    /// are properly computed and tracked.
    #[cfg(feature = "cache")]
    pub cache_backend_config: Option<CacheBackendConfig>,
    /// Default OCI/WASM-compatible registry to check for images before falling
    /// back to Docker Hub qualification.
    ///
    /// When set, the builder will probe this registry for short image names
    /// before qualifying them to `docker.io`. For example, if set to
    /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
    /// the builder will check `git.example.com:5000/myapp:latest` first.
    pub default_registry: Option<String>,
    /// Default cache mounts injected into all RUN instructions.
    /// These are merged with any step-level cache mounts (deduped by target path).
    pub default_cache_mounts: Vec<RunMount>,
    /// Number of retries for failed RUN steps (0 = no retries, default)
    pub retries: u32,
    /// Target platform for the build (e.g., "linux/amd64", "linux/arm64").
    /// When set, `buildah from` pulls the platform-specific image variant.
    pub platform: Option<String>,
    /// SHA-256 hash of the source Dockerfile/ZImagefile content.
    ///
    /// When set, the sandbox builder can skip a rebuild if the cached image
    /// was produced from identical source content (content-based invalidation).
    pub source_hash: Option<String>,
    /// How to handle base-image pulling during `buildah from`.
    ///
    /// Default: [`PullBaseMode::Newer`] — only pull if the registry has a
    /// newer version. Set to [`PullBaseMode::Always`] for CI builds that
    /// must always refresh, or [`PullBaseMode::Never`] for offline builds.
    pub pull: PullBaseMode,
}
382
383impl Default for BuildOptions {
384    fn default() -> Self {
385        Self {
386            dockerfile: None,
387            zimagefile: None,
388            runtime: None,
389            build_args: HashMap::new(),
390            target: None,
391            tags: Vec::new(),
392            no_cache: false,
393            push: false,
394            registry_auth: None,
395            squash: false,
396            format: None,
397            layers: true,
398            cache_from: None,
399            cache_to: None,
400            cache_ttl: None,
401            #[cfg(feature = "cache")]
402            cache_backend_config: None,
403            default_registry: None,
404            default_cache_mounts: Vec::new(),
405            retries: 0,
406            platform: None,
407            source_hash: None,
408            pull: PullBaseMode::default(),
409        }
410    }
411}
412
/// Image builder - orchestrates the full build process
///
/// `ImageBuilder` provides a fluent API for configuring and executing
/// container image builds using buildah as the backend.
///
/// # Build Process
///
/// 1. Parse Dockerfile (or use runtime template)
/// 2. Resolve target stages if specified
/// 3. Build each stage sequentially:
///    - Create working container from base image
///    - Execute each instruction
///    - Commit intermediate stages for COPY --from
/// 4. Commit final image with tags
/// 5. Push to registry if configured
/// 6. Clean up intermediate containers
///
/// # Cache Backend Integration (requires `cache` feature)
///
/// When a cache backend is configured, the builder can store and retrieve
/// cached layer data to speed up subsequent builds:
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
///
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_dir("/var/cache/zlayer")
///     .tag("myapp:latest");
/// ```
pub struct ImageBuilder {
    /// Build context directory
    context: PathBuf,
    /// Build options
    options: BuildOptions,
    /// Buildah executor (kept for backwards compatibility)
    #[allow(dead_code)]
    executor: BuildahExecutor,
    /// Event sender for TUI updates
    event_tx: Option<mpsc::Sender<BuildEvent>>,
    /// Pluggable build backend (buildah, sandbox, etc.).
    ///
    /// When set, the `build()` method delegates to this backend instead of
    /// using the inline buildah logic. Set automatically by `new()` via
    /// `detect_backend()`, or explicitly via `with_backend()`.
    backend: Option<Arc<dyn BuildBackend>>,
    /// Cache backend for layer caching (requires `cache` feature).
    ///
    /// When set, the builder will attempt to retrieve cached layers before
    /// executing instructions, and store results in the cache after execution.
    ///
    /// TODO: Implement cache lookups in the build loop. Currently the backend
    /// is stored but not actively used during builds. Integration points:
    /// - Check cache before executing RUN instructions
    /// - Store layer data after successful instruction execution
    /// - Cache base image layers pulled from registries
    #[cfg(feature = "cache")]
    cache_backend: Option<Arc<Box<dyn BlobCacheBackend>>>,
    /// Local OCI registry for checking cached images before remote pulls.
    /// All constructors in this file initialize this to `None`.
    #[cfg(feature = "local-registry")]
    local_registry: Option<LocalRegistry>,
}
474
475impl ImageBuilder {
    /// Create a new `ImageBuilder` with the given context directory
    ///
    /// The context directory should contain the Dockerfile (unless using
    /// a runtime template) and any files that will be copied into the image.
    ///
    /// # Arguments
    ///
    /// * `context` - Path to the build context directory
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - The context directory does not exist
    /// - Buildah is not installed or not accessible
    ///
    /// # Example
    ///
    /// ```no_run
    /// use zlayer_builder::ImageBuilder;
    ///
    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
    /// let builder = ImageBuilder::new("./my-project").await?;
    /// # Ok(())
    /// # }
    /// ```
    #[instrument(skip_all, fields(context = %context.as_ref().display()))]
    pub async fn new(context: impl AsRef<Path>) -> Result<Self> {
        let context = context.as_ref().to_path_buf();

        // Verify context exists
        if !context.exists() {
            return Err(BuildError::ContextRead {
                path: context,
                source: std::io::Error::new(
                    std::io::ErrorKind::NotFound,
                    "Build context directory not found",
                ),
            });
        }

        // Detect the best available build backend for this platform.
        // `.ok()` intentionally discards the detection error: `backend` is
        // simply left as `None` in that case.
        let backend = crate::backend::detect_backend().await.ok();

        // Initialize buildah executor.
        // On macOS, if buildah is not found we fall back to a default executor
        // (the backend will handle the actual build dispatch).
        let executor = match BuildahExecutor::new_async().await {
            Ok(exec) => exec,
            #[cfg(target_os = "macos")]
            Err(_) => {
                info!("Buildah not found on macOS; backend will handle build dispatch");
                BuildahExecutor::default()
            }
            // On every other platform a missing buildah is a hard error.
            #[cfg(not(target_os = "macos"))]
            Err(e) => return Err(e),
        };

        debug!("Created ImageBuilder for context: {}", context.display());

        Ok(Self {
            context,
            options: BuildOptions::default(),
            executor,
            event_tx: None,
            backend,
            #[cfg(feature = "cache")]
            cache_backend: None,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        })
    }
547
548    /// Create an `ImageBuilder` with a custom buildah executor
549    ///
550    /// This is useful for testing or when you need to configure
551    /// the executor with specific storage options. The executor is
552    /// wrapped in a [`BuildahBackend`] so the build dispatches through
553    /// the [`BuildBackend`] trait.
554    ///
555    /// # Errors
556    ///
557    /// Returns an error if the context directory does not exist.
558    pub fn with_executor(context: impl AsRef<Path>, executor: BuildahExecutor) -> Result<Self> {
559        let context = context.as_ref().to_path_buf();
560
561        if !context.exists() {
562            return Err(BuildError::ContextRead {
563                path: context,
564                source: std::io::Error::new(
565                    std::io::ErrorKind::NotFound,
566                    "Build context directory not found",
567                ),
568            });
569        }
570
571        let backend: Arc<dyn BuildBackend> = Arc::new(
572            crate::backend::BuildahBackend::with_executor(executor.clone()),
573        );
574
575        Ok(Self {
576            context,
577            options: BuildOptions::default(),
578            executor,
579            event_tx: None,
580            backend: Some(backend),
581            #[cfg(feature = "cache")]
582            cache_backend: None,
583            #[cfg(feature = "local-registry")]
584            local_registry: None,
585        })
586    }
587
588    /// Create an `ImageBuilder` with an explicit [`BuildBackend`].
589    ///
590    /// The backend is used for all build, push, tag, and manifest
591    /// operations. The internal `BuildahExecutor` is set to the default
592    /// (it is only used if no backend is set).
593    ///
594    /// # Errors
595    ///
596    /// Returns an error if the context directory does not exist.
597    pub fn with_backend(context: impl AsRef<Path>, backend: Arc<dyn BuildBackend>) -> Result<Self> {
598        let context = context.as_ref().to_path_buf();
599
600        if !context.exists() {
601            return Err(BuildError::ContextRead {
602                path: context,
603                source: std::io::Error::new(
604                    std::io::ErrorKind::NotFound,
605                    "Build context directory not found",
606                ),
607            });
608        }
609
610        Ok(Self {
611            context,
612            options: BuildOptions::default(),
613            executor: BuildahExecutor::default(),
614            event_tx: None,
615            backend: Some(backend),
616            #[cfg(feature = "cache")]
617            cache_backend: None,
618            #[cfg(feature = "local-registry")]
619            local_registry: None,
620        })
621    }
622
623    /// Set a custom Dockerfile path
624    ///
625    /// By default, the builder looks for a file named `Dockerfile` in the
626    /// context directory. Use this method to specify a different path.
627    ///
628    /// # Example
629    ///
630    /// ```no_run
631    /// # use zlayer_builder::ImageBuilder;
632    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
633    /// let builder = ImageBuilder::new("./my-project").await?
634    ///     .dockerfile("./my-project/Dockerfile.prod");
635    /// # Ok(())
636    /// # }
637    /// ```
638    #[must_use]
639    pub fn dockerfile(mut self, path: impl AsRef<Path>) -> Self {
640        self.options.dockerfile = Some(path.as_ref().to_path_buf());
641        self
642    }
643
644    /// Set a custom `ZImagefile` path
645    ///
646    /// `ZImagefiles` are a YAML-based alternative to Dockerfiles. When set,
647    /// the builder will parse the `ZImagefile` and convert it to the internal
648    /// Dockerfile IR for execution.
649    ///
650    /// # Example
651    ///
652    /// ```no_run
653    /// # use zlayer_builder::ImageBuilder;
654    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
655    /// let builder = ImageBuilder::new("./my-project").await?
656    ///     .zimagefile("./my-project/ZImagefile");
657    /// # Ok(())
658    /// # }
659    /// ```
660    #[must_use]
661    pub fn zimagefile(mut self, path: impl AsRef<Path>) -> Self {
662        self.options.zimagefile = Some(path.as_ref().to_path_buf());
663        self
664    }
665
666    /// Use a runtime template instead of a Dockerfile
667    ///
668    /// Runtime templates provide pre-built Dockerfiles for common
669    /// development environments. When set, the Dockerfile option is ignored.
670    ///
671    /// # Example
672    ///
673    /// ```no_run
674    /// use zlayer_builder::{ImageBuilder, Runtime};
675    ///
676    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
677    /// let builder = ImageBuilder::new("./my-node-app").await?
678    ///     .runtime(Runtime::Node20);
679    /// # Ok(())
680    /// # }
681    /// ```
682    #[must_use]
683    pub fn runtime(mut self, runtime: Runtime) -> Self {
684        self.options.runtime = Some(runtime);
685        self
686    }
687
688    /// Add a build argument
689    ///
690    /// Build arguments are passed to the Dockerfile and can be referenced
691    /// using the `ARG` instruction.
692    ///
693    /// # Example
694    ///
695    /// ```no_run
696    /// # use zlayer_builder::ImageBuilder;
697    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
698    /// let builder = ImageBuilder::new("./my-project").await?
699    ///     .build_arg("VERSION", "1.0.0")
700    ///     .build_arg("DEBUG", "false");
701    /// # Ok(())
702    /// # }
703    /// ```
704    #[must_use]
705    pub fn build_arg(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
706        self.options.build_args.insert(key.into(), value.into());
707        self
708    }
709
710    /// Set multiple build arguments at once
711    #[must_use]
712    pub fn build_args(mut self, args: HashMap<String, String>) -> Self {
713        self.options.build_args.extend(args);
714        self
715    }
716
717    /// Set the target stage for multi-stage builds
718    ///
719    /// When building a multi-stage Dockerfile, you can stop at a specific
720    /// stage instead of building all stages.
721    ///
722    /// # Example
723    ///
724    /// ```no_run
725    /// # use zlayer_builder::ImageBuilder;
726    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
727    /// // Dockerfile:
728    /// // FROM node:20 AS builder
729    /// // ...
730    /// // FROM node:20-slim AS runtime
731    /// // ...
732    ///
733    /// let builder = ImageBuilder::new("./my-project").await?
734    ///     .target("builder")
735    ///     .tag("myapp:builder");
736    /// # Ok(())
737    /// # }
738    /// ```
739    #[must_use]
740    pub fn target(mut self, stage: impl Into<String>) -> Self {
741        self.options.target = Some(stage.into());
742        self
743    }
744
745    /// Add an image tag
746    ///
747    /// Tags are applied to the final image. You can add multiple tags.
748    /// The first tag is used as the primary image name during commit.
749    ///
750    /// # Example
751    ///
752    /// ```no_run
753    /// # use zlayer_builder::ImageBuilder;
754    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
755    /// let builder = ImageBuilder::new("./my-project").await?
756    ///     .tag("myapp:latest")
757    ///     .tag("myapp:v1.0.0")
758    ///     .tag("registry.example.com/myapp:v1.0.0");
759    /// # Ok(())
760    /// # }
761    /// ```
762    #[must_use]
763    pub fn tag(mut self, tag: impl Into<String>) -> Self {
764        self.options.tags.push(tag.into());
765        self
766    }
767
768    /// Disable layer caching
769    ///
770    /// When enabled, all layers are rebuilt from scratch even if
771    /// they could be served from cache.
772    ///
773    /// Note: Currently this flag is tracked but not fully implemented in the
774    /// build process. `ZLayer` uses manual container creation (`buildah from`,
775    /// `buildah run`, `buildah commit`) which doesn't have built-in caching
776    /// like `buildah build` does. Future work could implement layer-level
777    /// caching by checking instruction hashes against previously built layers.
778    #[must_use]
779    pub fn no_cache(mut self) -> Self {
780        self.options.no_cache = true;
781        self
782    }
783
784    /// Set the base-image pull strategy for the build.
785    ///
786    /// By default, `buildah from` is invoked with `--pull=newer`, so an
787    /// up-to-date local base image is reused but a newer one on the
788    /// registry will be fetched. Pass [`PullBaseMode::Always`] to force a
789    /// fresh pull on every build, or [`PullBaseMode::Never`] to stay fully
790    /// offline.
791    #[must_use]
792    pub fn pull(mut self, mode: PullBaseMode) -> Self {
793        self.options.pull = mode;
794        self
795    }
796
797    /// Enable or disable layer caching
798    ///
799    /// This controls the `--layers` flag for buildah. When enabled (default),
800    /// buildah can cache and reuse intermediate layers.
801    ///
802    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
803    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
804    /// flag is reserved for future use when/if we switch to `buildah build`.
805    ///
806    /// # Example
807    ///
808    /// ```no_run
809    /// # use zlayer_builder::ImageBuilder;
810    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
811    /// let builder = ImageBuilder::new("./my-project").await?
812    ///     .layers(false)  // Disable layer caching
813    ///     .tag("myapp:latest");
814    /// # Ok(())
815    /// # }
816    /// ```
817    #[must_use]
818    pub fn layers(mut self, enable: bool) -> Self {
819        self.options.layers = enable;
820        self
821    }
822
823    /// Set registry to pull cache from
824    ///
825    /// This corresponds to buildah's `--cache-from` flag, which allows
826    /// pulling cached layers from a remote registry to speed up builds.
827    ///
828    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
829    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
830    /// option is reserved for future implementation.
831    ///
832    /// TODO: Implement remote cache support. This would require either:
833    /// 1. Switching to `buildah build` command which supports --cache-from natively
834    /// 2. Implementing custom layer caching with registry pull for intermediate layers
835    ///
836    /// # Example
837    ///
838    /// ```no_run
839    /// # use zlayer_builder::ImageBuilder;
840    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
841    /// let builder = ImageBuilder::new("./my-project").await?
842    ///     .cache_from("registry.example.com/myapp:cache")
843    ///     .tag("myapp:latest");
844    /// # Ok(())
845    /// # }
846    /// ```
847    #[must_use]
848    pub fn cache_from(mut self, registry: impl Into<String>) -> Self {
849        self.options.cache_from = Some(registry.into());
850        self
851    }
852
853    /// Set registry to push cache to
854    ///
855    /// This corresponds to buildah's `--cache-to` flag, which allows
856    /// pushing cached layers to a remote registry for future builds to use.
857    ///
858    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
859    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
860    /// option is reserved for future implementation.
861    ///
862    /// TODO: Implement remote cache support. This would require either:
863    /// 1. Switching to `buildah build` command which supports --cache-to natively
864    /// 2. Implementing custom layer caching with registry push for intermediate layers
865    ///
866    /// # Example
867    ///
868    /// ```no_run
869    /// # use zlayer_builder::ImageBuilder;
870    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
871    /// let builder = ImageBuilder::new("./my-project").await?
872    ///     .cache_to("registry.example.com/myapp:cache")
873    ///     .tag("myapp:latest");
874    /// # Ok(())
875    /// # }
876    /// ```
877    #[must_use]
878    pub fn cache_to(mut self, registry: impl Into<String>) -> Self {
879        self.options.cache_to = Some(registry.into());
880        self
881    }
882
883    /// Set maximum cache age
884    ///
885    /// This corresponds to buildah's `--cache-ttl` flag, which sets the
886    /// maximum age for cached layers before they are considered stale.
887    ///
888    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
889    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
890    /// option is reserved for future implementation.
891    ///
892    /// TODO: Implement cache TTL support. This would require either:
893    /// 1. Switching to `buildah build` command which supports --cache-ttl natively
894    /// 2. Implementing custom cache expiration logic for our layer caching system
895    ///
896    /// # Example
897    ///
898    /// ```no_run
899    /// # use zlayer_builder::ImageBuilder;
900    /// # use std::time::Duration;
901    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
902    /// let builder = ImageBuilder::new("./my-project").await?
903    ///     .cache_ttl(Duration::from_secs(3600 * 24))  // 24 hours
904    ///     .tag("myapp:latest");
905    /// # Ok(())
906    /// # }
907    /// ```
908    #[must_use]
909    pub fn cache_ttl(mut self, ttl: std::time::Duration) -> Self {
910        self.options.cache_ttl = Some(ttl);
911        self
912    }
913
914    /// Push the image to a registry after building
915    ///
916    /// # Arguments
917    ///
918    /// * `auth` - Registry authentication credentials
919    ///
920    /// # Example
921    ///
922    /// ```no_run
923    /// use zlayer_builder::{ImageBuilder, RegistryAuth};
924    ///
925    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
926    /// let builder = ImageBuilder::new("./my-project").await?
927    ///     .tag("registry.example.com/myapp:v1.0.0")
928    ///     .push(RegistryAuth::new("user", "password"));
929    /// # Ok(())
930    /// # }
931    /// ```
932    #[must_use]
933    pub fn push(mut self, auth: RegistryAuth) -> Self {
934        self.options.push = true;
935        self.options.registry_auth = Some(auth);
936        self
937    }
938
939    /// Enable pushing without authentication
940    ///
941    /// Use this for registries that don't require authentication
942    /// (e.g., local registries, insecure registries).
943    #[must_use]
944    pub fn push_without_auth(mut self) -> Self {
945        self.options.push = true;
946        self.options.registry_auth = None;
947        self
948    }
949
950    /// Set a default OCI/WASM-compatible registry to check for images.
951    ///
952    /// When set, the builder will probe this registry for short image names
953    /// before qualifying them to `docker.io`. For example, if set to
954    /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
955    /// the builder will check `git.example.com:5000/myapp:latest` first.
956    #[must_use]
957    pub fn default_registry(mut self, registry: impl Into<String>) -> Self {
958        self.options.default_registry = Some(registry.into());
959        self
960    }
961
962    /// Set a local OCI registry for image resolution.
963    ///
964    /// When set, the builder checks the local registry for cached images
965    /// before pulling from remote registries.
966    #[cfg(feature = "local-registry")]
967    #[must_use]
968    pub fn with_local_registry(mut self, registry: LocalRegistry) -> Self {
969        self.local_registry = Some(registry);
970        self
971    }
972
973    /// Squash all layers into a single layer
974    ///
975    /// This reduces image size but loses layer caching benefits.
976    #[must_use]
977    pub fn squash(mut self) -> Self {
978        self.options.squash = true;
979        self
980    }
981
982    /// Set the image format
983    ///
984    /// Valid values are "oci" (default) or "docker".
985    #[must_use]
986    pub fn format(mut self, format: impl Into<String>) -> Self {
987        self.options.format = Some(format.into());
988        self
989    }
990
991    /// Set default cache mounts to inject into all RUN instructions
992    #[must_use]
993    pub fn default_cache_mounts(mut self, mounts: Vec<RunMount>) -> Self {
994        self.options.default_cache_mounts = mounts;
995        self
996    }
997
998    /// Set the number of retries for failed RUN steps
999    #[must_use]
1000    pub fn retries(mut self, retries: u32) -> Self {
1001        self.options.retries = retries;
1002        self
1003    }
1004
1005    /// Set the target platform for cross-architecture builds.
1006    #[must_use]
1007    pub fn platform(mut self, platform: impl Into<String>) -> Self {
1008        self.options.platform = Some(platform.into());
1009        self
1010    }
1011
1012    /// Set a pre-computed source hash for content-based cache invalidation.
1013    ///
1014    /// When set, the sandbox builder can skip a full rebuild if the cached
1015    /// image was produced from identical source content.
1016    #[must_use]
1017    pub fn source_hash(mut self, hash: impl Into<String>) -> Self {
1018        self.options.source_hash = Some(hash.into());
1019        self
1020    }
1021
1022    /// Set an event sender for TUI progress updates
1023    ///
1024    /// Events will be sent as the build progresses, allowing you to
1025    /// display a progress UI or log build status.
1026    ///
1027    /// # Example
1028    ///
1029    /// ```no_run
1030    /// use zlayer_builder::{ImageBuilder, BuildEvent};
1031    /// use std::sync::mpsc;
1032    ///
1033    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1034    /// let (tx, rx) = mpsc::channel::<BuildEvent>();
1035    ///
1036    /// let builder = ImageBuilder::new("./my-project").await?
1037    ///     .tag("myapp:latest")
1038    ///     .with_events(tx);
1039    /// # Ok(())
1040    /// # }
1041    /// ```
1042    #[must_use]
1043    pub fn with_events(mut self, tx: mpsc::Sender<BuildEvent>) -> Self {
1044        self.event_tx = Some(tx);
1045        self
1046    }
1047
1048    /// Configure a persistent disk cache backend for layer caching.
1049    ///
1050    /// When configured, the builder will store layer data on disk at the
1051    /// specified path. This cache persists across builds and significantly
1052    /// speeds up repeated builds of similar images.
1053    ///
1054    /// Requires the `cache-persistent` feature to be enabled.
1055    ///
1056    /// # Arguments
1057    ///
1058    /// * `path` - Path to the cache directory. If a directory, creates
1059    ///   `blob_cache.redb` inside it. If a file path, uses it directly.
1060    ///
1061    /// # Example
1062    ///
1063    /// ```no_run,ignore
1064    /// use zlayer_builder::ImageBuilder;
1065    ///
1066    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1067    /// let builder = ImageBuilder::new("./my-project").await?
1068    ///     .with_cache_dir("/var/cache/zlayer")
1069    ///     .tag("myapp:latest");
1070    /// # Ok(())
1071    /// # }
1072    /// ```
1073    ///
1074    /// # Integration Status
1075    ///
1076    /// TODO: The cache backend is currently stored but not actively used
1077    /// during builds. Future work will wire up:
1078    /// - Cache lookups before executing RUN instructions
1079    /// - Storing layer data after successful execution
1080    /// - Caching base image layers from registry pulls
1081    #[cfg(feature = "cache-persistent")]
1082    #[must_use]
1083    pub fn with_cache_dir(mut self, path: impl AsRef<Path>) -> Self {
1084        self.options.cache_backend_config = Some(CacheBackendConfig::Persistent {
1085            path: path.as_ref().to_path_buf(),
1086        });
1087        debug!(
1088            "Configured persistent cache at: {}",
1089            path.as_ref().display()
1090        );
1091        self
1092    }
1093
1094    /// Configure an in-memory cache backend for layer caching.
1095    ///
1096    /// The in-memory cache is cleared when the process exits, but can
1097    /// speed up builds within a single session by caching intermediate
1098    /// layers and avoiding redundant operations.
1099    ///
1100    /// Requires the `cache` feature to be enabled.
1101    ///
1102    /// # Example
1103    ///
1104    /// ```no_run,ignore
1105    /// use zlayer_builder::ImageBuilder;
1106    ///
1107    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1108    /// let builder = ImageBuilder::new("./my-project").await?
1109    ///     .with_memory_cache()
1110    ///     .tag("myapp:latest");
1111    /// # Ok(())
1112    /// # }
1113    /// ```
1114    ///
1115    /// # Integration Status
1116    ///
1117    /// TODO: The cache backend is currently stored but not actively used
1118    /// during builds. See `with_cache_dir` for integration status details.
1119    #[cfg(feature = "cache")]
1120    #[must_use]
1121    pub fn with_memory_cache(mut self) -> Self {
1122        self.options.cache_backend_config = Some(CacheBackendConfig::Memory);
1123        debug!("Configured in-memory cache");
1124        self
1125    }
1126
1127    /// Configure an S3-compatible storage backend for layer caching.
1128    ///
1129    /// This is useful for distributed build systems where multiple build
1130    /// machines need to share a layer cache. Supports AWS S3, Cloudflare R2,
1131    /// Backblaze B2, `MinIO`, and other S3-compatible services.
1132    ///
1133    /// Requires the `cache-s3` feature to be enabled.
1134    ///
1135    /// # Arguments
1136    ///
1137    /// * `bucket` - S3 bucket name
1138    /// * `region` - AWS region (optional, uses SDK default if not set)
1139    ///
1140    /// # Example
1141    ///
1142    /// ```no_run,ignore
1143    /// use zlayer_builder::ImageBuilder;
1144    ///
1145    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1146    /// let builder = ImageBuilder::new("./my-project").await?
1147    ///     .with_s3_cache("my-build-cache", Some("us-west-2"))
1148    ///     .tag("myapp:latest");
1149    /// # Ok(())
1150    /// # }
1151    /// ```
1152    ///
1153    /// # Integration Status
1154    ///
1155    /// TODO: The cache backend is currently stored but not actively used
1156    /// during builds. See `with_cache_dir` for integration status details.
1157    #[cfg(feature = "cache-s3")]
1158    #[must_use]
1159    pub fn with_s3_cache(mut self, bucket: impl Into<String>, region: Option<String>) -> Self {
1160        self.options.cache_backend_config = Some(CacheBackendConfig::S3 {
1161            bucket: bucket.into(),
1162            region,
1163            endpoint: None,
1164            prefix: None,
1165        });
1166        debug!("Configured S3 cache");
1167        self
1168    }
1169
1170    /// Configure an S3-compatible storage backend with custom endpoint.
1171    ///
1172    /// Use this method for S3-compatible services that require a custom
1173    /// endpoint URL (e.g., Cloudflare R2, `MinIO`, local development).
1174    ///
1175    /// Requires the `cache-s3` feature to be enabled.
1176    ///
1177    /// # Arguments
1178    ///
1179    /// * `bucket` - S3 bucket name
1180    /// * `endpoint` - Custom endpoint URL
1181    /// * `region` - Region (required for some S3-compatible services)
1182    ///
1183    /// # Example
1184    ///
1185    /// ```no_run,ignore
1186    /// use zlayer_builder::ImageBuilder;
1187    ///
1188    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1189    /// // Cloudflare R2
1190    /// let builder = ImageBuilder::new("./my-project").await?
1191    ///     .with_s3_cache_endpoint(
1192    ///         "my-bucket",
1193    ///         "https://accountid.r2.cloudflarestorage.com",
1194    ///         Some("auto".to_string()),
1195    ///     )
1196    ///     .tag("myapp:latest");
1197    /// # Ok(())
1198    /// # }
1199    /// ```
1200    #[cfg(feature = "cache-s3")]
1201    #[must_use]
1202    pub fn with_s3_cache_endpoint(
1203        mut self,
1204        bucket: impl Into<String>,
1205        endpoint: impl Into<String>,
1206        region: Option<String>,
1207    ) -> Self {
1208        self.options.cache_backend_config = Some(CacheBackendConfig::S3 {
1209            bucket: bucket.into(),
1210            region,
1211            endpoint: Some(endpoint.into()),
1212            prefix: None,
1213        });
1214        debug!("Configured S3 cache with custom endpoint");
1215        self
1216    }
1217
1218    /// Configure a custom cache backend configuration.
1219    ///
1220    /// This is the most flexible way to configure the cache backend,
1221    /// allowing full control over all cache settings.
1222    ///
1223    /// Requires the `cache` feature to be enabled.
1224    ///
1225    /// # Example
1226    ///
1227    /// ```no_run,ignore
1228    /// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
1229    ///
1230    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1231    /// let builder = ImageBuilder::new("./my-project").await?
1232    ///     .with_cache_config(CacheBackendConfig::Memory)
1233    ///     .tag("myapp:latest");
1234    /// # Ok(())
1235    /// # }
1236    /// ```
1237    #[cfg(feature = "cache")]
1238    #[must_use]
1239    pub fn with_cache_config(mut self, config: CacheBackendConfig) -> Self {
1240        self.options.cache_backend_config = Some(config);
1241        debug!("Configured custom cache backend");
1242        self
1243    }
1244
1245    /// Set an already-initialized cache backend directly.
1246    ///
1247    /// This is useful when you have a pre-configured cache backend instance
1248    /// that you want to share across multiple builders or when you need
1249    /// fine-grained control over cache initialization.
1250    ///
1251    /// Requires the `cache` feature to be enabled.
1252    ///
1253    /// # Example
1254    ///
1255    /// ```no_run,ignore
1256    /// use zlayer_builder::ImageBuilder;
1257    /// use zlayer_registry::cache::BlobCache;
1258    /// use std::sync::Arc;
1259    ///
1260    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1261    /// let cache = Arc::new(Box::new(BlobCache::new()?) as Box<dyn zlayer_registry::cache::BlobCacheBackend>);
1262    ///
1263    /// let builder = ImageBuilder::new("./my-project").await?
1264    ///     .with_cache_backend(cache)
1265    ///     .tag("myapp:latest");
1266    /// # Ok(())
1267    /// # }
1268    /// ```
1269    #[cfg(feature = "cache")]
1270    #[must_use]
1271    pub fn with_cache_backend(mut self, backend: Arc<Box<dyn BlobCacheBackend>>) -> Self {
1272        self.cache_backend = Some(backend);
1273        debug!("Configured pre-initialized cache backend");
1274        self
1275    }
1276
    /// Run the build
    ///
    /// This executes the complete build process:
    /// 1. Parse Dockerfile or load runtime template
    /// 2. Build all required stages
    /// 3. Commit and tag the final image
    /// 4. Push to registry if configured
    /// 5. Clean up intermediate containers
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Dockerfile parsing fails
    /// - A buildah command fails
    /// - Target stage is not found
    /// - Registry push fails
    ///
    /// # Panics
    ///
    /// Panics if an instruction output is missing after all retry attempts (internal invariant).
    #[instrument(skip(self), fields(context = %self.context.display()))]
    pub async fn build(self) -> Result<BuiltImage> {
        let start_time = std::time::Instant::now();

        info!("Starting build in context: {}", self.context.display());

        // 1. Get build output (Dockerfile IR or WASM artifact)
        let build_output = self.get_build_output().await?;

        // If this is a WASM build, return early with the artifact info.
        // No container backend is involved: the "image" is the .wasm file
        // itself, reported with an `image_id` of the form `wasm:<path>`.
        if let BuildOutput::WasmArtifact {
            wasm_path,
            oci_path: _,
            language,
            optimized,
            size,
        } = build_output
        {
            // Truncation is acceptable: build times fit comfortably in u64 ms.
            #[allow(clippy::cast_possible_truncation)]
            let build_time_ms = start_time.elapsed().as_millis() as u64;

            self.send_event(BuildEvent::BuildComplete {
                image_id: wasm_path.display().to_string(),
            });

            info!(
                "WASM build completed in {}ms: {} ({}, {} bytes, optimized={})",
                build_time_ms,
                wasm_path.display(),
                language,
                size,
                optimized
            );

            return Ok(BuiltImage {
                image_id: format!("wasm:{}", wasm_path.display()),
                tags: self.options.tags.clone(),
                layer_count: 1,
                size,
                build_time_ms,
                is_manifest: false,
            });
        }

        // Extract the Dockerfile from the BuildOutput.
        let BuildOutput::Dockerfile(dockerfile) = build_output else {
            unreachable!("WasmArtifact case handled above");
        };
        debug!("Parsed Dockerfile with {} stages", dockerfile.stages.len());

        // Delegate the build to the backend.
        // NOTE(review): a missing backend is reported via the
        // `BuildahNotFound` variant — confirm that's the intended error
        // type for "no backend configured" rather than a dedicated variant.
        let backend = self
            .backend
            .as_ref()
            .ok_or_else(|| BuildError::BuildahNotFound {
                message: "No build backend configured".into(),
            })?;

        info!("Delegating build to {} backend", backend.name());
        backend
            .build_image(
                &self.context,
                &dockerfile,
                &self.options,
                self.event_tx.clone(),
            )
            .await
    }
1365
    /// Detection order:
    /// 1. If `runtime` is set -> use template string -> parse as Dockerfile
    /// 2. If `zimagefile` is explicitly set -> read & parse `ZImagefile` -> convert
    /// 3. If a file called `ZImagefile` exists in the context dir -> same as (2)
    /// 4. Fall back to reading a Dockerfile (from `dockerfile` option or default)
    ///
    /// Returns [`BuildOutput::Dockerfile`] for container builds or
    /// [`BuildOutput::WasmArtifact`] for WASM builds.
    async fn get_build_output(&self) -> Result<BuildOutput> {
        // (a) Runtime template takes highest priority.
        if let Some(runtime) = &self.options.runtime {
            debug!("Using runtime template: {}", runtime);
            let content = get_template(*runtime);
            return Ok(BuildOutput::Dockerfile(Dockerfile::parse(content)?));
        }

        // (b) Explicit ZImagefile path.
        if let Some(ref zimage_path) = self.options.zimagefile {
            debug!("Reading ZImagefile: {}", zimage_path.display());
            let content =
                fs::read_to_string(zimage_path)
                    .await
                    .map_err(|e| BuildError::ContextRead {
                        path: zimage_path.clone(),
                        source: e,
                    })?;
            let zimage = crate::zimage::parse_zimagefile(&content)?;
            return self.handle_zimage(&zimage).await;
        }

        // (c) Auto-detect ZImagefile in context directory.
        // NOTE(review): `exists()` is a synchronous filesystem probe inside
        // an async fn — acceptable for a single stat, but worth confirming
        // it never runs on a latency-sensitive executor thread.
        let auto_zimage_path = self.context.join("ZImagefile");
        if auto_zimage_path.exists() {
            debug!(
                "Found ZImagefile in context: {}",
                auto_zimage_path.display()
            );
            let content = fs::read_to_string(&auto_zimage_path).await.map_err(|e| {
                BuildError::ContextRead {
                    path: auto_zimage_path,
                    source: e,
                }
            })?;
            let zimage = crate::zimage::parse_zimagefile(&content)?;
            return self.handle_zimage(&zimage).await;
        }

        // (d) Fall back to Dockerfile: explicit `dockerfile` option wins,
        // otherwise `<context>/Dockerfile`.
        let dockerfile_path = self
            .options
            .dockerfile
            .clone()
            .unwrap_or_else(|| self.context.join("Dockerfile"));

        debug!("Reading Dockerfile: {}", dockerfile_path.display());

        let content =
            fs::read_to_string(&dockerfile_path)
                .await
                .map_err(|e| BuildError::ContextRead {
                    path: dockerfile_path,
                    source: e,
                })?;

        Ok(BuildOutput::Dockerfile(Dockerfile::parse(&content)?))
    }
1432
1433    /// Convert a parsed [`ZImage`] into a [`BuildOutput`].
1434    ///
1435    /// Handles all four `ZImage` modes:
1436    /// - **Runtime** mode: delegates to the template system -> [`BuildOutput::Dockerfile`]
1437    /// - **Single-stage / Multi-stage**: converts via [`zimage_to_dockerfile`] -> [`BuildOutput::Dockerfile`]
1438    /// - **WASM** mode: builds a WASM component -> [`BuildOutput::WasmArtifact`]
1439    ///
1440    /// Any `build:` directives are resolved first by spawning nested builds.
1441    async fn handle_zimage(&self, zimage: &crate::zimage::ZImage) -> Result<BuildOutput> {
1442        // Runtime mode: delegate to template system.
1443        if let Some(ref runtime_name) = zimage.runtime {
1444            let rt = Runtime::from_name(runtime_name).ok_or_else(|| {
1445                BuildError::zimagefile_validation(format!(
1446                    "unknown runtime '{runtime_name}' in ZImagefile"
1447                ))
1448            })?;
1449            let content = get_template(rt);
1450            return Ok(BuildOutput::Dockerfile(Dockerfile::parse(content)?));
1451        }
1452
1453        // WASM mode: build a WASM component.
1454        if let Some(ref wasm_config) = zimage.wasm {
1455            return self.handle_wasm_build(wasm_config).await;
1456        }
1457
1458        // Resolve any `build:` directives to concrete base image tags.
1459        let resolved = self.resolve_build_directives(zimage).await?;
1460
1461        // Single-stage or multi-stage: convert to Dockerfile IR directly.
1462        Ok(BuildOutput::Dockerfile(
1463            crate::zimage::zimage_to_dockerfile(&resolved)?,
1464        ))
1465    }
1466
1467    /// Build a WASM component from the `ZImagefile` wasm configuration.
1468    ///
1469    /// Converts [`ZWasmConfig`](crate::zimage::ZWasmConfig) into a
1470    /// [`WasmBuildConfig`](crate::wasm_builder::WasmBuildConfig) and invokes
1471    /// the WASM builder pipeline.
1472    async fn handle_wasm_build(
1473        &self,
1474        wasm_config: &crate::zimage::ZWasmConfig,
1475    ) -> Result<BuildOutput> {
1476        use crate::wasm_builder::{build_wasm, WasiTarget, WasmBuildConfig, WasmLanguage};
1477
1478        info!("ZImagefile specifies WASM mode, running WASM build");
1479
1480        // Convert target string to WasiTarget enum.
1481        let target = match wasm_config.target.as_str() {
1482            "preview1" => WasiTarget::Preview1,
1483            _ => WasiTarget::Preview2,
1484        };
1485
1486        // Resolve language: parse from string or leave as None for auto-detection.
1487        let language = wasm_config
1488            .language
1489            .as_deref()
1490            .and_then(WasmLanguage::from_name);
1491
1492        if let Some(ref lang_str) = wasm_config.language {
1493            if language.is_none() {
1494                return Err(BuildError::zimagefile_validation(format!(
1495                    "unknown WASM language '{lang_str}'. Supported: rust, go, python, \
1496                     typescript, assemblyscript, c, zig"
1497                )));
1498            }
1499        }
1500
1501        // Build the WasmBuildConfig.
1502        let mut config = WasmBuildConfig {
1503            language,
1504            target,
1505            optimize: wasm_config.optimize,
1506            opt_level: wasm_config
1507                .opt_level
1508                .clone()
1509                .unwrap_or_else(|| "Oz".to_string()),
1510            wit_path: wasm_config.wit.as_ref().map(PathBuf::from),
1511            output_path: wasm_config.output.as_ref().map(PathBuf::from),
1512            world: wasm_config.world.clone(),
1513            features: wasm_config.features.clone(),
1514            build_args: wasm_config.build_args.clone(),
1515            pre_build: Vec::new(),
1516            post_build: Vec::new(),
1517            adapter: wasm_config.adapter.as_ref().map(PathBuf::from),
1518        };
1519
1520        // Convert ZCommand pre/post build steps to Vec<Vec<String>>.
1521        for cmd in &wasm_config.pre_build {
1522            config.pre_build.push(zcommand_to_args(cmd));
1523        }
1524        for cmd in &wasm_config.post_build {
1525            config.post_build.push(zcommand_to_args(cmd));
1526        }
1527
1528        // Build the WASM component.
1529        let result = build_wasm(&self.context, config).await?;
1530
1531        let language_name = result.language.name().to_string();
1532        let wasm_path = result.wasm_path;
1533        let size = result.size;
1534
1535        info!(
1536            "WASM build complete: {} ({} bytes, optimized={})",
1537            wasm_path.display(),
1538            size,
1539            wasm_config.optimize
1540        );
1541
1542        Ok(BuildOutput::WasmArtifact {
1543            wasm_path,
1544            oci_path: None,
1545            language: language_name,
1546            optimized: wasm_config.optimize,
1547            size,
1548        })
1549    }
1550
1551    /// Resolve `build:` directives in a `ZImage` by running nested builds.
1552    ///
1553    /// For each `build:` directive (top-level or per-stage), this method:
1554    /// 1. Determines the build context directory
1555    /// 2. Auto-detects the build file (`ZImagefile` > Dockerfile) unless specified
1556    /// 3. Spawns a nested `ImageBuilder` to build the context
1557    /// 4. Tags the result and replaces `build` with `base`
1558    async fn resolve_build_directives(
1559        &self,
1560        zimage: &crate::zimage::ZImage,
1561    ) -> Result<crate::zimage::ZImage> {
1562        let mut resolved = zimage.clone();
1563
1564        // Resolve top-level `build:` directive.
1565        if let Some(ref build_ctx) = resolved.build {
1566            let tag = self.run_nested_build(build_ctx, "toplevel").await?;
1567            resolved.base = Some(tag);
1568            resolved.build = None;
1569        }
1570
1571        // Resolve per-stage `build:` directives.
1572        if let Some(ref mut stages) = resolved.stages {
1573            for (name, stage) in stages.iter_mut() {
1574                if let Some(ref build_ctx) = stage.build {
1575                    let tag = self.run_nested_build(build_ctx, name).await?;
1576                    stage.base = Some(tag);
1577                    stage.build = None;
1578                }
1579            }
1580        }
1581
1582        Ok(resolved)
1583    }
1584
1585    /// Run a nested build from a `build:` directive and return the resulting image tag.
1586    fn run_nested_build<'a>(
1587        &'a self,
1588        build_ctx: &'a crate::zimage::types::ZBuildContext,
1589        stage_name: &'a str,
1590    ) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<String>> + Send + 'a>> {
1591        Box::pin(self.run_nested_build_inner(build_ctx, stage_name))
1592    }
1593
    /// Implementation of the nested build: builds `build_ctx` with a fresh
    /// `ImageBuilder` and returns the tag assigned to the result.
    ///
    /// `stage_name` is used both for diagnostics and as part of the
    /// generated tag, so concurrent directives for different stages get
    /// distinct tags even within the same second.
    async fn run_nested_build_inner(
        &self,
        build_ctx: &crate::zimage::types::ZBuildContext,
        stage_name: &str,
    ) -> Result<String> {
        let context_dir = build_ctx.context_dir(&self.context);

        // Fail early with a descriptive error instead of letting the nested
        // builder hit a confusing read failure later.
        if !context_dir.exists() {
            return Err(BuildError::ContextRead {
                path: context_dir,
                source: std::io::Error::new(
                    std::io::ErrorKind::NotFound,
                    format!(
                        "build context directory not found for build directive in '{stage_name}'"
                    ),
                ),
            });
        }

        info!(
            "Building nested image for '{}' from context: {}",
            stage_name,
            context_dir.display()
        );

        // Create a tag for the nested build result.
        // Tag uniqueness relies on stage name + second-resolution timestamp;
        // repeated builds of the same stage within one second reuse the tag.
        let tag = format!(
            "zlayer-build-dep-{}:{}",
            stage_name,
            chrono_lite_timestamp()
        );

        // Create nested builder.
        let mut nested = ImageBuilder::new(&context_dir).await?;
        nested = nested.tag(&tag);

        // Apply explicit build file if specified.
        // Heuristic: .yml/.yaml extensions or a "ZImagefile" prefix are
        // treated as ZImagefiles; everything else is read as a Dockerfile.
        if let Some(file) = build_ctx.file() {
            let file_path = context_dir.join(file);
            if std::path::Path::new(file).extension().is_some_and(|ext| {
                ext.eq_ignore_ascii_case("yml") || ext.eq_ignore_ascii_case("yaml")
            }) || file.starts_with("ZImagefile")
            {
                nested = nested.zimagefile(file_path);
            } else {
                nested = nested.dockerfile(file_path);
            }
        }

        // Apply build args.
        for (key, value) in build_ctx.args() {
            nested = nested.build_arg(&key, &value);
        }

        // Propagate default registry if set.
        if let Some(ref reg) = self.options.default_registry {
            nested = nested.default_registry(reg.clone());
        }

        // Run the nested build.
        let result = nested.build().await?;
        info!(
            "Nested build for '{}' completed: {}",
            stage_name, result.image_id
        );

        // Return the tag (not the image id): the caller substitutes it as
        // the stage's `base` reference.
        Ok(tag)
    }
1662
1663    /// Send an event to the TUI (if configured)
1664    fn send_event(&self, event: BuildEvent) {
1665        if let Some(tx) = &self.event_tx {
1666            // Ignore send errors - the receiver may have been dropped
1667            let _ = tx.send(event);
1668        }
1669    }
1670}
1671
/// Generate a timestamp-based name component: whole seconds since the Unix
/// epoch, rendered as a decimal string.
///
/// If the system clock reports a time before the epoch (clock skew), this
/// falls back to a zero duration and returns `"0"` instead of panicking.
fn chrono_lite_timestamp() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
        // `.to_string()` over `format!("{}", ..)` — same output, clearer
        // intent (Clippy `useless_format`).
        .to_string()
}
1680
1681/// Convert a [`ZCommand`](crate::zimage::ZCommand) into a vector of string arguments
1682/// suitable for passing to [`WasmBuildConfig`](crate::wasm_builder::WasmBuildConfig)
1683/// pre/post build command lists.
1684fn zcommand_to_args(cmd: &crate::zimage::ZCommand) -> Vec<String> {
1685    match cmd {
1686        crate::zimage::ZCommand::Shell(s) => {
1687            vec!["/bin/sh".to_string(), "-c".to_string(), s.clone()]
1688        }
1689        crate::zimage::ZCommand::Exec(args) => args.clone(),
1690    }
1691}
1692
#[cfg(test)]
mod tests {
    use super::*;

    // --- RegistryAuth ------------------------------------------------------

    #[test]
    fn test_registry_auth_new() {
        let credentials = RegistryAuth::new("user", "pass");
        assert_eq!(credentials.username, "user");
        assert_eq!(credentials.password, "pass");
    }

    // --- BuildOptions defaults ---------------------------------------------

    #[test]
    fn test_build_options_default() {
        let defaults = BuildOptions::default();

        // No build source, template, or stage target is preselected.
        assert!(defaults.dockerfile.is_none());
        assert!(defaults.zimagefile.is_none());
        assert!(defaults.runtime.is_none());
        assert!(defaults.target.is_none());

        // No args or tags, and every boolean switch starts off.
        assert!(defaults.build_args.is_empty());
        assert!(defaults.tags.is_empty());
        assert!(!defaults.no_cache);
        assert!(!defaults.push);
        assert!(!defaults.squash);

        // Layer caching is on by default; the other cache knobs are unset.
        assert!(defaults.layers);
        assert!(defaults.cache_from.is_none());
        assert!(defaults.cache_to.is_none());
        assert!(defaults.cache_ttl.is_none());

        // Backend configuration exists only behind the `cache` feature.
        #[cfg(feature = "cache")]
        assert!(defaults.cache_backend_config.is_none());
    }

    /// Build a minimal `ImageBuilder` by hand, bypassing the async
    /// constructor (no filesystem access or buildah probing happens here).
    fn create_test_builder() -> ImageBuilder {
        ImageBuilder {
            context: PathBuf::from("/tmp/test"),
            options: BuildOptions::default(),
            executor: BuildahExecutor::with_path("/usr/bin/buildah"),
            event_tx: None,
            backend: None,
            #[cfg(feature = "cache")]
            cache_backend: None,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        }
    }

    // --- Builder method chaining -------------------------------------------

    #[test]
    fn test_builder_chaining() {
        let configured = create_test_builder()
            .dockerfile("./Dockerfile.test")
            .runtime(Runtime::Node20)
            .build_arg("VERSION", "1.0")
            .target("builder")
            .tag("myapp:latest")
            .tag("myapp:v1")
            .no_cache()
            .squash()
            .format("oci");

        let opts = &configured.options;
        assert_eq!(opts.dockerfile, Some(PathBuf::from("./Dockerfile.test")));
        assert_eq!(opts.runtime, Some(Runtime::Node20));
        assert_eq!(opts.build_args.get("VERSION"), Some(&"1.0".to_string()));
        assert_eq!(opts.target, Some("builder".to_string()));
        assert_eq!(opts.tags.len(), 2);
        assert!(opts.no_cache);
        assert!(opts.squash);
        assert_eq!(opts.format, Some("oci".to_string()));
    }

    #[test]
    fn test_builder_push_with_auth() {
        let builder = create_test_builder().push(RegistryAuth::new("user", "pass"));

        assert!(builder.options.push);
        let auth = builder
            .options
            .registry_auth
            .expect("push() should store the supplied credentials");
        assert_eq!(auth.username, "user");
        assert_eq!(auth.password, "pass");
    }

    #[test]
    fn test_builder_push_without_auth() {
        let builder = create_test_builder().push_without_auth();

        assert!(builder.options.push);
        assert!(builder.options.registry_auth.is_none());
    }

    #[test]
    fn test_builder_layers() {
        // Layer caching defaults to enabled.
        let builder = create_test_builder();
        assert!(builder.options.layers);

        // Toggle off, then back on, checking each transition.
        let builder = builder.layers(false);
        assert!(!builder.options.layers);

        let builder = builder.layers(true);
        assert!(builder.options.layers);
    }

    #[test]
    fn test_builder_cache_from() {
        let builder = create_test_builder();
        assert!(builder.options.cache_from.is_none());

        let builder = builder.cache_from("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_from.as_deref(),
            Some("registry.example.com/myapp:cache")
        );
    }

    #[test]
    fn test_builder_cache_to() {
        let builder = create_test_builder();
        assert!(builder.options.cache_to.is_none());

        let builder = builder.cache_to("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_to.as_deref(),
            Some("registry.example.com/myapp:cache")
        );
    }

    #[test]
    fn test_builder_cache_ttl() {
        use std::time::Duration;

        let builder = create_test_builder();
        assert!(builder.options.cache_ttl.is_none());

        let one_hour = Duration::from_secs(3600);
        let builder = builder.cache_ttl(one_hour);
        assert_eq!(builder.options.cache_ttl, Some(one_hour));
    }

    #[test]
    fn test_builder_cache_options_chaining() {
        use std::time::Duration;

        let two_hours = Duration::from_secs(7200);
        let builder = create_test_builder()
            .layers(true)
            .cache_from("registry.example.com/cache:input")
            .cache_to("registry.example.com/cache:output")
            .cache_ttl(two_hours)
            .no_cache();

        assert!(builder.options.layers);
        assert!(builder.options.no_cache);
        assert_eq!(
            builder.options.cache_from.as_deref(),
            Some("registry.example.com/cache:input")
        );
        assert_eq!(
            builder.options.cache_to.as_deref(),
            Some("registry.example.com/cache:output")
        );
        assert_eq!(builder.options.cache_ttl, Some(two_hours));
    }

    #[test]
    fn test_chrono_lite_timestamp() {
        let ts = chrono_lite_timestamp();
        // Must parse as seconds-since-epoch...
        let secs: u64 = ts.parse().expect("Should be a valid u64");
        // ...and be later than Nov 2023 (1_700_000_000), i.e. a sane clock.
        assert!(secs > 1_700_000_000);
    }
}