// zlayer_builder/builder.rs
1//! `ImageBuilder` - High-level API for building container images
2//!
3//! This module provides the [`ImageBuilder`] type which orchestrates the full
4//! container image build process, from Dockerfile parsing through buildah
5//! execution to final image creation.
6//!
7//! # Example
8//!
9//! ```no_run
10//! use zlayer_builder::{ImageBuilder, Runtime};
11//!
12//! #[tokio::main]
13//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
14//!     // Build from a Dockerfile
15//!     let image = ImageBuilder::new("./my-app").await?
16//!         .tag("myapp:latest")
17//!         .tag("myapp:v1.0.0")
18//!         .build()
19//!         .await?;
20//!
21//!     println!("Built image: {}", image.image_id);
22//!     Ok(())
23//! }
24//! ```
25//!
26//! # Using Runtime Templates
27//!
28//! ```no_run
29//! use zlayer_builder::{ImageBuilder, Runtime};
30//!
31//! #[tokio::main]
32//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
33//!     // Build using a runtime template (no Dockerfile needed)
34//!     let image = ImageBuilder::new("./my-node-app").await?
35//!         .runtime(Runtime::Node20)
36//!         .tag("myapp:latest")
37//!         .build()
38//!         .await?;
39//!
40//!     println!("Built image: {}", image.image_id);
41//!     Ok(())
42//! }
43//! ```
44//!
45//! # Multi-stage Builds with Target
46//!
47//! ```no_run
48//! use zlayer_builder::ImageBuilder;
49//!
50//! #[tokio::main]
51//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
52//!     // Build only up to a specific stage
53//!     let image = ImageBuilder::new("./my-app").await?
54//!         .target("builder")
55//!         .tag("myapp:builder")
56//!         .build()
57//!         .await?;
58//!
59//!     println!("Built intermediate image: {}", image.image_id);
60//!     Ok(())
61//! }
62//! ```
63//!
64//! # With TUI Progress Updates
65//!
66//! ```no_run
67//! use zlayer_builder::{ImageBuilder, BuildEvent};
68//! use std::sync::mpsc;
69//!
70//! #[tokio::main]
71//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
72//!     let (tx, rx) = mpsc::channel::<BuildEvent>();
73//!
74//!     // Start TUI in another thread
75//!     std::thread::spawn(move || {
76//!         // Process events from rx...
77//!         while let Ok(event) = rx.recv() {
78//!             println!("Event: {:?}", event);
79//!         }
80//!     });
81//!
//!     let _image = ImageBuilder::new("./my-app").await?
83//!         .tag("myapp:latest")
84//!         .with_events(tx)
85//!         .build()
86//!         .await?;
87//!
88//!     Ok(())
89//! }
90//! ```
91//!
92//! # With Cache Backend (requires `cache` feature)
93//!
94//! ```no_run,ignore
95//! use zlayer_builder::ImageBuilder;
96//!
97//! #[tokio::main]
98//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
99//!     let image = ImageBuilder::new("./my-app").await?
100//!         .with_cache_dir("/var/cache/zlayer")  // Use persistent disk cache
101//!         .tag("myapp:latest")
102//!         .build()
103//!         .await?;
104//!
105//!     println!("Built image: {}", image.image_id);
106//!     Ok(())
107//! }
108//! ```
109
110use std::collections::HashMap;
111use std::path::{Path, PathBuf};
112use std::sync::mpsc;
113use std::sync::Arc;
114
115use tokio::fs;
116use tracing::{debug, info, instrument, warn};
117
118use crate::backend::BuildBackend;
119use crate::buildah::{BuildahCommand, BuildahExecutor};
120use crate::dockerfile::{Dockerfile, RunMount};
121use crate::error::{BuildError, Result};
122use crate::templates::{get_template, Runtime};
123use crate::tui::BuildEvent;
124
125#[cfg(feature = "cache")]
126use zlayer_registry::cache::BlobCacheBackend;
127
128#[cfg(feature = "local-registry")]
129use zlayer_registry::LocalRegistry;
130
131#[cfg(feature = "local-registry")]
132use zlayer_registry::import_image;
133
/// Output from parsing a `ZImagefile` - either a Dockerfile for container builds
/// or a WASM build result for WebAssembly builds.
///
/// Most `ZImagefile` modes (runtime, single-stage, multi-stage) produce a
/// [`Dockerfile`] IR that is then built with buildah. WASM mode produces
/// a compiled artifact directly, bypassing the container build pipeline.
#[derive(Debug)]
pub enum BuildOutput {
    /// Standard container build - produces a Dockerfile to be built with buildah.
    Dockerfile(Dockerfile),
    /// WASM component build - already built, produces artifact path.
    /// No buildah invocation happens for this variant.
    WasmArtifact {
        /// Path to the compiled WASM binary.
        wasm_path: PathBuf,
        /// Path to the OCI artifact directory (if exported); `None` when
        /// no OCI export was requested.
        oci_path: Option<PathBuf>,
        /// Source language used.
        language: String,
        /// Whether optimization was applied.
        optimized: bool,
        /// Size of the output file in bytes.
        size: u64,
    },
}
158
/// Configuration for the layer cache backend.
///
/// This enum specifies which cache backend to use for storing and retrieving
/// cached layers during builds. The cache feature must be enabled for this
/// to be available. Defaults to [`CacheBackendConfig::Memory`].
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// // Use persistent disk cache
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_config(CacheBackendConfig::Persistent {
///         path: "/var/cache/zlayer".into(),
///     })
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "cache")]
#[derive(Debug, Clone, Default)]
pub enum CacheBackendConfig {
    /// In-memory cache (cleared when process exits).
    ///
    /// Useful for CI/CD environments where persistence isn't needed
    /// but you want to avoid re-downloading base image layers within
    /// a single build session.
    #[default]
    Memory,

    /// Persistent disk-based cache using redb.
    ///
    /// Requires the `cache-persistent` feature. Layers are stored on disk
    /// and persist across builds, significantly speeding up repeated builds.
    #[cfg(feature = "cache-persistent")]
    Persistent {
        /// Path to the cache directory or database file.
        /// If a directory, `blob_cache.redb` will be created inside it.
        path: PathBuf,
    },

    /// S3-compatible object storage backend.
    ///
    /// Requires the `cache-s3` feature. Useful for distributed build systems
    /// where multiple build machines need to share a cache.
    #[cfg(feature = "cache-s3")]
    S3 {
        /// S3 bucket name
        bucket: String,
        /// AWS region (optional, uses SDK default if not set)
        region: Option<String>,
        /// Custom endpoint URL (for S3-compatible services like R2, B2, `MinIO`)
        endpoint: Option<String>,
        /// Key prefix for cached blobs (default: "zlayer/layers/")
        prefix: Option<String>,
    },
}
218
/// Built image information returned after a successful build.
///
/// Plain data describing the build result; cheap to clone and safe to
/// send to other threads for logging/reporting.
#[derive(Debug, Clone)]
pub struct BuiltImage {
    /// Image ID (sha256:...)
    pub image_id: String,
    /// Applied tags
    pub tags: Vec<String>,
    /// Number of layers in the final image
    pub layer_count: usize,
    /// Total size in bytes (0 if not computed)
    pub size: u64,
    /// Build duration in milliseconds
    pub build_time_ms: u64,
    /// Whether this image is a manifest list (multi-arch).
    pub is_manifest: bool,
}
235
/// Registry authentication credentials
///
/// `Debug` is implemented manually (instead of derived) so the password
/// is redacted rather than leaked into logs, tracing output, or error
/// messages that format this struct with `{:?}`.
#[derive(Clone)]
pub struct RegistryAuth {
    /// Registry username
    pub username: String,
    /// Registry password or token
    pub password: String,
}

impl std::fmt::Debug for RegistryAuth {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Never print the real password; credentials routinely end up in
        // build logs via `{:?}` on BuildOptions.
        f.debug_struct("RegistryAuth")
            .field("username", &self.username)
            .field("password", &"<redacted>")
            .finish()
    }
}

impl RegistryAuth {
    /// Create new registry authentication
    ///
    /// # Arguments
    ///
    /// * `username` - Registry username
    /// * `password` - Registry password or access token
    pub fn new(username: impl Into<String>, password: impl Into<String>) -> Self {
        Self {
            username: username.into(),
            password: password.into(),
        }
    }
}
254
/// Strategy for pulling the base image before building.
///
/// Controls the `--pull` flag passed to `buildah from`. The default is
/// [`PullBaseMode::Newer`], matching the behaviour users expect from
/// modern build tools: fast when nothing has changed, correct when the
/// upstream base image has been republished.
///
/// Set per-build via [`ImageBuilder::pull`] or directly on
/// [`BuildOptions::pull`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum PullBaseMode {
    /// Pull only if the registry has a newer version (`--pull=newer`).
    /// Default behaviour.
    #[default]
    Newer,
    /// Always pull, even if a local copy exists (`--pull=always`).
    Always,
    /// Never pull — use whatever is in local storage (no `--pull` flag passed).
    Never,
}
272
/// Build options for customizing the image build process
///
/// Construct via [`Default`] and override individual fields, or use the
/// fluent setter methods on [`ImageBuilder`], which write into this struct.
#[derive(Debug, Clone)]
#[allow(clippy::struct_excessive_bools)]
pub struct BuildOptions {
    /// Dockerfile path (default: Dockerfile in context)
    pub dockerfile: Option<PathBuf>,
    /// `ZImagefile` path (alternative to Dockerfile)
    pub zimagefile: Option<PathBuf>,
    /// Use runtime template instead of Dockerfile
    pub runtime: Option<Runtime>,
    /// Build arguments (ARG values)
    pub build_args: HashMap<String, String>,
    /// Target stage for multi-stage builds
    pub target: Option<String>,
    /// Image tags to apply
    pub tags: Vec<String>,
    /// Disable layer caching
    pub no_cache: bool,
    /// Push to registry after build
    pub push: bool,
    /// Registry auth (if pushing)
    pub registry_auth: Option<RegistryAuth>,
    /// Squash all layers into one
    pub squash: bool,
    /// Image format (oci or docker)
    pub format: Option<String>,
    /// Enable buildah layer caching (--layers flag for `buildah build`).
    /// Default: true
    ///
    /// Note: `ZLayer` uses manual container creation (`buildah from`, `buildah run`,
    /// `buildah commit`) rather than `buildah build`, so this flag is reserved
    /// for future use when/if we switch to `buildah build` (bud) command.
    pub layers: bool,
    /// Registry to pull cache from (--cache-from for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-from=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-from natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_from: Option<String>,
    /// Registry to push cache to (--cache-to for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-to=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-to natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_to: Option<String>,
    /// Maximum cache age (--cache-ttl for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-ttl=<duration>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement cache TTL support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-ttl natively
    /// 2. Implementing custom cache expiration logic for our layer caching system
    pub cache_ttl: Option<std::time::Duration>,
    /// Cache backend configuration (requires `cache` feature).
    ///
    /// When configured, the builder will store layer data in the specified
    /// cache backend for faster subsequent builds. This is separate from
    /// buildah's native caching and operates at the `ZLayer` level.
    ///
    /// # Integration Points
    ///
    /// The cache backend is used at several points during the build:
    ///
    /// 1. **Before instruction execution**: Check if a cached layer exists
    ///    for the (`instruction_hash`, `base_layer`) tuple
    /// 2. **After instruction execution**: Store the resulting layer data
    ///    in the cache for future builds
    /// 3. **Base image layers**: Cache pulled base image layers to avoid
    ///    re-downloading from registries
    ///
    /// TODO: Wire up cache lookups in the build loop once layer digests
    /// are properly computed and tracked.
    #[cfg(feature = "cache")]
    pub cache_backend_config: Option<CacheBackendConfig>,
    /// Default OCI/WASM-compatible registry to check for images before falling
    /// back to Docker Hub qualification.
    ///
    /// When set, the builder will probe this registry for short image names
    /// before qualifying them to `docker.io`. For example, if set to
    /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
    /// the builder will check `git.example.com:5000/myapp:latest` first.
    pub default_registry: Option<String>,
    /// Default cache mounts injected into all RUN instructions.
    /// These are merged with any step-level cache mounts (deduped by target path).
    pub default_cache_mounts: Vec<RunMount>,
    /// Number of retries for failed RUN steps (0 = no retries, default)
    pub retries: u32,
    /// Target platform for the build (e.g., "linux/amd64", "linux/arm64").
    /// When set, `buildah from` pulls the platform-specific image variant.
    pub platform: Option<String>,
    /// SHA-256 hash of the source Dockerfile/ZImagefile content.
    ///
    /// When set, the sandbox builder can skip a rebuild if the cached image
    /// was produced from identical source content (content-based invalidation).
    pub source_hash: Option<String>,
    /// How to handle base-image pulling during `buildah from`.
    ///
    /// Default: [`PullBaseMode::Newer`] — only pull if the registry has a
    /// newer version. Set to [`PullBaseMode::Always`] for CI builds that
    /// must always refresh, or [`PullBaseMode::Never`] for offline builds.
    pub pull: PullBaseMode,
}
385
386impl Default for BuildOptions {
387    fn default() -> Self {
388        Self {
389            dockerfile: None,
390            zimagefile: None,
391            runtime: None,
392            build_args: HashMap::new(),
393            target: None,
394            tags: Vec::new(),
395            no_cache: false,
396            push: false,
397            registry_auth: None,
398            squash: false,
399            format: None,
400            layers: true,
401            cache_from: None,
402            cache_to: None,
403            cache_ttl: None,
404            #[cfg(feature = "cache")]
405            cache_backend_config: None,
406            default_registry: None,
407            default_cache_mounts: Vec::new(),
408            retries: 0,
409            platform: None,
410            source_hash: None,
411            pull: PullBaseMode::default(),
412        }
413    }
414}
415
/// Image builder - orchestrates the full build process
///
/// `ImageBuilder` provides a fluent API for configuring and executing
/// container image builds using buildah as the backend.
///
/// # Build Process
///
/// 1. Parse Dockerfile (or use runtime template)
/// 2. Resolve target stages if specified
/// 3. Build each stage sequentially:
///    - Create working container from base image
///    - Execute each instruction
///    - Commit intermediate stages for COPY --from
/// 4. Commit final image with tags
/// 5. Push to registry if configured
/// 6. Clean up intermediate containers
///
/// # Cache Backend Integration (requires `cache` feature)
///
/// When a cache backend is configured, the builder can store and retrieve
/// cached layer data to speed up subsequent builds:
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
///
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_dir("/var/cache/zlayer")
///     .tag("myapp:latest");
/// ```
pub struct ImageBuilder {
    /// Build context directory
    context: PathBuf,
    /// Build options (populated by the fluent setter methods; see
    /// [`BuildOptions`] for field-level documentation)
    options: BuildOptions,
    /// Buildah executor (kept for backwards compatibility)
    #[allow(dead_code)]
    executor: BuildahExecutor,
    /// Event sender for TUI updates
    event_tx: Option<mpsc::Sender<BuildEvent>>,
    /// Pluggable build backend (buildah, sandbox, etc.).
    ///
    /// When set, the `build()` method delegates to this backend instead of
    /// using the inline buildah logic. Set automatically by `new()` via
    /// `detect_backend()`, or explicitly via `with_backend()`.
    backend: Option<Arc<dyn BuildBackend>>,
    /// Cache backend for layer caching (requires `cache` feature).
    ///
    /// When set, the builder will attempt to retrieve cached layers before
    /// executing instructions, and store results in the cache after execution.
    ///
    /// TODO: Implement cache lookups in the build loop. Currently the backend
    /// is stored but not actively used during builds. Integration points:
    /// - Check cache before executing RUN instructions
    /// - Store layer data after successful instruction execution
    /// - Cache base image layers pulled from registries
    #[cfg(feature = "cache")]
    cache_backend: Option<Arc<Box<dyn BlobCacheBackend>>>,
    /// Local OCI registry for checking cached images before remote pulls.
    #[cfg(feature = "local-registry")]
    local_registry: Option<LocalRegistry>,
}
477
478impl ImageBuilder {
    /// Create a new `ImageBuilder` with the given context directory
    ///
    /// The context directory should contain the Dockerfile (unless using
    /// a runtime template) and any files that will be copied into the image.
    ///
    /// On macOS a missing buildah binary is not fatal: a default executor
    /// is used and the detected [`BuildBackend`] handles build dispatch.
    ///
    /// # Arguments
    ///
    /// * `context` - Path to the build context directory
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - The context directory does not exist
    /// - Buildah is not installed or not accessible
    ///
    /// # Example
    ///
    /// ```no_run
    /// use zlayer_builder::ImageBuilder;
    ///
    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
    /// let builder = ImageBuilder::new("./my-project").await?;
    /// # Ok(())
    /// # }
    /// ```
    #[instrument(skip_all, fields(context = %context.as_ref().display()))]
    pub async fn new(context: impl AsRef<Path>) -> Result<Self> {
        let context = context.as_ref().to_path_buf();

        // Verify context exists
        if !context.exists() {
            return Err(BuildError::ContextRead {
                path: context,
                source: std::io::Error::new(
                    std::io::ErrorKind::NotFound,
                    "Build context directory not found",
                ),
            });
        }

        // Detect the best available build backend for this platform.
        // Detection failure is non-fatal (`.ok()`): `backend == None` means
        // build() falls back to the inline buildah logic.
        let backend = crate::backend::detect_backend().await.ok();

        // Initialize buildah executor.
        // On macOS, if buildah is not found we fall back to a default executor
        // (the backend will handle the actual build dispatch).
        let executor = match BuildahExecutor::new_async().await {
            Ok(exec) => exec,
            #[cfg(target_os = "macos")]
            Err(_) => {
                info!("Buildah not found on macOS; backend will handle build dispatch");
                BuildahExecutor::default()
            }
            // On every other platform a missing buildah is a hard error.
            #[cfg(not(target_os = "macos"))]
            Err(e) => return Err(e),
        };

        debug!("Created ImageBuilder for context: {}", context.display());

        Ok(Self {
            context,
            options: BuildOptions::default(),
            executor,
            event_tx: None,
            backend,
            #[cfg(feature = "cache")]
            cache_backend: None,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        })
    }
550
551    /// Create an `ImageBuilder` with a custom buildah executor
552    ///
553    /// This is useful for testing or when you need to configure
554    /// the executor with specific storage options. The executor is
555    /// wrapped in a [`BuildahBackend`] so the build dispatches through
556    /// the [`BuildBackend`] trait.
557    ///
558    /// # Errors
559    ///
560    /// Returns an error if the context directory does not exist.
561    pub fn with_executor(context: impl AsRef<Path>, executor: BuildahExecutor) -> Result<Self> {
562        let context = context.as_ref().to_path_buf();
563
564        if !context.exists() {
565            return Err(BuildError::ContextRead {
566                path: context,
567                source: std::io::Error::new(
568                    std::io::ErrorKind::NotFound,
569                    "Build context directory not found",
570                ),
571            });
572        }
573
574        let backend: Arc<dyn BuildBackend> = Arc::new(
575            crate::backend::BuildahBackend::with_executor(executor.clone()),
576        );
577
578        Ok(Self {
579            context,
580            options: BuildOptions::default(),
581            executor,
582            event_tx: None,
583            backend: Some(backend),
584            #[cfg(feature = "cache")]
585            cache_backend: None,
586            #[cfg(feature = "local-registry")]
587            local_registry: None,
588        })
589    }
590
591    /// Create an `ImageBuilder` with an explicit [`BuildBackend`].
592    ///
593    /// The backend is used for all build, push, tag, and manifest
594    /// operations. The internal `BuildahExecutor` is set to the default
595    /// (it is only used if no backend is set).
596    ///
597    /// # Errors
598    ///
599    /// Returns an error if the context directory does not exist.
600    pub fn with_backend(context: impl AsRef<Path>, backend: Arc<dyn BuildBackend>) -> Result<Self> {
601        let context = context.as_ref().to_path_buf();
602
603        if !context.exists() {
604            return Err(BuildError::ContextRead {
605                path: context,
606                source: std::io::Error::new(
607                    std::io::ErrorKind::NotFound,
608                    "Build context directory not found",
609                ),
610            });
611        }
612
613        Ok(Self {
614            context,
615            options: BuildOptions::default(),
616            executor: BuildahExecutor::default(),
617            event_tx: None,
618            backend: Some(backend),
619            #[cfg(feature = "cache")]
620            cache_backend: None,
621            #[cfg(feature = "local-registry")]
622            local_registry: None,
623        })
624    }
625
626    /// Set a custom Dockerfile path
627    ///
628    /// By default, the builder looks for a file named `Dockerfile` in the
629    /// context directory. Use this method to specify a different path.
630    ///
631    /// # Example
632    ///
633    /// ```no_run
634    /// # use zlayer_builder::ImageBuilder;
635    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
636    /// let builder = ImageBuilder::new("./my-project").await?
637    ///     .dockerfile("./my-project/Dockerfile.prod");
638    /// # Ok(())
639    /// # }
640    /// ```
641    #[must_use]
642    pub fn dockerfile(mut self, path: impl AsRef<Path>) -> Self {
643        self.options.dockerfile = Some(path.as_ref().to_path_buf());
644        self
645    }
646
647    /// Set a custom `ZImagefile` path
648    ///
649    /// `ZImagefiles` are a YAML-based alternative to Dockerfiles. When set,
650    /// the builder will parse the `ZImagefile` and convert it to the internal
651    /// Dockerfile IR for execution.
652    ///
653    /// # Example
654    ///
655    /// ```no_run
656    /// # use zlayer_builder::ImageBuilder;
657    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
658    /// let builder = ImageBuilder::new("./my-project").await?
659    ///     .zimagefile("./my-project/ZImagefile");
660    /// # Ok(())
661    /// # }
662    /// ```
663    #[must_use]
664    pub fn zimagefile(mut self, path: impl AsRef<Path>) -> Self {
665        self.options.zimagefile = Some(path.as_ref().to_path_buf());
666        self
667    }
668
669    /// Use a runtime template instead of a Dockerfile
670    ///
671    /// Runtime templates provide pre-built Dockerfiles for common
672    /// development environments. When set, the Dockerfile option is ignored.
673    ///
674    /// # Example
675    ///
676    /// ```no_run
677    /// use zlayer_builder::{ImageBuilder, Runtime};
678    ///
679    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
680    /// let builder = ImageBuilder::new("./my-node-app").await?
681    ///     .runtime(Runtime::Node20);
682    /// # Ok(())
683    /// # }
684    /// ```
685    #[must_use]
686    pub fn runtime(mut self, runtime: Runtime) -> Self {
687        self.options.runtime = Some(runtime);
688        self
689    }
690
691    /// Add a build argument
692    ///
693    /// Build arguments are passed to the Dockerfile and can be referenced
694    /// using the `ARG` instruction.
695    ///
696    /// # Example
697    ///
698    /// ```no_run
699    /// # use zlayer_builder::ImageBuilder;
700    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
701    /// let builder = ImageBuilder::new("./my-project").await?
702    ///     .build_arg("VERSION", "1.0.0")
703    ///     .build_arg("DEBUG", "false");
704    /// # Ok(())
705    /// # }
706    /// ```
707    #[must_use]
708    pub fn build_arg(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
709        self.options.build_args.insert(key.into(), value.into());
710        self
711    }
712
713    /// Set multiple build arguments at once
714    #[must_use]
715    pub fn build_args(mut self, args: HashMap<String, String>) -> Self {
716        self.options.build_args.extend(args);
717        self
718    }
719
720    /// Set the target stage for multi-stage builds
721    ///
722    /// When building a multi-stage Dockerfile, you can stop at a specific
723    /// stage instead of building all stages.
724    ///
725    /// # Example
726    ///
727    /// ```no_run
728    /// # use zlayer_builder::ImageBuilder;
729    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
730    /// // Dockerfile:
731    /// // FROM node:20 AS builder
732    /// // ...
733    /// // FROM node:20-slim AS runtime
734    /// // ...
735    ///
736    /// let builder = ImageBuilder::new("./my-project").await?
737    ///     .target("builder")
738    ///     .tag("myapp:builder");
739    /// # Ok(())
740    /// # }
741    /// ```
742    #[must_use]
743    pub fn target(mut self, stage: impl Into<String>) -> Self {
744        self.options.target = Some(stage.into());
745        self
746    }
747
748    /// Add an image tag
749    ///
750    /// Tags are applied to the final image. You can add multiple tags.
751    /// The first tag is used as the primary image name during commit.
752    ///
753    /// # Example
754    ///
755    /// ```no_run
756    /// # use zlayer_builder::ImageBuilder;
757    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
758    /// let builder = ImageBuilder::new("./my-project").await?
759    ///     .tag("myapp:latest")
760    ///     .tag("myapp:v1.0.0")
761    ///     .tag("registry.example.com/myapp:v1.0.0");
762    /// # Ok(())
763    /// # }
764    /// ```
765    #[must_use]
766    pub fn tag(mut self, tag: impl Into<String>) -> Self {
767        self.options.tags.push(tag.into());
768        self
769    }
770
771    /// Disable layer caching
772    ///
773    /// When enabled, all layers are rebuilt from scratch even if
774    /// they could be served from cache.
775    ///
776    /// Note: Currently this flag is tracked but not fully implemented in the
777    /// build process. `ZLayer` uses manual container creation (`buildah from`,
778    /// `buildah run`, `buildah commit`) which doesn't have built-in caching
779    /// like `buildah build` does. Future work could implement layer-level
780    /// caching by checking instruction hashes against previously built layers.
781    #[must_use]
782    pub fn no_cache(mut self) -> Self {
783        self.options.no_cache = true;
784        self
785    }
786
787    /// Set the base-image pull strategy for the build.
788    ///
789    /// By default, `buildah from` is invoked with `--pull=newer`, so an
790    /// up-to-date local base image is reused but a newer one on the
791    /// registry will be fetched. Pass [`PullBaseMode::Always`] to force a
792    /// fresh pull on every build, or [`PullBaseMode::Never`] to stay fully
793    /// offline.
794    #[must_use]
795    pub fn pull(mut self, mode: PullBaseMode) -> Self {
796        self.options.pull = mode;
797        self
798    }
799
800    /// Enable or disable layer caching
801    ///
802    /// This controls the `--layers` flag for buildah. When enabled (default),
803    /// buildah can cache and reuse intermediate layers.
804    ///
805    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
806    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
807    /// flag is reserved for future use when/if we switch to `buildah build`.
808    ///
809    /// # Example
810    ///
811    /// ```no_run
812    /// # use zlayer_builder::ImageBuilder;
813    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
814    /// let builder = ImageBuilder::new("./my-project").await?
815    ///     .layers(false)  // Disable layer caching
816    ///     .tag("myapp:latest");
817    /// # Ok(())
818    /// # }
819    /// ```
820    #[must_use]
821    pub fn layers(mut self, enable: bool) -> Self {
822        self.options.layers = enable;
823        self
824    }
825
826    /// Set registry to pull cache from
827    ///
828    /// This corresponds to buildah's `--cache-from` flag, which allows
829    /// pulling cached layers from a remote registry to speed up builds.
830    ///
831    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
832    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
833    /// option is reserved for future implementation.
834    ///
835    /// TODO: Implement remote cache support. This would require either:
836    /// 1. Switching to `buildah build` command which supports --cache-from natively
837    /// 2. Implementing custom layer caching with registry pull for intermediate layers
838    ///
839    /// # Example
840    ///
841    /// ```no_run
842    /// # use zlayer_builder::ImageBuilder;
843    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
844    /// let builder = ImageBuilder::new("./my-project").await?
845    ///     .cache_from("registry.example.com/myapp:cache")
846    ///     .tag("myapp:latest");
847    /// # Ok(())
848    /// # }
849    /// ```
850    #[must_use]
851    pub fn cache_from(mut self, registry: impl Into<String>) -> Self {
852        self.options.cache_from = Some(registry.into());
853        self
854    }
855
856    /// Set registry to push cache to
857    ///
858    /// This corresponds to buildah's `--cache-to` flag, which allows
859    /// pushing cached layers to a remote registry for future builds to use.
860    ///
861    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
862    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
863    /// option is reserved for future implementation.
864    ///
865    /// TODO: Implement remote cache support. This would require either:
866    /// 1. Switching to `buildah build` command which supports --cache-to natively
867    /// 2. Implementing custom layer caching with registry push for intermediate layers
868    ///
869    /// # Example
870    ///
871    /// ```no_run
872    /// # use zlayer_builder::ImageBuilder;
873    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
874    /// let builder = ImageBuilder::new("./my-project").await?
875    ///     .cache_to("registry.example.com/myapp:cache")
876    ///     .tag("myapp:latest");
877    /// # Ok(())
878    /// # }
879    /// ```
880    #[must_use]
881    pub fn cache_to(mut self, registry: impl Into<String>) -> Self {
882        self.options.cache_to = Some(registry.into());
883        self
884    }
885
886    /// Set maximum cache age
887    ///
888    /// This corresponds to buildah's `--cache-ttl` flag, which sets the
889    /// maximum age for cached layers before they are considered stale.
890    ///
891    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
892    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
893    /// option is reserved for future implementation.
894    ///
895    /// TODO: Implement cache TTL support. This would require either:
896    /// 1. Switching to `buildah build` command which supports --cache-ttl natively
897    /// 2. Implementing custom cache expiration logic for our layer caching system
898    ///
899    /// # Example
900    ///
901    /// ```no_run
902    /// # use zlayer_builder::ImageBuilder;
903    /// # use std::time::Duration;
904    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
905    /// let builder = ImageBuilder::new("./my-project").await?
906    ///     .cache_ttl(Duration::from_secs(3600 * 24))  // 24 hours
907    ///     .tag("myapp:latest");
908    /// # Ok(())
909    /// # }
910    /// ```
911    #[must_use]
912    pub fn cache_ttl(mut self, ttl: std::time::Duration) -> Self {
913        self.options.cache_ttl = Some(ttl);
914        self
915    }
916
917    /// Push the image to a registry after building
918    ///
919    /// # Arguments
920    ///
921    /// * `auth` - Registry authentication credentials
922    ///
923    /// # Example
924    ///
925    /// ```no_run
926    /// use zlayer_builder::{ImageBuilder, RegistryAuth};
927    ///
928    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
929    /// let builder = ImageBuilder::new("./my-project").await?
930    ///     .tag("registry.example.com/myapp:v1.0.0")
931    ///     .push(RegistryAuth::new("user", "password"));
932    /// # Ok(())
933    /// # }
934    /// ```
935    #[must_use]
936    pub fn push(mut self, auth: RegistryAuth) -> Self {
937        self.options.push = true;
938        self.options.registry_auth = Some(auth);
939        self
940    }
941
942    /// Enable pushing without authentication
943    ///
944    /// Use this for registries that don't require authentication
945    /// (e.g., local registries, insecure registries).
946    #[must_use]
947    pub fn push_without_auth(mut self) -> Self {
948        self.options.push = true;
949        self.options.registry_auth = None;
950        self
951    }
952
953    /// Set a default OCI/WASM-compatible registry to check for images.
954    ///
955    /// When set, the builder will probe this registry for short image names
956    /// before qualifying them to `docker.io`. For example, if set to
957    /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
958    /// the builder will check `git.example.com:5000/myapp:latest` first.
959    #[must_use]
960    pub fn default_registry(mut self, registry: impl Into<String>) -> Self {
961        self.options.default_registry = Some(registry.into());
962        self
963    }
964
965    /// Set a local OCI registry for image resolution.
966    ///
967    /// When set, the builder checks the local registry for cached images
968    /// before pulling from remote registries.
969    #[cfg(feature = "local-registry")]
970    #[must_use]
971    pub fn with_local_registry(mut self, registry: LocalRegistry) -> Self {
972        self.local_registry = Some(registry);
973        self
974    }
975
976    /// Squash all layers into a single layer
977    ///
978    /// This reduces image size but loses layer caching benefits.
979    #[must_use]
980    pub fn squash(mut self) -> Self {
981        self.options.squash = true;
982        self
983    }
984
985    /// Set the image format
986    ///
987    /// Valid values are "oci" (default) or "docker".
988    #[must_use]
989    pub fn format(mut self, format: impl Into<String>) -> Self {
990        self.options.format = Some(format.into());
991        self
992    }
993
994    /// Set default cache mounts to inject into all RUN instructions
995    #[must_use]
996    pub fn default_cache_mounts(mut self, mounts: Vec<RunMount>) -> Self {
997        self.options.default_cache_mounts = mounts;
998        self
999    }
1000
1001    /// Set the number of retries for failed RUN steps
1002    #[must_use]
1003    pub fn retries(mut self, retries: u32) -> Self {
1004        self.options.retries = retries;
1005        self
1006    }
1007
1008    /// Set the target platform for cross-architecture builds.
1009    #[must_use]
1010    pub fn platform(mut self, platform: impl Into<String>) -> Self {
1011        self.options.platform = Some(platform.into());
1012        self
1013    }
1014
1015    /// Set a pre-computed source hash for content-based cache invalidation.
1016    ///
1017    /// When set, the sandbox builder can skip a full rebuild if the cached
1018    /// image was produced from identical source content.
1019    #[must_use]
1020    pub fn source_hash(mut self, hash: impl Into<String>) -> Self {
1021        self.options.source_hash = Some(hash.into());
1022        self
1023    }
1024
1025    /// Set an event sender for TUI progress updates
1026    ///
1027    /// Events will be sent as the build progresses, allowing you to
1028    /// display a progress UI or log build status.
1029    ///
1030    /// # Example
1031    ///
1032    /// ```no_run
1033    /// use zlayer_builder::{ImageBuilder, BuildEvent};
1034    /// use std::sync::mpsc;
1035    ///
1036    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1037    /// let (tx, rx) = mpsc::channel::<BuildEvent>();
1038    ///
1039    /// let builder = ImageBuilder::new("./my-project").await?
1040    ///     .tag("myapp:latest")
1041    ///     .with_events(tx);
1042    /// # Ok(())
1043    /// # }
1044    /// ```
1045    #[must_use]
1046    pub fn with_events(mut self, tx: mpsc::Sender<BuildEvent>) -> Self {
1047        self.event_tx = Some(tx);
1048        self
1049    }
1050
1051    /// Configure a persistent disk cache backend for layer caching.
1052    ///
1053    /// When configured, the builder will store layer data on disk at the
1054    /// specified path. This cache persists across builds and significantly
1055    /// speeds up repeated builds of similar images.
1056    ///
1057    /// Requires the `cache-persistent` feature to be enabled.
1058    ///
1059    /// # Arguments
1060    ///
1061    /// * `path` - Path to the cache directory. If a directory, creates
1062    ///   `blob_cache.redb` inside it. If a file path, uses it directly.
1063    ///
1064    /// # Example
1065    ///
1066    /// ```no_run,ignore
1067    /// use zlayer_builder::ImageBuilder;
1068    ///
1069    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1070    /// let builder = ImageBuilder::new("./my-project").await?
1071    ///     .with_cache_dir("/var/cache/zlayer")
1072    ///     .tag("myapp:latest");
1073    /// # Ok(())
1074    /// # }
1075    /// ```
1076    ///
1077    /// # Integration Status
1078    ///
1079    /// TODO: The cache backend is currently stored but not actively used
1080    /// during builds. Future work will wire up:
1081    /// - Cache lookups before executing RUN instructions
1082    /// - Storing layer data after successful execution
1083    /// - Caching base image layers from registry pulls
1084    #[cfg(feature = "cache-persistent")]
1085    #[must_use]
1086    pub fn with_cache_dir(mut self, path: impl AsRef<Path>) -> Self {
1087        self.options.cache_backend_config = Some(CacheBackendConfig::Persistent {
1088            path: path.as_ref().to_path_buf(),
1089        });
1090        debug!(
1091            "Configured persistent cache at: {}",
1092            path.as_ref().display()
1093        );
1094        self
1095    }
1096
1097    /// Configure an in-memory cache backend for layer caching.
1098    ///
1099    /// The in-memory cache is cleared when the process exits, but can
1100    /// speed up builds within a single session by caching intermediate
1101    /// layers and avoiding redundant operations.
1102    ///
1103    /// Requires the `cache` feature to be enabled.
1104    ///
1105    /// # Example
1106    ///
1107    /// ```no_run,ignore
1108    /// use zlayer_builder::ImageBuilder;
1109    ///
1110    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1111    /// let builder = ImageBuilder::new("./my-project").await?
1112    ///     .with_memory_cache()
1113    ///     .tag("myapp:latest");
1114    /// # Ok(())
1115    /// # }
1116    /// ```
1117    ///
1118    /// # Integration Status
1119    ///
1120    /// TODO: The cache backend is currently stored but not actively used
1121    /// during builds. See `with_cache_dir` for integration status details.
1122    #[cfg(feature = "cache")]
1123    #[must_use]
1124    pub fn with_memory_cache(mut self) -> Self {
1125        self.options.cache_backend_config = Some(CacheBackendConfig::Memory);
1126        debug!("Configured in-memory cache");
1127        self
1128    }
1129
1130    /// Configure an S3-compatible storage backend for layer caching.
1131    ///
1132    /// This is useful for distributed build systems where multiple build
1133    /// machines need to share a layer cache. Supports AWS S3, Cloudflare R2,
1134    /// Backblaze B2, `MinIO`, and other S3-compatible services.
1135    ///
1136    /// Requires the `cache-s3` feature to be enabled.
1137    ///
1138    /// # Arguments
1139    ///
1140    /// * `bucket` - S3 bucket name
1141    /// * `region` - AWS region (optional, uses SDK default if not set)
1142    ///
1143    /// # Example
1144    ///
1145    /// ```no_run,ignore
1146    /// use zlayer_builder::ImageBuilder;
1147    ///
1148    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1149    /// let builder = ImageBuilder::new("./my-project").await?
1150    ///     .with_s3_cache("my-build-cache", Some("us-west-2"))
1151    ///     .tag("myapp:latest");
1152    /// # Ok(())
1153    /// # }
1154    /// ```
1155    ///
1156    /// # Integration Status
1157    ///
1158    /// TODO: The cache backend is currently stored but not actively used
1159    /// during builds. See `with_cache_dir` for integration status details.
1160    #[cfg(feature = "cache-s3")]
1161    #[must_use]
1162    pub fn with_s3_cache(mut self, bucket: impl Into<String>, region: Option<String>) -> Self {
1163        self.options.cache_backend_config = Some(CacheBackendConfig::S3 {
1164            bucket: bucket.into(),
1165            region,
1166            endpoint: None,
1167            prefix: None,
1168        });
1169        debug!("Configured S3 cache");
1170        self
1171    }
1172
1173    /// Configure an S3-compatible storage backend with custom endpoint.
1174    ///
1175    /// Use this method for S3-compatible services that require a custom
1176    /// endpoint URL (e.g., Cloudflare R2, `MinIO`, local development).
1177    ///
1178    /// Requires the `cache-s3` feature to be enabled.
1179    ///
1180    /// # Arguments
1181    ///
1182    /// * `bucket` - S3 bucket name
1183    /// * `endpoint` - Custom endpoint URL
1184    /// * `region` - Region (required for some S3-compatible services)
1185    ///
1186    /// # Example
1187    ///
1188    /// ```no_run,ignore
1189    /// use zlayer_builder::ImageBuilder;
1190    ///
1191    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1192    /// // Cloudflare R2
1193    /// let builder = ImageBuilder::new("./my-project").await?
1194    ///     .with_s3_cache_endpoint(
1195    ///         "my-bucket",
1196    ///         "https://accountid.r2.cloudflarestorage.com",
1197    ///         Some("auto".to_string()),
1198    ///     )
1199    ///     .tag("myapp:latest");
1200    /// # Ok(())
1201    /// # }
1202    /// ```
1203    #[cfg(feature = "cache-s3")]
1204    #[must_use]
1205    pub fn with_s3_cache_endpoint(
1206        mut self,
1207        bucket: impl Into<String>,
1208        endpoint: impl Into<String>,
1209        region: Option<String>,
1210    ) -> Self {
1211        self.options.cache_backend_config = Some(CacheBackendConfig::S3 {
1212            bucket: bucket.into(),
1213            region,
1214            endpoint: Some(endpoint.into()),
1215            prefix: None,
1216        });
1217        debug!("Configured S3 cache with custom endpoint");
1218        self
1219    }
1220
1221    /// Configure a custom cache backend configuration.
1222    ///
1223    /// This is the most flexible way to configure the cache backend,
1224    /// allowing full control over all cache settings.
1225    ///
1226    /// Requires the `cache` feature to be enabled.
1227    ///
1228    /// # Example
1229    ///
1230    /// ```no_run,ignore
1231    /// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
1232    ///
1233    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1234    /// let builder = ImageBuilder::new("./my-project").await?
1235    ///     .with_cache_config(CacheBackendConfig::Memory)
1236    ///     .tag("myapp:latest");
1237    /// # Ok(())
1238    /// # }
1239    /// ```
1240    #[cfg(feature = "cache")]
1241    #[must_use]
1242    pub fn with_cache_config(mut self, config: CacheBackendConfig) -> Self {
1243        self.options.cache_backend_config = Some(config);
1244        debug!("Configured custom cache backend");
1245        self
1246    }
1247
1248    /// Set an already-initialized cache backend directly.
1249    ///
1250    /// This is useful when you have a pre-configured cache backend instance
1251    /// that you want to share across multiple builders or when you need
1252    /// fine-grained control over cache initialization.
1253    ///
1254    /// Requires the `cache` feature to be enabled.
1255    ///
1256    /// # Example
1257    ///
1258    /// ```no_run,ignore
1259    /// use zlayer_builder::ImageBuilder;
1260    /// use zlayer_registry::cache::BlobCache;
1261    /// use std::sync::Arc;
1262    ///
1263    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1264    /// let cache = Arc::new(Box::new(BlobCache::new()?) as Box<dyn zlayer_registry::cache::BlobCacheBackend>);
1265    ///
1266    /// let builder = ImageBuilder::new("./my-project").await?
1267    ///     .with_cache_backend(cache)
1268    ///     .tag("myapp:latest");
1269    /// # Ok(())
1270    /// # }
1271    /// ```
1272    #[cfg(feature = "cache")]
1273    #[must_use]
1274    pub fn with_cache_backend(mut self, backend: Arc<Box<dyn BlobCacheBackend>>) -> Self {
1275        self.cache_backend = Some(backend);
1276        debug!("Configured pre-initialized cache backend");
1277        self
1278    }
1279
    /// Run the build
    ///
    /// This executes the complete build process:
    /// 1. Parse Dockerfile or load runtime template
    /// 2. Build all required stages
    /// 3. Commit and tag the final image
    /// 4. Push to registry if configured
    /// 5. Clean up intermediate containers
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Dockerfile parsing fails
    /// - A buildah command fails
    /// - Target stage is not found
    /// - Registry push fails
    ///
    /// # Panics
    ///
    /// Panics if an instruction output is missing after all retry attempts (internal invariant).
    #[instrument(skip(self), fields(context = %self.context.display()))]
    pub async fn build(self) -> Result<BuiltImage> {
        // Wall-clock start; reused both for `build_time_ms` and to derive a
        // unique temp-file suffix in the local-registry export below.
        let start_time = std::time::Instant::now();

        info!("Starting build in context: {}", self.context.display());

        // 1. Get build output (Dockerfile IR or WASM artifact)
        let build_output = self.get_build_output().await?;

        // If this is a WASM build, return early with the artifact info.
        // WASM builds bypass the container backend entirely.
        if let BuildOutput::WasmArtifact {
            wasm_path,
            oci_path: _,
            language,
            optimized,
            size,
        } = build_output
        {
            #[allow(clippy::cast_possible_truncation)]
            let build_time_ms = start_time.elapsed().as_millis() as u64;

            self.send_event(BuildEvent::BuildComplete {
                image_id: wasm_path.display().to_string(),
            });

            info!(
                "WASM build completed in {}ms: {} ({}, {} bytes, optimized={})",
                build_time_ms,
                wasm_path.display(),
                language,
                size,
                optimized
            );

            // A WASM artifact is reported as a single "layer" with a
            // synthetic `wasm:` image id derived from the output path.
            return Ok(BuiltImage {
                image_id: format!("wasm:{}", wasm_path.display()),
                tags: self.options.tags.clone(),
                layer_count: 1,
                size,
                build_time_ms,
                is_manifest: false,
            });
        }

        // Extract the Dockerfile from the BuildOutput.
        let BuildOutput::Dockerfile(dockerfile) = build_output else {
            unreachable!("WasmArtifact case handled above");
        };
        debug!("Parsed Dockerfile with {} stages", dockerfile.stages.len());

        // Delegate the build to the backend.
        let backend = self
            .backend
            .as_ref()
            .ok_or_else(|| BuildError::BuildahNotFound {
                message: "No build backend configured".into(),
            })?;

        info!("Delegating build to {} backend", backend.name());
        let built = backend
            .build_image(
                &self.context,
                &dockerfile,
                &self.options,
                self.event_tx.clone(),
            )
            .await?;

        // Import the built image into ZLayer's local registry and blob cache
        // so the runtime can find it without pulling from a remote registry.
        // NOTE: every failure in this section is logged and swallowed on
        // purpose — local-registry import is best-effort and must never fail
        // an otherwise successful build.
        #[cfg(feature = "local-registry")]
        if let Some(ref registry) = self.local_registry {
            if !built.tags.is_empty() {
                // Unique temp path: pid + elapsed nanos avoids collisions
                // between concurrent builds in the same process tree.
                let tmp_path = std::env::temp_dir().join(format!(
                    "zlayer-build-{}-{}.tar",
                    std::process::id(),
                    start_time.elapsed().as_nanos()
                ));

                // Export the image from buildah's store to an OCI archive.
                let export_tag = &built.tags[0];
                let dest = format!("oci-archive:{}", tmp_path.display());
                let push_cmd = BuildahCommand::push_to(export_tag, &dest);

                match self.executor.execute_checked(&push_cmd).await {
                    Ok(_) => {
                        // Resolve the blob cache backend (if available).
                        let blob_cache: Option<&dyn zlayer_registry::cache::BlobCacheBackend> =
                            self.cache_backend.as_ref().map(|arc| arc.as_ref().as_ref());

                        // One archive export serves all tags: the same
                        // archive is imported once per tag.
                        for tag in &built.tags {
                            match import_image(registry, &tmp_path, Some(tag.as_str()), blob_cache)
                                .await
                            {
                                Ok(info) => {
                                    info!(
                                        tag = %tag,
                                        digest = %info.digest,
                                        "Imported into local registry"
                                    );
                                }
                                Err(e) => {
                                    warn!(tag = %tag, error = %e, "Failed to import into local registry");
                                }
                            }
                        }

                        // Clean up the temporary archive.
                        if let Err(e) = fs::remove_file(&tmp_path).await {
                            warn!(path = %tmp_path.display(), error = %e, "Failed to remove temp OCI archive");
                        }
                    }
                    Err(e) => {
                        warn!(error = %e, "Failed to export image for local registry import");
                    }
                }
            }
        }

        Ok(built)
    }
1421
1422    /// Detection order:
1423    /// 1. If `runtime` is set -> use template string -> parse as Dockerfile
1424    /// 2. If `zimagefile` is explicitly set -> read & parse `ZImagefile` -> convert
1425    /// 3. If a file called `ZImagefile` exists in the context dir -> same as (2)
1426    /// 4. Fall back to reading a Dockerfile (from `dockerfile` option or default)
1427    ///
1428    /// Returns [`BuildOutput::Dockerfile`] for container builds or
1429    /// [`BuildOutput::WasmArtifact`] for WASM builds.
1430    async fn get_build_output(&self) -> Result<BuildOutput> {
1431        // (a) Runtime template takes highest priority.
1432        if let Some(runtime) = &self.options.runtime {
1433            debug!("Using runtime template: {}", runtime);
1434            let content = get_template(*runtime);
1435            return Ok(BuildOutput::Dockerfile(Dockerfile::parse(content)?));
1436        }
1437
1438        // (b) Explicit ZImagefile path.
1439        if let Some(ref zimage_path) = self.options.zimagefile {
1440            debug!("Reading ZImagefile: {}", zimage_path.display());
1441            let content =
1442                fs::read_to_string(zimage_path)
1443                    .await
1444                    .map_err(|e| BuildError::ContextRead {
1445                        path: zimage_path.clone(),
1446                        source: e,
1447                    })?;
1448            let zimage = crate::zimage::parse_zimagefile(&content)?;
1449            return self.handle_zimage(&zimage).await;
1450        }
1451
1452        // (c) Auto-detect ZImagefile in context directory.
1453        let auto_zimage_path = self.context.join("ZImagefile");
1454        if auto_zimage_path.exists() {
1455            debug!(
1456                "Found ZImagefile in context: {}",
1457                auto_zimage_path.display()
1458            );
1459            let content = fs::read_to_string(&auto_zimage_path).await.map_err(|e| {
1460                BuildError::ContextRead {
1461                    path: auto_zimage_path,
1462                    source: e,
1463                }
1464            })?;
1465            let zimage = crate::zimage::parse_zimagefile(&content)?;
1466            return self.handle_zimage(&zimage).await;
1467        }
1468
1469        // (d) Fall back to Dockerfile.
1470        let dockerfile_path = self
1471            .options
1472            .dockerfile
1473            .clone()
1474            .unwrap_or_else(|| self.context.join("Dockerfile"));
1475
1476        debug!("Reading Dockerfile: {}", dockerfile_path.display());
1477
1478        let content =
1479            fs::read_to_string(&dockerfile_path)
1480                .await
1481                .map_err(|e| BuildError::ContextRead {
1482                    path: dockerfile_path,
1483                    source: e,
1484                })?;
1485
1486        Ok(BuildOutput::Dockerfile(Dockerfile::parse(&content)?))
1487    }
1488
1489    /// Convert a parsed [`ZImage`] into a [`BuildOutput`].
1490    ///
1491    /// Handles all four `ZImage` modes:
1492    /// - **Runtime** mode: delegates to the template system -> [`BuildOutput::Dockerfile`]
1493    /// - **Single-stage / Multi-stage**: converts via [`zimage_to_dockerfile`] -> [`BuildOutput::Dockerfile`]
1494    /// - **WASM** mode: builds a WASM component -> [`BuildOutput::WasmArtifact`]
1495    ///
1496    /// Any `build:` directives are resolved first by spawning nested builds.
1497    async fn handle_zimage(&self, zimage: &crate::zimage::ZImage) -> Result<BuildOutput> {
1498        // Runtime mode: delegate to template system.
1499        if let Some(ref runtime_name) = zimage.runtime {
1500            let rt = Runtime::from_name(runtime_name).ok_or_else(|| {
1501                BuildError::zimagefile_validation(format!(
1502                    "unknown runtime '{runtime_name}' in ZImagefile"
1503                ))
1504            })?;
1505            let content = get_template(rt);
1506            return Ok(BuildOutput::Dockerfile(Dockerfile::parse(content)?));
1507        }
1508
1509        // WASM mode: build a WASM component.
1510        if let Some(ref wasm_config) = zimage.wasm {
1511            return self.handle_wasm_build(wasm_config).await;
1512        }
1513
1514        // Resolve any `build:` directives to concrete base image tags.
1515        let resolved = self.resolve_build_directives(zimage).await?;
1516
1517        // Single-stage or multi-stage: convert to Dockerfile IR directly.
1518        Ok(BuildOutput::Dockerfile(
1519            crate::zimage::zimage_to_dockerfile(&resolved)?,
1520        ))
1521    }
1522
1523    /// Build a WASM component from the `ZImagefile` wasm configuration.
1524    ///
1525    /// Converts [`ZWasmConfig`](crate::zimage::ZWasmConfig) into a
1526    /// [`WasmBuildConfig`](crate::wasm_builder::WasmBuildConfig) and invokes
1527    /// the WASM builder pipeline.
1528    async fn handle_wasm_build(
1529        &self,
1530        wasm_config: &crate::zimage::ZWasmConfig,
1531    ) -> Result<BuildOutput> {
1532        use crate::wasm_builder::{build_wasm, WasiTarget, WasmBuildConfig, WasmLanguage};
1533
1534        info!("ZImagefile specifies WASM mode, running WASM build");
1535
1536        // Convert target string to WasiTarget enum.
1537        let target = match wasm_config.target.as_str() {
1538            "preview1" => WasiTarget::Preview1,
1539            _ => WasiTarget::Preview2,
1540        };
1541
1542        // Resolve language: parse from string or leave as None for auto-detection.
1543        let language = wasm_config
1544            .language
1545            .as_deref()
1546            .and_then(WasmLanguage::from_name);
1547
1548        if let Some(ref lang_str) = wasm_config.language {
1549            if language.is_none() {
1550                return Err(BuildError::zimagefile_validation(format!(
1551                    "unknown WASM language '{lang_str}'. Supported: rust, go, python, \
1552                     typescript, assemblyscript, c, zig"
1553                )));
1554            }
1555        }
1556
1557        // Build the WasmBuildConfig.
1558        let mut config = WasmBuildConfig {
1559            language,
1560            target,
1561            optimize: wasm_config.optimize,
1562            opt_level: wasm_config
1563                .opt_level
1564                .clone()
1565                .unwrap_or_else(|| "Oz".to_string()),
1566            wit_path: wasm_config.wit.as_ref().map(PathBuf::from),
1567            output_path: wasm_config.output.as_ref().map(PathBuf::from),
1568            world: wasm_config.world.clone(),
1569            features: wasm_config.features.clone(),
1570            build_args: wasm_config.build_args.clone(),
1571            pre_build: Vec::new(),
1572            post_build: Vec::new(),
1573            adapter: wasm_config.adapter.as_ref().map(PathBuf::from),
1574        };
1575
1576        // Convert ZCommand pre/post build steps to Vec<Vec<String>>.
1577        for cmd in &wasm_config.pre_build {
1578            config.pre_build.push(zcommand_to_args(cmd));
1579        }
1580        for cmd in &wasm_config.post_build {
1581            config.post_build.push(zcommand_to_args(cmd));
1582        }
1583
1584        // Build the WASM component.
1585        let result = build_wasm(&self.context, config).await?;
1586
1587        let language_name = result.language.name().to_string();
1588        let wasm_path = result.wasm_path;
1589        let size = result.size;
1590
1591        info!(
1592            "WASM build complete: {} ({} bytes, optimized={})",
1593            wasm_path.display(),
1594            size,
1595            wasm_config.optimize
1596        );
1597
1598        Ok(BuildOutput::WasmArtifact {
1599            wasm_path,
1600            oci_path: None,
1601            language: language_name,
1602            optimized: wasm_config.optimize,
1603            size,
1604        })
1605    }
1606
1607    /// Resolve `build:` directives in a `ZImage` by running nested builds.
1608    ///
1609    /// For each `build:` directive (top-level or per-stage), this method:
1610    /// 1. Determines the build context directory
1611    /// 2. Auto-detects the build file (`ZImagefile` > Dockerfile) unless specified
1612    /// 3. Spawns a nested `ImageBuilder` to build the context
1613    /// 4. Tags the result and replaces `build` with `base`
1614    async fn resolve_build_directives(
1615        &self,
1616        zimage: &crate::zimage::ZImage,
1617    ) -> Result<crate::zimage::ZImage> {
1618        let mut resolved = zimage.clone();
1619
1620        // Resolve top-level `build:` directive.
1621        if let Some(ref build_ctx) = resolved.build {
1622            let tag = self.run_nested_build(build_ctx, "toplevel").await?;
1623            resolved.base = Some(tag);
1624            resolved.build = None;
1625        }
1626
1627        // Resolve per-stage `build:` directives.
1628        if let Some(ref mut stages) = resolved.stages {
1629            for (name, stage) in stages.iter_mut() {
1630                if let Some(ref build_ctx) = stage.build {
1631                    let tag = self.run_nested_build(build_ctx, name).await?;
1632                    stage.base = Some(tag);
1633                    stage.build = None;
1634                }
1635            }
1636        }
1637
1638        Ok(resolved)
1639    }
1640
    /// Run a nested build from a `build:` directive and return the resulting image tag.
    ///
    /// Thin wrapper that boxes and pins the future produced by
    /// `run_nested_build_inner`. NOTE(review): the boxing is presumably needed
    /// because nested builds can recurse back into this call chain (a nested
    /// context may itself contain `build:` directives), which would otherwise
    /// make the `async fn` future type infinitely sized — confirm against
    /// `ImageBuilder::build`.
    fn run_nested_build<'a>(
        &'a self,
        build_ctx: &'a crate::zimage::types::ZBuildContext,
        stage_name: &'a str,
    ) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<String>> + Send + 'a>> {
        Box::pin(self.run_nested_build_inner(build_ctx, stage_name))
    }
1649
1650    async fn run_nested_build_inner(
1651        &self,
1652        build_ctx: &crate::zimage::types::ZBuildContext,
1653        stage_name: &str,
1654    ) -> Result<String> {
1655        let context_dir = build_ctx.context_dir(&self.context);
1656
1657        if !context_dir.exists() {
1658            return Err(BuildError::ContextRead {
1659                path: context_dir,
1660                source: std::io::Error::new(
1661                    std::io::ErrorKind::NotFound,
1662                    format!(
1663                        "build context directory not found for build directive in '{stage_name}'"
1664                    ),
1665                ),
1666            });
1667        }
1668
1669        info!(
1670            "Building nested image for '{}' from context: {}",
1671            stage_name,
1672            context_dir.display()
1673        );
1674
1675        // Create a tag for the nested build result.
1676        let tag = format!(
1677            "zlayer-build-dep-{}:{}",
1678            stage_name,
1679            chrono_lite_timestamp()
1680        );
1681
1682        // Create nested builder.
1683        let mut nested = ImageBuilder::new(&context_dir).await?;
1684        nested = nested.tag(&tag);
1685
1686        // Apply explicit build file if specified.
1687        if let Some(file) = build_ctx.file() {
1688            let file_path = context_dir.join(file);
1689            if std::path::Path::new(file).extension().is_some_and(|ext| {
1690                ext.eq_ignore_ascii_case("yml") || ext.eq_ignore_ascii_case("yaml")
1691            }) || file.starts_with("ZImagefile")
1692            {
1693                nested = nested.zimagefile(file_path);
1694            } else {
1695                nested = nested.dockerfile(file_path);
1696            }
1697        }
1698
1699        // Apply build args.
1700        for (key, value) in build_ctx.args() {
1701            nested = nested.build_arg(&key, &value);
1702        }
1703
1704        // Propagate default registry if set.
1705        if let Some(ref reg) = self.options.default_registry {
1706            nested = nested.default_registry(reg.clone());
1707        }
1708
1709        // Run the nested build.
1710        let result = nested.build().await?;
1711        info!(
1712            "Nested build for '{}' completed: {}",
1713            stage_name, result.image_id
1714        );
1715
1716        Ok(tag)
1717    }
1718
1719    /// Send an event to the TUI (if configured)
1720    fn send_event(&self, event: BuildEvent) {
1721        if let Some(tx) = &self.event_tx {
1722            // Ignore send errors - the receiver may have been dropped
1723            let _ = tx.send(event);
1724        }
1725    }
1726}
1727
/// Generate a timestamp-based name component: the current Unix time in whole
/// seconds, rendered as a decimal string.
///
/// Falls back to `"0"` if the system clock reports a time before the Unix
/// epoch (`duration_since` returns `Err` in that case, mapped to the zero
/// duration via `unwrap_or_default`).
fn chrono_lite_timestamp() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
        .to_string()
}
1736
1737/// Convert a [`ZCommand`](crate::zimage::ZCommand) into a vector of string arguments
1738/// suitable for passing to [`WasmBuildConfig`](crate::wasm_builder::WasmBuildConfig)
1739/// pre/post build command lists.
1740fn zcommand_to_args(cmd: &crate::zimage::ZCommand) -> Vec<String> {
1741    match cmd {
1742        crate::zimage::ZCommand::Shell(s) => {
1743            vec!["/bin/sh".to_string(), "-c".to_string(), s.clone()]
1744        }
1745        crate::zimage::ZCommand::Exec(args) => args.clone(),
1746    }
1747}
1748
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a minimal `ImageBuilder` for unit tests, bypassing the async
    /// constructor (no buildah probe, no filesystem access).
    fn create_test_builder() -> ImageBuilder {
        ImageBuilder {
            context: PathBuf::from("/tmp/test"),
            options: BuildOptions::default(),
            executor: BuildahExecutor::with_path("/usr/bin/buildah"),
            event_tx: None,
            backend: None,
            #[cfg(feature = "cache")]
            cache_backend: None,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        }
    }

    #[test]
    fn test_registry_auth_new() {
        let credentials = RegistryAuth::new("user", "pass");
        assert_eq!(credentials.username, "user");
        assert_eq!(credentials.password, "pass");
    }

    #[test]
    fn test_build_options_default() {
        let defaults = BuildOptions::default();

        // No build inputs are pre-selected.
        assert!(defaults.dockerfile.is_none());
        assert!(defaults.zimagefile.is_none());
        assert!(defaults.runtime.is_none());
        assert!(defaults.target.is_none());
        assert!(defaults.build_args.is_empty());
        assert!(defaults.tags.is_empty());

        // Boolean toggles start off...
        assert!(!defaults.no_cache);
        assert!(!defaults.push);
        assert!(!defaults.squash);
        // ...except layer caching, which defaults to enabled.
        assert!(defaults.layers);

        // No external cache endpoints or TTL configured by default.
        assert!(defaults.cache_from.is_none());
        assert!(defaults.cache_to.is_none());
        assert!(defaults.cache_ttl.is_none());
        // Cache backend config (only with cache feature)
        #[cfg(feature = "cache")]
        assert!(defaults.cache_backend_config.is_none());
    }

    // Builder method chaining tests
    #[test]
    fn test_builder_chaining() {
        let b = create_test_builder()
            .dockerfile("./Dockerfile.test")
            .runtime(Runtime::Node20)
            .build_arg("VERSION", "1.0")
            .target("builder")
            .tag("myapp:latest")
            .tag("myapp:v1")
            .no_cache()
            .squash()
            .format("oci");

        assert_eq!(
            b.options.dockerfile,
            Some(PathBuf::from("./Dockerfile.test"))
        );
        assert_eq!(b.options.runtime, Some(Runtime::Node20));
        assert_eq!(
            b.options.build_args.get("VERSION"),
            Some(&"1.0".to_string())
        );
        assert_eq!(b.options.target.as_deref(), Some("builder"));
        assert_eq!(b.options.tags.len(), 2);
        assert!(b.options.no_cache);
        assert!(b.options.squash);
        assert_eq!(b.options.format.as_deref(), Some("oci"));
    }

    #[test]
    fn test_builder_push_with_auth() {
        let b = create_test_builder().push(RegistryAuth::new("user", "pass"));

        assert!(b.options.push);
        assert!(b.options.registry_auth.is_some());
        let auth = b.options.registry_auth.unwrap();
        assert_eq!(auth.username, "user");
        assert_eq!(auth.password, "pass");
    }

    #[test]
    fn test_builder_push_without_auth() {
        let b = create_test_builder().push_without_auth();

        assert!(b.options.push);
        assert!(b.options.registry_auth.is_none());
    }

    #[test]
    fn test_builder_layers() {
        // Layer caching is on by default.
        let b = create_test_builder();
        assert!(b.options.layers);

        // Can be switched off...
        let b = b.layers(false);
        assert!(!b.options.layers);

        // ...and back on again.
        let b = b.layers(true);
        assert!(b.options.layers);
    }

    #[test]
    fn test_builder_cache_from() {
        let b = create_test_builder();
        assert!(b.options.cache_from.is_none());

        let b = b.cache_from("registry.example.com/myapp:cache");
        assert_eq!(
            b.options.cache_from.as_deref(),
            Some("registry.example.com/myapp:cache")
        );
    }

    #[test]
    fn test_builder_cache_to() {
        let b = create_test_builder();
        assert!(b.options.cache_to.is_none());

        let b = b.cache_to("registry.example.com/myapp:cache");
        assert_eq!(
            b.options.cache_to.as_deref(),
            Some("registry.example.com/myapp:cache")
        );
    }

    #[test]
    fn test_builder_cache_ttl() {
        use std::time::Duration;

        let b = create_test_builder();
        assert!(b.options.cache_ttl.is_none());

        let b = b.cache_ttl(Duration::from_secs(3600));
        assert_eq!(b.options.cache_ttl, Some(Duration::from_secs(3600)));
    }

    #[test]
    fn test_builder_cache_options_chaining() {
        use std::time::Duration;

        let b = create_test_builder()
            .layers(true)
            .cache_from("registry.example.com/cache:input")
            .cache_to("registry.example.com/cache:output")
            .cache_ttl(Duration::from_secs(7200))
            .no_cache();

        assert!(b.options.layers);
        assert_eq!(
            b.options.cache_from.as_deref(),
            Some("registry.example.com/cache:input")
        );
        assert_eq!(
            b.options.cache_to.as_deref(),
            Some("registry.example.com/cache:output")
        );
        assert_eq!(b.options.cache_ttl, Some(Duration::from_secs(7200)));
        assert!(b.options.no_cache);
    }

    #[test]
    fn test_chrono_lite_timestamp() {
        let parsed: u64 = chrono_lite_timestamp()
            .parse()
            .expect("Should be a valid u64");
        // Sanity check: later than Nov 2023 (1_700_000_000 seconds).
        assert!(parsed > 1_700_000_000);
    }
}