Skip to main content

zlayer_builder/
builder.rs

1//! `ImageBuilder` - High-level API for building container images
2//!
3//! This module provides the [`ImageBuilder`] type which orchestrates the full
4//! container image build process, from Dockerfile parsing through buildah
5//! execution to final image creation.
6//!
7//! # Example
8//!
9//! ```no_run
10//! use zlayer_builder::{ImageBuilder, Runtime};
11//!
12//! #[tokio::main]
13//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
14//!     // Build from a Dockerfile
15//!     let image = ImageBuilder::new("./my-app").await?
16//!         .tag("myapp:latest")
17//!         .tag("myapp:v1.0.0")
18//!         .build()
19//!         .await?;
20//!
21//!     println!("Built image: {}", image.image_id);
22//!     Ok(())
23//! }
24//! ```
25//!
26//! # Using Runtime Templates
27//!
28//! ```no_run
29//! use zlayer_builder::{ImageBuilder, Runtime};
30//!
31//! #[tokio::main]
32//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
33//!     // Build using a runtime template (no Dockerfile needed)
34//!     let image = ImageBuilder::new("./my-node-app").await?
35//!         .runtime(Runtime::Node20)
36//!         .tag("myapp:latest")
37//!         .build()
38//!         .await?;
39//!
40//!     println!("Built image: {}", image.image_id);
41//!     Ok(())
42//! }
43//! ```
44//!
45//! # Multi-stage Builds with Target
46//!
47//! ```no_run
48//! use zlayer_builder::ImageBuilder;
49//!
50//! #[tokio::main]
51//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
52//!     // Build only up to a specific stage
53//!     let image = ImageBuilder::new("./my-app").await?
54//!         .target("builder")
55//!         .tag("myapp:builder")
56//!         .build()
57//!         .await?;
58//!
59//!     println!("Built intermediate image: {}", image.image_id);
60//!     Ok(())
61//! }
62//! ```
63//!
64//! # With TUI Progress Updates
65//!
66//! ```no_run
67//! use zlayer_builder::{ImageBuilder, BuildEvent};
68//! use std::sync::mpsc;
69//!
70//! #[tokio::main]
71//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
72//!     let (tx, rx) = mpsc::channel::<BuildEvent>();
73//!
74//!     // Start TUI in another thread
75//!     std::thread::spawn(move || {
76//!         // Process events from rx...
77//!         while let Ok(event) = rx.recv() {
78//!             println!("Event: {:?}", event);
79//!         }
80//!     });
81//!
//!     let _image = ImageBuilder::new("./my-app").await?
83//!         .tag("myapp:latest")
84//!         .with_events(tx)
85//!         .build()
86//!         .await?;
87//!
88//!     Ok(())
89//! }
90//! ```
91//!
92//! # With Cache Backend (requires `cache` feature)
93//!
94//! ```no_run,ignore
95//! use zlayer_builder::ImageBuilder;
96//!
97//! #[tokio::main]
98//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
99//!     let image = ImageBuilder::new("./my-app").await?
100//!         .with_cache_dir("/var/cache/zlayer")  // Use persistent disk cache
101//!         .tag("myapp:latest")
102//!         .build()
103//!         .await?;
104//!
105//!     println!("Built image: {}", image.image_id);
106//!     Ok(())
107//! }
108//! ```
109
110use std::collections::HashMap;
111use std::path::{Path, PathBuf};
112use std::sync::mpsc;
113use std::sync::Arc;
114
115use tokio::fs;
116#[cfg(feature = "local-registry")]
117use tracing::warn;
118use tracing::{debug, info, instrument};
119
120use crate::backend::BuildBackend;
121#[cfg(feature = "local-registry")]
122use crate::buildah::BuildahCommand;
123use crate::buildah::BuildahExecutor;
124use crate::dockerfile::{Dockerfile, RunMount};
125use crate::error::{BuildError, Result};
126use crate::templates::{get_template, Runtime};
127use crate::tui::BuildEvent;
128
129#[cfg(feature = "cache")]
130use zlayer_registry::cache::BlobCacheBackend;
131
132#[cfg(feature = "local-registry")]
133use zlayer_registry::LocalRegistry;
134
135#[cfg(feature = "local-registry")]
136use zlayer_registry::import_image;
137
/// Output from parsing a `ZImagefile` - either a Dockerfile for container builds
/// or a WASM build result for WebAssembly builds.
///
/// Most `ZImagefile` modes (runtime, single-stage, multi-stage) produce a
/// [`Dockerfile`] IR that is then built with buildah. WASM mode produces
/// a compiled artifact directly, bypassing the container build pipeline.
#[derive(Debug)]
pub enum BuildOutput {
    /// Standard container build - produces a Dockerfile to be built with buildah.
    Dockerfile(Dockerfile),
    /// WASM component build - already built, produces artifact path.
    WasmArtifact {
        /// Path to the compiled WASM binary.
        wasm_path: PathBuf,
        /// Path to the OCI artifact directory (if exported).
        oci_path: Option<PathBuf>,
        /// OCI manifest digest (e.g. `sha256:...`) for the exported artifact,
        /// or `None` if export did not run (should always be `Some` when
        /// `oci_path` is `Some`).
        manifest_digest: Option<String>,
        /// OCI artifact type (e.g. `application/vnd.wasm.component.v1+wasm`).
        artifact_type: Option<String>,
        /// Source language used.
        language: String,
        /// Whether optimization was applied.
        optimized: bool,
        /// Size of the output file in bytes.
        /// NOTE(review): presumably the size of the `.wasm` binary at
        /// `wasm_path`, not the OCI artifact — confirm against the producer.
        size: u64,
    },
}
168
/// Configuration for the layer cache backend.
///
/// This enum specifies which cache backend to use for storing and retrieving
/// cached layers during builds. The cache feature must be enabled for this
/// to be available. The default variant is [`CacheBackendConfig::Memory`].
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// // Use persistent disk cache
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_config(CacheBackendConfig::Persistent {
///         path: "/var/cache/zlayer".into(),
///     })
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "cache")]
#[derive(Debug, Clone, Default)]
pub enum CacheBackendConfig {
    /// In-memory cache (cleared when process exits).
    ///
    /// Useful for CI/CD environments where persistence isn't needed
    /// but you want to avoid re-downloading base image layers within
    /// a single build session.
    #[default]
    Memory,

    /// Persistent disk-based cache using redb.
    ///
    /// Requires the `cache-persistent` feature. Layers are stored on disk
    /// and persist across builds, significantly speeding up repeated builds.
    #[cfg(feature = "cache-persistent")]
    Persistent {
        /// Path to the cache directory or database file.
        /// If a directory, `blob_cache.redb` will be created inside it.
        path: PathBuf,
    },

    /// S3-compatible object storage backend.
    ///
    /// Requires the `cache-s3` feature. Useful for distributed build systems
    /// where multiple build machines need to share a cache.
    #[cfg(feature = "cache-s3")]
    S3 {
        /// S3 bucket name
        bucket: String,
        /// AWS region (optional, uses SDK default if not set)
        region: Option<String>,
        /// Custom endpoint URL (for S3-compatible services like R2, B2, `MinIO`)
        endpoint: Option<String>,
        /// Key prefix for cached blobs (default: "zlayer/layers/")
        prefix: Option<String>,
    },
}
228
/// Built image information returned after a successful build.
///
/// Plain data carrier: all fields are public and the type is `Clone`,
/// so callers can freely pass it around after the build completes.
#[derive(Debug, Clone)]
pub struct BuiltImage {
    /// Image ID (sha256:...)
    pub image_id: String,
    /// Applied tags
    pub tags: Vec<String>,
    /// Number of layers in the final image
    pub layer_count: usize,
    /// Total size in bytes (0 if not computed)
    pub size: u64,
    /// Build duration in milliseconds
    pub build_time_ms: u64,
    /// Whether this image is a manifest list (multi-arch).
    pub is_manifest: bool,
}
245
/// Registry authentication credentials.
///
/// Holds a username plus a password (or token) used to authenticate
/// against a container registry when pushing images.
#[derive(Clone)]
pub struct RegistryAuth {
    /// Registry username
    pub username: String,
    /// Registry password or token
    pub password: String,
}

// Manual `Debug` so the password/token is never leaked into logs or error
// output via `{:?}` formatting. This matters because `RegistryAuth` is
// carried inside `BuildOptions`, which derives `Debug` and is routinely
// logged/traced during builds.
impl std::fmt::Debug for RegistryAuth {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("RegistryAuth")
            .field("username", &self.username)
            .field("password", &"<redacted>")
            .finish()
    }
}

impl RegistryAuth {
    /// Create new registry authentication.
    ///
    /// # Arguments
    ///
    /// * `username` - registry username
    /// * `password` - registry password or bearer token
    pub fn new(username: impl Into<String>, password: impl Into<String>) -> Self {
        Self {
            username: username.into(),
            password: password.into(),
        }
    }
}
264
/// Strategy for pulling the base image before building.
///
/// Controls the `--pull` flag passed to `buildah from`. The default is
/// [`PullBaseMode::Newer`], matching the behaviour users expect from
/// modern build tools: fast when nothing has changed, correct when the
/// upstream base image has been republished.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum PullBaseMode {
    /// Pull only if the registry has a newer version (`--pull=newer`).
    /// Default behaviour.
    #[default]
    Newer,
    /// Always pull, even if a local copy exists (`--pull=always`).
    Always,
    /// Never pull — use whatever is in local storage (no `--pull` flag passed).
    Never,
}
282
/// Build options for customizing the image build process.
///
/// Construct via [`Default`] and override individual fields, or prefer the
/// fluent setter methods on `ImageBuilder` which populate this struct.
#[derive(Debug, Clone)]
#[allow(clippy::struct_excessive_bools)]
pub struct BuildOptions {
    /// Dockerfile path (default: Dockerfile in context)
    pub dockerfile: Option<PathBuf>,
    /// `ZImagefile` path (alternative to Dockerfile)
    pub zimagefile: Option<PathBuf>,
    /// Use runtime template instead of Dockerfile
    pub runtime: Option<Runtime>,
    /// Build arguments (ARG values)
    pub build_args: HashMap<String, String>,
    /// Target stage for multi-stage builds
    pub target: Option<String>,
    /// Image tags to apply
    pub tags: Vec<String>,
    /// Disable layer caching
    pub no_cache: bool,
    /// Push to registry after build
    pub push: bool,
    /// Registry auth (if pushing)
    pub registry_auth: Option<RegistryAuth>,
    /// Squash all layers into one
    pub squash: bool,
    /// Image format (oci or docker)
    pub format: Option<String>,
    /// Enable buildah layer caching (--layers flag for `buildah build`).
    /// Default: true
    ///
    /// Note: `ZLayer` uses manual container creation (`buildah from`, `buildah run`,
    /// `buildah commit`) rather than `buildah build`, so this flag is reserved
    /// for future use when/if we switch to `buildah build` (bud) command.
    pub layers: bool,
    /// Registry to pull cache from (--cache-from for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-from=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-from natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_from: Option<String>,
    /// Registry to push cache to (--cache-to for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-to=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-to natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_to: Option<String>,
    /// Maximum cache age (--cache-ttl for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-ttl=<duration>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement cache TTL support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-ttl natively
    /// 2. Implementing custom cache expiration logic for our layer caching system
    pub cache_ttl: Option<std::time::Duration>,
    /// Cache backend configuration (requires `cache` feature).
    ///
    /// When configured, the builder will store layer data in the specified
    /// cache backend for faster subsequent builds. This is separate from
    /// buildah's native caching and operates at the `ZLayer` level.
    ///
    /// # Integration Points
    ///
    /// The cache backend is used at several points during the build:
    ///
    /// 1. **Before instruction execution**: Check if a cached layer exists
    ///    for the (`instruction_hash`, `base_layer`) tuple
    /// 2. **After instruction execution**: Store the resulting layer data
    ///    in the cache for future builds
    /// 3. **Base image layers**: Cache pulled base image layers to avoid
    ///    re-downloading from registries
    ///
    /// TODO: Wire up cache lookups in the build loop once layer digests
    /// are properly computed and tracked.
    #[cfg(feature = "cache")]
    pub cache_backend_config: Option<CacheBackendConfig>,
    /// Default OCI/WASM-compatible registry to check for images before falling
    /// back to Docker Hub qualification.
    ///
    /// When set, the builder will probe this registry for short image names
    /// before qualifying them to `docker.io`. For example, if set to
    /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
    /// the builder will check `git.example.com:5000/myapp:latest` first.
    pub default_registry: Option<String>,
    /// Default cache mounts injected into all RUN instructions.
    /// These are merged with any step-level cache mounts (deduped by target path).
    pub default_cache_mounts: Vec<RunMount>,
    /// Number of retries for failed RUN steps (0 = no retries, default)
    pub retries: u32,
    /// Target platform for the build (e.g., "linux/amd64", "linux/arm64").
    /// When set, `buildah from` pulls the platform-specific image variant.
    pub platform: Option<String>,
    /// SHA-256 hash of the source Dockerfile/ZImagefile content.
    ///
    /// When set, the sandbox builder can skip a rebuild if the cached image
    /// was produced from identical source content (content-based invalidation).
    pub source_hash: Option<String>,
    /// How to handle base-image pulling during `buildah from`.
    ///
    /// Default: [`PullBaseMode::Newer`] — only pull if the registry has a
    /// newer version. Set to [`PullBaseMode::Always`] for CI builds that
    /// must always refresh, or [`PullBaseMode::Never`] for offline builds.
    pub pull: PullBaseMode,
}
395
396impl Default for BuildOptions {
397    fn default() -> Self {
398        Self {
399            dockerfile: None,
400            zimagefile: None,
401            runtime: None,
402            build_args: HashMap::new(),
403            target: None,
404            tags: Vec::new(),
405            no_cache: false,
406            push: false,
407            registry_auth: None,
408            squash: false,
409            format: None,
410            layers: true,
411            cache_from: None,
412            cache_to: None,
413            cache_ttl: None,
414            #[cfg(feature = "cache")]
415            cache_backend_config: None,
416            default_registry: None,
417            default_cache_mounts: Vec::new(),
418            retries: 0,
419            platform: None,
420            source_hash: None,
421            pull: PullBaseMode::default(),
422        }
423    }
424}
425
/// Image builder - orchestrates the full build process
///
/// `ImageBuilder` provides a fluent API for configuring and executing
/// container image builds using buildah as the backend.
///
/// # Build Process
///
/// 1. Parse Dockerfile (or use runtime template)
/// 2. Resolve target stages if specified
/// 3. Build each stage sequentially:
///    - Create working container from base image
///    - Execute each instruction
///    - Commit intermediate stages for COPY --from
/// 4. Commit final image with tags
/// 5. Push to registry if configured
/// 6. Clean up intermediate containers
///
/// # Cache Backend Integration (requires `cache` feature)
///
/// When a cache backend is configured, the builder can store and retrieve
/// cached layer data to speed up subsequent builds:
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
///
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_dir("/var/cache/zlayer")
///     .tag("myapp:latest");
/// ```
pub struct ImageBuilder {
    /// Build context directory
    context: PathBuf,
    /// Build options
    options: BuildOptions,
    /// Buildah executor (kept for backwards compatibility)
    #[allow(dead_code)]
    executor: BuildahExecutor,
    /// Event sender for TUI updates
    event_tx: Option<mpsc::Sender<BuildEvent>>,
    /// Pluggable build backend (buildah, sandbox, etc.).
    ///
    /// When set, the `build()` method delegates to this backend instead of
    /// using the inline buildah logic. Set automatically by `new()` via
    /// `detect_backend()`, or explicitly via `with_backend()`.
    backend: Option<Arc<dyn BuildBackend>>,
    /// Cache backend for layer caching (requires `cache` feature).
    ///
    /// When set, the builder will attempt to retrieve cached layers before
    /// executing instructions, and store results in the cache after execution.
    ///
    /// NOTE(review): `Arc<Box<dyn BlobCacheBackend>>` double-boxes the trait
    /// object; `Arc<dyn BlobCacheBackend>` would suffice — changing it would
    /// touch the public `with_*` setters, so left as-is for now.
    ///
    /// TODO: Implement cache lookups in the build loop. Currently the backend
    /// is stored but not actively used during builds. Integration points:
    /// - Check cache before executing RUN instructions
    /// - Store layer data after successful instruction execution
    /// - Cache base image layers pulled from registries
    #[cfg(feature = "cache")]
    cache_backend: Option<Arc<Box<dyn BlobCacheBackend>>>,
    /// Local OCI registry for checking cached images before remote pulls.
    #[cfg(feature = "local-registry")]
    local_registry: Option<LocalRegistry>,
}
487
488impl ImageBuilder {
    /// Create a new `ImageBuilder` with the given context directory
    ///
    /// The context directory should contain the Dockerfile (unless using
    /// a runtime template) and any files that will be copied into the image.
    ///
    /// # Arguments
    ///
    /// * `context` - Path to the build context directory
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - The context directory does not exist
    /// - Buildah is not installed or not accessible
    ///
    /// # Example
    ///
    /// ```no_run
    /// use zlayer_builder::ImageBuilder;
    ///
    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
    /// let builder = ImageBuilder::new("./my-project").await?;
    /// # Ok(())
    /// # }
    /// ```
    #[instrument(skip_all, fields(context = %context.as_ref().display()))]
    pub async fn new(context: impl AsRef<Path>) -> Result<Self> {
        let context = context.as_ref().to_path_buf();

        // Verify context exists before touching buildah at all.
        if !context.exists() {
            return Err(BuildError::ContextRead {
                path: context,
                source: std::io::Error::new(
                    std::io::ErrorKind::NotFound,
                    "Build context directory not found",
                ),
            });
        }

        // Detect the best available build backend for this platform.
        // Detection failure is non-fatal: `.ok()` turns it into `None`,
        // leaving the inline buildah path as the fallback.
        let backend = crate::backend::detect_backend().await.ok();

        // Initialize buildah executor.
        // On macOS, if buildah is not found we fall back to a default executor
        // (the backend will handle the actual build dispatch).
        // On every other OS a missing buildah is a hard error.
        let executor = match BuildahExecutor::new_async().await {
            Ok(exec) => exec,
            #[cfg(target_os = "macos")]
            Err(_) => {
                info!("Buildah not found on macOS; backend will handle build dispatch");
                BuildahExecutor::default()
            }
            #[cfg(not(target_os = "macos"))]
            Err(e) => return Err(e),
        };

        debug!("Created ImageBuilder for context: {}", context.display());

        Ok(Self {
            context,
            options: BuildOptions::default(),
            executor,
            event_tx: None,
            backend,
            #[cfg(feature = "cache")]
            cache_backend: None,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        })
    }
560
561    /// Create an `ImageBuilder` with a custom buildah executor
562    ///
563    /// This is useful for testing or when you need to configure
564    /// the executor with specific storage options. The executor is
565    /// wrapped in a [`BuildahBackend`] so the build dispatches through
566    /// the [`BuildBackend`] trait.
567    ///
568    /// # Errors
569    ///
570    /// Returns an error if the context directory does not exist.
571    pub fn with_executor(context: impl AsRef<Path>, executor: BuildahExecutor) -> Result<Self> {
572        let context = context.as_ref().to_path_buf();
573
574        if !context.exists() {
575            return Err(BuildError::ContextRead {
576                path: context,
577                source: std::io::Error::new(
578                    std::io::ErrorKind::NotFound,
579                    "Build context directory not found",
580                ),
581            });
582        }
583
584        let backend: Arc<dyn BuildBackend> = Arc::new(
585            crate::backend::BuildahBackend::with_executor(executor.clone()),
586        );
587
588        Ok(Self {
589            context,
590            options: BuildOptions::default(),
591            executor,
592            event_tx: None,
593            backend: Some(backend),
594            #[cfg(feature = "cache")]
595            cache_backend: None,
596            #[cfg(feature = "local-registry")]
597            local_registry: None,
598        })
599    }
600
601    /// Create an `ImageBuilder` with an explicit [`BuildBackend`].
602    ///
603    /// The backend is used for all build, push, tag, and manifest
604    /// operations. The internal `BuildahExecutor` is set to the default
605    /// (it is only used if no backend is set).
606    ///
607    /// # Errors
608    ///
609    /// Returns an error if the context directory does not exist.
610    pub fn with_backend(context: impl AsRef<Path>, backend: Arc<dyn BuildBackend>) -> Result<Self> {
611        let context = context.as_ref().to_path_buf();
612
613        if !context.exists() {
614            return Err(BuildError::ContextRead {
615                path: context,
616                source: std::io::Error::new(
617                    std::io::ErrorKind::NotFound,
618                    "Build context directory not found",
619                ),
620            });
621        }
622
623        Ok(Self {
624            context,
625            options: BuildOptions::default(),
626            executor: BuildahExecutor::default(),
627            event_tx: None,
628            backend: Some(backend),
629            #[cfg(feature = "cache")]
630            cache_backend: None,
631            #[cfg(feature = "local-registry")]
632            local_registry: None,
633        })
634    }
635
636    /// Set a custom Dockerfile path
637    ///
638    /// By default, the builder looks for a file named `Dockerfile` in the
639    /// context directory. Use this method to specify a different path.
640    ///
641    /// # Example
642    ///
643    /// ```no_run
644    /// # use zlayer_builder::ImageBuilder;
645    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
646    /// let builder = ImageBuilder::new("./my-project").await?
647    ///     .dockerfile("./my-project/Dockerfile.prod");
648    /// # Ok(())
649    /// # }
650    /// ```
651    #[must_use]
652    pub fn dockerfile(mut self, path: impl AsRef<Path>) -> Self {
653        self.options.dockerfile = Some(path.as_ref().to_path_buf());
654        self
655    }
656
657    /// Set a custom `ZImagefile` path
658    ///
659    /// `ZImagefiles` are a YAML-based alternative to Dockerfiles. When set,
660    /// the builder will parse the `ZImagefile` and convert it to the internal
661    /// Dockerfile IR for execution.
662    ///
663    /// # Example
664    ///
665    /// ```no_run
666    /// # use zlayer_builder::ImageBuilder;
667    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
668    /// let builder = ImageBuilder::new("./my-project").await?
669    ///     .zimagefile("./my-project/ZImagefile");
670    /// # Ok(())
671    /// # }
672    /// ```
673    #[must_use]
674    pub fn zimagefile(mut self, path: impl AsRef<Path>) -> Self {
675        self.options.zimagefile = Some(path.as_ref().to_path_buf());
676        self
677    }
678
679    /// Use a runtime template instead of a Dockerfile
680    ///
681    /// Runtime templates provide pre-built Dockerfiles for common
682    /// development environments. When set, the Dockerfile option is ignored.
683    ///
684    /// # Example
685    ///
686    /// ```no_run
687    /// use zlayer_builder::{ImageBuilder, Runtime};
688    ///
689    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
690    /// let builder = ImageBuilder::new("./my-node-app").await?
691    ///     .runtime(Runtime::Node20);
692    /// # Ok(())
693    /// # }
694    /// ```
695    #[must_use]
696    pub fn runtime(mut self, runtime: Runtime) -> Self {
697        self.options.runtime = Some(runtime);
698        self
699    }
700
701    /// Add a build argument
702    ///
703    /// Build arguments are passed to the Dockerfile and can be referenced
704    /// using the `ARG` instruction.
705    ///
706    /// # Example
707    ///
708    /// ```no_run
709    /// # use zlayer_builder::ImageBuilder;
710    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
711    /// let builder = ImageBuilder::new("./my-project").await?
712    ///     .build_arg("VERSION", "1.0.0")
713    ///     .build_arg("DEBUG", "false");
714    /// # Ok(())
715    /// # }
716    /// ```
717    #[must_use]
718    pub fn build_arg(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
719        self.options.build_args.insert(key.into(), value.into());
720        self
721    }
722
723    /// Set multiple build arguments at once
724    #[must_use]
725    pub fn build_args(mut self, args: HashMap<String, String>) -> Self {
726        self.options.build_args.extend(args);
727        self
728    }
729
730    /// Set the target stage for multi-stage builds
731    ///
732    /// When building a multi-stage Dockerfile, you can stop at a specific
733    /// stage instead of building all stages.
734    ///
735    /// # Example
736    ///
737    /// ```no_run
738    /// # use zlayer_builder::ImageBuilder;
739    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
740    /// // Dockerfile:
741    /// // FROM node:20 AS builder
742    /// // ...
743    /// // FROM node:20-slim AS runtime
744    /// // ...
745    ///
746    /// let builder = ImageBuilder::new("./my-project").await?
747    ///     .target("builder")
748    ///     .tag("myapp:builder");
749    /// # Ok(())
750    /// # }
751    /// ```
752    #[must_use]
753    pub fn target(mut self, stage: impl Into<String>) -> Self {
754        self.options.target = Some(stage.into());
755        self
756    }
757
758    /// Add an image tag
759    ///
760    /// Tags are applied to the final image. You can add multiple tags.
761    /// The first tag is used as the primary image name during commit.
762    ///
763    /// # Example
764    ///
765    /// ```no_run
766    /// # use zlayer_builder::ImageBuilder;
767    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
768    /// let builder = ImageBuilder::new("./my-project").await?
769    ///     .tag("myapp:latest")
770    ///     .tag("myapp:v1.0.0")
771    ///     .tag("registry.example.com/myapp:v1.0.0");
772    /// # Ok(())
773    /// # }
774    /// ```
775    #[must_use]
776    pub fn tag(mut self, tag: impl Into<String>) -> Self {
777        self.options.tags.push(tag.into());
778        self
779    }
780
781    /// Disable layer caching
782    ///
783    /// When enabled, all layers are rebuilt from scratch even if
784    /// they could be served from cache.
785    ///
786    /// Note: Currently this flag is tracked but not fully implemented in the
787    /// build process. `ZLayer` uses manual container creation (`buildah from`,
788    /// `buildah run`, `buildah commit`) which doesn't have built-in caching
789    /// like `buildah build` does. Future work could implement layer-level
790    /// caching by checking instruction hashes against previously built layers.
791    #[must_use]
792    pub fn no_cache(mut self) -> Self {
793        self.options.no_cache = true;
794        self
795    }
796
797    /// Set the base-image pull strategy for the build.
798    ///
799    /// By default, `buildah from` is invoked with `--pull=newer`, so an
800    /// up-to-date local base image is reused but a newer one on the
801    /// registry will be fetched. Pass [`PullBaseMode::Always`] to force a
802    /// fresh pull on every build, or [`PullBaseMode::Never`] to stay fully
803    /// offline.
804    #[must_use]
805    pub fn pull(mut self, mode: PullBaseMode) -> Self {
806        self.options.pull = mode;
807        self
808    }
809
810    /// Enable or disable layer caching
811    ///
812    /// This controls the `--layers` flag for buildah. When enabled (default),
813    /// buildah can cache and reuse intermediate layers.
814    ///
815    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
816    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
817    /// flag is reserved for future use when/if we switch to `buildah build`.
818    ///
819    /// # Example
820    ///
821    /// ```no_run
822    /// # use zlayer_builder::ImageBuilder;
823    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
824    /// let builder = ImageBuilder::new("./my-project").await?
825    ///     .layers(false)  // Disable layer caching
826    ///     .tag("myapp:latest");
827    /// # Ok(())
828    /// # }
829    /// ```
830    #[must_use]
831    pub fn layers(mut self, enable: bool) -> Self {
832        self.options.layers = enable;
833        self
834    }
835
836    /// Set registry to pull cache from
837    ///
838    /// This corresponds to buildah's `--cache-from` flag, which allows
839    /// pulling cached layers from a remote registry to speed up builds.
840    ///
841    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
842    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
843    /// option is reserved for future implementation.
844    ///
845    /// TODO: Implement remote cache support. This would require either:
846    /// 1. Switching to `buildah build` command which supports --cache-from natively
847    /// 2. Implementing custom layer caching with registry pull for intermediate layers
848    ///
849    /// # Example
850    ///
851    /// ```no_run
852    /// # use zlayer_builder::ImageBuilder;
853    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
854    /// let builder = ImageBuilder::new("./my-project").await?
855    ///     .cache_from("registry.example.com/myapp:cache")
856    ///     .tag("myapp:latest");
857    /// # Ok(())
858    /// # }
859    /// ```
860    #[must_use]
861    pub fn cache_from(mut self, registry: impl Into<String>) -> Self {
862        self.options.cache_from = Some(registry.into());
863        self
864    }
865
866    /// Set registry to push cache to
867    ///
868    /// This corresponds to buildah's `--cache-to` flag, which allows
869    /// pushing cached layers to a remote registry for future builds to use.
870    ///
871    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
872    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
873    /// option is reserved for future implementation.
874    ///
875    /// TODO: Implement remote cache support. This would require either:
876    /// 1. Switching to `buildah build` command which supports --cache-to natively
877    /// 2. Implementing custom layer caching with registry push for intermediate layers
878    ///
879    /// # Example
880    ///
881    /// ```no_run
882    /// # use zlayer_builder::ImageBuilder;
883    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
884    /// let builder = ImageBuilder::new("./my-project").await?
885    ///     .cache_to("registry.example.com/myapp:cache")
886    ///     .tag("myapp:latest");
887    /// # Ok(())
888    /// # }
889    /// ```
890    #[must_use]
891    pub fn cache_to(mut self, registry: impl Into<String>) -> Self {
892        self.options.cache_to = Some(registry.into());
893        self
894    }
895
896    /// Set maximum cache age
897    ///
898    /// This corresponds to buildah's `--cache-ttl` flag, which sets the
899    /// maximum age for cached layers before they are considered stale.
900    ///
901    /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
902    /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
903    /// option is reserved for future implementation.
904    ///
905    /// TODO: Implement cache TTL support. This would require either:
906    /// 1. Switching to `buildah build` command which supports --cache-ttl natively
907    /// 2. Implementing custom cache expiration logic for our layer caching system
908    ///
909    /// # Example
910    ///
911    /// ```no_run
912    /// # use zlayer_builder::ImageBuilder;
913    /// # use std::time::Duration;
914    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
915    /// let builder = ImageBuilder::new("./my-project").await?
916    ///     .cache_ttl(Duration::from_secs(3600 * 24))  // 24 hours
917    ///     .tag("myapp:latest");
918    /// # Ok(())
919    /// # }
920    /// ```
921    #[must_use]
922    pub fn cache_ttl(mut self, ttl: std::time::Duration) -> Self {
923        self.options.cache_ttl = Some(ttl);
924        self
925    }
926
927    /// Push the image to a registry after building
928    ///
929    /// # Arguments
930    ///
931    /// * `auth` - Registry authentication credentials
932    ///
933    /// # Example
934    ///
935    /// ```no_run
936    /// use zlayer_builder::{ImageBuilder, RegistryAuth};
937    ///
938    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
939    /// let builder = ImageBuilder::new("./my-project").await?
940    ///     .tag("registry.example.com/myapp:v1.0.0")
941    ///     .push(RegistryAuth::new("user", "password"));
942    /// # Ok(())
943    /// # }
944    /// ```
945    #[must_use]
946    pub fn push(mut self, auth: RegistryAuth) -> Self {
947        self.options.push = true;
948        self.options.registry_auth = Some(auth);
949        self
950    }
951
952    /// Enable pushing without authentication
953    ///
954    /// Use this for registries that don't require authentication
955    /// (e.g., local registries, insecure registries).
956    #[must_use]
957    pub fn push_without_auth(mut self) -> Self {
958        self.options.push = true;
959        self.options.registry_auth = None;
960        self
961    }
962
963    /// Set a default OCI/WASM-compatible registry to check for images.
964    ///
965    /// When set, the builder will probe this registry for short image names
966    /// before qualifying them to `docker.io`. For example, if set to
967    /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
968    /// the builder will check `git.example.com:5000/myapp:latest` first.
969    #[must_use]
970    pub fn default_registry(mut self, registry: impl Into<String>) -> Self {
971        self.options.default_registry = Some(registry.into());
972        self
973    }
974
975    /// Set a local OCI registry for image resolution.
976    ///
977    /// When set, the builder checks the local registry for cached images
978    /// before pulling from remote registries.
979    #[cfg(feature = "local-registry")]
980    #[must_use]
981    pub fn with_local_registry(mut self, registry: LocalRegistry) -> Self {
982        self.local_registry = Some(registry);
983        self
984    }
985
986    /// Squash all layers into a single layer
987    ///
988    /// This reduces image size but loses layer caching benefits.
989    #[must_use]
990    pub fn squash(mut self) -> Self {
991        self.options.squash = true;
992        self
993    }
994
995    /// Set the image format
996    ///
997    /// Valid values are "oci" (default) or "docker".
998    #[must_use]
999    pub fn format(mut self, format: impl Into<String>) -> Self {
1000        self.options.format = Some(format.into());
1001        self
1002    }
1003
1004    /// Set default cache mounts to inject into all RUN instructions
1005    #[must_use]
1006    pub fn default_cache_mounts(mut self, mounts: Vec<RunMount>) -> Self {
1007        self.options.default_cache_mounts = mounts;
1008        self
1009    }
1010
1011    /// Set the number of retries for failed RUN steps
1012    #[must_use]
1013    pub fn retries(mut self, retries: u32) -> Self {
1014        self.options.retries = retries;
1015        self
1016    }
1017
1018    /// Set the target platform for cross-architecture builds.
1019    #[must_use]
1020    pub fn platform(mut self, platform: impl Into<String>) -> Self {
1021        self.options.platform = Some(platform.into());
1022        self
1023    }
1024
1025    /// Set a pre-computed source hash for content-based cache invalidation.
1026    ///
1027    /// When set, the sandbox builder can skip a full rebuild if the cached
1028    /// image was produced from identical source content.
1029    #[must_use]
1030    pub fn source_hash(mut self, hash: impl Into<String>) -> Self {
1031        self.options.source_hash = Some(hash.into());
1032        self
1033    }
1034
1035    /// Set an event sender for TUI progress updates
1036    ///
1037    /// Events will be sent as the build progresses, allowing you to
1038    /// display a progress UI or log build status.
1039    ///
1040    /// # Example
1041    ///
1042    /// ```no_run
1043    /// use zlayer_builder::{ImageBuilder, BuildEvent};
1044    /// use std::sync::mpsc;
1045    ///
1046    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1047    /// let (tx, rx) = mpsc::channel::<BuildEvent>();
1048    ///
1049    /// let builder = ImageBuilder::new("./my-project").await?
1050    ///     .tag("myapp:latest")
1051    ///     .with_events(tx);
1052    /// # Ok(())
1053    /// # }
1054    /// ```
1055    #[must_use]
1056    pub fn with_events(mut self, tx: mpsc::Sender<BuildEvent>) -> Self {
1057        self.event_tx = Some(tx);
1058        self
1059    }
1060
1061    /// Configure a persistent disk cache backend for layer caching.
1062    ///
1063    /// When configured, the builder will store layer data on disk at the
1064    /// specified path. This cache persists across builds and significantly
1065    /// speeds up repeated builds of similar images.
1066    ///
1067    /// Requires the `cache-persistent` feature to be enabled.
1068    ///
1069    /// # Arguments
1070    ///
1071    /// * `path` - Path to the cache directory. If a directory, creates
1072    ///   `blob_cache.redb` inside it. If a file path, uses it directly.
1073    ///
1074    /// # Example
1075    ///
1076    /// ```no_run,ignore
1077    /// use zlayer_builder::ImageBuilder;
1078    ///
1079    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1080    /// let builder = ImageBuilder::new("./my-project").await?
1081    ///     .with_cache_dir("/var/cache/zlayer")
1082    ///     .tag("myapp:latest");
1083    /// # Ok(())
1084    /// # }
1085    /// ```
1086    ///
1087    /// # Integration Status
1088    ///
1089    /// TODO: The cache backend is currently stored but not actively used
1090    /// during builds. Future work will wire up:
1091    /// - Cache lookups before executing RUN instructions
1092    /// - Storing layer data after successful execution
1093    /// - Caching base image layers from registry pulls
1094    #[cfg(feature = "cache-persistent")]
1095    #[must_use]
1096    pub fn with_cache_dir(mut self, path: impl AsRef<Path>) -> Self {
1097        self.options.cache_backend_config = Some(CacheBackendConfig::Persistent {
1098            path: path.as_ref().to_path_buf(),
1099        });
1100        debug!(
1101            "Configured persistent cache at: {}",
1102            path.as_ref().display()
1103        );
1104        self
1105    }
1106
1107    /// Configure an in-memory cache backend for layer caching.
1108    ///
1109    /// The in-memory cache is cleared when the process exits, but can
1110    /// speed up builds within a single session by caching intermediate
1111    /// layers and avoiding redundant operations.
1112    ///
1113    /// Requires the `cache` feature to be enabled.
1114    ///
1115    /// # Example
1116    ///
1117    /// ```no_run,ignore
1118    /// use zlayer_builder::ImageBuilder;
1119    ///
1120    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1121    /// let builder = ImageBuilder::new("./my-project").await?
1122    ///     .with_memory_cache()
1123    ///     .tag("myapp:latest");
1124    /// # Ok(())
1125    /// # }
1126    /// ```
1127    ///
1128    /// # Integration Status
1129    ///
1130    /// TODO: The cache backend is currently stored but not actively used
1131    /// during builds. See `with_cache_dir` for integration status details.
1132    #[cfg(feature = "cache")]
1133    #[must_use]
1134    pub fn with_memory_cache(mut self) -> Self {
1135        self.options.cache_backend_config = Some(CacheBackendConfig::Memory);
1136        debug!("Configured in-memory cache");
1137        self
1138    }
1139
1140    /// Configure an S3-compatible storage backend for layer caching.
1141    ///
1142    /// This is useful for distributed build systems where multiple build
1143    /// machines need to share a layer cache. Supports AWS S3, Cloudflare R2,
1144    /// Backblaze B2, `MinIO`, and other S3-compatible services.
1145    ///
1146    /// Requires the `cache-s3` feature to be enabled.
1147    ///
1148    /// # Arguments
1149    ///
1150    /// * `bucket` - S3 bucket name
1151    /// * `region` - AWS region (optional, uses SDK default if not set)
1152    ///
1153    /// # Example
1154    ///
1155    /// ```no_run,ignore
1156    /// use zlayer_builder::ImageBuilder;
1157    ///
1158    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1159    /// let builder = ImageBuilder::new("./my-project").await?
1160    ///     .with_s3_cache("my-build-cache", Some("us-west-2"))
1161    ///     .tag("myapp:latest");
1162    /// # Ok(())
1163    /// # }
1164    /// ```
1165    ///
1166    /// # Integration Status
1167    ///
1168    /// TODO: The cache backend is currently stored but not actively used
1169    /// during builds. See `with_cache_dir` for integration status details.
1170    #[cfg(feature = "cache-s3")]
1171    #[must_use]
1172    pub fn with_s3_cache(mut self, bucket: impl Into<String>, region: Option<String>) -> Self {
1173        self.options.cache_backend_config = Some(CacheBackendConfig::S3 {
1174            bucket: bucket.into(),
1175            region,
1176            endpoint: None,
1177            prefix: None,
1178        });
1179        debug!("Configured S3 cache");
1180        self
1181    }
1182
1183    /// Configure an S3-compatible storage backend with custom endpoint.
1184    ///
1185    /// Use this method for S3-compatible services that require a custom
1186    /// endpoint URL (e.g., Cloudflare R2, `MinIO`, local development).
1187    ///
1188    /// Requires the `cache-s3` feature to be enabled.
1189    ///
1190    /// # Arguments
1191    ///
1192    /// * `bucket` - S3 bucket name
1193    /// * `endpoint` - Custom endpoint URL
1194    /// * `region` - Region (required for some S3-compatible services)
1195    ///
1196    /// # Example
1197    ///
1198    /// ```no_run,ignore
1199    /// use zlayer_builder::ImageBuilder;
1200    ///
1201    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1202    /// // Cloudflare R2
1203    /// let builder = ImageBuilder::new("./my-project").await?
1204    ///     .with_s3_cache_endpoint(
1205    ///         "my-bucket",
1206    ///         "https://accountid.r2.cloudflarestorage.com",
1207    ///         Some("auto".to_string()),
1208    ///     )
1209    ///     .tag("myapp:latest");
1210    /// # Ok(())
1211    /// # }
1212    /// ```
1213    #[cfg(feature = "cache-s3")]
1214    #[must_use]
1215    pub fn with_s3_cache_endpoint(
1216        mut self,
1217        bucket: impl Into<String>,
1218        endpoint: impl Into<String>,
1219        region: Option<String>,
1220    ) -> Self {
1221        self.options.cache_backend_config = Some(CacheBackendConfig::S3 {
1222            bucket: bucket.into(),
1223            region,
1224            endpoint: Some(endpoint.into()),
1225            prefix: None,
1226        });
1227        debug!("Configured S3 cache with custom endpoint");
1228        self
1229    }
1230
1231    /// Configure a custom cache backend configuration.
1232    ///
1233    /// This is the most flexible way to configure the cache backend,
1234    /// allowing full control over all cache settings.
1235    ///
1236    /// Requires the `cache` feature to be enabled.
1237    ///
1238    /// # Example
1239    ///
1240    /// ```no_run,ignore
1241    /// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
1242    ///
1243    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1244    /// let builder = ImageBuilder::new("./my-project").await?
1245    ///     .with_cache_config(CacheBackendConfig::Memory)
1246    ///     .tag("myapp:latest");
1247    /// # Ok(())
1248    /// # }
1249    /// ```
1250    #[cfg(feature = "cache")]
1251    #[must_use]
1252    pub fn with_cache_config(mut self, config: CacheBackendConfig) -> Self {
1253        self.options.cache_backend_config = Some(config);
1254        debug!("Configured custom cache backend");
1255        self
1256    }
1257
1258    /// Set an already-initialized cache backend directly.
1259    ///
1260    /// This is useful when you have a pre-configured cache backend instance
1261    /// that you want to share across multiple builders or when you need
1262    /// fine-grained control over cache initialization.
1263    ///
1264    /// Requires the `cache` feature to be enabled.
1265    ///
1266    /// # Example
1267    ///
1268    /// ```no_run,ignore
1269    /// use zlayer_builder::ImageBuilder;
1270    /// use zlayer_registry::cache::BlobCache;
1271    /// use std::sync::Arc;
1272    ///
1273    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1274    /// let cache = Arc::new(Box::new(BlobCache::new()?) as Box<dyn zlayer_registry::cache::BlobCacheBackend>);
1275    ///
1276    /// let builder = ImageBuilder::new("./my-project").await?
1277    ///     .with_cache_backend(cache)
1278    ///     .tag("myapp:latest");
1279    /// # Ok(())
1280    /// # }
1281    /// ```
1282    #[cfg(feature = "cache")]
1283    #[must_use]
1284    pub fn with_cache_backend(mut self, backend: Arc<Box<dyn BlobCacheBackend>>) -> Self {
1285        self.cache_backend = Some(backend);
1286        debug!("Configured pre-initialized cache backend");
1287        self
1288    }
1289
1290    /// Run the build
1291    ///
1292    /// This executes the complete build process:
1293    /// 1. Parse Dockerfile or load runtime template
1294    /// 2. Build all required stages
1295    /// 3. Commit and tag the final image
1296    /// 4. Push to registry if configured
1297    /// 5. Clean up intermediate containers
1298    ///
1299    /// # Errors
1300    ///
1301    /// Returns an error if:
1302    /// - Dockerfile parsing fails
1303    /// - A buildah command fails
1304    /// - Target stage is not found
1305    /// - Registry push fails
1306    ///
1307    /// # Panics
1308    ///
1309    /// Panics if an instruction output is missing after all retry attempts (internal invariant).
1310    #[instrument(skip(self), fields(context = %self.context.display()))]
1311    #[allow(clippy::too_many_lines)]
1312    pub async fn build(self) -> Result<BuiltImage> {
1313        let start_time = std::time::Instant::now();
1314
1315        info!("Starting build in context: {}", self.context.display());
1316
1317        // 1. Get build output (Dockerfile IR or WASM artifact)
1318        let build_output = self.get_build_output().await?;
1319
1320        // If this is a WASM build, return early with the artifact info.
1321        if let BuildOutput::WasmArtifact {
1322            wasm_path,
1323            // `oci_path` drives the optional push branch below; when the
1324            // `local-registry` feature is off the push branch is compiled
1325            // out, so the binding is unused.
1326            #[cfg_attr(not(feature = "local-registry"), allow(unused_variables))]
1327            oci_path,
1328            manifest_digest,
1329            artifact_type: _,
1330            language,
1331            optimized,
1332            size,
1333        } = build_output
1334        {
1335            #[allow(clippy::cast_possible_truncation)]
1336            let build_time_ms = start_time.elapsed().as_millis() as u64;
1337
1338            // Prefer a user tag as the image id; otherwise fall back to the
1339            // OCI manifest digest (sha256:...), which is what WASM tooling
1340            // references in `oci-archive:` / `oci:` URIs. As a last resort
1341            // (no tag, no digest — only possible if export somehow produced
1342            // no digest) use a `wasm-path:` marker so downstream code can
1343            // tell this was a WASM build.
1344            let image_id = if let Some(tag) = self.options.tags.first() {
1345                tag.clone()
1346            } else if let Some(digest) = manifest_digest.as_ref() {
1347                format!("wasm:{digest}")
1348            } else {
1349                format!("wasm-path:{}", wasm_path.display())
1350            };
1351
1352            // Push WASM OCI artifact(s) to the remote registry if the user
1353            // both supplied tags and requested a push (e.g. `zlayer build
1354            // -t ghcr.io/org/mod:v1 --push`). Mirrors the container flow at
1355            // `BuildahBackend::build_image` where `options.push` drives
1356            // `push_image_internal` for each tag.
1357            //
1358            // Gated on `local-registry` because `ImagePuller::push_wasm` is
1359            // behind the `zlayer-registry/local` feature, matching the other
1360            // push-to-registry sites in this crate.
1361            #[cfg(feature = "local-registry")]
1362            if oci_path.is_some() && self.options.push && !self.options.tags.is_empty() {
1363                let oci_dir = oci_path.as_ref().expect("checked oci_path.is_some() above");
1364                self.push_wasm_oci(&wasm_path, oci_dir).await?;
1365            }
1366
1367            self.send_event(BuildEvent::BuildComplete {
1368                image_id: image_id.clone(),
1369            });
1370
1371            info!(
1372                "WASM build completed in {}ms: {} ({}, {} bytes, optimized={}, image_id={})",
1373                build_time_ms,
1374                wasm_path.display(),
1375                language,
1376                size,
1377                optimized,
1378                image_id,
1379            );
1380
1381            return Ok(BuiltImage {
1382                image_id,
1383                tags: self.options.tags.clone(),
1384                layer_count: 1,
1385                size,
1386                build_time_ms,
1387                is_manifest: false,
1388            });
1389        }
1390
1391        // Extract the Dockerfile from the BuildOutput.
1392        let BuildOutput::Dockerfile(dockerfile) = build_output else {
1393            unreachable!("WasmArtifact case handled above");
1394        };
1395        debug!("Parsed Dockerfile with {} stages", dockerfile.stages.len());
1396
1397        // Delegate the build to the backend.
1398        let backend = self
1399            .backend
1400            .as_ref()
1401            .ok_or_else(|| BuildError::BuildahNotFound {
1402                message: "No build backend configured".into(),
1403            })?;
1404
1405        info!("Delegating build to {} backend", backend.name());
1406        let built = backend
1407            .build_image(
1408                &self.context,
1409                &dockerfile,
1410                &self.options,
1411                self.event_tx.clone(),
1412            )
1413            .await?;
1414
1415        // Import the built image into ZLayer's local registry and blob cache
1416        // so the runtime can find it without pulling from a remote registry.
1417        //
1418        // A user who wired up a local registry clearly wants built images to
1419        // live there — if the import fails (almost always EACCES on the
1420        // registry dir for an unprivileged user), bail with the registry path
1421        // in the message instead of silently producing a build that the
1422        // daemon can't find.
1423        #[cfg(feature = "local-registry")]
1424        if let Some(ref registry) = self.local_registry {
1425            if !built.tags.is_empty() {
1426                let tmp_path = std::env::temp_dir().join(format!(
1427                    "zlayer-build-{}-{}.tar",
1428                    std::process::id(),
1429                    start_time.elapsed().as_nanos()
1430                ));
1431
1432                // Export the image from buildah's store to an OCI archive.
1433                let export_tag = &built.tags[0];
1434                let dest = format!("oci-archive:{}", tmp_path.display());
1435                let push_cmd = BuildahCommand::push_to(export_tag, &dest);
1436
1437                self.executor
1438                    .execute_checked(&push_cmd)
1439                    .await
1440                    .map_err(|e| BuildError::RegistryError {
1441                        message: format!(
1442                            "failed to export image to OCI archive for local registry \
1443                             import at {}: {e}",
1444                            registry.root().display()
1445                        ),
1446                    })?;
1447
1448                // Resolve the blob cache backend (if available).
1449                let blob_cache: Option<&dyn zlayer_registry::cache::BlobCacheBackend> =
1450                    self.cache_backend.as_ref().map(|arc| arc.as_ref().as_ref());
1451
1452                let import_result = async {
1453                    for tag in &built.tags {
1454                        let info =
1455                            import_image(registry, &tmp_path, Some(tag.as_str()), blob_cache)
1456                                .await
1457                                .map_err(|e| BuildError::RegistryError {
1458                                    message: format!(
1459                                        "failed to import '{tag}' into local registry at {}: {e}",
1460                                        registry.root().display()
1461                                    ),
1462                                })?;
1463                        info!(
1464                            tag = %tag,
1465                            digest = %info.digest,
1466                            "Imported into local registry"
1467                        );
1468                    }
1469                    Ok::<(), BuildError>(())
1470                }
1471                .await;
1472
1473                // Clean up the temporary archive regardless of whether the
1474                // import succeeded (best-effort; warn on failure).
1475                if let Err(e) = fs::remove_file(&tmp_path).await {
1476                    warn!(path = %tmp_path.display(), error = %e, "Failed to remove temp OCI archive");
1477                }
1478
1479                import_result?;
1480            }
1481        }
1482
1483        Ok(built)
1484    }
1485
1486    /// Detection order:
1487    /// 1. If `runtime` is set -> use template string -> parse as Dockerfile
1488    /// 2. If `zimagefile` is explicitly set -> read & parse `ZImagefile` -> convert
1489    /// 3. If a file called `ZImagefile` exists in the context dir -> same as (2)
1490    /// 4. Fall back to reading a Dockerfile (from `dockerfile` option or default)
1491    ///
1492    /// Returns [`BuildOutput::Dockerfile`] for container builds or
1493    /// [`BuildOutput::WasmArtifact`] for WASM builds.
1494    async fn get_build_output(&self) -> Result<BuildOutput> {
1495        // (a) Runtime template takes highest priority.
1496        if let Some(runtime) = &self.options.runtime {
1497            debug!("Using runtime template: {}", runtime);
1498            let content = get_template(*runtime);
1499            return Ok(BuildOutput::Dockerfile(Dockerfile::parse(content)?));
1500        }
1501
1502        // (b) Explicit ZImagefile path.
1503        if let Some(ref zimage_path) = self.options.zimagefile {
1504            debug!("Reading ZImagefile: {}", zimage_path.display());
1505            let content =
1506                fs::read_to_string(zimage_path)
1507                    .await
1508                    .map_err(|e| BuildError::ContextRead {
1509                        path: zimage_path.clone(),
1510                        source: e,
1511                    })?;
1512            let zimage = crate::zimage::parse_zimagefile(&content)?;
1513            return self.handle_zimage(&zimage).await;
1514        }
1515
1516        // (c) Auto-detect ZImagefile in context directory.
1517        let auto_zimage_path = self.context.join("ZImagefile");
1518        if auto_zimage_path.exists() {
1519            debug!(
1520                "Found ZImagefile in context: {}",
1521                auto_zimage_path.display()
1522            );
1523            let content = fs::read_to_string(&auto_zimage_path).await.map_err(|e| {
1524                BuildError::ContextRead {
1525                    path: auto_zimage_path,
1526                    source: e,
1527                }
1528            })?;
1529            let zimage = crate::zimage::parse_zimagefile(&content)?;
1530            return self.handle_zimage(&zimage).await;
1531        }
1532
1533        // (d) Fall back to Dockerfile.
1534        let dockerfile_path = self
1535            .options
1536            .dockerfile
1537            .clone()
1538            .unwrap_or_else(|| self.context.join("Dockerfile"));
1539
1540        debug!("Reading Dockerfile: {}", dockerfile_path.display());
1541
1542        let content =
1543            fs::read_to_string(&dockerfile_path)
1544                .await
1545                .map_err(|e| BuildError::ContextRead {
1546                    path: dockerfile_path,
1547                    source: e,
1548                })?;
1549
1550        Ok(BuildOutput::Dockerfile(Dockerfile::parse(&content)?))
1551    }
1552
1553    /// Convert a parsed [`ZImage`] into a [`BuildOutput`].
1554    ///
1555    /// Handles all four `ZImage` modes:
1556    /// - **Runtime** mode: delegates to the template system -> [`BuildOutput::Dockerfile`]
1557    /// - **Single-stage / Multi-stage**: converts via [`zimage_to_dockerfile`] -> [`BuildOutput::Dockerfile`]
1558    /// - **WASM** mode: builds a WASM component -> [`BuildOutput::WasmArtifact`]
1559    ///
1560    /// Any `build:` directives are resolved first by spawning nested builds.
1561    async fn handle_zimage(&self, zimage: &crate::zimage::ZImage) -> Result<BuildOutput> {
1562        // Runtime mode: delegate to template system.
1563        if let Some(ref runtime_name) = zimage.runtime {
1564            let rt = Runtime::from_name(runtime_name).ok_or_else(|| {
1565                BuildError::zimagefile_validation(format!(
1566                    "unknown runtime '{runtime_name}' in ZImagefile"
1567                ))
1568            })?;
1569            let content = get_template(rt);
1570            return Ok(BuildOutput::Dockerfile(Dockerfile::parse(content)?));
1571        }
1572
1573        // WASM mode: build a WASM component.
1574        if zimage.wasm.is_some() {
1575            return self.handle_wasm_build(zimage).await;
1576        }
1577
1578        // Resolve any `build:` directives to concrete base image tags.
1579        let resolved = self.resolve_build_directives(zimage).await?;
1580
1581        // Single-stage or multi-stage: convert to Dockerfile IR directly.
1582        Ok(BuildOutput::Dockerfile(
1583            crate::zimage::zimage_to_dockerfile(&resolved)?,
1584        ))
1585    }
1586
    /// Build a WASM component from the `ZImagefile` wasm configuration.
    ///
    /// Converts [`ZWasmConfig`](crate::zimage::ZWasmConfig) into a
    /// [`WasmBuildConfig`](crate::wasm_builder::WasmBuildConfig) and invokes
    /// the WASM builder pipeline. When `wasm.oci` is left enabled, the
    /// compiled component is additionally exported as an OCI artifact and
    /// written to an on-disk OCI image layout next to the `.wasm` output.
    ///
    /// # Errors
    ///
    /// Returns a ZImagefile validation error for an unrecognized
    /// `wasm.language`, and propagates failures from the WASM build, the
    /// OCI export, or the layout write.
    #[allow(clippy::too_many_lines)]
    async fn handle_wasm_build(&self, zimage: &crate::zimage::ZImage) -> Result<BuildOutput> {
        use crate::wasm_builder::{build_wasm, WasiTarget, WasmBuildConfig, WasmLanguage};
        use zlayer_registry::wasm::WasiVersion;
        use zlayer_registry::{export_wasm_as_oci, WasmExportConfig};

        // Caller guarantees `zimage.wasm` is `Some`.
        let wasm_config = zimage.wasm.as_ref().expect(
            "handle_wasm_build invoked without a wasm section in ZImage; caller must check",
        );

        info!("ZImagefile specifies WASM mode, running WASM build");

        // Convert target string to WasiTarget enum. Anything other than the
        // literal "preview1" (including typos) falls back to Preview2.
        let target = match wasm_config.target.as_str() {
            "preview1" => WasiTarget::Preview1,
            _ => WasiTarget::Preview2,
        };

        // Resolve language: parse from string or leave as None for auto-detection.
        let language = wasm_config
            .language
            .as_deref()
            .and_then(WasmLanguage::from_name);

        // An explicitly-set language that failed to parse is a user error;
        // only an absent language triggers auto-detection.
        if let Some(ref lang_str) = wasm_config.language {
            if language.is_none() {
                return Err(BuildError::zimagefile_validation(format!(
                    "unknown WASM language '{lang_str}'. Supported: rust, go, python, \
                     typescript, assemblyscript, c, zig"
                )));
            }
        }

        // Build the WasmBuildConfig. `opt_level` defaults to "Oz"
        // (size-optimized) when the ZImagefile does not specify one.
        let mut config = WasmBuildConfig {
            language,
            target,
            optimize: wasm_config.optimize,
            opt_level: wasm_config
                .opt_level
                .clone()
                .unwrap_or_else(|| "Oz".to_string()),
            wit_path: wasm_config.wit.as_ref().map(PathBuf::from),
            output_path: wasm_config.output.as_ref().map(PathBuf::from),
            world: wasm_config.world.clone(),
            features: wasm_config.features.clone(),
            build_args: wasm_config.build_args.clone(),
            pre_build: Vec::new(),
            post_build: Vec::new(),
            adapter: wasm_config.adapter.as_ref().map(PathBuf::from),
        };

        // Convert ZCommand pre/post build steps to Vec<Vec<String>>.
        for cmd in &wasm_config.pre_build {
            config.pre_build.push(zcommand_to_args(cmd));
        }
        for cmd in &wasm_config.post_build {
            config.post_build.push(zcommand_to_args(cmd));
        }

        // Build the WASM component.
        let result = build_wasm(&self.context, config).await?;

        let language_name = result.language.name().to_string();
        let wasm_path = result.wasm_path;
        let size = result.size;

        info!(
            "WASM build complete: {} ({} bytes, optimized={})",
            wasm_path.display(),
            size,
            wasm_config.optimize
        );

        // `wasm.oci: false` opts out of OCI artifact packaging and push —
        // the compilation pipeline above still runs (with caching, wasm-opt,
        // and the preview1 -> preview2 adapter), we simply skip the layout
        // write and leave `oci_path`/`manifest_digest`/`artifact_type` as
        // `None`. The push branch in `build()` keys off `oci_path.is_some()`
        // so skipping it here transparently disables push for this build.
        if !wasm_config.oci {
            info!(
                "WASM OCI export skipped (wasm.oci = false); raw .wasm at {}",
                wasm_path.display()
            );
            return Ok(BuildOutput::WasmArtifact {
                wasm_path,
                oci_path: None,
                manifest_digest: None,
                artifact_type: None,
                language: language_name,
                optimized: wasm_config.optimize,
                size,
            });
        }

        // Derive a module name for OCI annotations. Prefer the first tag's
        // repository component (`repo` from `repo:version` or `host/repo`),
        // falling back to the wasm file stem, then "wasm-module".
        let module_name = self
            .options
            .tags
            .first()
            .map(|t| module_name_from_tag(t))
            .or_else(|| {
                wasm_path
                    .file_stem()
                    .and_then(|s| s.to_str())
                    .map(str::to_string)
            })
            .unwrap_or_else(|| "wasm-module".to_string());

        // Map the selected WASI target to a WasiVersion so the export uses
        // the correct artifact_type without re-analyzing the binary.
        let wasi_version = match target {
            WasiTarget::Preview1 => Some(WasiVersion::Preview1),
            WasiTarget::Preview2 => Some(WasiVersion::Preview2),
        };

        // Carry ZImage labels across as OCI manifest annotations, matching
        // the behaviour of container image builds that emit LABEL -> annotations.
        let annotations: HashMap<String, String> = zimage.labels.clone();

        let export_config = WasmExportConfig {
            wasm_path: wasm_path.clone(),
            module_name: module_name.clone(),
            wasi_version,
            annotations,
        };

        let export =
            export_wasm_as_oci(&export_config)
                .await
                .map_err(|e| BuildError::RegistryError {
                    message: format!("failed to export WASM as OCI artifact: {e}"),
                })?;

        // Write the OCI image layout to disk next to the WASM file. The
        // layout directory name is `<module>-oci`, mirroring the CLI
        // `zlayer wasm export` layout in bin/zlayer/src/commands/wasm.rs.
        // If the wasm path has no parent, fall back to the build context dir.
        let layout_parent = wasm_path
            .parent()
            .map_or_else(|| self.context.clone(), Path::to_path_buf);
        let oci_dir = layout_parent.join(format!("{module_name}-oci"));
        write_wasm_oci_layout(&oci_dir, &export, &module_name).await?;

        info!(
            manifest_digest = %export.manifest_digest,
            artifact_type = %export.artifact_type,
            oci_path = %oci_dir.display(),
            "WASM OCI artifact written"
        );

        Ok(BuildOutput::WasmArtifact {
            wasm_path,
            oci_path: Some(oci_dir),
            manifest_digest: Some(export.manifest_digest),
            artifact_type: Some(export.artifact_type),
            language: language_name,
            optimized: wasm_config.optimize,
            size,
        })
    }
1756
1757    /// Resolve `build:` directives in a `ZImage` by running nested builds.
1758    ///
1759    /// For each `build:` directive (top-level or per-stage), this method:
1760    /// 1. Determines the build context directory
1761    /// 2. Auto-detects the build file (`ZImagefile` > Dockerfile) unless specified
1762    /// 3. Spawns a nested `ImageBuilder` to build the context
1763    /// 4. Tags the result and replaces `build` with `base`
1764    async fn resolve_build_directives(
1765        &self,
1766        zimage: &crate::zimage::ZImage,
1767    ) -> Result<crate::zimage::ZImage> {
1768        let mut resolved = zimage.clone();
1769
1770        // Resolve top-level `build:` directive.
1771        if let Some(ref build_ctx) = resolved.build {
1772            let tag = self.run_nested_build(build_ctx, "toplevel").await?;
1773            resolved.base = Some(tag);
1774            resolved.build = None;
1775        }
1776
1777        // Resolve per-stage `build:` directives.
1778        if let Some(ref mut stages) = resolved.stages {
1779            for (name, stage) in stages.iter_mut() {
1780                if let Some(ref build_ctx) = stage.build {
1781                    let tag = self.run_nested_build(build_ctx, name).await?;
1782                    stage.base = Some(tag);
1783                    stage.build = None;
1784                }
1785            }
1786        }
1787
1788        Ok(resolved)
1789    }
1790
1791    /// Run a nested build from a `build:` directive and return the resulting image tag.
1792    fn run_nested_build<'a>(
1793        &'a self,
1794        build_ctx: &'a crate::zimage::types::ZBuildContext,
1795        stage_name: &'a str,
1796    ) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<String>> + Send + 'a>> {
1797        Box::pin(self.run_nested_build_inner(build_ctx, stage_name))
1798    }
1799
    /// Implementation of [`Self::run_nested_build`] (split out so the public
    /// wrapper can box the future for async recursion).
    ///
    /// Builds the directive's context with a fresh nested [`ImageBuilder`],
    /// tags the result `zlayer-build-dep-<stage>:<timestamp>`, and returns
    /// that tag so the caller can substitute it as the stage's `base` image.
    ///
    /// # Errors
    ///
    /// Returns [`BuildError::ContextRead`] when the context directory does
    /// not exist; otherwise propagates any error from the nested build.
    async fn run_nested_build_inner(
        &self,
        build_ctx: &crate::zimage::types::ZBuildContext,
        stage_name: &str,
    ) -> Result<String> {
        // The directive's context path is resolved relative to this
        // builder's own context directory.
        let context_dir = build_ctx.context_dir(&self.context);

        if !context_dir.exists() {
            return Err(BuildError::ContextRead {
                path: context_dir,
                source: std::io::Error::new(
                    std::io::ErrorKind::NotFound,
                    format!(
                        "build context directory not found for build directive in '{stage_name}'"
                    ),
                ),
            });
        }

        info!(
            "Building nested image for '{}' from context: {}",
            stage_name,
            context_dir.display()
        );

        // Create a tag for the nested build result. The timestamp suffix
        // keeps repeated builds from colliding on the same tag.
        let tag = format!(
            "zlayer-build-dep-{}:{}",
            stage_name,
            chrono_lite_timestamp()
        );

        // Create nested builder.
        let mut nested = ImageBuilder::new(&context_dir).await?;
        nested = nested.tag(&tag);

        // Apply explicit build file if specified: a `.yml`/`.yaml` extension
        // or a name starting with "ZImagefile" selects the ZImagefile path;
        // anything else is treated as a Dockerfile.
        if let Some(file) = build_ctx.file() {
            let file_path = context_dir.join(file);
            if std::path::Path::new(file).extension().is_some_and(|ext| {
                ext.eq_ignore_ascii_case("yml") || ext.eq_ignore_ascii_case("yaml")
            }) || file.starts_with("ZImagefile")
            {
                nested = nested.zimagefile(file_path);
            } else {
                nested = nested.dockerfile(file_path);
            }
        }

        // Apply build args.
        for (key, value) in build_ctx.args() {
            nested = nested.build_arg(&key, &value);
        }

        // Propagate default registry if set.
        if let Some(ref reg) = self.options.default_registry {
            nested = nested.default_registry(reg.clone());
        }

        // Run the nested build.
        let result = nested.build().await?;
        info!(
            "Nested build for '{}' completed: {}",
            stage_name, result.image_id
        );

        Ok(tag)
    }
1868
    /// Push the WASM OCI artifact produced by `handle_wasm_build` to every
    /// user-supplied registry tag.
    ///
    /// Mirrors the container push flow in [`BuildahBackend::build_image`]:
    /// when `options.push` is true, each tag in `options.tags` is pushed.
    /// Tags that look like bare image names (no registry host, e.g.
    /// `myapp:wasm`) are skipped with an info log, matching how bare tags
    /// are treated elsewhere — a registryless tag has nowhere to be pushed.
    ///
    /// Re-runs [`export_wasm_as_oci`] on the produced `wasm_path` to obtain
    /// the [`WasmExportResult`] blobs required by [`ImagePuller::push_wasm`].
    /// The export is deterministic (same WASM binary produces the same
    /// blobs and digests), so the digests match the layout on disk at
    /// `oci_dir` that A1.2 wrote.
    ///
    /// # Errors
    ///
    /// Returns [`BuildError::RegistryError`] if the re-export fails, the
    /// blob cache cannot be created, or any individual tag push fails
    /// (the first failure aborts the remaining pushes).
    ///
    /// [`BuildahBackend::build_image`]: crate::backend::buildah::BuildahBackend
    /// [`export_wasm_as_oci`]: zlayer_registry::export_wasm_as_oci
    /// [`WasmExportResult`]: zlayer_registry::WasmExportResult
    /// [`ImagePuller::push_wasm`]: zlayer_registry::ImagePuller::push_wasm
    #[cfg(feature = "local-registry")]
    async fn push_wasm_oci(&self, wasm_path: &Path, oci_dir: &Path) -> Result<()> {
        use zlayer_registry::wasm::WasiVersion;
        use zlayer_registry::{export_wasm_as_oci, BlobCache, ImagePuller, WasmExportConfig};

        // Derive the module name the same way `handle_wasm_build` did so the
        // re-exported artifact carries identical OCI annotations.
        let module_name = self
            .options
            .tags
            .first()
            .map(|t| module_name_from_tag(t))
            .or_else(|| {
                wasm_path
                    .file_stem()
                    .and_then(|s| s.to_str())
                    .map(str::to_string)
            })
            .unwrap_or_else(|| "wasm-module".to_string());

        // Reconstruct the export result from the on-disk WASM binary. The
        // `wasi_version` is left `None` so it is re-detected from the binary
        // (matches whatever A1.2 wrote unless the user mutated the file).
        let export_config = WasmExportConfig {
            wasm_path: wasm_path.to_path_buf(),
            module_name,
            wasi_version: None::<WasiVersion>,
            annotations: HashMap::new(),
        };
        let export =
            export_wasm_as_oci(&export_config)
                .await
                .map_err(|e| BuildError::RegistryError {
                    message: format!(
                        "failed to re-export WASM for push from {}: {e}",
                        wasm_path.display()
                    ),
                })?;

        // Build the puller once; reuse for every tag.
        let cache = BlobCache::new().map_err(|e| BuildError::RegistryError {
            message: format!("failed to create blob cache for WASM push: {e}"),
        })?;
        let puller = ImagePuller::new(cache);

        for tag in &self.options.tags {
            if !tag_has_registry_host(tag) {
                info!(
                    "Skipping WASM push for bare tag '{}' (no registry host); \
                     OCI layout still available at {}",
                    tag,
                    oci_dir.display()
                );
                continue;
            }

            // Same credentials (or anonymous fallback) for every tag.
            let oci_auth = Self::resolve_wasm_push_auth(self.options.registry_auth.as_ref());

            info!("Pushing WASM artifact: {}", tag);
            let push_result = puller
                .push_wasm(tag, &export, &oci_auth)
                .await
                .map_err(|e| BuildError::RegistryError {
                    message: format!("failed to push WASM artifact '{tag}': {e}"),
                })?;
            info!(
                "Pushed WASM artifact: {} (manifest digest: {})",
                tag, push_result.manifest_digest
            );
        }

        Ok(())
    }
1961
1962    /// Resolve registry auth for a WASM push.
1963    ///
1964    /// Uses the explicitly provided credentials when set; otherwise falls
1965    /// back to anonymous. Mirrors the minimal behaviour of the buildah push
1966    /// path (`--creds user:pass` when provided, otherwise let the registry
1967    /// decide).
1968    #[cfg(feature = "local-registry")]
1969    fn resolve_wasm_push_auth(auth: Option<&RegistryAuth>) -> zlayer_registry::RegistryAuth {
1970        match auth {
1971            Some(a) => zlayer_registry::RegistryAuth::Basic(a.username.clone(), a.password.clone()),
1972            None => zlayer_registry::RegistryAuth::Anonymous,
1973        }
1974    }
1975
1976    /// Send an event to the TUI (if configured)
1977    fn send_event(&self, event: BuildEvent) {
1978        if let Some(tx) = &self.event_tx {
1979            // Ignore send errors - the receiver may have been dropped
1980            let _ = tx.send(event);
1981        }
1982    }
1983}
1984
/// Generate a timestamp-based name component: seconds since the Unix epoch
/// rendered as a decimal string (e.g. `"1700000000"`).
///
/// Falls back to `"0"` if the system clock reports a time before the epoch,
/// so tag generation never panics.
fn chrono_lite_timestamp() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
        .to_string()
}
1993
1994/// Convert a [`ZCommand`](crate::zimage::ZCommand) into a vector of string arguments
1995/// suitable for passing to [`WasmBuildConfig`](crate::wasm_builder::WasmBuildConfig)
1996/// pre/post build command lists.
1997fn zcommand_to_args(cmd: &crate::zimage::ZCommand) -> Vec<String> {
1998    match cmd {
1999        crate::zimage::ZCommand::Shell(s) => {
2000            vec!["/bin/sh".to_string(), "-c".to_string(), s.clone()]
2001        }
2002        crate::zimage::ZCommand::Exec(args) => args.clone(),
2003    }
2004}
2005
/// Extract a short "module name" suitable for OCI annotations from an image
/// tag. Strips any registry host, leading path segments, and tag/digest.
///
/// Examples:
/// - `myapp:latest` -> `myapp`
/// - `ghcr.io/org/myapp:v1.2.3` -> `myapp`
/// - `myapp@sha256:...` -> `myapp`
fn module_name_from_tag(tag: &str) -> String {
    // Keep only the final path component, then peel off any `:tag`
    // and `@digest` suffixes in turn.
    let mut name = tag.rsplit('/').next().unwrap_or(tag);
    if let Some((repo, _version)) = name.split_once(':') {
        name = repo;
    }
    if let Some((repo, _digest)) = name.split_once('@') {
        name = repo;
    }
    name.to_string()
}
2019
/// Heuristic: does `tag` include an explicit registry host?
///
/// Used to decide which tags are push-eligible. A tag counts as
/// registry-qualified when it contains at least one `/` and its first path
/// component looks like a host: it has a `.` (FQDN such as `ghcr.io` or
/// `registry.example.com`), a `:` (host:port such as `localhost:5000`), or
/// is the literal `localhost`. Bare names like `myapp:wasm` and
/// Docker-Hub-style `org/app:v1` are rejected because they name no explicit
/// registry to push to.
#[cfg(feature = "local-registry")]
fn tag_has_registry_host(tag: &str) -> bool {
    match tag.split_once('/') {
        // No `/` at all: the whole string is `name[:tag]`, no host part.
        None => false,
        // Host-like first component: FQDN dot, host:port colon, or localhost.
        Some((host, _path)) => host == "localhost" || host.contains('.') || host.contains(':'),
    }
}
2040
/// Write an OCI image layout directory (`oci-layout`, `index.json`,
/// `blobs/sha256/...`) for a WASM artifact on disk. This mirrors the layout
/// emitted by the `zlayer wasm export` CLI command so the directory can be
/// consumed by tools that expect a standard OCI layout.
///
/// `ref_name` is recorded as the `org.opencontainers.image.ref.name`
/// annotation on the index entry.
///
/// # Errors
///
/// Returns [`BuildError::ContextRead`] (wrapping the offending path) for
/// filesystem failures, and [`BuildError::RegistryError`] if serializing the
/// layout metadata to JSON fails.
async fn write_wasm_oci_layout(
    oci_dir: &Path,
    export: &zlayer_registry::WasmExportResult,
    ref_name: &str,
) -> Result<()> {
    // Factory for io::Error -> BuildError::ContextRead converters that
    // capture the path being touched, so every fs error names its file.
    let map_io = |path: PathBuf| {
        move |e: std::io::Error| BuildError::ContextRead {
            path: path.clone(),
            source: e,
        }
    };

    fs::create_dir_all(oci_dir)
        .await
        .map_err(map_io(oci_dir.to_path_buf()))?;

    // `oci-layout` marker file.
    let layout_marker = oci_dir.join("oci-layout");
    let oci_layout = serde_json::json!({ "imageLayoutVersion": "1.0.0" });
    fs::write(
        &layout_marker,
        serde_json::to_vec_pretty(&oci_layout).map_err(|e| BuildError::RegistryError {
            message: format!("failed to serialize oci-layout marker: {e}"),
        })?,
    )
    .await
    .map_err(map_io(layout_marker.clone()))?;

    // `blobs/sha256/` directory.
    let blobs_dir = oci_dir.join("blobs").join("sha256");
    fs::create_dir_all(&blobs_dir)
        .await
        .map_err(map_io(blobs_dir.clone()))?;

    // Write config, wasm-layer, and manifest blobs under their digests.
    // Each blob file is named by the hex hash, i.e. the digest with any
    // "sha256:" prefix stripped.
    let write_blob = |digest: &str, data: &[u8]| {
        let hash = digest.strip_prefix("sha256:").unwrap_or(digest).to_string();
        let path = blobs_dir.join(hash);
        // Copy the bytes so the returned future owns its data.
        let data = data.to_vec();
        async move {
            fs::write(&path, &data)
                .await
                .map_err(map_io(path.clone()))?;
            Ok::<(), BuildError>(())
        }
    };

    write_blob(&export.config_digest, &export.config_blob).await?;
    write_blob(&export.wasm_layer_digest, &export.wasm_binary).await?;
    write_blob(&export.manifest_digest, &export.manifest_json).await?;

    // Write `index.json` pointing at the manifest.
    let index = serde_json::json!({
        "schemaVersion": 2,
        "mediaType": "application/vnd.oci.image.index.v1+json",
        "manifests": [{
            "mediaType": "application/vnd.oci.image.manifest.v1+json",
            "digest": export.manifest_digest,
            "size": export.manifest_size,
            "artifactType": export.artifact_type,
            "annotations": {
                "org.opencontainers.image.ref.name": ref_name,
            }
        }]
    });
    let index_path = oci_dir.join("index.json");
    fs::write(
        &index_path,
        serde_json::to_vec_pretty(&index).map_err(|e| BuildError::RegistryError {
            message: format!("failed to serialize OCI index.json: {e}"),
        })?,
    )
    .await
    .map_err(map_io(index_path.clone()))?;

    Ok(())
}
2122
/// Unit tests for builder option defaults, method chaining, and the
/// timestamp helper. These construct an `ImageBuilder` directly (bypassing
/// the async `new`) so no buildah binary or filesystem is required.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_registry_auth_new() {
        let auth = RegistryAuth::new("user", "pass");
        assert_eq!(auth.username, "user");
        assert_eq!(auth.password, "pass");
    }

    #[test]
    fn test_build_options_default() {
        let opts = BuildOptions::default();
        assert!(opts.dockerfile.is_none());
        assert!(opts.zimagefile.is_none());
        assert!(opts.runtime.is_none());
        assert!(opts.build_args.is_empty());
        assert!(opts.target.is_none());
        assert!(opts.tags.is_empty());
        assert!(!opts.no_cache);
        assert!(!opts.push);
        assert!(!opts.squash);
        // New cache-related fields
        assert!(opts.layers); // Default is true
        assert!(opts.cache_from.is_none());
        assert!(opts.cache_to.is_none());
        assert!(opts.cache_ttl.is_none());
        // Cache backend config (only with cache feature)
        #[cfg(feature = "cache")]
        assert!(opts.cache_backend_config.is_none());
    }

    // Test fixture: a builder over a dummy context path with default options
    // and no backend/event channel attached.
    fn create_test_builder() -> ImageBuilder {
        // Create a minimal builder for testing (without async initialization)
        ImageBuilder {
            context: PathBuf::from("/tmp/test"),
            options: BuildOptions::default(),
            executor: BuildahExecutor::with_path("/usr/bin/buildah"),
            event_tx: None,
            backend: None,
            #[cfg(feature = "cache")]
            cache_backend: None,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        }
    }

    // Builder method chaining tests
    #[test]
    fn test_builder_chaining() {
        let mut builder = create_test_builder();

        builder = builder
            .dockerfile("./Dockerfile.test")
            .runtime(Runtime::Node20)
            .build_arg("VERSION", "1.0")
            .target("builder")
            .tag("myapp:latest")
            .tag("myapp:v1")
            .no_cache()
            .squash()
            .format("oci");

        assert_eq!(
            builder.options.dockerfile,
            Some(PathBuf::from("./Dockerfile.test"))
        );
        assert_eq!(builder.options.runtime, Some(Runtime::Node20));
        assert_eq!(
            builder.options.build_args.get("VERSION"),
            Some(&"1.0".to_string())
        );
        assert_eq!(builder.options.target, Some("builder".to_string()));
        assert_eq!(builder.options.tags.len(), 2);
        assert!(builder.options.no_cache);
        assert!(builder.options.squash);
        assert_eq!(builder.options.format, Some("oci".to_string()));
    }

    #[test]
    fn test_builder_push_with_auth() {
        let mut builder = create_test_builder();
        builder = builder.push(RegistryAuth::new("user", "pass"));

        assert!(builder.options.push);
        assert!(builder.options.registry_auth.is_some());
        let auth = builder.options.registry_auth.unwrap();
        assert_eq!(auth.username, "user");
        assert_eq!(auth.password, "pass");
    }

    #[test]
    fn test_builder_push_without_auth() {
        let mut builder = create_test_builder();
        builder = builder.push_without_auth();

        assert!(builder.options.push);
        assert!(builder.options.registry_auth.is_none());
    }

    #[test]
    fn test_builder_layers() {
        let mut builder = create_test_builder();
        // Default is true
        assert!(builder.options.layers);

        // Disable layers
        builder = builder.layers(false);
        assert!(!builder.options.layers);

        // Re-enable layers
        builder = builder.layers(true);
        assert!(builder.options.layers);
    }

    #[test]
    fn test_builder_cache_from() {
        let mut builder = create_test_builder();
        assert!(builder.options.cache_from.is_none());

        builder = builder.cache_from("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_from,
            Some("registry.example.com/myapp:cache".to_string())
        );
    }

    #[test]
    fn test_builder_cache_to() {
        let mut builder = create_test_builder();
        assert!(builder.options.cache_to.is_none());

        builder = builder.cache_to("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_to,
            Some("registry.example.com/myapp:cache".to_string())
        );
    }

    #[test]
    fn test_builder_cache_ttl() {
        use std::time::Duration;

        let mut builder = create_test_builder();
        assert!(builder.options.cache_ttl.is_none());

        builder = builder.cache_ttl(Duration::from_secs(3600));
        assert_eq!(builder.options.cache_ttl, Some(Duration::from_secs(3600)));
    }

    #[test]
    fn test_builder_cache_options_chaining() {
        use std::time::Duration;

        let builder = create_test_builder()
            .layers(true)
            .cache_from("registry.example.com/cache:input")
            .cache_to("registry.example.com/cache:output")
            .cache_ttl(Duration::from_secs(7200))
            .no_cache();

        assert!(builder.options.layers);
        assert_eq!(
            builder.options.cache_from,
            Some("registry.example.com/cache:input".to_string())
        );
        assert_eq!(
            builder.options.cache_to,
            Some("registry.example.com/cache:output".to_string())
        );
        assert_eq!(builder.options.cache_ttl, Some(Duration::from_secs(7200)));
        assert!(builder.options.no_cache);
    }

    #[test]
    fn test_chrono_lite_timestamp() {
        let ts = chrono_lite_timestamp();
        // Should be a valid number
        let parsed: u64 = ts.parse().expect("Should be a valid u64");
        // Should be reasonably recent (after 2024)
        assert!(parsed > 1_700_000_000);
    }
}