// zlayer_builder/builder.rs
1//! `ImageBuilder` - High-level API for building container images
2//!
3//! This module provides the [`ImageBuilder`] type which orchestrates the full
4//! container image build process, from Dockerfile parsing through buildah
5//! execution to final image creation.
6//!
7//! # Example
8//!
9//! ```no_run
10//! use zlayer_builder::{ImageBuilder, Runtime};
11//!
12//! #[tokio::main]
13//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
14//! // Build from a Dockerfile
15//! let image = ImageBuilder::new("./my-app").await?
16//! .tag("myapp:latest")
17//! .tag("myapp:v1.0.0")
18//! .build()
19//! .await?;
20//!
21//! println!("Built image: {}", image.image_id);
22//! Ok(())
23//! }
24//! ```
25//!
26//! # Using Runtime Templates
27//!
28//! ```no_run
29//! use zlayer_builder::{ImageBuilder, Runtime};
30//!
31//! #[tokio::main]
32//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
33//! // Build using a runtime template (no Dockerfile needed)
34//! let image = ImageBuilder::new("./my-node-app").await?
35//! .runtime(Runtime::Node20)
36//! .tag("myapp:latest")
37//! .build()
38//! .await?;
39//!
40//! println!("Built image: {}", image.image_id);
41//! Ok(())
42//! }
43//! ```
44//!
45//! # Multi-stage Builds with Target
46//!
47//! ```no_run
48//! use zlayer_builder::ImageBuilder;
49//!
50//! #[tokio::main]
51//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
52//! // Build only up to a specific stage
53//! let image = ImageBuilder::new("./my-app").await?
54//! .target("builder")
55//! .tag("myapp:builder")
56//! .build()
57//! .await?;
58//!
59//! println!("Built intermediate image: {}", image.image_id);
60//! Ok(())
61//! }
62//! ```
63//!
64//! # With TUI Progress Updates
65//!
66//! ```no_run
67//! use zlayer_builder::{ImageBuilder, BuildEvent};
68//! use std::sync::mpsc;
69//!
70//! #[tokio::main]
71//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
72//! let (tx, rx) = mpsc::channel::<BuildEvent>();
73//!
74//! // Start TUI in another thread
75//! std::thread::spawn(move || {
76//! // Process events from rx...
77//! while let Ok(event) = rx.recv() {
78//! println!("Event: {:?}", event);
79//! }
80//! });
81//!
82//! let image = ImageBuilder::new("./my-app").await?
83//! .tag("myapp:latest")
84//! .with_events(tx)
85//! .build()
86//! .await?;
87//!
88//! Ok(())
89//! }
90//! ```
91//!
92//! # With Cache Backend (requires `cache` feature)
93//!
94//! ```no_run,ignore
95//! use zlayer_builder::ImageBuilder;
96//!
97//! #[tokio::main]
98//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
99//! let image = ImageBuilder::new("./my-app").await?
100//! .with_cache_dir("/var/cache/zlayer") // Use persistent disk cache
101//! .tag("myapp:latest")
102//! .build()
103//! .await?;
104//!
105//! println!("Built image: {}", image.image_id);
106//! Ok(())
107//! }
108//! ```
109
110use std::collections::HashMap;
111use std::path::{Path, PathBuf};
112use std::sync::mpsc;
113use std::sync::Arc;
114
115use tokio::fs;
116#[cfg(feature = "local-registry")]
117use tracing::warn;
118use tracing::{debug, info, instrument};
119
120use crate::backend::BuildBackend;
121#[cfg(feature = "local-registry")]
122use crate::buildah::BuildahCommand;
123use crate::buildah::BuildahExecutor;
124use crate::dockerfile::{Dockerfile, RunMount};
125use crate::error::{BuildError, Result};
126use crate::templates::{get_template, Runtime};
127use crate::tui::BuildEvent;
128
129#[cfg(feature = "cache")]
130use zlayer_registry::cache::BlobCacheBackend;
131
132#[cfg(feature = "local-registry")]
133use zlayer_registry::LocalRegistry;
134
135#[cfg(feature = "local-registry")]
136use zlayer_registry::import_image;
137
/// Output from parsing a `ZImagefile` - either a Dockerfile for container builds
/// or a WASM build result for WebAssembly builds.
///
/// Most `ZImagefile` modes (runtime, single-stage, multi-stage) produce a
/// [`Dockerfile`] IR that is then built with buildah. WASM mode produces
/// a compiled artifact directly, bypassing the container build pipeline.
#[derive(Debug)]
pub enum BuildOutput {
    /// Standard container build - produces a Dockerfile to be built with buildah.
    Dockerfile(Dockerfile),
    /// WASM component build - already built, produces artifact path.
    ///
    /// All fields describe a finished artifact; no buildah invocation follows
    /// for this variant.
    WasmArtifact {
        /// Path to the compiled WASM binary.
        wasm_path: PathBuf,
        /// Path to the OCI artifact directory (if exported).
        oci_path: Option<PathBuf>,
        /// OCI manifest digest (e.g. `sha256:...`) for the exported artifact,
        /// or `None` if export did not run (should always be `Some` when
        /// `oci_path` is `Some`).
        manifest_digest: Option<String>,
        /// OCI artifact type (e.g. `application/vnd.wasm.component.v1+wasm`).
        artifact_type: Option<String>,
        /// Source language the component was built from.
        language: String,
        /// Whether optimization was applied.
        optimized: bool,
        /// Size of the output file in bytes.
        size: u64,
    },
}
168
/// Configuration for the layer cache backend.
///
/// This enum specifies which cache backend to use for storing and retrieving
/// cached layers during builds. The cache feature must be enabled for this
/// to be available. Variants other than [`CacheBackendConfig::Memory`] are
/// additionally gated behind their own cargo features.
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// // Use persistent disk cache
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_config(CacheBackendConfig::Persistent {
///         path: "/var/cache/zlayer".into(),
///     })
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "cache")]
#[derive(Debug, Clone, Default)]
pub enum CacheBackendConfig {
    /// In-memory cache (cleared when process exits).
    ///
    /// Useful for CI/CD environments where persistence isn't needed
    /// but you want to avoid re-downloading base image layers within
    /// a single build session.
    #[default]
    Memory,

    /// Persistent disk-based cache using redb.
    ///
    /// Requires the `cache-persistent` feature. Layers are stored on disk
    /// and persist across builds, significantly speeding up repeated builds.
    #[cfg(feature = "cache-persistent")]
    Persistent {
        /// Path to the cache directory or database file.
        /// If a directory, `blob_cache.redb` will be created inside it.
        path: PathBuf,
    },

    /// S3-compatible object storage backend.
    ///
    /// Requires the `cache-s3` feature. Useful for distributed build systems
    /// where multiple build machines need to share a cache.
    #[cfg(feature = "cache-s3")]
    S3 {
        /// S3 bucket name
        bucket: String,
        /// AWS region (optional, uses SDK default if not set)
        region: Option<String>,
        /// Custom endpoint URL (for S3-compatible services like R2, B2, `MinIO`)
        endpoint: Option<String>,
        /// Key prefix for cached blobs (default: "zlayer/layers/")
        prefix: Option<String>,
    },
}
228
/// Built image information returned after a successful build.
#[derive(Debug, Clone)]
pub struct BuiltImage {
    /// Image ID (`sha256:...`)
    pub image_id: String,
    /// Tags applied to the final image
    pub tags: Vec<String>,
    /// Number of layers in the final image
    pub layer_count: usize,
    /// Total size in bytes (0 if not computed)
    pub size: u64,
    /// Build duration in milliseconds
    pub build_time_ms: u64,
    /// Whether this image is a manifest list (multi-arch).
    pub is_manifest: bool,
}
245
/// Registry authentication credentials.
///
/// A username plus password (or token) pair used when the builder pushes
/// the finished image to a remote registry.
#[derive(Debug, Clone)]
pub struct RegistryAuth {
    /// Registry username
    pub username: String,
    /// Registry password or token
    pub password: String,
}

impl RegistryAuth {
    /// Build a credential pair from any string-like username and password.
    pub fn new(username: impl Into<String>, password: impl Into<String>) -> Self {
        let (username, password) = (username.into(), password.into());
        Self { username, password }
    }
}
264
/// Strategy for pulling the base image before building.
///
/// Controls the `--pull` flag passed to `buildah from`. The default is
/// [`PullBaseMode::Newer`], matching the behaviour users expect from
/// modern build tools: fast when nothing has changed, correct when the
/// upstream base image has been republished.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum PullBaseMode {
    /// Pull only if the registry has a newer version (`--pull=newer`).
    /// Default behaviour.
    #[default]
    Newer,
    /// Always pull, even if a local copy exists (`--pull=always`).
    Always,
    /// Never pull — use whatever is in local storage (no `--pull` flag passed).
    /// Suitable for fully offline builds.
    Never,
}
282
/// Build options for customizing the image build process.
///
/// Constructed via [`Default`] and mutated through the fluent setters on
/// [`ImageBuilder`]; several fields are reserved for future `buildah build`
/// support (see per-field notes).
#[derive(Debug, Clone)]
#[allow(clippy::struct_excessive_bools)]
pub struct BuildOptions {
    /// Dockerfile path (default: Dockerfile in context)
    pub dockerfile: Option<PathBuf>,
    /// `ZImagefile` path (alternative to Dockerfile)
    pub zimagefile: Option<PathBuf>,
    /// Use runtime template instead of Dockerfile
    pub runtime: Option<Runtime>,
    /// Build arguments (ARG values)
    pub build_args: HashMap<String, String>,
    /// Target stage for multi-stage builds
    pub target: Option<String>,
    /// Image tags to apply
    pub tags: Vec<String>,
    /// Disable layer caching
    pub no_cache: bool,
    /// Push to registry after build
    pub push: bool,
    /// Registry auth (if pushing)
    pub registry_auth: Option<RegistryAuth>,
    /// Squash all layers into one
    pub squash: bool,
    /// Image format (oci or docker)
    pub format: Option<String>,
    /// Enable buildah layer caching (--layers flag for `buildah build`).
    /// Default: true
    ///
    /// Note: `ZLayer` uses manual container creation (`buildah from`, `buildah run`,
    /// `buildah commit`) rather than `buildah build`, so this flag is reserved
    /// for future use when/if we switch to `buildah build` (bud) command.
    pub layers: bool,
    /// Registry to pull cache from (--cache-from for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-from=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-from natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_from: Option<String>,
    /// Registry to push cache to (--cache-to for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-to=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-to natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_to: Option<String>,
    /// Maximum cache age (--cache-ttl for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-ttl=<duration>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement cache TTL support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-ttl natively
    /// 2. Implementing custom cache expiration logic for our layer caching system
    pub cache_ttl: Option<std::time::Duration>,
    /// Cache backend configuration (requires `cache` feature).
    ///
    /// When configured, the builder will store layer data in the specified
    /// cache backend for faster subsequent builds. This is separate from
    /// buildah's native caching and operates at the `ZLayer` level.
    ///
    /// # Integration Points
    ///
    /// The cache backend is used at several points during the build:
    ///
    /// 1. **Before instruction execution**: Check if a cached layer exists
    ///    for the (`instruction_hash`, `base_layer`) tuple
    /// 2. **After instruction execution**: Store the resulting layer data
    ///    in the cache for future builds
    /// 3. **Base image layers**: Cache pulled base image layers to avoid
    ///    re-downloading from registries
    ///
    /// TODO: Wire up cache lookups in the build loop once layer digests
    /// are properly computed and tracked.
    #[cfg(feature = "cache")]
    pub cache_backend_config: Option<CacheBackendConfig>,
    /// Default OCI/WASM-compatible registry to check for images before falling
    /// back to Docker Hub qualification.
    ///
    /// When set, the builder will probe this registry for short image names
    /// before qualifying them to `docker.io`. For example, if set to
    /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
    /// the builder will check `git.example.com:5000/myapp:latest` first.
    pub default_registry: Option<String>,
    /// Default cache mounts injected into all RUN instructions.
    /// These are merged with any step-level cache mounts (deduped by target path).
    pub default_cache_mounts: Vec<RunMount>,
    /// Number of retries for failed RUN steps (0 = no retries, default)
    pub retries: u32,
    /// Target platform for the build (e.g., "linux/amd64", "linux/arm64").
    /// When set, `buildah from` pulls the platform-specific image variant.
    pub platform: Option<String>,
    /// SHA-256 hash of the source Dockerfile/ZImagefile content.
    ///
    /// When set, the sandbox builder can skip a rebuild if the cached image
    /// was produced from identical source content (content-based invalidation).
    pub source_hash: Option<String>,
    /// How to handle base-image pulling during `buildah from`.
    ///
    /// Default: [`PullBaseMode::Newer`] — only pull if the registry has a
    /// newer version. Set to [`PullBaseMode::Always`] for CI builds that
    /// must always refresh, or [`PullBaseMode::Never`] for offline builds.
    pub pull: PullBaseMode,
}
395
impl Default for BuildOptions {
    /// Everything off/empty, with two deliberate exceptions: `layers` defaults
    /// to `true` (buildah layer caching on) and `pull` defaults to
    /// [`PullBaseMode::Newer`] via its derived `Default`.
    fn default() -> Self {
        Self {
            dockerfile: None,
            zimagefile: None,
            runtime: None,
            build_args: HashMap::new(),
            target: None,
            tags: Vec::new(),
            no_cache: false,
            push: false,
            registry_auth: None,
            squash: false,
            format: None,
            // The one boolean that defaults to enabled — see field docs.
            layers: true,
            cache_from: None,
            cache_to: None,
            cache_ttl: None,
            #[cfg(feature = "cache")]
            cache_backend_config: None,
            default_registry: None,
            default_cache_mounts: Vec::new(),
            retries: 0,
            platform: None,
            source_hash: None,
            pull: PullBaseMode::default(),
        }
    }
}
425
/// Image builder - orchestrates the full build process.
///
/// `ImageBuilder` provides a fluent API for configuring and executing
/// container image builds using buildah as the backend.
///
/// # Build Process
///
/// 1. Parse Dockerfile (or use runtime template)
/// 2. Resolve target stages if specified
/// 3. Build each stage sequentially:
///    - Create working container from base image
///    - Execute each instruction
///    - Commit intermediate stages for COPY --from
/// 4. Commit final image with tags
/// 5. Push to registry if configured
/// 6. Clean up intermediate containers
///
/// # Cache Backend Integration (requires `cache` feature)
///
/// When a cache backend is configured, the builder can store and retrieve
/// cached layer data to speed up subsequent builds:
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
///
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_dir("/var/cache/zlayer")
///     .tag("myapp:latest");
/// ```
pub struct ImageBuilder {
    /// Build context directory (validated to exist at construction time).
    context: PathBuf,
    /// Build options, mutated by the fluent setter methods.
    options: BuildOptions,
    /// Buildah executor (kept for backwards compatibility)
    #[allow(dead_code)]
    executor: BuildahExecutor,
    /// Event sender for TUI updates
    event_tx: Option<mpsc::Sender<BuildEvent>>,
    /// Explicit target OS for this build.
    ///
    /// When `Some`, the backend was (or will be) detected for this OS and
    /// it overrides any OS inferred from the `ZImagefile` (`os:` / `platform:`)
    /// during `build()`. When `None`, the builder uses the OS inferred from
    /// the parsed `ZImage` via `ZImage::resolve_target_os()`, falling back to
    /// [`ImageOs::Linux`] when the `ZImagefile` has no OS hint either.
    target_os: Option<crate::backend::ImageOs>,
    /// Pluggable build backend (buildah, sandbox, etc.).
    ///
    /// When set, the `build()` method delegates to this backend instead of
    /// using the inline buildah logic. Set automatically by `new()` via
    /// `detect_backend()`, or explicitly via `with_backend()`.
    backend: Option<Arc<dyn BuildBackend>>,
    /// Cache backend for layer caching (requires `cache` feature).
    ///
    /// When set, the builder will attempt to retrieve cached layers before
    /// executing instructions, and store results in the cache after execution.
    ///
    /// TODO: Implement cache lookups in the build loop. Currently the backend
    /// is stored but not actively used during builds. Integration points:
    /// - Check cache before executing RUN instructions
    /// - Store layer data after successful instruction execution
    /// - Cache base image layers pulled from registries
    #[cfg(feature = "cache")]
    cache_backend: Option<Arc<Box<dyn BlobCacheBackend>>>,
    /// Local OCI registry for checking cached images before remote pulls.
    #[cfg(feature = "local-registry")]
    local_registry: Option<LocalRegistry>,
}
495
496impl ImageBuilder {
    /// Create a new `ImageBuilder` with the given context directory
    ///
    /// The context directory should contain the Dockerfile (unless using
    /// a runtime template) and any files that will be copied into the image.
    ///
    /// # Arguments
    ///
    /// * `context` - Path to the build context directory
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - The context directory does not exist
    /// - Buildah is not installed or not accessible
    ///
    /// # Example
    ///
    /// ```no_run
    /// use zlayer_builder::ImageBuilder;
    ///
    /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
    /// let builder = ImageBuilder::new("./my-project").await?;
    /// # Ok(())
    /// # }
    /// ```
    #[instrument(skip_all, fields(context = %context.as_ref().display()))]
    pub async fn new(context: impl AsRef<Path>) -> Result<Self> {
        // Passing `None` defers target-OS resolution to `build()` time.
        Self::new_with_os(context, None).await
    }
526
527 /// Create a new `ImageBuilder` with an explicit target OS.
528 ///
529 /// This is equivalent to [`ImageBuilder::new`] followed by
530 /// [`ImageBuilder::with_target_os`], but avoids the extra round-trip of
531 /// detecting a Linux backend first and throwing it away.
532 ///
533 /// Pass `None` to defer target-OS resolution to `build()` time, where
534 /// the effective OS is resolved from the `ZImagefile`'s `os:` or `platform:`
535 /// field (priority documented on [`crate::zimage::ZImage::resolve_target_os`]).
536 ///
537 /// # Errors
538 ///
539 /// Returns an error if the context directory does not exist, or (on
540 /// Linux/Windows) if the buildah executor cannot be initialized.
541 #[instrument(skip_all, fields(context = %context.as_ref().display(), target_os = ?target_os))]
542 pub async fn new_with_os(
543 context: impl AsRef<Path>,
544 target_os: Option<crate::backend::ImageOs>,
545 ) -> Result<Self> {
546 let context = context.as_ref().to_path_buf();
547
548 // Verify context exists
549 if !context.exists() {
550 return Err(BuildError::ContextRead {
551 path: context,
552 source: std::io::Error::new(
553 std::io::ErrorKind::NotFound,
554 "Build context directory not found",
555 ),
556 });
557 }
558
559 // Detect the best available build backend for this platform. When
560 // `target_os` is None (caller hasn't decided yet), probe for the Linux
561 // backend as the common case; `build()` will re-detect if the parsed
562 // ZImagefile reveals a different target OS.
563 let detection_os = target_os.unwrap_or(crate::backend::ImageOs::Linux);
564 let backend = crate::backend::detect_backend(detection_os).await.ok();
565
566 // Initialize buildah executor.
567 // On macOS, if buildah is not found we fall back to a default executor
568 // (the backend will handle the actual build dispatch).
569 let executor = match BuildahExecutor::new_async().await {
570 Ok(exec) => exec,
571 #[cfg(target_os = "macos")]
572 Err(_) => {
573 info!("Buildah not found on macOS; backend will handle build dispatch");
574 BuildahExecutor::default()
575 }
576 #[cfg(not(target_os = "macos"))]
577 Err(e) => return Err(e),
578 };
579
580 debug!("Created ImageBuilder for context: {}", context.display());
581
582 Ok(Self {
583 context,
584 options: BuildOptions::default(),
585 executor,
586 event_tx: None,
587 target_os,
588 backend,
589 #[cfg(feature = "cache")]
590 cache_backend: None,
591 #[cfg(feature = "local-registry")]
592 local_registry: None,
593 })
594 }
595
596 /// Override the target OS after construction, re-detecting the backend.
597 ///
598 /// Use this when the caller only learns the target OS *after* creating
599 /// the builder — for example, after parsing a `ZImagefile` to inspect its
600 /// `os:`/`platform:` fields. Passing the same OS that was already selected
601 /// at construction time is cheap (it still re-runs `detect_backend()`).
602 ///
603 /// # Errors
604 ///
605 /// Returns an error if `detect_backend(target_os)` fails for the current
606 /// host/target combination (e.g. Windows image requested on a Linux host).
607 pub async fn with_target_os(mut self, target_os: crate::backend::ImageOs) -> Result<Self> {
608 self.target_os = Some(target_os);
609 self.backend = Some(crate::backend::detect_backend(target_os).await?);
610 Ok(self)
611 }
612
613 /// Create an `ImageBuilder` with a custom buildah executor
614 ///
615 /// This is useful for testing or when you need to configure
616 /// the executor with specific storage options. The executor is
617 /// wrapped in a [`BuildahBackend`] so the build dispatches through
618 /// the [`BuildBackend`] trait.
619 ///
620 /// # Errors
621 ///
622 /// Returns an error if the context directory does not exist.
623 pub fn with_executor(context: impl AsRef<Path>, executor: BuildahExecutor) -> Result<Self> {
624 let context = context.as_ref().to_path_buf();
625
626 if !context.exists() {
627 return Err(BuildError::ContextRead {
628 path: context,
629 source: std::io::Error::new(
630 std::io::ErrorKind::NotFound,
631 "Build context directory not found",
632 ),
633 });
634 }
635
636 let backend: Arc<dyn BuildBackend> = Arc::new(
637 crate::backend::BuildahBackend::with_executor(executor.clone()),
638 );
639
640 Ok(Self {
641 context,
642 options: BuildOptions::default(),
643 executor,
644 event_tx: None,
645 target_os: None,
646 backend: Some(backend),
647 #[cfg(feature = "cache")]
648 cache_backend: None,
649 #[cfg(feature = "local-registry")]
650 local_registry: None,
651 })
652 }
653
654 /// Create an `ImageBuilder` with an explicit [`BuildBackend`].
655 ///
656 /// The backend is used for all build, push, tag, and manifest
657 /// operations. The internal `BuildahExecutor` is set to the default
658 /// (it is only used if no backend is set).
659 ///
660 /// # Errors
661 ///
662 /// Returns an error if the context directory does not exist.
663 pub fn with_backend(context: impl AsRef<Path>, backend: Arc<dyn BuildBackend>) -> Result<Self> {
664 let context = context.as_ref().to_path_buf();
665
666 if !context.exists() {
667 return Err(BuildError::ContextRead {
668 path: context,
669 source: std::io::Error::new(
670 std::io::ErrorKind::NotFound,
671 "Build context directory not found",
672 ),
673 });
674 }
675
676 Ok(Self {
677 context,
678 options: BuildOptions::default(),
679 executor: BuildahExecutor::default(),
680 event_tx: None,
681 target_os: None,
682 backend: Some(backend),
683 #[cfg(feature = "cache")]
684 cache_backend: None,
685 #[cfg(feature = "local-registry")]
686 local_registry: None,
687 })
688 }
689
690 /// Set a custom Dockerfile path
691 ///
692 /// By default, the builder looks for a file named `Dockerfile` in the
693 /// context directory. Use this method to specify a different path.
694 ///
695 /// # Example
696 ///
697 /// ```no_run
698 /// # use zlayer_builder::ImageBuilder;
699 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
700 /// let builder = ImageBuilder::new("./my-project").await?
701 /// .dockerfile("./my-project/Dockerfile.prod");
702 /// # Ok(())
703 /// # }
704 /// ```
705 #[must_use]
706 pub fn dockerfile(mut self, path: impl AsRef<Path>) -> Self {
707 self.options.dockerfile = Some(path.as_ref().to_path_buf());
708 self
709 }
710
711 /// Set a custom `ZImagefile` path
712 ///
713 /// `ZImagefiles` are a YAML-based alternative to Dockerfiles. When set,
714 /// the builder will parse the `ZImagefile` and convert it to the internal
715 /// Dockerfile IR for execution.
716 ///
717 /// # Example
718 ///
719 /// ```no_run
720 /// # use zlayer_builder::ImageBuilder;
721 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
722 /// let builder = ImageBuilder::new("./my-project").await?
723 /// .zimagefile("./my-project/ZImagefile");
724 /// # Ok(())
725 /// # }
726 /// ```
727 #[must_use]
728 pub fn zimagefile(mut self, path: impl AsRef<Path>) -> Self {
729 self.options.zimagefile = Some(path.as_ref().to_path_buf());
730 self
731 }
732
733 /// Use a runtime template instead of a Dockerfile
734 ///
735 /// Runtime templates provide pre-built Dockerfiles for common
736 /// development environments. When set, the Dockerfile option is ignored.
737 ///
738 /// # Example
739 ///
740 /// ```no_run
741 /// use zlayer_builder::{ImageBuilder, Runtime};
742 ///
743 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
744 /// let builder = ImageBuilder::new("./my-node-app").await?
745 /// .runtime(Runtime::Node20);
746 /// # Ok(())
747 /// # }
748 /// ```
749 #[must_use]
750 pub fn runtime(mut self, runtime: Runtime) -> Self {
751 self.options.runtime = Some(runtime);
752 self
753 }
754
755 /// Add a build argument
756 ///
757 /// Build arguments are passed to the Dockerfile and can be referenced
758 /// using the `ARG` instruction.
759 ///
760 /// # Example
761 ///
762 /// ```no_run
763 /// # use zlayer_builder::ImageBuilder;
764 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
765 /// let builder = ImageBuilder::new("./my-project").await?
766 /// .build_arg("VERSION", "1.0.0")
767 /// .build_arg("DEBUG", "false");
768 /// # Ok(())
769 /// # }
770 /// ```
771 #[must_use]
772 pub fn build_arg(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
773 self.options.build_args.insert(key.into(), value.into());
774 self
775 }
776
777 /// Set multiple build arguments at once
778 #[must_use]
779 pub fn build_args(mut self, args: HashMap<String, String>) -> Self {
780 self.options.build_args.extend(args);
781 self
782 }
783
784 /// Set the target stage for multi-stage builds
785 ///
786 /// When building a multi-stage Dockerfile, you can stop at a specific
787 /// stage instead of building all stages.
788 ///
789 /// # Example
790 ///
791 /// ```no_run
792 /// # use zlayer_builder::ImageBuilder;
793 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
794 /// // Dockerfile:
795 /// // FROM node:20 AS builder
796 /// // ...
797 /// // FROM node:20-slim AS runtime
798 /// // ...
799 ///
800 /// let builder = ImageBuilder::new("./my-project").await?
801 /// .target("builder")
802 /// .tag("myapp:builder");
803 /// # Ok(())
804 /// # }
805 /// ```
806 #[must_use]
807 pub fn target(mut self, stage: impl Into<String>) -> Self {
808 self.options.target = Some(stage.into());
809 self
810 }
811
812 /// Add an image tag
813 ///
814 /// Tags are applied to the final image. You can add multiple tags.
815 /// The first tag is used as the primary image name during commit.
816 ///
817 /// # Example
818 ///
819 /// ```no_run
820 /// # use zlayer_builder::ImageBuilder;
821 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
822 /// let builder = ImageBuilder::new("./my-project").await?
823 /// .tag("myapp:latest")
824 /// .tag("myapp:v1.0.0")
825 /// .tag("registry.example.com/myapp:v1.0.0");
826 /// # Ok(())
827 /// # }
828 /// ```
829 #[must_use]
830 pub fn tag(mut self, tag: impl Into<String>) -> Self {
831 self.options.tags.push(tag.into());
832 self
833 }
834
835 /// Disable layer caching
836 ///
837 /// When enabled, all layers are rebuilt from scratch even if
838 /// they could be served from cache.
839 ///
840 /// Note: Currently this flag is tracked but not fully implemented in the
841 /// build process. `ZLayer` uses manual container creation (`buildah from`,
842 /// `buildah run`, `buildah commit`) which doesn't have built-in caching
843 /// like `buildah build` does. Future work could implement layer-level
844 /// caching by checking instruction hashes against previously built layers.
845 #[must_use]
846 pub fn no_cache(mut self) -> Self {
847 self.options.no_cache = true;
848 self
849 }
850
851 /// Set the base-image pull strategy for the build.
852 ///
853 /// By default, `buildah from` is invoked with `--pull=newer`, so an
854 /// up-to-date local base image is reused but a newer one on the
855 /// registry will be fetched. Pass [`PullBaseMode::Always`] to force a
856 /// fresh pull on every build, or [`PullBaseMode::Never`] to stay fully
857 /// offline.
858 #[must_use]
859 pub fn pull(mut self, mode: PullBaseMode) -> Self {
860 self.options.pull = mode;
861 self
862 }
863
864 /// Enable or disable layer caching
865 ///
866 /// This controls the `--layers` flag for buildah. When enabled (default),
867 /// buildah can cache and reuse intermediate layers.
868 ///
869 /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
870 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
871 /// flag is reserved for future use when/if we switch to `buildah build`.
872 ///
873 /// # Example
874 ///
875 /// ```no_run
876 /// # use zlayer_builder::ImageBuilder;
877 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
878 /// let builder = ImageBuilder::new("./my-project").await?
879 /// .layers(false) // Disable layer caching
880 /// .tag("myapp:latest");
881 /// # Ok(())
882 /// # }
883 /// ```
884 #[must_use]
885 pub fn layers(mut self, enable: bool) -> Self {
886 self.options.layers = enable;
887 self
888 }
889
890 /// Set registry to pull cache from
891 ///
892 /// This corresponds to buildah's `--cache-from` flag, which allows
893 /// pulling cached layers from a remote registry to speed up builds.
894 ///
895 /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
896 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
897 /// option is reserved for future implementation.
898 ///
899 /// TODO: Implement remote cache support. This would require either:
900 /// 1. Switching to `buildah build` command which supports --cache-from natively
901 /// 2. Implementing custom layer caching with registry pull for intermediate layers
902 ///
903 /// # Example
904 ///
905 /// ```no_run
906 /// # use zlayer_builder::ImageBuilder;
907 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
908 /// let builder = ImageBuilder::new("./my-project").await?
909 /// .cache_from("registry.example.com/myapp:cache")
910 /// .tag("myapp:latest");
911 /// # Ok(())
912 /// # }
913 /// ```
914 #[must_use]
915 pub fn cache_from(mut self, registry: impl Into<String>) -> Self {
916 self.options.cache_from = Some(registry.into());
917 self
918 }
919
920 /// Set registry to push cache to
921 ///
922 /// This corresponds to buildah's `--cache-to` flag, which allows
923 /// pushing cached layers to a remote registry for future builds to use.
924 ///
925 /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
926 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
927 /// option is reserved for future implementation.
928 ///
929 /// TODO: Implement remote cache support. This would require either:
930 /// 1. Switching to `buildah build` command which supports --cache-to natively
931 /// 2. Implementing custom layer caching with registry push for intermediate layers
932 ///
933 /// # Example
934 ///
935 /// ```no_run
936 /// # use zlayer_builder::ImageBuilder;
937 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
938 /// let builder = ImageBuilder::new("./my-project").await?
939 /// .cache_to("registry.example.com/myapp:cache")
940 /// .tag("myapp:latest");
941 /// # Ok(())
942 /// # }
943 /// ```
944 #[must_use]
945 pub fn cache_to(mut self, registry: impl Into<String>) -> Self {
946 self.options.cache_to = Some(registry.into());
947 self
948 }
949
950 /// Set maximum cache age
951 ///
952 /// This corresponds to buildah's `--cache-ttl` flag, which sets the
953 /// maximum age for cached layers before they are considered stale.
954 ///
955 /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
956 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
957 /// option is reserved for future implementation.
958 ///
959 /// TODO: Implement cache TTL support. This would require either:
960 /// 1. Switching to `buildah build` command which supports --cache-ttl natively
961 /// 2. Implementing custom cache expiration logic for our layer caching system
962 ///
963 /// # Example
964 ///
965 /// ```no_run
966 /// # use zlayer_builder::ImageBuilder;
967 /// # use std::time::Duration;
968 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
969 /// let builder = ImageBuilder::new("./my-project").await?
970 /// .cache_ttl(Duration::from_secs(3600 * 24)) // 24 hours
971 /// .tag("myapp:latest");
972 /// # Ok(())
973 /// # }
974 /// ```
975 #[must_use]
976 pub fn cache_ttl(mut self, ttl: std::time::Duration) -> Self {
977 self.options.cache_ttl = Some(ttl);
978 self
979 }
980
981 /// Push the image to a registry after building
982 ///
983 /// # Arguments
984 ///
985 /// * `auth` - Registry authentication credentials
986 ///
987 /// # Example
988 ///
989 /// ```no_run
990 /// use zlayer_builder::{ImageBuilder, RegistryAuth};
991 ///
992 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
993 /// let builder = ImageBuilder::new("./my-project").await?
994 /// .tag("registry.example.com/myapp:v1.0.0")
995 /// .push(RegistryAuth::new("user", "password"));
996 /// # Ok(())
997 /// # }
998 /// ```
999 #[must_use]
1000 pub fn push(mut self, auth: RegistryAuth) -> Self {
1001 self.options.push = true;
1002 self.options.registry_auth = Some(auth);
1003 self
1004 }
1005
1006 /// Enable pushing without authentication
1007 ///
1008 /// Use this for registries that don't require authentication
1009 /// (e.g., local registries, insecure registries).
1010 #[must_use]
1011 pub fn push_without_auth(mut self) -> Self {
1012 self.options.push = true;
1013 self.options.registry_auth = None;
1014 self
1015 }
1016
1017 /// Set a default OCI/WASM-compatible registry to check for images.
1018 ///
1019 /// When set, the builder will probe this registry for short image names
1020 /// before qualifying them to `docker.io`. For example, if set to
1021 /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
1022 /// the builder will check `git.example.com:5000/myapp:latest` first.
1023 #[must_use]
1024 pub fn default_registry(mut self, registry: impl Into<String>) -> Self {
1025 self.options.default_registry = Some(registry.into());
1026 self
1027 }
1028
1029 /// Set a local OCI registry for image resolution.
1030 ///
1031 /// When set, the builder checks the local registry for cached images
1032 /// before pulling from remote registries.
1033 #[cfg(feature = "local-registry")]
1034 #[must_use]
1035 pub fn with_local_registry(mut self, registry: LocalRegistry) -> Self {
1036 self.local_registry = Some(registry);
1037 self
1038 }
1039
1040 /// Squash all layers into a single layer
1041 ///
1042 /// This reduces image size but loses layer caching benefits.
1043 #[must_use]
1044 pub fn squash(mut self) -> Self {
1045 self.options.squash = true;
1046 self
1047 }
1048
1049 /// Set the image format
1050 ///
1051 /// Valid values are "oci" (default) or "docker".
1052 #[must_use]
1053 pub fn format(mut self, format: impl Into<String>) -> Self {
1054 self.options.format = Some(format.into());
1055 self
1056 }
1057
1058 /// Set default cache mounts to inject into all RUN instructions
1059 #[must_use]
1060 pub fn default_cache_mounts(mut self, mounts: Vec<RunMount>) -> Self {
1061 self.options.default_cache_mounts = mounts;
1062 self
1063 }
1064
1065 /// Set the number of retries for failed RUN steps
1066 #[must_use]
1067 pub fn retries(mut self, retries: u32) -> Self {
1068 self.options.retries = retries;
1069 self
1070 }
1071
1072 /// Set the target platform for cross-architecture builds.
1073 #[must_use]
1074 pub fn platform(mut self, platform: impl Into<String>) -> Self {
1075 self.options.platform = Some(platform.into());
1076 self
1077 }
1078
1079 /// Set a pre-computed source hash for content-based cache invalidation.
1080 ///
1081 /// When set, the sandbox builder can skip a full rebuild if the cached
1082 /// image was produced from identical source content.
1083 #[must_use]
1084 pub fn source_hash(mut self, hash: impl Into<String>) -> Self {
1085 self.options.source_hash = Some(hash.into());
1086 self
1087 }
1088
1089 /// Set an event sender for TUI progress updates
1090 ///
1091 /// Events will be sent as the build progresses, allowing you to
1092 /// display a progress UI or log build status.
1093 ///
1094 /// # Example
1095 ///
1096 /// ```no_run
1097 /// use zlayer_builder::{ImageBuilder, BuildEvent};
1098 /// use std::sync::mpsc;
1099 ///
1100 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1101 /// let (tx, rx) = mpsc::channel::<BuildEvent>();
1102 ///
1103 /// let builder = ImageBuilder::new("./my-project").await?
1104 /// .tag("myapp:latest")
1105 /// .with_events(tx);
1106 /// # Ok(())
1107 /// # }
1108 /// ```
1109 #[must_use]
1110 pub fn with_events(mut self, tx: mpsc::Sender<BuildEvent>) -> Self {
1111 self.event_tx = Some(tx);
1112 self
1113 }
1114
1115 /// Configure a persistent disk cache backend for layer caching.
1116 ///
1117 /// When configured, the builder will store layer data on disk at the
1118 /// specified path. This cache persists across builds and significantly
1119 /// speeds up repeated builds of similar images.
1120 ///
1121 /// Requires the `cache-persistent` feature to be enabled.
1122 ///
1123 /// # Arguments
1124 ///
1125 /// * `path` - Path to the cache directory. If a directory, creates
1126 /// `blob_cache.redb` inside it. If a file path, uses it directly.
1127 ///
1128 /// # Example
1129 ///
1130 /// ```no_run,ignore
1131 /// use zlayer_builder::ImageBuilder;
1132 ///
1133 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1134 /// let builder = ImageBuilder::new("./my-project").await?
1135 /// .with_cache_dir("/var/cache/zlayer")
1136 /// .tag("myapp:latest");
1137 /// # Ok(())
1138 /// # }
1139 /// ```
1140 ///
1141 /// # Integration Status
1142 ///
1143 /// TODO: The cache backend is currently stored but not actively used
1144 /// during builds. Future work will wire up:
1145 /// - Cache lookups before executing RUN instructions
1146 /// - Storing layer data after successful execution
1147 /// - Caching base image layers from registry pulls
1148 #[cfg(feature = "cache-persistent")]
1149 #[must_use]
1150 pub fn with_cache_dir(mut self, path: impl AsRef<Path>) -> Self {
1151 self.options.cache_backend_config = Some(CacheBackendConfig::Persistent {
1152 path: path.as_ref().to_path_buf(),
1153 });
1154 debug!(
1155 "Configured persistent cache at: {}",
1156 path.as_ref().display()
1157 );
1158 self
1159 }
1160
1161 /// Configure an in-memory cache backend for layer caching.
1162 ///
1163 /// The in-memory cache is cleared when the process exits, but can
1164 /// speed up builds within a single session by caching intermediate
1165 /// layers and avoiding redundant operations.
1166 ///
1167 /// Requires the `cache` feature to be enabled.
1168 ///
1169 /// # Example
1170 ///
1171 /// ```no_run,ignore
1172 /// use zlayer_builder::ImageBuilder;
1173 ///
1174 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1175 /// let builder = ImageBuilder::new("./my-project").await?
1176 /// .with_memory_cache()
1177 /// .tag("myapp:latest");
1178 /// # Ok(())
1179 /// # }
1180 /// ```
1181 ///
1182 /// # Integration Status
1183 ///
1184 /// TODO: The cache backend is currently stored but not actively used
1185 /// during builds. See `with_cache_dir` for integration status details.
1186 #[cfg(feature = "cache")]
1187 #[must_use]
1188 pub fn with_memory_cache(mut self) -> Self {
1189 self.options.cache_backend_config = Some(CacheBackendConfig::Memory);
1190 debug!("Configured in-memory cache");
1191 self
1192 }
1193
1194 /// Configure an S3-compatible storage backend for layer caching.
1195 ///
1196 /// This is useful for distributed build systems where multiple build
1197 /// machines need to share a layer cache. Supports AWS S3, Cloudflare R2,
1198 /// Backblaze B2, `MinIO`, and other S3-compatible services.
1199 ///
1200 /// Requires the `cache-s3` feature to be enabled.
1201 ///
1202 /// # Arguments
1203 ///
1204 /// * `bucket` - S3 bucket name
1205 /// * `region` - AWS region (optional, uses SDK default if not set)
1206 ///
1207 /// # Example
1208 ///
1209 /// ```no_run,ignore
1210 /// use zlayer_builder::ImageBuilder;
1211 ///
1212 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1213 /// let builder = ImageBuilder::new("./my-project").await?
1214 /// .with_s3_cache("my-build-cache", Some("us-west-2"))
1215 /// .tag("myapp:latest");
1216 /// # Ok(())
1217 /// # }
1218 /// ```
1219 ///
1220 /// # Integration Status
1221 ///
1222 /// TODO: The cache backend is currently stored but not actively used
1223 /// during builds. See `with_cache_dir` for integration status details.
1224 #[cfg(feature = "cache-s3")]
1225 #[must_use]
1226 pub fn with_s3_cache(mut self, bucket: impl Into<String>, region: Option<String>) -> Self {
1227 self.options.cache_backend_config = Some(CacheBackendConfig::S3 {
1228 bucket: bucket.into(),
1229 region,
1230 endpoint: None,
1231 prefix: None,
1232 });
1233 debug!("Configured S3 cache");
1234 self
1235 }
1236
1237 /// Configure an S3-compatible storage backend with custom endpoint.
1238 ///
1239 /// Use this method for S3-compatible services that require a custom
1240 /// endpoint URL (e.g., Cloudflare R2, `MinIO`, local development).
1241 ///
1242 /// Requires the `cache-s3` feature to be enabled.
1243 ///
1244 /// # Arguments
1245 ///
1246 /// * `bucket` - S3 bucket name
1247 /// * `endpoint` - Custom endpoint URL
1248 /// * `region` - Region (required for some S3-compatible services)
1249 ///
1250 /// # Example
1251 ///
1252 /// ```no_run,ignore
1253 /// use zlayer_builder::ImageBuilder;
1254 ///
1255 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1256 /// // Cloudflare R2
1257 /// let builder = ImageBuilder::new("./my-project").await?
1258 /// .with_s3_cache_endpoint(
1259 /// "my-bucket",
1260 /// "https://accountid.r2.cloudflarestorage.com",
1261 /// Some("auto".to_string()),
1262 /// )
1263 /// .tag("myapp:latest");
1264 /// # Ok(())
1265 /// # }
1266 /// ```
1267 #[cfg(feature = "cache-s3")]
1268 #[must_use]
1269 pub fn with_s3_cache_endpoint(
1270 mut self,
1271 bucket: impl Into<String>,
1272 endpoint: impl Into<String>,
1273 region: Option<String>,
1274 ) -> Self {
1275 self.options.cache_backend_config = Some(CacheBackendConfig::S3 {
1276 bucket: bucket.into(),
1277 region,
1278 endpoint: Some(endpoint.into()),
1279 prefix: None,
1280 });
1281 debug!("Configured S3 cache with custom endpoint");
1282 self
1283 }
1284
1285 /// Configure a custom cache backend configuration.
1286 ///
1287 /// This is the most flexible way to configure the cache backend,
1288 /// allowing full control over all cache settings.
1289 ///
1290 /// Requires the `cache` feature to be enabled.
1291 ///
1292 /// # Example
1293 ///
1294 /// ```no_run,ignore
1295 /// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
1296 ///
1297 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1298 /// let builder = ImageBuilder::new("./my-project").await?
1299 /// .with_cache_config(CacheBackendConfig::Memory)
1300 /// .tag("myapp:latest");
1301 /// # Ok(())
1302 /// # }
1303 /// ```
1304 #[cfg(feature = "cache")]
1305 #[must_use]
1306 pub fn with_cache_config(mut self, config: CacheBackendConfig) -> Self {
1307 self.options.cache_backend_config = Some(config);
1308 debug!("Configured custom cache backend");
1309 self
1310 }
1311
1312 /// Set an already-initialized cache backend directly.
1313 ///
1314 /// This is useful when you have a pre-configured cache backend instance
1315 /// that you want to share across multiple builders or when you need
1316 /// fine-grained control over cache initialization.
1317 ///
1318 /// Requires the `cache` feature to be enabled.
1319 ///
1320 /// # Example
1321 ///
1322 /// ```no_run,ignore
1323 /// use zlayer_builder::ImageBuilder;
1324 /// use zlayer_registry::cache::BlobCache;
1325 /// use std::sync::Arc;
1326 ///
1327 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1328 /// let cache = Arc::new(Box::new(BlobCache::new()?) as Box<dyn zlayer_registry::cache::BlobCacheBackend>);
1329 ///
1330 /// let builder = ImageBuilder::new("./my-project").await?
1331 /// .with_cache_backend(cache)
1332 /// .tag("myapp:latest");
1333 /// # Ok(())
1334 /// # }
1335 /// ```
1336 #[cfg(feature = "cache")]
1337 #[must_use]
1338 pub fn with_cache_backend(mut self, backend: Arc<Box<dyn BlobCacheBackend>>) -> Self {
1339 self.cache_backend = Some(backend);
1340 debug!("Configured pre-initialized cache backend");
1341 self
1342 }
1343
    /// Run the build
    ///
    /// This executes the complete build process:
    /// 1. Parse Dockerfile or load runtime template
    /// 2. Build all required stages
    /// 3. Commit and tag the final image
    /// 4. Push to registry if configured
    /// 5. Clean up intermediate containers
    ///
    /// WASM builds short-circuit after step 1: the artifact metadata is
    /// returned directly (optionally after pushing the OCI layout).
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Dockerfile parsing fails
    /// - A buildah command fails
    /// - Target stage is not found
    /// - Registry push fails
    ///
    /// # Panics
    ///
    /// Panics if an instruction output is missing after all retry attempts (internal invariant).
    #[instrument(skip(self), fields(context = %self.context.display()))]
    #[allow(clippy::too_many_lines)]
    pub async fn build(mut self) -> Result<BuiltImage> {
        // Wall-clock timer feeding the `build_time_ms` field of `BuiltImage`.
        let start_time = std::time::Instant::now();

        info!("Starting build in context: {}", self.context.display());

        // 0. Resolve the effective target OS from the priority chain when the
        // caller did not pin one explicitly. Re-detects the backend if the
        // resolved OS differs from the one we initially probed (Linux). A
        // pinned `target_os` wins and skips this resolution entirely.
        self.resolve_target_os_and_backend().await?;

        // 1. Get build output (Dockerfile IR or WASM artifact)
        let build_output = self.get_build_output().await?;

        // If this is a WASM build, return early with the artifact info.
        if let BuildOutput::WasmArtifact {
            wasm_path,
            // `oci_path` drives the optional push branch below; when the
            // `local-registry` feature is off the push branch is compiled
            // out, so the binding is unused.
            #[cfg_attr(not(feature = "local-registry"), allow(unused_variables))]
            oci_path,
            manifest_digest,
            artifact_type: _,
            language,
            optimized,
            size,
        } = build_output
        {
            #[allow(clippy::cast_possible_truncation)]
            let build_time_ms = start_time.elapsed().as_millis() as u64;

            // Prefer a user tag as the image id; otherwise fall back to the
            // OCI manifest digest (sha256:...), which is what WASM tooling
            // references in `oci-archive:` / `oci:` URIs. As a last resort
            // (no tag, no digest — only possible if export somehow produced
            // no digest) use a `wasm-path:` marker so downstream code can
            // tell this was a WASM build.
            let image_id = if let Some(tag) = self.options.tags.first() {
                tag.clone()
            } else if let Some(digest) = manifest_digest.as_ref() {
                format!("wasm:{digest}")
            } else {
                format!("wasm-path:{}", wasm_path.display())
            };

            // Push WASM OCI artifact(s) to the remote registry if the user
            // both supplied tags and requested a push (e.g. `zlayer build
            // -t ghcr.io/org/mod:v1 --push`). Mirrors the container flow at
            // `BuildahBackend::build_image` where `options.push` drives
            // `push_image_internal` for each tag.
            //
            // Gated on `local-registry` because `ImagePuller::push_wasm` is
            // behind the `zlayer-registry/local` feature, matching the other
            // push-to-registry sites in this crate.
            #[cfg(feature = "local-registry")]
            if oci_path.is_some() && self.options.push && !self.options.tags.is_empty() {
                let oci_dir = oci_path.as_ref().expect("checked oci_path.is_some() above");
                self.push_wasm_oci(&wasm_path, oci_dir).await?;
            }

            // Notify any attached progress UI before returning.
            self.send_event(BuildEvent::BuildComplete {
                image_id: image_id.clone(),
            });

            info!(
                "WASM build completed in {}ms: {} ({}, {} bytes, optimized={}, image_id={})",
                build_time_ms,
                wasm_path.display(),
                language,
                size,
                optimized,
                image_id,
            );

            return Ok(BuiltImage {
                image_id,
                tags: self.options.tags.clone(),
                layer_count: 1,
                size,
                build_time_ms,
                is_manifest: false,
            });
        }

        // Extract the Dockerfile from the BuildOutput.
        let BuildOutput::Dockerfile(dockerfile) = build_output else {
            unreachable!("WasmArtifact case handled above");
        };
        debug!("Parsed Dockerfile with {} stages", dockerfile.stages.len());

        // L-5: Static guard — catch `RUN choco install ...` /
        // `RUN winget install ...` on a nanoserver base image before we hand
        // the Dockerfile off to the backend. Nanoserver ships no package
        // manager, so without this check the build fails deep inside buildah
        // / HCS with an opaque "`choco` is not recognized" message.
        //
        // The validator is a pure AST walk; it runs regardless of the
        // resolved target OS because a Dockerfile pinning a Windows base
        // should be diagnosed the same way on a Linux build host doing a
        // cross-OS build as on a Windows host.
        if let Err(err) = crate::windows::deps::validate_dockerfile(&dockerfile) {
            return Err(BuildError::InvalidInstruction {
                instruction: "RUN".to_string(),
                reason: err.to_string(),
            });
        }

        // Delegate the build to the backend.
        let backend = self
            .backend
            .as_ref()
            .ok_or_else(|| BuildError::BuildahNotFound {
                message: "No build backend configured".into(),
            })?;

        info!("Delegating build to {} backend", backend.name());
        let built = backend
            .build_image(
                &self.context,
                &dockerfile,
                &self.options,
                self.event_tx.clone(),
            )
            .await?;

        // Import the built image into ZLayer's local registry and blob cache
        // so the runtime can find it without pulling from a remote registry.
        //
        // A user who wired up a local registry clearly wants built images to
        // live there — if the import fails (almost always EACCES on the
        // registry dir for an unprivileged user), bail with the registry path
        // in the message instead of silently producing a build that the
        // daemon can't find.
        #[cfg(feature = "local-registry")]
        if let Some(ref registry) = self.local_registry {
            if !built.tags.is_empty() {
                // Unique temp name: PID + elapsed nanos keeps concurrent
                // builds from clobbering each other's archives.
                let tmp_path = std::env::temp_dir().join(format!(
                    "zlayer-build-{}-{}.tar",
                    std::process::id(),
                    start_time.elapsed().as_nanos()
                ));

                // Export the image from buildah's store to an OCI archive.
                let export_tag = &built.tags[0];
                let dest = format!("oci-archive:{}", tmp_path.display());
                let push_cmd = BuildahCommand::push_to(export_tag, &dest);

                self.executor
                    .execute_checked(&push_cmd)
                    .await
                    .map_err(|e| BuildError::RegistryError {
                        message: format!(
                            "failed to export image to OCI archive for local registry \
                             import at {}: {e}",
                            registry.root().display()
                        ),
                    })?;

                // Resolve the blob cache backend (if available).
                let blob_cache: Option<&dyn zlayer_registry::cache::BlobCacheBackend> =
                    self.cache_backend.as_ref().map(|arc| arc.as_ref().as_ref());

                // Run the per-tag imports inside an async block so cleanup
                // below happens whether or not any import fails.
                let import_result = async {
                    for tag in &built.tags {
                        let info =
                            import_image(registry, &tmp_path, Some(tag.as_str()), blob_cache)
                                .await
                                .map_err(|e| BuildError::RegistryError {
                                    message: format!(
                                        "failed to import '{tag}' into local registry at {}: {e}",
                                        registry.root().display()
                                    ),
                                })?;
                        info!(
                            tag = %tag,
                            digest = %info.digest,
                            "Imported into local registry"
                        );
                    }
                    Ok::<(), BuildError>(())
                }
                .await;

                // Clean up the temporary archive regardless of whether the
                // import succeeded (best-effort; warn on failure).
                if let Err(e) = fs::remove_file(&tmp_path).await {
                    warn!(path = %tmp_path.display(), error = %e, "Failed to remove temp OCI archive");
                }

                import_result?;
            }
        }

        Ok(built)
    }
1562
    /// Resolve the effective target OS for this build and re-detect the
    /// backend when it differs from what was probed at construction.
    ///
    /// Priority (highest first):
    /// 1. `self.target_os` — explicit pin from the caller (e.g. CLI `--platform`).
    /// 2. `ZImage::resolve_target_os()` — `os:` field, else OS parsed from
    ///    the `platform:` field of the `ZImagefile`.
    /// 3. [`ImageOs::Linux`] — the historical default, applied whenever the
    ///    `ZImagefile` has neither hint and the caller didn't pin an OS.
    ///
    /// The runtime-template and plain-Dockerfile paths never carry an OS
    /// hint, so they fall through to the caller's pin or the default.
    ///
    /// # Errors
    ///
    /// Only backend re-detection failures propagate. ZImagefile read or
    /// parse failures are deliberately swallowed here so that
    /// `get_build_output` surfaces them with proper context.
    async fn resolve_target_os_and_backend(&mut self) -> Result<()> {
        // Explicit pin always wins: the backend was already detected for
        // this OS by `new_with_os`/`with_target_os`. Nothing to do.
        if self.target_os.is_some() {
            return Ok(());
        }

        // Peek at the ZImagefile (if the caller pointed us at one, or if one
        // lives in the context dir). We only inspect the OS-related fields so
        // a malformed ZImagefile body defers its error to `get_build_output`.
        let zimage_path = self.options.zimagefile.clone().or_else(|| {
            let candidate = self.context.join("ZImagefile");
            candidate.exists().then_some(candidate)
        });

        let Some(path) = zimage_path else {
            // No ZImagefile — Dockerfile / runtime template paths have no OS
            // metadata, so the initial Linux detection stands.
            return Ok(());
        };

        // Let `get_build_output()` surface any real read / parse errors.
        let Ok(content) = fs::read_to_string(&path).await else {
            return Ok(());
        };
        let Ok(zimage) = crate::zimage::parse_zimagefile(&content) else {
            return Ok(());
        };

        if let Some(resolved) = zimage.resolve_target_os() {
            // Re-detect only if the resolved OS differs from the one we
            // probed at construction. `new_with_os(None)` probes Linux, so
            // the common Linux case short-circuits.
            let initial = crate::backend::ImageOs::Linux;
            if resolved != initial {
                info!(
                    "Re-detecting build backend for target OS {:?} (inferred from ZImagefile)",
                    resolved
                );
                self.backend = Some(crate::backend::detect_backend(resolved).await?);
            }
            // Record the resolution so later calls take the fast path above.
            self.target_os = Some(resolved);
        }

        Ok(())
    }
1621
    /// Determine where this build's instructions come from and produce the
    /// corresponding [`BuildOutput`].
    ///
    /// Detection order:
    /// 1. If `runtime` is set -> use template string -> parse as Dockerfile
    /// 2. If `zimagefile` is explicitly set -> read & parse `ZImagefile` -> convert
    /// 3. If a file called `ZImagefile` exists in the context dir -> same as (2)
    /// 4. Fall back to reading a Dockerfile (from `dockerfile` option or default)
    ///
    /// Returns [`BuildOutput::Dockerfile`] for container builds or
    /// [`BuildOutput::WasmArtifact`] for WASM builds.
    ///
    /// # Errors
    ///
    /// Returns [`BuildError::ContextRead`] when the selected file cannot be
    /// read, or a parse error from the Dockerfile / `ZImagefile` parsers.
    async fn get_build_output(&self) -> Result<BuildOutput> {
        // (a) Runtime template takes highest priority.
        if let Some(runtime) = &self.options.runtime {
            debug!("Using runtime template: {}", runtime);
            let content = get_template(*runtime);
            return Ok(BuildOutput::Dockerfile(Dockerfile::parse(content)?));
        }

        // (b) Explicit ZImagefile path.
        if let Some(ref zimage_path) = self.options.zimagefile {
            debug!("Reading ZImagefile: {}", zimage_path.display());
            let content =
                fs::read_to_string(zimage_path)
                    .await
                    .map_err(|e| BuildError::ContextRead {
                        path: zimage_path.clone(),
                        source: e,
                    })?;
            let zimage = crate::zimage::parse_zimagefile(&content)?;
            return self.handle_zimage(&zimage).await;
        }

        // (c) Auto-detect ZImagefile in context directory.
        let auto_zimage_path = self.context.join("ZImagefile");
        if auto_zimage_path.exists() {
            debug!(
                "Found ZImagefile in context: {}",
                auto_zimage_path.display()
            );
            let content = fs::read_to_string(&auto_zimage_path).await.map_err(|e| {
                BuildError::ContextRead {
                    path: auto_zimage_path,
                    source: e,
                }
            })?;
            let zimage = crate::zimage::parse_zimagefile(&content)?;
            return self.handle_zimage(&zimage).await;
        }

        // (d) Fall back to Dockerfile.
        let dockerfile_path = self
            .options
            .dockerfile
            .clone()
            .unwrap_or_else(|| self.context.join("Dockerfile"));

        debug!("Reading Dockerfile: {}", dockerfile_path.display());

        let content =
            fs::read_to_string(&dockerfile_path)
                .await
                .map_err(|e| BuildError::ContextRead {
                    path: dockerfile_path,
                    source: e,
                })?;

        Ok(BuildOutput::Dockerfile(Dockerfile::parse(&content)?))
    }
1688
    /// Convert a parsed [`ZImage`] into a [`BuildOutput`].
    ///
    /// Handles all four `ZImage` modes:
    /// - **Runtime** mode: delegates to the template system -> [`BuildOutput::Dockerfile`]
    /// - **Single-stage / Multi-stage**: converts via [`zimage_to_dockerfile`] -> [`BuildOutput::Dockerfile`]
    /// - **WASM** mode: builds a WASM component -> [`BuildOutput::WasmArtifact`]
    ///
    /// Any `build:` directives are resolved first by spawning nested builds.
    ///
    /// # Errors
    ///
    /// Returns a validation error for an unknown runtime name; template
    /// parsing, WASM build, build-directive resolution, and Dockerfile
    /// conversion errors propagate unchanged.
    async fn handle_zimage(&self, zimage: &crate::zimage::ZImage) -> Result<BuildOutput> {
        // Runtime mode: delegate to template system.
        if let Some(ref runtime_name) = zimage.runtime {
            let rt = Runtime::from_name(runtime_name).ok_or_else(|| {
                BuildError::zimagefile_validation(format!(
                    "unknown runtime '{runtime_name}' in ZImagefile"
                ))
            })?;
            let content = get_template(rt);
            return Ok(BuildOutput::Dockerfile(Dockerfile::parse(content)?));
        }

        // WASM mode: build a WASM component.
        if zimage.wasm.is_some() {
            return self.handle_wasm_build(zimage).await;
        }

        // Resolve any `build:` directives to concrete base image tags.
        let resolved = self.resolve_build_directives(zimage).await?;

        // Single-stage or multi-stage: convert to Dockerfile IR directly.
        Ok(BuildOutput::Dockerfile(
            crate::zimage::zimage_to_dockerfile(&resolved)?,
        ))
    }
1722
1723 /// Build a WASM component from the `ZImagefile` wasm configuration.
1724 ///
1725 /// Converts [`ZWasmConfig`](crate::zimage::ZWasmConfig) into a
1726 /// [`WasmBuildConfig`](crate::wasm_builder::WasmBuildConfig) and invokes
1727 /// the WASM builder pipeline.
1728 #[allow(clippy::too_many_lines)]
1729 async fn handle_wasm_build(&self, zimage: &crate::zimage::ZImage) -> Result<BuildOutput> {
1730 use crate::wasm_builder::{build_wasm, WasiTarget, WasmBuildConfig, WasmLanguage};
1731 use zlayer_registry::wasm::WasiVersion;
1732 use zlayer_registry::{export_wasm_as_oci, WasmExportConfig};
1733
1734 // Caller guarantees `zimage.wasm` is `Some`.
1735 let wasm_config = zimage.wasm.as_ref().expect(
1736 "handle_wasm_build invoked without a wasm section in ZImage; caller must check",
1737 );
1738
1739 info!("ZImagefile specifies WASM mode, running WASM build");
1740
1741 // Convert target string to WasiTarget enum.
1742 let target = match wasm_config.target.as_str() {
1743 "preview1" => WasiTarget::Preview1,
1744 _ => WasiTarget::Preview2,
1745 };
1746
1747 // Resolve language: parse from string or leave as None for auto-detection.
1748 let language = wasm_config
1749 .language
1750 .as_deref()
1751 .and_then(WasmLanguage::from_name);
1752
1753 if let Some(ref lang_str) = wasm_config.language {
1754 if language.is_none() {
1755 return Err(BuildError::zimagefile_validation(format!(
1756 "unknown WASM language '{lang_str}'. Supported: rust, go, python, \
1757 typescript, assemblyscript, c, zig"
1758 )));
1759 }
1760 }
1761
1762 // Build the WasmBuildConfig.
1763 let mut config = WasmBuildConfig {
1764 language,
1765 target,
1766 optimize: wasm_config.optimize,
1767 opt_level: wasm_config
1768 .opt_level
1769 .clone()
1770 .unwrap_or_else(|| "Oz".to_string()),
1771 wit_path: wasm_config.wit.as_ref().map(PathBuf::from),
1772 output_path: wasm_config.output.as_ref().map(PathBuf::from),
1773 world: wasm_config.world.clone(),
1774 features: wasm_config.features.clone(),
1775 build_args: wasm_config.build_args.clone(),
1776 pre_build: Vec::new(),
1777 post_build: Vec::new(),
1778 adapter: wasm_config.adapter.as_ref().map(PathBuf::from),
1779 };
1780
1781 // Convert ZCommand pre/post build steps to Vec<Vec<String>>.
1782 for cmd in &wasm_config.pre_build {
1783 config.pre_build.push(zcommand_to_args(cmd));
1784 }
1785 for cmd in &wasm_config.post_build {
1786 config.post_build.push(zcommand_to_args(cmd));
1787 }
1788
1789 // Build the WASM component.
1790 let result = build_wasm(&self.context, config).await?;
1791
1792 let language_name = result.language.name().to_string();
1793 let wasm_path = result.wasm_path;
1794 let size = result.size;
1795
1796 info!(
1797 "WASM build complete: {} ({} bytes, optimized={})",
1798 wasm_path.display(),
1799 size,
1800 wasm_config.optimize
1801 );
1802
1803 // `wasm.oci: false` opts out of OCI artifact packaging and push —
1804 // the compilation pipeline above still runs (with caching, wasm-opt,
1805 // and the preview1 -> preview2 adapter), we simply skip the layout
1806 // write and leave `oci_path`/`manifest_digest`/`artifact_type` as
1807 // `None`. The push branch in `build()` keys off `oci_path.is_some()`
1808 // so skipping it here transparently disables push for this build.
1809 if !wasm_config.oci {
1810 info!(
1811 "WASM OCI export skipped (wasm.oci = false); raw .wasm at {}",
1812 wasm_path.display()
1813 );
1814 return Ok(BuildOutput::WasmArtifact {
1815 wasm_path,
1816 oci_path: None,
1817 manifest_digest: None,
1818 artifact_type: None,
1819 language: language_name,
1820 optimized: wasm_config.optimize,
1821 size,
1822 });
1823 }
1824
1825 // Derive a module name for OCI annotations. Prefer the first tag's
1826 // repository component (`repo` from `repo:version` or `host/repo`),
1827 // falling back to the wasm file stem, then "wasm-module".
1828 let module_name = self
1829 .options
1830 .tags
1831 .first()
1832 .map(|t| module_name_from_tag(t))
1833 .or_else(|| {
1834 wasm_path
1835 .file_stem()
1836 .and_then(|s| s.to_str())
1837 .map(str::to_string)
1838 })
1839 .unwrap_or_else(|| "wasm-module".to_string());
1840
1841 // Map the selected WASI target to a WasiVersion so the export uses
1842 // the correct artifact_type without re-analyzing the binary.
1843 let wasi_version = match target {
1844 WasiTarget::Preview1 => Some(WasiVersion::Preview1),
1845 WasiTarget::Preview2 => Some(WasiVersion::Preview2),
1846 };
1847
1848 // Carry ZImage labels across as OCI manifest annotations, matching
1849 // the behaviour of container image builds that emit LABEL -> annotations.
1850 let annotations: HashMap<String, String> = zimage.labels.clone();
1851
1852 let export_config = WasmExportConfig {
1853 wasm_path: wasm_path.clone(),
1854 module_name: module_name.clone(),
1855 wasi_version,
1856 annotations,
1857 };
1858
1859 let export =
1860 export_wasm_as_oci(&export_config)
1861 .await
1862 .map_err(|e| BuildError::RegistryError {
1863 message: format!("failed to export WASM as OCI artifact: {e}"),
1864 })?;
1865
1866 // Write the OCI image layout to disk next to the WASM file. The
1867 // layout directory name is `<module>-oci`, mirroring the CLI
1868 // `zlayer wasm export` layout in bin/zlayer/src/commands/wasm.rs.
1869 let layout_parent = wasm_path
1870 .parent()
1871 .map_or_else(|| self.context.clone(), Path::to_path_buf);
1872 let oci_dir = layout_parent.join(format!("{module_name}-oci"));
1873 write_wasm_oci_layout(&oci_dir, &export, &module_name).await?;
1874
1875 info!(
1876 manifest_digest = %export.manifest_digest,
1877 artifact_type = %export.artifact_type,
1878 oci_path = %oci_dir.display(),
1879 "WASM OCI artifact written"
1880 );
1881
1882 Ok(BuildOutput::WasmArtifact {
1883 wasm_path,
1884 oci_path: Some(oci_dir),
1885 manifest_digest: Some(export.manifest_digest),
1886 artifact_type: Some(export.artifact_type),
1887 language: language_name,
1888 optimized: wasm_config.optimize,
1889 size,
1890 })
1891 }
1892
1893 /// Resolve `build:` directives in a `ZImage` by running nested builds.
1894 ///
1895 /// For each `build:` directive (top-level or per-stage), this method:
1896 /// 1. Determines the build context directory
1897 /// 2. Auto-detects the build file (`ZImagefile` > Dockerfile) unless specified
1898 /// 3. Spawns a nested `ImageBuilder` to build the context
1899 /// 4. Tags the result and replaces `build` with `base`
1900 async fn resolve_build_directives(
1901 &self,
1902 zimage: &crate::zimage::ZImage,
1903 ) -> Result<crate::zimage::ZImage> {
1904 let mut resolved = zimage.clone();
1905
1906 // Resolve top-level `build:` directive.
1907 if let Some(ref build_ctx) = resolved.build {
1908 let tag = self.run_nested_build(build_ctx, "toplevel").await?;
1909 resolved.base = Some(tag);
1910 resolved.build = None;
1911 }
1912
1913 // Resolve per-stage `build:` directives.
1914 if let Some(ref mut stages) = resolved.stages {
1915 for (name, stage) in stages.iter_mut() {
1916 if let Some(ref build_ctx) = stage.build {
1917 let tag = self.run_nested_build(build_ctx, name).await?;
1918 stage.base = Some(tag);
1919 stage.build = None;
1920 }
1921 }
1922 }
1923
1924 Ok(resolved)
1925 }
1926
1927 /// Run a nested build from a `build:` directive and return the resulting image tag.
1928 fn run_nested_build<'a>(
1929 &'a self,
1930 build_ctx: &'a crate::zimage::types::ZBuildContext,
1931 stage_name: &'a str,
1932 ) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<String>> + Send + 'a>> {
1933 Box::pin(self.run_nested_build_inner(build_ctx, stage_name))
1934 }
1935
    /// Execute a nested build for a single `build:` directive.
    ///
    /// Resolves the context directory relative to the parent build context,
    /// constructs a child `ImageBuilder` that inherits the parent's target
    /// OS and default registry, applies the directive's build file and build
    /// args, runs the build, and returns the timestamped tag the nested
    /// image was given.
    ///
    /// # Errors
    ///
    /// Returns [`BuildError::ContextRead`] when the directive's context
    /// directory does not exist; otherwise propagates any error from the
    /// nested `ImageBuilder` construction or its `build()` run.
    async fn run_nested_build_inner(
        &self,
        build_ctx: &crate::zimage::types::ZBuildContext,
        stage_name: &str,
    ) -> Result<String> {
        let context_dir = build_ctx.context_dir(&self.context);

        // Fail fast with a NotFound-flavoured ContextRead error rather than
        // letting the nested builder produce a more confusing failure later.
        if !context_dir.exists() {
            return Err(BuildError::ContextRead {
                path: context_dir,
                source: std::io::Error::new(
                    std::io::ErrorKind::NotFound,
                    format!(
                        "build context directory not found for build directive in '{stage_name}'"
                    ),
                ),
            });
        }

        info!(
            "Building nested image for '{}' from context: {}",
            stage_name,
            context_dir.display()
        );

        // Create a tag for the nested build result. The Unix-timestamp
        // suffix keeps repeated builds of the same stage from colliding.
        let tag = format!(
            "zlayer-build-dep-{}:{}",
            stage_name,
            chrono_lite_timestamp()
        );

        // Create nested builder. Inherit the parent's target_os (if any) so
        // a Windows top-level build doesn't silently spawn a Linux nested
        // build for its `build:` dependency.
        let mut nested = ImageBuilder::new_with_os(&context_dir, self.target_os).await?;
        nested = nested.tag(&tag);

        // Apply explicit build file if specified: a `.yml`/`.yaml` extension
        // or a "ZImagefile" prefix selects the ZImagefile parser, anything
        // else is treated as a Dockerfile.
        // NOTE(review): `file.starts_with("ZImagefile")` matches against the
        // whole relative path, so `subdir/ZImagefile` falls through to the
        // Dockerfile branch — confirm whether nested paths are expected here.
        if let Some(file) = build_ctx.file() {
            let file_path = context_dir.join(file);
            if std::path::Path::new(file).extension().is_some_and(|ext| {
                ext.eq_ignore_ascii_case("yml") || ext.eq_ignore_ascii_case("yaml")
            }) || file.starts_with("ZImagefile")
            {
                nested = nested.zimagefile(file_path);
            } else {
                nested = nested.dockerfile(file_path);
            }
        }

        // Apply build args from the directive to the nested builder.
        for (key, value) in build_ctx.args() {
            nested = nested.build_arg(&key, &value);
        }

        // Propagate default registry if set, so unqualified base images in
        // the nested build resolve the same way as in the parent.
        if let Some(ref reg) = self.options.default_registry {
            nested = nested.default_registry(reg.clone());
        }

        // Run the nested build; its image_id is logged, but callers consume
        // the tag (which the nested build applied to the result).
        let result = nested.build().await?;
        info!(
            "Nested build for '{}' completed: {}",
            stage_name, result.image_id
        );

        Ok(tag)
    }
2006
2007 /// Push the WASM OCI artifact produced by `handle_wasm_build` to every
2008 /// user-supplied registry tag.
2009 ///
2010 /// Mirrors the container push flow in [`BuildahBackend::build_image`]:
2011 /// when `options.push` is true, each tag in `options.tags` is pushed.
2012 /// Tags that look like bare image names (no registry host, e.g.
2013 /// `myapp:wasm`) are skipped with an info log, matching how bare tags
2014 /// are treated elsewhere — a registryless tag has nowhere to be pushed.
2015 ///
2016 /// Re-runs [`export_wasm_as_oci`] on the produced `wasm_path` to obtain
2017 /// the [`WasmExportResult`] blobs required by [`ImagePuller::push_wasm`].
2018 /// The export is deterministic (same WASM binary produces the same
2019 /// blobs and digests), so the digests match the layout on disk at
2020 /// `oci_dir` that A1.2 wrote.
2021 ///
2022 /// [`BuildahBackend::build_image`]: crate::backend::buildah::BuildahBackend
2023 /// [`export_wasm_as_oci`]: zlayer_registry::export_wasm_as_oci
2024 /// [`WasmExportResult`]: zlayer_registry::WasmExportResult
2025 /// [`ImagePuller::push_wasm`]: zlayer_registry::ImagePuller::push_wasm
2026 #[cfg(feature = "local-registry")]
2027 async fn push_wasm_oci(&self, wasm_path: &Path, oci_dir: &Path) -> Result<()> {
2028 use zlayer_registry::wasm::WasiVersion;
2029 use zlayer_registry::{export_wasm_as_oci, BlobCache, ImagePuller, WasmExportConfig};
2030
2031 // Derive the module name the same way `handle_wasm_build` did so the
2032 // re-exported artifact carries identical OCI annotations.
2033 let module_name = self
2034 .options
2035 .tags
2036 .first()
2037 .map(|t| module_name_from_tag(t))
2038 .or_else(|| {
2039 wasm_path
2040 .file_stem()
2041 .and_then(|s| s.to_str())
2042 .map(str::to_string)
2043 })
2044 .unwrap_or_else(|| "wasm-module".to_string());
2045
2046 // Reconstruct the export result from the on-disk WASM binary. The
2047 // `wasi_version` is left `None` so it is re-detected from the binary
2048 // (matches whatever A1.2 wrote unless the user mutated the file).
2049 let export_config = WasmExportConfig {
2050 wasm_path: wasm_path.to_path_buf(),
2051 module_name,
2052 wasi_version: None::<WasiVersion>,
2053 annotations: HashMap::new(),
2054 };
2055 let export =
2056 export_wasm_as_oci(&export_config)
2057 .await
2058 .map_err(|e| BuildError::RegistryError {
2059 message: format!(
2060 "failed to re-export WASM for push from {}: {e}",
2061 wasm_path.display()
2062 ),
2063 })?;
2064
2065 // Build the puller once; reuse for every tag.
2066 let cache = BlobCache::new().map_err(|e| BuildError::RegistryError {
2067 message: format!("failed to create blob cache for WASM push: {e}"),
2068 })?;
2069 let puller = ImagePuller::new(cache);
2070
2071 for tag in &self.options.tags {
2072 if !tag_has_registry_host(tag) {
2073 info!(
2074 "Skipping WASM push for bare tag '{}' (no registry host); \
2075 OCI layout still available at {}",
2076 tag,
2077 oci_dir.display()
2078 );
2079 continue;
2080 }
2081
2082 let oci_auth = Self::resolve_wasm_push_auth(self.options.registry_auth.as_ref());
2083
2084 info!("Pushing WASM artifact: {}", tag);
2085 let push_result = puller
2086 .push_wasm(tag, &export, &oci_auth)
2087 .await
2088 .map_err(|e| BuildError::RegistryError {
2089 message: format!("failed to push WASM artifact '{tag}': {e}"),
2090 })?;
2091 info!(
2092 "Pushed WASM artifact: {} (manifest digest: {})",
2093 tag, push_result.manifest_digest
2094 );
2095 }
2096
2097 Ok(())
2098 }
2099
2100 /// Resolve registry auth for a WASM push.
2101 ///
2102 /// Uses the explicitly provided credentials when set; otherwise falls
2103 /// back to anonymous. Mirrors the minimal behaviour of the buildah push
2104 /// path (`--creds user:pass` when provided, otherwise let the registry
2105 /// decide).
2106 #[cfg(feature = "local-registry")]
2107 fn resolve_wasm_push_auth(auth: Option<&RegistryAuth>) -> zlayer_registry::RegistryAuth {
2108 match auth {
2109 Some(a) => zlayer_registry::RegistryAuth::Basic(a.username.clone(), a.password.clone()),
2110 None => zlayer_registry::RegistryAuth::Anonymous,
2111 }
2112 }
2113
2114 /// Send an event to the TUI (if configured)
2115 fn send_event(&self, event: BuildEvent) {
2116 if let Some(tx) = &self.event_tx {
2117 // Ignore send errors - the receiver may have been dropped
2118 let _ = tx.send(event);
2119 }
2120 }
2121}
2122
/// Return the current Unix time (whole seconds since the epoch) as a
/// decimal string, used to build unique tags for nested build results.
///
/// If the system clock reports a time before the epoch, falls back to a
/// zero duration (`"0"`) instead of panicking, so tag generation never
/// fails.
fn chrono_lite_timestamp() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
        .to_string()
}
2131
2132/// Convert a [`ZCommand`](crate::zimage::ZCommand) into a vector of string arguments
2133/// suitable for passing to [`WasmBuildConfig`](crate::wasm_builder::WasmBuildConfig)
2134/// pre/post build command lists.
2135fn zcommand_to_args(cmd: &crate::zimage::ZCommand) -> Vec<String> {
2136 match cmd {
2137 crate::zimage::ZCommand::Shell(s) => {
2138 vec!["/bin/sh".to_string(), "-c".to_string(), s.clone()]
2139 }
2140 crate::zimage::ZCommand::Exec(args) => args.clone(),
2141 }
2142}
2143
/// Extract a short "module name" suitable for OCI annotations from an image
/// tag, by dropping the registry host / path prefix, the `:tag` suffix, and
/// any `@digest` suffix.
///
/// Examples:
/// - `myapp:latest` -> `myapp`
/// - `ghcr.io/org/myapp:v1.2.3` -> `myapp`
/// - `myapp@sha256:...` -> `myapp`
fn module_name_from_tag(tag: &str) -> String {
    // Keep only the final path segment (after the last `/`).
    let mut name = tag;
    if let Some(slash) = name.rfind('/') {
        name = &name[slash + 1..];
    }
    // Truncate at the first `:` (tag), then at the first `@` (digest).
    if let Some(colon) = name.find(':') {
        name = &name[..colon];
    }
    if let Some(at) = name.find('@') {
        name = &name[..at];
    }
    name.to_string()
}
2157
2158/// Heuristic: does `tag` include an explicit registry host?
2159///
2160/// Used to decide which tags are push-eligible. A tag is treated as
2161/// registry-qualified when it has at least one `/` and the first path
2162/// component looks like a host — it contains a `.` (FQDN like `ghcr.io`,
2163/// `registry.example.com`), a `:` (host:port like `localhost:5000`), or
2164/// equals the literal `localhost`. Bare names like `myapp:wasm` and
2165/// Docker-Hub-style `org/app:v1` are skipped because there is no explicit
2166/// registry to push to.
2167#[cfg(feature = "local-registry")]
fn tag_has_registry_host(tag: &str) -> bool {
    // Everything before the first `/` is the candidate host component; a
    // tag with no `/` at all is just `name[:tag]` and has no host. The
    // component counts as a host when it looks like an FQDN (contains `.`),
    // a host:port (contains `:`), or is the literal `localhost`.
    match tag.split_once('/') {
        Some((host, _)) => host == "localhost" || host.contains('.') || host.contains(':'),
        None => false,
    }
}
2178
/// Write an OCI image layout directory (`oci-layout`, `index.json`,
/// `blobs/sha256/...`) for a WASM artifact on disk. This mirrors the layout
/// emitted by the `zlayer wasm export` CLI command so the directory can be
/// consumed by tools that expect a standard OCI layout.
///
/// `ref_name` is recorded as the `org.opencontainers.image.ref.name`
/// annotation on the index entry.
///
/// # Errors
///
/// Returns [`BuildError::ContextRead`] for filesystem failures (the failed
/// path is attached) and [`BuildError::RegistryError`] if JSON serialization
/// of the marker or index fails.
async fn write_wasm_oci_layout(
    oci_dir: &Path,
    export: &zlayer_registry::WasmExportResult,
    ref_name: &str,
) -> Result<()> {
    // Closure factory: given a path, produce an error mapper that wraps an
    // io::Error into BuildError::ContextRead carrying that path.
    let map_io = |path: PathBuf| {
        move |e: std::io::Error| BuildError::ContextRead {
            path: path.clone(),
            source: e,
        }
    };

    fs::create_dir_all(oci_dir)
        .await
        .map_err(map_io(oci_dir.to_path_buf()))?;

    // `oci-layout` marker file.
    let layout_marker = oci_dir.join("oci-layout");
    let oci_layout = serde_json::json!({ "imageLayoutVersion": "1.0.0" });
    fs::write(
        &layout_marker,
        serde_json::to_vec_pretty(&oci_layout).map_err(|e| BuildError::RegistryError {
            message: format!("failed to serialize oci-layout marker: {e}"),
        })?,
    )
    .await
    .map_err(map_io(layout_marker.clone()))?;

    // `blobs/sha256/` directory.
    let blobs_dir = oci_dir.join("blobs").join("sha256");
    fs::create_dir_all(&blobs_dir)
        .await
        .map_err(map_io(blobs_dir.clone()))?;

    // Write config, wasm-layer, and manifest blobs under their digests.
    // The content-address filename is the digest with its "sha256:" prefix
    // stripped; `data` is copied so the returned future owns its bytes.
    let write_blob = |digest: &str, data: &[u8]| {
        let hash = digest.strip_prefix("sha256:").unwrap_or(digest).to_string();
        let path = blobs_dir.join(hash);
        let data = data.to_vec();
        async move {
            fs::write(&path, &data)
                .await
                .map_err(map_io(path.clone()))?;
            Ok::<(), BuildError>(())
        }
    };

    write_blob(&export.config_digest, &export.config_blob).await?;
    write_blob(&export.wasm_layer_digest, &export.wasm_binary).await?;
    write_blob(&export.manifest_digest, &export.manifest_json).await?;

    // Write `index.json` pointing at the manifest.
    let index = serde_json::json!({
        "schemaVersion": 2,
        "mediaType": "application/vnd.oci.image.index.v1+json",
        "manifests": [{
            "mediaType": "application/vnd.oci.image.manifest.v1+json",
            "digest": export.manifest_digest,
            "size": export.manifest_size,
            "artifactType": export.artifact_type,
            "annotations": {
                "org.opencontainers.image.ref.name": ref_name,
            }
        }]
    });
    let index_path = oci_dir.join("index.json");
    fs::write(
        &index_path,
        serde_json::to_vec_pretty(&index).map_err(|e| BuildError::RegistryError {
            message: format!("failed to serialize OCI index.json: {e}"),
        })?,
    )
    .await
    .map_err(map_io(index_path.clone()))?;

    Ok(())
}
2260
#[cfg(test)]
mod tests {
    use super::*;

    // Unit tests for the builder's pure helpers and fluent option setters.
    // Nothing here touches buildah or the network; `create_test_builder`
    // constructs an `ImageBuilder` directly so no async init is required.

    /// `RegistryAuth::new` stores the given credentials verbatim.
    #[test]
    fn test_registry_auth_new() {
        let auth = RegistryAuth::new("user", "pass");
        assert_eq!(auth.username, "user");
        assert_eq!(auth.password, "pass");
    }

    /// Every `BuildOptions` field starts unset/empty except `layers`.
    #[test]
    fn test_build_options_default() {
        let opts = BuildOptions::default();
        assert!(opts.dockerfile.is_none());
        assert!(opts.zimagefile.is_none());
        assert!(opts.runtime.is_none());
        assert!(opts.build_args.is_empty());
        assert!(opts.target.is_none());
        assert!(opts.tags.is_empty());
        assert!(!opts.no_cache);
        assert!(!opts.push);
        assert!(!opts.squash);
        // New cache-related fields
        assert!(opts.layers); // Default is true
        assert!(opts.cache_from.is_none());
        assert!(opts.cache_to.is_none());
        assert!(opts.cache_ttl.is_none());
        // Cache backend config (only with cache feature)
        #[cfg(feature = "cache")]
        assert!(opts.cache_backend_config.is_none());
    }

    // Builds an ImageBuilder by struct literal, bypassing the async
    // `ImageBuilder::new` constructor (and its filesystem checks).
    fn create_test_builder() -> ImageBuilder {
        // Create a minimal builder for testing (without async initialization)
        ImageBuilder {
            context: PathBuf::from("/tmp/test"),
            options: BuildOptions::default(),
            executor: BuildahExecutor::with_path("/usr/bin/buildah"),
            event_tx: None,
            target_os: None,
            backend: None,
            #[cfg(feature = "cache")]
            cache_backend: None,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        }
    }

    // Builder method chaining tests

    /// Fluent setters accumulate into `options` and can be chained freely.
    #[test]
    fn test_builder_chaining() {
        let mut builder = create_test_builder();

        builder = builder
            .dockerfile("./Dockerfile.test")
            .runtime(Runtime::Node20)
            .build_arg("VERSION", "1.0")
            .target("builder")
            .tag("myapp:latest")
            .tag("myapp:v1")
            .no_cache()
            .squash()
            .format("oci");

        assert_eq!(
            builder.options.dockerfile,
            Some(PathBuf::from("./Dockerfile.test"))
        );
        assert_eq!(builder.options.runtime, Some(Runtime::Node20));
        assert_eq!(
            builder.options.build_args.get("VERSION"),
            Some(&"1.0".to_string())
        );
        assert_eq!(builder.options.target, Some("builder".to_string()));
        assert_eq!(builder.options.tags.len(), 2);
        assert!(builder.options.no_cache);
        assert!(builder.options.squash);
        assert_eq!(builder.options.format, Some("oci".to_string()));
    }

    /// `push(auth)` enables push and records the credentials.
    #[test]
    fn test_builder_push_with_auth() {
        let mut builder = create_test_builder();
        builder = builder.push(RegistryAuth::new("user", "pass"));

        assert!(builder.options.push);
        assert!(builder.options.registry_auth.is_some());
        let auth = builder.options.registry_auth.unwrap();
        assert_eq!(auth.username, "user");
        assert_eq!(auth.password, "pass");
    }

    /// `push_without_auth()` enables push but leaves credentials unset.
    #[test]
    fn test_builder_push_without_auth() {
        let mut builder = create_test_builder();
        builder = builder.push_without_auth();

        assert!(builder.options.push);
        assert!(builder.options.registry_auth.is_none());
    }

    /// `layers(bool)` toggles layer caching in both directions.
    #[test]
    fn test_builder_layers() {
        let mut builder = create_test_builder();
        // Default is true
        assert!(builder.options.layers);

        // Disable layers
        builder = builder.layers(false);
        assert!(!builder.options.layers);

        // Re-enable layers
        builder = builder.layers(true);
        assert!(builder.options.layers);
    }

    /// `cache_from` stores the external cache source reference.
    #[test]
    fn test_builder_cache_from() {
        let mut builder = create_test_builder();
        assert!(builder.options.cache_from.is_none());

        builder = builder.cache_from("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_from,
            Some("registry.example.com/myapp:cache".to_string())
        );
    }

    /// `cache_to` stores the external cache destination reference.
    #[test]
    fn test_builder_cache_to() {
        let mut builder = create_test_builder();
        assert!(builder.options.cache_to.is_none());

        builder = builder.cache_to("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_to,
            Some("registry.example.com/myapp:cache".to_string())
        );
    }

    /// `cache_ttl` stores the cache entry lifetime.
    #[test]
    fn test_builder_cache_ttl() {
        use std::time::Duration;

        let mut builder = create_test_builder();
        assert!(builder.options.cache_ttl.is_none());

        builder = builder.cache_ttl(Duration::from_secs(3600));
        assert_eq!(builder.options.cache_ttl, Some(Duration::from_secs(3600)));
    }

    /// All cache-related setters compose in a single chain.
    #[test]
    fn test_builder_cache_options_chaining() {
        use std::time::Duration;

        let builder = create_test_builder()
            .layers(true)
            .cache_from("registry.example.com/cache:input")
            .cache_to("registry.example.com/cache:output")
            .cache_ttl(Duration::from_secs(7200))
            .no_cache();

        assert!(builder.options.layers);
        assert_eq!(
            builder.options.cache_from,
            Some("registry.example.com/cache:input".to_string())
        );
        assert_eq!(
            builder.options.cache_to,
            Some("registry.example.com/cache:output".to_string())
        );
        assert_eq!(builder.options.cache_ttl, Some(Duration::from_secs(7200)));
        assert!(builder.options.no_cache);
    }

    /// The timestamp helper yields a parseable, plausibly-current value.
    #[test]
    fn test_chrono_lite_timestamp() {
        let ts = chrono_lite_timestamp();
        // Should be a valid number
        let parsed: u64 = ts.parse().expect("Should be a valid u64");
        // Should be reasonably recent (after 2024)
        assert!(parsed > 1_700_000_000);
    }
}