zlayer_builder/builder.rs
1//! `ImageBuilder` - High-level API for building container images
2//!
3//! This module provides the [`ImageBuilder`] type which orchestrates the full
4//! container image build process, from Dockerfile parsing through buildah
5//! execution to final image creation.
6//!
7//! # Example
8//!
9//! ```no_run
10//! use zlayer_builder::{ImageBuilder, Runtime};
11//!
12//! #[tokio::main]
13//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
14//! // Build from a Dockerfile
15//! let image = ImageBuilder::new("./my-app").await?
16//! .tag("myapp:latest")
17//! .tag("myapp:v1.0.0")
18//! .build()
19//! .await?;
20//!
21//! println!("Built image: {}", image.image_id);
22//! Ok(())
23//! }
24//! ```
25//!
26//! # Using Runtime Templates
27//!
28//! ```no_run
29//! use zlayer_builder::{ImageBuilder, Runtime};
30//!
31//! #[tokio::main]
32//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
33//! // Build using a runtime template (no Dockerfile needed)
34//! let image = ImageBuilder::new("./my-node-app").await?
35//! .runtime(Runtime::Node20)
36//! .tag("myapp:latest")
37//! .build()
38//! .await?;
39//!
40//! println!("Built image: {}", image.image_id);
41//! Ok(())
42//! }
43//! ```
44//!
45//! # Multi-stage Builds with Target
46//!
47//! ```no_run
48//! use zlayer_builder::ImageBuilder;
49//!
50//! #[tokio::main]
51//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
52//! // Build only up to a specific stage
53//! let image = ImageBuilder::new("./my-app").await?
54//! .target("builder")
55//! .tag("myapp:builder")
56//! .build()
57//! .await?;
58//!
59//! println!("Built intermediate image: {}", image.image_id);
60//! Ok(())
61//! }
62//! ```
63//!
64//! # With TUI Progress Updates
65//!
66//! ```no_run
67//! use zlayer_builder::{ImageBuilder, BuildEvent};
68//! use std::sync::mpsc;
69//!
70//! #[tokio::main]
71//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
72//! let (tx, rx) = mpsc::channel::<BuildEvent>();
73//!
74//! // Start TUI in another thread
75//! std::thread::spawn(move || {
76//! // Process events from rx...
77//! while let Ok(event) = rx.recv() {
78//! println!("Event: {:?}", event);
79//! }
80//! });
81//!
82//! let image = ImageBuilder::new("./my-app").await?
83//! .tag("myapp:latest")
84//! .with_events(tx)
85//! .build()
86//! .await?;
87//!
88//! Ok(())
89//! }
90//! ```
91//!
92//! # With Cache Backend (requires `cache` feature)
93//!
94//! ```no_run,ignore
95//! use zlayer_builder::ImageBuilder;
96//!
97//! #[tokio::main]
98//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
99//! let image = ImageBuilder::new("./my-app").await?
100//! .with_cache_dir("/var/cache/zlayer") // Use persistent disk cache
101//! .tag("myapp:latest")
102//! .build()
103//! .await?;
104//!
105//! println!("Built image: {}", image.image_id);
106//! Ok(())
107//! }
108//! ```
109
110use std::collections::HashMap;
111use std::path::{Path, PathBuf};
112use std::sync::mpsc;
113use std::sync::Arc;
114
115use tokio::fs;
116#[cfg(feature = "local-registry")]
117use tracing::warn;
118use tracing::{debug, info, instrument};
119
120use crate::backend::BuildBackend;
121#[cfg(feature = "local-registry")]
122use crate::buildah::BuildahCommand;
123use crate::buildah::BuildahExecutor;
124use crate::dockerfile::{Dockerfile, RunMount};
125use crate::error::{BuildError, Result};
126use crate::templates::{get_template, Runtime};
127use crate::tui::BuildEvent;
128
129#[cfg(feature = "cache")]
130use zlayer_registry::cache::BlobCacheBackend;
131
132#[cfg(feature = "local-registry")]
133use zlayer_registry::LocalRegistry;
134
135#[cfg(feature = "local-registry")]
136use zlayer_registry::import_image;
137
/// Output from parsing a `ZImagefile` - either a Dockerfile for container builds
/// or a WASM build result for WebAssembly builds.
///
/// Most `ZImagefile` modes (runtime, single-stage, multi-stage) produce a
/// [`Dockerfile`] IR that is then built with buildah. WASM mode produces
/// a compiled artifact directly, bypassing the container build pipeline.
///
/// Callers match on the variant to decide whether to run the buildah
/// pipeline (`Dockerfile`) or consume the already-built artifact
/// (`WasmArtifact`).
#[derive(Debug)]
pub enum BuildOutput {
    /// Standard container build - produces a Dockerfile to be built with buildah.
    Dockerfile(Dockerfile),
    /// WASM component build - already built, produces artifact path.
    WasmArtifact {
        /// Path to the compiled WASM binary.
        wasm_path: PathBuf,
        /// Path to the OCI artifact directory (if exported).
        oci_path: Option<PathBuf>,
        /// OCI manifest digest (e.g. `sha256:...`) for the exported artifact,
        /// or `None` if export did not run (should always be `Some` when
        /// `oci_path` is `Some`).
        manifest_digest: Option<String>,
        /// OCI artifact type (e.g. `application/vnd.wasm.component.v1+wasm`).
        artifact_type: Option<String>,
        /// Source language used.
        language: String,
        /// Whether optimization was applied.
        optimized: bool,
        /// Size of the output file in bytes.
        size: u64,
    },
}
168
/// Configuration for the layer cache backend.
///
/// This enum specifies which cache backend to use for storing and retrieving
/// cached layers during builds. The cache feature must be enabled for this
/// to be available.
///
/// Defaults to [`CacheBackendConfig::Memory`]; the disk and S3 variants are
/// additionally gated behind the `cache-persistent` / `cache-s3` features.
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// // Use persistent disk cache
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_config(CacheBackendConfig::Persistent {
///         path: "/var/cache/zlayer".into(),
///     })
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "cache")]
#[derive(Debug, Clone, Default)]
pub enum CacheBackendConfig {
    /// In-memory cache (cleared when process exits).
    ///
    /// Useful for CI/CD environments where persistence isn't needed
    /// but you want to avoid re-downloading base image layers within
    /// a single build session.
    #[default]
    Memory,

    /// Persistent disk-based cache using redb.
    ///
    /// Requires the `cache-persistent` feature. Layers are stored on disk
    /// and persist across builds, significantly speeding up repeated builds.
    #[cfg(feature = "cache-persistent")]
    Persistent {
        /// Path to the cache directory or database file.
        /// If a directory, `blob_cache.redb` will be created inside it.
        path: PathBuf,
    },

    /// S3-compatible object storage backend.
    ///
    /// Requires the `cache-s3` feature. Useful for distributed build systems
    /// where multiple build machines need to share a cache.
    #[cfg(feature = "cache-s3")]
    S3 {
        /// S3 bucket name
        bucket: String,
        /// AWS region (optional, uses SDK default if not set)
        region: Option<String>,
        /// Custom endpoint URL (for S3-compatible services like R2, B2, `MinIO`)
        endpoint: Option<String>,
        /// Key prefix for cached blobs (default: "zlayer/layers/")
        prefix: Option<String>,
    },
}
228
/// Built image information returned after a successful build
///
/// Produced once the backend has committed the final image; all fields are
/// plain data and the struct is cheap to clone.
#[derive(Debug, Clone)]
pub struct BuiltImage {
    /// Image ID (sha256:...)
    pub image_id: String,
    /// Applied tags
    pub tags: Vec<String>,
    /// Number of layers in the final image
    pub layer_count: usize,
    /// Total size in bytes (0 if not computed)
    pub size: u64,
    /// Build duration in milliseconds
    pub build_time_ms: u64,
    /// Whether this image is a manifest list (multi-arch).
    pub is_manifest: bool,
}
245
/// Registry authentication credentials
///
/// The manual [`std::fmt::Debug`] implementation redacts the password so that
/// credentials are never leaked through debug logs, `{:?}` formatting of
/// containing types (e.g. `BuildOptions`), or error messages.
#[derive(Clone)]
pub struct RegistryAuth {
    /// Registry username
    pub username: String,
    /// Registry password or token
    pub password: String,
}

impl std::fmt::Debug for RegistryAuth {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Deliberately hide the secret; only the username is diagnostic.
        f.debug_struct("RegistryAuth")
            .field("username", &self.username)
            .field("password", &"<redacted>")
            .finish()
    }
}

impl RegistryAuth {
    /// Create new registry authentication
    ///
    /// Accepts anything convertible into `String` for both fields
    /// (`&str`, `String`, ...).
    pub fn new(username: impl Into<String>, password: impl Into<String>) -> Self {
        Self {
            username: username.into(),
            password: password.into(),
        }
    }
}
264
/// Strategy for pulling the base image before building.
///
/// Maps onto the `--pull` flag handed to `buildah from`. The default,
/// [`PullBaseMode::Newer`], reuses an up-to-date local copy but still
/// fetches when the registry holds a fresher base image — fast when nothing
/// changed, correct when the upstream image was republished.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum PullBaseMode {
    /// Pull only if the registry has a newer version (`--pull=newer`).
    /// Default behaviour.
    #[default]
    Newer,
    /// Always pull, even if a local copy exists (`--pull=always`).
    Always,
    /// Never pull — use whatever is in local storage (no `--pull` flag passed).
    Never,
}
282
/// Build options for customizing the image build process
///
/// Created via [`Default`] and mutated through the `ImageBuilder` fluent
/// setter methods; see `impl Default for BuildOptions` for the baseline
/// values.
#[derive(Debug, Clone)]
#[allow(clippy::struct_excessive_bools)]
pub struct BuildOptions {
    /// Dockerfile path (default: Dockerfile in context)
    pub dockerfile: Option<PathBuf>,
    /// `ZImagefile` path (alternative to Dockerfile)
    pub zimagefile: Option<PathBuf>,
    /// Use runtime template instead of Dockerfile
    pub runtime: Option<Runtime>,
    /// Build arguments (ARG values)
    pub build_args: HashMap<String, String>,
    /// Target stage for multi-stage builds
    pub target: Option<String>,
    /// Image tags to apply
    pub tags: Vec<String>,
    /// Disable layer caching
    pub no_cache: bool,
    /// Push to registry after build
    pub push: bool,
    /// Registry auth (if pushing)
    pub registry_auth: Option<RegistryAuth>,
    /// Squash all layers into one
    pub squash: bool,
    /// Image format (oci or docker)
    pub format: Option<String>,
    /// Enable buildah layer caching (--layers flag for `buildah build`).
    /// Default: true
    ///
    /// Note: `ZLayer` uses manual container creation (`buildah from`, `buildah run`,
    /// `buildah commit`) rather than `buildah build`, so this flag is reserved
    /// for future use when/if we switch to `buildah build` (bud) command.
    pub layers: bool,
    /// Registry to pull cache from (--cache-from for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-from=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-from natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_from: Option<String>,
    /// Registry to push cache to (--cache-to for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-to=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-to natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_to: Option<String>,
    /// Maximum cache age (--cache-ttl for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-ttl=<duration>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement cache TTL support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-ttl natively
    /// 2. Implementing custom cache expiration logic for our layer caching system
    pub cache_ttl: Option<std::time::Duration>,
    /// Cache backend configuration (requires `cache` feature).
    ///
    /// When configured, the builder will store layer data in the specified
    /// cache backend for faster subsequent builds. This is separate from
    /// buildah's native caching and operates at the `ZLayer` level.
    ///
    /// # Integration Points
    ///
    /// The cache backend is used at several points during the build:
    ///
    /// 1. **Before instruction execution**: Check if a cached layer exists
    ///    for the (`instruction_hash`, `base_layer`) tuple
    /// 2. **After instruction execution**: Store the resulting layer data
    ///    in the cache for future builds
    /// 3. **Base image layers**: Cache pulled base image layers to avoid
    ///    re-downloading from registries
    ///
    /// TODO: Wire up cache lookups in the build loop once layer digests
    /// are properly computed and tracked.
    #[cfg(feature = "cache")]
    pub cache_backend_config: Option<CacheBackendConfig>,
    /// Default OCI/WASM-compatible registry to check for images before falling
    /// back to Docker Hub qualification.
    ///
    /// When set, the builder will probe this registry for short image names
    /// before qualifying them to `docker.io`. For example, if set to
    /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
    /// the builder will check `git.example.com:5000/myapp:latest` first.
    pub default_registry: Option<String>,
    /// Default cache mounts injected into all RUN instructions.
    /// These are merged with any step-level cache mounts (deduped by target path).
    pub default_cache_mounts: Vec<RunMount>,
    /// Number of retries for failed RUN steps (0 = no retries, default)
    pub retries: u32,
    /// Target platform for the build (e.g., "linux/amd64", "linux/arm64").
    /// When set, `buildah from` pulls the platform-specific image variant.
    pub platform: Option<String>,
    /// SHA-256 hash of the source Dockerfile/ZImagefile content.
    ///
    /// When set, the sandbox builder can skip a rebuild if the cached image
    /// was produced from identical source content (content-based invalidation).
    pub source_hash: Option<String>,
    /// How to handle base-image pulling during `buildah from`.
    ///
    /// Default: [`PullBaseMode::Newer`] — only pull if the registry has a
    /// newer version. Set to [`PullBaseMode::Always`] for CI builds that
    /// must always refresh, or [`PullBaseMode::Never`] for offline builds.
    pub pull: PullBaseMode,
    /// Force regeneration of `zlayer-bottles.lock` for this build.
    ///
    /// Only consumed by the macOS sandbox backend. When `true`, any existing
    /// lockfile next to the spec is ignored and the live brew resolver runs
    /// for every formula; the lockfile is then rewritten from scratch.
    /// Mirrors `cargo update` semantics. On non-macOS backends this flag is
    /// ignored.
    pub update_bottles: bool,
}
403
404impl Default for BuildOptions {
405 fn default() -> Self {
406 Self {
407 dockerfile: None,
408 zimagefile: None,
409 runtime: None,
410 build_args: HashMap::new(),
411 target: None,
412 tags: Vec::new(),
413 no_cache: false,
414 push: false,
415 registry_auth: None,
416 squash: false,
417 format: None,
418 layers: true,
419 cache_from: None,
420 cache_to: None,
421 cache_ttl: None,
422 #[cfg(feature = "cache")]
423 cache_backend_config: None,
424 default_registry: None,
425 default_cache_mounts: Vec::new(),
426 retries: 0,
427 platform: None,
428 source_hash: None,
429 pull: PullBaseMode::default(),
430 update_bottles: false,
431 }
432 }
433}
434
/// Image builder - orchestrates the full build process
///
/// `ImageBuilder` provides a fluent API for configuring and executing
/// container image builds using buildah as the backend.
///
/// # Build Process
///
/// 1. Parse Dockerfile (or use runtime template)
/// 2. Resolve target stages if specified
/// 3. Build each stage sequentially:
///    - Create working container from base image
///    - Execute each instruction
///    - Commit intermediate stages for COPY --from
/// 4. Commit final image with tags
/// 5. Push to registry if configured
/// 6. Clean up intermediate containers
///
/// # Cache Backend Integration (requires `cache` feature)
///
/// When a cache backend is configured, the builder can store and retrieve
/// cached layer data to speed up subsequent builds:
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
///
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_dir("/var/cache/zlayer")
///     .tag("myapp:latest");
/// ```
pub struct ImageBuilder {
    /// Build context directory (verified to exist at construction time).
    context: PathBuf,
    /// Build options
    options: BuildOptions,
    /// Buildah executor (kept for backwards compatibility)
    #[allow(dead_code)]
    executor: BuildahExecutor,
    /// Event sender for TUI updates
    event_tx: Option<mpsc::Sender<BuildEvent>>,
    /// Explicit target OS for this build.
    ///
    /// When `Some`, the backend was (or will be) detected for this OS and
    /// it overrides any OS inferred from the `ZImagefile` (`os:` / `platform:`)
    /// during `build()`. When `None`, the builder uses the OS inferred from
    /// the parsed `ZImage` via `ZImage::resolve_target_os()`, falling back to
    /// [`ImageOs::Linux`] when the `ZImagefile` has no OS hint either.
    target_os: Option<crate::backend::ImageOs>,
    /// Pluggable build backend (buildah, sandbox, etc.).
    ///
    /// When set, the `build()` method delegates to this backend instead of
    /// using the inline buildah logic. Set automatically by `new()` via
    /// `detect_backend()`, or explicitly via `with_backend()`.
    /// `None` means backend detection failed during construction.
    backend: Option<Arc<dyn BuildBackend>>,
    /// Cache backend for layer caching (requires `cache` feature).
    ///
    /// When set, the builder will attempt to retrieve cached layers before
    /// executing instructions, and store results in the cache after execution.
    ///
    /// TODO: Implement cache lookups in the build loop. Currently the backend
    /// is stored but not actively used during builds. Integration points:
    /// - Check cache before executing RUN instructions
    /// - Store layer data after successful instruction execution
    /// - Cache base image layers pulled from registries
    #[cfg(feature = "cache")]
    cache_backend: Option<Arc<Box<dyn BlobCacheBackend>>>,
    /// Local OCI registry for checking cached images before remote pulls.
    #[cfg(feature = "local-registry")]
    local_registry: Option<LocalRegistry>,
}
504
505impl ImageBuilder {
506 /// Create a new `ImageBuilder` with the given context directory
507 ///
508 /// The context directory should contain the Dockerfile (unless using
509 /// a runtime template) and any files that will be copied into the image.
510 ///
511 /// # Arguments
512 ///
513 /// * `context` - Path to the build context directory
514 ///
515 /// # Errors
516 ///
517 /// Returns an error if:
518 /// - The context directory does not exist
519 /// - Buildah is not installed or not accessible
520 ///
521 /// # Example
522 ///
523 /// ```no_run
524 /// use zlayer_builder::ImageBuilder;
525 ///
526 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
527 /// let builder = ImageBuilder::new("./my-project").await?;
528 /// # Ok(())
529 /// # }
530 /// ```
531 #[instrument(skip_all, fields(context = %context.as_ref().display()))]
532 pub async fn new(context: impl AsRef<Path>) -> Result<Self> {
533 Self::new_with_os(context, None).await
534 }
535
    /// Create a new `ImageBuilder` with an explicit target OS.
    ///
    /// This is equivalent to [`ImageBuilder::new`] followed by
    /// [`ImageBuilder::with_target_os`], but avoids the extra round-trip of
    /// detecting a Linux backend first and throwing it away.
    ///
    /// Pass `None` to defer target-OS resolution to `build()` time, where
    /// the effective OS is resolved from the `ZImagefile`'s `os:` or `platform:`
    /// field (priority documented on [`crate::zimage::ZImage::resolve_target_os`]).
    ///
    /// # Errors
    ///
    /// Returns an error if the context directory does not exist, or (on
    /// Linux/Windows) if the buildah executor cannot be initialized.
    #[instrument(skip_all, fields(context = %context.as_ref().display(), target_os = ?target_os))]
    pub async fn new_with_os(
        context: impl AsRef<Path>,
        target_os: Option<crate::backend::ImageOs>,
    ) -> Result<Self> {
        let context = context.as_ref().to_path_buf();

        // Verify context exists
        if !context.exists() {
            return Err(BuildError::ContextRead {
                path: context,
                source: std::io::Error::new(
                    std::io::ErrorKind::NotFound,
                    "Build context directory not found",
                ),
            });
        }

        // Detect the best available build backend for this platform. When
        // `target_os` is None (caller hasn't decided yet), probe for the Linux
        // backend as the common case; `build()` will re-detect if the parsed
        // ZImagefile reveals a different target OS.
        let detection_os = target_os.unwrap_or(crate::backend::ImageOs::Linux);
        // `.ok()` deliberately swallows detection errors: construction still
        // succeeds with `backend: None`, leaving dispatch decisions to build().
        let backend = crate::backend::detect_backend(detection_os).await.ok();

        // Initialize buildah executor.
        // On macOS, if buildah is not found we fall back to a default executor
        // (the backend will handle the actual build dispatch).
        // On other platforms a missing buildah is a hard error.
        let executor = match BuildahExecutor::new_async().await {
            Ok(exec) => exec,
            #[cfg(target_os = "macos")]
            Err(_) => {
                info!("Buildah not found on macOS; backend will handle build dispatch");
                BuildahExecutor::default()
            }
            #[cfg(not(target_os = "macos"))]
            Err(e) => return Err(e),
        };

        debug!("Created ImageBuilder for context: {}", context.display());

        Ok(Self {
            context,
            options: BuildOptions::default(),
            executor,
            event_tx: None,
            target_os,
            backend,
            #[cfg(feature = "cache")]
            cache_backend: None,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        })
    }
604
605 /// Override the target OS after construction, re-detecting the backend.
606 ///
607 /// Use this when the caller only learns the target OS *after* creating
608 /// the builder — for example, after parsing a `ZImagefile` to inspect its
609 /// `os:`/`platform:` fields. Passing the same OS that was already selected
610 /// at construction time is cheap (it still re-runs `detect_backend()`).
611 ///
612 /// # Errors
613 ///
614 /// Returns an error if `detect_backend(target_os)` fails for the current
615 /// host/target combination (e.g. Windows image requested on a Linux host).
616 pub async fn with_target_os(mut self, target_os: crate::backend::ImageOs) -> Result<Self> {
617 self.target_os = Some(target_os);
618 self.backend = Some(crate::backend::detect_backend(target_os).await?);
619 Ok(self)
620 }
621
622 /// Create an `ImageBuilder` with a custom buildah executor
623 ///
624 /// This is useful for testing or when you need to configure
625 /// the executor with specific storage options. The executor is
626 /// wrapped in a [`BuildahBackend`] so the build dispatches through
627 /// the [`BuildBackend`] trait.
628 ///
629 /// # Errors
630 ///
631 /// Returns an error if the context directory does not exist.
632 pub fn with_executor(context: impl AsRef<Path>, executor: BuildahExecutor) -> Result<Self> {
633 let context = context.as_ref().to_path_buf();
634
635 if !context.exists() {
636 return Err(BuildError::ContextRead {
637 path: context,
638 source: std::io::Error::new(
639 std::io::ErrorKind::NotFound,
640 "Build context directory not found",
641 ),
642 });
643 }
644
645 let backend: Arc<dyn BuildBackend> = Arc::new(
646 crate::backend::BuildahBackend::with_executor(executor.clone()),
647 );
648
649 Ok(Self {
650 context,
651 options: BuildOptions::default(),
652 executor,
653 event_tx: None,
654 target_os: None,
655 backend: Some(backend),
656 #[cfg(feature = "cache")]
657 cache_backend: None,
658 #[cfg(feature = "local-registry")]
659 local_registry: None,
660 })
661 }
662
663 /// Create an `ImageBuilder` with an explicit [`BuildBackend`].
664 ///
665 /// The backend is used for all build, push, tag, and manifest
666 /// operations. The internal `BuildahExecutor` is set to the default
667 /// (it is only used if no backend is set).
668 ///
669 /// # Errors
670 ///
671 /// Returns an error if the context directory does not exist.
672 pub fn with_backend(context: impl AsRef<Path>, backend: Arc<dyn BuildBackend>) -> Result<Self> {
673 let context = context.as_ref().to_path_buf();
674
675 if !context.exists() {
676 return Err(BuildError::ContextRead {
677 path: context,
678 source: std::io::Error::new(
679 std::io::ErrorKind::NotFound,
680 "Build context directory not found",
681 ),
682 });
683 }
684
685 Ok(Self {
686 context,
687 options: BuildOptions::default(),
688 executor: BuildahExecutor::default(),
689 event_tx: None,
690 target_os: None,
691 backend: Some(backend),
692 #[cfg(feature = "cache")]
693 cache_backend: None,
694 #[cfg(feature = "local-registry")]
695 local_registry: None,
696 })
697 }
698
699 /// Set a custom Dockerfile path
700 ///
701 /// By default, the builder looks for a file named `Dockerfile` in the
702 /// context directory. Use this method to specify a different path.
703 ///
704 /// # Example
705 ///
706 /// ```no_run
707 /// # use zlayer_builder::ImageBuilder;
708 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
709 /// let builder = ImageBuilder::new("./my-project").await?
710 /// .dockerfile("./my-project/Dockerfile.prod");
711 /// # Ok(())
712 /// # }
713 /// ```
714 #[must_use]
715 pub fn dockerfile(mut self, path: impl AsRef<Path>) -> Self {
716 self.options.dockerfile = Some(path.as_ref().to_path_buf());
717 self
718 }
719
720 /// Set a custom `ZImagefile` path
721 ///
722 /// `ZImagefiles` are a YAML-based alternative to Dockerfiles. When set,
723 /// the builder will parse the `ZImagefile` and convert it to the internal
724 /// Dockerfile IR for execution.
725 ///
726 /// # Example
727 ///
728 /// ```no_run
729 /// # use zlayer_builder::ImageBuilder;
730 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
731 /// let builder = ImageBuilder::new("./my-project").await?
732 /// .zimagefile("./my-project/ZImagefile");
733 /// # Ok(())
734 /// # }
735 /// ```
736 #[must_use]
737 pub fn zimagefile(mut self, path: impl AsRef<Path>) -> Self {
738 self.options.zimagefile = Some(path.as_ref().to_path_buf());
739 self
740 }
741
742 /// Use a runtime template instead of a Dockerfile
743 ///
744 /// Runtime templates provide pre-built Dockerfiles for common
745 /// development environments. When set, the Dockerfile option is ignored.
746 ///
747 /// # Example
748 ///
749 /// ```no_run
750 /// use zlayer_builder::{ImageBuilder, Runtime};
751 ///
752 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
753 /// let builder = ImageBuilder::new("./my-node-app").await?
754 /// .runtime(Runtime::Node20);
755 /// # Ok(())
756 /// # }
757 /// ```
758 #[must_use]
759 pub fn runtime(mut self, runtime: Runtime) -> Self {
760 self.options.runtime = Some(runtime);
761 self
762 }
763
764 /// Add a build argument
765 ///
766 /// Build arguments are passed to the Dockerfile and can be referenced
767 /// using the `ARG` instruction.
768 ///
769 /// # Example
770 ///
771 /// ```no_run
772 /// # use zlayer_builder::ImageBuilder;
773 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
774 /// let builder = ImageBuilder::new("./my-project").await?
775 /// .build_arg("VERSION", "1.0.0")
776 /// .build_arg("DEBUG", "false");
777 /// # Ok(())
778 /// # }
779 /// ```
780 #[must_use]
781 pub fn build_arg(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
782 self.options.build_args.insert(key.into(), value.into());
783 self
784 }
785
786 /// Set multiple build arguments at once
787 #[must_use]
788 pub fn build_args(mut self, args: HashMap<String, String>) -> Self {
789 self.options.build_args.extend(args);
790 self
791 }
792
793 /// Set the target stage for multi-stage builds
794 ///
795 /// When building a multi-stage Dockerfile, you can stop at a specific
796 /// stage instead of building all stages.
797 ///
798 /// # Example
799 ///
800 /// ```no_run
801 /// # use zlayer_builder::ImageBuilder;
802 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
803 /// // Dockerfile:
804 /// // FROM node:20 AS builder
805 /// // ...
806 /// // FROM node:20-slim AS runtime
807 /// // ...
808 ///
809 /// let builder = ImageBuilder::new("./my-project").await?
810 /// .target("builder")
811 /// .tag("myapp:builder");
812 /// # Ok(())
813 /// # }
814 /// ```
815 #[must_use]
816 pub fn target(mut self, stage: impl Into<String>) -> Self {
817 self.options.target = Some(stage.into());
818 self
819 }
820
821 /// Add an image tag
822 ///
823 /// Tags are applied to the final image. You can add multiple tags.
824 /// The first tag is used as the primary image name during commit.
825 ///
826 /// # Example
827 ///
828 /// ```no_run
829 /// # use zlayer_builder::ImageBuilder;
830 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
831 /// let builder = ImageBuilder::new("./my-project").await?
832 /// .tag("myapp:latest")
833 /// .tag("myapp:v1.0.0")
834 /// .tag("registry.example.com/myapp:v1.0.0");
835 /// # Ok(())
836 /// # }
837 /// ```
838 #[must_use]
839 pub fn tag(mut self, tag: impl Into<String>) -> Self {
840 self.options.tags.push(tag.into());
841 self
842 }
843
844 /// Disable layer caching
845 ///
846 /// When enabled, all layers are rebuilt from scratch even if
847 /// they could be served from cache.
848 ///
849 /// Note: Currently this flag is tracked but not fully implemented in the
850 /// build process. `ZLayer` uses manual container creation (`buildah from`,
851 /// `buildah run`, `buildah commit`) which doesn't have built-in caching
852 /// like `buildah build` does. Future work could implement layer-level
853 /// caching by checking instruction hashes against previously built layers.
854 #[must_use]
855 pub fn no_cache(mut self) -> Self {
856 self.options.no_cache = true;
857 self
858 }
859
860 /// Set the base-image pull strategy for the build.
861 ///
862 /// By default, `buildah from` is invoked with `--pull=newer`, so an
863 /// up-to-date local base image is reused but a newer one on the
864 /// registry will be fetched. Pass [`PullBaseMode::Always`] to force a
865 /// fresh pull on every build, or [`PullBaseMode::Never`] to stay fully
866 /// offline.
867 #[must_use]
868 pub fn pull(mut self, mode: PullBaseMode) -> Self {
869 self.options.pull = mode;
870 self
871 }
872
873 /// Force regeneration of `zlayer-bottles.lock` for this build.
874 ///
875 /// Only consumed by the macOS sandbox backend. Mirrors `cargo update`
876 /// semantics — the existing lockfile is ignored, every formula is
877 /// resolved live, and the file is rewritten from scratch. No-op on
878 /// non-macOS backends.
879 #[must_use]
880 pub fn update_bottles(mut self, update_bottles: bool) -> Self {
881 self.options.update_bottles = update_bottles;
882 self
883 }
884
885 /// Enable or disable layer caching
886 ///
887 /// This controls the `--layers` flag for buildah. When enabled (default),
888 /// buildah can cache and reuse intermediate layers.
889 ///
890 /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
891 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
892 /// flag is reserved for future use when/if we switch to `buildah build`.
893 ///
894 /// # Example
895 ///
896 /// ```no_run
897 /// # use zlayer_builder::ImageBuilder;
898 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
899 /// let builder = ImageBuilder::new("./my-project").await?
900 /// .layers(false) // Disable layer caching
901 /// .tag("myapp:latest");
902 /// # Ok(())
903 /// # }
904 /// ```
905 #[must_use]
906 pub fn layers(mut self, enable: bool) -> Self {
907 self.options.layers = enable;
908 self
909 }
910
911 /// Set registry to pull cache from
912 ///
913 /// This corresponds to buildah's `--cache-from` flag, which allows
914 /// pulling cached layers from a remote registry to speed up builds.
915 ///
916 /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
917 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
918 /// option is reserved for future implementation.
919 ///
920 /// TODO: Implement remote cache support. This would require either:
921 /// 1. Switching to `buildah build` command which supports --cache-from natively
922 /// 2. Implementing custom layer caching with registry pull for intermediate layers
923 ///
924 /// # Example
925 ///
926 /// ```no_run
927 /// # use zlayer_builder::ImageBuilder;
928 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
929 /// let builder = ImageBuilder::new("./my-project").await?
930 /// .cache_from("registry.example.com/myapp:cache")
931 /// .tag("myapp:latest");
932 /// # Ok(())
933 /// # }
934 /// ```
935 #[must_use]
936 pub fn cache_from(mut self, registry: impl Into<String>) -> Self {
937 self.options.cache_from = Some(registry.into());
938 self
939 }
940
941 /// Set registry to push cache to
942 ///
943 /// This corresponds to buildah's `--cache-to` flag, which allows
944 /// pushing cached layers to a remote registry for future builds to use.
945 ///
946 /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
947 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
948 /// option is reserved for future implementation.
949 ///
950 /// TODO: Implement remote cache support. This would require either:
951 /// 1. Switching to `buildah build` command which supports --cache-to natively
952 /// 2. Implementing custom layer caching with registry push for intermediate layers
953 ///
954 /// # Example
955 ///
956 /// ```no_run
957 /// # use zlayer_builder::ImageBuilder;
958 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
959 /// let builder = ImageBuilder::new("./my-project").await?
960 /// .cache_to("registry.example.com/myapp:cache")
961 /// .tag("myapp:latest");
962 /// # Ok(())
963 /// # }
964 /// ```
965 #[must_use]
966 pub fn cache_to(mut self, registry: impl Into<String>) -> Self {
967 self.options.cache_to = Some(registry.into());
968 self
969 }
970
971 /// Set maximum cache age
972 ///
973 /// This corresponds to buildah's `--cache-ttl` flag, which sets the
974 /// maximum age for cached layers before they are considered stale.
975 ///
976 /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
977 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
978 /// option is reserved for future implementation.
979 ///
980 /// TODO: Implement cache TTL support. This would require either:
981 /// 1. Switching to `buildah build` command which supports --cache-ttl natively
982 /// 2. Implementing custom cache expiration logic for our layer caching system
983 ///
984 /// # Example
985 ///
986 /// ```no_run
987 /// # use zlayer_builder::ImageBuilder;
988 /// # use std::time::Duration;
989 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
990 /// let builder = ImageBuilder::new("./my-project").await?
991 /// .cache_ttl(Duration::from_secs(3600 * 24)) // 24 hours
992 /// .tag("myapp:latest");
993 /// # Ok(())
994 /// # }
995 /// ```
996 #[must_use]
997 pub fn cache_ttl(mut self, ttl: std::time::Duration) -> Self {
998 self.options.cache_ttl = Some(ttl);
999 self
1000 }
1001
1002 /// Push the image to a registry after building
1003 ///
1004 /// # Arguments
1005 ///
1006 /// * `auth` - Registry authentication credentials
1007 ///
1008 /// # Example
1009 ///
1010 /// ```no_run
1011 /// use zlayer_builder::{ImageBuilder, RegistryAuth};
1012 ///
1013 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1014 /// let builder = ImageBuilder::new("./my-project").await?
1015 /// .tag("registry.example.com/myapp:v1.0.0")
1016 /// .push(RegistryAuth::new("user", "password"));
1017 /// # Ok(())
1018 /// # }
1019 /// ```
1020 #[must_use]
1021 pub fn push(mut self, auth: RegistryAuth) -> Self {
1022 self.options.push = true;
1023 self.options.registry_auth = Some(auth);
1024 self
1025 }
1026
1027 /// Enable pushing without authentication
1028 ///
1029 /// Use this for registries that don't require authentication
1030 /// (e.g., local registries, insecure registries).
1031 #[must_use]
1032 pub fn push_without_auth(mut self) -> Self {
1033 self.options.push = true;
1034 self.options.registry_auth = None;
1035 self
1036 }
1037
1038 /// Set a default OCI/WASM-compatible registry to check for images.
1039 ///
1040 /// When set, the builder will probe this registry for short image names
1041 /// before qualifying them to `docker.io`. For example, if set to
1042 /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
1043 /// the builder will check `git.example.com:5000/myapp:latest` first.
1044 #[must_use]
1045 pub fn default_registry(mut self, registry: impl Into<String>) -> Self {
1046 self.options.default_registry = Some(registry.into());
1047 self
1048 }
1049
/// Attach a local OCI registry used during image resolution.
///
/// When present, the local registry is consulted for cached images
/// before any remote registry is contacted.
#[cfg(feature = "local-registry")]
#[must_use]
pub fn with_local_registry(self, registry: LocalRegistry) -> Self {
    let mut builder = self;
    builder.local_registry = Some(registry);
    builder
}
1060
1061 /// Squash all layers into a single layer
1062 ///
1063 /// This reduces image size but loses layer caching benefits.
1064 #[must_use]
1065 pub fn squash(mut self) -> Self {
1066 self.options.squash = true;
1067 self
1068 }
1069
1070 /// Set the image format
1071 ///
1072 /// Valid values are "oci" (default) or "docker".
1073 #[must_use]
1074 pub fn format(mut self, format: impl Into<String>) -> Self {
1075 self.options.format = Some(format.into());
1076 self
1077 }
1078
1079 /// Set default cache mounts to inject into all RUN instructions
1080 #[must_use]
1081 pub fn default_cache_mounts(mut self, mounts: Vec<RunMount>) -> Self {
1082 self.options.default_cache_mounts = mounts;
1083 self
1084 }
1085
1086 /// Set the number of retries for failed RUN steps
1087 #[must_use]
1088 pub fn retries(mut self, retries: u32) -> Self {
1089 self.options.retries = retries;
1090 self
1091 }
1092
1093 /// Set the target platform for cross-architecture builds.
1094 #[must_use]
1095 pub fn platform(mut self, platform: impl Into<String>) -> Self {
1096 self.options.platform = Some(platform.into());
1097 self
1098 }
1099
1100 /// Set a pre-computed source hash for content-based cache invalidation.
1101 ///
1102 /// When set, the sandbox builder can skip a full rebuild if the cached
1103 /// image was produced from identical source content.
1104 #[must_use]
1105 pub fn source_hash(mut self, hash: impl Into<String>) -> Self {
1106 self.options.source_hash = Some(hash.into());
1107 self
1108 }
1109
1110 /// Set an event sender for TUI progress updates
1111 ///
1112 /// Events will be sent as the build progresses, allowing you to
1113 /// display a progress UI or log build status.
1114 ///
1115 /// # Example
1116 ///
1117 /// ```no_run
1118 /// use zlayer_builder::{ImageBuilder, BuildEvent};
1119 /// use std::sync::mpsc;
1120 ///
1121 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1122 /// let (tx, rx) = mpsc::channel::<BuildEvent>();
1123 ///
1124 /// let builder = ImageBuilder::new("./my-project").await?
1125 /// .tag("myapp:latest")
1126 /// .with_events(tx);
1127 /// # Ok(())
1128 /// # }
1129 /// ```
1130 #[must_use]
1131 pub fn with_events(mut self, tx: mpsc::Sender<BuildEvent>) -> Self {
1132 self.event_tx = Some(tx);
1133 self
1134 }
1135
/// Use a persistent on-disk cache backend for layer caching.
///
/// Layer data is stored under `path` and survives across builds, which
/// can substantially speed up repeated builds of similar images.
///
/// Requires the `cache-persistent` feature to be enabled.
///
/// # Arguments
///
/// * `path` - Cache location. A directory gets `blob_cache.redb`
///   created inside it; a file path is used verbatim.
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// let builder = ImageBuilder::new("./my-project").await?
///     .with_cache_dir("/var/cache/zlayer")
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
///
/// # Integration Status
///
/// TODO: The configured backend is stored but not yet consulted during
/// builds. Remaining wiring:
/// - Cache lookups before executing RUN instructions
/// - Storing layer data after successful execution
/// - Caching base image layers from registry pulls
#[cfg(feature = "cache-persistent")]
#[must_use]
pub fn with_cache_dir(self, path: impl AsRef<Path>) -> Self {
    let path = path.as_ref().to_path_buf();
    debug!("Configured persistent cache at: {}", path.display());
    let mut builder = self;
    builder.options.cache_backend_config = Some(CacheBackendConfig::Persistent { path });
    builder
}
1181
/// Use an in-memory cache backend for layer caching.
///
/// Contents are lost when the process exits, but repeated builds inside
/// a single session benefit from cached intermediate layers and fewer
/// redundant operations.
///
/// Requires the `cache` feature to be enabled.
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// let builder = ImageBuilder::new("./my-project").await?
///     .with_memory_cache()
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
///
/// # Integration Status
///
/// TODO: The configured backend is stored but not yet consulted during
/// builds. See `with_cache_dir` for integration status details.
#[cfg(feature = "cache")]
#[must_use]
pub fn with_memory_cache(self) -> Self {
    let mut builder = self;
    builder.options.cache_backend_config = Some(CacheBackendConfig::Memory);
    debug!("Configured in-memory cache");
    builder
}
1214
/// Use an S3-compatible storage backend for layer caching.
///
/// Useful for distributed build systems where multiple build machines
/// share a layer cache. Works with AWS S3, Cloudflare R2, Backblaze B2,
/// `MinIO`, and other S3-compatible services.
///
/// Requires the `cache-s3` feature to be enabled.
///
/// # Arguments
///
/// * `bucket` - S3 bucket name
/// * `region` - AWS region (optional, uses SDK default if not set)
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// let builder = ImageBuilder::new("./my-project").await?
///     .with_s3_cache("my-build-cache", Some("us-west-2"))
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
///
/// # Integration Status
///
/// TODO: The configured backend is stored but not yet consulted during
/// builds. See `with_cache_dir` for integration status details.
#[cfg(feature = "cache-s3")]
#[must_use]
pub fn with_s3_cache(self, bucket: impl Into<String>, region: Option<String>) -> Self {
    let config = CacheBackendConfig::S3 {
        bucket: bucket.into(),
        region,
        endpoint: None,
        prefix: None,
    };
    let mut builder = self;
    builder.options.cache_backend_config = Some(config);
    debug!("Configured S3 cache");
    builder
}
1257
/// Use an S3-compatible storage backend reached through a custom endpoint.
///
/// For S3-compatible services that require an explicit endpoint URL
/// (e.g. Cloudflare R2, `MinIO`, local development setups).
///
/// Requires the `cache-s3` feature to be enabled.
///
/// # Arguments
///
/// * `bucket` - S3 bucket name
/// * `endpoint` - Custom endpoint URL
/// * `region` - Region (required for some S3-compatible services)
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// // Cloudflare R2
/// let builder = ImageBuilder::new("./my-project").await?
///     .with_s3_cache_endpoint(
///         "my-bucket",
///         "https://accountid.r2.cloudflarestorage.com",
///         Some("auto".to_string()),
///     )
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "cache-s3")]
#[must_use]
pub fn with_s3_cache_endpoint(
    self,
    bucket: impl Into<String>,
    endpoint: impl Into<String>,
    region: Option<String>,
) -> Self {
    let config = CacheBackendConfig::S3 {
        bucket: bucket.into(),
        region,
        endpoint: Some(endpoint.into()),
        prefix: None,
    };
    let mut builder = self;
    builder.options.cache_backend_config = Some(config);
    debug!("Configured S3 cache with custom endpoint");
    builder
}
1305
/// Install a fully custom cache backend configuration.
///
/// The most flexible cache entry point: every cache setting is under
/// the caller's control.
///
/// Requires the `cache` feature to be enabled.
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// let builder = ImageBuilder::new("./my-project").await?
///     .with_cache_config(CacheBackendConfig::Memory)
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "cache")]
#[must_use]
pub fn with_cache_config(self, config: CacheBackendConfig) -> Self {
    let mut builder = self;
    builder.options.cache_backend_config = Some(config);
    debug!("Configured custom cache backend");
    builder
}
1332
/// Install an already-initialized cache backend instance.
///
/// Useful for sharing a pre-configured backend across multiple builders
/// or for fine-grained control over cache initialization.
///
/// Requires the `cache` feature to be enabled.
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
/// use zlayer_registry::cache::BlobCache;
/// use std::sync::Arc;
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// let cache = Arc::new(Box::new(BlobCache::new()?) as Box<dyn zlayer_registry::cache::BlobCacheBackend>);
///
/// let builder = ImageBuilder::new("./my-project").await?
///     .with_cache_backend(cache)
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "cache")]
#[must_use]
pub fn with_cache_backend(self, backend: Arc<Box<dyn BlobCacheBackend>>) -> Self {
    let mut builder = self;
    builder.cache_backend = Some(backend);
    debug!("Configured pre-initialized cache backend");
    builder
}
1364
/// Run the build
///
/// This executes the complete build process:
/// 1. Parse Dockerfile or load runtime template
/// 2. Build all required stages
/// 3. Commit and tag the final image
/// 4. Push to registry if configured
/// 5. Clean up intermediate containers
///
/// # Errors
///
/// Returns an error if:
/// - Dockerfile parsing fails
/// - A buildah command fails
/// - Target stage is not found
/// - Registry push fails
///
/// # Panics
///
/// Panics if an instruction output is missing after all retry attempts (internal invariant).
#[instrument(skip(self), fields(context = %self.context.display()))]
#[allow(clippy::too_many_lines)]
pub async fn build(mut self) -> Result<BuiltImage> {
    // Wall-clock timer; feeds `BuiltImage::build_time_ms` and the
    // temp-archive name in the local-registry import branch below.
    let start_time = std::time::Instant::now();

    info!("Starting build in context: {}", self.context.display());

    // 0. Resolve the effective target OS from the priority chain when the
    //    caller did not pin one explicitly. Re-detects the backend if the
    //    resolved OS differs from the one we initially probed (Linux). A
    //    pinned `target_os` wins and skips this resolution entirely.
    self.resolve_target_os_and_backend().await?;

    // 1. Get build output (Dockerfile IR or WASM artifact)
    let build_output = self.get_build_output().await?;

    // If this is a WASM build, return early with the artifact info.
    if let BuildOutput::WasmArtifact {
        wasm_path,
        // `oci_path` drives the optional push branch below; when the
        // `local-registry` feature is off the push branch is compiled
        // out, so the binding is unused.
        #[cfg_attr(not(feature = "local-registry"), allow(unused_variables))]
        oci_path,
        manifest_digest,
        artifact_type: _,
        language,
        optimized,
        size,
    } = build_output
    {
        #[allow(clippy::cast_possible_truncation)]
        let build_time_ms = start_time.elapsed().as_millis() as u64;

        // Prefer a user tag as the image id; otherwise fall back to the
        // OCI manifest digest (sha256:...), which is what WASM tooling
        // references in `oci-archive:` / `oci:` URIs. As a last resort
        // (no tag, no digest — only possible if export somehow produced
        // no digest) use a `wasm-path:` marker so downstream code can
        // tell this was a WASM build.
        let image_id = if let Some(tag) = self.options.tags.first() {
            tag.clone()
        } else if let Some(digest) = manifest_digest.as_ref() {
            format!("wasm:{digest}")
        } else {
            format!("wasm-path:{}", wasm_path.display())
        };

        // Push WASM OCI artifact(s) to the remote registry if the user
        // both supplied tags and requested a push (e.g. `zlayer build
        // -t ghcr.io/org/mod:v1 --push`). Mirrors the container flow at
        // `BuildahBackend::build_image` where `options.push` drives
        // `push_image_internal` for each tag.
        //
        // Gated on `local-registry` because `ImagePuller::push_wasm` is
        // behind the `zlayer-registry/local` feature, matching the other
        // push-to-registry sites in this crate.
        #[cfg(feature = "local-registry")]
        if let Some(oci_dir) = oci_path
            .as_ref()
            .filter(|_| self.options.push && !self.options.tags.is_empty())
        {
            self.push_wasm_oci(&wasm_path, oci_dir).await?;
        }

        // Notify any attached event listener that the build finished.
        self.send_event(BuildEvent::BuildComplete {
            image_id: image_id.clone(),
        });

        info!(
            "WASM build completed in {}ms: {} ({}, {} bytes, optimized={}, image_id={})",
            build_time_ms,
            wasm_path.display(),
            language,
            size,
            optimized,
            image_id,
        );

        return Ok(BuiltImage {
            image_id,
            tags: self.options.tags.clone(),
            layer_count: 1,
            size,
            build_time_ms,
            is_manifest: false,
        });
    }

    // Extract the Dockerfile from the BuildOutput.
    let BuildOutput::Dockerfile(dockerfile) = build_output else {
        unreachable!("WasmArtifact case handled above");
    };
    debug!("Parsed Dockerfile with {} stages", dockerfile.stages.len());

    // L-5: Static guard — catch `RUN choco install ...` /
    // `RUN winget install ...` on a nanoserver base image before we hand
    // the Dockerfile off to the backend. Nanoserver ships no package
    // manager, so without this check the build fails deep inside buildah
    // / HCS with an opaque "`choco` is not recognized" message.
    //
    // The validator is a pure AST walk; it runs regardless of the
    // resolved target OS because a Dockerfile pinning a Windows base
    // should be diagnosed the same way on a Linux build host doing a
    // cross-OS build as on a Windows host.
    if let Err(err) = crate::windows::deps::validate_dockerfile(&dockerfile) {
        return Err(BuildError::InvalidInstruction {
            instruction: "RUN".to_string(),
            reason: err.to_string(),
        });
    }

    // Delegate the build to the backend.
    let backend = self
        .backend
        .as_ref()
        .ok_or_else(|| BuildError::BuildahNotFound {
            message: "No build backend configured".into(),
        })?;

    info!("Delegating build to {} backend", backend.name());
    let built = backend
        .build_image(
            &self.context,
            &dockerfile,
            &self.options,
            self.event_tx.clone(),
        )
        .await?;

    // Import the built image into ZLayer's local registry and blob cache
    // so the runtime can find it without pulling from a remote registry.
    //
    // A user who wired up a local registry clearly wants built images to
    // live there — if the import fails (almost always EACCES on the
    // registry dir for an unprivileged user), bail with the registry path
    // in the message instead of silently producing a build that the
    // daemon can't find.
    #[cfg(feature = "local-registry")]
    if let Some(ref registry) = self.local_registry {
        if !built.tags.is_empty() {
            // Temp archive name built from the process id plus elapsed
            // nanos, written under the OS temp dir.
            let tmp_path = std::env::temp_dir().join(format!(
                "zlayer-build-{}-{}.tar",
                std::process::id(),
                start_time.elapsed().as_nanos()
            ));

            // Export the image from buildah's store to an OCI archive.
            let export_tag = &built.tags[0];
            let dest = format!("oci-archive:{}", tmp_path.display());
            let push_cmd = BuildahCommand::push_to(export_tag, &dest);

            self.executor
                .execute_checked(&push_cmd)
                .await
                .map_err(|e| BuildError::RegistryError {
                    message: format!(
                        "failed to export image to OCI archive for local registry \
                         import at {}: {e}",
                        registry.root().display()
                    ),
                })?;

            // Resolve the blob cache backend (if available).
            let blob_cache: Option<&dyn zlayer_registry::cache::BlobCacheBackend> =
                self.cache_backend.as_ref().map(|arc| arc.as_ref().as_ref());

            // Import every tag; the async block lets us capture the first
            // failure while still reaching the cleanup below.
            let import_result = async {
                for tag in &built.tags {
                    let info =
                        import_image(registry, &tmp_path, Some(tag.as_str()), blob_cache)
                            .await
                            .map_err(|e| BuildError::RegistryError {
                                message: format!(
                                    "failed to import '{tag}' into local registry at {}: {e}",
                                    registry.root().display()
                                ),
                            })?;
                    info!(
                        tag = %tag,
                        digest = %info.digest,
                        "Imported into local registry"
                    );
                }
                Ok::<(), BuildError>(())
            }
            .await;

            // Clean up the temporary archive regardless of whether the
            // import succeeded (best-effort; warn on failure).
            if let Err(e) = fs::remove_file(&tmp_path).await {
                warn!(path = %tmp_path.display(), error = %e, "Failed to remove temp OCI archive");
            }

            // Surface any import failure only after the cleanup attempt.
            import_result?;
        }
    }

    Ok(built)
}
1585
1586 /// Resolve the effective target OS for this build and re-detect the
1587 /// backend when it differs from what was probed at construction.
1588 ///
1589 /// Priority (highest first):
1590 /// 1. `self.target_os` — explicit pin from the caller (e.g. CLI `--platform`).
1591 /// 2. `ZImage::resolve_target_os()` — `os:` field, else OS parsed from
1592 /// the `platform:` field of the `ZImagefile`.
1593 /// 3. [`ImageOs::Linux`] — the historical default, applied whenever the
1594 /// `ZImagefile` has neither hint and the caller didn't pin an OS.
1595 ///
1596 /// The runtime-template and plain-Dockerfile paths never carry an OS
1597 /// hint, so they fall through to the caller's pin or the default.
1598 async fn resolve_target_os_and_backend(&mut self) -> Result<()> {
1599 // Explicit pin always wins: the backend was already detected for
1600 // this OS by `new_with_os`/`with_target_os`. Nothing to do.
1601 if self.target_os.is_some() {
1602 return Ok(());
1603 }
1604
1605 // Peek at the ZImagefile (if the caller pointed us at one, or if one
1606 // lives in the context dir). We only inspect the OS-related fields so
1607 // a malformed ZImagefile body defers its error to `get_build_output`.
1608 let zimage_path = self.options.zimagefile.clone().or_else(|| {
1609 let candidate = self.context.join("ZImagefile");
1610 candidate.exists().then_some(candidate)
1611 });
1612
1613 let Some(path) = zimage_path else {
1614 // No ZImagefile — Dockerfile / runtime template paths have no OS
1615 // metadata, so the initial Linux detection stands.
1616 return Ok(());
1617 };
1618
1619 // Let `get_build_output()` surface any real read / parse errors.
1620 let Ok(content) = fs::read_to_string(&path).await else {
1621 return Ok(());
1622 };
1623 let Ok(zimage) = crate::zimage::parse_zimagefile(&content) else {
1624 return Ok(());
1625 };
1626
1627 if let Some(resolved) = zimage.resolve_target_os() {
1628 // Re-detect only if the resolved OS differs from the one we
1629 // probed at construction. `new_with_os(None)` probes Linux, so
1630 // the common Linux case short-circuits.
1631 let initial = crate::backend::ImageOs::Linux;
1632 if resolved != initial {
1633 info!(
1634 "Re-detecting build backend for target OS {:?} (inferred from ZImagefile)",
1635 resolved
1636 );
1637 self.backend = Some(crate::backend::detect_backend(resolved).await?);
1638 }
1639 self.target_os = Some(resolved);
1640 }
1641
1642 Ok(())
1643 }
1644
1645 /// Detection order:
1646 /// 1. If `runtime` is set -> use template string -> parse as Dockerfile
1647 /// 2. If `zimagefile` is explicitly set -> read & parse `ZImagefile` -> convert
1648 /// 3. If a file called `ZImagefile` exists in the context dir -> same as (2)
1649 /// 4. Fall back to reading a Dockerfile (from `dockerfile` option or default)
1650 ///
1651 /// Returns [`BuildOutput::Dockerfile`] for container builds or
1652 /// [`BuildOutput::WasmArtifact`] for WASM builds.
1653 async fn get_build_output(&self) -> Result<BuildOutput> {
1654 // (a) Runtime template takes highest priority.
1655 if let Some(runtime) = &self.options.runtime {
1656 debug!("Using runtime template: {}", runtime);
1657 let content = get_template(*runtime);
1658 return Ok(BuildOutput::Dockerfile(Dockerfile::parse(content)?));
1659 }
1660
1661 // (b) Explicit ZImagefile path.
1662 if let Some(ref zimage_path) = self.options.zimagefile {
1663 debug!("Reading ZImagefile: {}", zimage_path.display());
1664 let content =
1665 fs::read_to_string(zimage_path)
1666 .await
1667 .map_err(|e| BuildError::ContextRead {
1668 path: zimage_path.clone(),
1669 source: e,
1670 })?;
1671 let zimage = crate::zimage::parse_zimagefile(&content)?;
1672 return self.handle_zimage(&zimage).await;
1673 }
1674
1675 // (c) Auto-detect ZImagefile in context directory.
1676 let auto_zimage_path = self.context.join("ZImagefile");
1677 if auto_zimage_path.exists() {
1678 debug!(
1679 "Found ZImagefile in context: {}",
1680 auto_zimage_path.display()
1681 );
1682 let content = fs::read_to_string(&auto_zimage_path).await.map_err(|e| {
1683 BuildError::ContextRead {
1684 path: auto_zimage_path,
1685 source: e,
1686 }
1687 })?;
1688 let zimage = crate::zimage::parse_zimagefile(&content)?;
1689 return self.handle_zimage(&zimage).await;
1690 }
1691
1692 // (d) Fall back to Dockerfile.
1693 let dockerfile_path = self
1694 .options
1695 .dockerfile
1696 .clone()
1697 .unwrap_or_else(|| self.context.join("Dockerfile"));
1698
1699 debug!("Reading Dockerfile: {}", dockerfile_path.display());
1700
1701 let content =
1702 fs::read_to_string(&dockerfile_path)
1703 .await
1704 .map_err(|e| BuildError::ContextRead {
1705 path: dockerfile_path,
1706 source: e,
1707 })?;
1708
1709 Ok(BuildOutput::Dockerfile(Dockerfile::parse(&content)?))
1710 }
1711
1712 /// Convert a parsed [`ZImage`] into a [`BuildOutput`].
1713 ///
1714 /// Handles all four `ZImage` modes:
1715 /// - **Runtime** mode: delegates to the template system -> [`BuildOutput::Dockerfile`]
1716 /// - **Single-stage / Multi-stage**: converts via [`zimage_to_dockerfile`] -> [`BuildOutput::Dockerfile`]
1717 /// - **WASM** mode: builds a WASM component -> [`BuildOutput::WasmArtifact`]
1718 ///
1719 /// Any `build:` directives are resolved first by spawning nested builds.
1720 async fn handle_zimage(&self, zimage: &crate::zimage::ZImage) -> Result<BuildOutput> {
1721 // Runtime mode: delegate to template system.
1722 if let Some(ref runtime_name) = zimage.runtime {
1723 let rt = Runtime::from_name(runtime_name).ok_or_else(|| {
1724 BuildError::zimagefile_validation(format!(
1725 "unknown runtime '{runtime_name}' in ZImagefile"
1726 ))
1727 })?;
1728 let content = get_template(rt);
1729 return Ok(BuildOutput::Dockerfile(Dockerfile::parse(content)?));
1730 }
1731
1732 // WASM mode: build a WASM component.
1733 if zimage.wasm.is_some() {
1734 return self.handle_wasm_build(zimage).await;
1735 }
1736
1737 // Resolve any `build:` directives to concrete base image tags.
1738 let resolved = self.resolve_build_directives(zimage).await?;
1739
1740 // Single-stage or multi-stage: convert to Dockerfile IR directly.
1741 Ok(BuildOutput::Dockerfile(
1742 crate::zimage::zimage_to_dockerfile(&resolved)?,
1743 ))
1744 }
1745
1746 /// Build a WASM component from the `ZImagefile` wasm configuration.
1747 ///
1748 /// Converts [`ZWasmConfig`](crate::zimage::ZWasmConfig) into a
1749 /// [`WasmBuildConfig`](crate::wasm_builder::WasmBuildConfig) and invokes
1750 /// the WASM builder pipeline.
1751 #[allow(clippy::too_many_lines)]
1752 async fn handle_wasm_build(&self, zimage: &crate::zimage::ZImage) -> Result<BuildOutput> {
1753 use crate::wasm_builder::{build_wasm, WasiTarget, WasmBuildConfig, WasmLanguage};
1754 use zlayer_registry::wasm::WasiVersion;
1755 use zlayer_registry::{export_wasm_as_oci, WasmExportConfig};
1756
1757 // Caller guarantees `zimage.wasm` is `Some`.
1758 let wasm_config = zimage.wasm.as_ref().expect(
1759 "handle_wasm_build invoked without a wasm section in ZImage; caller must check",
1760 );
1761
1762 info!("ZImagefile specifies WASM mode, running WASM build");
1763
1764 // Convert target string to WasiTarget enum.
1765 let target = match wasm_config.target.as_str() {
1766 "preview1" => WasiTarget::Preview1,
1767 _ => WasiTarget::Preview2,
1768 };
1769
1770 // Resolve language: parse from string or leave as None for auto-detection.
1771 let language = wasm_config
1772 .language
1773 .as_deref()
1774 .and_then(WasmLanguage::from_name);
1775
1776 if let Some(ref lang_str) = wasm_config.language {
1777 if language.is_none() {
1778 return Err(BuildError::zimagefile_validation(format!(
1779 "unknown WASM language '{lang_str}'. Supported: rust, go, python, \
1780 typescript, assemblyscript, c, zig"
1781 )));
1782 }
1783 }
1784
1785 // Build the WasmBuildConfig.
1786 let mut config = WasmBuildConfig {
1787 language,
1788 target,
1789 optimize: wasm_config.optimize,
1790 opt_level: wasm_config
1791 .opt_level
1792 .clone()
1793 .unwrap_or_else(|| "Oz".to_string()),
1794 wit_path: wasm_config.wit.as_ref().map(PathBuf::from),
1795 output_path: wasm_config.output.as_ref().map(PathBuf::from),
1796 world: wasm_config.world.clone(),
1797 features: wasm_config.features.clone(),
1798 build_args: wasm_config.build_args.clone(),
1799 pre_build: Vec::new(),
1800 post_build: Vec::new(),
1801 adapter: wasm_config.adapter.as_ref().map(PathBuf::from),
1802 };
1803
1804 // Convert ZCommand pre/post build steps to Vec<Vec<String>>.
1805 for cmd in &wasm_config.pre_build {
1806 config.pre_build.push(zcommand_to_args(cmd));
1807 }
1808 for cmd in &wasm_config.post_build {
1809 config.post_build.push(zcommand_to_args(cmd));
1810 }
1811
1812 // Build the WASM component.
1813 let result = build_wasm(&self.context, config).await?;
1814
1815 let language_name = result.language.name().to_string();
1816 let wasm_path = result.wasm_path;
1817 let size = result.size;
1818
1819 info!(
1820 "WASM build complete: {} ({} bytes, optimized={})",
1821 wasm_path.display(),
1822 size,
1823 wasm_config.optimize
1824 );
1825
1826 // `wasm.oci: false` opts out of OCI artifact packaging and push —
1827 // the compilation pipeline above still runs (with caching, wasm-opt,
1828 // and the preview1 -> preview2 adapter), we simply skip the layout
1829 // write and leave `oci_path`/`manifest_digest`/`artifact_type` as
1830 // `None`. The push branch in `build()` keys off `oci_path.is_some()`
1831 // so skipping it here transparently disables push for this build.
1832 if !wasm_config.oci {
1833 info!(
1834 "WASM OCI export skipped (wasm.oci = false); raw .wasm at {}",
1835 wasm_path.display()
1836 );
1837 return Ok(BuildOutput::WasmArtifact {
1838 wasm_path,
1839 oci_path: None,
1840 manifest_digest: None,
1841 artifact_type: None,
1842 language: language_name,
1843 optimized: wasm_config.optimize,
1844 size,
1845 });
1846 }
1847
1848 // Derive a module name for OCI annotations. Prefer the first tag's
1849 // repository component (`repo` from `repo:version` or `host/repo`),
1850 // falling back to the wasm file stem, then "wasm-module".
1851 let module_name = self
1852 .options
1853 .tags
1854 .first()
1855 .map(|t| module_name_from_tag(t))
1856 .or_else(|| {
1857 wasm_path
1858 .file_stem()
1859 .and_then(|s| s.to_str())
1860 .map(str::to_string)
1861 })
1862 .unwrap_or_else(|| "wasm-module".to_string());
1863
1864 // Map the selected WASI target to a WasiVersion so the export uses
1865 // the correct artifact_type without re-analyzing the binary.
1866 let wasi_version = match target {
1867 WasiTarget::Preview1 => Some(WasiVersion::Preview1),
1868 WasiTarget::Preview2 => Some(WasiVersion::Preview2),
1869 };
1870
1871 // Carry ZImage labels across as OCI manifest annotations, matching
1872 // the behaviour of container image builds that emit LABEL -> annotations.
1873 let annotations: HashMap<String, String> = zimage.labels.clone();
1874
1875 let export_config = WasmExportConfig {
1876 wasm_path: wasm_path.clone(),
1877 module_name: module_name.clone(),
1878 wasi_version,
1879 annotations,
1880 };
1881
1882 let export =
1883 export_wasm_as_oci(&export_config)
1884 .await
1885 .map_err(|e| BuildError::RegistryError {
1886 message: format!("failed to export WASM as OCI artifact: {e}"),
1887 })?;
1888
1889 // Write the OCI image layout to disk next to the WASM file. The
1890 // layout directory name is `<module>-oci`, mirroring the CLI
1891 // `zlayer wasm export` layout in bin/zlayer/src/commands/wasm.rs.
1892 let layout_parent = wasm_path
1893 .parent()
1894 .map_or_else(|| self.context.clone(), Path::to_path_buf);
1895 let oci_dir = layout_parent.join(format!("{module_name}-oci"));
1896 write_wasm_oci_layout(&oci_dir, &export, &module_name).await?;
1897
1898 info!(
1899 manifest_digest = %export.manifest_digest,
1900 artifact_type = %export.artifact_type,
1901 oci_path = %oci_dir.display(),
1902 "WASM OCI artifact written"
1903 );
1904
1905 Ok(BuildOutput::WasmArtifact {
1906 wasm_path,
1907 oci_path: Some(oci_dir),
1908 manifest_digest: Some(export.manifest_digest),
1909 artifact_type: Some(export.artifact_type),
1910 language: language_name,
1911 optimized: wasm_config.optimize,
1912 size,
1913 })
1914 }
1915
1916 /// Resolve `build:` directives in a `ZImage` by running nested builds.
1917 ///
1918 /// For each `build:` directive (top-level or per-stage), this method:
1919 /// 1. Determines the build context directory
1920 /// 2. Auto-detects the build file (`ZImagefile` > Dockerfile) unless specified
1921 /// 3. Spawns a nested `ImageBuilder` to build the context
1922 /// 4. Tags the result and replaces `build` with `base`
1923 async fn resolve_build_directives(
1924 &self,
1925 zimage: &crate::zimage::ZImage,
1926 ) -> Result<crate::zimage::ZImage> {
1927 let mut resolved = zimage.clone();
1928
1929 // Resolve top-level `build:` directive.
1930 if let Some(ref build_ctx) = resolved.build {
1931 let tag = self.run_nested_build(build_ctx, "toplevel").await?;
1932 resolved.base = Some(tag);
1933 resolved.build = None;
1934 }
1935
1936 // Resolve per-stage `build:` directives.
1937 if let Some(ref mut stages) = resolved.stages {
1938 for (name, stage) in stages.iter_mut() {
1939 if let Some(ref build_ctx) = stage.build {
1940 let tag = self.run_nested_build(build_ctx, name).await?;
1941 stage.base = Some(tag);
1942 stage.build = None;
1943 }
1944 }
1945 }
1946
1947 Ok(resolved)
1948 }
1949
    /// Run a nested build from a `build:` directive and return the resulting image tag.
    ///
    /// Thin boxing shim over `run_nested_build_inner`.
    /// NOTE(review): the explicit `Pin<Box<dyn Future>>` return (instead of
    /// `async fn`) looks like it exists to break async recursion — the inner
    /// method drives `ImageBuilder::build`, which may resolve further
    /// `build:` directives and land back here. Confirm against `build()`.
    fn run_nested_build<'a>(
        &'a self,
        build_ctx: &'a crate::zimage::types::ZBuildContext,
        stage_name: &'a str,
    ) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<String>> + Send + 'a>> {
        Box::pin(self.run_nested_build_inner(build_ctx, stage_name))
    }
1958
1959 async fn run_nested_build_inner(
1960 &self,
1961 build_ctx: &crate::zimage::types::ZBuildContext,
1962 stage_name: &str,
1963 ) -> Result<String> {
1964 let context_dir = build_ctx.context_dir(&self.context);
1965
1966 if !context_dir.exists() {
1967 return Err(BuildError::ContextRead {
1968 path: context_dir,
1969 source: std::io::Error::new(
1970 std::io::ErrorKind::NotFound,
1971 format!(
1972 "build context directory not found for build directive in '{stage_name}'"
1973 ),
1974 ),
1975 });
1976 }
1977
1978 info!(
1979 "Building nested image for '{}' from context: {}",
1980 stage_name,
1981 context_dir.display()
1982 );
1983
1984 // Create a tag for the nested build result.
1985 let tag = format!(
1986 "zlayer-build-dep-{}:{}",
1987 stage_name,
1988 chrono_lite_timestamp()
1989 );
1990
1991 // Create nested builder. Inherit the parent's target_os (if any) so
1992 // a Windows top-level build doesn't silently spawn a Linux nested
1993 // build for its `build:` dependency.
1994 let mut nested = ImageBuilder::new_with_os(&context_dir, self.target_os).await?;
1995 nested = nested.tag(&tag);
1996
1997 // Apply explicit build file if specified.
1998 if let Some(file) = build_ctx.file() {
1999 let file_path = context_dir.join(file);
2000 if std::path::Path::new(file).extension().is_some_and(|ext| {
2001 ext.eq_ignore_ascii_case("yml") || ext.eq_ignore_ascii_case("yaml")
2002 }) || file.starts_with("ZImagefile")
2003 {
2004 nested = nested.zimagefile(file_path);
2005 } else {
2006 nested = nested.dockerfile(file_path);
2007 }
2008 }
2009
2010 // Apply build args.
2011 for (key, value) in build_ctx.args() {
2012 nested = nested.build_arg(&key, &value);
2013 }
2014
2015 // Propagate default registry if set.
2016 if let Some(ref reg) = self.options.default_registry {
2017 nested = nested.default_registry(reg.clone());
2018 }
2019
2020 // Run the nested build.
2021 let result = nested.build().await?;
2022 info!(
2023 "Nested build for '{}' completed: {}",
2024 stage_name, result.image_id
2025 );
2026
2027 Ok(tag)
2028 }
2029
2030 /// Push the WASM OCI artifact produced by `handle_wasm_build` to every
2031 /// user-supplied registry tag.
2032 ///
2033 /// Mirrors the container push flow in [`BuildahBackend::build_image`]:
2034 /// when `options.push` is true, each tag in `options.tags` is pushed.
2035 /// Tags that look like bare image names (no registry host, e.g.
2036 /// `myapp:wasm`) are skipped with an info log, matching how bare tags
2037 /// are treated elsewhere — a registryless tag has nowhere to be pushed.
2038 ///
2039 /// Re-runs [`export_wasm_as_oci`] on the produced `wasm_path` to obtain
2040 /// the [`WasmExportResult`] blobs required by [`ImagePuller::push_wasm`].
2041 /// The export is deterministic (same WASM binary produces the same
2042 /// blobs and digests), so the digests match the layout on disk at
2043 /// `oci_dir` that A1.2 wrote.
2044 ///
2045 /// [`BuildahBackend::build_image`]: crate::backend::buildah::BuildahBackend
2046 /// [`export_wasm_as_oci`]: zlayer_registry::export_wasm_as_oci
2047 /// [`WasmExportResult`]: zlayer_registry::WasmExportResult
2048 /// [`ImagePuller::push_wasm`]: zlayer_registry::ImagePuller::push_wasm
2049 #[cfg(feature = "local-registry")]
2050 async fn push_wasm_oci(&self, wasm_path: &Path, oci_dir: &Path) -> Result<()> {
2051 use zlayer_registry::wasm::WasiVersion;
2052 use zlayer_registry::{export_wasm_as_oci, BlobCache, ImagePuller, WasmExportConfig};
2053
2054 // Derive the module name the same way `handle_wasm_build` did so the
2055 // re-exported artifact carries identical OCI annotations.
2056 let module_name = self
2057 .options
2058 .tags
2059 .first()
2060 .map(|t| module_name_from_tag(t))
2061 .or_else(|| {
2062 wasm_path
2063 .file_stem()
2064 .and_then(|s| s.to_str())
2065 .map(str::to_string)
2066 })
2067 .unwrap_or_else(|| "wasm-module".to_string());
2068
2069 // Reconstruct the export result from the on-disk WASM binary. The
2070 // `wasi_version` is left `None` so it is re-detected from the binary
2071 // (matches whatever A1.2 wrote unless the user mutated the file).
2072 let export_config = WasmExportConfig {
2073 wasm_path: wasm_path.to_path_buf(),
2074 module_name,
2075 wasi_version: None::<WasiVersion>,
2076 annotations: HashMap::new(),
2077 };
2078 let export =
2079 export_wasm_as_oci(&export_config)
2080 .await
2081 .map_err(|e| BuildError::RegistryError {
2082 message: format!(
2083 "failed to re-export WASM for push from {}: {e}",
2084 wasm_path.display()
2085 ),
2086 })?;
2087
2088 // Build the puller once; reuse for every tag.
2089 let cache = BlobCache::new().map_err(|e| BuildError::RegistryError {
2090 message: format!("failed to create blob cache for WASM push: {e}"),
2091 })?;
2092 let puller = ImagePuller::new(cache);
2093
2094 for tag in &self.options.tags {
2095 if !tag_has_registry_host(tag) {
2096 info!(
2097 "Skipping WASM push for bare tag '{}' (no registry host); \
2098 OCI layout still available at {}",
2099 tag,
2100 oci_dir.display()
2101 );
2102 continue;
2103 }
2104
2105 let oci_auth = Self::resolve_wasm_push_auth(self.options.registry_auth.as_ref());
2106
2107 info!("Pushing WASM artifact: {}", tag);
2108 let push_result = puller
2109 .push_wasm(tag, &export, &oci_auth)
2110 .await
2111 .map_err(|e| BuildError::RegistryError {
2112 message: format!("failed to push WASM artifact '{tag}': {e}"),
2113 })?;
2114 info!(
2115 "Pushed WASM artifact: {} (manifest digest: {})",
2116 tag, push_result.manifest_digest
2117 );
2118 }
2119
2120 Ok(())
2121 }
2122
2123 /// Resolve registry auth for a WASM push.
2124 ///
2125 /// Uses the explicitly provided credentials when set; otherwise falls
2126 /// back to anonymous. Mirrors the minimal behaviour of the buildah push
2127 /// path (`--creds user:pass` when provided, otherwise let the registry
2128 /// decide).
2129 #[cfg(feature = "local-registry")]
2130 fn resolve_wasm_push_auth(auth: Option<&RegistryAuth>) -> zlayer_registry::RegistryAuth {
2131 match auth {
2132 Some(a) => zlayer_registry::RegistryAuth::Basic(a.username.clone(), a.password.clone()),
2133 None => zlayer_registry::RegistryAuth::Anonymous,
2134 }
2135 }
2136
2137 /// Send an event to the TUI (if configured)
2138 fn send_event(&self, event: BuildEvent) {
2139 if let Some(tx) = &self.event_tx {
2140 // Ignore send errors - the receiver may have been dropped
2141 let _ = tx.send(event);
2142 }
2143 }
2144}
2145
/// Generate a timestamp-based name component: seconds since the Unix epoch
/// as a decimal string. Falls back to `"0"` if the system clock reads as
/// earlier than the epoch (`duration_since` error swallowed via default).
fn chrono_lite_timestamp() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
        .to_string()
}
2154
2155/// Convert a [`ZCommand`](crate::zimage::ZCommand) into a vector of string arguments
2156/// suitable for passing to [`WasmBuildConfig`](crate::wasm_builder::WasmBuildConfig)
2157/// pre/post build command lists.
2158fn zcommand_to_args(cmd: &crate::zimage::ZCommand) -> Vec<String> {
2159 match cmd {
2160 crate::zimage::ZCommand::Shell(s) => {
2161 vec!["/bin/sh".to_string(), "-c".to_string(), s.clone()]
2162 }
2163 crate::zimage::ZCommand::Exec(args) => args.clone(),
2164 }
2165}
2166
/// Extract a short "module name" suitable for OCI annotations from an image
/// tag. Strips any registry host, leading path segments, and tag/digest.
///
/// Examples:
/// - `myapp:latest` -> `myapp`
/// - `ghcr.io/org/myapp:v1.2.3` -> `myapp`
/// - `myapp@sha256:...` -> `myapp`
fn module_name_from_tag(tag: &str) -> String {
    // Keep only the final path segment, then trim `:tag` and `@digest`.
    let name = tag.rsplit('/').next().unwrap_or(tag);
    let name = name.split(':').next().unwrap_or(name);
    let name = name.split('@').next().unwrap_or(name);
    name.to_string()
}
2180
/// Heuristic: does `tag` include an explicit registry host?
///
/// Used to decide which tags are push-eligible. A tag is treated as
/// registry-qualified when it has at least one `/` and the first path
/// component looks like a host — it contains a `.` (FQDN like `ghcr.io`,
/// `registry.example.com`), a `:` (host:port like `localhost:5000`), or
/// equals the literal `localhost`. Bare names like `myapp:wasm` and
/// Docker-Hub-style `org/app:v1` are skipped because there is no explicit
/// registry to push to.
#[cfg(feature = "local-registry")]
fn tag_has_registry_host(tag: &str) -> bool {
    match tag.split_once('/') {
        // No `/` means the whole string is `name[:tag]` with no host component.
        None => false,
        Some((host, _)) => host == "localhost" || host.contains('.') || host.contains(':'),
    }
}
2201
2202/// Write an OCI image layout directory (`oci-layout`, `index.json`,
2203/// `blobs/sha256/...`) for a WASM artifact on disk. This mirrors the layout
2204/// emitted by the `zlayer wasm export` CLI command so the directory can be
2205/// consumed by tools that expect a standard OCI layout.
2206async fn write_wasm_oci_layout(
2207 oci_dir: &Path,
2208 export: &zlayer_registry::WasmExportResult,
2209 ref_name: &str,
2210) -> Result<()> {
2211 let map_io = |path: PathBuf| {
2212 move |e: std::io::Error| BuildError::ContextRead {
2213 path: path.clone(),
2214 source: e,
2215 }
2216 };
2217
2218 fs::create_dir_all(oci_dir)
2219 .await
2220 .map_err(map_io(oci_dir.to_path_buf()))?;
2221
2222 // `oci-layout` marker file.
2223 let layout_marker = oci_dir.join("oci-layout");
2224 let oci_layout = serde_json::json!({ "imageLayoutVersion": "1.0.0" });
2225 fs::write(
2226 &layout_marker,
2227 serde_json::to_vec_pretty(&oci_layout).map_err(|e| BuildError::RegistryError {
2228 message: format!("failed to serialize oci-layout marker: {e}"),
2229 })?,
2230 )
2231 .await
2232 .map_err(map_io(layout_marker.clone()))?;
2233
2234 // `blobs/sha256/` directory.
2235 let blobs_dir = oci_dir.join("blobs").join("sha256");
2236 fs::create_dir_all(&blobs_dir)
2237 .await
2238 .map_err(map_io(blobs_dir.clone()))?;
2239
2240 // Write config, wasm-layer, and manifest blobs under their digests.
2241 let write_blob = |digest: &str, data: &[u8]| {
2242 let hash = digest.strip_prefix("sha256:").unwrap_or(digest).to_string();
2243 let path = blobs_dir.join(hash);
2244 let data = data.to_vec();
2245 async move {
2246 fs::write(&path, &data)
2247 .await
2248 .map_err(map_io(path.clone()))?;
2249 Ok::<(), BuildError>(())
2250 }
2251 };
2252
2253 write_blob(&export.config_digest, &export.config_blob).await?;
2254 write_blob(&export.wasm_layer_digest, &export.wasm_binary).await?;
2255 write_blob(&export.manifest_digest, &export.manifest_json).await?;
2256
2257 // Write `index.json` pointing at the manifest.
2258 let index = serde_json::json!({
2259 "schemaVersion": 2,
2260 "mediaType": "application/vnd.oci.image.index.v1+json",
2261 "manifests": [{
2262 "mediaType": "application/vnd.oci.image.manifest.v1+json",
2263 "digest": export.manifest_digest,
2264 "size": export.manifest_size,
2265 "artifactType": export.artifact_type,
2266 "annotations": {
2267 "org.opencontainers.image.ref.name": ref_name,
2268 }
2269 }]
2270 });
2271 let index_path = oci_dir.join("index.json");
2272 fs::write(
2273 &index_path,
2274 serde_json::to_vec_pretty(&index).map_err(|e| BuildError::RegistryError {
2275 message: format!("failed to serialize OCI index.json: {e}"),
2276 })?,
2277 )
2278 .await
2279 .map_err(map_io(index_path.clone()))?;
2280
2281 Ok(())
2282}
2283
#[cfg(test)]
mod tests {
    use super::*;

    // `RegistryAuth::new` stores the credentials verbatim.
    #[test]
    fn test_registry_auth_new() {
        let auth = RegistryAuth::new("user", "pass");
        assert_eq!(auth.username, "user");
        assert_eq!(auth.password, "pass");
    }

    // Every option starts unset/false, except `layers` which defaults to true.
    #[test]
    fn test_build_options_default() {
        let opts = BuildOptions::default();
        assert!(opts.dockerfile.is_none());
        assert!(opts.zimagefile.is_none());
        assert!(opts.runtime.is_none());
        assert!(opts.build_args.is_empty());
        assert!(opts.target.is_none());
        assert!(opts.tags.is_empty());
        assert!(!opts.no_cache);
        assert!(!opts.push);
        assert!(!opts.squash);
        // New cache-related fields
        assert!(opts.layers); // Default is true
        assert!(opts.cache_from.is_none());
        assert!(opts.cache_to.is_none());
        assert!(opts.cache_ttl.is_none());
        // Cache backend config (only with cache feature)
        #[cfg(feature = "cache")]
        assert!(opts.cache_backend_config.is_none());
    }

    // Shared fixture: an ImageBuilder constructed field-by-field so the tests
    // never touch the async `new()` path (which probes the filesystem).
    fn create_test_builder() -> ImageBuilder {
        // Create a minimal builder for testing (without async initialization)
        ImageBuilder {
            context: PathBuf::from("/tmp/test"),
            options: BuildOptions::default(),
            executor: BuildahExecutor::with_path("/usr/bin/buildah"),
            event_tx: None,
            target_os: None,
            backend: None,
            #[cfg(feature = "cache")]
            cache_backend: None,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        }
    }

    // Builder method chaining tests
    #[test]
    fn test_builder_chaining() {
        let mut builder = create_test_builder();

        builder = builder
            .dockerfile("./Dockerfile.test")
            .runtime(Runtime::Node20)
            .build_arg("VERSION", "1.0")
            .target("builder")
            .tag("myapp:latest")
            .tag("myapp:v1")
            .no_cache()
            .squash()
            .format("oci");

        assert_eq!(
            builder.options.dockerfile,
            Some(PathBuf::from("./Dockerfile.test"))
        );
        assert_eq!(builder.options.runtime, Some(Runtime::Node20));
        assert_eq!(
            builder.options.build_args.get("VERSION"),
            Some(&"1.0".to_string())
        );
        assert_eq!(builder.options.target, Some("builder".to_string()));
        assert_eq!(builder.options.tags.len(), 2);
        assert!(builder.options.no_cache);
        assert!(builder.options.squash);
        assert_eq!(builder.options.format, Some("oci".to_string()));
    }

    // `push(auth)` sets the push flag and records the credentials.
    #[test]
    fn test_builder_push_with_auth() {
        let mut builder = create_test_builder();
        builder = builder.push(RegistryAuth::new("user", "pass"));

        assert!(builder.options.push);
        assert!(builder.options.registry_auth.is_some());
        let auth = builder.options.registry_auth.unwrap();
        assert_eq!(auth.username, "user");
        assert_eq!(auth.password, "pass");
    }

    // `push_without_auth()` sets the push flag but leaves credentials unset.
    #[test]
    fn test_builder_push_without_auth() {
        let mut builder = create_test_builder();
        builder = builder.push_without_auth();

        assert!(builder.options.push);
        assert!(builder.options.registry_auth.is_none());
    }

    // `layers(bool)` toggles the flag in both directions.
    #[test]
    fn test_builder_layers() {
        let mut builder = create_test_builder();
        // Default is true
        assert!(builder.options.layers);

        // Disable layers
        builder = builder.layers(false);
        assert!(!builder.options.layers);

        // Re-enable layers
        builder = builder.layers(true);
        assert!(builder.options.layers);
    }

    // `cache_from` stores the cache-source reference as given.
    #[test]
    fn test_builder_cache_from() {
        let mut builder = create_test_builder();
        assert!(builder.options.cache_from.is_none());

        builder = builder.cache_from("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_from,
            Some("registry.example.com/myapp:cache".to_string())
        );
    }

    // `cache_to` stores the cache-destination reference as given.
    #[test]
    fn test_builder_cache_to() {
        let mut builder = create_test_builder();
        assert!(builder.options.cache_to.is_none());

        builder = builder.cache_to("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_to,
            Some("registry.example.com/myapp:cache".to_string())
        );
    }

    // `cache_ttl` stores the duration verbatim.
    #[test]
    fn test_builder_cache_ttl() {
        use std::time::Duration;

        let mut builder = create_test_builder();
        assert!(builder.options.cache_ttl.is_none());

        builder = builder.cache_ttl(Duration::from_secs(3600));
        assert_eq!(builder.options.cache_ttl, Some(Duration::from_secs(3600)));
    }

    // All cache-related setters compose in a single chain without clobbering
    // each other.
    #[test]
    fn test_builder_cache_options_chaining() {
        use std::time::Duration;

        let builder = create_test_builder()
            .layers(true)
            .cache_from("registry.example.com/cache:input")
            .cache_to("registry.example.com/cache:output")
            .cache_ttl(Duration::from_secs(7200))
            .no_cache();

        assert!(builder.options.layers);
        assert_eq!(
            builder.options.cache_from,
            Some("registry.example.com/cache:input".to_string())
        );
        assert_eq!(
            builder.options.cache_to,
            Some("registry.example.com/cache:output".to_string())
        );
        assert_eq!(builder.options.cache_ttl, Some(Duration::from_secs(7200)));
        assert!(builder.options.no_cache);
    }

    // The timestamp helper yields a parsable, plausibly-current epoch value.
    #[test]
    fn test_chrono_lite_timestamp() {
        let ts = chrono_lite_timestamp();
        // Should be a valid number
        let parsed: u64 = ts.parse().expect("Should be a valid u64");
        // Should be reasonably recent (after 2024)
        assert!(parsed > 1_700_000_000);
    }
}