zlayer_builder/builder.rs
1//! `ImageBuilder` - High-level API for building container images
2//!
3//! This module provides the [`ImageBuilder`] type which orchestrates the full
4//! container image build process, from Dockerfile parsing through buildah
5//! execution to final image creation.
6//!
7//! # Example
8//!
9//! ```no_run
//! use zlayer_builder::ImageBuilder;
11//!
12//! #[tokio::main]
13//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
14//! // Build from a Dockerfile
15//! let image = ImageBuilder::new("./my-app").await?
16//! .tag("myapp:latest")
17//! .tag("myapp:v1.0.0")
18//! .build()
19//! .await?;
20//!
21//! println!("Built image: {}", image.image_id);
22//! Ok(())
23//! }
24//! ```
25//!
26//! # Using Runtime Templates
27//!
28//! ```no_run
29//! use zlayer_builder::{ImageBuilder, Runtime};
30//!
31//! #[tokio::main]
32//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
33//! // Build using a runtime template (no Dockerfile needed)
34//! let image = ImageBuilder::new("./my-node-app").await?
35//! .runtime(Runtime::Node20)
36//! .tag("myapp:latest")
37//! .build()
38//! .await?;
39//!
40//! println!("Built image: {}", image.image_id);
41//! Ok(())
42//! }
43//! ```
44//!
45//! # Multi-stage Builds with Target
46//!
47//! ```no_run
48//! use zlayer_builder::ImageBuilder;
49//!
50//! #[tokio::main]
51//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
52//! // Build only up to a specific stage
53//! let image = ImageBuilder::new("./my-app").await?
54//! .target("builder")
55//! .tag("myapp:builder")
56//! .build()
57//! .await?;
58//!
59//! println!("Built intermediate image: {}", image.image_id);
60//! Ok(())
61//! }
62//! ```
63//!
64//! # With TUI Progress Updates
65//!
66//! ```no_run
67//! use zlayer_builder::{ImageBuilder, BuildEvent};
68//! use std::sync::mpsc;
69//!
70//! #[tokio::main]
71//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
72//! let (tx, rx) = mpsc::channel::<BuildEvent>();
73//!
74//! // Start TUI in another thread
75//! std::thread::spawn(move || {
76//! // Process events from rx...
77//! while let Ok(event) = rx.recv() {
78//! println!("Event: {:?}", event);
79//! }
80//! });
81//!
//!     let _image = ImageBuilder::new("./my-app").await?
83//! .tag("myapp:latest")
84//! .with_events(tx)
85//! .build()
86//! .await?;
87//!
88//! Ok(())
89//! }
90//! ```
91//!
92//! # With Cache Backend (requires `cache` feature)
93//!
94//! ```no_run,ignore
95//! use zlayer_builder::ImageBuilder;
96//!
97//! #[tokio::main]
98//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
99//! let image = ImageBuilder::new("./my-app").await?
100//! .with_cache_dir("/var/cache/zlayer") // Use persistent disk cache
101//! .tag("myapp:latest")
102//! .build()
103//! .await?;
104//!
105//! println!("Built image: {}", image.image_id);
106//! Ok(())
107//! }
108//! ```
109
110use std::collections::HashMap;
111use std::path::{Path, PathBuf};
112use std::sync::mpsc;
113use std::sync::Arc;
114
115use tokio::fs;
116use tracing::{debug, info, instrument, warn};
117
118use crate::backend::BuildBackend;
119use crate::buildah::{BuildahCommand, BuildahExecutor};
120use crate::dockerfile::{Dockerfile, RunMount};
121use crate::error::{BuildError, Result};
122use crate::templates::{get_template, Runtime};
123use crate::tui::BuildEvent;
124
125#[cfg(feature = "cache")]
126use zlayer_registry::cache::BlobCacheBackend;
127
128#[cfg(feature = "local-registry")]
129use zlayer_registry::LocalRegistry;
130
131#[cfg(feature = "local-registry")]
132use zlayer_registry::import_image;
133
/// Output from parsing a `ZImagefile` - either a Dockerfile for container builds
/// or a WASM build result for WebAssembly builds.
///
/// Most `ZImagefile` modes (runtime, single-stage, multi-stage) produce a
/// [`Dockerfile`] IR that is then built with buildah. WASM mode produces
/// a compiled artifact directly, bypassing the container build pipeline.
#[derive(Debug)]
pub enum BuildOutput {
    /// Standard container build - produces a Dockerfile to be built with buildah.
    Dockerfile(Dockerfile),
    /// WASM component build - already built, produces artifact path.
    WasmArtifact {
        /// Path to the compiled WASM binary.
        wasm_path: PathBuf,
        /// Path to the OCI artifact directory; `None` when the artifact
        /// was not exported as an OCI layout.
        oci_path: Option<PathBuf>,
        /// Source language used.
        language: String,
        /// Whether optimization was applied.
        optimized: bool,
        /// Size of the compiled output file in bytes.
        size: u64,
    },
}
158
/// Configuration for the layer cache backend.
///
/// This enum specifies which cache backend to use for storing and retrieving
/// cached layers during builds. The `cache` feature must be enabled for this
/// to be available.
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// // Use persistent disk cache
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_config(CacheBackendConfig::Persistent {
///         path: "/var/cache/zlayer".into(),
///     })
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "cache")]
#[derive(Debug, Clone, Default)]
pub enum CacheBackendConfig {
    /// In-memory cache (cleared when the process exits).
    ///
    /// Useful for CI/CD environments where persistence isn't needed
    /// but you want to avoid re-downloading base image layers within
    /// a single build session.
    #[default]
    Memory,

    /// Persistent disk-based cache using redb.
    ///
    /// Requires the `cache-persistent` feature. Layers are stored on disk
    /// and persist across builds, significantly speeding up repeated builds.
    #[cfg(feature = "cache-persistent")]
    Persistent {
        /// Path to the cache directory or database file.
        /// If a directory, `blob_cache.redb` will be created inside it.
        path: PathBuf,
    },

    /// S3-compatible object storage backend.
    ///
    /// Requires the `cache-s3` feature. Useful for distributed build systems
    /// where multiple build machines need to share a cache.
    #[cfg(feature = "cache-s3")]
    S3 {
        /// S3 bucket name.
        bucket: String,
        /// AWS region; `None` falls back to the SDK's default resolution.
        region: Option<String>,
        /// Custom endpoint URL (for S3-compatible services like R2, B2, `MinIO`).
        endpoint: Option<String>,
        /// Key prefix for cached blobs (default: "zlayer/layers/").
        prefix: Option<String>,
    },
}
218
/// Built image information returned after a successful build.
#[derive(Debug, Clone)]
pub struct BuiltImage {
    /// Image ID (`sha256:...`).
    pub image_id: String,
    /// Tags applied to the image.
    pub tags: Vec<String>,
    /// Number of layers in the final image.
    pub layer_count: usize,
    /// Total size in bytes (0 if not computed).
    pub size: u64,
    /// Build duration in milliseconds.
    pub build_time_ms: u64,
    /// Whether this image is a manifest list (multi-arch).
    pub is_manifest: bool,
}
235
/// Registry authentication credentials.
///
/// The [`Debug`] implementation redacts the password so that credentials
/// are never leaked into logs, traces, or error output.
#[derive(Clone)]
pub struct RegistryAuth {
    /// Registry username
    pub username: String,
    /// Registry password or token
    pub password: String,
}

// Manual Debug: deriving it would print the raw password/token whenever the
// struct (or anything containing it, e.g. `BuildOptions`) is debug-logged.
impl std::fmt::Debug for RegistryAuth {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("RegistryAuth")
            .field("username", &self.username)
            .field("password", &"<redacted>")
            .finish()
    }
}

impl RegistryAuth {
    /// Create new registry authentication
    pub fn new(username: impl Into<String>, password: impl Into<String>) -> Self {
        Self {
            username: username.into(),
            password: password.into(),
        }
    }
}
254
/// Strategy for pulling the base image before building.
///
/// Controls the `--pull` flag passed to `buildah from`. The default is
/// [`PullBaseMode::Newer`], matching the behaviour users expect from
/// modern build tools: fast when nothing has changed, correct when the
/// upstream base image has been republished.
///
/// Configured per build via [`ImageBuilder::pull`] or [`BuildOptions::pull`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum PullBaseMode {
    /// Pull only if the registry has a newer version (`--pull=newer`).
    /// Default behaviour.
    #[default]
    Newer,
    /// Always pull, even if a local copy exists (`--pull=always`).
    Always,
    /// Never pull — use whatever is in local storage (no `--pull` flag passed).
    Never,
}
272
/// Build options for customizing the image build process.
#[derive(Debug, Clone)]
#[allow(clippy::struct_excessive_bools)]
pub struct BuildOptions {
    /// Dockerfile path (default: Dockerfile in context)
    pub dockerfile: Option<PathBuf>,
    /// `ZImagefile` path (alternative to Dockerfile)
    pub zimagefile: Option<PathBuf>,
    /// Use runtime template instead of Dockerfile
    pub runtime: Option<Runtime>,
    /// Build arguments (ARG values)
    pub build_args: HashMap<String, String>,
    /// Target stage for multi-stage builds
    pub target: Option<String>,
    /// Image tags to apply. The first tag is used as the primary image
    /// name during commit.
    pub tags: Vec<String>,
    /// Disable layer caching
    pub no_cache: bool,
    /// Push to registry after build
    pub push: bool,
    /// Registry auth (if pushing; `None` allows anonymous pushes)
    pub registry_auth: Option<RegistryAuth>,
    /// Squash all layers into one
    pub squash: bool,
    /// Image format (oci or docker)
    pub format: Option<String>,
    /// Enable buildah layer caching (--layers flag for `buildah build`).
    /// Default: true
    ///
    /// Note: `ZLayer` uses manual container creation (`buildah from`, `buildah run`,
    /// `buildah commit`) rather than `buildah build`, so this flag is reserved
    /// for future use when/if we switch to `buildah build` (bud) command.
    pub layers: bool,
    /// Registry to pull cache from (--cache-from for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-from=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-from natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_from: Option<String>,
    /// Registry to push cache to (--cache-to for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-to=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-to natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_to: Option<String>,
    /// Maximum cache age (--cache-ttl for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-ttl=<duration>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement cache TTL support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-ttl natively
    /// 2. Implementing custom cache expiration logic for our layer caching system
    pub cache_ttl: Option<std::time::Duration>,
    /// Cache backend configuration (requires `cache` feature).
    ///
    /// When configured, the builder will store layer data in the specified
    /// cache backend for faster subsequent builds. This is separate from
    /// buildah's native caching and operates at the `ZLayer` level.
    ///
    /// # Integration Points
    ///
    /// The cache backend is used at several points during the build:
    ///
    /// 1. **Before instruction execution**: Check if a cached layer exists
    ///    for the (`instruction_hash`, `base_layer`) tuple
    /// 2. **After instruction execution**: Store the resulting layer data
    ///    in the cache for future builds
    /// 3. **Base image layers**: Cache pulled base image layers to avoid
    ///    re-downloading from registries
    ///
    /// TODO: Wire up cache lookups in the build loop once layer digests
    /// are properly computed and tracked.
    #[cfg(feature = "cache")]
    pub cache_backend_config: Option<CacheBackendConfig>,
    /// Default OCI/WASM-compatible registry to check for images before falling
    /// back to Docker Hub qualification.
    ///
    /// When set, the builder will probe this registry for short image names
    /// before qualifying them to `docker.io`. For example, if set to
    /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
    /// the builder will check `git.example.com:5000/myapp:latest` first.
    pub default_registry: Option<String>,
    /// Default cache mounts injected into all RUN instructions.
    /// These are merged with any step-level cache mounts (deduped by target path).
    pub default_cache_mounts: Vec<RunMount>,
    /// Number of retries for failed RUN steps (0 = no retries, default)
    pub retries: u32,
    /// Target platform for the build (e.g., "linux/amd64", "linux/arm64").
    /// When set, `buildah from` pulls the platform-specific image variant.
    pub platform: Option<String>,
    /// SHA-256 hash of the source Dockerfile/ZImagefile content.
    ///
    /// When set, the sandbox builder can skip a rebuild if the cached image
    /// was produced from identical source content (content-based invalidation).
    pub source_hash: Option<String>,
    /// How to handle base-image pulling during `buildah from`.
    ///
    /// Default: [`PullBaseMode::Newer`] — only pull if the registry has a
    /// newer version. Set to [`PullBaseMode::Always`] for CI builds that
    /// must always refresh, or [`PullBaseMode::Never`] for offline builds.
    pub pull: PullBaseMode,
}
385
386impl Default for BuildOptions {
387 fn default() -> Self {
388 Self {
389 dockerfile: None,
390 zimagefile: None,
391 runtime: None,
392 build_args: HashMap::new(),
393 target: None,
394 tags: Vec::new(),
395 no_cache: false,
396 push: false,
397 registry_auth: None,
398 squash: false,
399 format: None,
400 layers: true,
401 cache_from: None,
402 cache_to: None,
403 cache_ttl: None,
404 #[cfg(feature = "cache")]
405 cache_backend_config: None,
406 default_registry: None,
407 default_cache_mounts: Vec::new(),
408 retries: 0,
409 platform: None,
410 source_hash: None,
411 pull: PullBaseMode::default(),
412 }
413 }
414}
415
/// Image builder - orchestrates the full build process
///
/// `ImageBuilder` provides a fluent API for configuring and executing
/// container image builds using buildah as the backend.
///
/// # Build Process
///
/// 1. Parse Dockerfile (or use runtime template)
/// 2. Resolve target stages if specified
/// 3. Build each stage sequentially:
///    - Create working container from base image
///    - Execute each instruction
///    - Commit intermediate stages for COPY --from
/// 4. Commit final image with tags
/// 5. Push to registry if configured
/// 6. Clean up intermediate containers
///
/// # Cache Backend Integration (requires `cache` feature)
///
/// When a cache backend is configured, the builder can store and retrieve
/// cached layer data to speed up subsequent builds:
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
///
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_dir("/var/cache/zlayer")
///     .tag("myapp:latest");
/// ```
pub struct ImageBuilder {
    /// Build context directory (validated to exist at construction time).
    context: PathBuf,
    /// Build options accumulated via the fluent setter methods.
    options: BuildOptions,
    /// Buildah executor (kept for backwards compatibility; only used when
    /// no `backend` is set).
    #[allow(dead_code)]
    executor: BuildahExecutor,
    /// Event sender for TUI updates; `None` disables event emission.
    event_tx: Option<mpsc::Sender<BuildEvent>>,
    /// Pluggable build backend (buildah, sandbox, etc.).
    ///
    /// When set, the `build()` method delegates to this backend instead of
    /// using the inline buildah logic. Set automatically by `new()` via
    /// `detect_backend()`, or explicitly via `with_backend()`.
    backend: Option<Arc<dyn BuildBackend>>,
    /// Cache backend for layer caching (requires `cache` feature).
    ///
    /// When set, the builder will attempt to retrieve cached layers before
    /// executing instructions, and store results in the cache after execution.
    ///
    /// TODO: Implement cache lookups in the build loop. Currently the backend
    /// is stored but not actively used during builds. Integration points:
    /// - Check cache before executing RUN instructions
    /// - Store layer data after successful instruction execution
    /// - Cache base image layers pulled from registries
    #[cfg(feature = "cache")]
    cache_backend: Option<Arc<Box<dyn BlobCacheBackend>>>,
    /// Local OCI registry for checking cached images before remote pulls.
    #[cfg(feature = "local-registry")]
    local_registry: Option<LocalRegistry>,
}
477
478impl ImageBuilder {
479 /// Create a new `ImageBuilder` with the given context directory
480 ///
481 /// The context directory should contain the Dockerfile (unless using
482 /// a runtime template) and any files that will be copied into the image.
483 ///
484 /// # Arguments
485 ///
486 /// * `context` - Path to the build context directory
487 ///
488 /// # Errors
489 ///
490 /// Returns an error if:
491 /// - The context directory does not exist
492 /// - Buildah is not installed or not accessible
493 ///
494 /// # Example
495 ///
496 /// ```no_run
497 /// use zlayer_builder::ImageBuilder;
498 ///
499 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
500 /// let builder = ImageBuilder::new("./my-project").await?;
501 /// # Ok(())
502 /// # }
503 /// ```
504 #[instrument(skip_all, fields(context = %context.as_ref().display()))]
505 pub async fn new(context: impl AsRef<Path>) -> Result<Self> {
506 let context = context.as_ref().to_path_buf();
507
508 // Verify context exists
509 if !context.exists() {
510 return Err(BuildError::ContextRead {
511 path: context,
512 source: std::io::Error::new(
513 std::io::ErrorKind::NotFound,
514 "Build context directory not found",
515 ),
516 });
517 }
518
519 // Detect the best available build backend for this platform.
520 let backend = crate::backend::detect_backend().await.ok();
521
522 // Initialize buildah executor.
523 // On macOS, if buildah is not found we fall back to a default executor
524 // (the backend will handle the actual build dispatch).
525 let executor = match BuildahExecutor::new_async().await {
526 Ok(exec) => exec,
527 #[cfg(target_os = "macos")]
528 Err(_) => {
529 info!("Buildah not found on macOS; backend will handle build dispatch");
530 BuildahExecutor::default()
531 }
532 #[cfg(not(target_os = "macos"))]
533 Err(e) => return Err(e),
534 };
535
536 debug!("Created ImageBuilder for context: {}", context.display());
537
538 Ok(Self {
539 context,
540 options: BuildOptions::default(),
541 executor,
542 event_tx: None,
543 backend,
544 #[cfg(feature = "cache")]
545 cache_backend: None,
546 #[cfg(feature = "local-registry")]
547 local_registry: None,
548 })
549 }
550
551 /// Create an `ImageBuilder` with a custom buildah executor
552 ///
553 /// This is useful for testing or when you need to configure
554 /// the executor with specific storage options. The executor is
555 /// wrapped in a [`BuildahBackend`] so the build dispatches through
556 /// the [`BuildBackend`] trait.
557 ///
558 /// # Errors
559 ///
560 /// Returns an error if the context directory does not exist.
561 pub fn with_executor(context: impl AsRef<Path>, executor: BuildahExecutor) -> Result<Self> {
562 let context = context.as_ref().to_path_buf();
563
564 if !context.exists() {
565 return Err(BuildError::ContextRead {
566 path: context,
567 source: std::io::Error::new(
568 std::io::ErrorKind::NotFound,
569 "Build context directory not found",
570 ),
571 });
572 }
573
574 let backend: Arc<dyn BuildBackend> = Arc::new(
575 crate::backend::BuildahBackend::with_executor(executor.clone()),
576 );
577
578 Ok(Self {
579 context,
580 options: BuildOptions::default(),
581 executor,
582 event_tx: None,
583 backend: Some(backend),
584 #[cfg(feature = "cache")]
585 cache_backend: None,
586 #[cfg(feature = "local-registry")]
587 local_registry: None,
588 })
589 }
590
591 /// Create an `ImageBuilder` with an explicit [`BuildBackend`].
592 ///
593 /// The backend is used for all build, push, tag, and manifest
594 /// operations. The internal `BuildahExecutor` is set to the default
595 /// (it is only used if no backend is set).
596 ///
597 /// # Errors
598 ///
599 /// Returns an error if the context directory does not exist.
600 pub fn with_backend(context: impl AsRef<Path>, backend: Arc<dyn BuildBackend>) -> Result<Self> {
601 let context = context.as_ref().to_path_buf();
602
603 if !context.exists() {
604 return Err(BuildError::ContextRead {
605 path: context,
606 source: std::io::Error::new(
607 std::io::ErrorKind::NotFound,
608 "Build context directory not found",
609 ),
610 });
611 }
612
613 Ok(Self {
614 context,
615 options: BuildOptions::default(),
616 executor: BuildahExecutor::default(),
617 event_tx: None,
618 backend: Some(backend),
619 #[cfg(feature = "cache")]
620 cache_backend: None,
621 #[cfg(feature = "local-registry")]
622 local_registry: None,
623 })
624 }
625
626 /// Set a custom Dockerfile path
627 ///
628 /// By default, the builder looks for a file named `Dockerfile` in the
629 /// context directory. Use this method to specify a different path.
630 ///
631 /// # Example
632 ///
633 /// ```no_run
634 /// # use zlayer_builder::ImageBuilder;
635 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
636 /// let builder = ImageBuilder::new("./my-project").await?
637 /// .dockerfile("./my-project/Dockerfile.prod");
638 /// # Ok(())
639 /// # }
640 /// ```
641 #[must_use]
642 pub fn dockerfile(mut self, path: impl AsRef<Path>) -> Self {
643 self.options.dockerfile = Some(path.as_ref().to_path_buf());
644 self
645 }
646
647 /// Set a custom `ZImagefile` path
648 ///
649 /// `ZImagefiles` are a YAML-based alternative to Dockerfiles. When set,
650 /// the builder will parse the `ZImagefile` and convert it to the internal
651 /// Dockerfile IR for execution.
652 ///
653 /// # Example
654 ///
655 /// ```no_run
656 /// # use zlayer_builder::ImageBuilder;
657 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
658 /// let builder = ImageBuilder::new("./my-project").await?
659 /// .zimagefile("./my-project/ZImagefile");
660 /// # Ok(())
661 /// # }
662 /// ```
663 #[must_use]
664 pub fn zimagefile(mut self, path: impl AsRef<Path>) -> Self {
665 self.options.zimagefile = Some(path.as_ref().to_path_buf());
666 self
667 }
668
669 /// Use a runtime template instead of a Dockerfile
670 ///
671 /// Runtime templates provide pre-built Dockerfiles for common
672 /// development environments. When set, the Dockerfile option is ignored.
673 ///
674 /// # Example
675 ///
676 /// ```no_run
677 /// use zlayer_builder::{ImageBuilder, Runtime};
678 ///
679 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
680 /// let builder = ImageBuilder::new("./my-node-app").await?
681 /// .runtime(Runtime::Node20);
682 /// # Ok(())
683 /// # }
684 /// ```
685 #[must_use]
686 pub fn runtime(mut self, runtime: Runtime) -> Self {
687 self.options.runtime = Some(runtime);
688 self
689 }
690
691 /// Add a build argument
692 ///
693 /// Build arguments are passed to the Dockerfile and can be referenced
694 /// using the `ARG` instruction.
695 ///
696 /// # Example
697 ///
698 /// ```no_run
699 /// # use zlayer_builder::ImageBuilder;
700 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
701 /// let builder = ImageBuilder::new("./my-project").await?
702 /// .build_arg("VERSION", "1.0.0")
703 /// .build_arg("DEBUG", "false");
704 /// # Ok(())
705 /// # }
706 /// ```
707 #[must_use]
708 pub fn build_arg(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
709 self.options.build_args.insert(key.into(), value.into());
710 self
711 }
712
713 /// Set multiple build arguments at once
714 #[must_use]
715 pub fn build_args(mut self, args: HashMap<String, String>) -> Self {
716 self.options.build_args.extend(args);
717 self
718 }
719
720 /// Set the target stage for multi-stage builds
721 ///
722 /// When building a multi-stage Dockerfile, you can stop at a specific
723 /// stage instead of building all stages.
724 ///
725 /// # Example
726 ///
727 /// ```no_run
728 /// # use zlayer_builder::ImageBuilder;
729 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
730 /// // Dockerfile:
731 /// // FROM node:20 AS builder
732 /// // ...
733 /// // FROM node:20-slim AS runtime
734 /// // ...
735 ///
736 /// let builder = ImageBuilder::new("./my-project").await?
737 /// .target("builder")
738 /// .tag("myapp:builder");
739 /// # Ok(())
740 /// # }
741 /// ```
742 #[must_use]
743 pub fn target(mut self, stage: impl Into<String>) -> Self {
744 self.options.target = Some(stage.into());
745 self
746 }
747
748 /// Add an image tag
749 ///
750 /// Tags are applied to the final image. You can add multiple tags.
751 /// The first tag is used as the primary image name during commit.
752 ///
753 /// # Example
754 ///
755 /// ```no_run
756 /// # use zlayer_builder::ImageBuilder;
757 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
758 /// let builder = ImageBuilder::new("./my-project").await?
759 /// .tag("myapp:latest")
760 /// .tag("myapp:v1.0.0")
761 /// .tag("registry.example.com/myapp:v1.0.0");
762 /// # Ok(())
763 /// # }
764 /// ```
765 #[must_use]
766 pub fn tag(mut self, tag: impl Into<String>) -> Self {
767 self.options.tags.push(tag.into());
768 self
769 }
770
771 /// Disable layer caching
772 ///
773 /// When enabled, all layers are rebuilt from scratch even if
774 /// they could be served from cache.
775 ///
776 /// Note: Currently this flag is tracked but not fully implemented in the
777 /// build process. `ZLayer` uses manual container creation (`buildah from`,
778 /// `buildah run`, `buildah commit`) which doesn't have built-in caching
779 /// like `buildah build` does. Future work could implement layer-level
780 /// caching by checking instruction hashes against previously built layers.
781 #[must_use]
782 pub fn no_cache(mut self) -> Self {
783 self.options.no_cache = true;
784 self
785 }
786
787 /// Set the base-image pull strategy for the build.
788 ///
789 /// By default, `buildah from` is invoked with `--pull=newer`, so an
790 /// up-to-date local base image is reused but a newer one on the
791 /// registry will be fetched. Pass [`PullBaseMode::Always`] to force a
792 /// fresh pull on every build, or [`PullBaseMode::Never`] to stay fully
793 /// offline.
794 #[must_use]
795 pub fn pull(mut self, mode: PullBaseMode) -> Self {
796 self.options.pull = mode;
797 self
798 }
799
800 /// Enable or disable layer caching
801 ///
802 /// This controls the `--layers` flag for buildah. When enabled (default),
803 /// buildah can cache and reuse intermediate layers.
804 ///
805 /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
806 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
807 /// flag is reserved for future use when/if we switch to `buildah build`.
808 ///
809 /// # Example
810 ///
811 /// ```no_run
812 /// # use zlayer_builder::ImageBuilder;
813 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
814 /// let builder = ImageBuilder::new("./my-project").await?
815 /// .layers(false) // Disable layer caching
816 /// .tag("myapp:latest");
817 /// # Ok(())
818 /// # }
819 /// ```
820 #[must_use]
821 pub fn layers(mut self, enable: bool) -> Self {
822 self.options.layers = enable;
823 self
824 }
825
826 /// Set registry to pull cache from
827 ///
828 /// This corresponds to buildah's `--cache-from` flag, which allows
829 /// pulling cached layers from a remote registry to speed up builds.
830 ///
831 /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
832 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
833 /// option is reserved for future implementation.
834 ///
835 /// TODO: Implement remote cache support. This would require either:
836 /// 1. Switching to `buildah build` command which supports --cache-from natively
837 /// 2. Implementing custom layer caching with registry pull for intermediate layers
838 ///
839 /// # Example
840 ///
841 /// ```no_run
842 /// # use zlayer_builder::ImageBuilder;
843 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
844 /// let builder = ImageBuilder::new("./my-project").await?
845 /// .cache_from("registry.example.com/myapp:cache")
846 /// .tag("myapp:latest");
847 /// # Ok(())
848 /// # }
849 /// ```
850 #[must_use]
851 pub fn cache_from(mut self, registry: impl Into<String>) -> Self {
852 self.options.cache_from = Some(registry.into());
853 self
854 }
855
856 /// Set registry to push cache to
857 ///
858 /// This corresponds to buildah's `--cache-to` flag, which allows
859 /// pushing cached layers to a remote registry for future builds to use.
860 ///
861 /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
862 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
863 /// option is reserved for future implementation.
864 ///
865 /// TODO: Implement remote cache support. This would require either:
866 /// 1. Switching to `buildah build` command which supports --cache-to natively
867 /// 2. Implementing custom layer caching with registry push for intermediate layers
868 ///
869 /// # Example
870 ///
871 /// ```no_run
872 /// # use zlayer_builder::ImageBuilder;
873 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
874 /// let builder = ImageBuilder::new("./my-project").await?
875 /// .cache_to("registry.example.com/myapp:cache")
876 /// .tag("myapp:latest");
877 /// # Ok(())
878 /// # }
879 /// ```
880 #[must_use]
881 pub fn cache_to(mut self, registry: impl Into<String>) -> Self {
882 self.options.cache_to = Some(registry.into());
883 self
884 }
885
886 /// Set maximum cache age
887 ///
888 /// This corresponds to buildah's `--cache-ttl` flag, which sets the
889 /// maximum age for cached layers before they are considered stale.
890 ///
891 /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
892 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
893 /// option is reserved for future implementation.
894 ///
895 /// TODO: Implement cache TTL support. This would require either:
896 /// 1. Switching to `buildah build` command which supports --cache-ttl natively
897 /// 2. Implementing custom cache expiration logic for our layer caching system
898 ///
899 /// # Example
900 ///
901 /// ```no_run
902 /// # use zlayer_builder::ImageBuilder;
903 /// # use std::time::Duration;
904 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
905 /// let builder = ImageBuilder::new("./my-project").await?
906 /// .cache_ttl(Duration::from_secs(3600 * 24)) // 24 hours
907 /// .tag("myapp:latest");
908 /// # Ok(())
909 /// # }
910 /// ```
911 #[must_use]
912 pub fn cache_ttl(mut self, ttl: std::time::Duration) -> Self {
913 self.options.cache_ttl = Some(ttl);
914 self
915 }
916
917 /// Push the image to a registry after building
918 ///
919 /// # Arguments
920 ///
921 /// * `auth` - Registry authentication credentials
922 ///
923 /// # Example
924 ///
925 /// ```no_run
926 /// use zlayer_builder::{ImageBuilder, RegistryAuth};
927 ///
928 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
929 /// let builder = ImageBuilder::new("./my-project").await?
930 /// .tag("registry.example.com/myapp:v1.0.0")
931 /// .push(RegistryAuth::new("user", "password"));
932 /// # Ok(())
933 /// # }
934 /// ```
935 #[must_use]
936 pub fn push(mut self, auth: RegistryAuth) -> Self {
937 self.options.push = true;
938 self.options.registry_auth = Some(auth);
939 self
940 }
941
942 /// Enable pushing without authentication
943 ///
944 /// Use this for registries that don't require authentication
945 /// (e.g., local registries, insecure registries).
946 #[must_use]
947 pub fn push_without_auth(mut self) -> Self {
948 self.options.push = true;
949 self.options.registry_auth = None;
950 self
951 }
952
953 /// Set a default OCI/WASM-compatible registry to check for images.
954 ///
955 /// When set, the builder will probe this registry for short image names
956 /// before qualifying them to `docker.io`. For example, if set to
957 /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
958 /// the builder will check `git.example.com:5000/myapp:latest` first.
959 #[must_use]
960 pub fn default_registry(mut self, registry: impl Into<String>) -> Self {
961 self.options.default_registry = Some(registry.into());
962 self
963 }
964
965 /// Set a local OCI registry for image resolution.
966 ///
967 /// When set, the builder checks the local registry for cached images
968 /// before pulling from remote registries.
969 #[cfg(feature = "local-registry")]
970 #[must_use]
971 pub fn with_local_registry(mut self, registry: LocalRegistry) -> Self {
972 self.local_registry = Some(registry);
973 self
974 }
975
976 /// Squash all layers into a single layer
977 ///
978 /// This reduces image size but loses layer caching benefits.
979 #[must_use]
980 pub fn squash(mut self) -> Self {
981 self.options.squash = true;
982 self
983 }
984
985 /// Set the image format
986 ///
987 /// Valid values are "oci" (default) or "docker".
988 #[must_use]
989 pub fn format(mut self, format: impl Into<String>) -> Self {
990 self.options.format = Some(format.into());
991 self
992 }
993
994 /// Set default cache mounts to inject into all RUN instructions
995 #[must_use]
996 pub fn default_cache_mounts(mut self, mounts: Vec<RunMount>) -> Self {
997 self.options.default_cache_mounts = mounts;
998 self
999 }
1000
1001 /// Set the number of retries for failed RUN steps
1002 #[must_use]
1003 pub fn retries(mut self, retries: u32) -> Self {
1004 self.options.retries = retries;
1005 self
1006 }
1007
1008 /// Set the target platform for cross-architecture builds.
1009 #[must_use]
1010 pub fn platform(mut self, platform: impl Into<String>) -> Self {
1011 self.options.platform = Some(platform.into());
1012 self
1013 }
1014
1015 /// Set a pre-computed source hash for content-based cache invalidation.
1016 ///
1017 /// When set, the sandbox builder can skip a full rebuild if the cached
1018 /// image was produced from identical source content.
1019 #[must_use]
1020 pub fn source_hash(mut self, hash: impl Into<String>) -> Self {
1021 self.options.source_hash = Some(hash.into());
1022 self
1023 }
1024
1025 /// Set an event sender for TUI progress updates
1026 ///
1027 /// Events will be sent as the build progresses, allowing you to
1028 /// display a progress UI or log build status.
1029 ///
1030 /// # Example
1031 ///
1032 /// ```no_run
1033 /// use zlayer_builder::{ImageBuilder, BuildEvent};
1034 /// use std::sync::mpsc;
1035 ///
1036 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1037 /// let (tx, rx) = mpsc::channel::<BuildEvent>();
1038 ///
1039 /// let builder = ImageBuilder::new("./my-project").await?
1040 /// .tag("myapp:latest")
1041 /// .with_events(tx);
1042 /// # Ok(())
1043 /// # }
1044 /// ```
1045 #[must_use]
1046 pub fn with_events(mut self, tx: mpsc::Sender<BuildEvent>) -> Self {
1047 self.event_tx = Some(tx);
1048 self
1049 }
1050
1051 /// Configure a persistent disk cache backend for layer caching.
1052 ///
1053 /// When configured, the builder will store layer data on disk at the
1054 /// specified path. This cache persists across builds and significantly
1055 /// speeds up repeated builds of similar images.
1056 ///
1057 /// Requires the `cache-persistent` feature to be enabled.
1058 ///
1059 /// # Arguments
1060 ///
1061 /// * `path` - Path to the cache directory. If a directory, creates
1062 /// `blob_cache.redb` inside it. If a file path, uses it directly.
1063 ///
1064 /// # Example
1065 ///
1066 /// ```no_run,ignore
1067 /// use zlayer_builder::ImageBuilder;
1068 ///
1069 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1070 /// let builder = ImageBuilder::new("./my-project").await?
1071 /// .with_cache_dir("/var/cache/zlayer")
1072 /// .tag("myapp:latest");
1073 /// # Ok(())
1074 /// # }
1075 /// ```
1076 ///
1077 /// # Integration Status
1078 ///
1079 /// TODO: The cache backend is currently stored but not actively used
1080 /// during builds. Future work will wire up:
1081 /// - Cache lookups before executing RUN instructions
1082 /// - Storing layer data after successful execution
1083 /// - Caching base image layers from registry pulls
1084 #[cfg(feature = "cache-persistent")]
1085 #[must_use]
1086 pub fn with_cache_dir(mut self, path: impl AsRef<Path>) -> Self {
1087 self.options.cache_backend_config = Some(CacheBackendConfig::Persistent {
1088 path: path.as_ref().to_path_buf(),
1089 });
1090 debug!(
1091 "Configured persistent cache at: {}",
1092 path.as_ref().display()
1093 );
1094 self
1095 }
1096
1097 /// Configure an in-memory cache backend for layer caching.
1098 ///
1099 /// The in-memory cache is cleared when the process exits, but can
1100 /// speed up builds within a single session by caching intermediate
1101 /// layers and avoiding redundant operations.
1102 ///
1103 /// Requires the `cache` feature to be enabled.
1104 ///
1105 /// # Example
1106 ///
1107 /// ```no_run,ignore
1108 /// use zlayer_builder::ImageBuilder;
1109 ///
1110 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1111 /// let builder = ImageBuilder::new("./my-project").await?
1112 /// .with_memory_cache()
1113 /// .tag("myapp:latest");
1114 /// # Ok(())
1115 /// # }
1116 /// ```
1117 ///
1118 /// # Integration Status
1119 ///
1120 /// TODO: The cache backend is currently stored but not actively used
1121 /// during builds. See `with_cache_dir` for integration status details.
1122 #[cfg(feature = "cache")]
1123 #[must_use]
1124 pub fn with_memory_cache(mut self) -> Self {
1125 self.options.cache_backend_config = Some(CacheBackendConfig::Memory);
1126 debug!("Configured in-memory cache");
1127 self
1128 }
1129
1130 /// Configure an S3-compatible storage backend for layer caching.
1131 ///
1132 /// This is useful for distributed build systems where multiple build
1133 /// machines need to share a layer cache. Supports AWS S3, Cloudflare R2,
1134 /// Backblaze B2, `MinIO`, and other S3-compatible services.
1135 ///
1136 /// Requires the `cache-s3` feature to be enabled.
1137 ///
1138 /// # Arguments
1139 ///
1140 /// * `bucket` - S3 bucket name
1141 /// * `region` - AWS region (optional, uses SDK default if not set)
1142 ///
1143 /// # Example
1144 ///
1145 /// ```no_run,ignore
1146 /// use zlayer_builder::ImageBuilder;
1147 ///
1148 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1149 /// let builder = ImageBuilder::new("./my-project").await?
1150 /// .with_s3_cache("my-build-cache", Some("us-west-2"))
1151 /// .tag("myapp:latest");
1152 /// # Ok(())
1153 /// # }
1154 /// ```
1155 ///
1156 /// # Integration Status
1157 ///
1158 /// TODO: The cache backend is currently stored but not actively used
1159 /// during builds. See `with_cache_dir` for integration status details.
1160 #[cfg(feature = "cache-s3")]
1161 #[must_use]
1162 pub fn with_s3_cache(mut self, bucket: impl Into<String>, region: Option<String>) -> Self {
1163 self.options.cache_backend_config = Some(CacheBackendConfig::S3 {
1164 bucket: bucket.into(),
1165 region,
1166 endpoint: None,
1167 prefix: None,
1168 });
1169 debug!("Configured S3 cache");
1170 self
1171 }
1172
1173 /// Configure an S3-compatible storage backend with custom endpoint.
1174 ///
1175 /// Use this method for S3-compatible services that require a custom
1176 /// endpoint URL (e.g., Cloudflare R2, `MinIO`, local development).
1177 ///
1178 /// Requires the `cache-s3` feature to be enabled.
1179 ///
1180 /// # Arguments
1181 ///
1182 /// * `bucket` - S3 bucket name
1183 /// * `endpoint` - Custom endpoint URL
1184 /// * `region` - Region (required for some S3-compatible services)
1185 ///
1186 /// # Example
1187 ///
1188 /// ```no_run,ignore
1189 /// use zlayer_builder::ImageBuilder;
1190 ///
1191 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1192 /// // Cloudflare R2
1193 /// let builder = ImageBuilder::new("./my-project").await?
1194 /// .with_s3_cache_endpoint(
1195 /// "my-bucket",
1196 /// "https://accountid.r2.cloudflarestorage.com",
1197 /// Some("auto".to_string()),
1198 /// )
1199 /// .tag("myapp:latest");
1200 /// # Ok(())
1201 /// # }
1202 /// ```
1203 #[cfg(feature = "cache-s3")]
1204 #[must_use]
1205 pub fn with_s3_cache_endpoint(
1206 mut self,
1207 bucket: impl Into<String>,
1208 endpoint: impl Into<String>,
1209 region: Option<String>,
1210 ) -> Self {
1211 self.options.cache_backend_config = Some(CacheBackendConfig::S3 {
1212 bucket: bucket.into(),
1213 region,
1214 endpoint: Some(endpoint.into()),
1215 prefix: None,
1216 });
1217 debug!("Configured S3 cache with custom endpoint");
1218 self
1219 }
1220
1221 /// Configure a custom cache backend configuration.
1222 ///
1223 /// This is the most flexible way to configure the cache backend,
1224 /// allowing full control over all cache settings.
1225 ///
1226 /// Requires the `cache` feature to be enabled.
1227 ///
1228 /// # Example
1229 ///
1230 /// ```no_run,ignore
1231 /// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
1232 ///
1233 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1234 /// let builder = ImageBuilder::new("./my-project").await?
1235 /// .with_cache_config(CacheBackendConfig::Memory)
1236 /// .tag("myapp:latest");
1237 /// # Ok(())
1238 /// # }
1239 /// ```
1240 #[cfg(feature = "cache")]
1241 #[must_use]
1242 pub fn with_cache_config(mut self, config: CacheBackendConfig) -> Self {
1243 self.options.cache_backend_config = Some(config);
1244 debug!("Configured custom cache backend");
1245 self
1246 }
1247
1248 /// Set an already-initialized cache backend directly.
1249 ///
1250 /// This is useful when you have a pre-configured cache backend instance
1251 /// that you want to share across multiple builders or when you need
1252 /// fine-grained control over cache initialization.
1253 ///
1254 /// Requires the `cache` feature to be enabled.
1255 ///
1256 /// # Example
1257 ///
1258 /// ```no_run,ignore
1259 /// use zlayer_builder::ImageBuilder;
1260 /// use zlayer_registry::cache::BlobCache;
1261 /// use std::sync::Arc;
1262 ///
1263 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1264 /// let cache = Arc::new(Box::new(BlobCache::new()?) as Box<dyn zlayer_registry::cache::BlobCacheBackend>);
1265 ///
1266 /// let builder = ImageBuilder::new("./my-project").await?
1267 /// .with_cache_backend(cache)
1268 /// .tag("myapp:latest");
1269 /// # Ok(())
1270 /// # }
1271 /// ```
1272 #[cfg(feature = "cache")]
1273 #[must_use]
1274 pub fn with_cache_backend(mut self, backend: Arc<Box<dyn BlobCacheBackend>>) -> Self {
1275 self.cache_backend = Some(backend);
1276 debug!("Configured pre-initialized cache backend");
1277 self
1278 }
1279
    /// Run the build
    ///
    /// This executes the complete build process:
    /// 1. Parse Dockerfile or load runtime template
    /// 2. Build all required stages
    /// 3. Commit and tag the final image
    /// 4. Push to registry if configured
    /// 5. Clean up intermediate containers
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Dockerfile parsing fails
    /// - A buildah command fails
    /// - Target stage is not found
    /// - Registry push fails
    /// - (with `local-registry`) exporting the image or importing it into
    ///   the local registry fails
    ///
    /// # Panics
    ///
    /// Panics if an instruction output is missing after all retry attempts (internal invariant).
    #[instrument(skip(self), fields(context = %self.context.display()))]
    #[allow(clippy::too_many_lines)]
    pub async fn build(self) -> Result<BuiltImage> {
        let start_time = std::time::Instant::now();

        info!("Starting build in context: {}", self.context.display());

        // 1. Get build output (Dockerfile IR or WASM artifact)
        let build_output = self.get_build_output().await?;

        // If this is a WASM build, return early with the artifact info.
        if let BuildOutput::WasmArtifact {
            wasm_path,
            oci_path: _,
            language,
            optimized,
            size,
        } = build_output
        {
            // Millisecond precision is enough; truncating from u128 is harmless.
            #[allow(clippy::cast_possible_truncation)]
            let build_time_ms = start_time.elapsed().as_millis() as u64;

            self.send_event(BuildEvent::BuildComplete {
                image_id: wasm_path.display().to_string(),
            });

            info!(
                "WASM build completed in {}ms: {} ({}, {} bytes, optimized={})",
                build_time_ms,
                wasm_path.display(),
                language,
                size,
                optimized
            );

            // WASM artifacts are represented as a single-layer image.
            return Ok(BuiltImage {
                image_id: format!("wasm:{}", wasm_path.display()),
                tags: self.options.tags.clone(),
                layer_count: 1,
                size,
                build_time_ms,
                is_manifest: false,
            });
        }

        // Extract the Dockerfile from the BuildOutput.
        let BuildOutput::Dockerfile(dockerfile) = build_output else {
            unreachable!("WasmArtifact case handled above");
        };
        debug!("Parsed Dockerfile with {} stages", dockerfile.stages.len());

        // Delegate the build to the backend.
        let backend = self
            .backend
            .as_ref()
            .ok_or_else(|| BuildError::BuildahNotFound {
                message: "No build backend configured".into(),
            })?;

        info!("Delegating build to {} backend", backend.name());
        let built = backend
            .build_image(
                &self.context,
                &dockerfile,
                &self.options,
                self.event_tx.clone(),
            )
            .await?;

        // Import the built image into ZLayer's local registry and blob cache
        // so the runtime can find it without pulling from a remote registry.
        //
        // A user who wired up a local registry clearly wants built images to
        // live there — if the import fails (almost always EACCES on the
        // registry dir for an unprivileged user), bail with the registry path
        // in the message instead of silently producing a build that the
        // daemon can't find.
        #[cfg(feature = "local-registry")]
        if let Some(ref registry) = self.local_registry {
            if !built.tags.is_empty() {
                // PID + elapsed nanos makes the temp path unique even across
                // concurrent builds in the same process tree.
                let tmp_path = std::env::temp_dir().join(format!(
                    "zlayer-build-{}-{}.tar",
                    std::process::id(),
                    start_time.elapsed().as_nanos()
                ));

                // Export the image from buildah's store to an OCI archive.
                let export_tag = &built.tags[0];
                let dest = format!("oci-archive:{}", tmp_path.display());
                let push_cmd = BuildahCommand::push_to(export_tag, &dest);

                self.executor
                    .execute_checked(&push_cmd)
                    .await
                    .map_err(|e| BuildError::RegistryError {
                        message: format!(
                            "failed to export image to OCI archive for local registry \
                             import at {}: {e}",
                            registry.root().display()
                        ),
                    })?;

                // Resolve the blob cache backend (if available).
                let blob_cache: Option<&dyn zlayer_registry::cache::BlobCacheBackend> =
                    self.cache_backend.as_ref().map(|arc| arc.as_ref().as_ref());

                // Import every tag; the async block lets cleanup below run
                // regardless of whether any import failed.
                let import_result = async {
                    for tag in &built.tags {
                        let info =
                            import_image(registry, &tmp_path, Some(tag.as_str()), blob_cache)
                                .await
                                .map_err(|e| BuildError::RegistryError {
                                    message: format!(
                                        "failed to import '{tag}' into local registry at {}: {e}",
                                        registry.root().display()
                                    ),
                                })?;
                        info!(
                            tag = %tag,
                            digest = %info.digest,
                            "Imported into local registry"
                        );
                    }
                    Ok::<(), BuildError>(())
                }
                .await;

                // Clean up the temporary archive regardless of whether the
                // import succeeded (best-effort; warn on failure).
                if let Err(e) = fs::remove_file(&tmp_path).await {
                    warn!(path = %tmp_path.display(), error = %e, "Failed to remove temp OCI archive");
                }

                import_result?;
            }
        }

        Ok(built)
    }
1439
1440 /// Detection order:
1441 /// 1. If `runtime` is set -> use template string -> parse as Dockerfile
1442 /// 2. If `zimagefile` is explicitly set -> read & parse `ZImagefile` -> convert
1443 /// 3. If a file called `ZImagefile` exists in the context dir -> same as (2)
1444 /// 4. Fall back to reading a Dockerfile (from `dockerfile` option or default)
1445 ///
1446 /// Returns [`BuildOutput::Dockerfile`] for container builds or
1447 /// [`BuildOutput::WasmArtifact`] for WASM builds.
1448 async fn get_build_output(&self) -> Result<BuildOutput> {
1449 // (a) Runtime template takes highest priority.
1450 if let Some(runtime) = &self.options.runtime {
1451 debug!("Using runtime template: {}", runtime);
1452 let content = get_template(*runtime);
1453 return Ok(BuildOutput::Dockerfile(Dockerfile::parse(content)?));
1454 }
1455
1456 // (b) Explicit ZImagefile path.
1457 if let Some(ref zimage_path) = self.options.zimagefile {
1458 debug!("Reading ZImagefile: {}", zimage_path.display());
1459 let content =
1460 fs::read_to_string(zimage_path)
1461 .await
1462 .map_err(|e| BuildError::ContextRead {
1463 path: zimage_path.clone(),
1464 source: e,
1465 })?;
1466 let zimage = crate::zimage::parse_zimagefile(&content)?;
1467 return self.handle_zimage(&zimage).await;
1468 }
1469
1470 // (c) Auto-detect ZImagefile in context directory.
1471 let auto_zimage_path = self.context.join("ZImagefile");
1472 if auto_zimage_path.exists() {
1473 debug!(
1474 "Found ZImagefile in context: {}",
1475 auto_zimage_path.display()
1476 );
1477 let content = fs::read_to_string(&auto_zimage_path).await.map_err(|e| {
1478 BuildError::ContextRead {
1479 path: auto_zimage_path,
1480 source: e,
1481 }
1482 })?;
1483 let zimage = crate::zimage::parse_zimagefile(&content)?;
1484 return self.handle_zimage(&zimage).await;
1485 }
1486
1487 // (d) Fall back to Dockerfile.
1488 let dockerfile_path = self
1489 .options
1490 .dockerfile
1491 .clone()
1492 .unwrap_or_else(|| self.context.join("Dockerfile"));
1493
1494 debug!("Reading Dockerfile: {}", dockerfile_path.display());
1495
1496 let content =
1497 fs::read_to_string(&dockerfile_path)
1498 .await
1499 .map_err(|e| BuildError::ContextRead {
1500 path: dockerfile_path,
1501 source: e,
1502 })?;
1503
1504 Ok(BuildOutput::Dockerfile(Dockerfile::parse(&content)?))
1505 }
1506
1507 /// Convert a parsed [`ZImage`] into a [`BuildOutput`].
1508 ///
1509 /// Handles all four `ZImage` modes:
1510 /// - **Runtime** mode: delegates to the template system -> [`BuildOutput::Dockerfile`]
1511 /// - **Single-stage / Multi-stage**: converts via [`zimage_to_dockerfile`] -> [`BuildOutput::Dockerfile`]
1512 /// - **WASM** mode: builds a WASM component -> [`BuildOutput::WasmArtifact`]
1513 ///
1514 /// Any `build:` directives are resolved first by spawning nested builds.
1515 async fn handle_zimage(&self, zimage: &crate::zimage::ZImage) -> Result<BuildOutput> {
1516 // Runtime mode: delegate to template system.
1517 if let Some(ref runtime_name) = zimage.runtime {
1518 let rt = Runtime::from_name(runtime_name).ok_or_else(|| {
1519 BuildError::zimagefile_validation(format!(
1520 "unknown runtime '{runtime_name}' in ZImagefile"
1521 ))
1522 })?;
1523 let content = get_template(rt);
1524 return Ok(BuildOutput::Dockerfile(Dockerfile::parse(content)?));
1525 }
1526
1527 // WASM mode: build a WASM component.
1528 if let Some(ref wasm_config) = zimage.wasm {
1529 return self.handle_wasm_build(wasm_config).await;
1530 }
1531
1532 // Resolve any `build:` directives to concrete base image tags.
1533 let resolved = self.resolve_build_directives(zimage).await?;
1534
1535 // Single-stage or multi-stage: convert to Dockerfile IR directly.
1536 Ok(BuildOutput::Dockerfile(
1537 crate::zimage::zimage_to_dockerfile(&resolved)?,
1538 ))
1539 }
1540
1541 /// Build a WASM component from the `ZImagefile` wasm configuration.
1542 ///
1543 /// Converts [`ZWasmConfig`](crate::zimage::ZWasmConfig) into a
1544 /// [`WasmBuildConfig`](crate::wasm_builder::WasmBuildConfig) and invokes
1545 /// the WASM builder pipeline.
1546 async fn handle_wasm_build(
1547 &self,
1548 wasm_config: &crate::zimage::ZWasmConfig,
1549 ) -> Result<BuildOutput> {
1550 use crate::wasm_builder::{build_wasm, WasiTarget, WasmBuildConfig, WasmLanguage};
1551
1552 info!("ZImagefile specifies WASM mode, running WASM build");
1553
1554 // Convert target string to WasiTarget enum.
1555 let target = match wasm_config.target.as_str() {
1556 "preview1" => WasiTarget::Preview1,
1557 _ => WasiTarget::Preview2,
1558 };
1559
1560 // Resolve language: parse from string or leave as None for auto-detection.
1561 let language = wasm_config
1562 .language
1563 .as_deref()
1564 .and_then(WasmLanguage::from_name);
1565
1566 if let Some(ref lang_str) = wasm_config.language {
1567 if language.is_none() {
1568 return Err(BuildError::zimagefile_validation(format!(
1569 "unknown WASM language '{lang_str}'. Supported: rust, go, python, \
1570 typescript, assemblyscript, c, zig"
1571 )));
1572 }
1573 }
1574
1575 // Build the WasmBuildConfig.
1576 let mut config = WasmBuildConfig {
1577 language,
1578 target,
1579 optimize: wasm_config.optimize,
1580 opt_level: wasm_config
1581 .opt_level
1582 .clone()
1583 .unwrap_or_else(|| "Oz".to_string()),
1584 wit_path: wasm_config.wit.as_ref().map(PathBuf::from),
1585 output_path: wasm_config.output.as_ref().map(PathBuf::from),
1586 world: wasm_config.world.clone(),
1587 features: wasm_config.features.clone(),
1588 build_args: wasm_config.build_args.clone(),
1589 pre_build: Vec::new(),
1590 post_build: Vec::new(),
1591 adapter: wasm_config.adapter.as_ref().map(PathBuf::from),
1592 };
1593
1594 // Convert ZCommand pre/post build steps to Vec<Vec<String>>.
1595 for cmd in &wasm_config.pre_build {
1596 config.pre_build.push(zcommand_to_args(cmd));
1597 }
1598 for cmd in &wasm_config.post_build {
1599 config.post_build.push(zcommand_to_args(cmd));
1600 }
1601
1602 // Build the WASM component.
1603 let result = build_wasm(&self.context, config).await?;
1604
1605 let language_name = result.language.name().to_string();
1606 let wasm_path = result.wasm_path;
1607 let size = result.size;
1608
1609 info!(
1610 "WASM build complete: {} ({} bytes, optimized={})",
1611 wasm_path.display(),
1612 size,
1613 wasm_config.optimize
1614 );
1615
1616 Ok(BuildOutput::WasmArtifact {
1617 wasm_path,
1618 oci_path: None,
1619 language: language_name,
1620 optimized: wasm_config.optimize,
1621 size,
1622 })
1623 }
1624
1625 /// Resolve `build:` directives in a `ZImage` by running nested builds.
1626 ///
1627 /// For each `build:` directive (top-level or per-stage), this method:
1628 /// 1. Determines the build context directory
1629 /// 2. Auto-detects the build file (`ZImagefile` > Dockerfile) unless specified
1630 /// 3. Spawns a nested `ImageBuilder` to build the context
1631 /// 4. Tags the result and replaces `build` with `base`
1632 async fn resolve_build_directives(
1633 &self,
1634 zimage: &crate::zimage::ZImage,
1635 ) -> Result<crate::zimage::ZImage> {
1636 let mut resolved = zimage.clone();
1637
1638 // Resolve top-level `build:` directive.
1639 if let Some(ref build_ctx) = resolved.build {
1640 let tag = self.run_nested_build(build_ctx, "toplevel").await?;
1641 resolved.base = Some(tag);
1642 resolved.build = None;
1643 }
1644
1645 // Resolve per-stage `build:` directives.
1646 if let Some(ref mut stages) = resolved.stages {
1647 for (name, stage) in stages.iter_mut() {
1648 if let Some(ref build_ctx) = stage.build {
1649 let tag = self.run_nested_build(build_ctx, name).await?;
1650 stage.base = Some(tag);
1651 stage.build = None;
1652 }
1653 }
1654 }
1655
1656 Ok(resolved)
1657 }
1658
1659 /// Run a nested build from a `build:` directive and return the resulting image tag.
1660 fn run_nested_build<'a>(
1661 &'a self,
1662 build_ctx: &'a crate::zimage::types::ZBuildContext,
1663 stage_name: &'a str,
1664 ) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<String>> + Send + 'a>> {
1665 Box::pin(self.run_nested_build_inner(build_ctx, stage_name))
1666 }
1667
    /// Execute a nested build for a `build:` directive and return the tag
    /// the resulting image was given.
    ///
    /// NOTE(review): only build args and the default registry propagate from
    /// the parent builder into the nested one; cache backends and event
    /// channels do not — confirm that is intended.
    async fn run_nested_build_inner(
        &self,
        build_ctx: &crate::zimage::types::ZBuildContext,
        stage_name: &str,
    ) -> Result<String> {
        // Resolve the directive's context dir relative to our own context.
        let context_dir = build_ctx.context_dir(&self.context);

        if !context_dir.exists() {
            return Err(BuildError::ContextRead {
                path: context_dir,
                source: std::io::Error::new(
                    std::io::ErrorKind::NotFound,
                    format!(
                        "build context directory not found for build directive in '{stage_name}'"
                    ),
                ),
            });
        }

        info!(
            "Building nested image for '{}' from context: {}",
            stage_name,
            context_dir.display()
        );

        // Create a tag for the nested build result. The epoch-seconds
        // suffix keeps repeated builds of the same stage from colliding.
        let tag = format!(
            "zlayer-build-dep-{}:{}",
            stage_name,
            chrono_lite_timestamp()
        );

        // Create nested builder.
        let mut nested = ImageBuilder::new(&context_dir).await?;
        nested = nested.tag(&tag);

        // Apply explicit build file if specified: .yml/.yaml extensions and
        // names starting with "ZImagefile" are treated as ZImagefiles;
        // anything else is assumed to be a Dockerfile.
        if let Some(file) = build_ctx.file() {
            let file_path = context_dir.join(file);
            if std::path::Path::new(file).extension().is_some_and(|ext| {
                ext.eq_ignore_ascii_case("yml") || ext.eq_ignore_ascii_case("yaml")
            }) || file.starts_with("ZImagefile")
            {
                nested = nested.zimagefile(file_path);
            } else {
                nested = nested.dockerfile(file_path);
            }
        }

        // Apply build args.
        for (key, value) in build_ctx.args() {
            nested = nested.build_arg(&key, &value);
        }

        // Propagate default registry if set.
        if let Some(ref reg) = self.options.default_registry {
            nested = nested.default_registry(reg.clone());
        }

        // Run the nested build.
        let result = nested.build().await?;
        info!(
            "Nested build for '{}' completed: {}",
            stage_name, result.image_id
        );

        Ok(tag)
    }
1736
1737 /// Send an event to the TUI (if configured)
1738 fn send_event(&self, event: BuildEvent) {
1739 if let Some(tx) = &self.event_tx {
1740 // Ignore send errors - the receiver may have been dropped
1741 let _ = tx.send(event);
1742 }
1743 }
1744}
1745
/// Generate a timestamp-based name component: seconds since the Unix epoch,
/// rendered as a decimal string.
///
/// A system clock set before the epoch yields `"0"` instead of panicking
/// (`unwrap_or_default` on the duration).
fn chrono_lite_timestamp() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    // `.to_string()` is the idiomatic (and clippy-clean) replacement for
    // `format!("{}", x)` when stringifying a single value.
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
        .to_string()
}
1754
1755/// Convert a [`ZCommand`](crate::zimage::ZCommand) into a vector of string arguments
1756/// suitable for passing to [`WasmBuildConfig`](crate::wasm_builder::WasmBuildConfig)
1757/// pre/post build command lists.
1758fn zcommand_to_args(cmd: &crate::zimage::ZCommand) -> Vec<String> {
1759 match cmd {
1760 crate::zimage::ZCommand::Shell(s) => {
1761 vec!["/bin/sh".to_string(), "-c".to_string(), s.clone()]
1762 }
1763 crate::zimage::ZCommand::Exec(args) => args.clone(),
1764 }
1765}
1766
#[cfg(test)]
mod tests {
    use super::*;

    // RegistryAuth should store credentials verbatim.
    #[test]
    fn test_registry_auth_new() {
        let auth = RegistryAuth::new("user", "pass");
        assert_eq!(auth.username, "user");
        assert_eq!(auth.password, "pass");
    }

    // Every BuildOptions field starts unset/empty except `layers`,
    // which defaults to true (layer caching on by default).
    #[test]
    fn test_build_options_default() {
        let opts = BuildOptions::default();
        assert!(opts.dockerfile.is_none());
        assert!(opts.zimagefile.is_none());
        assert!(opts.runtime.is_none());
        assert!(opts.build_args.is_empty());
        assert!(opts.target.is_none());
        assert!(opts.tags.is_empty());
        assert!(!opts.no_cache);
        assert!(!opts.push);
        assert!(!opts.squash);
        // New cache-related fields
        assert!(opts.layers); // Default is true
        assert!(opts.cache_from.is_none());
        assert!(opts.cache_to.is_none());
        assert!(opts.cache_ttl.is_none());
        // Cache backend config (only with cache feature)
        #[cfg(feature = "cache")]
        assert!(opts.cache_backend_config.is_none());
    }

    // Construct a builder directly via the struct literal so tests don't
    // need the async `ImageBuilder::new` (which probes the environment).
    fn create_test_builder() -> ImageBuilder {
        // Create a minimal builder for testing (without async initialization)
        ImageBuilder {
            context: PathBuf::from("/tmp/test"),
            options: BuildOptions::default(),
            executor: BuildahExecutor::with_path("/usr/bin/buildah"),
            event_tx: None,
            backend: None,
            #[cfg(feature = "cache")]
            cache_backend: None,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        }
    }

    // Builder method chaining tests
    #[test]
    fn test_builder_chaining() {
        let mut builder = create_test_builder();

        builder = builder
            .dockerfile("./Dockerfile.test")
            .runtime(Runtime::Node20)
            .build_arg("VERSION", "1.0")
            .target("builder")
            .tag("myapp:latest")
            .tag("myapp:v1")
            .no_cache()
            .squash()
            .format("oci");

        assert_eq!(
            builder.options.dockerfile,
            Some(PathBuf::from("./Dockerfile.test"))
        );
        assert_eq!(builder.options.runtime, Some(Runtime::Node20));
        assert_eq!(
            builder.options.build_args.get("VERSION"),
            Some(&"1.0".to_string())
        );
        assert_eq!(builder.options.target, Some("builder".to_string()));
        assert_eq!(builder.options.tags.len(), 2);
        assert!(builder.options.no_cache);
        assert!(builder.options.squash);
        assert_eq!(builder.options.format, Some("oci".to_string()));
    }

    // push(auth) must both set the flag and store the credentials.
    #[test]
    fn test_builder_push_with_auth() {
        let mut builder = create_test_builder();
        builder = builder.push(RegistryAuth::new("user", "pass"));

        assert!(builder.options.push);
        assert!(builder.options.registry_auth.is_some());
        let auth = builder.options.registry_auth.unwrap();
        assert_eq!(auth.username, "user");
        assert_eq!(auth.password, "pass");
    }

    // push_without_auth sets the flag but leaves credentials empty.
    #[test]
    fn test_builder_push_without_auth() {
        let mut builder = create_test_builder();
        builder = builder.push_without_auth();

        assert!(builder.options.push);
        assert!(builder.options.registry_auth.is_none());
    }

    // layers(bool) toggles in both directions.
    #[test]
    fn test_builder_layers() {
        let mut builder = create_test_builder();
        // Default is true
        assert!(builder.options.layers);

        // Disable layers
        builder = builder.layers(false);
        assert!(!builder.options.layers);

        // Re-enable layers
        builder = builder.layers(true);
        assert!(builder.options.layers);
    }

    #[test]
    fn test_builder_cache_from() {
        let mut builder = create_test_builder();
        assert!(builder.options.cache_from.is_none());

        builder = builder.cache_from("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_from,
            Some("registry.example.com/myapp:cache".to_string())
        );
    }

    #[test]
    fn test_builder_cache_to() {
        let mut builder = create_test_builder();
        assert!(builder.options.cache_to.is_none());

        builder = builder.cache_to("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_to,
            Some("registry.example.com/myapp:cache".to_string())
        );
    }

    #[test]
    fn test_builder_cache_ttl() {
        use std::time::Duration;

        let mut builder = create_test_builder();
        assert!(builder.options.cache_ttl.is_none());

        builder = builder.cache_ttl(Duration::from_secs(3600));
        assert_eq!(builder.options.cache_ttl, Some(Duration::from_secs(3600)));
    }

    // All cache options can be combined in one chain without clobbering
    // each other.
    #[test]
    fn test_builder_cache_options_chaining() {
        use std::time::Duration;

        let builder = create_test_builder()
            .layers(true)
            .cache_from("registry.example.com/cache:input")
            .cache_to("registry.example.com/cache:output")
            .cache_ttl(Duration::from_secs(7200))
            .no_cache();

        assert!(builder.options.layers);
        assert_eq!(
            builder.options.cache_from,
            Some("registry.example.com/cache:input".to_string())
        );
        assert_eq!(
            builder.options.cache_to,
            Some("registry.example.com/cache:output".to_string())
        );
        assert_eq!(builder.options.cache_ttl, Some(Duration::from_secs(7200)));
        assert!(builder.options.no_cache);
    }

    #[test]
    fn test_chrono_lite_timestamp() {
        let ts = chrono_lite_timestamp();
        // Should be a valid number
        let parsed: u64 = ts.parse().expect("Should be a valid u64");
        // Should be reasonably recent (after 2024)
        assert!(parsed > 1_700_000_000);
    }
}