// zlayer_builder/builder.rs
1//! `ImageBuilder` - High-level API for building container images
2//!
3//! This module provides the [`ImageBuilder`] type which orchestrates the full
4//! container image build process, from Dockerfile parsing through buildah
5//! execution to final image creation.
6//!
7//! # Example
8//!
9//! ```no_run
10//! use zlayer_builder::{ImageBuilder, Runtime};
11//!
12//! #[tokio::main]
13//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
14//! // Build from a Dockerfile
15//! let image = ImageBuilder::new("./my-app").await?
16//! .tag("myapp:latest")
17//! .tag("myapp:v1.0.0")
18//! .build()
19//! .await?;
20//!
21//! println!("Built image: {}", image.image_id);
22//! Ok(())
23//! }
24//! ```
25//!
26//! # Using Runtime Templates
27//!
28//! ```no_run
29//! use zlayer_builder::{ImageBuilder, Runtime};
30//!
31//! #[tokio::main]
32//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
33//! // Build using a runtime template (no Dockerfile needed)
34//! let image = ImageBuilder::new("./my-node-app").await?
35//! .runtime(Runtime::Node20)
36//! .tag("myapp:latest")
37//! .build()
38//! .await?;
39//!
40//! println!("Built image: {}", image.image_id);
41//! Ok(())
42//! }
43//! ```
44//!
45//! # Multi-stage Builds with Target
46//!
47//! ```no_run
48//! use zlayer_builder::ImageBuilder;
49//!
50//! #[tokio::main]
51//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
52//! // Build only up to a specific stage
53//! let image = ImageBuilder::new("./my-app").await?
54//! .target("builder")
55//! .tag("myapp:builder")
56//! .build()
57//! .await?;
58//!
59//! println!("Built intermediate image: {}", image.image_id);
60//! Ok(())
61//! }
62//! ```
63//!
64//! # With TUI Progress Updates
65//!
66//! ```no_run
67//! use zlayer_builder::{ImageBuilder, BuildEvent};
68//! use std::sync::mpsc;
69//!
70//! #[tokio::main]
71//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
72//! let (tx, rx) = mpsc::channel::<BuildEvent>();
73//!
74//! // Start TUI in another thread
75//! std::thread::spawn(move || {
76//! // Process events from rx...
77//! while let Ok(event) = rx.recv() {
78//! println!("Event: {:?}", event);
79//! }
80//! });
81//!
82//! let image = ImageBuilder::new("./my-app").await?
83//! .tag("myapp:latest")
84//! .with_events(tx)
85//! .build()
86//! .await?;
87//!
88//! Ok(())
89//! }
90//! ```
91//!
92//! # With Cache Backend (requires `cache` feature)
93//!
94//! ```no_run,ignore
95//! use zlayer_builder::ImageBuilder;
96//!
97//! #[tokio::main]
98//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
99//! let image = ImageBuilder::new("./my-app").await?
100//! .with_cache_dir("/var/cache/zlayer") // Use persistent disk cache
101//! .tag("myapp:latest")
102//! .build()
103//! .await?;
104//!
105//! println!("Built image: {}", image.image_id);
106//! Ok(())
107//! }
108//! ```
109
110use std::collections::HashMap;
111use std::path::{Path, PathBuf};
112use std::sync::mpsc;
113
114use tokio::fs;
115use tracing::{debug, info, instrument};
116
117use crate::buildah::{BuildahCommand, BuildahExecutor};
118use crate::dockerfile::{Dockerfile, ImageRef, Instruction, RunMount, Stage};
119use crate::error::{BuildError, Result};
120use crate::templates::{get_template, Runtime};
121use crate::tui::BuildEvent;
122
123// Cache backend integration (optional, requires `cache` feature)
124#[cfg(feature = "cache")]
125use std::sync::Arc;
126
127#[cfg(feature = "cache")]
128use zlayer_registry::cache::BlobCacheBackend;
129
130#[cfg(feature = "local-registry")]
131use zlayer_registry::LocalRegistry;
132
/// Configuration for the layer cache backend.
///
/// This enum specifies which cache backend to use for storing and retrieving
/// cached layers during builds. The cache feature must be enabled for this
/// to be available.
///
/// The default backend is [`CacheBackendConfig::Memory`] (via `#[derive(Default)]`).
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// // Use persistent disk cache
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_config(CacheBackendConfig::Persistent {
///         path: "/var/cache/zlayer".into(),
///     })
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "cache")]
#[derive(Debug, Clone, Default)]
pub enum CacheBackendConfig {
    /// In-memory cache (cleared when process exits).
    ///
    /// Useful for CI/CD environments where persistence isn't needed
    /// but you want to avoid re-downloading base image layers within
    /// a single build session.
    #[default]
    Memory,

    /// Persistent disk-based cache using redb.
    ///
    /// Requires the `cache-persistent` feature. Layers are stored on disk
    /// and persist across builds, significantly speeding up repeated builds.
    #[cfg(feature = "cache-persistent")]
    Persistent {
        /// Path to the cache directory or database file.
        /// If a directory, `blob_cache.redb` will be created inside it.
        path: PathBuf,
    },

    /// S3-compatible object storage backend.
    ///
    /// Requires the `cache-s3` feature. Useful for distributed build systems
    /// where multiple build machines need to share a cache.
    #[cfg(feature = "cache-s3")]
    S3 {
        /// S3 bucket name
        bucket: String,
        /// AWS region (optional, uses SDK default if not set)
        region: Option<String>,
        /// Custom endpoint URL (for S3-compatible services like R2, B2, MinIO)
        endpoint: Option<String>,
        /// Key prefix for cached blobs (default: "zlayer/layers/")
        prefix: Option<String>,
    },
}
192
193/// Tracks layer cache state during builds.
194///
195/// This struct maintains a mapping of instruction cache keys combined with
196/// base layer identifiers to determine if a layer was previously built and
197/// can be served from cache.
198///
199/// # Cache Key Format
200///
201/// The cache key is a tuple of:
202/// - `instruction_key`: A hash of the instruction type and its parameters
203/// (generated by [`Instruction::cache_key()`])
204/// - `base_layer`: The container/image ID that the instruction was executed on
205///
206/// Together, these uniquely identify a layer's content.
207///
208/// # Future Enhancements
209///
210/// Currently, cache hit detection is limited because buildah's manual container
211/// creation workflow (`buildah from`, `buildah run`, `buildah commit`) doesn't
212/// directly expose layer reuse information. To implement true cache detection,
213/// we would need to:
214///
215/// 1. **Parse buildah output**: Look for indicators in command output that suggest
216/// layer reuse (e.g., fast execution time, specific log messages)
217///
218/// 2. **Implement layer digest comparison**: Before executing an instruction,
219/// compute what the expected layer digest would be and check if it already
220/// exists in local storage
221///
222/// 3. **Switch to `buildah build`**: The `buildah build` command has native
223/// caching support with `--layers` flag that automatically handles cache hits
224///
225/// 4. **Use external cache registry**: Implement `--cache-from`/`--cache-to`
226/// semantics by pulling/pushing layer digests from a remote registry
#[derive(Debug, Default)]
struct LayerCacheTracker {
    /// Maps (`instruction_cache_key`, `base_layer_id`) -> `was_cached`.
    /// A missing entry means the combination has never been observed.
    known_layers: HashMap<(String, String), bool>,
}
232
233impl LayerCacheTracker {
234 /// Create a new empty cache tracker.
235 fn new() -> Self {
236 Self::default()
237 }
238
239 /// Check if we have a cached result for this instruction on the given base layer.
240 ///
241 /// # Arguments
242 ///
243 /// * `instruction_key` - The cache key from [`Instruction::cache_key()`]
244 /// * `base_layer` - The container or image ID the instruction runs on
245 ///
246 /// # Returns
247 ///
248 /// `true` if we've previously recorded this instruction as cached,
249 /// `false` otherwise (including if we've never seen this combination).
250 #[allow(dead_code)]
251 fn is_cached(&self, instruction_key: &str, base_layer: &str) -> bool {
252 self.known_layers
253 .get(&(instruction_key.to_string(), base_layer.to_string()))
254 .copied()
255 .unwrap_or(false)
256 }
257
258 /// Record the cache status for an instruction execution.
259 ///
260 /// # Arguments
261 ///
262 /// * `instruction_key` - The cache key from [`Instruction::cache_key()`]
263 /// * `base_layer` - The container or image ID the instruction ran on
264 /// * `cached` - Whether this execution was a cache hit
265 fn record(&mut self, instruction_key: String, base_layer: String, cached: bool) {
266 self.known_layers
267 .insert((instruction_key, base_layer), cached);
268 }
269
270 /// Attempt to detect if an instruction execution was a cache hit.
271 ///
272 /// This is a heuristic-based approach since buildah doesn't directly report
273 /// cache status for manual container operations.
274 ///
275 /// # Current Implementation
276 ///
277 /// Always returns `false` - true cache detection would require:
278 /// - Timing analysis (cached operations are typically < 100ms)
279 /// - Output parsing for cache-related messages
280 /// - Pre-computation of expected layer digests
281 ///
282 /// # Arguments
283 ///
284 /// * `_instruction` - The instruction that was executed
285 /// * `_execution_time_ms` - How long the execution took in milliseconds
286 /// * `_output` - The command's stdout/stderr output
287 ///
288 /// # Returns
289 ///
290 /// `true` if the execution appears to be a cache hit, `false` otherwise.
291 ///
292 /// TODO: Implement heuristic cache detection based on:
293 /// - Execution time (cached layers typically commit in < 100ms)
294 /// - Output analysis (look for "Using cache" or similar messages)
295 /// - Layer digest comparison with existing images
296 #[allow(dead_code, clippy::unused_self)]
297 fn detect_cache_hit(
298 &self,
299 _instruction: &Instruction,
300 _execution_time_ms: u64,
301 _output: &str,
302 ) -> bool {
303 // TODO: Implement cache hit detection heuristics
304 //
305 // Possible approaches:
306 // 1. Time-based: If execution took < 100ms, likely cached
307 // if execution_time_ms < 100 { return true; }
308 //
309 // 2. Output-based: Look for cache indicators in buildah output
310 // if output.contains("Using cache") { return true; }
311 //
312 // 3. Digest-based: Pre-compute expected digest and check storage
313 // let expected = compute_layer_digest(instruction, base_layer);
314 // if layer_exists_in_storage(expected) { return true; }
315 //
316 // For now, always return false until we have reliable detection
317 false
318 }
319}
320
/// Built image information returned after a successful build.
///
/// Produced by a successful [`ImageBuilder`] build (see module docs).
#[derive(Debug, Clone)]
pub struct BuiltImage {
    /// Image ID (sha256:...)
    pub image_id: String,
    /// Applied tags
    pub tags: Vec<String>,
    /// Number of layers in the final image
    pub layer_count: usize,
    /// Total size in bytes (0 if not computed)
    pub size: u64,
    /// Build duration in milliseconds
    pub build_time_ms: u64,
}
335
/// Registry authentication credentials.
///
/// `Debug` is implemented manually (rather than derived) so that the
/// password is redacted: credentials structs are frequently logged via
/// `{:?}` in error paths, and a derived impl would leak the secret.
#[derive(Clone)]
pub struct RegistryAuth {
    /// Registry username
    pub username: String,
    /// Registry password or token
    pub password: String,
}

impl std::fmt::Debug for RegistryAuth {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Show the username for diagnostics, never the secret.
        f.debug_struct("RegistryAuth")
            .field("username", &self.username)
            .field("password", &"<redacted>")
            .finish()
    }
}

impl RegistryAuth {
    /// Create new registry authentication.
    ///
    /// # Arguments
    ///
    /// * `username` - Registry username
    /// * `password` - Registry password or token
    pub fn new(username: impl Into<String>, password: impl Into<String>) -> Self {
        Self {
            username: username.into(),
            password: password.into(),
        }
    }
}
354
/// Build options for customizing the image build process.
///
/// Normally populated through the fluent setter methods on [`ImageBuilder`]
/// rather than constructed directly. See [`BuildOptions::default`] for the
/// baseline configuration.
#[derive(Debug, Clone)]
#[allow(clippy::struct_excessive_bools)]
pub struct BuildOptions {
    /// Dockerfile path (default: Dockerfile in context)
    pub dockerfile: Option<PathBuf>,
    /// `ZImagefile` path (alternative to Dockerfile)
    pub zimagefile: Option<PathBuf>,
    /// Use runtime template instead of Dockerfile
    pub runtime: Option<Runtime>,
    /// Build arguments (ARG values)
    pub build_args: HashMap<String, String>,
    /// Target stage for multi-stage builds
    pub target: Option<String>,
    /// Image tags to apply (the first tag is used as the primary name)
    pub tags: Vec<String>,
    /// Disable layer caching
    pub no_cache: bool,
    /// Push to registry after build
    pub push: bool,
    /// Registry auth (if pushing; `None` means push without authentication)
    pub registry_auth: Option<RegistryAuth>,
    /// Squash all layers into one
    pub squash: bool,
    /// Image format (oci or docker)
    pub format: Option<String>,
    /// Enable buildah layer caching (--layers flag for `buildah build`).
    /// Default: true
    ///
    /// Note: `ZLayer` uses manual container creation (`buildah from`, `buildah run`,
    /// `buildah commit`) rather than `buildah build`, so this flag is reserved
    /// for future use when/if we switch to `buildah build` (bud) command.
    pub layers: bool,
    /// Registry to pull cache from (--cache-from for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-from=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-from natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_from: Option<String>,
    /// Registry to push cache to (--cache-to for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-to=<registry>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-to natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_to: Option<String>,
    /// Maximum cache age (--cache-ttl for `buildah build`).
    ///
    /// Note: This would be used with `buildah build --cache-ttl=<duration>`.
    /// Currently `ZLayer` uses manual container creation, so this is reserved
    /// for future implementation or for switching to `buildah build`.
    ///
    /// TODO: Implement cache TTL support. This would require either:
    /// 1. Switching to `buildah build` command which supports --cache-ttl natively
    /// 2. Implementing custom cache expiration logic for our layer caching system
    pub cache_ttl: Option<std::time::Duration>,
    /// Cache backend configuration (requires `cache` feature).
    ///
    /// When configured, the builder will store layer data in the specified
    /// cache backend for faster subsequent builds. This is separate from
    /// buildah's native caching and operates at the ZLayer level.
    ///
    /// # Integration Points
    ///
    /// The cache backend is used at several points during the build:
    ///
    /// 1. **Before instruction execution**: Check if a cached layer exists
    ///    for the (instruction_hash, base_layer) tuple
    /// 2. **After instruction execution**: Store the resulting layer data
    ///    in the cache for future builds
    /// 3. **Base image layers**: Cache pulled base image layers to avoid
    ///    re-downloading from registries
    ///
    /// TODO: Wire up cache lookups in the build loop once layer digests
    /// are properly computed and tracked.
    #[cfg(feature = "cache")]
    pub cache_backend_config: Option<CacheBackendConfig>,
    /// Default OCI/WASM-compatible registry to check for images before falling
    /// back to Docker Hub qualification.
    ///
    /// When set, the builder will probe this registry for short image names
    /// before qualifying them to `docker.io`. For example, if set to
    /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
    /// the builder will check `git.example.com:5000/myapp:latest` first.
    pub default_registry: Option<String>,
    /// Default cache mounts injected into all RUN instructions.
    /// These are merged with any step-level cache mounts (deduped by target path).
    pub default_cache_mounts: Vec<RunMount>,
    /// Number of retries for failed RUN steps (0 = no retries, default)
    pub retries: u32,
}
453
454impl Default for BuildOptions {
455 fn default() -> Self {
456 Self {
457 dockerfile: None,
458 zimagefile: None,
459 runtime: None,
460 build_args: HashMap::new(),
461 target: None,
462 tags: Vec::new(),
463 no_cache: false,
464 push: false,
465 registry_auth: None,
466 squash: false,
467 format: None,
468 layers: true,
469 cache_from: None,
470 cache_to: None,
471 cache_ttl: None,
472 #[cfg(feature = "cache")]
473 cache_backend_config: None,
474 default_registry: None,
475 default_cache_mounts: Vec::new(),
476 retries: 0,
477 }
478 }
479}
480
481/// Image builder - orchestrates the full build process
482///
483/// `ImageBuilder` provides a fluent API for configuring and executing
484/// container image builds using buildah as the backend.
485///
486/// # Build Process
487///
488/// 1. Parse Dockerfile (or use runtime template)
489/// 2. Resolve target stages if specified
490/// 3. Build each stage sequentially:
491/// - Create working container from base image
492/// - Execute each instruction
493/// - Commit intermediate stages for COPY --from
494/// 4. Commit final image with tags
495/// 5. Push to registry if configured
496/// 6. Clean up intermediate containers
497///
498/// # Cache Backend Integration (requires `cache` feature)
499///
500/// When a cache backend is configured, the builder can store and retrieve
501/// cached layer data to speed up subsequent builds:
502///
503/// ```no_run,ignore
504/// use zlayer_builder::ImageBuilder;
505///
506/// let builder = ImageBuilder::new("./my-app").await?
507/// .with_cache_dir("/var/cache/zlayer")
508/// .tag("myapp:latest");
509/// ```
pub struct ImageBuilder {
    /// Build context directory (verified to exist at construction time)
    context: PathBuf,
    /// Build options accumulated via the fluent setter methods
    options: BuildOptions,
    /// Buildah executor that runs the underlying buildah commands
    executor: BuildahExecutor,
    /// Event sender for TUI updates; `None` when no consumer is attached
    event_tx: Option<mpsc::Sender<BuildEvent>>,
    /// Cache backend for layer caching (requires `cache` feature).
    ///
    /// When set, the builder will attempt to retrieve cached layers before
    /// executing instructions, and store results in the cache after execution.
    ///
    /// TODO: Implement cache lookups in the build loop. Currently the backend
    /// is stored but not actively used during builds. Integration points:
    /// - Check cache before executing RUN instructions
    /// - Store layer data after successful instruction execution
    /// - Cache base image layers pulled from registries
    #[cfg(feature = "cache")]
    cache_backend: Option<Arc<Box<dyn BlobCacheBackend>>>,
    /// Local OCI registry for checking cached images before remote pulls.
    #[cfg(feature = "local-registry")]
    local_registry: Option<LocalRegistry>,
}
535
536impl ImageBuilder {
537 /// Create a new `ImageBuilder` with the given context directory
538 ///
539 /// The context directory should contain the Dockerfile (unless using
540 /// a runtime template) and any files that will be copied into the image.
541 ///
542 /// # Arguments
543 ///
544 /// * `context` - Path to the build context directory
545 ///
546 /// # Errors
547 ///
548 /// Returns an error if:
549 /// - The context directory does not exist
550 /// - Buildah is not installed or not accessible
551 ///
552 /// # Example
553 ///
554 /// ```no_run
555 /// use zlayer_builder::ImageBuilder;
556 ///
557 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
558 /// let builder = ImageBuilder::new("./my-project").await?;
559 /// # Ok(())
560 /// # }
561 /// ```
562 #[instrument(skip_all, fields(context = %context.as_ref().display()))]
563 pub async fn new(context: impl AsRef<Path>) -> Result<Self> {
564 let context = context.as_ref().to_path_buf();
565
566 // Verify context exists
567 if !context.exists() {
568 return Err(BuildError::ContextRead {
569 path: context,
570 source: std::io::Error::new(
571 std::io::ErrorKind::NotFound,
572 "Build context directory not found",
573 ),
574 });
575 }
576
577 // Initialize buildah executor
578 let executor = BuildahExecutor::new_async().await?;
579
580 debug!("Created ImageBuilder for context: {}", context.display());
581
582 Ok(Self {
583 context,
584 options: BuildOptions::default(),
585 executor,
586 event_tx: None,
587 #[cfg(feature = "cache")]
588 cache_backend: None,
589 #[cfg(feature = "local-registry")]
590 local_registry: None,
591 })
592 }
593
594 /// Create an `ImageBuilder` with a custom buildah executor
595 ///
596 /// This is useful for testing or when you need to configure
597 /// the executor with specific storage options.
598 ///
599 /// # Errors
600 ///
601 /// Returns an error if the context directory does not exist.
602 pub fn with_executor(context: impl AsRef<Path>, executor: BuildahExecutor) -> Result<Self> {
603 let context = context.as_ref().to_path_buf();
604
605 if !context.exists() {
606 return Err(BuildError::ContextRead {
607 path: context,
608 source: std::io::Error::new(
609 std::io::ErrorKind::NotFound,
610 "Build context directory not found",
611 ),
612 });
613 }
614
615 Ok(Self {
616 context,
617 options: BuildOptions::default(),
618 executor,
619 event_tx: None,
620 #[cfg(feature = "cache")]
621 cache_backend: None,
622 #[cfg(feature = "local-registry")]
623 local_registry: None,
624 })
625 }
626
627 /// Set a custom Dockerfile path
628 ///
629 /// By default, the builder looks for a file named `Dockerfile` in the
630 /// context directory. Use this method to specify a different path.
631 ///
632 /// # Example
633 ///
634 /// ```no_run
635 /// # use zlayer_builder::ImageBuilder;
636 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
637 /// let builder = ImageBuilder::new("./my-project").await?
638 /// .dockerfile("./my-project/Dockerfile.prod");
639 /// # Ok(())
640 /// # }
641 /// ```
642 #[must_use]
643 pub fn dockerfile(mut self, path: impl AsRef<Path>) -> Self {
644 self.options.dockerfile = Some(path.as_ref().to_path_buf());
645 self
646 }
647
648 /// Set a custom `ZImagefile` path
649 ///
650 /// `ZImagefiles` are a YAML-based alternative to Dockerfiles. When set,
651 /// the builder will parse the `ZImagefile` and convert it to the internal
652 /// Dockerfile IR for execution.
653 ///
654 /// # Example
655 ///
656 /// ```no_run
657 /// # use zlayer_builder::ImageBuilder;
658 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
659 /// let builder = ImageBuilder::new("./my-project").await?
660 /// .zimagefile("./my-project/ZImagefile");
661 /// # Ok(())
662 /// # }
663 /// ```
664 #[must_use]
665 pub fn zimagefile(mut self, path: impl AsRef<Path>) -> Self {
666 self.options.zimagefile = Some(path.as_ref().to_path_buf());
667 self
668 }
669
670 /// Use a runtime template instead of a Dockerfile
671 ///
672 /// Runtime templates provide pre-built Dockerfiles for common
673 /// development environments. When set, the Dockerfile option is ignored.
674 ///
675 /// # Example
676 ///
677 /// ```no_run
678 /// use zlayer_builder::{ImageBuilder, Runtime};
679 ///
680 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
681 /// let builder = ImageBuilder::new("./my-node-app").await?
682 /// .runtime(Runtime::Node20);
683 /// # Ok(())
684 /// # }
685 /// ```
686 #[must_use]
687 pub fn runtime(mut self, runtime: Runtime) -> Self {
688 self.options.runtime = Some(runtime);
689 self
690 }
691
692 /// Add a build argument
693 ///
694 /// Build arguments are passed to the Dockerfile and can be referenced
695 /// using the `ARG` instruction.
696 ///
697 /// # Example
698 ///
699 /// ```no_run
700 /// # use zlayer_builder::ImageBuilder;
701 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
702 /// let builder = ImageBuilder::new("./my-project").await?
703 /// .build_arg("VERSION", "1.0.0")
704 /// .build_arg("DEBUG", "false");
705 /// # Ok(())
706 /// # }
707 /// ```
708 #[must_use]
709 pub fn build_arg(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
710 self.options.build_args.insert(key.into(), value.into());
711 self
712 }
713
714 /// Set multiple build arguments at once
715 #[must_use]
716 pub fn build_args(mut self, args: HashMap<String, String>) -> Self {
717 self.options.build_args.extend(args);
718 self
719 }
720
721 /// Set the target stage for multi-stage builds
722 ///
723 /// When building a multi-stage Dockerfile, you can stop at a specific
724 /// stage instead of building all stages.
725 ///
726 /// # Example
727 ///
728 /// ```no_run
729 /// # use zlayer_builder::ImageBuilder;
730 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
731 /// // Dockerfile:
732 /// // FROM node:20 AS builder
733 /// // ...
734 /// // FROM node:20-slim AS runtime
735 /// // ...
736 ///
737 /// let builder = ImageBuilder::new("./my-project").await?
738 /// .target("builder")
739 /// .tag("myapp:builder");
740 /// # Ok(())
741 /// # }
742 /// ```
743 #[must_use]
744 pub fn target(mut self, stage: impl Into<String>) -> Self {
745 self.options.target = Some(stage.into());
746 self
747 }
748
749 /// Add an image tag
750 ///
751 /// Tags are applied to the final image. You can add multiple tags.
752 /// The first tag is used as the primary image name during commit.
753 ///
754 /// # Example
755 ///
756 /// ```no_run
757 /// # use zlayer_builder::ImageBuilder;
758 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
759 /// let builder = ImageBuilder::new("./my-project").await?
760 /// .tag("myapp:latest")
761 /// .tag("myapp:v1.0.0")
762 /// .tag("registry.example.com/myapp:v1.0.0");
763 /// # Ok(())
764 /// # }
765 /// ```
766 #[must_use]
767 pub fn tag(mut self, tag: impl Into<String>) -> Self {
768 self.options.tags.push(tag.into());
769 self
770 }
771
772 /// Disable layer caching
773 ///
774 /// When enabled, all layers are rebuilt from scratch even if
775 /// they could be served from cache.
776 ///
777 /// Note: Currently this flag is tracked but not fully implemented in the
778 /// build process. `ZLayer` uses manual container creation (`buildah from`,
779 /// `buildah run`, `buildah commit`) which doesn't have built-in caching
780 /// like `buildah build` does. Future work could implement layer-level
781 /// caching by checking instruction hashes against previously built layers.
782 #[must_use]
783 pub fn no_cache(mut self) -> Self {
784 self.options.no_cache = true;
785 self
786 }
787
788 /// Enable or disable layer caching
789 ///
790 /// This controls the `--layers` flag for buildah. When enabled (default),
791 /// buildah can cache and reuse intermediate layers.
792 ///
793 /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
794 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
795 /// flag is reserved for future use when/if we switch to `buildah build`.
796 ///
797 /// # Example
798 ///
799 /// ```no_run
800 /// # use zlayer_builder::ImageBuilder;
801 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
802 /// let builder = ImageBuilder::new("./my-project").await?
803 /// .layers(false) // Disable layer caching
804 /// .tag("myapp:latest");
805 /// # Ok(())
806 /// # }
807 /// ```
808 #[must_use]
809 pub fn layers(mut self, enable: bool) -> Self {
810 self.options.layers = enable;
811 self
812 }
813
814 /// Set registry to pull cache from
815 ///
816 /// This corresponds to buildah's `--cache-from` flag, which allows
817 /// pulling cached layers from a remote registry to speed up builds.
818 ///
819 /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
820 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
821 /// option is reserved for future implementation.
822 ///
823 /// TODO: Implement remote cache support. This would require either:
824 /// 1. Switching to `buildah build` command which supports --cache-from natively
825 /// 2. Implementing custom layer caching with registry pull for intermediate layers
826 ///
827 /// # Example
828 ///
829 /// ```no_run
830 /// # use zlayer_builder::ImageBuilder;
831 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
832 /// let builder = ImageBuilder::new("./my-project").await?
833 /// .cache_from("registry.example.com/myapp:cache")
834 /// .tag("myapp:latest");
835 /// # Ok(())
836 /// # }
837 /// ```
838 #[must_use]
839 pub fn cache_from(mut self, registry: impl Into<String>) -> Self {
840 self.options.cache_from = Some(registry.into());
841 self
842 }
843
844 /// Set registry to push cache to
845 ///
846 /// This corresponds to buildah's `--cache-to` flag, which allows
847 /// pushing cached layers to a remote registry for future builds to use.
848 ///
849 /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
850 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
851 /// option is reserved for future implementation.
852 ///
853 /// TODO: Implement remote cache support. This would require either:
854 /// 1. Switching to `buildah build` command which supports --cache-to natively
855 /// 2. Implementing custom layer caching with registry push for intermediate layers
856 ///
857 /// # Example
858 ///
859 /// ```no_run
860 /// # use zlayer_builder::ImageBuilder;
861 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
862 /// let builder = ImageBuilder::new("./my-project").await?
863 /// .cache_to("registry.example.com/myapp:cache")
864 /// .tag("myapp:latest");
865 /// # Ok(())
866 /// # }
867 /// ```
868 #[must_use]
869 pub fn cache_to(mut self, registry: impl Into<String>) -> Self {
870 self.options.cache_to = Some(registry.into());
871 self
872 }
873
874 /// Set maximum cache age
875 ///
876 /// This corresponds to buildah's `--cache-ttl` flag, which sets the
877 /// maximum age for cached layers before they are considered stale.
878 ///
879 /// Note: `ZLayer` currently uses manual container creation (`buildah from`,
880 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
881 /// option is reserved for future implementation.
882 ///
883 /// TODO: Implement cache TTL support. This would require either:
884 /// 1. Switching to `buildah build` command which supports --cache-ttl natively
885 /// 2. Implementing custom cache expiration logic for our layer caching system
886 ///
887 /// # Example
888 ///
889 /// ```no_run
890 /// # use zlayer_builder::ImageBuilder;
891 /// # use std::time::Duration;
892 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
893 /// let builder = ImageBuilder::new("./my-project").await?
894 /// .cache_ttl(Duration::from_secs(3600 * 24)) // 24 hours
895 /// .tag("myapp:latest");
896 /// # Ok(())
897 /// # }
898 /// ```
899 #[must_use]
900 pub fn cache_ttl(mut self, ttl: std::time::Duration) -> Self {
901 self.options.cache_ttl = Some(ttl);
902 self
903 }
904
905 /// Push the image to a registry after building
906 ///
907 /// # Arguments
908 ///
909 /// * `auth` - Registry authentication credentials
910 ///
911 /// # Example
912 ///
913 /// ```no_run
914 /// use zlayer_builder::{ImageBuilder, RegistryAuth};
915 ///
916 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
917 /// let builder = ImageBuilder::new("./my-project").await?
918 /// .tag("registry.example.com/myapp:v1.0.0")
919 /// .push(RegistryAuth::new("user", "password"));
920 /// # Ok(())
921 /// # }
922 /// ```
923 #[must_use]
924 pub fn push(mut self, auth: RegistryAuth) -> Self {
925 self.options.push = true;
926 self.options.registry_auth = Some(auth);
927 self
928 }
929
930 /// Enable pushing without authentication
931 ///
932 /// Use this for registries that don't require authentication
933 /// (e.g., local registries, insecure registries).
934 #[must_use]
935 pub fn push_without_auth(mut self) -> Self {
936 self.options.push = true;
937 self.options.registry_auth = None;
938 self
939 }
940
941 /// Set a default OCI/WASM-compatible registry to check for images.
942 ///
943 /// When set, the builder will probe this registry for short image names
944 /// before qualifying them to `docker.io`. For example, if set to
945 /// `"git.example.com:5000"` and the `ZImagefile` uses `base: "myapp:latest"`,
946 /// the builder will check `git.example.com:5000/myapp:latest` first.
947 #[must_use]
948 pub fn default_registry(mut self, registry: impl Into<String>) -> Self {
949 self.options.default_registry = Some(registry.into());
950 self
951 }
952
953 /// Set a local OCI registry for image resolution.
954 ///
955 /// When set, the builder checks the local registry for cached images
956 /// before pulling from remote registries.
957 #[cfg(feature = "local-registry")]
958 pub fn with_local_registry(mut self, registry: LocalRegistry) -> Self {
959 self.local_registry = Some(registry);
960 self
961 }
962
963 /// Squash all layers into a single layer
964 ///
965 /// This reduces image size but loses layer caching benefits.
966 #[must_use]
967 pub fn squash(mut self) -> Self {
968 self.options.squash = true;
969 self
970 }
971
972 /// Set the image format
973 ///
974 /// Valid values are "oci" (default) or "docker".
975 #[must_use]
976 pub fn format(mut self, format: impl Into<String>) -> Self {
977 self.options.format = Some(format.into());
978 self
979 }
980
981 /// Set default cache mounts to inject into all RUN instructions
982 #[must_use]
983 pub fn default_cache_mounts(mut self, mounts: Vec<RunMount>) -> Self {
984 self.options.default_cache_mounts = mounts;
985 self
986 }
987
988 /// Set the number of retries for failed RUN steps
989 #[must_use]
990 pub fn retries(mut self, retries: u32) -> Self {
991 self.options.retries = retries;
992 self
993 }
994
995 /// Set an event sender for TUI progress updates
996 ///
997 /// Events will be sent as the build progresses, allowing you to
998 /// display a progress UI or log build status.
999 ///
1000 /// # Example
1001 ///
1002 /// ```no_run
1003 /// use zlayer_builder::{ImageBuilder, BuildEvent};
1004 /// use std::sync::mpsc;
1005 ///
1006 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1007 /// let (tx, rx) = mpsc::channel::<BuildEvent>();
1008 ///
1009 /// let builder = ImageBuilder::new("./my-project").await?
1010 /// .tag("myapp:latest")
1011 /// .with_events(tx);
1012 /// # Ok(())
1013 /// # }
1014 /// ```
1015 #[must_use]
1016 pub fn with_events(mut self, tx: mpsc::Sender<BuildEvent>) -> Self {
1017 self.event_tx = Some(tx);
1018 self
1019 }
1020
1021 /// Configure a persistent disk cache backend for layer caching.
1022 ///
1023 /// When configured, the builder will store layer data on disk at the
1024 /// specified path. This cache persists across builds and significantly
1025 /// speeds up repeated builds of similar images.
1026 ///
1027 /// Requires the `cache-persistent` feature to be enabled.
1028 ///
1029 /// # Arguments
1030 ///
1031 /// * `path` - Path to the cache directory. If a directory, creates
1032 /// `blob_cache.redb` inside it. If a file path, uses it directly.
1033 ///
1034 /// # Example
1035 ///
1036 /// ```no_run,ignore
1037 /// use zlayer_builder::ImageBuilder;
1038 ///
1039 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1040 /// let builder = ImageBuilder::new("./my-project").await?
1041 /// .with_cache_dir("/var/cache/zlayer")
1042 /// .tag("myapp:latest");
1043 /// # Ok(())
1044 /// # }
1045 /// ```
1046 ///
1047 /// # Integration Status
1048 ///
1049 /// TODO: The cache backend is currently stored but not actively used
1050 /// during builds. Future work will wire up:
1051 /// - Cache lookups before executing RUN instructions
1052 /// - Storing layer data after successful execution
1053 /// - Caching base image layers from registry pulls
1054 #[cfg(feature = "cache-persistent")]
1055 pub fn with_cache_dir(mut self, path: impl AsRef<Path>) -> Self {
1056 self.options.cache_backend_config = Some(CacheBackendConfig::Persistent {
1057 path: path.as_ref().to_path_buf(),
1058 });
1059 debug!(
1060 "Configured persistent cache at: {}",
1061 path.as_ref().display()
1062 );
1063 self
1064 }
1065
1066 /// Configure an in-memory cache backend for layer caching.
1067 ///
1068 /// The in-memory cache is cleared when the process exits, but can
1069 /// speed up builds within a single session by caching intermediate
1070 /// layers and avoiding redundant operations.
1071 ///
1072 /// Requires the `cache` feature to be enabled.
1073 ///
1074 /// # Example
1075 ///
1076 /// ```no_run,ignore
1077 /// use zlayer_builder::ImageBuilder;
1078 ///
1079 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1080 /// let builder = ImageBuilder::new("./my-project").await?
1081 /// .with_memory_cache()
1082 /// .tag("myapp:latest");
1083 /// # Ok(())
1084 /// # }
1085 /// ```
1086 ///
1087 /// # Integration Status
1088 ///
1089 /// TODO: The cache backend is currently stored but not actively used
1090 /// during builds. See `with_cache_dir` for integration status details.
1091 #[cfg(feature = "cache")]
1092 pub fn with_memory_cache(mut self) -> Self {
1093 self.options.cache_backend_config = Some(CacheBackendConfig::Memory);
1094 debug!("Configured in-memory cache");
1095 self
1096 }
1097
1098 /// Configure an S3-compatible storage backend for layer caching.
1099 ///
1100 /// This is useful for distributed build systems where multiple build
1101 /// machines need to share a layer cache. Supports AWS S3, Cloudflare R2,
1102 /// Backblaze B2, MinIO, and other S3-compatible services.
1103 ///
1104 /// Requires the `cache-s3` feature to be enabled.
1105 ///
1106 /// # Arguments
1107 ///
1108 /// * `bucket` - S3 bucket name
1109 /// * `region` - AWS region (optional, uses SDK default if not set)
1110 ///
1111 /// # Example
1112 ///
1113 /// ```no_run,ignore
1114 /// use zlayer_builder::ImageBuilder;
1115 ///
1116 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1117 /// let builder = ImageBuilder::new("./my-project").await?
1118 /// .with_s3_cache("my-build-cache", Some("us-west-2"))
1119 /// .tag("myapp:latest");
1120 /// # Ok(())
1121 /// # }
1122 /// ```
1123 ///
1124 /// # Integration Status
1125 ///
1126 /// TODO: The cache backend is currently stored but not actively used
1127 /// during builds. See `with_cache_dir` for integration status details.
1128 #[cfg(feature = "cache-s3")]
1129 pub fn with_s3_cache(mut self, bucket: impl Into<String>, region: Option<String>) -> Self {
1130 self.options.cache_backend_config = Some(CacheBackendConfig::S3 {
1131 bucket: bucket.into(),
1132 region,
1133 endpoint: None,
1134 prefix: None,
1135 });
1136 debug!("Configured S3 cache");
1137 self
1138 }
1139
1140 /// Configure an S3-compatible storage backend with custom endpoint.
1141 ///
1142 /// Use this method for S3-compatible services that require a custom
1143 /// endpoint URL (e.g., Cloudflare R2, MinIO, local development).
1144 ///
1145 /// Requires the `cache-s3` feature to be enabled.
1146 ///
1147 /// # Arguments
1148 ///
1149 /// * `bucket` - S3 bucket name
1150 /// * `endpoint` - Custom endpoint URL
1151 /// * `region` - Region (required for some S3-compatible services)
1152 ///
1153 /// # Example
1154 ///
1155 /// ```no_run,ignore
1156 /// use zlayer_builder::ImageBuilder;
1157 ///
1158 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1159 /// // Cloudflare R2
1160 /// let builder = ImageBuilder::new("./my-project").await?
1161 /// .with_s3_cache_endpoint(
1162 /// "my-bucket",
1163 /// "https://accountid.r2.cloudflarestorage.com",
1164 /// Some("auto".to_string()),
1165 /// )
1166 /// .tag("myapp:latest");
1167 /// # Ok(())
1168 /// # }
1169 /// ```
1170 #[cfg(feature = "cache-s3")]
1171 pub fn with_s3_cache_endpoint(
1172 mut self,
1173 bucket: impl Into<String>,
1174 endpoint: impl Into<String>,
1175 region: Option<String>,
1176 ) -> Self {
1177 self.options.cache_backend_config = Some(CacheBackendConfig::S3 {
1178 bucket: bucket.into(),
1179 region,
1180 endpoint: Some(endpoint.into()),
1181 prefix: None,
1182 });
1183 debug!("Configured S3 cache with custom endpoint");
1184 self
1185 }
1186
1187 /// Configure a custom cache backend configuration.
1188 ///
1189 /// This is the most flexible way to configure the cache backend,
1190 /// allowing full control over all cache settings.
1191 ///
1192 /// Requires the `cache` feature to be enabled.
1193 ///
1194 /// # Example
1195 ///
1196 /// ```no_run,ignore
1197 /// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
1198 ///
1199 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1200 /// let builder = ImageBuilder::new("./my-project").await?
1201 /// .with_cache_config(CacheBackendConfig::Memory)
1202 /// .tag("myapp:latest");
1203 /// # Ok(())
1204 /// # }
1205 /// ```
1206 #[cfg(feature = "cache")]
1207 pub fn with_cache_config(mut self, config: CacheBackendConfig) -> Self {
1208 self.options.cache_backend_config = Some(config);
1209 debug!("Configured custom cache backend");
1210 self
1211 }
1212
1213 /// Set an already-initialized cache backend directly.
1214 ///
1215 /// This is useful when you have a pre-configured cache backend instance
1216 /// that you want to share across multiple builders or when you need
1217 /// fine-grained control over cache initialization.
1218 ///
1219 /// Requires the `cache` feature to be enabled.
1220 ///
1221 /// # Example
1222 ///
1223 /// ```no_run,ignore
1224 /// use zlayer_builder::ImageBuilder;
1225 /// use zlayer_registry::cache::BlobCache;
1226 /// use std::sync::Arc;
1227 ///
1228 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
1229 /// let cache = Arc::new(Box::new(BlobCache::new()?) as Box<dyn zlayer_registry::cache::BlobCacheBackend>);
1230 ///
1231 /// let builder = ImageBuilder::new("./my-project").await?
1232 /// .with_cache_backend(cache)
1233 /// .tag("myapp:latest");
1234 /// # Ok(())
1235 /// # }
1236 /// ```
1237 #[cfg(feature = "cache")]
1238 pub fn with_cache_backend(mut self, backend: Arc<Box<dyn BlobCacheBackend>>) -> Self {
1239 self.cache_backend = Some(backend);
1240 debug!("Configured pre-initialized cache backend");
1241 self
1242 }
1243
    /// Run the build
    ///
    /// This executes the complete build process:
    /// 1. Parse Dockerfile or load runtime template
    /// 2. Build all required stages
    /// 3. Commit and tag the final image
    /// 4. Push to registry if configured
    /// 5. Clean up intermediate containers
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Dockerfile parsing fails
    /// - A buildah command fails
    /// - Target stage is not found
    /// - Registry push fails
    ///
    /// # Panics
    ///
    /// Panics if an instruction output is missing after all retry attempts (internal invariant).
    #[allow(clippy::too_many_lines)]
    #[instrument(skip(self), fields(context = %self.context.display()))]
    pub async fn build(self) -> Result<BuiltImage> {
        let start_time = std::time::Instant::now();
        let build_id = generate_build_id();

        info!(
            "Starting build in context: {} (build_id: {})",
            self.context.display(),
            build_id
        );

        // 1. Get parsed Dockerfile (from template, ZImagefile, or Dockerfile)
        let dockerfile = self.get_dockerfile().await?;
        debug!("Parsed Dockerfile with {} stages", dockerfile.stages.len());

        // 2. Determine stages to build (all stages, or target + predecessors)
        let stages = self.resolve_stages(&dockerfile)?;
        debug!("Building {} stages", stages.len());

        // 3. Build each stage
        let mut stage_images: HashMap<String, String> = HashMap::new();
        // Track the final WORKDIR for each committed stage, used to resolve
        // relative source paths in COPY --from instructions.
        let mut stage_workdirs: HashMap<String, String> = HashMap::new();
        let mut final_container: Option<String> = None;
        let mut total_instructions = 0;

        // Initialize the layer cache tracker for this build session.
        // This tracks which instruction+base_layer combinations we've seen
        // and whether they were cache hits.
        let mut cache_tracker = LayerCacheTracker::new();

        for (stage_idx, stage) in stages.iter().enumerate() {
            let is_final_stage = stage_idx == stages.len() - 1;

            self.send_event(BuildEvent::StageStarted {
                index: stage_idx,
                name: stage.name.clone(),
                base_image: stage.base_image.to_string_ref(),
            });

            // Create container from base image
            let base = self
                .resolve_base_image(&stage.base_image, &stage_images)
                .await?;
            let container_id = self.create_container(&base).await?;

            debug!(
                "Created container {} for stage {} (base: {})",
                container_id,
                stage.identifier(),
                base
            );

            // Track the current base layer for cache key computation.
            // Each instruction modifies the container, so we update this after each instruction.
            let mut current_base_layer = container_id.clone();

            // Track the current WORKDIR for this stage. Used to resolve relative paths
            // when this stage is used as a source for COPY --from in a later stage.
            let mut current_workdir = String::from("/");

            // Execute instructions
            for (inst_idx, instruction) in stage.instructions.iter().enumerate() {
                self.send_event(BuildEvent::InstructionStarted {
                    stage: stage_idx,
                    index: inst_idx,
                    instruction: format!("{instruction:?}"),
                });

                // Generate the cache key for this instruction
                let instruction_cache_key = instruction.cache_key();

                // Track instruction start time for potential cache hit heuristics
                let instruction_start = std::time::Instant::now();

                // Resolve COPY --from references to actual committed image names,
                // and resolve relative source paths using the source stage's WORKDIR.
                // `resolved_instruction` outlives the borrow held by `instruction_ref`.
                let resolved_instruction;
                let instruction_ref = if let Instruction::Copy(copy) = instruction {
                    if let Some(ref from) = copy.from {
                        if let Some(image_name) = stage_images.get(from) {
                            let mut resolved_copy = copy.clone();
                            resolved_copy.from = Some(image_name.clone());

                            // Resolve relative source paths using the source stage's WORKDIR.
                            // If the source stage had `workdir: "/build"` and the copy source
                            // is `"app"`, we need to resolve it to `"/build/app"`.
                            if let Some(source_workdir) = stage_workdirs.get(from) {
                                resolved_copy.sources = resolved_copy
                                    .sources
                                    .iter()
                                    .map(|src| {
                                        if src.starts_with('/') {
                                            // Absolute path - use as-is
                                            src.clone()
                                        } else {
                                            // Relative path - prepend source stage's workdir
                                            if source_workdir == "/" {
                                                format!("/{src}")
                                            } else {
                                                format!("{source_workdir}/{src}")
                                            }
                                        }
                                    })
                                    .collect();
                            }

                            resolved_instruction = Instruction::Copy(resolved_copy);
                            &resolved_instruction
                        } else {
                            // `--from` names an image, not a stage we built - leave as-is
                            instruction
                        }
                    } else {
                        instruction
                    }
                } else {
                    instruction
                };

                // Inject default cache mounts into RUN instructions
                let instruction_with_defaults;
                let instruction_ref = if self.options.default_cache_mounts.is_empty() {
                    instruction_ref
                } else if let Instruction::Run(run) = instruction_ref {
                    let mut merged = run.clone();
                    for default_mount in &self.options.default_cache_mounts {
                        // Deduplicate by target path
                        let RunMount::Cache { target, .. } = default_mount else {
                            continue;
                        };
                        let already_has = merged
                            .mounts
                            .iter()
                            .any(|m| matches!(m, RunMount::Cache { target: t, .. } if t == target));
                        if !already_has {
                            merged.mounts.push(default_mount.clone());
                        }
                    }
                    instruction_with_defaults = Instruction::Run(merged);
                    &instruction_with_defaults
                } else {
                    instruction_ref
                };

                // Only RUN instructions are retried; everything else runs once.
                let is_run_instruction = matches!(instruction_ref, Instruction::Run(_));
                let max_attempts = if is_run_instruction {
                    self.options.retries + 1
                } else {
                    1
                };

                let commands = BuildahCommand::from_instruction(&container_id, instruction_ref);

                let mut combined_output = String::new();
                for cmd in commands {
                    let mut last_output = None;

                    for attempt in 1..=max_attempts {
                        if attempt > 1 {
                            tracing::warn!(
                                "Retrying step (attempt {}/{})...",
                                attempt,
                                max_attempts
                            );
                            self.send_event(BuildEvent::Output {
                                line: format!(
                                    "⟳ Retrying step (attempt {attempt}/{max_attempts})..."
                                ),
                                is_stderr: false,
                            });
                            tokio::time::sleep(std::time::Duration::from_secs(3)).await;
                        }

                        // Stream stdout/stderr lines to the event channel as they arrive.
                        let output = self
                            .executor
                            .execute_streaming(&cmd, |is_stdout, line| {
                                self.send_event(BuildEvent::Output {
                                    line: line.to_string(),
                                    is_stderr: !is_stdout,
                                });
                            })
                            .await?;

                        combined_output.push_str(&output.stdout);
                        combined_output.push_str(&output.stderr);

                        if output.success() {
                            last_output = Some(output);
                            break;
                        }

                        last_output = Some(output);
                    }

                    // Invariant: max_attempts >= 1, so the loop above always set
                    // `last_output` - this is the panic documented under # Panics.
                    let output = last_output.unwrap();
                    if !output.success() {
                        self.send_event(BuildEvent::BuildFailed {
                            error: output.stderr.clone(),
                        });

                        // Cleanup container (best-effort; the original error is returned)
                        let _ = self
                            .executor
                            .execute(&BuildahCommand::rm(&container_id))
                            .await;

                        return Err(BuildError::buildah_execution(
                            cmd.to_command_string(),
                            output.exit_code,
                            output.stderr,
                        ));
                    }
                }

                #[allow(clippy::cast_possible_truncation)]
                let instruction_elapsed_ms = instruction_start.elapsed().as_millis() as u64;

                // Track WORKDIR changes for later COPY --from resolution.
                // We need to know the final WORKDIR of each stage so we can resolve
                // relative paths when copying from that stage.
                if let Instruction::Workdir(dir) = instruction {
                    current_workdir.clone_from(dir);
                }

                // Attempt to detect if this was a cache hit.
                // TODO: Implement proper cache detection. Currently always returns false.
                // Possible approaches:
                // - Time-based: Cached layers typically execute in < 100ms
                // - Output-based: Look for cache indicators in buildah output
                // - Digest-based: Pre-compute expected digest and check storage
                let cached = cache_tracker.detect_cache_hit(
                    instruction,
                    instruction_elapsed_ms,
                    &combined_output,
                );

                // Record this instruction execution for future reference
                cache_tracker.record(
                    instruction_cache_key.clone(),
                    current_base_layer.clone(),
                    cached,
                );

                // Update the base layer identifier for the next instruction.
                // In a proper implementation, this would be the new layer digest
                // after the instruction was committed. For now, we use a composite
                // of the previous base and the instruction key.
                current_base_layer = format!("{current_base_layer}:{instruction_cache_key}");

                self.send_event(BuildEvent::InstructionComplete {
                    stage: stage_idx,
                    index: inst_idx,
                    cached,
                });

                total_instructions += 1;
            }

            // Handle stage completion
            if let Some(name) = &stage.name {
                // Named stage - commit and save for COPY --from
                // Include the build_id to prevent collisions when parallel
                // builds share stage names (e.g., two Dockerfiles both having
                // a stage named "builder").
                let image_name = format!("zlayer-build-{build_id}-stage-{name}");
                self.commit_container(&container_id, &image_name, false)
                    .await?;
                stage_images.insert(name.clone(), image_name.clone());

                // Store the final WORKDIR for this stage so COPY --from can resolve
                // relative paths correctly.
                stage_workdirs.insert(name.clone(), current_workdir.clone());

                // Also add by index
                stage_images.insert(stage.index.to_string(), image_name.clone());
                stage_workdirs.insert(stage.index.to_string(), current_workdir.clone());

                // If this is also the final stage (named target), keep reference
                if is_final_stage {
                    final_container = Some(container_id);
                } else {
                    // Cleanup intermediate container
                    let _ = self
                        .executor
                        .execute(&BuildahCommand::rm(&container_id))
                        .await;
                }
            } else if is_final_stage {
                // Unnamed final stage - keep container for final commit
                final_container = Some(container_id);
            } else {
                // Unnamed intermediate stage - commit by index for COPY --from
                let image_name = format!("zlayer-build-{}-stage-{}", build_id, stage.index);
                self.commit_container(&container_id, &image_name, false)
                    .await?;
                stage_images.insert(stage.index.to_string(), image_name);
                // Store the final WORKDIR for this stage
                stage_workdirs.insert(stage.index.to_string(), current_workdir.clone());
                let _ = self
                    .executor
                    .execute(&BuildahCommand::rm(&container_id))
                    .await;
            }

            self.send_event(BuildEvent::StageComplete { index: stage_idx });
        }

        // 4. Commit final image
        let final_container = final_container.ok_or_else(|| BuildError::InvalidInstruction {
            instruction: "build".to_string(),
            reason: "No stages to build".to_string(),
        })?;

        // First tag names the image; fall back to a timestamped default.
        let image_name = self
            .options
            .tags
            .first()
            .cloned()
            .unwrap_or_else(|| format!("zlayer-build:{}", chrono_lite_timestamp()));

        let image_id = self
            .commit_container(&final_container, &image_name, self.options.squash)
            .await?;

        info!("Committed final image: {} ({})", image_name, image_id);

        // 5. Apply additional tags
        for tag in self.options.tags.iter().skip(1) {
            self.tag_image(&image_id, tag).await?;
            debug!("Applied tag: {}", tag);
        }

        // 6. Cleanup (best-effort; failures here do not fail the build)
        let _ = self
            .executor
            .execute(&BuildahCommand::rm(&final_container))
            .await;

        // Cleanup intermediate stage images
        for (_, img) in stage_images {
            let _ = self.executor.execute(&BuildahCommand::rmi(&img)).await;
        }

        // 7. Push if requested
        if self.options.push {
            for tag in &self.options.tags {
                self.push_image(tag).await?;
                info!("Pushed image: {}", tag);
            }
        }

        #[allow(clippy::cast_possible_truncation)]
        let build_time_ms = start_time.elapsed().as_millis() as u64;

        self.send_event(BuildEvent::BuildComplete {
            image_id: image_id.clone(),
        });

        info!(
            "Build completed in {}ms: {} with {} tags",
            build_time_ms,
            image_id,
            self.options.tags.len()
        );

        Ok(BuiltImage {
            image_id,
            tags: self.options.tags.clone(),
            layer_count: total_instructions,
            size: 0, // TODO: get actual size via buildah inspect
            build_time_ms,
        })
    }
1639
1640 /// Get a parsed [`Dockerfile`] from the configured source.
1641 ///
1642 /// Detection order:
1643 /// 1. If `runtime` is set → use template string → parse as Dockerfile
1644 /// 2. If `zimagefile` is explicitly set → read & parse `ZImagefile` → convert
1645 /// 3. If a file called `ZImagefile` exists in the context dir → same as (2)
1646 /// 4. Fall back to reading a Dockerfile (from `dockerfile` option or default)
1647 async fn get_dockerfile(&self) -> Result<Dockerfile> {
1648 // (a) Runtime template takes highest priority.
1649 if let Some(runtime) = &self.options.runtime {
1650 debug!("Using runtime template: {}", runtime);
1651 let content = get_template(*runtime);
1652 return Dockerfile::parse(content);
1653 }
1654
1655 // (b) Explicit ZImagefile path.
1656 if let Some(ref zimage_path) = self.options.zimagefile {
1657 debug!("Reading ZImagefile: {}", zimage_path.display());
1658 let content =
1659 fs::read_to_string(zimage_path)
1660 .await
1661 .map_err(|e| BuildError::ContextRead {
1662 path: zimage_path.clone(),
1663 source: e,
1664 })?;
1665 let zimage = crate::zimage::parse_zimagefile(&content)?;
1666 return self.handle_zimage(&zimage).await;
1667 }
1668
1669 // (c) Auto-detect ZImagefile in context directory.
1670 let auto_zimage_path = self.context.join("ZImagefile");
1671 if auto_zimage_path.exists() {
1672 debug!(
1673 "Found ZImagefile in context: {}",
1674 auto_zimage_path.display()
1675 );
1676 let content = fs::read_to_string(&auto_zimage_path).await.map_err(|e| {
1677 BuildError::ContextRead {
1678 path: auto_zimage_path,
1679 source: e,
1680 }
1681 })?;
1682 let zimage = crate::zimage::parse_zimagefile(&content)?;
1683 return self.handle_zimage(&zimage).await;
1684 }
1685
1686 // (d) Fall back to Dockerfile.
1687 let dockerfile_path = self
1688 .options
1689 .dockerfile
1690 .clone()
1691 .unwrap_or_else(|| self.context.join("Dockerfile"));
1692
1693 debug!("Reading Dockerfile: {}", dockerfile_path.display());
1694
1695 let content =
1696 fs::read_to_string(&dockerfile_path)
1697 .await
1698 .map_err(|e| BuildError::ContextRead {
1699 path: dockerfile_path,
1700 source: e,
1701 })?;
1702
1703 Dockerfile::parse(&content)
1704 }
1705
1706 /// Convert a parsed [`ZImage`] into the internal [`Dockerfile`] IR.
1707 ///
1708 /// Handles the three `ZImage` modes that can produce a Dockerfile:
1709 /// - **Runtime** mode: delegates to the template system
1710 /// - **Single-stage / Multi-stage**: converts via [`zimage_to_dockerfile`]
1711 /// - **WASM** mode: errors out (WASM uses `zlayer wasm build`, not `zlayer build`)
1712 ///
1713 /// Any `build:` directives are resolved first by spawning nested builds.
1714 async fn handle_zimage(&self, zimage: &crate::zimage::ZImage) -> Result<Dockerfile> {
1715 // Runtime mode: delegate to template system.
1716 if let Some(ref runtime_name) = zimage.runtime {
1717 let rt = Runtime::from_name(runtime_name).ok_or_else(|| {
1718 BuildError::zimagefile_validation(format!(
1719 "unknown runtime '{runtime_name}' in ZImagefile"
1720 ))
1721 })?;
1722 let content = get_template(rt);
1723 return Dockerfile::parse(content);
1724 }
1725
1726 // WASM mode: not supported through `zlayer build`.
1727 if zimage.wasm.is_some() {
1728 return Err(BuildError::invalid_instruction(
1729 "ZImagefile",
1730 "WASM builds use `zlayer wasm build`, not `zlayer build`",
1731 ));
1732 }
1733
1734 // Resolve any `build:` directives to concrete base image tags.
1735 let resolved = self.resolve_build_directives(zimage).await?;
1736
1737 // Single-stage or multi-stage: convert to Dockerfile IR directly.
1738 crate::zimage::zimage_to_dockerfile(&resolved)
1739 }
1740
1741 /// Resolve `build:` directives in a `ZImage` by running nested builds.
1742 ///
1743 /// For each `build:` directive (top-level or per-stage), this method:
1744 /// 1. Determines the build context directory
1745 /// 2. Auto-detects the build file (`ZImagefile` > Dockerfile) unless specified
1746 /// 3. Spawns a nested `ImageBuilder` to build the context
1747 /// 4. Tags the result and replaces `build` with `base`
1748 async fn resolve_build_directives(
1749 &self,
1750 zimage: &crate::zimage::ZImage,
1751 ) -> Result<crate::zimage::ZImage> {
1752 let mut resolved = zimage.clone();
1753
1754 // Resolve top-level `build:` directive.
1755 if let Some(ref build_ctx) = resolved.build {
1756 let tag = self.run_nested_build(build_ctx, "toplevel").await?;
1757 resolved.base = Some(tag);
1758 resolved.build = None;
1759 }
1760
1761 // Resolve per-stage `build:` directives.
1762 if let Some(ref mut stages) = resolved.stages {
1763 for (name, stage) in stages.iter_mut() {
1764 if let Some(ref build_ctx) = stage.build {
1765 let tag = self.run_nested_build(build_ctx, name).await?;
1766 stage.base = Some(tag);
1767 stage.build = None;
1768 }
1769 }
1770 }
1771
1772 Ok(resolved)
1773 }
1774
    /// Run a nested build from a `build:` directive and return the resulting image tag.
    ///
    /// Returns a boxed future instead of being an `async fn`: nested builds call
    /// back into [`ImageBuilder::build`] (via `run_nested_build_inner`), forming a
    /// recursive async call chain, and the `Pin<Box<dyn Future>>` indirection gives
    /// the recursive future a known size.
    fn run_nested_build<'a>(
        &'a self,
        build_ctx: &'a crate::zimage::types::ZBuildContext,
        stage_name: &'a str,
    ) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<String>> + Send + 'a>> {
        Box::pin(self.run_nested_build_inner(build_ctx, stage_name))
    }
1783
    /// Perform a nested build for a `build:` directive and return its image tag.
    ///
    /// Builds the directive's context directory with a fresh [`ImageBuilder`],
    /// tagging the result `zlayer-build-dep-<stage>:<timestamp>` so the caller
    /// can substitute the tag as a `base:` image.
    ///
    /// # Errors
    ///
    /// Returns [`BuildError::ContextRead`] if the context directory does not
    /// exist, or any error produced by the nested build itself.
    async fn run_nested_build_inner(
        &self,
        build_ctx: &crate::zimage::types::ZBuildContext,
        stage_name: &str,
    ) -> Result<String> {
        let context_dir = build_ctx.context_dir(&self.context);

        // Fail early with a descriptive error if the directive points nowhere.
        if !context_dir.exists() {
            return Err(BuildError::ContextRead {
                path: context_dir,
                source: std::io::Error::new(
                    std::io::ErrorKind::NotFound,
                    format!(
                        "build context directory not found for build directive in '{stage_name}'"
                    ),
                ),
            });
        }

        info!(
            "Building nested image for '{}' from context: {}",
            stage_name,
            context_dir.display()
        );

        // Create a tag for the nested build result.
        // The timestamp keeps tags from repeated builds distinct.
        let tag = format!(
            "zlayer-build-dep-{}:{}",
            stage_name,
            chrono_lite_timestamp()
        );

        // Create nested builder.
        let mut nested = ImageBuilder::new(&context_dir).await?;
        nested = nested.tag(&tag);

        // Apply explicit build file if specified.
        // Heuristic: a YAML extension or a "ZImagefile" name prefix selects the
        // ZImagefile parser; anything else is treated as a Dockerfile.
        if let Some(file) = build_ctx.file() {
            let file_path = context_dir.join(file);
            if std::path::Path::new(file).extension().is_some_and(|ext| {
                ext.eq_ignore_ascii_case("yml") || ext.eq_ignore_ascii_case("yaml")
            }) || file.starts_with("ZImagefile")
            {
                nested = nested.zimagefile(file_path);
            } else {
                nested = nested.dockerfile(file_path);
            }
        }

        // Apply build args.
        for (key, value) in build_ctx.args() {
            nested = nested.build_arg(&key, &value);
        }

        // Propagate default registry if set, so nested builds resolve short
        // image names the same way as the parent build.
        if let Some(ref reg) = self.options.default_registry {
            nested = nested.default_registry(reg.clone());
        }

        // Run the nested build.
        let result = nested.build().await?;
        info!(
            "Nested build for '{}' completed: {}",
            stage_name, result.image_id
        );

        Ok(tag)
    }
1852
1853 /// Resolve which stages need to be built
1854 fn resolve_stages<'a>(&self, dockerfile: &'a Dockerfile) -> Result<Vec<&'a Stage>> {
1855 if let Some(target) = &self.options.target {
1856 // Find target stage and all its dependencies
1857 self.resolve_target_stages(dockerfile, target)
1858 } else {
1859 // Build all stages
1860 Ok(dockerfile.stages.iter().collect())
1861 }
1862 }
1863
1864 /// Resolve stages needed for a specific target
1865 #[allow(clippy::unused_self)]
1866 fn resolve_target_stages<'a>(
1867 &self,
1868 dockerfile: &'a Dockerfile,
1869 target: &str,
1870 ) -> Result<Vec<&'a Stage>> {
1871 // Find the target stage
1872 let target_stage = dockerfile
1873 .get_stage(target)
1874 .ok_or_else(|| BuildError::stage_not_found(target))?;
1875
1876 // Collect all stages up to and including the target
1877 // This is a simplified approach - a full implementation would
1878 // analyze COPY --from dependencies
1879 let mut stages: Vec<&Stage> = Vec::new();
1880
1881 for stage in &dockerfile.stages {
1882 stages.push(stage);
1883 if stage.index == target_stage.index {
1884 break;
1885 }
1886 }
1887
1888 Ok(stages)
1889 }
1890
1891 /// Resolve a base image reference to an actual image name.
1892 ///
1893 /// Resolution chain for short (unqualified) image names:
1894 /// 1. Check `LocalRegistry` for a cached copy (if configured)
1895 /// 2. Check `default_registry` for the image (if configured)
1896 /// 3. Fall back to Docker Hub qualification (`docker.io/library/...`)
1897 ///
1898 /// Already-qualified names (containing a registry hostname) skip this chain.
1899 async fn resolve_base_image(
1900 &self,
1901 image_ref: &ImageRef,
1902 stage_images: &HashMap<String, String>,
1903 ) -> Result<String> {
1904 match image_ref {
1905 ImageRef::Stage(name) => {
1906 return stage_images
1907 .get(name)
1908 .cloned()
1909 .ok_or_else(|| BuildError::stage_not_found(name));
1910 }
1911 ImageRef::Scratch => return Ok("scratch".to_string()),
1912 ImageRef::Registry { .. } => {}
1913 }
1914
1915 // Check if name is already fully qualified (has registry hostname).
1916 let is_qualified = match image_ref {
1917 ImageRef::Registry { image, .. } => {
1918 let first = image.split('/').next().unwrap_or("");
1919 first.contains('.') || first.contains(':') || first == "localhost"
1920 }
1921 _ => false,
1922 };
1923
1924 // For unqualified names, try local registry and default registry first.
1925 if !is_qualified {
1926 if let Some(resolved) = self.try_resolve_from_sources(image_ref).await {
1927 return Ok(resolved);
1928 }
1929 }
1930
1931 // Fall back: qualify to docker.io and build the full string.
1932 let qualified = image_ref.qualify();
1933 match &qualified {
1934 ImageRef::Registry { image, tag, digest } => {
1935 let mut result = image.clone();
1936 if let Some(t) = tag {
1937 result.push(':');
1938 result.push_str(t);
1939 }
1940 if let Some(d) = digest {
1941 result.push('@');
1942 result.push_str(d);
1943 }
1944 if tag.is_none() && digest.is_none() {
1945 result.push_str(":latest");
1946 }
1947 Ok(result)
1948 }
1949 _ => unreachable!("qualify() preserves Registry variant"),
1950 }
1951 }
1952
1953 /// Try to resolve an unqualified image from local registry or default registry.
1954 ///
1955 /// Returns `Some(fully_qualified_name)` if found, `None` to fall back to docker.io.
1956 #[allow(clippy::unused_async)]
1957 async fn try_resolve_from_sources(&self, image_ref: &ImageRef) -> Option<String> {
1958 let (name, tag_str) = match image_ref {
1959 ImageRef::Registry { image, tag, .. } => {
1960 (image.as_str(), tag.as_deref().unwrap_or("latest"))
1961 }
1962 _ => return None,
1963 };
1964
1965 // 1. Check local OCI registry
1966 #[cfg(feature = "local-registry")]
1967 if let Some(ref local_reg) = self.local_registry {
1968 if local_reg.has_manifest(name, tag_str).await {
1969 info!(
1970 "Found {}:{} in local registry, using local copy",
1971 name, tag_str
1972 );
1973 // Build an OCI reference pointing to the local registry path.
1974 // buildah can pull from an OCI layout directory.
1975 let oci_path = format!("oci:{}:{}", local_reg.root().display(), tag_str);
1976 return Some(oci_path);
1977 }
1978 }
1979
1980 // 2. Check configured default registry
1981 if let Some(ref registry) = self.options.default_registry {
1982 let qualified = format!("{registry}/{name}:{tag_str}");
1983 debug!("Checking default registry for image: {}", qualified);
1984 // Return the qualified name for the configured registry.
1985 // buildah will attempt to pull from this registry; if it fails,
1986 // the build will error (the user explicitly configured this registry).
1987 return Some(qualified);
1988 }
1989
1990 None
1991 }
1992
1993 /// Create a working container from an image
1994 async fn create_container(&self, image: &str) -> Result<String> {
1995 let cmd = BuildahCommand::from_image(image);
1996 let output = self.executor.execute_checked(&cmd).await?;
1997 Ok(output.stdout.trim().to_string())
1998 }
1999
2000 /// Commit a container to create an image
2001 async fn commit_container(
2002 &self,
2003 container: &str,
2004 image_name: &str,
2005 squash: bool,
2006 ) -> Result<String> {
2007 let cmd = BuildahCommand::commit_with_opts(
2008 container,
2009 image_name,
2010 self.options.format.as_deref(),
2011 squash,
2012 );
2013 let output = self.executor.execute_checked(&cmd).await?;
2014 Ok(output.stdout.trim().to_string())
2015 }
2016
2017 /// Tag an image with an additional tag
2018 async fn tag_image(&self, image: &str, tag: &str) -> Result<()> {
2019 let cmd = BuildahCommand::tag(image, tag);
2020 self.executor.execute_checked(&cmd).await?;
2021 Ok(())
2022 }
2023
2024 /// Push an image to a registry
2025 async fn push_image(&self, tag: &str) -> Result<()> {
2026 let mut cmd = BuildahCommand::push(tag);
2027
2028 // Add auth if provided
2029 if let Some(auth) = &self.options.registry_auth {
2030 cmd = cmd
2031 .arg("--creds")
2032 .arg(format!("{}:{}", auth.username, auth.password));
2033 }
2034
2035 self.executor.execute_checked(&cmd).await?;
2036 Ok(())
2037 }
2038
2039 /// Send an event to the TUI (if configured)
2040 fn send_event(&self, event: BuildEvent) {
2041 if let Some(tx) = &self.event_tx {
2042 // Ignore send errors - the receiver may have been dropped
2043 let _ = tx.send(event);
2044 }
2045 }
2046}
2047
/// Return the current Unix time (whole seconds) as a decimal string.
///
/// Used to generate timestamp-based names without pulling in `chrono`.
/// If the system clock reads before the Unix epoch, returns `"0"` instead
/// of panicking.
fn chrono_lite_timestamp() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
        // Direct u64 -> String; `format!("{}", x)` was an extra indirection.
        .to_string()
}
2056
2057/// Generate a short unique build ID for namespacing intermediate stage images.
2058///
2059/// This prevents parallel builds from clobbering each other's intermediate
2060/// stage images when they share stage names (e.g., two Dockerfiles both have
2061/// a stage named "builder").
2062///
2063/// The ID combines nanosecond-precision timestamp with the process ID, then
2064/// takes 12 hex characters from a SHA-256 hash for a compact, collision-resistant
2065/// identifier.
2066fn generate_build_id() -> String {
2067 use sha2::{Digest, Sha256};
2068 use std::time::{SystemTime, UNIX_EPOCH};
2069
2070 // Use a monotonic counter to guarantee uniqueness even within the same
2071 // nanosecond on the same process (e.g. tests or very fast sequential calls).
2072 static COUNTER: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);
2073
2074 let nanos = SystemTime::now()
2075 .duration_since(UNIX_EPOCH)
2076 .unwrap_or_default()
2077 .as_nanos();
2078 let pid = std::process::id();
2079 let count = COUNTER.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
2080
2081 let mut hasher = Sha256::new();
2082 hasher.update(nanos.to_le_bytes());
2083 hasher.update(pid.to_le_bytes());
2084 hasher.update(count.to_le_bytes());
2085 let hash = hasher.finalize();
2086 // 12 hex chars = 6 bytes = 48 bits of entropy, ample for build parallelism
2087 hex::encode(&hash[..6])
2088}
2089
#[cfg(test)]
mod tests {
    use super::*;

    // ---- Construction of auth/options ------------------------------------

    #[test]
    fn test_registry_auth_new() {
        let auth = RegistryAuth::new("user", "pass");
        assert_eq!(auth.username, "user");
        assert_eq!(auth.password, "pass");
    }

    #[test]
    fn test_build_options_default() {
        let opts = BuildOptions::default();
        assert!(opts.dockerfile.is_none());
        assert!(opts.zimagefile.is_none());
        assert!(opts.runtime.is_none());
        assert!(opts.build_args.is_empty());
        assert!(opts.target.is_none());
        assert!(opts.tags.is_empty());
        assert!(!opts.no_cache);
        assert!(!opts.push);
        assert!(!opts.squash);
        // New cache-related fields
        assert!(opts.layers); // Default is true
        assert!(opts.cache_from.is_none());
        assert!(opts.cache_to.is_none());
        assert!(opts.cache_ttl.is_none());
        // Cache backend config (only with cache feature)
        #[cfg(feature = "cache")]
        assert!(opts.cache_backend_config.is_none());
    }

    // ---- Base-image resolution --------------------------------------------
    // These pin the docker.io qualification rules and the stage/scratch
    // short-circuits of `resolve_base_image`.

    #[tokio::test]
    async fn test_resolve_base_image_registry() {
        let builder = create_test_builder();
        let stage_images = HashMap::new();

        // Simple image (qualified to docker.io)
        let image_ref = ImageRef::Registry {
            image: "alpine".to_string(),
            tag: Some("3.18".to_string()),
            digest: None,
        };
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert_eq!(result.unwrap(), "docker.io/library/alpine:3.18");

        // Image with digest (qualified to docker.io)
        let image_ref = ImageRef::Registry {
            image: "alpine".to_string(),
            tag: None,
            digest: Some("sha256:abc123".to_string()),
        };
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert_eq!(result.unwrap(), "docker.io/library/alpine@sha256:abc123");

        // Image with no tag or digest (qualified to docker.io + :latest)
        let image_ref = ImageRef::Registry {
            image: "alpine".to_string(),
            tag: None,
            digest: None,
        };
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert_eq!(result.unwrap(), "docker.io/library/alpine:latest");

        // Already-qualified image (unchanged)
        let image_ref = ImageRef::Registry {
            image: "ghcr.io/org/myimage".to_string(),
            tag: Some("v1".to_string()),
            digest: None,
        };
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert_eq!(result.unwrap(), "ghcr.io/org/myimage:v1");
    }

    #[tokio::test]
    async fn test_resolve_base_image_stage() {
        let builder = create_test_builder();
        let mut stage_images = HashMap::new();
        stage_images.insert(
            "builder".to_string(),
            "zlayer-build-stage-builder".to_string(),
        );

        let image_ref = ImageRef::Stage("builder".to_string());
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert_eq!(result.unwrap(), "zlayer-build-stage-builder");

        // Missing stage
        let image_ref = ImageRef::Stage("missing".to_string());
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert!(result.is_err());
    }

    #[tokio::test]
    async fn test_resolve_base_image_scratch() {
        let builder = create_test_builder();
        let stage_images = HashMap::new();

        let image_ref = ImageRef::Scratch;
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert_eq!(result.unwrap(), "scratch");
    }

    #[tokio::test]
    async fn test_resolve_base_image_with_default_registry() {
        let mut builder = create_test_builder();
        builder.options.default_registry = Some("git.example.com:5000".to_string());
        let stage_images = HashMap::new();

        // Unqualified image should resolve to default registry
        let image_ref = ImageRef::Registry {
            image: "myapp".to_string(),
            tag: Some("v1".to_string()),
            digest: None,
        };
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert_eq!(result.unwrap(), "git.example.com:5000/myapp:v1");

        // Already-qualified image should NOT use default registry
        let image_ref = ImageRef::Registry {
            image: "ghcr.io/org/image".to_string(),
            tag: Some("latest".to_string()),
            digest: None,
        };
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert_eq!(result.unwrap(), "ghcr.io/org/image:latest");
    }

    // Build an `ImageBuilder` directly via struct literal so tests run
    // without touching buildah or the filesystem.
    fn create_test_builder() -> ImageBuilder {
        // Create a minimal builder for testing (without async initialization)
        ImageBuilder {
            context: PathBuf::from("/tmp/test"),
            options: BuildOptions::default(),
            executor: BuildahExecutor::with_path("/usr/bin/buildah"),
            event_tx: None,
            #[cfg(feature = "cache")]
            cache_backend: None,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        }
    }

    // Builder method chaining tests
    #[test]
    fn test_builder_chaining() {
        let mut builder = create_test_builder();

        builder = builder
            .dockerfile("./Dockerfile.test")
            .runtime(Runtime::Node20)
            .build_arg("VERSION", "1.0")
            .target("builder")
            .tag("myapp:latest")
            .tag("myapp:v1")
            .no_cache()
            .squash()
            .format("oci");

        assert_eq!(
            builder.options.dockerfile,
            Some(PathBuf::from("./Dockerfile.test"))
        );
        assert_eq!(builder.options.runtime, Some(Runtime::Node20));
        assert_eq!(
            builder.options.build_args.get("VERSION"),
            Some(&"1.0".to_string())
        );
        assert_eq!(builder.options.target, Some("builder".to_string()));
        assert_eq!(builder.options.tags.len(), 2);
        assert!(builder.options.no_cache);
        assert!(builder.options.squash);
        assert_eq!(builder.options.format, Some("oci".to_string()));
    }

    #[test]
    fn test_builder_push_with_auth() {
        let mut builder = create_test_builder();
        builder = builder.push(RegistryAuth::new("user", "pass"));

        assert!(builder.options.push);
        assert!(builder.options.registry_auth.is_some());
        let auth = builder.options.registry_auth.unwrap();
        assert_eq!(auth.username, "user");
        assert_eq!(auth.password, "pass");
    }

    #[test]
    fn test_builder_push_without_auth() {
        let mut builder = create_test_builder();
        builder = builder.push_without_auth();

        assert!(builder.options.push);
        assert!(builder.options.registry_auth.is_none());
    }

    // ---- Cache-related builder options ------------------------------------

    #[test]
    fn test_builder_layers() {
        let mut builder = create_test_builder();
        // Default is true
        assert!(builder.options.layers);

        // Disable layers
        builder = builder.layers(false);
        assert!(!builder.options.layers);

        // Re-enable layers
        builder = builder.layers(true);
        assert!(builder.options.layers);
    }

    #[test]
    fn test_builder_cache_from() {
        let mut builder = create_test_builder();
        assert!(builder.options.cache_from.is_none());

        builder = builder.cache_from("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_from,
            Some("registry.example.com/myapp:cache".to_string())
        );
    }

    #[test]
    fn test_builder_cache_to() {
        let mut builder = create_test_builder();
        assert!(builder.options.cache_to.is_none());

        builder = builder.cache_to("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_to,
            Some("registry.example.com/myapp:cache".to_string())
        );
    }

    #[test]
    fn test_builder_cache_ttl() {
        use std::time::Duration;

        let mut builder = create_test_builder();
        assert!(builder.options.cache_ttl.is_none());

        builder = builder.cache_ttl(Duration::from_secs(3600));
        assert_eq!(builder.options.cache_ttl, Some(Duration::from_secs(3600)));
    }

    #[test]
    fn test_builder_cache_options_chaining() {
        use std::time::Duration;

        let builder = create_test_builder()
            .layers(true)
            .cache_from("registry.example.com/cache:input")
            .cache_to("registry.example.com/cache:output")
            .cache_ttl(Duration::from_secs(7200))
            .no_cache();

        assert!(builder.options.layers);
        assert_eq!(
            builder.options.cache_from,
            Some("registry.example.com/cache:input".to_string())
        );
        assert_eq!(
            builder.options.cache_to,
            Some("registry.example.com/cache:output".to_string())
        );
        assert_eq!(builder.options.cache_ttl, Some(Duration::from_secs(7200)));
        assert!(builder.options.no_cache);
    }

    #[test]
    fn test_chrono_lite_timestamp() {
        let ts = chrono_lite_timestamp();
        // Should be a valid number
        let parsed: u64 = ts.parse().expect("Should be a valid u64");
        // Should be reasonably recent (after 2024)
        assert!(parsed > 1700000000);
    }

    // LayerCacheTracker tests
    #[test]
    fn test_layer_cache_tracker_new() {
        let tracker = LayerCacheTracker::new();
        assert!(tracker.known_layers.is_empty());
    }

    #[test]
    fn test_layer_cache_tracker_record_and_lookup() {
        let mut tracker = LayerCacheTracker::new();

        // Record a cache miss
        tracker.record("abc123".to_string(), "container-1".to_string(), false);

        // Check that we can look it up
        assert!(!tracker.is_cached("abc123", "container-1"));

        // Record a cache hit
        tracker.record("def456".to_string(), "container-2".to_string(), true);

        assert!(tracker.is_cached("def456", "container-2"));
    }

    #[test]
    fn test_layer_cache_tracker_unknown_returns_false() {
        let tracker = LayerCacheTracker::new();

        // Unknown entries should return false
        assert!(!tracker.is_cached("unknown", "unknown"));
    }

    #[test]
    fn test_layer_cache_tracker_different_base_layers() {
        let mut tracker = LayerCacheTracker::new();

        // Same instruction key but different base layers
        tracker.record("inst-1".to_string(), "base-a".to_string(), true);
        tracker.record("inst-1".to_string(), "base-b".to_string(), false);

        assert!(tracker.is_cached("inst-1", "base-a"));
        assert!(!tracker.is_cached("inst-1", "base-b"));
    }

    #[test]
    fn test_layer_cache_tracker_detect_cache_hit() {
        use crate::dockerfile::RunInstruction;

        let tracker = LayerCacheTracker::new();
        let instruction = Instruction::Run(RunInstruction::shell("echo hello"));

        // Currently always returns false - this test documents the expected behavior
        // and will need to be updated when cache detection is implemented
        assert!(!tracker.detect_cache_hit(&instruction, 50, ""));
        assert!(!tracker.detect_cache_hit(&instruction, 1000, ""));
        assert!(!tracker.detect_cache_hit(&instruction, 50, "Using cache"));
    }

    #[test]
    fn test_layer_cache_tracker_overwrite() {
        let mut tracker = LayerCacheTracker::new();

        // Record as cache miss first
        tracker.record("key".to_string(), "base".to_string(), false);
        assert!(!tracker.is_cached("key", "base"));

        // Overwrite with cache hit
        tracker.record("key".to_string(), "base".to_string(), true);
        assert!(tracker.is_cached("key", "base"));
    }
}