// zlayer_builder/builder.rs
//! ImageBuilder - High-level API for building container images
//!
//! This module provides the [`ImageBuilder`] type which orchestrates the full
//! container image build process, from Dockerfile parsing through buildah
//! execution to final image creation.
//!
//! # Example
//!
//! ```no_run
//! use zlayer_builder::ImageBuilder;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Build from a Dockerfile
//!     let image = ImageBuilder::new("./my-app").await?
//!         .tag("myapp:latest")
//!         .tag("myapp:v1.0.0")
//!         .build()
//!         .await?;
//!
//!     println!("Built image: {}", image.image_id);
//!     Ok(())
//! }
//! ```
//!
//! # Using Runtime Templates
//!
//! ```no_run
//! use zlayer_builder::{ImageBuilder, Runtime};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Build using a runtime template (no Dockerfile needed)
//!     let image = ImageBuilder::new("./my-node-app").await?
//!         .runtime(Runtime::Node20)
//!         .tag("myapp:latest")
//!         .build()
//!         .await?;
//!
//!     println!("Built image: {}", image.image_id);
//!     Ok(())
//! }
//! ```
//!
//! # Multi-stage Builds with Target
//!
//! ```no_run
//! use zlayer_builder::ImageBuilder;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Build only up to a specific stage
//!     let image = ImageBuilder::new("./my-app").await?
//!         .target("builder")
//!         .tag("myapp:builder")
//!         .build()
//!         .await?;
//!
//!     println!("Built intermediate image: {}", image.image_id);
//!     Ok(())
//! }
//! ```
//!
//! # With TUI Progress Updates
//!
//! ```no_run
//! use zlayer_builder::{ImageBuilder, BuildEvent};
//! use std::sync::mpsc;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let (tx, rx) = mpsc::channel::<BuildEvent>();
//!
//!     // Start TUI in another thread
//!     std::thread::spawn(move || {
//!         // Process events from rx...
//!         while let Ok(event) = rx.recv() {
//!             println!("Event: {:?}", event);
//!         }
//!     });
//!
//!     let image = ImageBuilder::new("./my-app").await?
//!         .tag("myapp:latest")
//!         .with_events(tx)
//!         .build()
//!         .await?;
//!
//!     Ok(())
//! }
//! ```
//!
//! # With Cache Backend (requires `cache` feature)
//!
//! ```no_run,ignore
//! use zlayer_builder::ImageBuilder;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let image = ImageBuilder::new("./my-app").await?
//!         .with_cache_dir("/var/cache/zlayer") // Use persistent disk cache
//!         .tag("myapp:latest")
//!         .build()
//!         .await?;
//!
//!     println!("Built image: {}", image.image_id);
//!     Ok(())
//! }
//! ```

110use std::collections::HashMap;
111use std::path::{Path, PathBuf};
112use std::sync::mpsc;
113
114use tokio::fs;
115use tracing::{debug, info, instrument};
116
117use crate::buildah::{BuildahCommand, BuildahExecutor};
118use crate::dockerfile::{Dockerfile, ImageRef, Instruction, RunMount, Stage};
119use crate::error::{BuildError, Result};
120use crate::templates::{get_template, Runtime};
121use crate::tui::BuildEvent;
122
123// Cache backend integration (optional, requires `cache` feature)
124#[cfg(feature = "cache")]
125use std::sync::Arc;
126
127#[cfg(feature = "cache")]
128use zlayer_registry::cache::BlobCacheBackend;
129
130#[cfg(feature = "local-registry")]
131use zlayer_registry::LocalRegistry;
132
/// Configuration for the layer cache backend.
///
/// Selects which backend stores and retrieves cached layers during builds.
/// Only available when the `cache` feature is enabled; individual variants
/// additionally require `cache-persistent` or `cache-s3`.
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// // Use persistent disk cache
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_config(CacheBackendConfig::Persistent {
///         path: "/var/cache/zlayer".into(),
///     })
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "cache")]
#[derive(Debug, Clone, Default)]
pub enum CacheBackendConfig {
    /// In-memory cache (cleared when the process exits). This is the default.
    ///
    /// Useful for CI/CD environments where persistence isn't needed
    /// but you want to avoid re-downloading base image layers within
    /// a single build session.
    #[default]
    Memory,

    /// Persistent disk-based cache using redb.
    ///
    /// Requires the `cache-persistent` feature. Layers are stored on disk
    /// and persist across builds, significantly speeding up repeated builds.
    #[cfg(feature = "cache-persistent")]
    Persistent {
        /// Path to the cache directory or database file.
        /// If a directory, `blob_cache.redb` will be created inside it.
        path: PathBuf,
    },

    /// S3-compatible object storage backend.
    ///
    /// Requires the `cache-s3` feature. Useful for distributed build systems
    /// where multiple build machines need to share a cache.
    #[cfg(feature = "cache-s3")]
    S3 {
        /// S3 bucket name
        bucket: String,
        /// AWS region (optional; the SDK default is used if not set)
        region: Option<String>,
        /// Custom endpoint URL (for S3-compatible services like R2, B2, MinIO)
        endpoint: Option<String>,
        /// Key prefix for cached blobs (default: "zlayer/layers/")
        prefix: Option<String>,
    },
}
192
/// Tracks layer cache state during builds.
///
/// Maintains a mapping keyed by an instruction cache key combined with a
/// base layer identifier, recording whether that layer was previously built
/// and can be served from cache.
///
/// # Cache Key Format
///
/// The cache key is a tuple of:
/// - `instruction_key`: a hash of the instruction type and its parameters
///   (generated by [`Instruction::cache_key()`])
/// - `base_layer`: the container/image ID the instruction was executed on
///
/// Together, these uniquely identify a layer's content.
///
/// # Future Enhancements
///
/// Currently, cache hit detection is limited because buildah's manual container
/// creation workflow (`buildah from`, `buildah run`, `buildah commit`) doesn't
/// directly expose layer reuse information. True cache detection would require
/// one of:
///
/// 1. **Parsing buildah output** for indicators of layer reuse (e.g. fast
///    execution time, specific log messages)
/// 2. **Layer digest comparison**: pre-compute the expected layer digest and
///    check whether it already exists in local storage
/// 3. **Switching to `buildah build`**, whose `--layers` flag handles cache
///    hits natively
/// 4. **An external cache registry**, implementing `--cache-from`/`--cache-to`
///    semantics by pulling/pushing layer digests from a remote registry
#[derive(Debug, Default)]
struct LayerCacheTracker {
    /// Maps (instruction_cache_key, base_layer_id) -> was_cached
    known_layers: HashMap<(String, String), bool>,
}
232
233impl LayerCacheTracker {
234 /// Create a new empty cache tracker.
235 fn new() -> Self {
236 Self::default()
237 }
238
239 /// Check if we have a cached result for this instruction on the given base layer.
240 ///
241 /// # Arguments
242 ///
243 /// * `instruction_key` - The cache key from [`Instruction::cache_key()`]
244 /// * `base_layer` - The container or image ID the instruction runs on
245 ///
246 /// # Returns
247 ///
248 /// `true` if we've previously recorded this instruction as cached,
249 /// `false` otherwise (including if we've never seen this combination).
250 #[allow(dead_code)]
251 fn is_cached(&self, instruction_key: &str, base_layer: &str) -> bool {
252 self.known_layers
253 .get(&(instruction_key.to_string(), base_layer.to_string()))
254 .copied()
255 .unwrap_or(false)
256 }
257
258 /// Record the cache status for an instruction execution.
259 ///
260 /// # Arguments
261 ///
262 /// * `instruction_key` - The cache key from [`Instruction::cache_key()`]
263 /// * `base_layer` - The container or image ID the instruction ran on
264 /// * `cached` - Whether this execution was a cache hit
265 fn record(&mut self, instruction_key: String, base_layer: String, cached: bool) {
266 self.known_layers
267 .insert((instruction_key, base_layer), cached);
268 }
269
270 /// Attempt to detect if an instruction execution was a cache hit.
271 ///
272 /// This is a heuristic-based approach since buildah doesn't directly report
273 /// cache status for manual container operations.
274 ///
275 /// # Current Implementation
276 ///
277 /// Always returns `false` - true cache detection would require:
278 /// - Timing analysis (cached operations are typically < 100ms)
279 /// - Output parsing for cache-related messages
280 /// - Pre-computation of expected layer digests
281 ///
282 /// # Arguments
283 ///
284 /// * `_instruction` - The instruction that was executed
285 /// * `_execution_time_ms` - How long the execution took in milliseconds
286 /// * `_output` - The command's stdout/stderr output
287 ///
288 /// # Returns
289 ///
290 /// `true` if the execution appears to be a cache hit, `false` otherwise.
291 ///
292 /// TODO: Implement heuristic cache detection based on:
293 /// - Execution time (cached layers typically commit in < 100ms)
294 /// - Output analysis (look for "Using cache" or similar messages)
295 /// - Layer digest comparison with existing images
296 #[allow(dead_code)]
297 fn detect_cache_hit(
298 &self,
299 _instruction: &Instruction,
300 _execution_time_ms: u64,
301 _output: &str,
302 ) -> bool {
303 // TODO: Implement cache hit detection heuristics
304 //
305 // Possible approaches:
306 // 1. Time-based: If execution took < 100ms, likely cached
307 // if execution_time_ms < 100 { return true; }
308 //
309 // 2. Output-based: Look for cache indicators in buildah output
310 // if output.contains("Using cache") { return true; }
311 //
312 // 3. Digest-based: Pre-compute expected digest and check storage
313 // let expected = compute_layer_digest(instruction, base_layer);
314 // if layer_exists_in_storage(expected) { return true; }
315 //
316 // For now, always return false until we have reliable detection
317 false
318 }
319}
320
/// Built image information returned after a successful build.
#[derive(Debug, Clone)]
pub struct BuiltImage {
    /// Image ID (sha256:...)
    pub image_id: String,
    /// All tags applied to the image
    pub tags: Vec<String>,
    /// Number of layers in the final image
    pub layer_count: usize,
    /// Total size in bytes (0 if not computed)
    pub size: u64,
    /// Build duration in milliseconds
    pub build_time_ms: u64,
}
335
/// Registry authentication credentials.
#[derive(Debug, Clone)]
pub struct RegistryAuth {
    /// Registry username
    pub username: String,
    /// Registry password or token
    pub password: String,
}

impl RegistryAuth {
    /// Build a credentials pair from anything convertible to `String`.
    pub fn new(username: impl Into<String>, password: impl Into<String>) -> Self {
        let username = username.into();
        let password = password.into();
        Self { username, password }
    }
}
354
/// Build options for customizing the image build process.
#[derive(Debug, Clone)]
pub struct BuildOptions {
    /// Dockerfile path (default: `Dockerfile` in the build context)
    pub dockerfile: Option<PathBuf>,
    /// ZImagefile path (YAML-based alternative to a Dockerfile)
    pub zimagefile: Option<PathBuf>,
    /// Use a runtime template instead of a Dockerfile
    pub runtime: Option<Runtime>,
    /// Build arguments (ARG values)
    pub build_args: HashMap<String, String>,
    /// Target stage for multi-stage builds
    pub target: Option<String>,
    /// Image tags to apply
    pub tags: Vec<String>,
    /// Disable layer caching
    pub no_cache: bool,
    /// Push to registry after build
    pub push: bool,
    /// Registry auth (if pushing)
    pub registry_auth: Option<RegistryAuth>,
    /// Squash all layers into one
    pub squash: bool,
    /// Image format (oci or docker)
    pub format: Option<String>,
    /// Enable buildah layer caching (`--layers` flag for `buildah build`).
    /// Default: true
    ///
    /// Note: ZLayer uses manual container creation (`buildah from`, `buildah run`,
    /// `buildah commit`) rather than `buildah build`, so this flag is reserved
    /// for future use when/if we switch to the `buildah build` (bud) command.
    pub layers: bool,
    /// Registry to pull cache from (`--cache-from` for `buildah build`).
    ///
    /// Note: currently reserved. ZLayer uses manual container creation, so this
    /// would only take effect after switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build`, which supports --cache-from natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_from: Option<String>,
    /// Registry to push cache to (`--cache-to` for `buildah build`).
    ///
    /// Note: currently reserved. ZLayer uses manual container creation, so this
    /// would only take effect after switching to `buildah build`.
    ///
    /// TODO: Implement remote cache support. This would require either:
    /// 1. Switching to `buildah build`, which supports --cache-to natively
    /// 2. Implementing custom layer caching with registry push/pull for intermediate layers
    pub cache_to: Option<String>,
    /// Maximum cache age (`--cache-ttl` for `buildah build`).
    ///
    /// Note: currently reserved. ZLayer uses manual container creation, so this
    /// would only take effect after switching to `buildah build`.
    ///
    /// TODO: Implement cache TTL support. This would require either:
    /// 1. Switching to `buildah build`, which supports --cache-ttl natively
    /// 2. Implementing custom cache expiration logic for our layer caching system
    pub cache_ttl: Option<std::time::Duration>,
    /// Cache backend configuration (requires `cache` feature).
    ///
    /// When configured, the builder will store layer data in the specified
    /// cache backend for faster subsequent builds. This is separate from
    /// buildah's native caching and operates at the ZLayer level.
    ///
    /// # Integration Points
    ///
    /// 1. **Before instruction execution**: check whether a cached layer exists
    ///    for the (instruction_hash, base_layer) tuple
    /// 2. **After instruction execution**: store the resulting layer data for
    ///    future builds
    /// 3. **Base image layers**: cache pulled base image layers to avoid
    ///    re-downloading from registries
    ///
    /// TODO: Wire up cache lookups in the build loop once layer digests
    /// are properly computed and tracked.
    #[cfg(feature = "cache")]
    pub cache_backend_config: Option<CacheBackendConfig>,
    /// Default OCI/WASM-compatible registry to check for images before falling
    /// back to Docker Hub qualification.
    ///
    /// When set, the builder probes this registry for short image names before
    /// qualifying them to `docker.io`. For example, if set to
    /// `"git.example.com:5000"` and the ZImagefile uses `base: "myapp:latest"`,
    /// the builder checks `git.example.com:5000/myapp:latest` first.
    pub default_registry: Option<String>,
    /// Default cache mounts injected into all RUN instructions.
    /// These are merged with any step-level cache mounts (deduped by target path).
    pub default_cache_mounts: Vec<RunMount>,
    /// Number of retries for failed RUN steps (0 = no retries, default)
    pub retries: u32,
}
452
453impl Default for BuildOptions {
454 fn default() -> Self {
455 Self {
456 dockerfile: None,
457 zimagefile: None,
458 runtime: None,
459 build_args: HashMap::new(),
460 target: None,
461 tags: Vec::new(),
462 no_cache: false,
463 push: false,
464 registry_auth: None,
465 squash: false,
466 format: None,
467 layers: true,
468 cache_from: None,
469 cache_to: None,
470 cache_ttl: None,
471 #[cfg(feature = "cache")]
472 cache_backend_config: None,
473 default_registry: None,
474 default_cache_mounts: Vec::new(),
475 retries: 0,
476 }
477 }
478}
479
/// Image builder - orchestrates the full build process.
///
/// `ImageBuilder` provides a fluent API for configuring and executing
/// container image builds using buildah as the backend.
///
/// # Build Process
///
/// 1. Parse Dockerfile (or use runtime template)
/// 2. Resolve target stages if specified
/// 3. Build each stage sequentially:
///    - Create working container from base image
///    - Execute each instruction
///    - Commit intermediate stages for COPY --from
/// 4. Commit final image with tags
/// 5. Push to registry if configured
/// 6. Clean up intermediate containers
///
/// # Cache Backend Integration (requires `cache` feature)
///
/// When a cache backend is configured, the builder can store and retrieve
/// cached layer data to speed up subsequent builds:
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
///
/// let builder = ImageBuilder::new("./my-app").await?
///     .with_cache_dir("/var/cache/zlayer")
///     .tag("myapp:latest");
/// ```
pub struct ImageBuilder {
    /// Build context directory
    context: PathBuf,
    /// Build options accumulated via the fluent setters
    options: BuildOptions,
    /// Buildah executor used to run buildah commands
    executor: BuildahExecutor,
    /// Event sender for TUI progress updates (None = no events emitted)
    event_tx: Option<mpsc::Sender<BuildEvent>>,
    /// Cache backend for layer caching (requires `cache` feature).
    ///
    /// When set, the builder will attempt to retrieve cached layers before
    /// executing instructions, and store results in the cache after execution.
    ///
    /// TODO: Implement cache lookups in the build loop. Currently the backend
    /// is stored but not actively used during builds. Integration points:
    /// - Check cache before executing RUN instructions
    /// - Store layer data after successful instruction execution
    /// - Cache base image layers pulled from registries
    #[cfg(feature = "cache")]
    cache_backend: Option<Arc<Box<dyn BlobCacheBackend>>>,
    /// Local OCI registry for checking cached images before remote pulls.
    #[cfg(feature = "local-registry")]
    local_registry: Option<LocalRegistry>,
}
534
535impl ImageBuilder {
536 /// Create a new ImageBuilder with the given context directory
537 ///
538 /// The context directory should contain the Dockerfile (unless using
539 /// a runtime template) and any files that will be copied into the image.
540 ///
541 /// # Arguments
542 ///
543 /// * `context` - Path to the build context directory
544 ///
545 /// # Errors
546 ///
547 /// Returns an error if:
548 /// - The context directory does not exist
549 /// - Buildah is not installed or not accessible
550 ///
551 /// # Example
552 ///
553 /// ```no_run
554 /// use zlayer_builder::ImageBuilder;
555 ///
556 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
557 /// let builder = ImageBuilder::new("./my-project").await?;
558 /// # Ok(())
559 /// # }
560 /// ```
561 #[instrument(skip_all, fields(context = %context.as_ref().display()))]
562 pub async fn new(context: impl AsRef<Path>) -> Result<Self> {
563 let context = context.as_ref().to_path_buf();
564
565 // Verify context exists
566 if !context.exists() {
567 return Err(BuildError::ContextRead {
568 path: context,
569 source: std::io::Error::new(
570 std::io::ErrorKind::NotFound,
571 "Build context directory not found",
572 ),
573 });
574 }
575
576 // Initialize buildah executor
577 let executor = BuildahExecutor::new_async().await?;
578
579 debug!("Created ImageBuilder for context: {}", context.display());
580
581 Ok(Self {
582 context,
583 options: BuildOptions::default(),
584 executor,
585 event_tx: None,
586 #[cfg(feature = "cache")]
587 cache_backend: None,
588 #[cfg(feature = "local-registry")]
589 local_registry: None,
590 })
591 }
592
593 /// Create an ImageBuilder with a custom buildah executor
594 ///
595 /// This is useful for testing or when you need to configure
596 /// the executor with specific storage options.
597 pub fn with_executor(context: impl AsRef<Path>, executor: BuildahExecutor) -> Result<Self> {
598 let context = context.as_ref().to_path_buf();
599
600 if !context.exists() {
601 return Err(BuildError::ContextRead {
602 path: context,
603 source: std::io::Error::new(
604 std::io::ErrorKind::NotFound,
605 "Build context directory not found",
606 ),
607 });
608 }
609
610 Ok(Self {
611 context,
612 options: BuildOptions::default(),
613 executor,
614 event_tx: None,
615 #[cfg(feature = "cache")]
616 cache_backend: None,
617 #[cfg(feature = "local-registry")]
618 local_registry: None,
619 })
620 }
621
622 /// Set a custom Dockerfile path
623 ///
624 /// By default, the builder looks for a file named `Dockerfile` in the
625 /// context directory. Use this method to specify a different path.
626 ///
627 /// # Example
628 ///
629 /// ```no_run
630 /// # use zlayer_builder::ImageBuilder;
631 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
632 /// let builder = ImageBuilder::new("./my-project").await?
633 /// .dockerfile("./my-project/Dockerfile.prod");
634 /// # Ok(())
635 /// # }
636 /// ```
637 pub fn dockerfile(mut self, path: impl AsRef<Path>) -> Self {
638 self.options.dockerfile = Some(path.as_ref().to_path_buf());
639 self
640 }
641
642 /// Set a custom ZImagefile path
643 ///
644 /// ZImagefiles are a YAML-based alternative to Dockerfiles. When set,
645 /// the builder will parse the ZImagefile and convert it to the internal
646 /// Dockerfile IR for execution.
647 ///
648 /// # Example
649 ///
650 /// ```no_run
651 /// # use zlayer_builder::ImageBuilder;
652 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
653 /// let builder = ImageBuilder::new("./my-project").await?
654 /// .zimagefile("./my-project/ZImagefile");
655 /// # Ok(())
656 /// # }
657 /// ```
658 pub fn zimagefile(mut self, path: impl AsRef<Path>) -> Self {
659 self.options.zimagefile = Some(path.as_ref().to_path_buf());
660 self
661 }
662
663 /// Use a runtime template instead of a Dockerfile
664 ///
665 /// Runtime templates provide pre-built Dockerfiles for common
666 /// development environments. When set, the Dockerfile option is ignored.
667 ///
668 /// # Example
669 ///
670 /// ```no_run
671 /// use zlayer_builder::{ImageBuilder, Runtime};
672 ///
673 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
674 /// let builder = ImageBuilder::new("./my-node-app").await?
675 /// .runtime(Runtime::Node20);
676 /// # Ok(())
677 /// # }
678 /// ```
679 pub fn runtime(mut self, runtime: Runtime) -> Self {
680 self.options.runtime = Some(runtime);
681 self
682 }
683
684 /// Add a build argument
685 ///
686 /// Build arguments are passed to the Dockerfile and can be referenced
687 /// using the `ARG` instruction.
688 ///
689 /// # Example
690 ///
691 /// ```no_run
692 /// # use zlayer_builder::ImageBuilder;
693 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
694 /// let builder = ImageBuilder::new("./my-project").await?
695 /// .build_arg("VERSION", "1.0.0")
696 /// .build_arg("DEBUG", "false");
697 /// # Ok(())
698 /// # }
699 /// ```
700 pub fn build_arg(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
701 self.options.build_args.insert(key.into(), value.into());
702 self
703 }
704
705 /// Set multiple build arguments at once
706 pub fn build_args(mut self, args: HashMap<String, String>) -> Self {
707 self.options.build_args.extend(args);
708 self
709 }
710
711 /// Set the target stage for multi-stage builds
712 ///
713 /// When building a multi-stage Dockerfile, you can stop at a specific
714 /// stage instead of building all stages.
715 ///
716 /// # Example
717 ///
718 /// ```no_run
719 /// # use zlayer_builder::ImageBuilder;
720 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
721 /// // Dockerfile:
722 /// // FROM node:20 AS builder
723 /// // ...
724 /// // FROM node:20-slim AS runtime
725 /// // ...
726 ///
727 /// let builder = ImageBuilder::new("./my-project").await?
728 /// .target("builder")
729 /// .tag("myapp:builder");
730 /// # Ok(())
731 /// # }
732 /// ```
733 pub fn target(mut self, stage: impl Into<String>) -> Self {
734 self.options.target = Some(stage.into());
735 self
736 }
737
738 /// Add an image tag
739 ///
740 /// Tags are applied to the final image. You can add multiple tags.
741 /// The first tag is used as the primary image name during commit.
742 ///
743 /// # Example
744 ///
745 /// ```no_run
746 /// # use zlayer_builder::ImageBuilder;
747 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
748 /// let builder = ImageBuilder::new("./my-project").await?
749 /// .tag("myapp:latest")
750 /// .tag("myapp:v1.0.0")
751 /// .tag("registry.example.com/myapp:v1.0.0");
752 /// # Ok(())
753 /// # }
754 /// ```
755 pub fn tag(mut self, tag: impl Into<String>) -> Self {
756 self.options.tags.push(tag.into());
757 self
758 }
759
760 /// Disable layer caching
761 ///
762 /// When enabled, all layers are rebuilt from scratch even if
763 /// they could be served from cache.
764 ///
765 /// Note: Currently this flag is tracked but not fully implemented in the
766 /// build process. ZLayer uses manual container creation (`buildah from`,
767 /// `buildah run`, `buildah commit`) which doesn't have built-in caching
768 /// like `buildah build` does. Future work could implement layer-level
769 /// caching by checking instruction hashes against previously built layers.
770 pub fn no_cache(mut self) -> Self {
771 self.options.no_cache = true;
772 self
773 }
774
775 /// Enable or disable layer caching
776 ///
777 /// This controls the `--layers` flag for buildah. When enabled (default),
778 /// buildah can cache and reuse intermediate layers.
779 ///
780 /// Note: ZLayer currently uses manual container creation (`buildah from`,
781 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
782 /// flag is reserved for future use when/if we switch to `buildah build`.
783 ///
784 /// # Example
785 ///
786 /// ```no_run
787 /// # use zlayer_builder::ImageBuilder;
788 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
789 /// let builder = ImageBuilder::new("./my-project").await?
790 /// .layers(false) // Disable layer caching
791 /// .tag("myapp:latest");
792 /// # Ok(())
793 /// # }
794 /// ```
795 pub fn layers(mut self, enable: bool) -> Self {
796 self.options.layers = enable;
797 self
798 }
799
800 /// Set registry to pull cache from
801 ///
802 /// This corresponds to buildah's `--cache-from` flag, which allows
803 /// pulling cached layers from a remote registry to speed up builds.
804 ///
805 /// Note: ZLayer currently uses manual container creation (`buildah from`,
806 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
807 /// option is reserved for future implementation.
808 ///
809 /// TODO: Implement remote cache support. This would require either:
810 /// 1. Switching to `buildah build` command which supports --cache-from natively
811 /// 2. Implementing custom layer caching with registry pull for intermediate layers
812 ///
813 /// # Example
814 ///
815 /// ```no_run
816 /// # use zlayer_builder::ImageBuilder;
817 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
818 /// let builder = ImageBuilder::new("./my-project").await?
819 /// .cache_from("registry.example.com/myapp:cache")
820 /// .tag("myapp:latest");
821 /// # Ok(())
822 /// # }
823 /// ```
824 pub fn cache_from(mut self, registry: impl Into<String>) -> Self {
825 self.options.cache_from = Some(registry.into());
826 self
827 }
828
829 /// Set registry to push cache to
830 ///
831 /// This corresponds to buildah's `--cache-to` flag, which allows
832 /// pushing cached layers to a remote registry for future builds to use.
833 ///
834 /// Note: ZLayer currently uses manual container creation (`buildah from`,
835 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
836 /// option is reserved for future implementation.
837 ///
838 /// TODO: Implement remote cache support. This would require either:
839 /// 1. Switching to `buildah build` command which supports --cache-to natively
840 /// 2. Implementing custom layer caching with registry push for intermediate layers
841 ///
842 /// # Example
843 ///
844 /// ```no_run
845 /// # use zlayer_builder::ImageBuilder;
846 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
847 /// let builder = ImageBuilder::new("./my-project").await?
848 /// .cache_to("registry.example.com/myapp:cache")
849 /// .tag("myapp:latest");
850 /// # Ok(())
851 /// # }
852 /// ```
853 pub fn cache_to(mut self, registry: impl Into<String>) -> Self {
854 self.options.cache_to = Some(registry.into());
855 self
856 }
857
858 /// Set maximum cache age
859 ///
860 /// This corresponds to buildah's `--cache-ttl` flag, which sets the
861 /// maximum age for cached layers before they are considered stale.
862 ///
863 /// Note: ZLayer currently uses manual container creation (`buildah from`,
864 /// `buildah run`, `buildah commit`) rather than `buildah build`, so this
865 /// option is reserved for future implementation.
866 ///
867 /// TODO: Implement cache TTL support. This would require either:
868 /// 1. Switching to `buildah build` command which supports --cache-ttl natively
869 /// 2. Implementing custom cache expiration logic for our layer caching system
870 ///
871 /// # Example
872 ///
873 /// ```no_run
874 /// # use zlayer_builder::ImageBuilder;
875 /// # use std::time::Duration;
876 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
877 /// let builder = ImageBuilder::new("./my-project").await?
878 /// .cache_ttl(Duration::from_secs(3600 * 24)) // 24 hours
879 /// .tag("myapp:latest");
880 /// # Ok(())
881 /// # }
882 /// ```
883 pub fn cache_ttl(mut self, ttl: std::time::Duration) -> Self {
884 self.options.cache_ttl = Some(ttl);
885 self
886 }
887
888 /// Push the image to a registry after building
889 ///
890 /// # Arguments
891 ///
892 /// * `auth` - Registry authentication credentials
893 ///
894 /// # Example
895 ///
896 /// ```no_run
897 /// use zlayer_builder::{ImageBuilder, RegistryAuth};
898 ///
899 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
900 /// let builder = ImageBuilder::new("./my-project").await?
901 /// .tag("registry.example.com/myapp:v1.0.0")
902 /// .push(RegistryAuth::new("user", "password"));
903 /// # Ok(())
904 /// # }
905 /// ```
906 pub fn push(mut self, auth: RegistryAuth) -> Self {
907 self.options.push = true;
908 self.options.registry_auth = Some(auth);
909 self
910 }
911
912 /// Enable pushing without authentication
913 ///
914 /// Use this for registries that don't require authentication
915 /// (e.g., local registries, insecure registries).
916 pub fn push_without_auth(mut self) -> Self {
917 self.options.push = true;
918 self.options.registry_auth = None;
919 self
920 }
921
922 /// Set a default OCI/WASM-compatible registry to check for images.
923 ///
924 /// When set, the builder will probe this registry for short image names
925 /// before qualifying them to `docker.io`. For example, if set to
926 /// `"git.example.com:5000"` and the ZImagefile uses `base: "myapp:latest"`,
927 /// the builder will check `git.example.com:5000/myapp:latest` first.
928 pub fn default_registry(mut self, registry: impl Into<String>) -> Self {
929 self.options.default_registry = Some(registry.into());
930 self
931 }
932
/// Attach a local OCI registry used during image resolution.
///
/// When present, the local registry is consulted for cached images before
/// any remote registry is contacted.
#[cfg(feature = "local-registry")]
pub fn with_local_registry(mut self, registry: LocalRegistry) -> Self {
    self.local_registry = Some(registry);
    self
}
942
943 /// Squash all layers into a single layer
944 ///
945 /// This reduces image size but loses layer caching benefits.
946 pub fn squash(mut self) -> Self {
947 self.options.squash = true;
948 self
949 }
950
951 /// Set the image format
952 ///
953 /// Valid values are "oci" (default) or "docker".
954 pub fn format(mut self, format: impl Into<String>) -> Self {
955 self.options.format = Some(format.into());
956 self
957 }
958
959 /// Set default cache mounts to inject into all RUN instructions
960 pub fn default_cache_mounts(mut self, mounts: Vec<RunMount>) -> Self {
961 self.options.default_cache_mounts = mounts;
962 self
963 }
964
965 /// Set the number of retries for failed RUN steps
966 pub fn retries(mut self, retries: u32) -> Self {
967 self.options.retries = retries;
968 self
969 }
970
971 /// Set an event sender for TUI progress updates
972 ///
973 /// Events will be sent as the build progresses, allowing you to
974 /// display a progress UI or log build status.
975 ///
976 /// # Example
977 ///
978 /// ```no_run
979 /// use zlayer_builder::{ImageBuilder, BuildEvent};
980 /// use std::sync::mpsc;
981 ///
982 /// # async fn example() -> Result<(), zlayer_builder::BuildError> {
983 /// let (tx, rx) = mpsc::channel::<BuildEvent>();
984 ///
985 /// let builder = ImageBuilder::new("./my-project").await?
986 /// .tag("myapp:latest")
987 /// .with_events(tx);
988 /// # Ok(())
989 /// # }
990 /// ```
991 pub fn with_events(mut self, tx: mpsc::Sender<BuildEvent>) -> Self {
992 self.event_tx = Some(tx);
993 self
994 }
995
/// Use a persistent on-disk cache backend for layer caching.
///
/// Layer data is stored at the given path and survives across builds,
/// which can substantially speed up repeated builds of similar images.
///
/// Requires the `cache-persistent` feature to be enabled.
///
/// # Arguments
///
/// * `path` - Cache location. A directory gets `blob_cache.redb` created
///   inside it; a file path is used directly.
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// let builder = ImageBuilder::new("./my-project").await?
///     .with_cache_dir("/var/cache/zlayer")
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
///
/// # Integration Status
///
/// TODO: The cache backend is currently stored but not actively used
/// during builds. Future work will wire up:
/// - Cache lookups before executing RUN instructions
/// - Storing layer data after successful execution
/// - Caching base image layers from registry pulls
#[cfg(feature = "cache-persistent")]
pub fn with_cache_dir(mut self, path: impl AsRef<Path>) -> Self {
    // Materialize the borrow once instead of calling as_ref() twice.
    let path = path.as_ref().to_path_buf();
    debug!("Configured persistent cache at: {}", path.display());
    self.options.cache_backend_config = Some(CacheBackendConfig::Persistent { path });
    self
}
1040
/// Use an in-memory cache backend for layer caching.
///
/// The cache lives only as long as the process, but within a single
/// session it can speed up builds by caching intermediate layers and
/// avoiding redundant operations.
///
/// Requires the `cache` feature to be enabled.
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// let builder = ImageBuilder::new("./my-project").await?
///     .with_memory_cache()
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
///
/// # Integration Status
///
/// TODO: The cache backend is currently stored but not actively used
/// during builds. See `with_cache_dir` for integration status details.
#[cfg(feature = "cache")]
pub fn with_memory_cache(mut self) -> Self {
    debug!("Configured in-memory cache");
    self.options.cache_backend_config = Some(CacheBackendConfig::Memory);
    self
}
1072
/// Use an S3-compatible storage backend for layer caching.
///
/// Intended for distributed build setups where several build machines
/// share one layer cache. Works with AWS S3, Cloudflare R2, Backblaze B2,
/// MinIO, and other S3-compatible services.
///
/// Requires the `cache-s3` feature to be enabled.
///
/// # Arguments
///
/// * `bucket` - S3 bucket name
/// * `region` - AWS region (optional, uses SDK default if not set)
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// let builder = ImageBuilder::new("./my-project").await?
///     .with_s3_cache("my-build-cache", Some("us-west-2"))
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
///
/// # Integration Status
///
/// TODO: The cache backend is currently stored but not actively used
/// during builds. See `with_cache_dir` for integration status details.
#[cfg(feature = "cache-s3")]
pub fn with_s3_cache(mut self, bucket: impl Into<String>, region: Option<String>) -> Self {
    let config = CacheBackendConfig::S3 {
        bucket: bucket.into(),
        region,
        endpoint: None,
        prefix: None,
    };
    self.options.cache_backend_config = Some(config);
    debug!("Configured S3 cache");
    self
}
1114
/// Use an S3-compatible cache backend reached via a custom endpoint.
///
/// Needed for S3-compatible services that are not addressed through the
/// default AWS endpoints (e.g. Cloudflare R2, MinIO, local development).
///
/// Requires the `cache-s3` feature to be enabled.
///
/// # Arguments
///
/// * `bucket` - S3 bucket name
/// * `endpoint` - Custom endpoint URL
/// * `region` - Region (required for some S3-compatible services)
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// // Cloudflare R2
/// let builder = ImageBuilder::new("./my-project").await?
///     .with_s3_cache_endpoint(
///         "my-bucket",
///         "https://accountid.r2.cloudflarestorage.com",
///         Some("auto".to_string()),
///     )
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "cache-s3")]
pub fn with_s3_cache_endpoint(
    mut self,
    bucket: impl Into<String>,
    endpoint: impl Into<String>,
    region: Option<String>,
) -> Self {
    let config = CacheBackendConfig::S3 {
        bucket: bucket.into(),
        region,
        endpoint: Some(endpoint.into()),
        prefix: None,
    };
    self.options.cache_backend_config = Some(config);
    debug!("Configured S3 cache with custom endpoint");
    self
}
1161
/// Supply a fully custom cache backend configuration.
///
/// The most flexible configuration path: the caller controls every cache
/// setting directly via [`CacheBackendConfig`].
///
/// Requires the `cache` feature to be enabled.
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::{ImageBuilder, CacheBackendConfig};
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// let builder = ImageBuilder::new("./my-project").await?
///     .with_cache_config(CacheBackendConfig::Memory)
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "cache")]
pub fn with_cache_config(mut self, config: CacheBackendConfig) -> Self {
    debug!("Configured custom cache backend");
    self.options.cache_backend_config = Some(config);
    self
}
1187
/// Install an already-initialized cache backend instance.
///
/// Useful for sharing one pre-configured backend across several builders,
/// or when cache initialization needs to happen outside the builder.
///
/// Requires the `cache` feature to be enabled.
///
/// # Example
///
/// ```no_run,ignore
/// use zlayer_builder::ImageBuilder;
/// use zlayer_registry::cache::BlobCache;
/// use std::sync::Arc;
///
/// # async fn example() -> Result<(), zlayer_builder::BuildError> {
/// let cache = Arc::new(Box::new(BlobCache::new()?) as Box<dyn zlayer_registry::cache::BlobCacheBackend>);
///
/// let builder = ImageBuilder::new("./my-project").await?
///     .with_cache_backend(cache)
///     .tag("myapp:latest");
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "cache")]
pub fn with_cache_backend(mut self, backend: Arc<Box<dyn BlobCacheBackend>>) -> Self {
    debug!("Configured pre-initialized cache backend");
    self.cache_backend = Some(backend);
    self
}
1218
/// Run the build
///
/// This executes the complete build process:
/// 1. Parse Dockerfile or load runtime template
/// 2. Build all required stages
/// 3. Commit and tag the final image
/// 4. Push to registry if configured
/// 5. Clean up intermediate containers
///
/// # Errors
///
/// Returns an error if:
/// - Dockerfile parsing fails
/// - A buildah command fails
/// - Target stage is not found
/// - Registry push fails
#[instrument(skip(self), fields(context = %self.context.display()))]
pub async fn build(self) -> Result<BuiltImage> {
    let start_time = std::time::Instant::now();
    let build_id = generate_build_id();

    info!(
        "Starting build in context: {} (build_id: {})",
        self.context.display(),
        build_id
    );

    // 1. Get parsed Dockerfile (from template, ZImagefile, or Dockerfile)
    let dockerfile = self.get_dockerfile().await?;
    debug!("Parsed Dockerfile with {} stages", dockerfile.stages.len());

    // 2. Determine stages to build
    let stages = self.resolve_stages(&dockerfile)?;
    debug!("Building {} stages", stages.len());

    // 3. Build each stage
    // Maps stage name AND stage index (both as strings) to the committed
    // intermediate image name, for COPY --from resolution.
    let mut stage_images: HashMap<String, String> = HashMap::new();
    // Track the final WORKDIR for each committed stage, used to resolve
    // relative source paths in COPY --from instructions.
    let mut stage_workdirs: HashMap<String, String> = HashMap::new();
    let mut final_container: Option<String> = None;
    let mut total_instructions = 0;

    // Initialize the layer cache tracker for this build session.
    // This tracks which instruction+base_layer combinations we've seen
    // and whether they were cache hits.
    let mut cache_tracker = LayerCacheTracker::new();

    for (stage_idx, stage) in stages.iter().enumerate() {
        let is_final_stage = stage_idx == stages.len() - 1;

        self.send_event(BuildEvent::StageStarted {
            index: stage_idx,
            name: stage.name.clone(),
            base_image: stage.base_image.to_string_ref(),
        });

        // Create container from base image
        let base = self
            .resolve_base_image(&stage.base_image, &stage_images)
            .await?;
        let container_id = self.create_container(&base).await?;

        debug!(
            "Created container {} for stage {} (base: {})",
            container_id,
            stage.identifier(),
            base
        );

        // Track the current base layer for cache key computation.
        // Each instruction modifies the container, so we update this after each instruction.
        let mut current_base_layer = container_id.clone();

        // Track the current WORKDIR for this stage. Used to resolve relative paths
        // when this stage is used as a source for COPY --from in a later stage.
        let mut current_workdir = String::from("/");

        // Execute instructions
        for (inst_idx, instruction) in stage.instructions.iter().enumerate() {
            self.send_event(BuildEvent::InstructionStarted {
                stage: stage_idx,
                index: inst_idx,
                instruction: format!("{:?}", instruction),
            });

            // Generate the cache key for this instruction
            let instruction_cache_key = instruction.cache_key();

            // Track instruction start time for potential cache hit heuristics
            let instruction_start = std::time::Instant::now();

            // Resolve COPY --from references to actual committed image names,
            // and resolve relative source paths using the source stage's WORKDIR.
            // `resolved_instruction` exists to give the rewritten instruction a
            // home that outlives the borrow held by `instruction_ref`.
            let resolved_instruction;
            let instruction_ref = if let Instruction::Copy(copy) = instruction {
                if let Some(ref from) = copy.from {
                    if let Some(image_name) = stage_images.get(from) {
                        let mut resolved_copy = copy.clone();
                        resolved_copy.from = Some(image_name.clone());

                        // Resolve relative source paths using the source stage's WORKDIR.
                        // If the source stage had `workdir: "/build"` and the copy source
                        // is `"app"`, we need to resolve it to `"/build/app"`.
                        if let Some(source_workdir) = stage_workdirs.get(from) {
                            resolved_copy.sources = resolved_copy
                                .sources
                                .iter()
                                .map(|src| {
                                    if src.starts_with('/') {
                                        // Absolute path - use as-is
                                        src.clone()
                                    } else {
                                        // Relative path - prepend source stage's workdir
                                        if source_workdir == "/" {
                                            format!("/{}", src)
                                        } else {
                                            format!("{}/{}", source_workdir, src)
                                        }
                                    }
                                })
                                .collect();
                        }

                        resolved_instruction = Instruction::Copy(resolved_copy);
                        &resolved_instruction
                    } else {
                        instruction
                    }
                } else {
                    instruction
                }
            } else {
                instruction
            };

            // Inject default cache mounts into RUN instructions
            // (same borrow-extension pattern as `resolved_instruction` above).
            let instruction_with_defaults;
            let instruction_ref = if !self.options.default_cache_mounts.is_empty() {
                if let Instruction::Run(run) = instruction_ref {
                    let mut merged = run.clone();
                    for default_mount in &self.options.default_cache_mounts {
                        // Deduplicate by target path
                        let target = match default_mount {
                            RunMount::Cache { target, .. } => target,
                            _ => continue,
                        };
                        let already_has = merged.mounts.iter().any(
                            |m| matches!(m, RunMount::Cache { target: t, .. } if t == target),
                        );
                        if !already_has {
                            merged.mounts.push(default_mount.clone());
                        }
                    }
                    instruction_with_defaults = Instruction::Run(merged);
                    &instruction_with_defaults
                } else {
                    instruction_ref
                }
            } else {
                instruction_ref
            };

            // Only RUN instructions are retried; everything else gets one attempt.
            let is_run_instruction = matches!(instruction_ref, Instruction::Run(_));
            let max_attempts = if is_run_instruction {
                self.options.retries + 1
            } else {
                1
            };

            let commands = BuildahCommand::from_instruction(&container_id, instruction_ref);

            // Accumulates stdout+stderr across all commands and attempts for
            // this instruction; fed to the cache-hit heuristic below.
            let mut combined_output = String::new();
            for cmd in commands {
                let mut last_output = None;

                for attempt in 1..=max_attempts {
                    if attempt > 1 {
                        tracing::warn!(
                            "Retrying step (attempt {}/{})...",
                            attempt,
                            max_attempts
                        );
                        self.send_event(BuildEvent::Output {
                            line: format!(
                                "⟳ Retrying step (attempt {}/{})...",
                                attempt, max_attempts
                            ),
                            is_stderr: false,
                        });
                        tokio::time::sleep(std::time::Duration::from_secs(3)).await;
                    }

                    let output = self
                        .executor
                        .execute_streaming(&cmd, |is_stdout, line| {
                            self.send_event(BuildEvent::Output {
                                line: line.to_string(),
                                is_stderr: !is_stdout,
                            });
                        })
                        .await?;

                    combined_output.push_str(&output.stdout);
                    combined_output.push_str(&output.stderr);

                    if output.success() {
                        last_output = Some(output);
                        break;
                    }

                    last_output = Some(output);
                }

                // Safe: max_attempts >= 1, so the retry loop above always
                // runs at least once and sets last_output.
                let output = last_output.unwrap();
                if !output.success() {
                    self.send_event(BuildEvent::BuildFailed {
                        error: output.stderr.clone(),
                    });

                    // Cleanup container (best-effort; error intentionally ignored)
                    let _ = self
                        .executor
                        .execute(&BuildahCommand::rm(&container_id))
                        .await;

                    return Err(BuildError::buildah_execution(
                        cmd.to_command_string(),
                        output.exit_code,
                        output.stderr,
                    ));
                }
            }

            let instruction_elapsed_ms = instruction_start.elapsed().as_millis() as u64;

            // Track WORKDIR changes for later COPY --from resolution.
            // We need to know the final WORKDIR of each stage so we can resolve
            // relative paths when copying from that stage.
            if let Instruction::Workdir(dir) = instruction {
                current_workdir = dir.clone();
            }

            // Attempt to detect if this was a cache hit.
            // TODO: Implement proper cache detection. Currently always returns false.
            // Possible approaches:
            // - Time-based: Cached layers typically execute in < 100ms
            // - Output-based: Look for cache indicators in buildah output
            // - Digest-based: Pre-compute expected digest and check storage
            let cached = cache_tracker.detect_cache_hit(
                instruction,
                instruction_elapsed_ms,
                &combined_output,
            );

            // Record this instruction execution for future reference
            cache_tracker.record(
                instruction_cache_key.clone(),
                current_base_layer.clone(),
                cached,
            );

            // Update the base layer identifier for the next instruction.
            // In a proper implementation, this would be the new layer digest
            // after the instruction was committed. For now, we use a composite
            // of the previous base and the instruction key.
            current_base_layer = format!("{}:{}", current_base_layer, instruction_cache_key);

            self.send_event(BuildEvent::InstructionComplete {
                stage: stage_idx,
                index: inst_idx,
                cached,
            });

            total_instructions += 1;
        }

        // Handle stage completion
        if let Some(name) = &stage.name {
            // Named stage - commit and save for COPY --from
            // Include the build_id to prevent collisions when parallel
            // builds share stage names (e.g., two Dockerfiles both having
            // a stage named "builder").
            let image_name = format!("zlayer-build-{}-stage-{}", build_id, name);
            self.commit_container(&container_id, &image_name, false)
                .await?;
            stage_images.insert(name.clone(), image_name.clone());

            // Store the final WORKDIR for this stage so COPY --from can resolve
            // relative paths correctly.
            stage_workdirs.insert(name.clone(), current_workdir.clone());

            // Also add by index (aliases the same image under a second key)
            stage_images.insert(stage.index.to_string(), image_name.clone());
            stage_workdirs.insert(stage.index.to_string(), current_workdir.clone());

            // If this is also the final stage (named target), keep reference
            if is_final_stage {
                final_container = Some(container_id);
            } else {
                // Cleanup intermediate container
                let _ = self
                    .executor
                    .execute(&BuildahCommand::rm(&container_id))
                    .await;
            }
        } else if is_final_stage {
            // Unnamed final stage - keep container for final commit
            final_container = Some(container_id);
        } else {
            // Unnamed intermediate stage - commit by index for COPY --from
            let image_name = format!("zlayer-build-{}-stage-{}", build_id, stage.index);
            self.commit_container(&container_id, &image_name, false)
                .await?;
            stage_images.insert(stage.index.to_string(), image_name);
            // Store the final WORKDIR for this stage
            stage_workdirs.insert(stage.index.to_string(), current_workdir.clone());
            let _ = self
                .executor
                .execute(&BuildahCommand::rm(&container_id))
                .await;
        }

        self.send_event(BuildEvent::StageComplete { index: stage_idx });
    }

    // 4. Commit final image
    let final_container = final_container.ok_or_else(|| BuildError::InvalidInstruction {
        instruction: "build".to_string(),
        reason: "No stages to build".to_string(),
    })?;

    // The first configured tag names the committed image; a timestamped
    // fallback is generated when no tags were set.
    let image_name = self
        .options
        .tags
        .first()
        .cloned()
        .unwrap_or_else(|| format!("zlayer-build:{}", chrono_lite_timestamp()));

    let image_id = self
        .commit_container(&final_container, &image_name, self.options.squash)
        .await?;

    info!("Committed final image: {} ({})", image_name, image_id);

    // 5. Apply additional tags
    for tag in self.options.tags.iter().skip(1) {
        self.tag_image(&image_id, tag).await?;
        debug!("Applied tag: {}", tag);
    }

    // 6. Cleanup (best-effort; failures ignored)
    let _ = self
        .executor
        .execute(&BuildahCommand::rm(&final_container))
        .await;

    // Cleanup intermediate stage images. Note: named stages were inserted
    // under both name and index keys, so rmi may be invoked twice for the
    // same image; the second call fails harmlessly and is ignored.
    for (_, img) in stage_images {
        let _ = self.executor.execute(&BuildahCommand::rmi(&img)).await;
    }

    // 7. Push if requested
    if self.options.push {
        for tag in &self.options.tags {
            self.push_image(tag).await?;
            info!("Pushed image: {}", tag);
        }
    }

    let build_time_ms = start_time.elapsed().as_millis() as u64;

    self.send_event(BuildEvent::BuildComplete {
        image_id: image_id.clone(),
    });

    info!(
        "Build completed in {}ms: {} with {} tags",
        build_time_ms,
        image_id,
        self.options.tags.len()
    );

    Ok(BuiltImage {
        image_id,
        tags: self.options.tags.clone(),
        layer_count: total_instructions,
        size: 0, // TODO: get actual size via buildah inspect
        build_time_ms,
    })
}
1610
1611 /// Get a parsed [`Dockerfile`] from the configured source.
1612 ///
1613 /// Detection order:
1614 /// 1. If `runtime` is set → use template string → parse as Dockerfile
1615 /// 2. If `zimagefile` is explicitly set → read & parse ZImagefile → convert
1616 /// 3. If a file called `ZImagefile` exists in the context dir → same as (2)
1617 /// 4. Fall back to reading a Dockerfile (from `dockerfile` option or default)
1618 async fn get_dockerfile(&self) -> Result<Dockerfile> {
1619 // (a) Runtime template takes highest priority.
1620 if let Some(runtime) = &self.options.runtime {
1621 debug!("Using runtime template: {}", runtime);
1622 let content = get_template(*runtime);
1623 return Dockerfile::parse(content);
1624 }
1625
1626 // (b) Explicit ZImagefile path.
1627 if let Some(ref zimage_path) = self.options.zimagefile {
1628 debug!("Reading ZImagefile: {}", zimage_path.display());
1629 let content =
1630 fs::read_to_string(zimage_path)
1631 .await
1632 .map_err(|e| BuildError::ContextRead {
1633 path: zimage_path.clone(),
1634 source: e,
1635 })?;
1636 let zimage = crate::zimage::parse_zimagefile(&content)?;
1637 return self.handle_zimage(&zimage).await;
1638 }
1639
1640 // (c) Auto-detect ZImagefile in context directory.
1641 let auto_zimage_path = self.context.join("ZImagefile");
1642 if auto_zimage_path.exists() {
1643 debug!(
1644 "Found ZImagefile in context: {}",
1645 auto_zimage_path.display()
1646 );
1647 let content = fs::read_to_string(&auto_zimage_path).await.map_err(|e| {
1648 BuildError::ContextRead {
1649 path: auto_zimage_path,
1650 source: e,
1651 }
1652 })?;
1653 let zimage = crate::zimage::parse_zimagefile(&content)?;
1654 return self.handle_zimage(&zimage).await;
1655 }
1656
1657 // (d) Fall back to Dockerfile.
1658 let dockerfile_path = self
1659 .options
1660 .dockerfile
1661 .clone()
1662 .unwrap_or_else(|| self.context.join("Dockerfile"));
1663
1664 debug!("Reading Dockerfile: {}", dockerfile_path.display());
1665
1666 let content =
1667 fs::read_to_string(&dockerfile_path)
1668 .await
1669 .map_err(|e| BuildError::ContextRead {
1670 path: dockerfile_path,
1671 source: e,
1672 })?;
1673
1674 Dockerfile::parse(&content)
1675 }
1676
1677 /// Convert a parsed [`ZImage`] into the internal [`Dockerfile`] IR.
1678 ///
1679 /// Handles the three ZImage modes that can produce a Dockerfile:
1680 /// - **Runtime** mode: delegates to the template system
1681 /// - **Single-stage / Multi-stage**: converts via [`zimage_to_dockerfile`]
1682 /// - **WASM** mode: errors out (WASM uses `zlayer wasm build`, not `zlayer build`)
1683 ///
1684 /// Any `build:` directives are resolved first by spawning nested builds.
1685 async fn handle_zimage(&self, zimage: &crate::zimage::ZImage) -> Result<Dockerfile> {
1686 // Runtime mode: delegate to template system.
1687 if let Some(ref runtime_name) = zimage.runtime {
1688 let rt = Runtime::from_name(runtime_name).ok_or_else(|| {
1689 BuildError::zimagefile_validation(format!(
1690 "unknown runtime '{runtime_name}' in ZImagefile"
1691 ))
1692 })?;
1693 let content = get_template(rt);
1694 return Dockerfile::parse(content);
1695 }
1696
1697 // WASM mode: not supported through `zlayer build`.
1698 if zimage.wasm.is_some() {
1699 return Err(BuildError::invalid_instruction(
1700 "ZImagefile",
1701 "WASM builds use `zlayer wasm build`, not `zlayer build`",
1702 ));
1703 }
1704
1705 // Resolve any `build:` directives to concrete base image tags.
1706 let resolved = self.resolve_build_directives(zimage).await?;
1707
1708 // Single-stage or multi-stage: convert to Dockerfile IR directly.
1709 crate::zimage::zimage_to_dockerfile(&resolved)
1710 }
1711
1712 /// Resolve `build:` directives in a ZImage by running nested builds.
1713 ///
1714 /// For each `build:` directive (top-level or per-stage), this method:
1715 /// 1. Determines the build context directory
1716 /// 2. Auto-detects the build file (ZImagefile > Dockerfile) unless specified
1717 /// 3. Spawns a nested `ImageBuilder` to build the context
1718 /// 4. Tags the result and replaces `build` with `base`
1719 async fn resolve_build_directives(
1720 &self,
1721 zimage: &crate::zimage::ZImage,
1722 ) -> Result<crate::zimage::ZImage> {
1723 let mut resolved = zimage.clone();
1724
1725 // Resolve top-level `build:` directive.
1726 if let Some(ref build_ctx) = resolved.build {
1727 let tag = self.run_nested_build(build_ctx, "toplevel").await?;
1728 resolved.base = Some(tag);
1729 resolved.build = None;
1730 }
1731
1732 // Resolve per-stage `build:` directives.
1733 if let Some(ref mut stages) = resolved.stages {
1734 for (name, stage) in stages.iter_mut() {
1735 if let Some(ref build_ctx) = stage.build {
1736 let tag = self.run_nested_build(build_ctx, name).await?;
1737 stage.base = Some(tag);
1738 stage.build = None;
1739 }
1740 }
1741 }
1742
1743 Ok(resolved)
1744 }
1745
/// Run a nested build from a `build:` directive and return the resulting image tag.
///
/// Returns a boxed future instead of being an `async fn`: nested builds can
/// recurse (the nested `build()` re-enters `resolve_build_directives`, which
/// calls back here), and indirect async recursion requires type erasure
/// through `Pin<Box<dyn Future>>`.
fn run_nested_build<'a>(
    &'a self,
    build_ctx: &'a crate::zimage::types::ZBuildContext,
    stage_name: &'a str,
) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<String>> + Send + 'a>> {
    Box::pin(self.run_nested_build_inner(build_ctx, stage_name))
}
1754
/// Implementation of [`run_nested_build`]: builds a dependency image from a
/// `build:` directive's context and returns the tag it was committed under.
///
/// The nested build inherits build args and the default registry from the
/// directive/parent, but nothing else from this builder's options.
async fn run_nested_build_inner(
    &self,
    build_ctx: &crate::zimage::types::ZBuildContext,
    stage_name: &str,
) -> Result<String> {
    let context_dir = build_ctx.context_dir(&self.context);

    // Fail fast with a descriptive error before spawning a builder.
    if !context_dir.exists() {
        return Err(BuildError::ContextRead {
            path: context_dir,
            source: std::io::Error::new(
                std::io::ErrorKind::NotFound,
                format!(
                    "build context directory not found for build directive in '{stage_name}'"
                ),
            ),
        });
    }

    info!(
        "Building nested image for '{}' from context: {}",
        stage_name,
        context_dir.display()
    );

    // Create a tag for the nested build result.
    // Timestamped so repeated builds of the same dependency don't collide.
    let tag = format!(
        "zlayer-build-dep-{}:{}",
        stage_name,
        chrono_lite_timestamp()
    );

    // Create nested builder.
    let mut nested = ImageBuilder::new(&context_dir).await?;
    nested = nested.tag(&tag);

    // Apply explicit build file if specified; the extension/name decides
    // whether it is treated as a ZImagefile or a Dockerfile.
    if let Some(file) = build_ctx.file() {
        let file_path = context_dir.join(file);
        if file.ends_with(".yml") || file.ends_with(".yaml") || file.starts_with("ZImagefile") {
            nested = nested.zimagefile(file_path);
        } else {
            nested = nested.dockerfile(file_path);
        }
    }

    // Apply build args from the directive.
    for (key, value) in build_ctx.args() {
        nested = nested.build_arg(&key, &value);
    }

    // Propagate default registry if set, so the dependency resolves short
    // image names the same way the parent build does.
    if let Some(ref reg) = self.options.default_registry {
        nested = nested.default_registry(reg.clone());
    }

    // Run the nested build. Note: indirect async recursion — `build()` may
    // re-enter run_nested_build for the dependency's own `build:` directives.
    let result = nested.build().await?;
    info!(
        "Nested build for '{}' completed: {}",
        stage_name, result.image_id
    );

    Ok(tag)
}
1820
1821 /// Resolve which stages need to be built
1822 fn resolve_stages<'a>(&self, dockerfile: &'a Dockerfile) -> Result<Vec<&'a Stage>> {
1823 if let Some(target) = &self.options.target {
1824 // Find target stage and all its dependencies
1825 self.resolve_target_stages(dockerfile, target)
1826 } else {
1827 // Build all stages
1828 Ok(dockerfile.stages.iter().collect())
1829 }
1830 }
1831
1832 /// Resolve stages needed for a specific target
1833 fn resolve_target_stages<'a>(
1834 &self,
1835 dockerfile: &'a Dockerfile,
1836 target: &str,
1837 ) -> Result<Vec<&'a Stage>> {
1838 // Find the target stage
1839 let target_stage = dockerfile
1840 .get_stage(target)
1841 .ok_or_else(|| BuildError::stage_not_found(target))?;
1842
1843 // Collect all stages up to and including the target
1844 // This is a simplified approach - a full implementation would
1845 // analyze COPY --from dependencies
1846 let mut stages: Vec<&Stage> = Vec::new();
1847
1848 for stage in &dockerfile.stages {
1849 stages.push(stage);
1850 if stage.index == target_stage.index {
1851 break;
1852 }
1853 }
1854
1855 Ok(stages)
1856 }
1857
1858 /// Resolve a base image reference to an actual image name.
1859 ///
1860 /// Resolution chain for short (unqualified) image names:
1861 /// 1. Check `LocalRegistry` for a cached copy (if configured)
1862 /// 2. Check `default_registry` for the image (if configured)
1863 /// 3. Fall back to Docker Hub qualification (`docker.io/library/...`)
1864 ///
1865 /// Already-qualified names (containing a registry hostname) skip this chain.
1866 async fn resolve_base_image(
1867 &self,
1868 image_ref: &ImageRef,
1869 stage_images: &HashMap<String, String>,
1870 ) -> Result<String> {
1871 match image_ref {
1872 ImageRef::Stage(name) => {
1873 return stage_images
1874 .get(name)
1875 .cloned()
1876 .ok_or_else(|| BuildError::stage_not_found(name));
1877 }
1878 ImageRef::Scratch => return Ok("scratch".to_string()),
1879 ImageRef::Registry { .. } => {}
1880 }
1881
1882 // Check if name is already fully qualified (has registry hostname).
1883 let is_qualified = match image_ref {
1884 ImageRef::Registry { image, .. } => {
1885 let first = image.split('/').next().unwrap_or("");
1886 first.contains('.') || first.contains(':') || first == "localhost"
1887 }
1888 _ => false,
1889 };
1890
1891 // For unqualified names, try local registry and default registry first.
1892 if !is_qualified {
1893 if let Some(resolved) = self.try_resolve_from_sources(image_ref).await {
1894 return Ok(resolved);
1895 }
1896 }
1897
1898 // Fall back: qualify to docker.io and build the full string.
1899 let qualified = image_ref.qualify();
1900 match &qualified {
1901 ImageRef::Registry { image, tag, digest } => {
1902 let mut result = image.clone();
1903 if let Some(t) = tag {
1904 result.push(':');
1905 result.push_str(t);
1906 }
1907 if let Some(d) = digest {
1908 result.push('@');
1909 result.push_str(d);
1910 }
1911 if tag.is_none() && digest.is_none() {
1912 result.push_str(":latest");
1913 }
1914 Ok(result)
1915 }
1916 _ => unreachable!("qualify() preserves Registry variant"),
1917 }
1918 }
1919
    /// Try to resolve an unqualified image from local registry or default registry.
    ///
    /// Returns `Some(fully_qualified_name)` if found, `None` to fall back to docker.io.
    /// Only plain registry references participate; stage and scratch references are
    /// handled earlier by the caller.
    async fn try_resolve_from_sources(&self, image_ref: &ImageRef) -> Option<String> {
        // A missing tag is treated as ":latest", mirroring container tooling defaults.
        let (name, tag_str) = match image_ref {
            ImageRef::Registry { image, tag, .. } => {
                (image.as_str(), tag.as_deref().unwrap_or("latest"))
            }
            _ => return None,
        };

        // 1. Check local OCI registry
        #[cfg(feature = "local-registry")]
        if let Some(ref local_reg) = self.local_registry {
            if local_reg.has_manifest(name, tag_str).await {
                info!(
                    "Found {}:{} in local registry, using local copy",
                    name, tag_str
                );
                // Build an OCI reference pointing to the local registry path.
                // buildah can pull from an OCI layout directory.
                // NOTE(review): the reference embeds only `tag_str`, not `name` —
                // if `local_reg.root()` is a layout shared across images, two
                // images with the same tag would collide. Confirm root() is
                // per-image (can't tell from this file).
                let oci_path = format!("oci:{}:{}", local_reg.root().display(), tag_str);
                return Some(oci_path);
            }
        }

        // 2. Check configured default registry
        if let Some(ref registry) = self.options.default_registry {
            let qualified = format!("{}/{}:{}", registry, name, tag_str);
            debug!("Checking default registry for image: {}", qualified);
            // Return the qualified name for the configured registry.
            // buildah will attempt to pull from this registry; if it fails,
            // the build will error (the user explicitly configured this registry).
            // No existence check is performed here — resolution is optimistic.
            return Some(qualified);
        }

        None
    }
1958
1959 /// Create a working container from an image
1960 async fn create_container(&self, image: &str) -> Result<String> {
1961 let cmd = BuildahCommand::from_image(image);
1962 let output = self.executor.execute_checked(&cmd).await?;
1963 Ok(output.stdout.trim().to_string())
1964 }
1965
1966 /// Commit a container to create an image
1967 async fn commit_container(
1968 &self,
1969 container: &str,
1970 image_name: &str,
1971 squash: bool,
1972 ) -> Result<String> {
1973 let cmd = BuildahCommand::commit_with_opts(
1974 container,
1975 image_name,
1976 self.options.format.as_deref(),
1977 squash,
1978 );
1979 let output = self.executor.execute_checked(&cmd).await?;
1980 Ok(output.stdout.trim().to_string())
1981 }
1982
1983 /// Tag an image with an additional tag
1984 async fn tag_image(&self, image: &str, tag: &str) -> Result<()> {
1985 let cmd = BuildahCommand::tag(image, tag);
1986 self.executor.execute_checked(&cmd).await?;
1987 Ok(())
1988 }
1989
    /// Push an image to a registry
    ///
    /// Credentials come from `options.registry_auth` when present; otherwise
    /// the push relies on whatever auth buildah resolves on its own.
    async fn push_image(&self, tag: &str) -> Result<()> {
        let mut cmd = BuildahCommand::push(tag);

        // Add auth if provided
        if let Some(auth) = &self.options.registry_auth {
            // NOTE(review): `--creds user:pass` puts the password in the
            // process argument list, visible to other local users via `ps`.
            // Consider an authfile or stdin if the executor supports it —
            // confirm the threat model before shipping.
            cmd = cmd
                .arg("--creds")
                .arg(format!("{}:{}", auth.username, auth.password));
        }

        self.executor.execute_checked(&cmd).await?;
        Ok(())
    }
2004
2005 /// Send an event to the TUI (if configured)
2006 fn send_event(&self, event: BuildEvent) {
2007 if let Some(tx) = &self.event_tx {
2008 // Ignore send errors - the receiver may have been dropped
2009 let _ = tx.send(event);
2010 }
2011 }
2012}
2013
/// Generate a timestamp-based name: seconds since the Unix epoch, as a string.
///
/// Falls back to `"0"` if the system clock reports a time before the epoch
/// (`duration_since` errors → `Duration::default()`).
fn chrono_lite_timestamp() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
        // `.to_string()` is the idiomatic (and clippy-clean) form of
        // `format!("{}", x)` for a single value.
        .to_string()
}
2022
2023/// Generate a short unique build ID for namespacing intermediate stage images.
2024///
2025/// This prevents parallel builds from clobbering each other's intermediate
2026/// stage images when they share stage names (e.g., two Dockerfiles both have
2027/// a stage named "builder").
2028///
2029/// The ID combines nanosecond-precision timestamp with the process ID, then
2030/// takes 12 hex characters from a SHA-256 hash for a compact, collision-resistant
2031/// identifier.
2032fn generate_build_id() -> String {
2033 use sha2::{Digest, Sha256};
2034 use std::time::{SystemTime, UNIX_EPOCH};
2035
2036 let nanos = SystemTime::now()
2037 .duration_since(UNIX_EPOCH)
2038 .unwrap_or_default()
2039 .as_nanos();
2040 let pid = std::process::id();
2041 // Use a monotonic counter to guarantee uniqueness even within the same
2042 // nanosecond on the same process (e.g. tests or very fast sequential calls).
2043 static COUNTER: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);
2044 let count = COUNTER.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
2045
2046 let mut hasher = Sha256::new();
2047 hasher.update(nanos.to_le_bytes());
2048 hasher.update(pid.to_le_bytes());
2049 hasher.update(count.to_le_bytes());
2050 let hash = hasher.finalize();
2051 // 12 hex chars = 6 bytes = 48 bits of entropy, ample for build parallelism
2052 hex::encode(&hash[..6])
2053}
2054
#[cfg(test)]
mod tests {
    use super::*;

    // --- Option construction ------------------------------------------------

    #[test]
    fn test_registry_auth_new() {
        let auth = RegistryAuth::new("user", "pass");
        assert_eq!(auth.username, "user");
        assert_eq!(auth.password, "pass");
    }

    #[test]
    fn test_build_options_default() {
        let opts = BuildOptions::default();
        assert!(opts.dockerfile.is_none());
        assert!(opts.zimagefile.is_none());
        assert!(opts.runtime.is_none());
        assert!(opts.build_args.is_empty());
        assert!(opts.target.is_none());
        assert!(opts.tags.is_empty());
        assert!(!opts.no_cache);
        assert!(!opts.push);
        assert!(!opts.squash);
        // New cache-related fields
        assert!(opts.layers); // Default is true
        assert!(opts.cache_from.is_none());
        assert!(opts.cache_to.is_none());
        assert!(opts.cache_ttl.is_none());
        // Cache backend config (only with cache feature)
        #[cfg(feature = "cache")]
        assert!(opts.cache_backend_config.is_none());
    }

    // --- Base image resolution ----------------------------------------------
    // These exercise resolve_base_image's chain: docker.io qualification,
    // stage lookup, scratch pass-through, and default-registry redirection.

    #[tokio::test]
    async fn test_resolve_base_image_registry() {
        let builder = create_test_builder();
        let stage_images = HashMap::new();

        // Simple image (qualified to docker.io)
        let image_ref = ImageRef::Registry {
            image: "alpine".to_string(),
            tag: Some("3.18".to_string()),
            digest: None,
        };
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert_eq!(result.unwrap(), "docker.io/library/alpine:3.18");

        // Image with digest (qualified to docker.io)
        let image_ref = ImageRef::Registry {
            image: "alpine".to_string(),
            tag: None,
            digest: Some("sha256:abc123".to_string()),
        };
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert_eq!(result.unwrap(), "docker.io/library/alpine@sha256:abc123");

        // Image with no tag or digest (qualified to docker.io + :latest)
        let image_ref = ImageRef::Registry {
            image: "alpine".to_string(),
            tag: None,
            digest: None,
        };
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert_eq!(result.unwrap(), "docker.io/library/alpine:latest");

        // Already-qualified image (unchanged)
        let image_ref = ImageRef::Registry {
            image: "ghcr.io/org/myimage".to_string(),
            tag: Some("v1".to_string()),
            digest: None,
        };
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert_eq!(result.unwrap(), "ghcr.io/org/myimage:v1");
    }

    #[tokio::test]
    async fn test_resolve_base_image_stage() {
        let builder = create_test_builder();
        let mut stage_images = HashMap::new();
        stage_images.insert(
            "builder".to_string(),
            "zlayer-build-stage-builder".to_string(),
        );

        // Known stage resolves to its recorded intermediate image name
        let image_ref = ImageRef::Stage("builder".to_string());
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert_eq!(result.unwrap(), "zlayer-build-stage-builder");

        // Missing stage
        let image_ref = ImageRef::Stage("missing".to_string());
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert!(result.is_err());
    }

    #[tokio::test]
    async fn test_resolve_base_image_scratch() {
        let builder = create_test_builder();
        let stage_images = HashMap::new();

        let image_ref = ImageRef::Scratch;
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert_eq!(result.unwrap(), "scratch");
    }

    #[tokio::test]
    async fn test_resolve_base_image_with_default_registry() {
        let mut builder = create_test_builder();
        builder.options.default_registry = Some("git.example.com:5000".to_string());
        let stage_images = HashMap::new();

        // Unqualified image should resolve to default registry
        let image_ref = ImageRef::Registry {
            image: "myapp".to_string(),
            tag: Some("v1".to_string()),
            digest: None,
        };
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert_eq!(result.unwrap(), "git.example.com:5000/myapp:v1");

        // Already-qualified image should NOT use default registry
        let image_ref = ImageRef::Registry {
            image: "ghcr.io/org/image".to_string(),
            tag: Some("latest".to_string()),
            digest: None,
        };
        let result = builder.resolve_base_image(&image_ref, &stage_images).await;
        assert_eq!(result.unwrap(), "ghcr.io/org/image:latest");
    }

    // Shared fixture: builds the struct directly so no buildah binary or
    // async setup is required. The buildah path is never executed by these tests.
    fn create_test_builder() -> ImageBuilder {
        // Create a minimal builder for testing (without async initialization)
        ImageBuilder {
            context: PathBuf::from("/tmp/test"),
            options: BuildOptions::default(),
            executor: BuildahExecutor::with_path("/usr/bin/buildah"),
            event_tx: None,
            #[cfg(feature = "cache")]
            cache_backend: None,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        }
    }

    // Builder method chaining tests
    #[test]
    fn test_builder_chaining() {
        let mut builder = create_test_builder();

        builder = builder
            .dockerfile("./Dockerfile.test")
            .runtime(Runtime::Node20)
            .build_arg("VERSION", "1.0")
            .target("builder")
            .tag("myapp:latest")
            .tag("myapp:v1")
            .no_cache()
            .squash()
            .format("oci");

        assert_eq!(
            builder.options.dockerfile,
            Some(PathBuf::from("./Dockerfile.test"))
        );
        assert_eq!(builder.options.runtime, Some(Runtime::Node20));
        assert_eq!(
            builder.options.build_args.get("VERSION"),
            Some(&"1.0".to_string())
        );
        assert_eq!(builder.options.target, Some("builder".to_string()));
        assert_eq!(builder.options.tags.len(), 2);
        assert!(builder.options.no_cache);
        assert!(builder.options.squash);
        assert_eq!(builder.options.format, Some("oci".to_string()));
    }

    #[test]
    fn test_builder_push_with_auth() {
        let mut builder = create_test_builder();
        builder = builder.push(RegistryAuth::new("user", "pass"));

        assert!(builder.options.push);
        assert!(builder.options.registry_auth.is_some());
        let auth = builder.options.registry_auth.unwrap();
        assert_eq!(auth.username, "user");
        assert_eq!(auth.password, "pass");
    }

    #[test]
    fn test_builder_push_without_auth() {
        let mut builder = create_test_builder();
        builder = builder.push_without_auth();

        assert!(builder.options.push);
        assert!(builder.options.registry_auth.is_none());
    }

    #[test]
    fn test_builder_layers() {
        let mut builder = create_test_builder();
        // Default is true
        assert!(builder.options.layers);

        // Disable layers
        builder = builder.layers(false);
        assert!(!builder.options.layers);

        // Re-enable layers
        builder = builder.layers(true);
        assert!(builder.options.layers);
    }

    #[test]
    fn test_builder_cache_from() {
        let mut builder = create_test_builder();
        assert!(builder.options.cache_from.is_none());

        builder = builder.cache_from("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_from,
            Some("registry.example.com/myapp:cache".to_string())
        );
    }

    #[test]
    fn test_builder_cache_to() {
        let mut builder = create_test_builder();
        assert!(builder.options.cache_to.is_none());

        builder = builder.cache_to("registry.example.com/myapp:cache");
        assert_eq!(
            builder.options.cache_to,
            Some("registry.example.com/myapp:cache".to_string())
        );
    }

    #[test]
    fn test_builder_cache_ttl() {
        use std::time::Duration;

        let mut builder = create_test_builder();
        assert!(builder.options.cache_ttl.is_none());

        builder = builder.cache_ttl(Duration::from_secs(3600));
        assert_eq!(builder.options.cache_ttl, Some(Duration::from_secs(3600)));
    }

    #[test]
    fn test_builder_cache_options_chaining() {
        use std::time::Duration;

        let builder = create_test_builder()
            .layers(true)
            .cache_from("registry.example.com/cache:input")
            .cache_to("registry.example.com/cache:output")
            .cache_ttl(Duration::from_secs(7200))
            .no_cache();

        assert!(builder.options.layers);
        assert_eq!(
            builder.options.cache_from,
            Some("registry.example.com/cache:input".to_string())
        );
        assert_eq!(
            builder.options.cache_to,
            Some("registry.example.com/cache:output".to_string())
        );
        assert_eq!(builder.options.cache_ttl, Some(Duration::from_secs(7200)));
        assert!(builder.options.no_cache);
    }

    #[test]
    fn test_chrono_lite_timestamp() {
        let ts = chrono_lite_timestamp();
        // Should be a valid number
        let parsed: u64 = ts.parse().expect("Should be a valid u64");
        // Should be reasonably recent (1_700_000_000 s ≈ 2023-11-14)
        assert!(parsed > 1700000000);
    }

    // LayerCacheTracker tests
    #[test]
    fn test_layer_cache_tracker_new() {
        let tracker = LayerCacheTracker::new();
        assert!(tracker.known_layers.is_empty());
    }

    #[test]
    fn test_layer_cache_tracker_record_and_lookup() {
        let mut tracker = LayerCacheTracker::new();

        // Record a cache miss
        tracker.record("abc123".to_string(), "container-1".to_string(), false);

        // Check that we can look it up
        assert!(!tracker.is_cached("abc123", "container-1"));

        // Record a cache hit
        tracker.record("def456".to_string(), "container-2".to_string(), true);

        assert!(tracker.is_cached("def456", "container-2"));
    }

    #[test]
    fn test_layer_cache_tracker_unknown_returns_false() {
        let tracker = LayerCacheTracker::new();

        // Unknown entries should return false
        assert!(!tracker.is_cached("unknown", "unknown"));
    }

    #[test]
    fn test_layer_cache_tracker_different_base_layers() {
        let mut tracker = LayerCacheTracker::new();

        // Same instruction key but different base layers
        tracker.record("inst-1".to_string(), "base-a".to_string(), true);
        tracker.record("inst-1".to_string(), "base-b".to_string(), false);

        assert!(tracker.is_cached("inst-1", "base-a"));
        assert!(!tracker.is_cached("inst-1", "base-b"));
    }

    #[test]
    fn test_layer_cache_tracker_detect_cache_hit() {
        use crate::dockerfile::RunInstruction;

        let tracker = LayerCacheTracker::new();
        let instruction = Instruction::Run(RunInstruction::shell("echo hello"));

        // Currently always returns false - this test documents the expected behavior
        // and will need to be updated when cache detection is implemented
        assert!(!tracker.detect_cache_hit(&instruction, 50, ""));
        assert!(!tracker.detect_cache_hit(&instruction, 1000, ""));
        assert!(!tracker.detect_cache_hit(&instruction, 50, "Using cache"));
    }

    #[test]
    fn test_layer_cache_tracker_overwrite() {
        let mut tracker = LayerCacheTracker::new();

        // Record as cache miss first
        tracker.record("key".to_string(), "base".to_string(), false);
        assert!(!tracker.is_cached("key", "base"));

        // Overwrite with cache hit
        tracker.record("key".to_string(), "base".to_string(), true);
        assert!(tracker.is_cached("key", "base"));
    }
}