1use std::collections::HashMap;
8use std::path::Path;
9use std::sync::mpsc;
10
11use tracing::{debug, info};
12
13use crate::buildah::{BuildahCommand, BuildahExecutor};
14use crate::builder::{BuildOptions, BuiltImage, PullBaseMode, RegistryAuth};
15use crate::dockerfile::{Dockerfile, DockerfileFromTarget, Instruction, RunMount, Stage};
16use crate::error::{BuildError, Result};
17use crate::tui::BuildEvent;
18
19use super::BuildBackend;
20
/// Tracks cache-hit observations for build instructions across one build.
///
/// Entries are keyed by an `(instruction cache key, base layer identifier)`
/// pair, so the same instruction applied on top of different base layers is
/// tracked independently.
#[derive(Debug, Default)]
struct LayerCacheTracker {
    // (instruction cache key, base layer id) -> whether it was a cache hit.
    known_layers: HashMap<(String, String), bool>,
}
35
36impl LayerCacheTracker {
37 fn new() -> Self {
38 Self::default()
39 }
40
41 #[allow(dead_code)]
42 fn is_cached(&self, instruction_key: &str, base_layer: &str) -> bool {
43 self.known_layers
44 .get(&(instruction_key.to_string(), base_layer.to_string()))
45 .copied()
46 .unwrap_or(false)
47 }
48
49 fn record(&mut self, instruction_key: String, base_layer: String, cached: bool) {
50 self.known_layers
51 .insert((instruction_key, base_layer), cached);
52 }
53
54 #[allow(dead_code, clippy::unused_self)]
55 fn detect_cache_hit(
56 &self,
57 _instruction: &Instruction,
58 _execution_time_ms: u64,
59 _output: &str,
60 ) -> bool {
61 false
63 }
64}
65
/// Build backend that executes Dockerfile stages by driving the `buildah`
/// command-line tool through a [`BuildahExecutor`].
pub struct BuildahBackend {
    // Runs individual buildah commands (checked, plain, and streaming modes).
    executor: BuildahExecutor,
}
74
75impl BuildahBackend {
76 pub async fn try_new() -> Result<Self> {
84 let executor = BuildahExecutor::new_async().await?;
85 if !executor.is_available().await {
86 return Err(crate::error::BuildError::BuildahNotFound {
87 message: "buildah is installed but not responding".into(),
88 });
89 }
90 Ok(Self { executor })
91 }
92
93 pub async fn new() -> Result<Self> {
99 let executor = BuildahExecutor::new_async().await?;
100 Ok(Self { executor })
101 }
102
    /// Wraps an existing executor — useful for tests or preconfigured setups.
    #[must_use]
    pub fn with_executor(executor: BuildahExecutor) -> Self {
        Self { executor }
    }
108
    /// Returns a reference to the underlying buildah executor.
    #[must_use]
    pub fn executor(&self) -> &BuildahExecutor {
        &self.executor
    }
114
115 #[allow(clippy::unused_self)]
121 fn resolve_stages<'a>(
122 &self,
123 dockerfile: &'a Dockerfile,
124 target: Option<&str>,
125 ) -> Result<Vec<&'a Stage>> {
126 if let Some(target) = target {
127 Self::resolve_target_stages(dockerfile, target)
128 } else {
129 Ok(dockerfile.stages.iter().collect())
130 }
131 }
132
133 fn resolve_target_stages<'a>(
135 dockerfile: &'a Dockerfile,
136 target: &str,
137 ) -> Result<Vec<&'a Stage>> {
138 let target_stage = dockerfile
139 .get_stage(target)
140 .ok_or_else(|| BuildError::stage_not_found(target))?;
141
142 let mut stages: Vec<&Stage> = Vec::new();
143 for stage in &dockerfile.stages {
144 stages.push(stage);
145 if stage.index == target_stage.index {
146 break;
147 }
148 }
149 Ok(stages)
150 }
151
152 async fn resolve_base_image(
158 &self,
159 image_ref: &DockerfileFromTarget,
160 stage_images: &HashMap<String, String>,
161 options: &BuildOptions,
162 ) -> Result<String> {
163 match image_ref {
164 DockerfileFromTarget::Stage(name) => {
165 return stage_images
166 .get(name)
167 .cloned()
168 .ok_or_else(|| BuildError::stage_not_found(name));
169 }
170 DockerfileFromTarget::Scratch => return Ok("scratch".to_string()),
171 DockerfileFromTarget::Image(_) => {}
172 }
173
174 let is_qualified = match image_ref {
176 DockerfileFromTarget::Image(r) => {
177 let repo = r.repository();
178 let first = repo.split('/').next().unwrap_or("");
179 first.contains('.') || first.contains(':') || first == "localhost"
180 }
181 _ => false,
182 };
183
184 if !is_qualified {
186 if let Some(resolved) = self.try_resolve_from_sources(image_ref, options).await {
187 return Ok(resolved);
188 }
189 }
190
191 match image_ref {
195 DockerfileFromTarget::Image(r) => {
196 let mut result = format!("{}/{}", r.registry(), r.repository());
197 if let Some(t) = r.tag() {
198 result.push(':');
199 result.push_str(t);
200 }
201 if let Some(d) = r.digest() {
202 result.push('@');
203 result.push_str(d);
204 }
205 if r.tag().is_none() && r.digest().is_none() {
206 result.push_str(":latest");
207 }
208 Ok(result)
209 }
210 _ => unreachable!("Stage and Scratch handled above"),
211 }
212 }
213
214 #[allow(clippy::unused_async)]
218 async fn try_resolve_from_sources(
219 &self,
220 image_ref: &DockerfileFromTarget,
221 options: &BuildOptions,
222 ) -> Option<String> {
223 let (name, tag_str) = match image_ref {
224 DockerfileFromTarget::Image(r) => (
225 r.repository().to_string(),
226 r.tag().unwrap_or("latest").to_string(),
227 ),
228 _ => return None,
229 };
230
231 if let Some(ref registry) = options.default_registry {
233 let qualified = format!("{registry}/{name}:{tag_str}");
234 debug!("Checking default registry for image: {}", qualified);
235 return Some(qualified);
236 }
237
238 None
239 }
240
241 async fn create_container(
243 &self,
244 image: &str,
245 platform: Option<&str>,
246 pull: PullBaseMode,
247 ) -> Result<String> {
248 let mut cmd = BuildahCommand::new("from").arg_opt("--platform", platform);
249
250 match pull {
251 PullBaseMode::Newer => cmd = cmd.arg("--pull=newer"),
252 PullBaseMode::Always => cmd = cmd.arg("--pull=always"),
253 PullBaseMode::Never => { }
254 }
255
256 cmd = cmd.arg(image);
257
258 let output = self.executor.execute_checked(&cmd).await?;
259 Ok(output.stdout.trim().to_string())
260 }
261
262 async fn commit_container(
264 &self,
265 container: &str,
266 image_name: &str,
267 format: Option<&str>,
268 squash: bool,
269 ) -> Result<String> {
270 let cmd = BuildahCommand::commit_with_opts(container, image_name, format, squash);
271 let output = self.executor.execute_checked(&cmd).await?;
272 Ok(output.stdout.trim().to_string())
273 }
274
275 async fn tag_image_internal(&self, image: &str, tag: &str) -> Result<()> {
277 let cmd = BuildahCommand::tag(image, tag);
278 self.executor.execute_checked(&cmd).await?;
279 Ok(())
280 }
281
282 async fn push_image_internal(&self, tag: &str, auth: Option<&RegistryAuth>) -> Result<()> {
284 let mut cmd = BuildahCommand::push(tag);
285 if let Some(auth) = auth {
286 cmd = cmd
287 .arg("--creds")
288 .arg(format!("{}:{}", auth.username, auth.password));
289 }
290 self.executor.execute_checked(&cmd).await?;
291 Ok(())
292 }
293
294 async fn pull_external_image(&self, image: &str, pull_mode: PullBaseMode) -> Result<()> {
308 let policy = match pull_mode {
309 PullBaseMode::Newer => Some("newer"),
310 PullBaseMode::Always => Some("always"),
311 PullBaseMode::Never => return Ok(()),
315 };
316
317 let cmd = BuildahCommand::pull(image, policy);
318 debug!("Pulling external COPY --from image: {}", image);
319 self.executor.execute_checked(&cmd).await?;
320 Ok(())
321 }
322
323 fn send_event(event_tx: Option<&mpsc::Sender<BuildEvent>>, event: BuildEvent) {
325 if let Some(tx) = event_tx {
326 let _ = tx.send(event);
327 }
328 }
329}
330
331#[async_trait::async_trait]
332impl BuildBackend for BuildahBackend {
    /// Builds `dockerfile` stage-by-stage by creating a buildah working
    /// container per stage, applying each instruction as one or more buildah
    /// commands, committing intermediate stage images, and finally committing
    /// and tagging the requested image.
    ///
    /// Progress and output are streamed over `event_tx` as [`BuildEvent`]s.
    /// `_context` is currently unused by this backend.
    ///
    /// # Errors
    ///
    /// Propagates stage-resolution failures, buildah command failures (after
    /// exhausting retries for RUN instructions), commit/tag/push failures,
    /// and returns `InvalidInstruction` when no stage produced a final
    /// container.
    #[allow(clippy::too_many_lines)]
    async fn build_image(
        &self,
        _context: &Path,
        dockerfile: &Dockerfile,
        options: &BuildOptions,
        event_tx: Option<mpsc::Sender<BuildEvent>>,
    ) -> Result<BuiltImage> {
        let start_time = std::time::Instant::now();
        let build_id = generate_build_id();

        debug!(
            "BuildahBackend: starting build (build_id: {}, {} stages)",
            build_id,
            dockerfile.stages.len()
        );

        // Honor --target: only the stages up to (and including) it are built.
        let stages = self.resolve_stages(dockerfile, options.target.as_deref())?;
        debug!("Building {} stages", stages.len());

        let total_instructions_planned: usize = stages.iter().map(|s| s.instructions.len()).sum();
        Self::send_event(
            event_tx.as_ref(),
            BuildEvent::BuildStarted {
                total_stages: stages.len(),
                total_instructions: total_instructions_planned,
            },
        );

        // Committed image name per stage, keyed by BOTH stage name and stage
        // index (both keys are inserted below) so later `COPY --from` can use
        // either form.
        let mut stage_images: HashMap<String, String> = HashMap::new();
        // Last WORKDIR per stage, used to absolutize relative COPY sources.
        let mut stage_workdirs: HashMap<String, String> = HashMap::new();
        // Dedupe set so an external `COPY --from=<image>` is pulled once.
        let mut pulled_external_images: std::collections::HashSet<String> =
            std::collections::HashSet::new();
        let mut final_container: Option<String> = None;
        let mut total_instructions = 0;

        let mut cache_tracker = LayerCacheTracker::new();

        for (stage_idx, stage) in stages.iter().enumerate() {
            let is_final_stage = stage_idx == stages.len() - 1;

            Self::send_event(
                event_tx.as_ref(),
                BuildEvent::StageStarted {
                    index: stage_idx,
                    name: stage.name.clone(),
                    base_image: stage.base_image.to_string(),
                },
            );

            // Resolve the FROM target (stage ref / scratch / image) and start
            // a fresh working container for this stage.
            let base = self
                .resolve_base_image(&stage.base_image, &stage_images, options)
                .await?;
            let container_id = self
                .create_container(&base, options.platform.as_deref(), options.pull)
                .await?;

            debug!(
                "Created container {} for stage {} (base: {})",
                container_id,
                stage.identifier(),
                base
            );

            let mut current_base_layer = container_id.clone();

            // Stages based on an earlier stage inherit its final WORKDIR;
            // everything else starts at "/".
            let mut current_workdir = match &stage.base_image {
                DockerfileFromTarget::Stage(name) => stage_workdirs
                    .get(name)
                    .cloned()
                    .unwrap_or_else(|| String::from("/")),
                _ => String::from("/"),
            };

            for (inst_idx, instruction) in stage.instructions.iter().enumerate() {
                Self::send_event(
                    event_tx.as_ref(),
                    BuildEvent::InstructionStarted {
                        stage: stage_idx,
                        index: inst_idx,
                        instruction: format!("{instruction:?}"),
                    },
                );

                let instruction_cache_key = instruction.cache_key();
                let instruction_start = std::time::Instant::now();

                // Rewrite `COPY --from`: a reference to an earlier stage is
                // replaced with that stage's committed image name, and its
                // relative sources are absolutized against the source stage's
                // WORKDIR. A reference to an external image is pulled (once).
                let resolved_instruction;
                let instruction_ref = if let Instruction::Copy(copy) = instruction {
                    if let Some(ref from) = copy.from {
                        if let Some(image_name) = stage_images.get(from) {
                            let mut resolved_copy = copy.clone();
                            resolved_copy.from = Some(image_name.clone());

                            if let Some(source_workdir) = stage_workdirs.get(from) {
                                resolved_copy.sources = resolved_copy
                                    .sources
                                    .iter()
                                    .map(|src| {
                                        if src.starts_with('/') {
                                            src.clone()
                                        } else if source_workdir == "/" {
                                            format!("/{src}")
                                        } else {
                                            format!("{source_workdir}/{src}")
                                        }
                                    })
                                    .collect();
                            }

                            resolved_instruction = Instruction::Copy(resolved_copy);
                            &resolved_instruction
                        } else {
                            if !pulled_external_images.contains(from) {
                                self.pull_external_image(from, options.pull).await?;
                                pulled_external_images.insert(from.clone());
                            }
                            instruction
                        }
                    } else {
                        instruction
                    }
                } else {
                    instruction
                };

                // Merge configured default cache mounts into RUN instructions,
                // skipping targets the instruction already mounts itself.
                let instruction_with_defaults;
                let instruction_ref = if options.default_cache_mounts.is_empty() {
                    instruction_ref
                } else if let Instruction::Run(run) = instruction_ref {
                    let mut merged = run.clone();
                    for default_mount in &options.default_cache_mounts {
                        let RunMount::Cache { target, .. } = default_mount else {
                            continue;
                        };
                        let already_has = merged
                            .mounts
                            .iter()
                            .any(|m| matches!(m, RunMount::Cache { target: t, .. } if t == target));
                        if !already_has {
                            merged.mounts.push(default_mount.clone());
                        }
                    }
                    instruction_with_defaults = Instruction::Run(merged);
                    &instruction_with_defaults
                } else {
                    instruction_ref
                };

                // Only RUN instructions are retried; everything else gets a
                // single attempt.
                let is_run_instruction = matches!(instruction_ref, Instruction::Run(_));
                let max_attempts = if is_run_instruction {
                    options.retries + 1
                } else {
                    1
                };

                // One Dockerfile instruction may expand to several buildah
                // commands; run each, streaming output as events.
                let commands = BuildahCommand::from_instruction(&container_id, instruction_ref);

                let mut combined_output = String::new();
                for cmd in commands {
                    let mut last_output = None;

                    for attempt in 1..=max_attempts {
                        if attempt > 1 {
                            tracing::warn!(
                                "Retrying step (attempt {}/{})...",
                                attempt,
                                max_attempts
                            );
                            Self::send_event(
                                event_tx.as_ref(),
                                BuildEvent::Output {
                                    line: format!(
                                        "⟳ Retrying step (attempt {attempt}/{max_attempts})..."
                                    ),
                                    is_stderr: false,
                                },
                            );
                            // Fixed 3s back-off between retry attempts.
                            tokio::time::sleep(std::time::Duration::from_secs(3)).await;
                        }

                        let event_tx_clone = event_tx.clone();
                        let output = self
                            .executor
                            .execute_streaming(&cmd, |is_stdout, line| {
                                Self::send_event(
                                    event_tx_clone.as_ref(),
                                    BuildEvent::Output {
                                        line: line.to_string(),
                                        is_stderr: !is_stdout,
                                    },
                                );
                            })
                            .await?;

                        combined_output.push_str(&output.stdout);
                        combined_output.push_str(&output.stderr);

                        if output.success() {
                            last_output = Some(output);
                            break;
                        }

                        last_output = Some(output);
                    }

                    // Safe: the retry loop runs at least once (max_attempts >= 1),
                    // so last_output is always Some here.
                    let output = last_output.unwrap();
                    if !output.success() {
                        Self::send_event(
                            event_tx.as_ref(),
                            BuildEvent::BuildFailed {
                                error: output.stderr.clone(),
                            },
                        );

                        // Best-effort cleanup of the working container before
                        // surfacing the failure; the rm result is ignored.
                        let _ = self
                            .executor
                            .execute(&BuildahCommand::rm(&container_id))
                            .await;

                        return Err(BuildError::buildah_execution(
                            cmd.to_command_string(),
                            output.exit_code,
                            output.stderr,
                        ));
                    }
                }

                #[allow(clippy::cast_possible_truncation)]
                let instruction_elapsed_ms = instruction_start.elapsed().as_millis() as u64;

                // Track WORKDIR changes so later stages / COPY --from can
                // absolutize relative paths correctly.
                if let Instruction::Workdir(dir) = instruction {
                    current_workdir.clone_from(dir);
                }

                // NOTE: detect_cache_hit is currently a stub that always
                // returns false, so `cached` is always false here.
                let cached = cache_tracker.detect_cache_hit(
                    instruction,
                    instruction_elapsed_ms,
                    &combined_output,
                );

                cache_tracker.record(
                    instruction_cache_key.clone(),
                    current_base_layer.clone(),
                    cached,
                );

                // Chain the cache key so each layer's identity depends on all
                // preceding instructions.
                current_base_layer = format!("{current_base_layer}:{instruction_cache_key}");

                Self::send_event(
                    event_tx.as_ref(),
                    BuildEvent::InstructionComplete {
                        stage: stage_idx,
                        index: inst_idx,
                        cached,
                    },
                );

                total_instructions += 1;
            }

            // Commit the stage image (named stages are registered under both
            // their name and their index); keep the final stage's container
            // alive for the final commit, remove the others best-effort.
            if let Some(name) = &stage.name {
                let image_name = format!("zlayer-build-{build_id}-stage-{name}");
                self.commit_container(&container_id, &image_name, options.format.as_deref(), false)
                    .await?;
                stage_images.insert(name.clone(), image_name.clone());
                stage_workdirs.insert(name.clone(), current_workdir.clone());

                stage_images.insert(stage.index.to_string(), image_name.clone());
                stage_workdirs.insert(stage.index.to_string(), current_workdir.clone());

                if is_final_stage {
                    final_container = Some(container_id);
                } else {
                    let _ = self
                        .executor
                        .execute(&BuildahCommand::rm(&container_id))
                        .await;
                }
            } else if is_final_stage {
                // Unnamed final stage: no intermediate commit needed.
                final_container = Some(container_id);
            } else {
                let image_name = format!("zlayer-build-{}-stage-{}", build_id, stage.index);
                self.commit_container(&container_id, &image_name, options.format.as_deref(), false)
                    .await?;
                stage_images.insert(stage.index.to_string(), image_name);
                stage_workdirs.insert(stage.index.to_string(), current_workdir.clone());
                let _ = self
                    .executor
                    .execute(&BuildahCommand::rm(&container_id))
                    .await;
            }

            Self::send_event(
                event_tx.as_ref(),
                BuildEvent::StageComplete { index: stage_idx },
            );
        }

        let final_container = final_container.ok_or_else(|| BuildError::InvalidInstruction {
            instruction: "build".to_string(),
            reason: "No stages to build".to_string(),
        })?;

        // First tag names the image; remaining tags are applied afterwards.
        let image_name = options
            .tags
            .first()
            .cloned()
            .unwrap_or_else(|| format!("zlayer-build:{}", chrono_lite_timestamp()));

        let image_id = self
            .commit_container(
                &final_container,
                &image_name,
                options.format.as_deref(),
                options.squash,
            )
            .await?;

        info!("Committed final image: {} ({})", image_name, image_id);

        for tag in options.tags.iter().skip(1) {
            self.tag_image_internal(&image_id, tag).await?;
            debug!("Applied tag: {}", tag);
        }

        // Best-effort cleanup: remove the final working container and all
        // intermediate stage images (duplicate name/index entries make the
        // second rmi a no-op failure, which is ignored).
        let _ = self
            .executor
            .execute(&BuildahCommand::rm(&final_container))
            .await;

        for (_, img) in stage_images {
            let _ = self.executor.execute(&BuildahCommand::rmi(&img)).await;
        }

        if options.push {
            for tag in &options.tags {
                self.push_image_internal(tag, options.registry_auth.as_ref())
                    .await?;
                info!("Pushed image: {}", tag);
            }
        }

        #[allow(clippy::cast_possible_truncation)]
        let build_time_ms = start_time.elapsed().as_millis() as u64;

        Self::send_event(
            event_tx.as_ref(),
            BuildEvent::BuildComplete {
                image_id: image_id.clone(),
            },
        );

        info!(
            "Build completed in {}ms: {} with {} tags",
            build_time_ms,
            image_id,
            options.tags.len()
        );

        Ok(BuiltImage {
            image_id,
            tags: options.tags.clone(),
            // size is not computed by this backend — reported as 0.
            layer_count: total_instructions,
            size: 0,
            build_time_ms,
            is_manifest: false,
        })
    }
751
    /// Pushes `tag` to its registry, delegating to the internal helper
    /// (optionally authenticating with the given credentials).
    async fn push_image(&self, tag: &str, auth: Option<&RegistryAuth>) -> Result<()> {
        self.push_image_internal(tag, auth).await
    }
755
    /// Applies `new_tag` to `image`, delegating to the internal helper.
    async fn tag_image(&self, image: &str, new_tag: &str) -> Result<()> {
        self.tag_image_internal(image, new_tag).await
    }
759
760 async fn manifest_create(&self, name: &str) -> Result<()> {
761 let cmd = BuildahCommand::manifest_create(name);
762 self.executor.execute_checked(&cmd).await?;
763 Ok(())
764 }
765
766 async fn manifest_add(&self, manifest: &str, image: &str) -> Result<()> {
767 let cmd = BuildahCommand::manifest_add(manifest, image);
768 self.executor.execute_checked(&cmd).await?;
769 Ok(())
770 }
771
772 async fn manifest_push(&self, name: &str, destination: &str) -> Result<()> {
773 let cmd = BuildahCommand::manifest_push(name, destination);
774 self.executor.execute_checked(&cmd).await?;
775 Ok(())
776 }
777
    /// Reports whether the underlying buildah executor responds.
    async fn is_available(&self) -> bool {
        self.executor.is_available().await
    }
781
    /// Human-readable backend identifier.
    fn name(&self) -> &'static str {
        "buildah"
    }
785}
786
/// Returns the current Unix timestamp (whole seconds) as a decimal string.
///
/// A system clock set before the Unix epoch yields `"0"` rather than
/// panicking, via `unwrap_or_default`.
fn chrono_lite_timestamp() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
        // `.to_string()` is the idiomatic form of `format!("{}", x)`.
        .to_string()
}
798
799fn generate_build_id() -> String {
801 use sha2::{Digest, Sha256};
802 use std::time::{SystemTime, UNIX_EPOCH};
803
804 static COUNTER: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);
805
806 let nanos = SystemTime::now()
807 .duration_since(UNIX_EPOCH)
808 .unwrap_or_default()
809 .as_nanos();
810 let pid = std::process::id();
811 let count = COUNTER.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
812
813 let mut hasher = Sha256::new();
814 hasher.update(nanos.to_le_bytes());
815 hasher.update(pid.to_le_bytes());
816 hasher.update(count.to_le_bytes());
817 let hash = hasher.finalize();
818 hex::encode(&hash[..6])
819}
820
#[cfg(test)]
mod tests {
    use super::*;

    /// A freshly constructed tracker starts empty.
    #[test]
    fn test_layer_cache_tracker_new() {
        assert!(LayerCacheTracker::new().known_layers.is_empty());
    }

    /// Recorded cache flags come back verbatim on lookup.
    #[test]
    fn test_layer_cache_tracker_record_and_lookup() {
        let mut cache = LayerCacheTracker::new();

        cache.record("abc123".to_string(), "container-1".to_string(), false);
        assert!(!cache.is_cached("abc123", "container-1"));

        cache.record("def456".to_string(), "container-2".to_string(), true);
        assert!(cache.is_cached("def456", "container-2"));
    }

    /// Pairs that were never recorded report a miss.
    #[test]
    fn test_layer_cache_tracker_unknown_returns_false() {
        assert!(!LayerCacheTracker::new().is_cached("unknown", "unknown"));
    }

    /// The same instruction key is tracked independently per base layer.
    #[test]
    fn test_layer_cache_tracker_different_base_layers() {
        let mut cache = LayerCacheTracker::new();

        cache.record("inst-1".to_string(), "base-a".to_string(), true);
        cache.record("inst-1".to_string(), "base-b".to_string(), false);

        assert!(cache.is_cached("inst-1", "base-a"));
        assert!(!cache.is_cached("inst-1", "base-b"));
    }

    /// The placeholder heuristic never reports a hit, whatever the inputs.
    #[test]
    fn test_layer_cache_tracker_detect_cache_hit() {
        use crate::dockerfile::RunInstruction;

        let cache = LayerCacheTracker::new();
        let instruction = Instruction::Run(RunInstruction::shell("echo hello"));

        assert!(!cache.detect_cache_hit(&instruction, 50, ""));
        assert!(!cache.detect_cache_hit(&instruction, 1000, ""));
        assert!(!cache.detect_cache_hit(&instruction, 50, "Using cache"));
    }

    /// Re-recording the same pair overwrites the previous flag.
    #[test]
    fn test_layer_cache_tracker_overwrite() {
        let mut cache = LayerCacheTracker::new();

        cache.record("key".to_string(), "base".to_string(), false);
        assert!(!cache.is_cached("key", "base"));

        cache.record("key".to_string(), "base".to_string(), true);
        assert!(cache.is_cached("key", "base"));
    }
}
881}