1use std::collections::HashMap;
8use std::path::Path;
9use std::sync::mpsc;
10
11use tracing::{debug, info};
12
13use crate::buildah::{BuildahCommand, BuildahExecutor};
14use crate::builder::{BuildOptions, BuiltImage, PullBaseMode, RegistryAuth};
15use crate::dockerfile::{Dockerfile, DockerfileFromTarget, Instruction, RunMount, Stage};
16use crate::error::{BuildError, Result};
17use crate::tui::BuildEvent;
18
19use super::BuildBackend;
20
/// Tracks, per build, which (instruction, base layer) pairs were observed as
/// layer-cache hits.
///
/// Currently only bookkeeping: `detect_cache_hit` is a stub that always
/// reports a miss, so entries recorded here are all `false` in practice.
#[derive(Debug, Default)]
struct LayerCacheTracker {
    // Key: (instruction cache key, synthetic base-layer id).
    // Value: whether that instruction was a cache hit on that base layer.
    known_layers: HashMap<(String, String), bool>,
}
35
36impl LayerCacheTracker {
37 fn new() -> Self {
38 Self::default()
39 }
40
41 #[allow(dead_code)]
42 fn is_cached(&self, instruction_key: &str, base_layer: &str) -> bool {
43 self.known_layers
44 .get(&(instruction_key.to_string(), base_layer.to_string()))
45 .copied()
46 .unwrap_or(false)
47 }
48
49 fn record(&mut self, instruction_key: String, base_layer: String, cached: bool) {
50 self.known_layers
51 .insert((instruction_key, base_layer), cached);
52 }
53
54 #[allow(dead_code, clippy::unused_self)]
55 fn detect_cache_hit(
56 &self,
57 _instruction: &Instruction,
58 _execution_time_ms: u64,
59 _output: &str,
60 ) -> bool {
61 false
63 }
64}
65
/// Build backend that executes Dockerfile instructions by shelling out to the
/// `buildah` CLI through a [`BuildahExecutor`].
pub struct BuildahBackend {
    // Spawns and checks `buildah` subcommands (sync, checked, and streaming).
    executor: BuildahExecutor,
}
74
impl BuildahBackend {
    /// Creates a backend and verifies that `buildah` actually responds.
    ///
    /// # Errors
    /// Returns `BuildahNotFound` when the availability probe fails, or
    /// propagates any error from constructing the executor.
    pub async fn try_new() -> Result<Self> {
        let executor = BuildahExecutor::new_async().await?;
        if !executor.is_available().await {
            return Err(crate::error::BuildError::BuildahNotFound {
                message: "buildah is installed but not responding".into(),
            });
        }
        Ok(Self { executor })
    }

    /// Creates a backend without probing availability (see `try_new` for the
    /// checked variant).
    ///
    /// # Errors
    /// Propagates executor construction errors.
    pub async fn new() -> Result<Self> {
        let executor = BuildahExecutor::new_async().await?;
        Ok(Self { executor })
    }

    /// Wraps a pre-configured executor (useful for tests and injection).
    #[must_use]
    pub fn with_executor(executor: BuildahExecutor) -> Self {
        Self { executor }
    }

    /// Read-only access to the underlying command executor.
    #[must_use]
    pub fn executor(&self) -> &BuildahExecutor {
        &self.executor
    }

    /// Selects the stages to build: every stage, or — when `target` is set —
    /// each stage up to and including the target.
    #[allow(clippy::unused_self)]
    fn resolve_stages<'a>(
        &self,
        dockerfile: &'a Dockerfile,
        target: Option<&str>,
    ) -> Result<Vec<&'a Stage>> {
        if let Some(target) = target {
            Self::resolve_target_stages(dockerfile, target)
        } else {
            Ok(dockerfile.stages.iter().collect())
        }
    }

    /// Returns all stages up to and including `target`, in file order.
    ///
    /// NOTE(review): this includes earlier stages the target may not depend
    /// on (classic-builder semantics, not BuildKit-style pruning) — confirm
    /// that is intended.
    ///
    /// # Errors
    /// Fails with `stage_not_found` when no stage is named `target`.
    fn resolve_target_stages<'a>(
        dockerfile: &'a Dockerfile,
        target: &str,
    ) -> Result<Vec<&'a Stage>> {
        let target_stage = dockerfile
            .get_stage(target)
            .ok_or_else(|| BuildError::stage_not_found(target))?;

        let mut stages: Vec<&Stage> = Vec::new();
        for stage in &dockerfile.stages {
            stages.push(stage);
            if stage.index == target_stage.index {
                break;
            }
        }
        Ok(stages)
    }

    /// Resolves a Dockerfile `FROM` reference into a concrete image string.
    ///
    /// Stage references map to previously committed stage images, `scratch`
    /// passes through unchanged, and plain image references are qualified
    /// with a registry (consulting `options.default_registry` for
    /// unqualified names) and given `:latest` when neither tag nor digest is
    /// present.
    ///
    /// # Errors
    /// Fails with `stage_not_found` when a stage reference has no committed
    /// image recorded in `stage_images`.
    async fn resolve_base_image(
        &self,
        image_ref: &DockerfileFromTarget,
        stage_images: &HashMap<String, String>,
        options: &BuildOptions,
    ) -> Result<String> {
        // Stage references and `scratch` short-circuit; only plain image
        // references fall through to the qualification logic below.
        match image_ref {
            DockerfileFromTarget::Stage(name) => {
                return stage_images
                    .get(name)
                    .cloned()
                    .ok_or_else(|| BuildError::stage_not_found(name));
            }
            DockerfileFromTarget::Scratch => return Ok("scratch".to_string()),
            DockerfileFromTarget::Image(_) => {}
        }

        // A reference counts as registry-qualified when its first path
        // component looks like a host: contains a dot (domain), a colon
        // (port), or is the literal `localhost`.
        let is_qualified = match image_ref {
            DockerfileFromTarget::Image(r) => {
                let repo = r.repository();
                let first = repo.split('/').next().unwrap_or("");
                first.contains('.') || first.contains(':') || first == "localhost"
            }
            _ => false,
        };

        if !is_qualified {
            // Unqualified names may resolve against a configured default
            // registry first.
            if let Some(resolved) = self.try_resolve_from_sources(image_ref, options).await {
                return Ok(resolved);
            }
        }

        // Fall back to `<registry>/<repository>` plus tag and/or digest; a
        // bare name with neither gets `:latest`.
        match image_ref {
            DockerfileFromTarget::Image(r) => {
                let mut result = format!("{}/{}", r.registry(), r.repository());
                if let Some(t) = r.tag() {
                    result.push(':');
                    result.push_str(t);
                }
                if let Some(d) = r.digest() {
                    result.push('@');
                    result.push_str(d);
                }
                if r.tag().is_none() && r.digest().is_none() {
                    result.push_str(":latest");
                }
                Ok(result)
            }
            _ => unreachable!("Stage and Scratch handled above"),
        }
    }

    /// Attempts to qualify an unqualified image name against
    /// `options.default_registry`; returns `None` for stage/scratch refs or
    /// when no default registry is configured.
    ///
    /// NOTE(review): a digest on the reference is dropped here — the result
    /// is always `registry/name:tag`. Confirm digest-pinned bases should
    /// bypass this path.
    #[allow(clippy::unused_async)]
    async fn try_resolve_from_sources(
        &self,
        image_ref: &DockerfileFromTarget,
        options: &BuildOptions,
    ) -> Option<String> {
        let (name, tag_str) = match image_ref {
            DockerfileFromTarget::Image(r) => (
                r.repository().to_string(),
                r.tag().unwrap_or("latest").to_string(),
            ),
            _ => return None,
        };

        if let Some(ref registry) = options.default_registry {
            let qualified = format!("{registry}/{name}:{tag_str}");
            debug!("Checking default registry for image: {}", qualified);
            return Some(qualified);
        }

        None
    }

    /// Runs `buildah from` to create a working container for `image`,
    /// returning the container id printed on stdout.
    ///
    /// NOTE(review): `PullBaseMode::Never` adds no `--pull` flag, so
    /// buildah's own default pull behavior applies — confirm that matches
    /// the intended "never" semantics.
    ///
    /// # Errors
    /// Propagates command execution failures.
    async fn create_container(
        &self,
        image: &str,
        platform: Option<&str>,
        pull: PullBaseMode,
    ) -> Result<String> {
        let mut cmd = BuildahCommand::new("from").arg_opt("--platform", platform);

        match pull {
            PullBaseMode::Newer => cmd = cmd.arg("--pull=newer"),
            PullBaseMode::Always => cmd = cmd.arg("--pull=always"),
            PullBaseMode::Never => { }
        }

        cmd = cmd.arg(image);

        let output = self.executor.execute_checked(&cmd).await?;
        Ok(output.stdout.trim().to_string())
    }

    /// Commits `container` as `image_name`, returning the new image id.
    ///
    /// # Errors
    /// Propagates command execution failures.
    async fn commit_container(
        &self,
        container: &str,
        image_name: &str,
        format: Option<&str>,
        squash: bool,
    ) -> Result<String> {
        let cmd = BuildahCommand::commit_with_opts(container, image_name, format, squash);
        let output = self.executor.execute_checked(&cmd).await?;
        Ok(output.stdout.trim().to_string())
    }

    /// Applies `tag` to `image` via `buildah tag`.
    ///
    /// # Errors
    /// Propagates command execution failures.
    async fn tag_image_internal(&self, image: &str, tag: &str) -> Result<()> {
        let cmd = BuildahCommand::tag(image, tag);
        self.executor.execute_checked(&cmd).await?;
        Ok(())
    }

    /// Pushes `tag`, adding `--creds user:password` when auth is provided.
    ///
    /// NOTE(review): credentials appear on the buildah command line (visible
    /// in the process list); consider an auth-file mechanism if the executor
    /// supports one.
    ///
    /// # Errors
    /// Propagates command execution failures.
    async fn push_image_internal(&self, tag: &str, auth: Option<&RegistryAuth>) -> Result<()> {
        let mut cmd = BuildahCommand::push(tag);
        if let Some(auth) = auth {
            cmd = cmd
                .arg("--creds")
                .arg(format!("{}:{}", auth.username, auth.password));
        }
        self.executor.execute_checked(&cmd).await?;
        Ok(())
    }

    /// Best-effort event emission: send failures (e.g. dropped receiver) are
    /// deliberately ignored so a dead TUI never aborts the build.
    fn send_event(event_tx: Option<&mpsc::Sender<BuildEvent>>, event: BuildEvent) {
        if let Some(tx) = event_tx {
            let _ = tx.send(event);
        }
    }
}
301
#[async_trait::async_trait]
impl BuildBackend for BuildahBackend {
    /// Runs the full multi-stage build: creates a working container per
    /// stage, applies each instruction via `buildah` subcommands, commits
    /// intermediate stages so later stages can reference them, and finally
    /// commits, tags, and optionally pushes the resulting image.
    ///
    /// Progress is reported through `event_tx` (best-effort; send failures
    /// are ignored). The build `_context` path is unused by this backend.
    ///
    /// # Errors
    /// Fails on unresolvable stage targets, on any non-retryable command
    /// failure (after cleaning up the working container), and on commit,
    /// tag, or push failures.
    #[allow(clippy::too_many_lines)]
    async fn build_image(
        &self,
        _context: &Path,
        dockerfile: &Dockerfile,
        options: &BuildOptions,
        event_tx: Option<mpsc::Sender<BuildEvent>>,
    ) -> Result<BuiltImage> {
        let start_time = std::time::Instant::now();
        let build_id = generate_build_id();

        debug!(
            "BuildahBackend: starting build (build_id: {}, {} stages)",
            build_id,
            dockerfile.stages.len()
        );

        // Honor `--target`: build only the stages up to the requested one.
        let stages = self.resolve_stages(dockerfile, options.target.as_deref())?;
        debug!("Building {} stages", stages.len());

        let total_instructions_planned: usize = stages.iter().map(|s| s.instructions.len()).sum();
        Self::send_event(
            event_tx.as_ref(),
            BuildEvent::BuildStarted {
                total_stages: stages.len(),
                total_instructions: total_instructions_planned,
            },
        );

        // Stage name (and index-as-string) -> committed image name; used to
        // resolve `FROM <stage>` and `COPY --from=<stage>` references.
        let mut stage_images: HashMap<String, String> = HashMap::new();
        // Stage name/index -> last WORKDIR of that stage, so COPY --from can
        // absolutize relative sources against the source stage's workdir.
        let mut stage_workdirs: HashMap<String, String> = HashMap::new();
        let mut final_container: Option<String> = None;
        let mut total_instructions = 0;

        let mut cache_tracker = LayerCacheTracker::new();

        for (stage_idx, stage) in stages.iter().enumerate() {
            let is_final_stage = stage_idx == stages.len() - 1;

            Self::send_event(
                event_tx.as_ref(),
                BuildEvent::StageStarted {
                    index: stage_idx,
                    name: stage.name.clone(),
                    base_image: stage.base_image.to_string(),
                },
            );

            // `buildah from` the (possibly stage-derived) base image.
            let base = self
                .resolve_base_image(&stage.base_image, &stage_images, options)
                .await?;
            let container_id = self
                .create_container(&base, options.platform.as_deref(), options.pull)
                .await?;

            debug!(
                "Created container {} for stage {} (base: {})",
                container_id,
                stage.identifier(),
                base
            );

            // Synthetic "layer id" chain used only as a cache-tracker key.
            let mut current_base_layer = container_id.clone();

            // Inherit the workdir from a parent stage, defaulting to `/`.
            let mut current_workdir = match &stage.base_image {
                DockerfileFromTarget::Stage(name) => stage_workdirs
                    .get(name)
                    .cloned()
                    .unwrap_or_else(|| String::from("/")),
                _ => String::from("/"),
            };

            for (inst_idx, instruction) in stage.instructions.iter().enumerate() {
                Self::send_event(
                    event_tx.as_ref(),
                    BuildEvent::InstructionStarted {
                        stage: stage_idx,
                        index: inst_idx,
                        instruction: format!("{instruction:?}"),
                    },
                );

                let instruction_cache_key = instruction.cache_key();
                let instruction_start = std::time::Instant::now();

                // Rewrite `COPY --from=<stage>` so the stage reference
                // becomes the committed stage image, and make relative
                // sources absolute against that stage's recorded workdir.
                let resolved_instruction;
                let instruction_ref = if let Instruction::Copy(copy) = instruction {
                    if let Some(ref from) = copy.from {
                        if let Some(image_name) = stage_images.get(from) {
                            let mut resolved_copy = copy.clone();
                            resolved_copy.from = Some(image_name.clone());

                            if let Some(source_workdir) = stage_workdirs.get(from) {
                                resolved_copy.sources = resolved_copy
                                    .sources
                                    .iter()
                                    .map(|src| {
                                        if src.starts_with('/') {
                                            src.clone()
                                        } else if source_workdir == "/" {
                                            format!("/{src}")
                                        } else {
                                            format!("{source_workdir}/{src}")
                                        }
                                    })
                                    .collect();
                            }

                            resolved_instruction = Instruction::Copy(resolved_copy);
                            &resolved_instruction
                        } else {
                            instruction
                        }
                    } else {
                        instruction
                    }
                } else {
                    instruction
                };

                // Merge configured default cache mounts into RUN
                // instructions, skipping targets the instruction already
                // mounts explicitly.
                let instruction_with_defaults;
                let instruction_ref = if options.default_cache_mounts.is_empty() {
                    instruction_ref
                } else if let Instruction::Run(run) = instruction_ref {
                    let mut merged = run.clone();
                    for default_mount in &options.default_cache_mounts {
                        let RunMount::Cache { target, .. } = default_mount else {
                            continue;
                        };
                        let already_has = merged
                            .mounts
                            .iter()
                            .any(|m| matches!(m, RunMount::Cache { target: t, .. } if t == target));
                        if !already_has {
                            merged.mounts.push(default_mount.clone());
                        }
                    }
                    instruction_with_defaults = Instruction::Run(merged);
                    &instruction_with_defaults
                } else {
                    instruction_ref
                };

                // Only RUN instructions are retried; everything else gets a
                // single attempt.
                let is_run_instruction = matches!(instruction_ref, Instruction::Run(_));
                let max_attempts = if is_run_instruction {
                    options.retries + 1
                } else {
                    1
                };

                let commands = BuildahCommand::from_instruction(&container_id, instruction_ref);

                let mut combined_output = String::new();
                for cmd in commands {
                    let mut last_output = None;

                    for attempt in 1..=max_attempts {
                        if attempt > 1 {
                            tracing::warn!(
                                "Retrying step (attempt {}/{})...",
                                attempt,
                                max_attempts
                            );
                            Self::send_event(
                                event_tx.as_ref(),
                                BuildEvent::Output {
                                    line: format!(
                                        "⟳ Retrying step (attempt {attempt}/{max_attempts})..."
                                    ),
                                    is_stderr: false,
                                },
                            );
                            // Fixed 3s backoff between retry attempts.
                            tokio::time::sleep(std::time::Duration::from_secs(3)).await;
                        }

                        // Stream stdout/stderr lines to the TUI as they
                        // arrive.
                        let event_tx_clone = event_tx.clone();
                        let output = self
                            .executor
                            .execute_streaming(&cmd, |is_stdout, line| {
                                Self::send_event(
                                    event_tx_clone.as_ref(),
                                    BuildEvent::Output {
                                        line: line.to_string(),
                                        is_stderr: !is_stdout,
                                    },
                                );
                            })
                            .await?;

                        combined_output.push_str(&output.stdout);
                        combined_output.push_str(&output.stderr);

                        if output.success() {
                            last_output = Some(output);
                            break;
                        }

                        last_output = Some(output);
                    }

                    // Safe: max_attempts >= 1, so the loop ran at least once.
                    let output = last_output.unwrap();
                    if !output.success() {
                        Self::send_event(
                            event_tx.as_ref(),
                            BuildEvent::BuildFailed {
                                error: output.stderr.clone(),
                            },
                        );

                        // Best-effort cleanup of the working container before
                        // surfacing the failure.
                        let _ = self
                            .executor
                            .execute(&BuildahCommand::rm(&container_id))
                            .await;

                        return Err(BuildError::buildah_execution(
                            cmd.to_command_string(),
                            output.exit_code,
                            output.stderr,
                        ));
                    }
                }

                #[allow(clippy::cast_possible_truncation)]
                let instruction_elapsed_ms = instruction_start.elapsed().as_millis() as u64;

                // Track WORKDIR changes for later COPY --from resolution.
                if let Instruction::Workdir(dir) = instruction {
                    current_workdir.clone_from(dir);
                }

                // Cache detection is currently a stub (always a miss); the
                // bookkeeping keeps the event-stream shape stable.
                let cached = cache_tracker.detect_cache_hit(
                    instruction,
                    instruction_elapsed_ms,
                    &combined_output,
                );

                cache_tracker.record(
                    instruction_cache_key.clone(),
                    current_base_layer.clone(),
                    cached,
                );

                current_base_layer = format!("{current_base_layer}:{instruction_cache_key}");

                Self::send_event(
                    event_tx.as_ref(),
                    BuildEvent::InstructionComplete {
                        stage: stage_idx,
                        index: inst_idx,
                        cached,
                    },
                );

                total_instructions += 1;
            }

            if let Some(name) = &stage.name {
                // Named stage: commit it so later stages can reference it by
                // name or by positional index.
                let image_name = format!("zlayer-build-{build_id}-stage-{name}");
                self.commit_container(&container_id, &image_name, options.format.as_deref(), false)
                    .await?;
                stage_images.insert(name.clone(), image_name.clone());
                stage_workdirs.insert(name.clone(), current_workdir.clone());

                stage_images.insert(stage.index.to_string(), image_name.clone());
                stage_workdirs.insert(stage.index.to_string(), current_workdir.clone());

                if is_final_stage {
                    final_container = Some(container_id);
                } else {
                    let _ = self
                        .executor
                        .execute(&BuildahCommand::rm(&container_id))
                        .await;
                }
            } else if is_final_stage {
                // Unnamed final stage: keep the container for the final
                // commit below.
                final_container = Some(container_id);
            } else {
                // Unnamed intermediate stage: commit under its index only.
                let image_name = format!("zlayer-build-{}-stage-{}", build_id, stage.index);
                self.commit_container(&container_id, &image_name, options.format.as_deref(), false)
                    .await?;
                stage_images.insert(stage.index.to_string(), image_name);
                stage_workdirs.insert(stage.index.to_string(), current_workdir.clone());
                let _ = self
                    .executor
                    .execute(&BuildahCommand::rm(&container_id))
                    .await;
            }

            Self::send_event(
                event_tx.as_ref(),
                BuildEvent::StageComplete { index: stage_idx },
            );
        }

        let final_container = final_container.ok_or_else(|| BuildError::InvalidInstruction {
            instruction: "build".to_string(),
            reason: "No stages to build".to_string(),
        })?;

        // First tag names the image; fall back to a timestamped default.
        let image_name = options
            .tags
            .first()
            .cloned()
            .unwrap_or_else(|| format!("zlayer-build:{}", chrono_lite_timestamp()));

        let image_id = self
            .commit_container(
                &final_container,
                &image_name,
                options.format.as_deref(),
                options.squash,
            )
            .await?;

        info!("Committed final image: {} ({})", image_name, image_id);

        // Remaining tags are applied to the committed image id.
        for tag in options.tags.iter().skip(1) {
            self.tag_image_internal(&image_id, tag).await?;
            debug!("Applied tag: {}", tag);
        }

        // Best-effort cleanup: final working container, then every
        // intermediate stage image.
        let _ = self
            .executor
            .execute(&BuildahCommand::rm(&final_container))
            .await;

        for (_, img) in stage_images {
            let _ = self.executor.execute(&BuildahCommand::rmi(&img)).await;
        }

        if options.push {
            for tag in &options.tags {
                self.push_image_internal(tag, options.registry_auth.as_ref())
                    .await?;
                info!("Pushed image: {}", tag);
            }
        }

        #[allow(clippy::cast_possible_truncation)]
        let build_time_ms = start_time.elapsed().as_millis() as u64;

        Self::send_event(
            event_tx.as_ref(),
            BuildEvent::BuildComplete {
                image_id: image_id.clone(),
            },
        );

        info!(
            "Build completed in {}ms: {} with {} tags",
            build_time_ms,
            image_id,
            options.tags.len()
        );

        Ok(BuiltImage {
            image_id,
            tags: options.tags.clone(),
            // One "layer" per executed instruction.
            layer_count: total_instructions,
            // Image size is not computed by this backend.
            size: 0, build_time_ms,
            is_manifest: false,
        })
    }

    /// Pushes `tag` to its registry, using `auth` when provided.
    async fn push_image(&self, tag: &str, auth: Option<&RegistryAuth>) -> Result<()> {
        self.push_image_internal(tag, auth).await
    }

    /// Applies `new_tag` to an existing image.
    async fn tag_image(&self, image: &str, new_tag: &str) -> Result<()> {
        self.tag_image_internal(image, new_tag).await
    }

    /// Creates an empty manifest list named `name`.
    async fn manifest_create(&self, name: &str) -> Result<()> {
        let cmd = BuildahCommand::manifest_create(name);
        self.executor.execute_checked(&cmd).await?;
        Ok(())
    }

    /// Adds `image` to the manifest list `manifest`.
    async fn manifest_add(&self, manifest: &str, image: &str) -> Result<()> {
        let cmd = BuildahCommand::manifest_add(manifest, image);
        self.executor.execute_checked(&cmd).await?;
        Ok(())
    }

    /// Pushes the manifest list `name` to `destination`.
    async fn manifest_push(&self, name: &str, destination: &str) -> Result<()> {
        let cmd = BuildahCommand::manifest_push(name, destination);
        self.executor.execute_checked(&cmd).await?;
        Ok(())
    }

    /// Reports whether the `buildah` binary is responding.
    async fn is_available(&self) -> bool {
        self.executor.is_available().await
    }

    /// Backend identifier used in logs and configuration.
    fn name(&self) -> &'static str {
        "buildah"
    }
}
728
/// Returns the current Unix time in whole seconds as a decimal string.
///
/// Used as a tag suffix when the caller supplied no tags. Falls back to `0`
/// if the system clock reads before the Unix epoch (clock skew), rather than
/// panicking.
fn chrono_lite_timestamp() -> String {
    use std::time::{SystemTime, UNIX_EPOCH};
    // `.to_string()` is the idiomatic (and clippy-clean) form of the old
    // `format!("{}", secs)`.
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
        .to_string()
}
740
741fn generate_build_id() -> String {
743 use sha2::{Digest, Sha256};
744 use std::time::{SystemTime, UNIX_EPOCH};
745
746 static COUNTER: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);
747
748 let nanos = SystemTime::now()
749 .duration_since(UNIX_EPOCH)
750 .unwrap_or_default()
751 .as_nanos();
752 let pid = std::process::id();
753 let count = COUNTER.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
754
755 let mut hasher = Sha256::new();
756 hasher.update(nanos.to_le_bytes());
757 hasher.update(pid.to_le_bytes());
758 hasher.update(count.to_le_bytes());
759 let hash = hasher.finalize();
760 hex::encode(&hash[..6])
761}
762
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_layer_cache_tracker_new() {
        // A freshly constructed tracker holds no entries.
        assert!(LayerCacheTracker::new().known_layers.is_empty());
    }

    #[test]
    fn test_layer_cache_tracker_record_and_lookup() {
        let mut tracker = LayerCacheTracker::new();

        // A recorded miss is reported as a miss.
        tracker.record(String::from("abc123"), String::from("container-1"), false);
        assert!(!tracker.is_cached("abc123", "container-1"));

        // A recorded hit is reported as a hit.
        tracker.record(String::from("def456"), String::from("container-2"), true);
        assert!(tracker.is_cached("def456", "container-2"));
    }

    #[test]
    fn test_layer_cache_tracker_unknown_returns_false() {
        // Pairs that were never recorded default to "not cached".
        assert!(!LayerCacheTracker::new().is_cached("unknown", "unknown"));
    }

    #[test]
    fn test_layer_cache_tracker_different_base_layers() {
        let mut tracker = LayerCacheTracker::new();

        // The same instruction key is tracked independently per base layer.
        tracker.record(String::from("inst-1"), String::from("base-a"), true);
        tracker.record(String::from("inst-1"), String::from("base-b"), false);

        assert!(tracker.is_cached("inst-1", "base-a"));
        assert!(!tracker.is_cached("inst-1", "base-b"));
    }

    #[test]
    fn test_layer_cache_tracker_detect_cache_hit() {
        use crate::dockerfile::RunInstruction;

        // The heuristic is a stub: it reports a miss regardless of timing or
        // of cache-hit markers in the output.
        let tracker = LayerCacheTracker::new();
        let instruction = Instruction::Run(RunInstruction::shell("echo hello"));

        assert!(!tracker.detect_cache_hit(&instruction, 50, ""));
        assert!(!tracker.detect_cache_hit(&instruction, 1000, ""));
        assert!(!tracker.detect_cache_hit(&instruction, 50, "Using cache"));
    }

    #[test]
    fn test_layer_cache_tracker_overwrite() {
        let mut tracker = LayerCacheTracker::new();

        // Re-recording the same pair replaces the stored value.
        tracker.record(String::from("key"), String::from("base"), false);
        assert!(!tracker.is_cached("key", "base"));

        tracker.record(String::from("key"), String::from("base"), true);
        assert!(tracker.is_cached("key", "base"));
    }
}
823}