1use std::collections::{HashMap, HashSet};
39use std::path::{Path, PathBuf};
40use std::str::FromStr;
41use std::sync::Arc;
42
43use tokio::sync::Mutex;
44use tokio::task::JoinSet;
45use tracing::{error, info, warn};
46
47use serde::Deserialize;
48
49use crate::backend::{detect_backend, BuildBackend, ImageOs};
50
/// Minimal view of a cached image's `config.json`, read back to decide
/// whether a previous build can be reused instead of rebuilding.
#[derive(Deserialize)]
struct CachedImageConfig {
    // SHA-256 hex digest of the build file recorded at build time; `None`
    // (via `#[serde(default)]`) when the cached config predates hash tracking.
    #[serde(default)]
    source_hash: Option<String>,
}
58use crate::buildah::{BuildahCommand, BuildahExecutor};
59use crate::builder::{BuiltImage, ImageBuilder};
60use crate::error::{BuildError, Result};
61use zlayer_paths::ZLayerDirs;
62
63use super::types::{PipelineDefaults, PipelineImage, ZPipeline};
64
65#[cfg(feature = "local-registry")]
66use zlayer_registry::LocalRegistry;
67
/// Outcome of a full pipeline run: which images built, which failed, and
/// how long the whole run took.
#[derive(Debug)]
pub struct PipelineResult {
    /// Successfully built images, keyed by pipeline image name.
    pub succeeded: HashMap<String, BuiltImage>,
    /// Failed images, keyed by pipeline image name, mapped to the error text.
    pub failed: HashMap<String, String>,
    /// Wall-clock duration of the entire run, in milliseconds.
    pub total_time_ms: u64,
}
78
79impl PipelineResult {
80 #[must_use]
82 pub fn is_success(&self) -> bool {
83 self.failed.is_empty()
84 }
85
86 #[must_use]
88 pub fn total_images(&self) -> usize {
89 self.succeeded.len() + self.failed.len()
90 }
91}
92
/// Executes a `ZPipeline`: builds its images in dependency order (waves of
/// parallel builds) and optionally pushes the resulting tags afterwards.
pub struct PipelineExecutor {
    // Parsed pipeline definition driving the run.
    pipeline: ZPipeline,
    // Directory that each image's `context`/`file` paths are resolved against.
    base_dir: PathBuf,
    // Fallback buildah executor, used when no explicit backend is set.
    executor: BuildahExecutor,
    // Explicit backend override; when `Some`, per-OS backend detection is skipped.
    backend: Option<Arc<dyn BuildBackend>>,
    // Backends detected lazily, memoized per target OS, and shared across
    // concurrent build tasks.
    backend_cache: Arc<Mutex<HashMap<ImageOs, Arc<dyn BuildBackend>>>>,
    // Abort the pipeline on the first build failure (defaults to true).
    fail_fast: bool,
    // Push all tags after the builds; seeded from the pipeline's `push.after_all`.
    push_enabled: bool,
    // Local registry whose root path is handed to every build task.
    #[cfg(feature = "local-registry")]
    local_registry: Option<Arc<LocalRegistry>>,
}
127
impl PipelineExecutor {
    /// Creates an executor that drives builds through the given buildah
    /// `executor`.
    ///
    /// `fail_fast` starts as `true`; pushing is enabled when the pipeline's
    /// `push.after_all` flag is set.
    #[must_use]
    pub fn new(pipeline: ZPipeline, base_dir: PathBuf, executor: BuildahExecutor) -> Self {
        let push_enabled = pipeline.push.after_all;

        Self {
            pipeline,
            base_dir,
            executor,
            backend: None,
            backend_cache: Arc::new(Mutex::new(HashMap::new())),
            fail_fast: true,
            push_enabled,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        }
    }

    /// Creates an executor that uses one explicit build backend for every
    /// image, bypassing per-OS backend detection.
    #[must_use]
    pub fn with_backend(
        pipeline: ZPipeline,
        base_dir: PathBuf,
        backend: Arc<dyn BuildBackend>,
    ) -> Self {
        let push_enabled = pipeline.push.after_all;

        Self {
            pipeline,
            base_dir,
            // Kept as a fallback for push/manifest operations (see push_image).
            executor: BuildahExecutor::default(),
            backend: Some(backend),
            backend_cache: Arc::new(Mutex::new(HashMap::new())),
            fail_fast: true,
            push_enabled,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        }
    }

    /// Sets whether the run aborts at the first build failure.
    #[must_use]
    pub fn fail_fast(mut self, fail_fast: bool) -> Self {
        self.fail_fast = fail_fast;
        self
    }

    /// Overrides the push-after-build setting taken from the pipeline file.
    #[must_use]
    pub fn push(mut self, enabled: bool) -> Self {
        self.push_enabled = enabled;
        self
    }

    /// Shares a local registry with every image build in the pipeline.
    #[cfg(feature = "local-registry")]
    #[must_use]
    pub fn with_local_registry(mut self, registry: Arc<LocalRegistry>) -> Self {
        self.local_registry = Some(registry);
        self
    }

    /// Resolves `depends_on` edges into build "waves": each wave contains
    /// images whose dependencies were all satisfied by earlier waves, so the
    /// images within a single wave can be built in parallel.
    ///
    /// # Errors
    ///
    /// Fails when an image depends on a name not declared in the pipeline,
    /// or when the dependency graph contains a cycle.
    fn resolve_execution_order(&self) -> Result<Vec<Vec<String>>> {
        let mut waves: Vec<Vec<String>> = Vec::new();
        let mut assigned: HashSet<String> = HashSet::new();
        let mut remaining: HashSet<String> = self.pipeline.images.keys().cloned().collect();

        // Up-front validation: every dependency must refer to a declared image.
        for (name, image) in &self.pipeline.images {
            for dep in &image.depends_on {
                if !self.pipeline.images.contains_key(dep) {
                    return Err(BuildError::invalid_instruction(
                        "pipeline",
                        format!("Image '{name}' depends on unknown image '{dep}'"),
                    ));
                }
            }
        }

        while !remaining.is_empty() {
            let mut wave: Vec<String> = Vec::new();

            // NOTE: HashSet iteration order is unspecified, so the order of
            // names *within* a wave is arbitrary (waves themselves are ordered).
            for name in &remaining {
                let image = &self.pipeline.images[name];
                let deps_satisfied = image.depends_on.iter().all(|d| assigned.contains(d));
                if deps_satisfied {
                    wave.push(name.clone());
                }
            }

            // No image became buildable this round: the remainder must form
            // a dependency cycle.
            if wave.is_empty() {
                return Err(BuildError::CircularDependency {
                    stages: remaining.into_iter().collect(),
                });
            }

            for name in &wave {
                remaining.remove(name);
                assigned.insert(name.clone());
            }

            waves.push(wave);
        }

        Ok(waves)
    }

    /// Runs the full pipeline: builds every wave in order and, when pushing
    /// is enabled and nothing failed, pushes every tag of every built image.
    ///
    /// With `fail_fast` enabled the first build error is returned
    /// immediately; otherwise failures are collected into the returned
    /// [`PipelineResult`].
    ///
    /// # Errors
    ///
    /// Fails on invalid dependency graphs, or (with `fail_fast`) on the
    /// first build error.
    pub async fn run(&self) -> Result<PipelineResult> {
        let start = std::time::Instant::now();
        let waves = self.resolve_execution_order()?;

        let mut succeeded: HashMap<String, BuiltImage> = HashMap::new();
        let mut failed: HashMap<String, String> = HashMap::new();

        info!(
            "Building {} images in {} waves",
            self.pipeline.images.len(),
            waves.len()
        );

        for (wave_idx, wave) in waves.iter().enumerate() {
            info!("Wave {}: {:?}", wave_idx, wave);

            // Defensive stop before starting a new wave if an earlier wave
            // already recorded failures under fail_fast.
            if self.fail_fast && !failed.is_empty() {
                warn!("Aborting pipeline due to previous failures (fail_fast enabled)");
                break;
            }

            let wave_results = self.build_wave(wave).await;

            for (name, result) in wave_results {
                match result {
                    Ok(image) => {
                        info!("[{}] Build succeeded: {}", name, image.image_id);
                        succeeded.insert(name, image);
                    }
                    Err(e) => {
                        error!("[{}] Build failed: {}", name, e);
                        failed.insert(name.clone(), e.to_string());

                        // fail_fast surfaces the original error immediately,
                        // discarding the partial PipelineResult.
                        if self.fail_fast {
                            return Err(e);
                        }
                    }
                }
            }
        }

        // Pushing only happens when every build succeeded.
        if self.push_enabled && failed.is_empty() {
            info!("Pushing {} images", succeeded.len());

            // With an explicit backend, propagate each image's first tag to
            // its secondary tags before pushing; tag failures are logged
            // but not fatal.
            if let Some(ref backend) = self.backend {
                for image in succeeded.values() {
                    if image.tags.len() > 1 {
                        let first = &image.tags[0];
                        for secondary in &image.tags[1..] {
                            if let Err(e) = backend.tag_image(first, secondary).await {
                                warn!("Failed to tag {} as {}: {}", first, secondary, e);
                            }
                        }
                    }
                }
            }

            // Push every tag; manifests and plain images use different
            // push commands. Push failures are logged, not returned.
            for (name, image) in &succeeded {
                for tag in &image.tags {
                    let push_result = if image.is_manifest {
                        self.push_manifest(tag).await
                    } else {
                        self.push_image(tag).await
                    };

                    if let Err(e) = push_result {
                        warn!("[{}] Failed to push {}: {}", name, tag, e);
                    } else {
                        info!("[{}] Pushed: {}", name, tag);
                    }
                }
            }
        }

        #[allow(clippy::cast_possible_truncation)]
        let total_time_ms = start.elapsed().as_millis() as u64;

        Ok(PipelineResult {
            succeeded,
            failed,
            total_time_ms,
        })
    }

    /// Builds every image in `wave` concurrently, one tokio task per image.
    ///
    /// Returns `(image name, build result)` pairs in task-completion order.
    /// A panicked task is recorded under the name `"unknown"` because the
    /// image name is lost along with the panic.
    async fn build_wave(&self, wave: &[String]) -> Vec<(String, Result<BuiltImage>)> {
        // Clone shared state into Arcs so each spawned task owns its handles.
        let pipeline = Arc::new(self.pipeline.clone());
        let base_dir = Arc::new(self.base_dir.clone());
        let executor = self.executor.clone();
        let explicit_backend = self.backend.clone();
        let backend_cache = Arc::clone(&self.backend_cache);

        // Capture only the registry's root path (not the registry handle);
        // each build task reopens the registry from it.
        #[cfg(feature = "local-registry")]
        let registry_root: Option<PathBuf> =
            self.local_registry.as_ref().map(|r| r.root().to_path_buf());
        #[cfg(not(feature = "local-registry"))]
        let registry_root: Option<PathBuf> = None;

        let mut set = JoinSet::new();

        for name in wave {
            let name = name.clone();
            let pipeline = Arc::clone(&pipeline);
            let base_dir = Arc::clone(&base_dir);
            let executor = executor.clone();
            let explicit_backend = explicit_backend.clone();
            let backend_cache = Arc::clone(&backend_cache);
            let registry_root = registry_root.clone();

            set.spawn(async move {
                let result = build_one_image(
                    &name,
                    &pipeline,
                    &base_dir,
                    executor,
                    explicit_backend,
                    &backend_cache,
                    registry_root.as_deref(),
                )
                .await;
                (name, result)
            });
        }

        let mut results = Vec::new();
        while let Some(join_result) = set.join_next().await {
            match join_result {
                Ok((name, result)) => {
                    results.push((name, result));
                }
                Err(e) => {
                    // The task panicked (or was cancelled); surface it as a
                    // build error rather than aborting the whole wave.
                    error!("Build task panicked: {}", e);
                    results.push((
                        "unknown".to_string(),
                        Err(BuildError::invalid_instruction(
                            "pipeline",
                            format!("Build task panicked: {e}"),
                        )),
                    ));
                }
            }
        }

        results
    }

    /// Pushes a single image tag via the explicit backend when present,
    /// otherwise through the raw `buildah push` command.
    async fn push_image(&self, tag: &str) -> Result<()> {
        if let Some(ref backend) = self.backend {
            return backend.push_image(tag, None).await;
        }
        let cmd = BuildahCommand::push(tag);
        self.executor.execute_checked(&cmd).await?;
        Ok(())
    }

    /// Pushes a manifest list to `docker://<tag>` via the explicit backend
    /// when present, otherwise through `buildah manifest push`.
    async fn push_manifest(&self, tag: &str) -> Result<()> {
        if let Some(ref backend) = self.backend {
            let destination = format!("docker://{tag}");
            return backend.manifest_push(tag, &destination).await;
        }
        let destination = format!("docker://{tag}");
        let cmd = BuildahCommand::manifest_push(tag, &destination);
        self.executor.execute_checked(&cmd).await?;
        Ok(())
    }
}
494
495fn effective_platforms(image: &PipelineImage, defaults: &PipelineDefaults) -> Vec<String> {
501 if image.platforms.is_empty() {
502 defaults.platforms.clone()
503 } else {
504 image.platforms.clone()
505 }
506}
507
508fn target_os_for_image(platforms: &[String], explicit_os: Option<ImageOs>) -> Result<ImageOs> {
522 let mut selected: Option<ImageOs> = None;
525 for platform in platforms {
526 let os = ImageOs::from_str(platform).map_err(|e| {
527 BuildError::invalid_instruction(
528 "pipeline",
529 format!("unrecognized platform '{platform}': {e}"),
530 )
531 })?;
532 match selected {
533 None => selected = Some(os),
534 Some(existing) if existing == os => {}
535 Some(existing) => {
536 return Err(BuildError::invalid_instruction(
537 "pipeline",
538 format!(
539 "multi-platform images cannot mix OSes in a single entry \
540 (found {existing:?} and {os:?} in platforms={platforms:?}); \
541 split into separate PipelineImage entries"
542 ),
543 ));
544 }
545 }
546 }
547
548 if let Some(explicit) = explicit_os {
549 if let Some(from_platforms) = selected {
550 if from_platforms != explicit {
551 return Err(BuildError::invalid_instruction(
552 "pipeline",
553 format!(
554 "explicit os={explicit:?} conflicts with OS inferred from \
555 platforms={platforms:?} (got {from_platforms:?}); remove one \
556 or make them agree"
557 ),
558 ));
559 }
560 }
561 return Ok(explicit);
562 }
563
564 Ok(selected.unwrap_or(ImageOs::Linux))
565}
566
/// Returns the build backend to use for `target_os`.
///
/// An explicit backend always wins and never touches the cache. Otherwise
/// backends are detected once per OS and memoized in `cache`, which is
/// shared by all concurrent build tasks.
async fn backend_for(
    target_os: ImageOs,
    cache: &Mutex<HashMap<ImageOs, Arc<dyn BuildBackend>>>,
    explicit: Option<Arc<dyn BuildBackend>>,
) -> Result<Arc<dyn BuildBackend>> {
    if let Some(backend) = explicit {
        return Ok(backend);
    }

    {
        // Fast path: return an already-cached backend; the scope drops the
        // lock before we contend for it again below.
        let guard = cache.lock().await;
        if let Some(backend) = guard.get(&target_os) {
            return Ok(Arc::clone(backend));
        }
    }

    let mut guard = cache.lock().await;
    // Re-check after re-acquiring: another task may have inserted a backend
    // for this OS while the lock was released.
    if let Some(backend) = guard.get(&target_os) {
        return Ok(Arc::clone(backend));
    }
    // The (tokio) lock is held across detection so concurrent tasks for the
    // same OS don't race to detect twice.
    let backend = detect_backend(target_os).await?;
    guard.insert(target_os, Arc::clone(&backend));
    Ok(backend)
}
602
603#[allow(clippy::too_many_arguments)]
609async fn build_one_image(
610 name: &str,
611 pipeline: &ZPipeline,
612 base_dir: &Path,
613 executor: BuildahExecutor,
614 explicit_backend: Option<Arc<dyn BuildBackend>>,
615 backend_cache: &Mutex<HashMap<ImageOs, Arc<dyn BuildBackend>>>,
616 registry_root: Option<&Path>,
617) -> Result<BuiltImage> {
618 let image_config = &pipeline.images[name];
619 let platforms = effective_platforms(image_config, &pipeline.defaults);
620
621 let target_os = target_os_for_image(&platforms, image_config.os)?;
623 info!(
624 "Building image '{}' (target_os={:?}, platforms={:?}, explicit_os={:?})",
625 name, target_os, platforms, image_config.os
626 );
627
628 let backend = backend_for(target_os, backend_cache, explicit_backend).await?;
629
630 match platforms.len() {
631 0 => {
633 build_single_image(
634 name,
635 pipeline,
636 base_dir,
637 executor,
638 Some(backend),
639 None,
640 registry_root,
641 )
642 .await
643 }
644 1 => {
646 let platform = platforms[0].clone();
647 build_single_image(
648 name,
649 pipeline,
650 base_dir,
651 executor,
652 Some(backend),
653 Some(&platform),
654 registry_root,
655 )
656 .await
657 }
658 _ => {
660 build_multiplatform_image(
661 name,
662 pipeline,
663 base_dir,
664 executor,
665 Some(backend),
666 &platforms,
667 registry_root,
668 )
669 .await
670 }
671 }
672}
673
/// Converts a platform string into a tag suffix: `"linux/amd64"` becomes
/// `"amd64"`, `"linux/arm64/v8"` becomes `"arm64-v8"`, and a string with no
/// `/` (e.g. `"linux"`) is returned unchanged. Components beyond the third
/// are ignored.
fn platform_to_suffix(platform: &str) -> String {
    let mut parts = platform.split('/');
    // Discard the leading OS component.
    let _os = parts.next();
    match (parts.next(), parts.next()) {
        (Some(arch), Some(variant)) => format!("{arch}-{variant}"),
        (Some(arch), None) => arch.to_string(),
        // No '/' present at all: the input passes through as-is.
        (None, _) => platform.replace('/', "-"),
    }
}
690
691fn apply_pipeline_config(
697 mut builder: ImageBuilder,
698 image_config: &PipelineImage,
699 defaults: &PipelineDefaults,
700) -> ImageBuilder {
701 let mut args = defaults.build_args.clone();
703 args.extend(image_config.build_args.clone());
704 builder = builder.build_args(args);
705
706 if let Some(fmt) = image_config.format.as_ref().or(defaults.format.as_ref()) {
708 builder = builder.format(fmt);
709 }
710
711 if image_config.no_cache.unwrap_or(defaults.no_cache) {
713 builder = builder.no_cache();
714 }
715
716 let mut cache_mounts = defaults.cache_mounts.clone();
718 cache_mounts.extend(image_config.cache_mounts.clone());
719 if !cache_mounts.is_empty() {
720 let run_mounts: Vec<_> = cache_mounts
721 .iter()
722 .map(crate::zimage::convert_cache_mount)
723 .collect();
724 builder = builder.default_cache_mounts(run_mounts);
725 }
726
727 let retries = image_config.retries.or(defaults.retries).unwrap_or(0);
729 if retries > 0 {
730 builder = builder.retries(retries);
731 }
732
733 builder
734}
735
736fn apply_build_file(builder: ImageBuilder, file_path: &Path) -> ImageBuilder {
739 let file_name = file_path
740 .file_name()
741 .map(|n| n.to_string_lossy().to_string())
742 .unwrap_or_default();
743 let extension = file_path
744 .extension()
745 .map(|e| e.to_string_lossy().to_string())
746 .unwrap_or_default();
747
748 if extension == "yaml" || extension == "yml" || file_name.starts_with("ZImagefile") {
749 builder.zimagefile(file_path)
750 } else {
751 builder.dockerfile(file_path)
752 }
753}
754
755async fn compute_file_hash(path: &Path) -> Option<String> {
759 use sha2::{Digest, Sha256};
760
761 let content = tokio::fs::read(path).await.ok()?;
762 let mut hasher = Sha256::new();
763 hasher.update(&content);
764 Some(format!("{:x}", hasher.finalize()))
765}
766
/// Makes an image reference safe to use as a cache directory name by
/// mapping `/`, `:`, and `@` to `_`.
fn sanitize_image_name_for_cache(image: &str) -> String {
    image
        .chars()
        .map(|c| if matches!(c, '/' | ':' | '@') { '_' } else { c })
        .collect()
}
773
774async fn check_cached_image_hash(
779 data_dir: &Path,
780 tag: &str,
781 expected_hash: &str,
782) -> Option<String> {
783 let sanitized = sanitize_image_name_for_cache(tag);
784 let config_path = data_dir.join("images").join(&sanitized).join("config.json");
785 let data = tokio::fs::read_to_string(&config_path).await.ok()?;
786 let config: CachedImageConfig = serde_json::from_str(&data).ok()?;
787 if config.source_hash.as_deref() == Some(expected_hash) {
788 Some(sanitized)
789 } else {
790 None
791 }
792}
793
/// Builds a single (optionally platform-pinned) image from the pipeline.
///
/// Before building, the build file's SHA-256 is compared with the hash
/// recorded for the first expanded tag in the on-disk cache; on a match the
/// build is skipped and a placeholder `BuiltImage` is returned.
///
/// # Errors
///
/// Fails when the builder cannot be constructed, the local registry cannot
/// be opened, or the underlying build fails.
#[cfg_attr(not(feature = "local-registry"), allow(unused_variables))]
async fn build_single_image(
    name: &str,
    pipeline: &ZPipeline,
    base_dir: &Path,
    executor: BuildahExecutor,
    backend: Option<Arc<dyn BuildBackend>>,
    platform: Option<&str>,
    registry_root: Option<&Path>,
) -> Result<BuiltImage> {
    let image_config = &pipeline.images[name];
    let context = base_dir.join(&image_config.context);
    let file_path = base_dir.join(&image_config.file);

    // Cache check: only runs when the build file is readable and hashable.
    let file_hash = compute_file_hash(&file_path).await;
    if let Some(ref hash) = file_hash {
        let data_dir = ZLayerDirs::default_data_dir();

        let expanded_tags: Vec<String> = image_config
            .tags
            .iter()
            .map(|t| expand_tag_with_vars(t, &pipeline.vars))
            .collect();

        // Only the first tag is consulted for the cache lookup.
        if let Some(first_tag) = expanded_tags.first() {
            if let Some(cached_id) = check_cached_image_hash(&data_dir, first_tag, hash).await {
                info!(
                    "[{}] Skipping build — cached image hash matches ({})",
                    name, cached_id
                );
                // Placeholder metadata: real layer/size data is not re-read
                // on a cache hit.
                return Ok(BuiltImage {
                    image_id: cached_id,
                    tags: expanded_tags,
                    layer_count: 1,
                    size: 0,
                    build_time_ms: 0,
                    is_manifest: false,
                });
            }
        }
    }

    // Fall back to a buildah-backed builder when no backend was supplied.
    let effective_backend: Arc<dyn BuildBackend> = backend
        .unwrap_or_else(|| Arc::new(crate::backend::BuildahBackend::with_executor(executor)));
    let mut builder = ImageBuilder::with_backend(&context, effective_backend)?;

    builder = apply_build_file(builder, &file_path);

    // Record the source hash so future runs can reuse this build.
    if let Some(hash) = file_hash {
        builder = builder.source_hash(hash);
    }

    if let Some(plat) = platform {
        builder = builder.platform(plat);
    }

    for tag in &image_config.tags {
        let expanded = expand_tag_with_vars(tag, &pipeline.vars);
        builder = builder.tag(expanded);
    }

    builder = apply_pipeline_config(builder, image_config, &pipeline.defaults);

    // When a registry root was provided, each build opens its own handle
    // to the shared local registry.
    #[cfg(feature = "local-registry")]
    if let Some(root) = registry_root {
        let shared_registry = LocalRegistry::new(root.to_path_buf()).await.map_err(|e| {
            BuildError::invalid_instruction(
                "pipeline",
                format!("failed to open local registry: {e}"),
            )
        })?;
        builder = builder.with_local_registry(shared_registry);
    }

    builder.build().await
}
886
/// Builds one image for each platform in `platforms` (tagging each build
/// with a platform suffix), then assembles the per-platform images into a
/// single manifest list named after the first expanded tag.
///
/// Layer counts and sizes are summed across the per-platform builds; the
/// returned `BuiltImage` has `is_manifest = true`.
///
/// # Errors
///
/// Fails when any per-platform build fails or manifest assembly fails.
#[cfg_attr(not(feature = "local-registry"), allow(unused_variables))]
async fn build_multiplatform_image(
    name: &str,
    pipeline: &ZPipeline,
    base_dir: &Path,
    executor: BuildahExecutor,
    backend: Option<Arc<dyn BuildBackend>>,
    platforms: &[String],
    registry_root: Option<&Path>,
) -> Result<BuiltImage> {
    let image_config = &pipeline.images[name];
    let start_time = std::time::Instant::now();

    let expanded_tags: Vec<String> = image_config
        .tags
        .iter()
        .map(|t| expand_tag_with_vars(t, &pipeline.vars))
        .collect();

    // The first tag doubles as the manifest name; untagged images get a
    // synthetic one.
    let manifest_name = expanded_tags
        .first()
        .cloned()
        .unwrap_or_else(|| format!("zlayer-manifest-{name}"));

    // One representative tag per platform, later added to the manifest.
    let mut arch_tags: Vec<String> = Vec::new();
    let mut total_layers = 0usize;
    let mut total_size = 0u64;

    for platform in platforms {
        let suffix = platform_to_suffix(platform);
        let platform_tags: Vec<String> = expanded_tags
            .iter()
            .map(|t| format!("{t}-{suffix}"))
            .collect();

        info!("[{name}] Building for platform {platform}");

        let context = base_dir.join(&image_config.context);
        let file_path = base_dir.join(&image_config.file);

        // Fall back to a buildah-backed builder when no backend was supplied.
        let effective_backend: Arc<dyn BuildBackend> = match backend {
            Some(ref b) => Arc::clone(b),
            None => Arc::new(crate::backend::BuildahBackend::with_executor(
                executor.clone(),
            )),
        };
        let mut builder = ImageBuilder::with_backend(&context, effective_backend)?;

        builder = apply_build_file(builder, &file_path);

        builder = builder.platform(platform);

        for tag in &platform_tags {
            builder = builder.tag(tag);
        }

        builder = apply_pipeline_config(builder, image_config, &pipeline.defaults);

        // Each per-platform build opens its own handle to the shared
        // local registry.
        #[cfg(feature = "local-registry")]
        if let Some(root) = registry_root {
            let shared_registry = LocalRegistry::new(root.to_path_buf()).await.map_err(|e| {
                BuildError::invalid_instruction(
                    "pipeline",
                    format!("failed to open local registry: {e}"),
                )
            })?;
            builder = builder.with_local_registry(shared_registry);
        }

        let built = builder.build().await?;
        // Aggregate metadata across all per-platform builds.
        total_layers += built.layer_count;
        total_size += built.size;

        if let Some(first_tag) = platform_tags.first() {
            arch_tags.push(first_tag.clone());
        }
    }

    assemble_manifest(
        name,
        &manifest_name,
        &arch_tags,
        &expanded_tags,
        backend.as_ref(),
        &executor,
    )
    .await?;

    #[allow(clippy::cast_possible_truncation)]
    let build_time_ms = start_time.elapsed().as_millis() as u64;

    Ok(BuiltImage {
        image_id: manifest_name,
        tags: expanded_tags,
        layer_count: total_layers,
        size: total_size,
        build_time_ms,
        is_manifest: true,
    })
}
1001
/// Creates the manifest list `manifest_name`, adds each per-platform image
/// in `arch_tags` to it, then aliases the remaining expanded tags onto it.
///
/// Every step goes through `backend` when one is provided, otherwise
/// through the raw buildah CLI via `executor`.
///
/// # Errors
///
/// Fails when manifest creation, any add, or any tag alias fails.
async fn assemble_manifest(
    name: &str,
    manifest_name: &str,
    arch_tags: &[String],
    expanded_tags: &[String],
    backend: Option<&Arc<dyn BuildBackend>>,
    executor: &BuildahExecutor,
) -> Result<()> {
    info!("[{name}] Creating manifest: {manifest_name}");
    if let Some(backend) = backend {
        backend
            .manifest_create(manifest_name)
            .await
            .map_err(|e| BuildError::pipeline_error(format!("manifest create failed: {e}")))?;
    } else {
        executor
            .execute_checked(&BuildahCommand::manifest_create(manifest_name))
            .await
            .map_err(|e| BuildError::pipeline_error(format!("manifest create failed: {e}")))?;
    }

    for arch_tag in arch_tags {
        info!("[{name}] Adding to manifest: {arch_tag}");
        if let Some(backend) = backend {
            backend
                .manifest_add(manifest_name, arch_tag)
                .await
                .map_err(|e| BuildError::pipeline_error(format!("manifest add failed: {e}")))?;
        } else {
            executor
                .execute_checked(&BuildahCommand::manifest_add(manifest_name, arch_tag))
                .await
                .map_err(|e| BuildError::pipeline_error(format!("manifest add failed: {e}")))?;
        }
    }

    // The first expanded tag is used by the caller as the manifest name
    // itself, so only the remaining tags need aliasing.
    for tag in expanded_tags.iter().skip(1) {
        if let Some(backend) = backend {
            backend
                .tag_image(manifest_name, tag)
                .await
                .map_err(|e| BuildError::pipeline_error(format!("manifest tag failed: {e}")))?;
        } else {
            executor
                .execute_checked(&BuildahCommand::tag(manifest_name, tag))
                .await
                .map_err(|e| BuildError::pipeline_error(format!("manifest tag failed: {e}")))?;
        }
    }

    Ok(())
}
1061
/// Substitutes `${KEY}` placeholders in `tag` with the corresponding values
/// from `vars`; placeholders with no matching key are left untouched.
fn expand_tag_with_vars(tag: &str, vars: &HashMap<String, String>) -> String {
    vars.iter().fold(tag.to_string(), |expanded, (key, value)| {
        expanded.replace(&format!("${{{key}}}"), value)
    })
}
1072
1073#[cfg(test)]
1074mod tests {
1075 use super::*;
1076 use crate::pipeline::parse_pipeline;
1077
1078 #[test]
1079 fn test_resolve_execution_order_simple() {
1080 let yaml = r"
1081images:
1082 app:
1083 file: Dockerfile
1084";
1085 let pipeline = parse_pipeline(yaml).unwrap();
1086 let executor = PipelineExecutor::new(
1087 pipeline,
1088 PathBuf::from("/tmp"),
1089 BuildahExecutor::with_path("/usr/bin/buildah"),
1090 );
1091
1092 let waves = executor.resolve_execution_order().unwrap();
1093 assert_eq!(waves.len(), 1);
1094 assert_eq!(waves[0], vec!["app"]);
1095 }
1096
1097 #[test]
1098 fn test_resolve_execution_order_with_deps() {
1099 let yaml = r"
1100images:
1101 base:
1102 file: Dockerfile.base
1103 app:
1104 file: Dockerfile.app
1105 depends_on: [base]
1106 test:
1107 file: Dockerfile.test
1108 depends_on: [app]
1109";
1110 let pipeline = parse_pipeline(yaml).unwrap();
1111 let executor = PipelineExecutor::new(
1112 pipeline,
1113 PathBuf::from("/tmp"),
1114 BuildahExecutor::with_path("/usr/bin/buildah"),
1115 );
1116
1117 let waves = executor.resolve_execution_order().unwrap();
1118 assert_eq!(waves.len(), 3);
1119 assert_eq!(waves[0], vec!["base"]);
1120 assert_eq!(waves[1], vec!["app"]);
1121 assert_eq!(waves[2], vec!["test"]);
1122 }
1123
1124 #[test]
1125 fn test_resolve_execution_order_parallel() {
1126 let yaml = r"
1127images:
1128 base:
1129 file: Dockerfile.base
1130 app1:
1131 file: Dockerfile.app1
1132 depends_on: [base]
1133 app2:
1134 file: Dockerfile.app2
1135 depends_on: [base]
1136";
1137 let pipeline = parse_pipeline(yaml).unwrap();
1138 let executor = PipelineExecutor::new(
1139 pipeline,
1140 PathBuf::from("/tmp"),
1141 BuildahExecutor::with_path("/usr/bin/buildah"),
1142 );
1143
1144 let waves = executor.resolve_execution_order().unwrap();
1145 assert_eq!(waves.len(), 2);
1146 assert_eq!(waves[0], vec!["base"]);
1147 assert_eq!(waves[1].len(), 2);
1149 assert!(waves[1].contains(&"app1".to_string()));
1150 assert!(waves[1].contains(&"app2".to_string()));
1151 }
1152
1153 #[test]
1154 fn test_resolve_execution_order_missing_dep() {
1155 let yaml = r"
1156images:
1157 app:
1158 file: Dockerfile
1159 depends_on: [missing]
1160";
1161 let pipeline = parse_pipeline(yaml).unwrap();
1162 let executor = PipelineExecutor::new(
1163 pipeline,
1164 PathBuf::from("/tmp"),
1165 BuildahExecutor::with_path("/usr/bin/buildah"),
1166 );
1167
1168 let result = executor.resolve_execution_order();
1169 assert!(result.is_err());
1170 assert!(result.unwrap_err().to_string().contains("missing"));
1171 }
1172
1173 #[test]
1174 fn test_resolve_execution_order_circular() {
1175 let yaml = r"
1176images:
1177 a:
1178 file: Dockerfile.a
1179 depends_on: [b]
1180 b:
1181 file: Dockerfile.b
1182 depends_on: [a]
1183";
1184 let pipeline = parse_pipeline(yaml).unwrap();
1185 let executor = PipelineExecutor::new(
1186 pipeline,
1187 PathBuf::from("/tmp"),
1188 BuildahExecutor::with_path("/usr/bin/buildah"),
1189 );
1190
1191 let result = executor.resolve_execution_order();
1192 assert!(result.is_err());
1193 match result.unwrap_err() {
1194 BuildError::CircularDependency { stages } => {
1195 assert!(stages.contains(&"a".to_string()));
1196 assert!(stages.contains(&"b".to_string()));
1197 }
1198 e => panic!("Expected CircularDependency error, got: {e:?}"),
1199 }
1200 }
1201
1202 #[test]
1203 fn test_expand_tag() {
1204 let mut vars = HashMap::new();
1205 vars.insert("VERSION".to_string(), "1.0.0".to_string());
1206 vars.insert("REGISTRY".to_string(), "ghcr.io/myorg".to_string());
1207
1208 let tag = "${REGISTRY}/app:${VERSION}";
1209 let expanded = expand_tag_with_vars(tag, &vars);
1210 assert_eq!(expanded, "ghcr.io/myorg/app:1.0.0");
1211 }
1212
1213 #[test]
1214 fn test_expand_tag_partial() {
1215 let mut vars = HashMap::new();
1216 vars.insert("VERSION".to_string(), "1.0.0".to_string());
1217
1218 let tag = "myapp:${VERSION}-${UNKNOWN}";
1220 let expanded = expand_tag_with_vars(tag, &vars);
1221 assert_eq!(expanded, "myapp:1.0.0-${UNKNOWN}");
1222 }
1223
1224 #[test]
1225 fn test_pipeline_result_is_success() {
1226 let mut result = PipelineResult {
1227 succeeded: HashMap::new(),
1228 failed: HashMap::new(),
1229 total_time_ms: 100,
1230 };
1231
1232 assert!(result.is_success());
1233
1234 result.failed.insert("app".to_string(), "error".to_string());
1235 assert!(!result.is_success());
1236 }
1237
1238 #[test]
1239 fn test_pipeline_result_total_images() {
1240 let mut result = PipelineResult {
1241 succeeded: HashMap::new(),
1242 failed: HashMap::new(),
1243 total_time_ms: 100,
1244 };
1245
1246 result.succeeded.insert(
1247 "app1".to_string(),
1248 BuiltImage {
1249 image_id: "sha256:abc".to_string(),
1250 tags: vec!["app1:latest".to_string()],
1251 layer_count: 5,
1252 size: 0,
1253 build_time_ms: 50,
1254 is_manifest: false,
1255 },
1256 );
1257 result
1258 .failed
1259 .insert("app2".to_string(), "error".to_string());
1260
1261 assert_eq!(result.total_images(), 2);
1262 }
1263
1264 #[test]
1265 fn test_builder_methods() {
1266 let yaml = r"
1267images:
1268 app:
1269 file: Dockerfile
1270push:
1271 after_all: true
1272";
1273 let pipeline = parse_pipeline(yaml).unwrap();
1274 let executor = PipelineExecutor::new(
1275 pipeline,
1276 PathBuf::from("/tmp"),
1277 BuildahExecutor::with_path("/usr/bin/buildah"),
1278 )
1279 .fail_fast(false)
1280 .push(false);
1281
1282 assert!(!executor.fail_fast);
1283 assert!(!executor.push_enabled);
1284 }
1285
1286 fn test_pipeline_image() -> PipelineImage {
1288 PipelineImage {
1289 file: PathBuf::from("Dockerfile"),
1290 context: PathBuf::from("."),
1291 tags: vec![],
1292 build_args: HashMap::new(),
1293 depends_on: vec![],
1294 no_cache: None,
1295 format: None,
1296 cache_mounts: vec![],
1297 retries: None,
1298 platforms: vec![],
1299 os: None,
1300 }
1301 }
1302
1303 #[test]
1304 fn test_platform_to_suffix() {
1305 assert_eq!(platform_to_suffix("linux/amd64"), "amd64");
1306 assert_eq!(platform_to_suffix("linux/arm64"), "arm64");
1307 assert_eq!(platform_to_suffix("linux/arm64/v8"), "arm64-v8");
1308 assert_eq!(platform_to_suffix("linux"), "linux");
1309 }
1310
1311 #[test]
1312 fn test_effective_platforms_image_overrides() {
1313 let defaults = PipelineDefaults {
1314 platforms: vec!["linux/amd64".into()],
1315 ..Default::default()
1316 };
1317 let image = PipelineImage {
1318 platforms: vec!["linux/arm64".into()],
1319 ..test_pipeline_image()
1320 };
1321 assert_eq!(effective_platforms(&image, &defaults), vec!["linux/arm64"]);
1322 }
1323
1324 #[test]
1325 fn test_effective_platforms_inherits_defaults() {
1326 let defaults = PipelineDefaults {
1327 platforms: vec!["linux/amd64".into()],
1328 ..Default::default()
1329 };
1330 let image = test_pipeline_image();
1331 assert_eq!(effective_platforms(&image, &defaults), vec!["linux/amd64"]);
1332 }
1333
1334 #[test]
1335 fn test_effective_platforms_empty() {
1336 let defaults = PipelineDefaults::default();
1337 let image = test_pipeline_image();
1338 assert!(effective_platforms(&image, &defaults).is_empty());
1339 }
1340
1341 #[test]
1342 fn test_platform_to_suffix_edge_cases() {
1343 assert_eq!(platform_to_suffix(""), "");
1345 assert_eq!(platform_to_suffix("linux"), "linux");
1347 assert_eq!(platform_to_suffix("linux/arm/v7/extra"), "arm-v7");
1349 }
1350
1351 #[test]
1352 fn test_effective_platforms_multiple_defaults() {
1353 let defaults = PipelineDefaults {
1354 platforms: vec!["linux/amd64".into(), "linux/arm64".into()],
1355 ..Default::default()
1356 };
1357 let image = test_pipeline_image();
1358 assert_eq!(
1359 effective_platforms(&image, &defaults),
1360 vec!["linux/amd64", "linux/arm64"]
1361 );
1362 }
1363
1364 #[test]
1365 fn test_effective_platforms_image_overrides_multiple() {
1366 let defaults = PipelineDefaults {
1367 platforms: vec!["linux/amd64".into(), "linux/arm64".into()],
1368 ..Default::default()
1369 };
1370 let image = PipelineImage {
1371 platforms: vec!["linux/s390x".into()],
1372 ..test_pipeline_image()
1373 };
1374 assert_eq!(effective_platforms(&image, &defaults), vec!["linux/s390x"]);
1376 }
1377
1378 #[test]
1383 fn test_target_os_for_image_empty_defaults_to_linux() {
1384 assert_eq!(target_os_for_image(&[], None).unwrap(), ImageOs::Linux);
1385 }
1386
1387 #[test]
1388 fn test_target_os_for_image_single_linux() {
1389 assert_eq!(
1390 target_os_for_image(&["linux/amd64".to_string()], None).unwrap(),
1391 ImageOs::Linux
1392 );
1393 }
1394
1395 #[test]
1396 fn test_target_os_for_image_single_windows() {
1397 assert_eq!(
1398 target_os_for_image(&["windows/amd64".to_string()], None).unwrap(),
1399 ImageOs::Windows
1400 );
1401 }
1402
1403 #[test]
1404 fn test_target_os_for_image_multi_same_os() {
1405 let plats = vec!["linux/amd64".to_string(), "linux/arm64".to_string()];
1407 assert_eq!(target_os_for_image(&plats, None).unwrap(), ImageOs::Linux);
1408 }
1409
1410 #[test]
1411 fn test_target_os_for_image_mixed_os_is_rejected() {
1412 let plats = vec!["linux/amd64".to_string(), "windows/amd64".to_string()];
1415 let err = target_os_for_image(&plats, None).unwrap_err();
1416 let msg = err.to_string();
1417 assert!(
1418 msg.contains("cannot mix OSes"),
1419 "expected mix-of-OSes error, got: {msg}"
1420 );
1421 assert!(
1422 msg.contains("split into separate PipelineImage entries"),
1423 "expected remediation hint, got: {msg}"
1424 );
1425 }
1426
1427 #[test]
1428 fn test_target_os_for_image_unrecognized_platform() {
1429 let plats = vec!["plan9/amd64".to_string()];
1430 let err = target_os_for_image(&plats, None).unwrap_err();
1431 assert!(err.to_string().contains("unrecognized platform"));
1432 }
1433
1434 #[test]
1435 fn test_target_os_for_image_explicit_os_wins_empty_platforms() {
1436 assert_eq!(
1439 target_os_for_image(&[], Some(ImageOs::Windows)).unwrap(),
1440 ImageOs::Windows
1441 );
1442 }
1443
1444 #[test]
1445 fn test_target_os_for_image_explicit_os_matches_platforms() {
1446 let plats = vec!["windows/amd64".to_string()];
1448 assert_eq!(
1449 target_os_for_image(&plats, Some(ImageOs::Windows)).unwrap(),
1450 ImageOs::Windows
1451 );
1452 }
1453
1454 #[test]
1455 fn test_target_os_for_image_explicit_os_conflicts_with_platforms() {
1456 let plats = vec!["linux/amd64".to_string()];
1459 let err = target_os_for_image(&plats, Some(ImageOs::Windows)).unwrap_err();
1460 let msg = err.to_string();
1461 assert!(
1462 msg.contains("explicit os=")
1463 && msg.contains("conflicts with OS inferred from platforms"),
1464 "expected conflict error, got: {msg}"
1465 );
1466 }
1467
    /// Minimal `BuildBackend` stub used to exercise backend resolution and
    /// per-image build dispatch without invoking real build tooling.
    struct FakeBackend {
        // Identifier surfaced via `BuildBackend::name()`; tests assert on it
        // to determine which backend instance was resolved.
        name: &'static str,
    }
1474
    #[async_trait::async_trait]
    impl BuildBackend for FakeBackend {
        /// Pretends to build: returns a deterministic image id derived from the
        /// backend name and echoes back the tags requested in `options`.
        async fn build_image(
            &self,
            _context: &Path,
            _dockerfile: &crate::dockerfile::Dockerfile,
            options: &crate::builder::BuildOptions,
            _event_tx: Option<std::sync::mpsc::Sender<crate::tui::BuildEvent>>,
        ) -> Result<BuiltImage> {
            Ok(BuiltImage {
                // "<name>:fake-id" lets a test identify which fake produced
                // the result (see the build_one_image isolation test).
                image_id: format!("{}:fake-id", self.name),
                tags: options.tags.clone(),
                layer_count: 1,
                size: 0,
                build_time_ms: 1,
                is_manifest: false,
            })
        }

        // The remaining trait methods are no-ops that always succeed: these
        // tests only care about backend *resolution*, not push/tag/manifest
        // behavior.
        async fn push_image(
            &self,
            _tag: &str,
            _auth: Option<&crate::builder::RegistryAuth>,
        ) -> Result<()> {
            Ok(())
        }

        async fn tag_image(&self, _image: &str, _new_tag: &str) -> Result<()> {
            Ok(())
        }

        async fn manifest_create(&self, _name: &str) -> Result<()> {
            Ok(())
        }

        async fn manifest_add(&self, _manifest: &str, _image: &str) -> Result<()> {
            Ok(())
        }

        async fn manifest_push(&self, _name: &str, _destination: &str) -> Result<()> {
            Ok(())
        }

        /// Always reports available so the fake is never skipped.
        async fn is_available(&self) -> bool {
            true
        }

        /// Returns the identifier supplied at construction.
        fn name(&self) -> &'static str {
            self.name
        }
    }
1526
1527 #[tokio::test]
1528 async fn test_backend_for_uses_explicit_override() {
1529 let explicit: Arc<dyn BuildBackend> = Arc::new(FakeBackend { name: "explicit" });
1532 let cache: Mutex<HashMap<ImageOs, Arc<dyn BuildBackend>>> = Mutex::new(HashMap::new());
1533 let resolved = backend_for(ImageOs::Linux, &cache, Some(Arc::clone(&explicit)))
1534 .await
1535 .unwrap();
1536 assert_eq!(resolved.name(), "explicit");
1537 assert!(
1538 cache.lock().await.is_empty(),
1539 "explicit override should not populate cache"
1540 );
1541 }
1542
1543 #[tokio::test]
1544 async fn test_backend_for_cache_hit_returns_cached() {
1545 let fake: Arc<dyn BuildBackend> = Arc::new(FakeBackend { name: "cached" });
1548 let cache: Mutex<HashMap<ImageOs, Arc<dyn BuildBackend>>> = Mutex::new(HashMap::new());
1549 cache.lock().await.insert(ImageOs::Linux, Arc::clone(&fake));
1550 let resolved = backend_for(ImageOs::Linux, &cache, None).await.unwrap();
1551 assert_eq!(resolved.name(), "cached");
1552 }
1553
    // Skipped on Windows hosts: the scenario under test is a *Linux* host
    // being asked to build a Windows image.
    #[cfg(not(target_os = "windows"))]
    #[tokio::test]
    async fn test_build_one_image_isolates_windows_failure_on_linux_host() {
        use tempfile::TempDir;

        // Build context: a temp dir containing a trivial Dockerfile shared by
        // both pipeline images.
        let tmp = TempDir::new().unwrap();
        let ctx = tmp.path();
        tokio::fs::write(ctx.join("Dockerfile"), "FROM scratch\n")
            .await
            .unwrap();

        // One Linux image and one Windows image in the same pipeline, so a
        // failure of the latter can be checked for isolation from the former.
        let yaml = r#"
images:
  linux-app:
    file: Dockerfile
    platforms: ["linux/amd64"]
    tags: ["example/linux:dev"]
  win-app:
    file: Dockerfile
    platforms: ["windows/amd64"]
    tags: ["example/windows:dev"]
"#;
        let pipeline = parse_pipeline(yaml).unwrap();

        // Pre-seed the backend cache with a fake for Linux so the Linux build
        // never touches real backend detection.
        let cache: Arc<Mutex<HashMap<ImageOs, Arc<dyn BuildBackend>>>> =
            Arc::new(Mutex::new(HashMap::new()));
        let fake_linux: Arc<dyn BuildBackend> = Arc::new(FakeBackend { name: "fake-linux" });
        cache
            .lock()
            .await
            .insert(ImageOs::Linux, Arc::clone(&fake_linux));

        // The Linux image should succeed via the cached fake backend.
        // NOTE(review): the two `None` arguments are presumably the explicit
        // backend override and the event channel — confirm against
        // build_one_image's signature.
        let linux_res = build_one_image(
            "linux-app",
            &pipeline,
            ctx,
            BuildahExecutor::with_path("/usr/bin/buildah"),
            None, &cache,
            None,
        )
        .await;
        assert!(
            linux_res.is_ok(),
            "Linux image should succeed, got: {linux_res:?}"
        );
        assert_eq!(linux_res.unwrap().image_id, "fake-linux:fake-id");

        // The Windows image must fail on this (non-Windows) host, and the
        // failure must come from backend detection rather than the build.
        let win_res = build_one_image(
            "win-app",
            &pipeline,
            ctx,
            BuildahExecutor::with_path("/usr/bin/buildah"),
            None,
            &cache,
            None,
        )
        .await;
        let err = win_res.unwrap_err();
        let msg = err.to_string();
        assert!(
            msg.contains("Windows host") || msg.contains("windows host"),
            "expected Windows-host error from detect_backend, got: {msg}"
        );

        // Isolation: the failed Windows detection must not have polluted the
        // cache, and the Linux entry must still be intact.
        let guard = cache.lock().await;
        assert!(guard.contains_key(&ImageOs::Linux));
        assert!(!guard.contains_key(&ImageOs::Windows));
    }
1637}