1use std::collections::{HashMap, HashSet};
39use std::path::{Path, PathBuf};
40use std::sync::Arc;
41
42use tokio::task::JoinSet;
43use tracing::{error, info, warn};
44
45use serde::Deserialize;
46
47use crate::backend::BuildBackend;
48
/// Minimal view of a cached image's `config.json`, read to decide whether a
/// previously built image can be reused instead of rebuilding.
#[derive(Deserialize)]
struct CachedImageConfig {
    /// SHA-256 hex digest of the build file the cached image was built from.
    /// `#[serde(default)]` makes an absent field `None`, which is treated as
    /// a cache miss by the caller.
    #[serde(default)]
    source_hash: Option<String>,
}
56use crate::buildah::{BuildahCommand, BuildahExecutor};
57use crate::builder::{BuiltImage, ImageBuilder};
58use crate::error::{BuildError, Result};
59use zlayer_paths::ZLayerDirs;
60
61use super::types::{PipelineDefaults, PipelineImage, ZPipeline};
62
63#[cfg(feature = "local-registry")]
64use zlayer_registry::LocalRegistry;
65
/// Outcome of a full pipeline run.
#[derive(Debug)]
pub struct PipelineResult {
    /// Images that built successfully, keyed by pipeline image name.
    pub succeeded: HashMap<String, BuiltImage>,
    /// Build failures keyed by image name, with the error rendered to text.
    pub failed: HashMap<String, String>,
    /// Wall-clock duration of the whole run in milliseconds.
    pub total_time_ms: u64,
}
76
77impl PipelineResult {
78 #[must_use]
80 pub fn is_success(&self) -> bool {
81 self.failed.is_empty()
82 }
83
84 #[must_use]
86 pub fn total_images(&self) -> usize {
87 self.succeeded.len() + self.failed.len()
88 }
89}
90
/// Orchestrates building every image in a pipeline: resolves the dependency
/// graph into waves, builds each wave concurrently, and optionally pushes
/// the results afterwards.
pub struct PipelineExecutor {
    /// Parsed pipeline definition driving the run.
    pipeline: ZPipeline,
    /// Directory against which each image's `context`/`file` paths resolve.
    base_dir: PathBuf,
    /// Buildah CLI executor, used whenever `backend` is `None`.
    executor: BuildahExecutor,
    /// Optional pluggable build backend; takes precedence over `executor`.
    backend: Option<Arc<dyn BuildBackend>>,
    /// Abort remaining work on the first failed build (defaults to `true`).
    fail_fast: bool,
    /// Push images after all builds (seeded from `pipeline.push.after_all`).
    push_enabled: bool,
    /// Shared local registry handed to each image build, when enabled.
    #[cfg(feature = "local-registry")]
    local_registry: Option<Arc<LocalRegistry>>,
}
115
impl PipelineExecutor {
    /// Creates an executor that shells out to the buildah CLI via `executor`.
    ///
    /// Push-after-build is seeded from `pipeline.push.after_all`; fail-fast
    /// defaults to `true`. Both can be changed with [`Self::fail_fast`] and
    /// [`Self::push`].
    #[must_use]
    pub fn new(pipeline: ZPipeline, base_dir: PathBuf, executor: BuildahExecutor) -> Self {
        let push_enabled = pipeline.push.after_all;

        Self {
            pipeline,
            base_dir,
            executor,
            backend: None,
            fail_fast: true,
            push_enabled,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        }
    }

    /// Creates an executor that routes build operations through the given
    /// [`BuildBackend`] instead of the buildah CLI.
    #[must_use]
    pub fn with_backend(
        pipeline: ZPipeline,
        base_dir: PathBuf,
        backend: Arc<dyn BuildBackend>,
    ) -> Self {
        let push_enabled = pipeline.push.after_all;

        Self {
            pipeline,
            base_dir,
            executor: BuildahExecutor::default(),
            backend: Some(backend),
            fail_fast: true,
            push_enabled,
            #[cfg(feature = "local-registry")]
            local_registry: None,
        }
    }

    /// Sets whether the run aborts on the first build failure (builder-style).
    #[must_use]
    pub fn fail_fast(mut self, fail_fast: bool) -> Self {
        self.fail_fast = fail_fast;
        self
    }

    /// Overrides the push-after-build setting from the pipeline definition.
    #[must_use]
    pub fn push(mut self, enabled: bool) -> Self {
        self.push_enabled = enabled;
        self
    }

    /// Attaches a shared local registry made available to each image build.
    #[cfg(feature = "local-registry")]
    #[must_use]
    pub fn with_local_registry(mut self, registry: Arc<LocalRegistry>) -> Self {
        self.local_registry = Some(registry);
        self
    }

    /// Topologically sorts the images into "waves": each wave contains only
    /// images whose `depends_on` entries were all assigned to earlier waves,
    /// so everything within a single wave can be built concurrently.
    ///
    /// Errors on a dependency naming an unknown image, or on a cycle
    /// (detected when a pass assigns nothing new).
    fn resolve_execution_order(&self) -> Result<Vec<Vec<String>>> {
        let mut waves: Vec<Vec<String>> = Vec::new();
        let mut assigned: HashSet<String> = HashSet::new();
        let mut remaining: HashSet<String> = self.pipeline.images.keys().cloned().collect();

        // Validate every dependency reference up front so a bad name fails
        // fast instead of surfacing as a bogus "circular dependency".
        for (name, image) in &self.pipeline.images {
            for dep in &image.depends_on {
                if !self.pipeline.images.contains_key(dep) {
                    return Err(BuildError::invalid_instruction(
                        "pipeline",
                        format!("Image '{name}' depends on unknown image '{dep}'"),
                    ));
                }
            }
        }

        while !remaining.is_empty() {
            let mut wave: Vec<String> = Vec::new();

            // Everything whose dependencies are already satisfied joins this
            // wave. (HashSet iteration, so order WITHIN a wave is unspecified.)
            for name in &remaining {
                let image = &self.pipeline.images[name];
                let deps_satisfied = image.depends_on.iter().all(|d| assigned.contains(d));
                if deps_satisfied {
                    wave.push(name.clone());
                }
            }

            // No progress while images remain means the rest form a cycle.
            if wave.is_empty() {
                return Err(BuildError::CircularDependency {
                    stages: remaining.into_iter().collect(),
                });
            }

            for name in &wave {
                remaining.remove(name);
                assigned.insert(name.clone());
            }

            waves.push(wave);
        }

        Ok(waves)
    }

    /// Runs the full pipeline: builds every wave in order, then (when pushing
    /// is enabled and nothing failed) pushes all tags of every built image.
    ///
    /// With `fail_fast` set, the first build error is returned directly as
    /// `Err` and no [`PipelineResult`] is produced.
    /// NOTE(review): because of that early return, the `fail_fast` check at
    /// the top of the wave loop below looks unreachable — confirm intent.
    pub async fn run(&self) -> Result<PipelineResult> {
        let start = std::time::Instant::now();
        let waves = self.resolve_execution_order()?;

        let mut succeeded: HashMap<String, BuiltImage> = HashMap::new();
        let mut failed: HashMap<String, String> = HashMap::new();

        info!(
            "Building {} images in {} waves",
            self.pipeline.images.len(),
            waves.len()
        );

        for (wave_idx, wave) in waves.iter().enumerate() {
            info!("Wave {}: {:?}", wave_idx, wave);

            if self.fail_fast && !failed.is_empty() {
                warn!("Aborting pipeline due to previous failures (fail_fast enabled)");
                break;
            }

            let wave_results = self.build_wave(wave).await;

            for (name, result) in wave_results {
                match result {
                    Ok(image) => {
                        info!("[{}] Build succeeded: {}", name, image.image_id);
                        succeeded.insert(name, image);
                    }
                    Err(e) => {
                        error!("[{}] Build failed: {}", name, e);
                        failed.insert(name.clone(), e.to_string());

                        if self.fail_fast {
                            return Err(e);
                        }
                    }
                }
            }
        }

        // Push phase only runs on a fully green build.
        if self.push_enabled && failed.is_empty() {
            info!("Pushing {} images", succeeded.len());

            // Backend path: materialize secondary tags as aliases of the
            // first tag before pushing. Tag failures are logged, not fatal.
            if let Some(ref backend) = self.backend {
                for image in succeeded.values() {
                    if image.tags.len() > 1 {
                        let first = &image.tags[0];
                        for secondary in &image.tags[1..] {
                            if let Err(e) = backend.tag_image(first, secondary).await {
                                warn!("Failed to tag {} as {}: {}", first, secondary, e);
                            }
                        }
                    }
                }
            }

            // Push failures are logged per-tag but do not fail the run.
            for (name, image) in &succeeded {
                for tag in &image.tags {
                    let push_result = if image.is_manifest {
                        self.push_manifest(tag).await
                    } else {
                        self.push_image(tag).await
                    };

                    if let Err(e) = push_result {
                        warn!("[{}] Failed to push {}: {}", name, tag, e);
                    } else {
                        info!("[{}] Pushed: {}", name, tag);
                    }
                }
            }
        }

        #[allow(clippy::cast_possible_truncation)]
        let total_time_ms = start.elapsed().as_millis() as u64;

        Ok(PipelineResult {
            succeeded,
            failed,
            total_time_ms,
        })
    }

    /// Builds all images of one wave concurrently on a [`JoinSet`].
    ///
    /// Shared inputs are cloned into each spawned task. Returns one
    /// `(image name, build result)` pair per task in completion order; a
    /// panicked task is reported under the placeholder name `"unknown"`
    /// because the real name is lost with the panic.
    async fn build_wave(&self, wave: &[String]) -> Vec<(String, Result<BuiltImage>)> {
        let pipeline = Arc::new(self.pipeline.clone());
        let base_dir = Arc::new(self.base_dir.clone());
        let executor = self.executor.clone();
        let backend = self.backend.clone();

        // Only the registry's root path crosses the task boundary; each task
        // re-opens the registry from it.
        #[cfg(feature = "local-registry")]
        let registry_root: Option<PathBuf> =
            self.local_registry.as_ref().map(|r| r.root().to_path_buf());
        #[cfg(not(feature = "local-registry"))]
        let registry_root: Option<PathBuf> = None;

        let mut set = JoinSet::new();

        for name in wave {
            let name = name.clone();
            let pipeline = Arc::clone(&pipeline);
            let base_dir = Arc::clone(&base_dir);
            let executor = executor.clone();
            let backend = backend.clone();
            let registry_root = registry_root.clone();

            set.spawn(async move {
                let platforms = {
                    let image_config = &pipeline.images[&name];
                    effective_platforms(image_config, &pipeline.defaults)
                };

                // 0 platforms = host build, 1 = targeted single-platform
                // build, 2+ = per-platform builds plus a manifest list.
                let result = match platforms.len() {
                    0 => {
                        build_single_image(
                            &name,
                            &pipeline,
                            &base_dir,
                            executor,
                            backend.as_ref().map(Arc::clone),
                            None,
                            registry_root.as_deref(),
                        )
                        .await
                    }
                    1 => {
                        let platform = platforms[0].clone();
                        build_single_image(
                            &name,
                            &pipeline,
                            &base_dir,
                            executor,
                            backend.as_ref().map(Arc::clone),
                            Some(&platform),
                            registry_root.as_deref(),
                        )
                        .await
                    }
                    _ => {
                        build_multiplatform_image(
                            &name,
                            &pipeline,
                            &base_dir,
                            executor,
                            backend.as_ref().map(Arc::clone),
                            &platforms,
                            registry_root.as_deref(),
                        )
                        .await
                    }
                };

                (name, result)
            });
        }

        let mut results = Vec::new();
        while let Some(join_result) = set.join_next().await {
            match join_result {
                Ok((name, result)) => {
                    results.push((name, result));
                }
                Err(e) => {
                    error!("Build task panicked: {}", e);
                    results.push((
                        "unknown".to_string(),
                        Err(BuildError::invalid_instruction(
                            "pipeline",
                            format!("Build task panicked: {e}"),
                        )),
                    ));
                }
            }
        }

        results
    }

    /// Pushes a single image tag via the backend if set, else `buildah push`.
    async fn push_image(&self, tag: &str) -> Result<()> {
        if let Some(ref backend) = self.backend {
            return backend.push_image(tag, None).await;
        }
        let cmd = BuildahCommand::push(tag);
        self.executor.execute_checked(&cmd).await?;
        Ok(())
    }

    /// Pushes a manifest list to `docker://<tag>` via the backend if set,
    /// else `buildah manifest push`.
    async fn push_manifest(&self, tag: &str) -> Result<()> {
        if let Some(ref backend) = self.backend {
            let destination = format!("docker://{tag}");
            return backend.manifest_push(tag, &destination).await;
        }
        let destination = format!("docker://{tag}");
        let cmd = BuildahCommand::manifest_push(tag, &destination);
        self.executor.execute_checked(&cmd).await?;
        Ok(())
    }
}
506
507fn effective_platforms(image: &PipelineImage, defaults: &PipelineDefaults) -> Vec<String> {
513 if image.platforms.is_empty() {
514 defaults.platforms.clone()
515 } else {
516 image.platforms.clone()
517 }
518}
519
/// Converts an OCI platform string (e.g. `linux/arm64/v8`) into a short tag
/// suffix (`arm64-v8`): the OS component is dropped and the architecture and
/// optional variant are joined with `-`. A string without `/` is returned
/// unchanged; components past the variant are ignored.
fn platform_to_suffix(platform: &str) -> String {
    let mut components = platform.split('/');
    let _os = components.next(); // split always yields at least one item

    match (components.next(), components.next()) {
        // No '/' at all: nothing to strip, keep the input as-is.
        (None, _) => platform.replace('/', "-"),
        // os/arch: the suffix is just the architecture.
        (Some(arch), None) => arch.to_string(),
        // os/arch/variant[/...]: join arch and variant.
        (Some(arch), Some(variant)) => format!("{arch}-{variant}"),
    }
}
536
537fn apply_pipeline_config(
543 mut builder: ImageBuilder,
544 image_config: &PipelineImage,
545 defaults: &PipelineDefaults,
546) -> ImageBuilder {
547 let mut args = defaults.build_args.clone();
549 args.extend(image_config.build_args.clone());
550 builder = builder.build_args(args);
551
552 if let Some(fmt) = image_config.format.as_ref().or(defaults.format.as_ref()) {
554 builder = builder.format(fmt);
555 }
556
557 if image_config.no_cache.unwrap_or(defaults.no_cache) {
559 builder = builder.no_cache();
560 }
561
562 let mut cache_mounts = defaults.cache_mounts.clone();
564 cache_mounts.extend(image_config.cache_mounts.clone());
565 if !cache_mounts.is_empty() {
566 let run_mounts: Vec<_> = cache_mounts
567 .iter()
568 .map(crate::zimage::convert_cache_mount)
569 .collect();
570 builder = builder.default_cache_mounts(run_mounts);
571 }
572
573 let retries = image_config.retries.or(defaults.retries).unwrap_or(0);
575 if retries > 0 {
576 builder = builder.retries(retries);
577 }
578
579 builder
580}
581
582fn apply_build_file(builder: ImageBuilder, file_path: &Path) -> ImageBuilder {
585 let file_name = file_path
586 .file_name()
587 .map(|n| n.to_string_lossy().to_string())
588 .unwrap_or_default();
589 let extension = file_path
590 .extension()
591 .map(|e| e.to_string_lossy().to_string())
592 .unwrap_or_default();
593
594 if extension == "yaml" || extension == "yml" || file_name.starts_with("ZImagefile") {
595 builder.zimagefile(file_path)
596 } else {
597 builder.dockerfile(file_path)
598 }
599}
600
601async fn compute_file_hash(path: &Path) -> Option<String> {
605 use sha2::{Digest, Sha256};
606
607 let content = tokio::fs::read(path).await.ok()?;
608 let mut hasher = Sha256::new();
609 hasher.update(&content);
610 Some(format!("{:x}", hasher.finalize()))
611}
612
/// Makes an image reference safe to use as a directory name by replacing the
/// path/tag/digest separators (`/`, `:`, `@`) with underscores.
fn sanitize_image_name_for_cache(image: &str) -> String {
    image
        .chars()
        .map(|c| if matches!(c, '/' | ':' | '@') { '_' } else { c })
        .collect()
}
619
620async fn check_cached_image_hash(
625 data_dir: &Path,
626 tag: &str,
627 expected_hash: &str,
628) -> Option<String> {
629 let sanitized = sanitize_image_name_for_cache(tag);
630 let config_path = data_dir.join("images").join(&sanitized).join("config.json");
631 let data = tokio::fs::read_to_string(&config_path).await.ok()?;
632 let config: CachedImageConfig = serde_json::from_str(&data).ok()?;
633 if config.source_hash.as_deref() == Some(expected_hash) {
634 Some(sanitized)
635 } else {
636 None
637 }
638}
639
/// Builds one image for zero-or-one platform (`platform: None` builds for
/// the host platform).
///
/// Before building, the build file is hashed; if a cached image recorded the
/// same `source_hash` for the first expanded tag, the build is skipped and a
/// placeholder [`BuiltImage`] is returned (`layer_count: 1` and `size: 0`
/// are stand-ins, not real measurements).
/// NOTE(review): the cache key covers only the build file, not the build
/// context contents — confirm that context-only changes may reuse the cache.
async fn build_single_image(
    name: &str,
    pipeline: &ZPipeline,
    base_dir: &Path,
    executor: BuildahExecutor,
    backend: Option<Arc<dyn BuildBackend>>,
    platform: Option<&str>,
    registry_root: Option<&Path>,
) -> Result<BuiltImage> {
    let image_config = &pipeline.images[name];
    let context = base_dir.join(&image_config.context);
    let file_path = base_dir.join(&image_config.file);

    // Cache short-circuit: skip the build if the first tag's cached entry
    // was produced from an identical build file.
    let file_hash = compute_file_hash(&file_path).await;
    if let Some(ref hash) = file_hash {
        let data_dir = ZLayerDirs::default_data_dir();

        let expanded_tags: Vec<String> = image_config
            .tags
            .iter()
            .map(|t| expand_tag_with_vars(t, &pipeline.vars))
            .collect();

        if let Some(first_tag) = expanded_tags.first() {
            if let Some(cached_id) = check_cached_image_hash(&data_dir, first_tag, hash).await {
                info!(
                    "[{}] Skipping build — cached image hash matches ({})",
                    name, cached_id
                );
                return Ok(BuiltImage {
                    image_id: cached_id,
                    tags: expanded_tags,
                    layer_count: 1,
                    size: 0,
                    build_time_ms: 0,
                    is_manifest: false,
                });
            }
        }
    }

    // No cache hit: build through the configured backend, falling back to
    // a buildah-CLI backend wrapping `executor`.
    let effective_backend: Arc<dyn BuildBackend> = backend
        .unwrap_or_else(|| Arc::new(crate::backend::BuildahBackend::with_executor(executor)));
    let mut builder = ImageBuilder::with_backend(&context, effective_backend)?;

    builder = apply_build_file(builder, &file_path);

    // Record the source hash so future runs can hit the cache check above.
    if let Some(hash) = file_hash {
        builder = builder.source_hash(hash);
    }

    if let Some(plat) = platform {
        builder = builder.platform(plat);
    }

    for tag in &image_config.tags {
        let expanded = expand_tag_with_vars(tag, &pipeline.vars);
        builder = builder.tag(expanded);
    }

    builder = apply_pipeline_config(builder, image_config, &pipeline.defaults);

    // Each build re-opens the shared registry from its root path.
    #[cfg(feature = "local-registry")]
    if let Some(root) = registry_root {
        let shared_registry = LocalRegistry::new(root.to_path_buf()).await.map_err(|e| {
            BuildError::invalid_instruction(
                "pipeline",
                format!("failed to open local registry: {e}"),
            )
        })?;
        builder = builder.with_local_registry(shared_registry);
    }

    builder.build().await
}
731
/// Builds `name` once per requested platform, tagging each per-platform
/// image with a `-<suffix>` tag, then assembles the results into a single
/// multi-arch manifest named after the first expanded tag.
///
/// The returned [`BuiltImage`] describes the manifest (`is_manifest: true`);
/// `layer_count`/`size` are sums over the per-platform builds.
/// NOTE(review): unlike `build_single_image`, no source-hash cache check is
/// performed here — every platform is rebuilt on each run. Confirm intent.
async fn build_multiplatform_image(
    name: &str,
    pipeline: &ZPipeline,
    base_dir: &Path,
    executor: BuildahExecutor,
    backend: Option<Arc<dyn BuildBackend>>,
    platforms: &[String],
    registry_root: Option<&Path>,
) -> Result<BuiltImage> {
    let image_config = &pipeline.images[name];
    let start_time = std::time::Instant::now();

    // Expand ${VAR} placeholders once; these are the user-facing tags.
    let expanded_tags: Vec<String> = image_config
        .tags
        .iter()
        .map(|t| expand_tag_with_vars(t, &pipeline.vars))
        .collect();

    let manifest_name = expanded_tags
        .first()
        .cloned()
        .unwrap_or_else(|| format!("zlayer-manifest-{name}"));

    let mut arch_tags: Vec<String> = Vec::new();
    let mut total_layers = 0usize;
    let mut total_size = 0u64;

    // Platforms are built sequentially, not concurrently.
    for platform in platforms {
        let suffix = platform_to_suffix(platform);
        let platform_tags: Vec<String> = expanded_tags
            .iter()
            .map(|t| format!("{t}-{suffix}"))
            .collect();

        info!("[{name}] Building for platform {platform}");

        let context = base_dir.join(&image_config.context);
        let file_path = base_dir.join(&image_config.file);

        let effective_backend: Arc<dyn BuildBackend> = match backend {
            Some(ref b) => Arc::clone(b),
            None => Arc::new(crate::backend::BuildahBackend::with_executor(
                executor.clone(),
            )),
        };
        let mut builder = ImageBuilder::with_backend(&context, effective_backend)?;

        builder = apply_build_file(builder, &file_path);

        builder = builder.platform(platform);

        for tag in &platform_tags {
            builder = builder.tag(tag);
        }

        builder = apply_pipeline_config(builder, image_config, &pipeline.defaults);

        // Each platform build re-opens the shared registry from its root.
        #[cfg(feature = "local-registry")]
        if let Some(root) = registry_root {
            let shared_registry = LocalRegistry::new(root.to_path_buf()).await.map_err(|e| {
                BuildError::invalid_instruction(
                    "pipeline",
                    format!("failed to open local registry: {e}"),
                )
            })?;
            builder = builder.with_local_registry(shared_registry);
        }

        let built = builder.build().await?;
        total_layers += built.layer_count;
        total_size += built.size;

        // Only the first per-platform tag goes into the manifest.
        if let Some(first_tag) = platform_tags.first() {
            arch_tags.push(first_tag.clone());
        }
    }

    assemble_manifest(
        name,
        &manifest_name,
        &arch_tags,
        &expanded_tags,
        backend.as_ref(),
        &executor,
    )
    .await?;

    #[allow(clippy::cast_possible_truncation)]
    let build_time_ms = start_time.elapsed().as_millis() as u64;

    Ok(BuiltImage {
        image_id: manifest_name,
        tags: expanded_tags,
        layer_count: total_layers,
        size: total_size,
        build_time_ms,
        is_manifest: true,
    })
}
845
/// Creates manifest `manifest_name`, adds each per-architecture image tag to
/// it, and applies the remaining expanded tags (all but the first, which is
/// the manifest name itself) as aliases.
///
/// Every step uses the pluggable backend when provided, otherwise shells out
/// to the equivalent `buildah manifest ...` / `buildah tag` command. Any
/// step failing aborts the whole assembly with a pipeline error.
async fn assemble_manifest(
    name: &str,
    manifest_name: &str,
    arch_tags: &[String],
    expanded_tags: &[String],
    backend: Option<&Arc<dyn BuildBackend>>,
    executor: &BuildahExecutor,
) -> Result<()> {
    info!("[{name}] Creating manifest: {manifest_name}");
    if let Some(backend) = backend {
        backend
            .manifest_create(manifest_name)
            .await
            .map_err(|e| BuildError::pipeline_error(format!("manifest create failed: {e}")))?;
    } else {
        executor
            .execute_checked(&BuildahCommand::manifest_create(manifest_name))
            .await
            .map_err(|e| BuildError::pipeline_error(format!("manifest create failed: {e}")))?;
    }

    for arch_tag in arch_tags {
        info!("[{name}] Adding to manifest: {arch_tag}");
        if let Some(backend) = backend {
            backend
                .manifest_add(manifest_name, arch_tag)
                .await
                .map_err(|e| BuildError::pipeline_error(format!("manifest add failed: {e}")))?;
        } else {
            executor
                .execute_checked(&BuildahCommand::manifest_add(manifest_name, arch_tag))
                .await
                .map_err(|e| BuildError::pipeline_error(format!("manifest add failed: {e}")))?;
        }
    }

    // The first tag already names the manifest; alias the rest to it.
    for tag in expanded_tags.iter().skip(1) {
        if let Some(backend) = backend {
            backend
                .tag_image(manifest_name, tag)
                .await
                .map_err(|e| BuildError::pipeline_error(format!("manifest tag failed: {e}")))?;
        } else {
            executor
                .execute_checked(&BuildahCommand::tag(manifest_name, tag))
                .await
                .map_err(|e| BuildError::pipeline_error(format!("manifest tag failed: {e}")))?;
        }
    }

    Ok(())
}
905
/// Substitutes `${KEY}` placeholders in `tag` with values from `vars`.
/// Placeholders with no matching key are left untouched.
fn expand_tag_with_vars(tag: &str, vars: &HashMap<String, String>) -> String {
    vars.iter().fold(tag.to_string(), |expanded, (key, value)| {
        expanded.replace(&format!("${{{key}}}"), value)
    })
}
916
// Unit tests: wave resolution (ordering, parallelism, missing/circular
// deps), tag expansion, result accounting, and pure helper functions.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::pipeline::parse_pipeline;

    #[test]
    fn test_resolve_execution_order_simple() {
        let yaml = r"
images:
  app:
    file: Dockerfile
";
        let pipeline = parse_pipeline(yaml).unwrap();
        let executor = PipelineExecutor::new(
            pipeline,
            PathBuf::from("/tmp"),
            BuildahExecutor::with_path("/usr/bin/buildah"),
        );

        let waves = executor.resolve_execution_order().unwrap();
        assert_eq!(waves.len(), 1);
        assert_eq!(waves[0], vec!["app"]);
    }

    #[test]
    fn test_resolve_execution_order_with_deps() {
        let yaml = r"
images:
  base:
    file: Dockerfile.base
  app:
    file: Dockerfile.app
    depends_on: [base]
  test:
    file: Dockerfile.test
    depends_on: [app]
";
        let pipeline = parse_pipeline(yaml).unwrap();
        let executor = PipelineExecutor::new(
            pipeline,
            PathBuf::from("/tmp"),
            BuildahExecutor::with_path("/usr/bin/buildah"),
        );

        let waves = executor.resolve_execution_order().unwrap();
        assert_eq!(waves.len(), 3);
        assert_eq!(waves[0], vec!["base"]);
        assert_eq!(waves[1], vec!["app"]);
        assert_eq!(waves[2], vec!["test"]);
    }

    #[test]
    fn test_resolve_execution_order_parallel() {
        let yaml = r"
images:
  base:
    file: Dockerfile.base
  app1:
    file: Dockerfile.app1
    depends_on: [base]
  app2:
    file: Dockerfile.app2
    depends_on: [base]
";
        let pipeline = parse_pipeline(yaml).unwrap();
        let executor = PipelineExecutor::new(
            pipeline,
            PathBuf::from("/tmp"),
            BuildahExecutor::with_path("/usr/bin/buildah"),
        );

        let waves = executor.resolve_execution_order().unwrap();
        assert_eq!(waves.len(), 2);
        assert_eq!(waves[0], vec!["base"]);
        // Order within a wave is unspecified (HashSet iteration), so only
        // membership is asserted.
        assert_eq!(waves[1].len(), 2);
        assert!(waves[1].contains(&"app1".to_string()));
        assert!(waves[1].contains(&"app2".to_string()));
    }

    #[test]
    fn test_resolve_execution_order_missing_dep() {
        let yaml = r"
images:
  app:
    file: Dockerfile
    depends_on: [missing]
";
        let pipeline = parse_pipeline(yaml).unwrap();
        let executor = PipelineExecutor::new(
            pipeline,
            PathBuf::from("/tmp"),
            BuildahExecutor::with_path("/usr/bin/buildah"),
        );

        let result = executor.resolve_execution_order();
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("missing"));
    }

    #[test]
    fn test_resolve_execution_order_circular() {
        let yaml = r"
images:
  a:
    file: Dockerfile.a
    depends_on: [b]
  b:
    file: Dockerfile.b
    depends_on: [a]
";
        let pipeline = parse_pipeline(yaml).unwrap();
        let executor = PipelineExecutor::new(
            pipeline,
            PathBuf::from("/tmp"),
            BuildahExecutor::with_path("/usr/bin/buildah"),
        );

        let result = executor.resolve_execution_order();
        assert!(result.is_err());
        match result.unwrap_err() {
            BuildError::CircularDependency { stages } => {
                assert!(stages.contains(&"a".to_string()));
                assert!(stages.contains(&"b".to_string()));
            }
            e => panic!("Expected CircularDependency error, got: {e:?}"),
        }
    }

    #[test]
    fn test_expand_tag() {
        let mut vars = HashMap::new();
        vars.insert("VERSION".to_string(), "1.0.0".to_string());
        vars.insert("REGISTRY".to_string(), "ghcr.io/myorg".to_string());

        let tag = "${REGISTRY}/app:${VERSION}";
        let expanded = expand_tag_with_vars(tag, &vars);
        assert_eq!(expanded, "ghcr.io/myorg/app:1.0.0");
    }

    #[test]
    fn test_expand_tag_partial() {
        let mut vars = HashMap::new();
        vars.insert("VERSION".to_string(), "1.0.0".to_string());

        // Unknown placeholders must survive expansion unchanged.
        let tag = "myapp:${VERSION}-${UNKNOWN}";
        let expanded = expand_tag_with_vars(tag, &vars);
        assert_eq!(expanded, "myapp:1.0.0-${UNKNOWN}");
    }

    #[test]
    fn test_pipeline_result_is_success() {
        let mut result = PipelineResult {
            succeeded: HashMap::new(),
            failed: HashMap::new(),
            total_time_ms: 100,
        };

        assert!(result.is_success());

        result.failed.insert("app".to_string(), "error".to_string());
        assert!(!result.is_success());
    }

    #[test]
    fn test_pipeline_result_total_images() {
        let mut result = PipelineResult {
            succeeded: HashMap::new(),
            failed: HashMap::new(),
            total_time_ms: 100,
        };

        result.succeeded.insert(
            "app1".to_string(),
            BuiltImage {
                image_id: "sha256:abc".to_string(),
                tags: vec!["app1:latest".to_string()],
                layer_count: 5,
                size: 0,
                build_time_ms: 50,
                is_manifest: false,
            },
        );
        result
            .failed
            .insert("app2".to_string(), "error".to_string());

        assert_eq!(result.total_images(), 2);
    }

    #[test]
    fn test_builder_methods() {
        let yaml = r"
images:
  app:
    file: Dockerfile
push:
  after_all: true
";
        let pipeline = parse_pipeline(yaml).unwrap();
        // Builder setters must override the pipeline's push.after_all.
        let executor = PipelineExecutor::new(
            pipeline,
            PathBuf::from("/tmp"),
            BuildahExecutor::with_path("/usr/bin/buildah"),
        )
        .fail_fast(false)
        .push(false);

        assert!(!executor.fail_fast);
        assert!(!executor.push_enabled);
    }

    // Minimal PipelineImage fixture for the helper-function tests below.
    fn test_pipeline_image() -> PipelineImage {
        PipelineImage {
            file: PathBuf::from("Dockerfile"),
            context: PathBuf::from("."),
            tags: vec![],
            build_args: HashMap::new(),
            depends_on: vec![],
            no_cache: None,
            format: None,
            cache_mounts: vec![],
            retries: None,
            platforms: vec![],
        }
    }

    #[test]
    fn test_platform_to_suffix() {
        assert_eq!(platform_to_suffix("linux/amd64"), "amd64");
        assert_eq!(platform_to_suffix("linux/arm64"), "arm64");
        assert_eq!(platform_to_suffix("linux/arm64/v8"), "arm64-v8");
        assert_eq!(platform_to_suffix("linux"), "linux");
    }

    #[test]
    fn test_effective_platforms_image_overrides() {
        let defaults = PipelineDefaults {
            platforms: vec!["linux/amd64".into()],
            ..Default::default()
        };
        let image = PipelineImage {
            platforms: vec!["linux/arm64".into()],
            ..test_pipeline_image()
        };
        assert_eq!(effective_platforms(&image, &defaults), vec!["linux/arm64"]);
    }

    #[test]
    fn test_effective_platforms_inherits_defaults() {
        let defaults = PipelineDefaults {
            platforms: vec!["linux/amd64".into()],
            ..Default::default()
        };
        let image = test_pipeline_image();
        assert_eq!(effective_platforms(&image, &defaults), vec!["linux/amd64"]);
    }

    #[test]
    fn test_effective_platforms_empty() {
        let defaults = PipelineDefaults::default();
        let image = test_pipeline_image();
        assert!(effective_platforms(&image, &defaults).is_empty());
    }

    #[test]
    fn test_platform_to_suffix_edge_cases() {
        assert_eq!(platform_to_suffix(""), "");
        assert_eq!(platform_to_suffix("linux"), "linux");
        // Components beyond the variant are dropped.
        assert_eq!(platform_to_suffix("linux/arm/v7/extra"), "arm-v7");
    }

    #[test]
    fn test_effective_platforms_multiple_defaults() {
        let defaults = PipelineDefaults {
            platforms: vec!["linux/amd64".into(), "linux/arm64".into()],
            ..Default::default()
        };
        let image = test_pipeline_image();
        assert_eq!(
            effective_platforms(&image, &defaults),
            vec!["linux/amd64", "linux/arm64"]
        );
    }

    #[test]
    fn test_effective_platforms_image_overrides_multiple() {
        let defaults = PipelineDefaults {
            platforms: vec!["linux/amd64".into(), "linux/arm64".into()],
            ..Default::default()
        };
        let image = PipelineImage {
            platforms: vec!["linux/s390x".into()],
            ..test_pipeline_image()
        };
        assert_eq!(effective_platforms(&image, &defaults), vec!["linux/s390x"]);
    }
}