use std::collections::{HashMap, HashSet};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use tokio::task::JoinSet;
use tracing::{error, info, warn};
use serde::Deserialize;
use crate::backend::BuildBackend;
/// Minimal slice of a cached image's `config.json`, deserialized only to
/// compare the recorded source hash against the current build file's hash.
#[derive(Deserialize)]
struct CachedImageConfig {
    /// Hash of the build file that produced the cached image, if recorded.
    #[serde(default)]
    source_hash: Option<String>,
}
use crate::buildah::{BuildahCommand, BuildahExecutor};
use crate::builder::{BuiltImage, ImageBuilder};
use crate::error::{BuildError, Result};
use zlayer_paths::ZLayerDirs;
use super::types::{PipelineDefaults, PipelineImage, ZPipeline};
#[cfg(feature = "local-registry")]
use zlayer_registry::LocalRegistry;
/// Outcome of a full pipeline run: which images built, which failed, and
/// how long the whole run took.
#[derive(Debug)]
pub struct PipelineResult {
    /// Successfully built images, keyed by pipeline image name.
    pub succeeded: HashMap<String, BuiltImage>,
    /// Failed builds, keyed by image name, with the error message as value.
    pub failed: HashMap<String, String>,
    /// Wall-clock duration of the entire pipeline run, in milliseconds.
    pub total_time_ms: u64,
}
impl PipelineResult {
    /// Total number of images the pipeline attempted, successful or not.
    #[must_use]
    pub fn total_images(&self) -> usize {
        self.failed.len() + self.succeeded.len()
    }

    /// `true` when no image build failed.
    #[must_use]
    pub fn is_success(&self) -> bool {
        self.failed.is_empty()
    }
}
/// Drives a [`ZPipeline`]: resolves dependency waves, builds each wave's
/// images concurrently, and optionally tags and pushes the results.
pub struct PipelineExecutor {
    pipeline: ZPipeline,
    // Directory that per-image `context`/`file` paths are resolved against.
    base_dir: PathBuf,
    // Buildah CLI executor used when no pluggable backend is configured.
    executor: BuildahExecutor,
    // Optional build backend; `None` falls back to the buildah CLI.
    backend: Option<Arc<dyn BuildBackend>>,
    // Abort remaining work on the first failed build (defaults to true).
    fail_fast: bool,
    // Push every tag after all builds succeed (seeded from `push.after_all`).
    push_enabled: bool,
    #[cfg(feature = "local-registry")]
    // Shared local registry; only its root path crosses into build tasks.
    local_registry: Option<Arc<LocalRegistry>>,
}
impl PipelineExecutor {
/// Create an executor that drives builds through the buildah CLI.
///
/// The push behaviour is seeded from the pipeline's own `push.after_all`
/// setting and can be overridden with [`Self::push`].
#[must_use]
pub fn new(pipeline: ZPipeline, base_dir: PathBuf, executor: BuildahExecutor) -> Self {
    // Read the flag before `pipeline` is moved into the struct.
    let after_all = pipeline.push.after_all;
    Self {
        backend: None,
        fail_fast: true,
        push_enabled: after_all,
        #[cfg(feature = "local-registry")]
        local_registry: None,
        pipeline,
        base_dir,
        executor,
    }
}
/// Create an executor that drives builds through a pluggable backend
/// instead of the buildah CLI.
#[must_use]
pub fn with_backend(
    pipeline: ZPipeline,
    base_dir: PathBuf,
    backend: Arc<dyn BuildBackend>,
) -> Self {
    // Read the flag before `pipeline` is moved into the struct.
    let after_all = pipeline.push.after_all;
    Self {
        backend: Some(backend),
        executor: BuildahExecutor::default(),
        fail_fast: true,
        push_enabled: after_all,
        #[cfg(feature = "local-registry")]
        local_registry: None,
        pipeline,
        base_dir,
    }
}
/// Builder-style toggle for fail-fast behaviour. When `true` (the
/// default), the pipeline aborts on the first failed build.
#[must_use]
pub fn fail_fast(mut self, enabled: bool) -> Self {
    self.fail_fast = enabled;
    self
}
/// Builder-style override for pushing images after a fully successful
/// run, replacing the value inherited from the pipeline definition.
#[must_use]
pub fn push(mut self, enabled: bool) -> Self {
    self.push_enabled = enabled;
    self
}
/// Builder-style attachment of a shared local registry; its root path is
/// handed to each build task.
#[cfg(feature = "local-registry")]
#[must_use]
pub fn with_local_registry(mut self, registry: Arc<LocalRegistry>) -> Self {
    self.local_registry = Some(registry);
    self
}
/// Group images into dependency "waves": every image in wave `i` depends
/// only on images scheduled in waves `0..i`.
///
/// # Errors
///
/// Fails when an image depends on an unknown image, or when the leftover
/// images can make no progress (i.e. the graph contains a cycle).
fn resolve_execution_order(&self) -> Result<Vec<Vec<String>>> {
    // Reject references to nonexistent images before doing any work.
    for (name, image) in &self.pipeline.images {
        if let Some(dep) = image
            .depends_on
            .iter()
            .find(|d| !self.pipeline.images.contains_key(*d))
        {
            return Err(BuildError::invalid_instruction(
                "pipeline",
                format!("Image '{name}' depends on unknown image '{dep}'"),
            ));
        }
    }
    let mut assigned: HashSet<String> = HashSet::new();
    let mut remaining: HashSet<String> = self.pipeline.images.keys().cloned().collect();
    let mut waves: Vec<Vec<String>> = Vec::new();
    while !remaining.is_empty() {
        // Everything whose dependencies are already scheduled can run now.
        let wave: Vec<String> = remaining
            .iter()
            .filter(|n| {
                self.pipeline.images[*n]
                    .depends_on
                    .iter()
                    .all(|d| assigned.contains(d))
            })
            .cloned()
            .collect();
        // No progress means the leftover images form at least one cycle.
        if wave.is_empty() {
            return Err(BuildError::CircularDependency {
                stages: remaining.into_iter().collect(),
            });
        }
        for n in &wave {
            remaining.remove(n);
        }
        assigned.extend(wave.iter().cloned());
        waves.push(wave);
    }
    Ok(waves)
}
/// Execute the whole pipeline: build every wave in dependency order, then
/// optionally tag and push the results.
///
/// # Errors
///
/// Fails when the dependency graph is invalid, or — with `fail_fast`
/// enabled — returns the first build error immediately (discarding the
/// partial [`PipelineResult`]).
pub async fn run(&self) -> Result<PipelineResult> {
    let start = std::time::Instant::now();
    // Waves are groups of images whose dependencies are already built.
    let waves = self.resolve_execution_order()?;
    let mut succeeded: HashMap<String, BuiltImage> = HashMap::new();
    let mut failed: HashMap<String, String> = HashMap::new();
    info!(
        "Building {} images in {} waves",
        self.pipeline.images.len(),
        waves.len()
    );
    for (wave_idx, wave) in waves.iter().enumerate() {
        info!("Wave {}: {:?}", wave_idx, wave);
        // NOTE(review): with fail_fast the first failure already returns
        // early below, so this guard appears unreachable today; it only
        // matters if that early return is ever replaced by a `break`.
        if self.fail_fast && !failed.is_empty() {
            warn!("Aborting pipeline due to previous failures (fail_fast enabled)");
            break;
        }
        // All images in a wave build concurrently; results come back
        // only after the whole wave has finished.
        let wave_results = self.build_wave(wave).await;
        for (name, result) in wave_results {
            match result {
                Ok(image) => {
                    info!("[{}] Build succeeded: {}", name, image.image_id);
                    succeeded.insert(name, image);
                }
                Err(e) => {
                    error!("[{}] Build failed: {}", name, e);
                    failed.insert(name.clone(), e.to_string());
                    if self.fail_fast {
                        // Abort immediately; partial results are dropped.
                        return Err(e);
                    }
                }
            }
        }
    }
    // Pushing is all-or-nothing: skipped when any build failed.
    if self.push_enabled && failed.is_empty() {
        info!("Pushing {} images", succeeded.len());
        if let Some(ref backend) = self.backend {
            // Apply secondary tags first so every tag exists before pushing.
            for image in succeeded.values() {
                if image.tags.len() > 1 {
                    let first = &image.tags[0];
                    for secondary in &image.tags[1..] {
                        if let Err(e) = backend.tag_image(first, secondary).await {
                            warn!("Failed to tag {} as {}: {}", first, secondary, e);
                        }
                    }
                }
            }
        }
        for (name, image) in &succeeded {
            for tag in &image.tags {
                // Manifest lists need `manifest push`; plain images use `push`.
                let push_result = if image.is_manifest {
                    self.push_manifest(tag).await
                } else {
                    self.push_image(tag).await
                };
                // Push failures are logged but never fail the pipeline.
                if let Err(e) = push_result {
                    warn!("[{}] Failed to push {}: {}", name, tag, e);
                } else {
                    info!("[{}] Pushed: {}", name, tag);
                }
            }
        }
    }
    #[allow(clippy::cast_possible_truncation)]
    let total_time_ms = start.elapsed().as_millis() as u64;
    Ok(PipelineResult {
        succeeded,
        failed,
        total_time_ms,
    })
}
/// Build every image in `wave` concurrently and collect `(name, result)`
/// pairs in completion order (not wave order).
///
/// Shared state is cloned into `Arc`s so each spawned task owns what it
/// needs; the local registry (when the feature is enabled) crosses the
/// task boundary as its root path and is reopened inside each task.
async fn build_wave(&self, wave: &[String]) -> Vec<(String, Result<BuiltImage>)> {
    let pipeline = Arc::new(self.pipeline.clone());
    let base_dir = Arc::new(self.base_dir.clone());
    let executor = self.executor.clone();
    let backend = self.backend.clone();
    #[cfg(feature = "local-registry")]
    let registry_root: Option<PathBuf> =
        self.local_registry.as_ref().map(|r| r.root().to_path_buf());
    #[cfg(not(feature = "local-registry"))]
    let registry_root: Option<PathBuf> = None;
    let mut set = JoinSet::new();
    for name in wave {
        let name = name.clone();
        let pipeline = Arc::clone(&pipeline);
        let base_dir = Arc::clone(&base_dir);
        let executor = executor.clone();
        let backend = backend.clone();
        let registry_root = registry_root.clone();
        set.spawn(async move {
            // The platform list decides which build strategy runs below.
            let platforms = {
                let image_config = &pipeline.images[&name];
                effective_platforms(image_config, &pipeline.defaults)
            };
            let result = match platforms.len() {
                // No platform configured: plain build, no --platform flag.
                0 => {
                    build_single_image(
                        &name,
                        &pipeline,
                        &base_dir,
                        executor,
                        backend.as_ref().map(Arc::clone),
                        None,
                        registry_root.as_deref(),
                    )
                    .await
                }
                // Exactly one platform: single targeted build.
                1 => {
                    let platform = platforms[0].clone();
                    build_single_image(
                        &name,
                        &pipeline,
                        &base_dir,
                        executor,
                        backend.as_ref().map(Arc::clone),
                        Some(&platform),
                        registry_root.as_deref(),
                    )
                    .await
                }
                // Several platforms: per-arch builds plus a manifest list.
                _ => {
                    build_multiplatform_image(
                        &name,
                        &pipeline,
                        &base_dir,
                        executor,
                        backend.as_ref().map(Arc::clone),
                        &platforms,
                        registry_root.as_deref(),
                    )
                    .await
                }
            };
            (name, result)
        });
    }
    let mut results = Vec::new();
    while let Some(join_result) = set.join_next().await {
        match join_result {
            Ok((name, result)) => {
                results.push((name, result));
            }
            Err(e) => {
                // A panicked task loses its identity inside the JoinSet,
                // so the failure is reported under a placeholder name.
                error!("Build task panicked: {}", e);
                results.push((
                    "unknown".to_string(),
                    Err(BuildError::invalid_instruction(
                        "pipeline",
                        format!("Build task panicked: {e}"),
                    )),
                ));
            }
        }
    }
    results
}
/// Push a single image tag, preferring the configured backend and
/// falling back to the buildah CLI.
async fn push_image(&self, tag: &str) -> Result<()> {
    match self.backend {
        Some(ref backend) => backend.push_image(tag, None).await,
        None => {
            self.executor
                .execute_checked(&BuildahCommand::push(tag))
                .await?;
            Ok(())
        }
    }
}
/// Push a manifest list to `docker://{tag}`, preferring the configured
/// backend and falling back to the buildah CLI.
async fn push_manifest(&self, tag: &str) -> Result<()> {
    // Both paths target the same docker:// destination.
    let destination = format!("docker://{tag}");
    match self.backend {
        Some(ref backend) => backend.manifest_push(tag, &destination).await,
        None => {
            self.executor
                .execute_checked(&BuildahCommand::manifest_push(tag, &destination))
                .await?;
            Ok(())
        }
    }
}
}
/// Pick the platform list for an image: the image-level list wins, and an
/// empty one falls back to the pipeline defaults.
fn effective_platforms(image: &PipelineImage, defaults: &PipelineDefaults) -> Vec<String> {
    let source = if image.platforms.is_empty() {
        &defaults.platforms
    } else {
        &image.platforms
    };
    source.clone()
}
/// Derive a tag suffix from an OS/arch[/variant] platform string:
/// `linux/arm64/v8` → `arm64-v8`, `linux/amd64` → `amd64`, and a string
/// with no `/` is returned unchanged. Components beyond the variant are
/// dropped.
fn platform_to_suffix(platform: &str) -> String {
    let mut parts = platform.split('/');
    // The leading OS component always exists (split yields >= 1 item).
    let _os = parts.next();
    match (parts.next(), parts.next()) {
        (None, _) => platform.to_string(),
        (Some(arch), None) => arch.to_string(),
        (Some(arch), Some(variant)) => format!("{arch}-{variant}"),
    }
}
/// Layer pipeline-wide defaults and per-image overrides onto `builder`.
fn apply_pipeline_config(
    builder: ImageBuilder,
    image_config: &PipelineImage,
    defaults: &PipelineDefaults,
) -> ImageBuilder {
    // Per-image build args override defaults sharing the same key.
    let mut merged_args = defaults.build_args.clone();
    merged_args.extend(image_config.build_args.clone());
    let mut b = builder.build_args(merged_args);
    // Image-level format wins over the pipeline default, when either is set.
    if let Some(fmt) = image_config.format.as_ref().or(defaults.format.as_ref()) {
        b = b.format(fmt);
    }
    if image_config.no_cache.unwrap_or(defaults.no_cache) {
        b = b.no_cache();
    }
    // Cache mounts accumulate: defaults first, then per-image additions.
    let mut mounts = defaults.cache_mounts.clone();
    mounts.extend(image_config.cache_mounts.clone());
    if !mounts.is_empty() {
        let run_mounts: Vec<_> = mounts
            .iter()
            .map(crate::zimage::convert_cache_mount)
            .collect();
        b = b.default_cache_mounts(run_mounts);
    }
    // A retry count of zero leaves the builder's default untouched.
    match image_config.retries.or(defaults.retries).unwrap_or(0) {
        0 => b,
        n => b.retries(n),
    }
}
/// Route `file_path` to the right builder entry point: YAML files and
/// anything named `ZImagefile*` go through the ZImagefile parser; every
/// other file is treated as a Dockerfile.
fn apply_build_file(builder: ImageBuilder, file_path: &Path) -> ImageBuilder {
    let file_name = file_path
        .file_name()
        .map_or_else(String::new, |n| n.to_string_lossy().into_owned());
    let extension = file_path
        .extension()
        .map_or_else(String::new, |e| e.to_string_lossy().into_owned());
    let is_zimagefile =
        matches!(extension.as_str(), "yaml" | "yml") || file_name.starts_with("ZImagefile");
    if is_zimagefile {
        builder.zimagefile(file_path)
    } else {
        builder.dockerfile(file_path)
    }
}
/// SHA-256 the file at `path`, returned as a lowercase hex string, or
/// `None` when the file cannot be read.
async fn compute_file_hash(path: &Path) -> Option<String> {
    use sha2::{Digest, Sha256};
    let bytes = tokio::fs::read(path).await.ok()?;
    // One-shot digest; no need for an explicit hasher/update cycle.
    let digest = Sha256::digest(&bytes);
    Some(format!("{digest:x}"))
}
/// Turn an image reference into a filesystem-safe cache directory name by
/// replacing `/`, `:` and `@` with underscores.
fn sanitize_image_name_for_cache(image: &str) -> String {
    image
        .chars()
        .map(|c| if matches!(c, '/' | ':' | '@') { '_' } else { c })
        .collect()
}
/// Compare `expected_hash` against the source hash recorded in the cached
/// image's `config.json` under `data_dir/images/<sanitized-tag>/`.
///
/// Returns the sanitized tag (used as the cached image id) on a match;
/// `None` on any read/parse failure or hash mismatch.
async fn check_cached_image_hash(
    data_dir: &Path,
    tag: &str,
    expected_hash: &str,
) -> Option<String> {
    let key = sanitize_image_name_for_cache(tag);
    let config_path = data_dir.join("images").join(&key).join("config.json");
    let raw = tokio::fs::read_to_string(&config_path).await.ok()?;
    let parsed: CachedImageConfig = serde_json::from_str(&raw).ok()?;
    (parsed.source_hash.as_deref() == Some(expected_hash)).then_some(key)
}
/// Build one image (optionally targeting a single `platform`), consulting
/// the on-disk cache first: when the recorded source hash for the first
/// expanded tag matches the current build file's hash, the build is
/// skipped and a synthetic [`BuiltImage`] is returned.
async fn build_single_image(
    name: &str,
    pipeline: &ZPipeline,
    base_dir: &Path,
    executor: BuildahExecutor,
    backend: Option<Arc<dyn BuildBackend>>,
    platform: Option<&str>,
    registry_root: Option<&Path>,
) -> Result<BuiltImage> {
    let image_config = &pipeline.images[name];
    let context = base_dir.join(&image_config.context);
    let file_path = base_dir.join(&image_config.file);
    // Hash covers the build file only — not the build context contents.
    let file_hash = compute_file_hash(&file_path).await;
    if let Some(ref hash) = file_hash {
        let data_dir = ZLayerDirs::default_data_dir();
        let expanded_tags: Vec<String> = image_config
            .tags
            .iter()
            .map(|t| expand_tag_with_vars(t, &pipeline.vars))
            .collect();
        if let Some(first_tag) = expanded_tags.first() {
            // NOTE(review): the cache key is first-tag + build-file hash
            // only; it ignores `platform` and context changes — confirm
            // this is intended before relying on it for cross-arch builds.
            if let Some(cached_id) = check_cached_image_hash(&data_dir, first_tag, hash).await {
                info!(
                    "[{}] Skipping build — cached image hash matches ({})",
                    name, cached_id
                );
                // Synthetic result: layer/size/time stats are placeholders.
                return Ok(BuiltImage {
                    image_id: cached_id,
                    tags: expanded_tags,
                    layer_count: 1,
                    size: 0,
                    build_time_ms: 0,
                    is_manifest: false,
                });
            }
        }
    }
    // Fall back to a buildah-CLI backend when none was supplied.
    let effective_backend: Arc<dyn BuildBackend> = backend
        .unwrap_or_else(|| Arc::new(crate::backend::BuildahBackend::with_executor(executor)));
    let mut builder = ImageBuilder::with_backend(&context, effective_backend)?;
    builder = apply_build_file(builder, &file_path);
    // Record the hash so the next run can take the cache fast path above.
    if let Some(hash) = file_hash {
        builder = builder.source_hash(hash);
    }
    if let Some(plat) = platform {
        builder = builder.platform(plat);
    }
    for tag in &image_config.tags {
        let expanded = expand_tag_with_vars(tag, &pipeline.vars);
        builder = builder.tag(expanded);
    }
    builder = apply_pipeline_config(builder, image_config, &pipeline.defaults);
    #[cfg(feature = "local-registry")]
    if let Some(root) = registry_root {
        // The registry is reopened from its root path inside each task.
        let shared_registry = LocalRegistry::new(root.to_path_buf()).await.map_err(|e| {
            BuildError::invalid_instruction(
                "pipeline",
                format!("failed to open local registry: {e}"),
            )
        })?;
        builder = builder.with_local_registry(shared_registry);
    }
    builder.build().await
}
/// Build `name` once per platform, tagging each artefact with a platform
/// suffix (e.g. `app:1.0-arm64`), then stitch the per-arch images into a
/// single manifest list named after the first expanded tag.
async fn build_multiplatform_image(
    name: &str,
    pipeline: &ZPipeline,
    base_dir: &Path,
    executor: BuildahExecutor,
    backend: Option<Arc<dyn BuildBackend>>,
    platforms: &[String],
    registry_root: Option<&Path>,
) -> Result<BuiltImage> {
    let image_config = &pipeline.images[name];
    let start_time = std::time::Instant::now();
    let expanded_tags: Vec<String> = image_config
        .tags
        .iter()
        .map(|t| expand_tag_with_vars(t, &pipeline.vars))
        .collect();
    // First tag names the manifest; untagged images get a synthetic name.
    let manifest_name = expanded_tags
        .first()
        .cloned()
        .unwrap_or_else(|| format!("zlayer-manifest-{name}"));
    let mut arch_tags: Vec<String> = Vec::new();
    let mut total_layers = 0usize;
    let mut total_size = 0u64;
    // Platforms build sequentially; wave-level concurrency is upstream.
    for platform in platforms {
        let suffix = platform_to_suffix(platform);
        let platform_tags: Vec<String> = expanded_tags
            .iter()
            .map(|t| format!("{t}-{suffix}"))
            .collect();
        info!("[{name}] Building for platform {platform}");
        let context = base_dir.join(&image_config.context);
        let file_path = base_dir.join(&image_config.file);
        let effective_backend: Arc<dyn BuildBackend> = match backend {
            Some(ref b) => Arc::clone(b),
            None => Arc::new(crate::backend::BuildahBackend::with_executor(
                executor.clone(),
            )),
        };
        let mut builder = ImageBuilder::with_backend(&context, effective_backend)?;
        builder = apply_build_file(builder, &file_path);
        builder = builder.platform(platform);
        for tag in &platform_tags {
            builder = builder.tag(tag);
        }
        builder = apply_pipeline_config(builder, image_config, &pipeline.defaults);
        #[cfg(feature = "local-registry")]
        if let Some(root) = registry_root {
            let shared_registry = LocalRegistry::new(root.to_path_buf()).await.map_err(|e| {
                BuildError::invalid_instruction(
                    "pipeline",
                    format!("failed to open local registry: {e}"),
                )
            })?;
            builder = builder.with_local_registry(shared_registry);
        }
        let built = builder.build().await?;
        // Aggregate stats across platforms for the combined result.
        total_layers += built.layer_count;
        total_size += built.size;
        // Only the first tag per platform is added to the manifest.
        if let Some(first_tag) = platform_tags.first() {
            arch_tags.push(first_tag.clone());
        }
    }
    assemble_manifest(
        name,
        &manifest_name,
        &arch_tags,
        &expanded_tags,
        backend.as_ref(),
        &executor,
    )
    .await?;
    #[allow(clippy::cast_possible_truncation)]
    let build_time_ms = start_time.elapsed().as_millis() as u64;
    Ok(BuiltImage {
        image_id: manifest_name,
        tags: expanded_tags,
        layer_count: total_layers,
        size: total_size,
        build_time_ms,
        is_manifest: true,
    })
}
/// Create a manifest list, add every per-arch image to it, and alias any
/// secondary tags — via the backend when present, buildah CLI otherwise.
async fn assemble_manifest(
    name: &str,
    manifest_name: &str,
    arch_tags: &[String],
    expanded_tags: &[String],
    backend: Option<&Arc<dyn BuildBackend>>,
    executor: &BuildahExecutor,
) -> Result<()> {
    info!("[{name}] Creating manifest: {manifest_name}");
    let created = match backend {
        Some(b) => b.manifest_create(manifest_name).await.map(|_| ()),
        None => executor
            .execute_checked(&BuildahCommand::manifest_create(manifest_name))
            .await
            .map(|_| ()),
    };
    created.map_err(|e| BuildError::pipeline_error(format!("manifest create failed: {e}")))?;
    for arch_tag in arch_tags {
        info!("[{name}] Adding to manifest: {arch_tag}");
        let added = match backend {
            Some(b) => b.manifest_add(manifest_name, arch_tag).await.map(|_| ()),
            None => executor
                .execute_checked(&BuildahCommand::manifest_add(manifest_name, arch_tag))
                .await
                .map(|_| ()),
        };
        added.map_err(|e| BuildError::pipeline_error(format!("manifest add failed: {e}")))?;
    }
    // The primary tag already names the manifest; alias the remaining tags.
    for tag in expanded_tags.iter().skip(1) {
        let tagged = match backend {
            Some(b) => b.tag_image(manifest_name, tag).await.map(|_| ()),
            None => executor
                .execute_checked(&BuildahCommand::tag(manifest_name, tag))
                .await
                .map(|_| ()),
        };
        tagged.map_err(|e| BuildError::pipeline_error(format!("manifest tag failed: {e}")))?;
    }
    Ok(())
}
/// Substitute `${KEY}` placeholders in `tag` with values from `vars`;
/// placeholders with no matching key are left untouched.
fn expand_tag_with_vars(tag: &str, vars: &HashMap<String, String>) -> String {
    vars.iter().fold(tag.to_string(), |expanded, (key, value)| {
        expanded.replace(&format!("${{{key}}}"), value)
    })
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::pipeline::parse_pipeline;

    // A single image with no dependencies builds in exactly one wave.
    #[test]
    fn test_resolve_execution_order_simple() {
        let yaml = r"
images:
  app:
    file: Dockerfile
";
        let pipeline = parse_pipeline(yaml).unwrap();
        let executor = PipelineExecutor::new(
            pipeline,
            PathBuf::from("/tmp"),
            BuildahExecutor::with_path("/usr/bin/buildah"),
        );
        let waves = executor.resolve_execution_order().unwrap();
        assert_eq!(waves.len(), 1);
        assert_eq!(waves[0], vec!["app"]);
    }

    // A linear dependency chain yields one wave per image, in order.
    #[test]
    fn test_resolve_execution_order_with_deps() {
        let yaml = r"
images:
  base:
    file: Dockerfile.base
  app:
    file: Dockerfile.app
    depends_on: [base]
  test:
    file: Dockerfile.test
    depends_on: [app]
";
        let pipeline = parse_pipeline(yaml).unwrap();
        let executor = PipelineExecutor::new(
            pipeline,
            PathBuf::from("/tmp"),
            BuildahExecutor::with_path("/usr/bin/buildah"),
        );
        let waves = executor.resolve_execution_order().unwrap();
        assert_eq!(waves.len(), 3);
        assert_eq!(waves[0], vec!["base"]);
        assert_eq!(waves[1], vec!["app"]);
        assert_eq!(waves[2], vec!["test"]);
    }

    // Images sharing one dependency land in the same (parallel) wave;
    // order within the wave is unspecified, so only membership is checked.
    #[test]
    fn test_resolve_execution_order_parallel() {
        let yaml = r"
images:
  base:
    file: Dockerfile.base
  app1:
    file: Dockerfile.app1
    depends_on: [base]
  app2:
    file: Dockerfile.app2
    depends_on: [base]
";
        let pipeline = parse_pipeline(yaml).unwrap();
        let executor = PipelineExecutor::new(
            pipeline,
            PathBuf::from("/tmp"),
            BuildahExecutor::with_path("/usr/bin/buildah"),
        );
        let waves = executor.resolve_execution_order().unwrap();
        assert_eq!(waves.len(), 2);
        assert_eq!(waves[0], vec!["base"]);
        assert_eq!(waves[1].len(), 2);
        assert!(waves[1].contains(&"app1".to_string()));
        assert!(waves[1].contains(&"app2".to_string()));
    }

    // A reference to an undeclared image is rejected up front, and the
    // offending dependency name appears in the error message.
    #[test]
    fn test_resolve_execution_order_missing_dep() {
        let yaml = r"
images:
  app:
    file: Dockerfile
    depends_on: [missing]
";
        let pipeline = parse_pipeline(yaml).unwrap();
        let executor = PipelineExecutor::new(
            pipeline,
            PathBuf::from("/tmp"),
            BuildahExecutor::with_path("/usr/bin/buildah"),
        );
        let result = executor.resolve_execution_order();
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("missing"));
    }

    // A two-image cycle produces CircularDependency listing both members.
    #[test]
    fn test_resolve_execution_order_circular() {
        let yaml = r"
images:
  a:
    file: Dockerfile.a
    depends_on: [b]
  b:
    file: Dockerfile.b
    depends_on: [a]
";
        let pipeline = parse_pipeline(yaml).unwrap();
        let executor = PipelineExecutor::new(
            pipeline,
            PathBuf::from("/tmp"),
            BuildahExecutor::with_path("/usr/bin/buildah"),
        );
        let result = executor.resolve_execution_order();
        assert!(result.is_err());
        match result.unwrap_err() {
            BuildError::CircularDependency { stages } => {
                assert!(stages.contains(&"a".to_string()));
                assert!(stages.contains(&"b".to_string()));
            }
            e => panic!("Expected CircularDependency error, got: {e:?}"),
        }
    }

    // ${VAR} placeholders are replaced from the vars map.
    #[test]
    fn test_expand_tag() {
        let mut vars = HashMap::new();
        vars.insert("VERSION".to_string(), "1.0.0".to_string());
        vars.insert("REGISTRY".to_string(), "ghcr.io/myorg".to_string());
        let tag = "${REGISTRY}/app:${VERSION}";
        let expanded = expand_tag_with_vars(tag, &vars);
        assert_eq!(expanded, "ghcr.io/myorg/app:1.0.0");
    }

    // Placeholders with no matching var are left verbatim.
    #[test]
    fn test_expand_tag_partial() {
        let mut vars = HashMap::new();
        vars.insert("VERSION".to_string(), "1.0.0".to_string());
        let tag = "myapp:${VERSION}-${UNKNOWN}";
        let expanded = expand_tag_with_vars(tag, &vars);
        assert_eq!(expanded, "myapp:1.0.0-${UNKNOWN}");
    }

    // is_success flips as soon as any failure is recorded.
    #[test]
    fn test_pipeline_result_is_success() {
        let mut result = PipelineResult {
            succeeded: HashMap::new(),
            failed: HashMap::new(),
            total_time_ms: 100,
        };
        assert!(result.is_success());
        result.failed.insert("app".to_string(), "error".to_string());
        assert!(!result.is_success());
    }

    // total_images counts successes and failures together.
    #[test]
    fn test_pipeline_result_total_images() {
        let mut result = PipelineResult {
            succeeded: HashMap::new(),
            failed: HashMap::new(),
            total_time_ms: 100,
        };
        result.succeeded.insert(
            "app1".to_string(),
            BuiltImage {
                image_id: "sha256:abc".to_string(),
                tags: vec!["app1:latest".to_string()],
                layer_count: 5,
                size: 0,
                build_time_ms: 50,
                is_manifest: false,
            },
        );
        result
            .failed
            .insert("app2".to_string(), "error".to_string());
        assert_eq!(result.total_images(), 2);
    }

    // Builder-style setters override the defaults from the pipeline.
    #[test]
    fn test_builder_methods() {
        let yaml = r"
images:
  app:
    file: Dockerfile
push:
  after_all: true
";
        let pipeline = parse_pipeline(yaml).unwrap();
        let executor = PipelineExecutor::new(
            pipeline,
            PathBuf::from("/tmp"),
            BuildahExecutor::with_path("/usr/bin/buildah"),
        )
        .fail_fast(false)
        .push(false);
        assert!(!executor.fail_fast);
        assert!(!executor.push_enabled);
    }

    // Minimal PipelineImage fixture for platform-resolution tests.
    fn test_pipeline_image() -> PipelineImage {
        PipelineImage {
            file: PathBuf::from("Dockerfile"),
            context: PathBuf::from("."),
            tags: vec![],
            build_args: HashMap::new(),
            depends_on: vec![],
            no_cache: None,
            format: None,
            cache_mounts: vec![],
            retries: None,
            platforms: vec![],
        }
    }

    #[test]
    fn test_platform_to_suffix() {
        assert_eq!(platform_to_suffix("linux/amd64"), "amd64");
        assert_eq!(platform_to_suffix("linux/arm64"), "arm64");
        assert_eq!(platform_to_suffix("linux/arm64/v8"), "arm64-v8");
        assert_eq!(platform_to_suffix("linux"), "linux");
    }

    // An image-level platform list takes precedence over defaults.
    #[test]
    fn test_effective_platforms_image_overrides() {
        let defaults = PipelineDefaults {
            platforms: vec!["linux/amd64".into()],
            ..Default::default()
        };
        let image = PipelineImage {
            platforms: vec!["linux/arm64".into()],
            ..test_pipeline_image()
        };
        assert_eq!(effective_platforms(&image, &defaults), vec!["linux/arm64"]);
    }

    // An empty image-level list falls back to the pipeline defaults.
    #[test]
    fn test_effective_platforms_inherits_defaults() {
        let defaults = PipelineDefaults {
            platforms: vec!["linux/amd64".into()],
            ..Default::default()
        };
        let image = test_pipeline_image();
        assert_eq!(effective_platforms(&image, &defaults), vec!["linux/amd64"]);
    }

    // Both lists empty means no platforms at all (host-platform build).
    #[test]
    fn test_effective_platforms_empty() {
        let defaults = PipelineDefaults::default();
        let image = test_pipeline_image();
        assert!(effective_platforms(&image, &defaults).is_empty());
    }

    // Edge cases: empty string, no slash, and extra components dropped.
    #[test]
    fn test_platform_to_suffix_edge_cases() {
        assert_eq!(platform_to_suffix(""), "");
        assert_eq!(platform_to_suffix("linux"), "linux");
        assert_eq!(platform_to_suffix("linux/arm/v7/extra"), "arm-v7");
    }

    // Multiple default platforms are inherited in order.
    #[test]
    fn test_effective_platforms_multiple_defaults() {
        let defaults = PipelineDefaults {
            platforms: vec!["linux/amd64".into(), "linux/arm64".into()],
            ..Default::default()
        };
        let image = test_pipeline_image();
        assert_eq!(
            effective_platforms(&image, &defaults),
            vec!["linux/amd64", "linux/arm64"]
        );
    }

    // An image override replaces defaults entirely (no merging).
    #[test]
    fn test_effective_platforms_image_overrides_multiple() {
        let defaults = PipelineDefaults {
            platforms: vec!["linux/amd64".into(), "linux/arm64".into()],
            ..Default::default()
        };
        let image = PipelineImage {
            platforms: vec!["linux/s390x".into()],
            ..test_pipeline_image()
        };
        assert_eq!(effective_platforms(&image, &defaults), vec!["linux/s390x"]);
    }
}