use std::fs;
use std::path::Path;
use std::thread;
use std::time::Duration;
use assert_cmd::Command;
use tempfile::tempdir;
use tiny_http::{Header, Response, Server, StatusCode};
/// Write `content` to `path`, creating any missing parent directories first.
fn write_file(path: &Path, content: &str) {
    match path.parent() {
        Some(parent) => fs::create_dir_all(parent).expect("mkdir"),
        None => {}
    }
    fs::write(path, content).expect("write");
}
/// Build a four-crate workspace shaped as a diamond:
/// `core` <- {`api`, `cli`} <- `app`. Exercises level-based publish ordering
/// (core is level 0, api/cli level 1, app level 2).
fn create_parallel_workspace(root: &Path) {
    write_file(
        &root.join("Cargo.toml"),
        r#"
[workspace]
members = ["core", "api", "cli", "app"]
resolver = "2"
"#,
    );
    // Leaf crate with no dependencies.
    write_file(
        &root.join("core/Cargo.toml"),
        r#"
[package]
name = "core"
version = "0.1.0"
edition = "2021"
"#,
    );
    write_file(&root.join("core/src/lib.rs"), "pub fn core() {}\n");
    // Two structurally identical crates that both depend on core.
    for name in ["api", "cli"] {
        write_file(
            &root.join(format!("{name}/Cargo.toml")),
            &format!(
                r#"
[package]
name = "{name}"
version = "0.1.0"
edition = "2021"
[dependencies]
core = {{ path = "../core" }}
"#
            ),
        );
        write_file(
            &root.join(format!("{name}/src/lib.rs")),
            &format!("pub fn {name}() {{}}\n"),
        );
    }
    // Top crate depending on both mid-level crates.
    write_file(
        &root.join("app/Cargo.toml"),
        r#"
[package]
name = "app"
version = "0.1.0"
edition = "2021"
[dependencies]
api = { path = "../api" }
cli = { path = "../cli" }
"#,
    );
    write_file(&root.join("app/src/lib.rs"), "pub fn app() {}\n");
}
/// Build a workspace of three crates with no inter-dependencies, so the
/// publish plan puts every package into a single level.
fn create_independent_workspace(root: &Path) {
    write_file(
        &root.join("Cargo.toml"),
        r#"
[workspace]
members = ["alpha", "beta", "gamma"]
resolver = "2"
"#,
    );
    // Each member gets the same minimal manifest and a one-function lib.
    let make_member = |name: &str| {
        let manifest = format!(
            r#"
[package]
name = "{name}"
version = "0.1.0"
edition = "2021"
"#
        );
        write_file(&root.join(format!("{name}/Cargo.toml")), &manifest);
        write_file(
            &root.join(format!("{name}/src/lib.rs")),
            &format!("pub fn {name}() {{}}\n"),
        );
    };
    for member in ["alpha", "beta", "gamma"] {
        make_member(member);
    }
}
/// Install a fake `cargo` executable into `bin_dir` that intercepts
/// `cargo publish`: it exits with `$SHIPPER_FAKE_PUBLISH_EXIT` (0 when the
/// variable is unset/empty) instead of publishing. Every other subcommand
/// is forwarded to the real cargo named by the `REAL_CARGO` env var.
fn create_fake_cargo_proxy(bin_dir: &Path) {
    #[cfg(windows)]
    {
        // Batch shim; `cargo.cmd` shadows the real cargo when its
        // directory is prepended to PATH.
        fs::write(
            bin_dir.join("cargo.cmd"),
            "@echo off\r\nif \"%1\"==\"publish\" (\r\n if \"%SHIPPER_FAKE_PUBLISH_EXIT%\"==\"\" (exit /b 0) else (exit /b %SHIPPER_FAKE_PUBLISH_EXIT%)\r\n)\r\n\"%REAL_CARGO%\" %*\r\nexit /b %ERRORLEVEL%\r\n",
        )
        .expect("write fake cargo");
    }
    #[cfg(not(windows))]
    {
        use std::os::unix::fs::PermissionsExt;
        let path = bin_dir.join("cargo");
        fs::write(
            &path,
            "#!/usr/bin/env sh\nif [ \"$1\" = \"publish\" ]; then\n exit \"${SHIPPER_FAKE_PUBLISH_EXIT:-0}\"\nfi\n\"$REAL_CARGO\" \"$@\"\n",
        )
        .expect("write fake cargo");
        // The script must be executable for PATH lookup to pick it up.
        let mut perms = fs::metadata(&path).expect("meta").permissions();
        perms.set_mode(0o755);
        fs::set_permissions(&path, perms).expect("chmod");
    }
}
/// Install a fake `cargo` executable into `bin_dir` whose `publish`
/// subcommand always fails: it prints a simulated error to stderr and exits
/// with status 1. All other subcommands are forwarded to the real cargo
/// named by the `REAL_CARGO` env var.
fn create_fake_cargo_failing(bin_dir: &Path) {
    #[cfg(windows)]
    {
        // Batch shim; shadows the real cargo when its dir leads PATH.
        fs::write(
            bin_dir.join("cargo.cmd"),
            "@echo off\r\nif \"%1\"==\"publish\" (\r\n echo error: simulated permanent failure 1>&2\r\n exit /b 1\r\n)\r\n\"%REAL_CARGO%\" %*\r\nexit /b %ERRORLEVEL%\r\n",
        )
        .expect("write fake cargo");
    }
    #[cfg(not(windows))]
    {
        use std::os::unix::fs::PermissionsExt;
        let path = bin_dir.join("cargo");
        fs::write(
            &path,
            "#!/usr/bin/env sh\nif [ \"$1\" = \"publish\" ]; then\n echo 'error: simulated permanent failure' >&2\n exit 1\nfi\n\"$REAL_CARGO\" \"$@\"\n",
        )
        .expect("write fake cargo");
        // The script must be executable for PATH lookup to pick it up.
        let mut perms = fs::metadata(&path).expect("meta").permissions();
        perms.set_mode(0o755);
        fs::set_permissions(&path, perms).expect("chmod");
    }
}
/// Separator used when concatenating entries for the PATH env var.
fn path_sep() -> &'static str {
    match cfg!(windows) {
        true => ";",
        false => ":",
    }
}
/// Handle to a background HTTP server standing in for a crates registry.
struct TestRegistry {
    // Base URL (http://127.0.0.1:<port>) passed to the CLI via `--api-base`.
    base_url: String,
    // Server thread; joined to ensure all expected requests were handled.
    handle: thread::JoinHandle<()>,
}
impl TestRegistry {
    /// Block until the server thread finishes serving (or its receive
    /// timeout elapses and it stops on its own).
    fn join(self) {
        self.handle.join().expect("join server");
    }
}
/// Spawn a throwaway HTTP server on a random local port that answers up to
/// `expected_requests` requests with the given status codes. The last status
/// in `statuses` repeats once the list is exhausted (404 if the list is
/// empty); every response body is the JSON literal `{}`.
fn spawn_registry(statuses: Vec<u16>, expected_requests: usize) -> TestRegistry {
    let server = Server::http("127.0.0.1:0").expect("server");
    let base_url = format!("http://{}", server.server_addr());
    let handle = thread::spawn(move || {
        for idx in 0..expected_requests {
            // Stop serving when no request arrives within the grace period
            // or the server errors out.
            let Ok(Some(req)) = server.recv_timeout(Duration::from_secs(30)) else {
                break;
            };
            let status = statuses
                .get(idx)
                .or_else(|| statuses.last())
                .copied()
                .unwrap_or(404);
            let content_type =
                Header::from_bytes("Content-Type", "application/json").expect("header");
            let resp = Response::from_string("{}")
                .with_status_code(StatusCode(status))
                .with_header(content_type);
            req.respond(resp).expect("respond");
        }
    });
    TestRegistry { base_url, handle }
}
/// Command pointing at the compiled `shipper-cli` binary under test.
fn shipper_cmd() -> Command {
    Command::new(assert_cmd::cargo::cargo_bin!("shipper-cli"))
}
/// Stringified path of the fake cargo shim inside `bin_dir`
/// (`cargo.cmd` on Windows, `cargo` elsewhere).
fn fake_cargo_bin_path(bin_dir: &Path) -> String {
    let shim = if cfg!(windows) { "cargo.cmd" } else { "cargo" };
    bin_dir.join(shim).display().to_string()
}
fn prepend_fake_cargo(td: &Path) -> (String, String, String) {
let bin_dir = td.join("fake-bin");
fs::create_dir_all(&bin_dir).expect("mkdir");
create_fake_cargo_proxy(&bin_dir);
let old_path = std::env::var("PATH").unwrap_or_default();
let mut new_path = bin_dir.display().to_string();
if !old_path.is_empty() {
new_path.push_str(path_sep());
new_path.push_str(&old_path);
}
let real_cargo = std::env::var("CARGO").unwrap_or_else(|_| "cargo".to_string());
let fake_cargo = fake_cargo_bin_path(&bin_dir);
(new_path, real_cargo, fake_cargo)
}
fn prepend_fake_cargo_failing(td: &Path) -> (String, String, String) {
let bin_dir = td.join("fake-bin");
fs::create_dir_all(&bin_dir).expect("mkdir");
create_fake_cargo_failing(&bin_dir);
let old_path = std::env::var("PATH").unwrap_or_default();
let mut new_path = bin_dir.display().to_string();
if !old_path.is_empty() {
new_path.push_str(path_sep());
new_path.push_str(&old_path);
}
let real_cargo = std::env::var("CARGO").unwrap_or_else(|_| "cargo".to_string());
let fake_cargo = fake_cargo_bin_path(&bin_dir);
(new_path, real_cargo, fake_cargo)
}
mod parallel_level_grouping {
    use super::*;

    /// Three crates with no dependencies between them must collapse into a
    /// single publish level containing all of them.
    #[test]
    fn given_independent_crates_when_publishing_then_publishes_in_parallel_levels() {
        let workdir = tempdir().expect("tempdir");
        create_independent_workspace(workdir.path());
        let release_spec = shipper_core::types::ReleaseSpec {
            manifest_path: workdir.path().join("Cargo.toml"),
            registry: shipper_core::types::Registry::crates_io(),
            selected_packages: None,
        };
        let workspace = shipper_core::plan::build_plan(&release_spec).expect("plan");
        let level_groups = workspace.plan.group_by_levels();
        assert_eq!(
            level_groups.len(),
            1,
            "independent crates should form a single level"
        );
        assert_eq!(
            level_groups[0].packages.len(),
            3,
            "all three crates should be in level 0"
        );
        let crate_names: Vec<&str> = level_groups[0]
            .packages
            .iter()
            .map(|pkg| pkg.name.as_str())
            .collect();
        assert!(crate_names.contains(&"alpha"));
        assert!(crate_names.contains(&"beta"));
        assert!(crate_names.contains(&"gamma"));
    }

    /// The verbose `plan` subcommand must print a single level listing all
    /// three crates with their versions.
    #[test]
    fn given_independent_crates_when_plan_verbose_then_shows_single_level() {
        let workdir = tempdir().expect("tempdir");
        create_independent_workspace(workdir.path());
        let assertion = shipper_cmd()
            .arg("--manifest-path")
            .arg(workdir.path().join("Cargo.toml"))
            .arg("--verbose")
            .arg("plan")
            .assert()
            .success();
        let plan_output =
            String::from_utf8(assertion.get_output().stdout.clone()).expect("utf8");
        assert!(plan_output.contains("Level 0:"));
        assert!(plan_output.contains("alpha@0.1.0"));
        assert!(plan_output.contains("beta@0.1.0"));
        assert!(plan_output.contains("gamma@0.1.0"));
    }
}
mod dependency_level_ordering {
    use super::*;

    /// The diamond workspace (core <- {api, cli} <- app) must be grouped
    /// into exactly three publish levels in dependency order.
    #[test]
    fn given_workspace_with_dependencies_when_publishing_then_respects_level_ordering() {
        let workdir = tempdir().expect("tempdir");
        create_parallel_workspace(workdir.path());
        let release_spec = shipper_core::types::ReleaseSpec {
            manifest_path: workdir.path().join("Cargo.toml"),
            registry: shipper_core::types::Registry::crates_io(),
            selected_packages: None,
        };
        let workspace = shipper_core::plan::build_plan(&release_spec).expect("plan");
        let level_groups = workspace.plan.group_by_levels();
        assert_eq!(level_groups.len(), 3, "fan-out/fan-in should produce 3 levels");
        let bottom: Vec<&str> = level_groups[0]
            .packages
            .iter()
            .map(|pkg| pkg.name.as_str())
            .collect();
        assert_eq!(
            bottom,
            vec!["core"],
            "level 0 should contain only core"
        );
        let mut middle: Vec<&str> = level_groups[1]
            .packages
            .iter()
            .map(|pkg| pkg.name.as_str())
            .collect();
        middle.sort();
        assert_eq!(
            middle,
            vec!["api", "cli"],
            "level 1 should contain api and cli"
        );
        let top: Vec<&str> = level_groups[2]
            .packages
            .iter()
            .map(|pkg| pkg.name.as_str())
            .collect();
        assert_eq!(top, vec!["app"], "level 2 should contain only app");
    }

    /// The verbose `plan` subcommand must print all three levels and the
    /// packages at the bottom and top of the graph.
    #[test]
    fn given_dependencies_when_plan_verbose_then_shows_three_levels() {
        let workdir = tempdir().expect("tempdir");
        create_parallel_workspace(workdir.path());
        let assertion = shipper_cmd()
            .arg("--manifest-path")
            .arg(workdir.path().join("Cargo.toml"))
            .arg("--verbose")
            .arg("plan")
            .assert()
            .success();
        let plan_output =
            String::from_utf8(assertion.get_output().stdout.clone()).expect("utf8");
        assert!(plan_output.contains("Level 0:"), "should show level 0");
        assert!(plan_output.contains("Level 1:"), "should show level 1");
        assert!(plan_output.contains("Level 2:"), "should show level 2");
        assert!(plan_output.contains("core@0.1.0"));
        assert!(plan_output.contains("app@0.1.0"));
    }
}
mod resume_skips_completed_levels {
    use super::*;

    /// Publish the diamond workspace end-to-end with a succeeding fake cargo
    /// and verify the persisted state and receipt files.
    ///
    /// NOTE(review): despite the module name, this runs a single complete
    /// publish and inspects the resulting files; it never interrupts and
    /// re-invokes the CLI, so actual resume behavior is presumably covered
    /// elsewhere — TODO confirm.
    #[test]
    fn given_partially_published_workspace_when_resuming_then_skips_completed_levels() {
        let td = tempdir().expect("tempdir");
        create_parallel_workspace(td.path());
        // Fake cargo: `publish` exits with SHIPPER_FAKE_PUBLISH_EXIT (set to
        // 0 below); every other subcommand proxies to the real cargo.
        let (new_path, real_cargo, fake_cargo) = prepend_fake_cargo(td.path());
        let state_dir = td.path().join(".shipper");
        // One 200 response per package (the workspace has 4 packages).
        let registry = spawn_registry(vec![200, 200, 200, 200], 4);
        shipper_cmd()
            .arg("--manifest-path")
            .arg(td.path().join("Cargo.toml"))
            .arg("--api-base")
            .arg(&registry.base_url)
            .arg("--allow-dirty")
            .arg("--max-attempts")
            .arg("1")
            .arg("--state-dir")
            .arg(&state_dir)
            .arg("--max-concurrent")
            .arg("1")
            .arg("publish")
            .env("PATH", &new_path)
            .env("REAL_CARGO", &real_cargo)
            .env("SHIPPER_CARGO_BIN", &fake_cargo)
            .env("SHIPPER_FAKE_PUBLISH_EXIT", "0")
            .assert()
            .success();
        // Wait for the fake registry to finish serving its requests.
        registry.join();
        let state_file = state_dir.join("state.json");
        assert!(state_file.exists(), "state.json should exist after publish");
        let state_json = fs::read_to_string(&state_file).expect("read state");
        let state: serde_json::Value = serde_json::from_str(&state_json).expect("parse state");
        let packages = state["packages"].as_object().expect("packages object");
        // Every package must have reached a terminal success state.
        for (_key, progress) in packages {
            let pkg_state = progress["state"]["state"].as_str().unwrap_or("unknown");
            assert!(
                pkg_state == "published" || pkg_state == "skipped",
                "expected published or skipped, got: {pkg_state}"
            );
        }
        // The receipt must record an outcome for all workspace members.
        let receipt_file = state_dir.join("receipt.json");
        assert!(receipt_file.exists(), "receipt.json should exist");
        let receipt_json = fs::read_to_string(&receipt_file).expect("read receipt");
        let receipt: serde_json::Value =
            serde_json::from_str(&receipt_json).expect("parse receipt");
        let receipt_pkgs = receipt["packages"].as_array().expect("packages array");
        assert_eq!(
            receipt_pkgs.len(),
            4,
            "receipt should contain all 4 packages"
        );
    }
}
mod failure_stops_subsequent_levels {
    use super::*;

    /// With a fake cargo whose `publish` always fails, the run must exit
    /// non-zero and must not have published `app` (level 2) after the
    /// failure at level 0.
    #[test]
    fn given_failure_in_one_level_when_continuing_then_stops_subsequent_levels() {
        let td = tempdir().expect("tempdir");
        create_parallel_workspace(td.path());
        // Fake cargo whose `publish` prints an error and exits 1.
        let (new_path, real_cargo, fake_cargo) = prepend_fake_cargo_failing(td.path());
        let state_dir = td.path().join(".shipper");
        // Registry answers 404 to everything (10 is a generous cap; the
        // server thread stops early on its receive timeout if fewer arrive).
        let registry = spawn_registry(vec![404], 10);
        let assert_result = shipper_cmd()
            .arg("--manifest-path")
            .arg(td.path().join("Cargo.toml"))
            .arg("--api-base")
            .arg(&registry.base_url)
            .arg("--allow-dirty")
            // Zero timeouts keep any post-publish verification from waiting.
            .arg("--verify-timeout")
            .arg("0ms")
            .arg("--verify-poll")
            .arg("0ms")
            .arg("--max-attempts")
            .arg("1")
            .arg("--state-dir")
            .arg(&state_dir)
            .arg("--parallel")
            .arg("publish")
            .env("PATH", &new_path)
            .env("REAL_CARGO", &real_cargo)
            .env("SHIPPER_CARGO_BIN", &fake_cargo)
            .assert()
            .failure();
        let stderr = String::from_utf8(assert_result.get_output().stderr.clone()).expect("utf8");
        assert!(
            stderr.contains("failed") || stderr.contains("error") || stderr.contains("Error"),
            "stderr should indicate failure: {stderr}"
        );
        // State may or may not have been written depending on how early the
        // run aborted; only check ordering if it exists.
        if state_dir.join("state.json").exists() {
            let state_json = fs::read_to_string(state_dir.join("state.json")).expect("read state");
            let state: serde_json::Value = serde_json::from_str(&state_json).expect("parse state");
            if let Some(packages) = state["packages"].as_object() {
                let app_attempted = packages.iter().any(|(k, v)| {
                    k.contains("app") && v["state"]["state"].as_str() == Some("published")
                });
                assert!(
                    !app_attempted,
                    "app (level 2) should not have been published when level 0 failed"
                );
            }
        }
        registry.join();
    }
}
mod max_concurrent_limits_parallelism {
    use super::*;

    /// Publish three independent crates with `--max-concurrent 1` and check
    /// the receipt records a terminal success state for each.
    ///
    /// NOTE(review): this does not measure actual concurrency — it only
    /// verifies the run succeeds with the flag set; presumably the limit
    /// itself is asserted in unit tests — TODO confirm.
    #[test]
    fn given_max_concurrent_setting_when_publishing_then_limits_parallelism() {
        let td = tempdir().expect("tempdir");
        create_independent_workspace(td.path());
        // Fake cargo whose `publish` exits with SHIPPER_FAKE_PUBLISH_EXIT.
        let (new_path, real_cargo, fake_cargo) = prepend_fake_cargo(td.path());
        let state_dir = td.path().join(".shipper");
        // One 200 response per independent crate.
        let registry = spawn_registry(vec![200, 200, 200], 3);
        shipper_cmd()
            .arg("--manifest-path")
            .arg(td.path().join("Cargo.toml"))
            .arg("--api-base")
            .arg(&registry.base_url)
            .arg("--allow-dirty")
            .arg("--max-attempts")
            .arg("1")
            .arg("--state-dir")
            .arg(&state_dir)
            .arg("--max-concurrent")
            .arg("1")
            .arg("publish")
            .env("PATH", &new_path)
            .env("REAL_CARGO", &real_cargo)
            .env("SHIPPER_CARGO_BIN", &fake_cargo)
            .env("SHIPPER_FAKE_PUBLISH_EXIT", "0")
            .assert()
            .success();
        let receipt_path = state_dir.join("receipt.json");
        assert!(receipt_path.exists(), "receipt.json should exist");
        let receipt_json = fs::read_to_string(&receipt_path).expect("read receipt");
        let receipt: serde_json::Value =
            serde_json::from_str(&receipt_json).expect("parse receipt");
        let packages = receipt["packages"].as_array().expect("packages array");
        assert_eq!(packages.len(), 3, "all 3 packages should be in receipt");
        for pkg in packages {
            let state = pkg["state"]["state"].as_str().unwrap_or("unknown");
            assert!(
                state == "published" || state == "skipped",
                "package should be published or skipped, got: {state}"
            );
        }
        registry.join();
    }

    /// `--max-concurrent` must also be accepted by the read-only `plan`
    /// subcommand.
    #[test]
    fn given_max_concurrent_flag_when_plan_then_accepted() {
        let td = tempdir().expect("tempdir");
        create_independent_workspace(td.path());
        shipper_cmd()
            .arg("--manifest-path")
            .arg(td.path().join("Cargo.toml"))
            .arg("--max-concurrent")
            .arg("2")
            .arg("plan")
            .assert()
            .success();
    }
}