use predicates::prelude::*;
use std::fs;
use tempfile::tempdir;
mod common;
use common::perfgate_cmd;
/// Argv for a command that always exits with status 0 (Unix: `true`).
#[cfg(unix)]
fn success_command() -> Vec<&'static str> {
    Vec::from(["true"])
}
/// Argv for a command that always exits with status 0 (Windows: `cmd /c exit 0`).
#[cfg(windows)]
fn success_command() -> Vec<&'static str> {
    Vec::from(["cmd", "/c", "exit", "0"])
}
/// Argv for a command that always exits with a nonzero status (Unix: `false`).
#[cfg(unix)]
fn failure_command() -> Vec<&'static str> {
    Vec::from(["false"])
}
/// Argv for a command that always exits with a nonzero status (Windows: `cmd /c exit 1`).
#[cfg(windows)]
fn failure_command() -> Vec<&'static str> {
    Vec::from(["cmd", "/c", "exit", "1"])
}
/// `perfgate run --repeat 2` succeeds and writes a v1 receipt with the bench
/// name, two samples, and a wall_ms stats object.
#[test]
fn test_run_basic_command() {
    let dir = tempdir().expect("failed to create temp dir");
    let out_path = dir.path().join("output.json");

    let mut cmd = perfgate_cmd();
    cmd.args(["run", "--name", "test", "--repeat", "2", "--out"])
        .arg(&out_path)
        .arg("--")
        .args(success_command());
    cmd.assert().success();

    assert!(out_path.exists(), "output file should exist");
    let content = fs::read_to_string(&out_path).expect("failed to read output file");
    let receipt: serde_json::Value =
        serde_json::from_str(&content).expect("output should be valid JSON");

    assert_eq!(
        receipt["schema"].as_str(),
        Some("perfgate.run.v1"),
        "schema should be 'perfgate.run.v1'"
    );
    assert_eq!(
        receipt["bench"]["name"].as_str(),
        Some("test"),
        "bench name should be 'test'"
    );

    let samples = receipt["samples"]
        .as_array()
        .expect("samples should be an array");
    assert_eq!(samples.len(), 2, "should have 2 samples (repeat=2)");
    assert!(
        receipt["stats"]["wall_ms"].is_object(),
        "stats should contain wall_ms"
    );
}
/// Without `--allow-nonzero`, a benchmark command exiting nonzero makes
/// `perfgate run` itself exit 1 and report the failure on stderr, while the
/// receipt file is still written.
#[test]
fn test_run_nonzero_command_fails_without_allow_nonzero() {
    let dir = tempdir().expect("failed to create temp dir");
    let out_path = dir.path().join("output.json");

    let mut cmd = perfgate_cmd();
    cmd.args(["run", "--name", "nonzero-test", "--repeat", "1", "--out"])
        .arg(&out_path)
        .arg("--")
        .args(failure_command());

    let output = cmd.output().expect("failed to execute perfgate run");
    assert!(!output.status.success(), "run should fail on nonzero");
    assert_eq!(output.status.code(), Some(1));
    assert!(out_path.exists(), "output file should exist");

    let stderr = String::from_utf8_lossy(&output.stderr);
    assert!(
        stderr.contains("benchmark command failed"),
        "stderr should mention benchmark failure: {}",
        stderr
    );
}
/// Omitting `--repeat` falls back to the default of 5 measured samples.
#[test]
fn test_run_default_repeat_count() {
    let dir = tempdir().expect("failed to create temp dir");
    let out_path = dir.path().join("output.json");

    let mut cmd = perfgate_cmd();
    cmd.args(["run", "--name", "default-repeat-test", "--out"])
        .arg(&out_path)
        .arg("--")
        .args(success_command());
    cmd.assert().success();

    let content = fs::read_to_string(&out_path).expect("failed to read output file");
    let receipt: serde_json::Value =
        serde_json::from_str(&content).expect("output should be valid JSON");
    let samples = receipt["samples"]
        .as_array()
        .expect("samples should be an array");
    assert_eq!(samples.len(), 5, "default repeat should produce 5 samples");
}
/// `run` without the required `--name` flag is rejected by argument parsing,
/// and the error output mentions the missing flag.
#[test]
fn test_run_missing_name_fails() {
    let mut cmd = perfgate_cmd();
    cmd.arg("run").arg("--").args(success_command());
    cmd.assert()
        .failure()
        .stderr(predicate::str::contains("--name"));
}
/// `run` with no benchmark command after `--` (here: none at all) fails with
/// an error mentioning the required argument.
#[test]
fn test_run_missing_command_fails() {
    let mut cmd = perfgate_cmd();
    cmd.args(["run", "--name", "test"]);
    cmd.assert()
        .failure()
        .stderr(predicate::str::contains("required"));
}
/// The receipt identifies the producing tool: name "perfgate" plus a
/// version string.
#[test]
fn test_run_receipt_contains_tool_info() {
    let dir = tempdir().expect("failed to create temp dir");
    let out_path = dir.path().join("output.json");

    let mut cmd = perfgate_cmd();
    cmd.args(["run", "--name", "tool-info-test", "--repeat", "1", "--out"])
        .arg(&out_path)
        .arg("--")
        .args(success_command());
    cmd.assert().success();

    let content = fs::read_to_string(&out_path).expect("failed to read output file");
    let receipt: serde_json::Value =
        serde_json::from_str(&content).expect("output should be valid JSON");

    assert_eq!(
        receipt["tool"]["name"].as_str(),
        Some("perfgate"),
        "tool name should be 'perfgate'"
    );
    assert!(
        receipt["tool"]["version"].is_string(),
        "tool version should be present"
    );
}
/// `--pretty` produces indented, multi-line JSON that still parses and keeps
/// the v1 schema marker.
#[test]
fn test_run_pretty_flag() {
    let dir = tempdir().expect("failed to create temp dir");
    let out_path = dir.path().join("output.json");

    let mut cmd = perfgate_cmd();
    cmd.args(["run", "--name", "pretty-test", "--repeat", "1", "--pretty", "--out"])
        .arg(&out_path)
        .arg("--")
        .args(success_command());
    cmd.assert().success();

    let content = fs::read_to_string(&out_path).expect("failed to read output file");
    assert!(
        content.contains('\n'),
        "pretty-printed JSON should contain newlines"
    );
    assert!(
        content.contains("  "),
        "pretty-printed JSON should have indentation"
    );

    let receipt: serde_json::Value =
        serde_json::from_str(&content).expect("output should be valid JSON");
    assert_eq!(receipt["schema"].as_str(), Some("perfgate.run.v1"));
}
/// Running a command that produces stdout (`echo hello`) works end to end:
/// the receipt has the right schema and name, and each sample exits 0.
#[test]
fn test_run_with_echo_command() {
    let dir = tempdir().expect("failed to create temp dir");
    let out_path = dir.path().join("output.json");

    let mut cmd = perfgate_cmd();
    cmd.args(["run", "--name", "echo-test", "--repeat", "2", "--out"])
        .arg(&out_path)
        .arg("--");
    // Platform-specific argv for "echo hello".
    #[cfg(unix)]
    let echo_args: Vec<&str> = vec!["echo", "hello"];
    #[cfg(windows)]
    let echo_args: Vec<&str> = vec!["cmd", "/c", "echo", "hello"];
    cmd.args(echo_args);
    cmd.assert().success();

    let content = fs::read_to_string(&out_path).expect("failed to read output file");
    let receipt: serde_json::Value =
        serde_json::from_str(&content).expect("output should be valid JSON");
    assert_eq!(receipt["schema"].as_str(), Some("perfgate.run.v1"));
    assert_eq!(receipt["bench"]["name"].as_str(), Some("echo-test"));

    let samples = receipt["samples"]
        .as_array()
        .expect("samples should be an array");
    assert_eq!(samples.len(), 2, "should have 2 samples");
    for sample in samples {
        assert_eq!(sample["exit_code"].as_i64(), Some(0));
    }
}
/// Each measured sample records the required fields: a `wall_ms` duration, an
/// `exit_code` (0 for the success command used here), and `warmup == false`.
#[test]
fn test_run_samples_contain_required_fields() {
    let temp_dir = tempdir().expect("failed to create temp dir");
    let output_path = temp_dir.path().join("output.json");
    let mut cmd = perfgate_cmd();
    cmd.arg("run")
        .arg("--name")
        .arg("sample-fields-test")
        .arg("--repeat")
        .arg("1")
        .arg("--out")
        .arg(&output_path)
        .arg("--");
    for arg in success_command() {
        cmd.arg(arg);
    }
    cmd.assert().success();
    let content = fs::read_to_string(&output_path).expect("failed to read output file");
    let receipt: serde_json::Value =
        serde_json::from_str(&content).expect("output should be valid JSON");
    let samples = receipt["samples"]
        .as_array()
        .expect("samples should be an array");
    // Use `.first()` instead of `samples[0]`: if the tool ever emitted an empty
    // array, indexing would panic with an opaque out-of-bounds message rather
    // than a clear test-failure diagnostic.
    let sample = samples
        .first()
        .expect("samples should contain at least one entry (repeat=1)");
    assert!(sample["wall_ms"].is_u64(), "sample should have wall_ms");
    assert!(sample["exit_code"].is_i64(), "sample should have exit_code");
    assert_eq!(
        sample["exit_code"].as_i64(),
        Some(0),
        "exit_code should be 0 for successful command"
    );
    assert_eq!(
        sample["warmup"].as_bool(),
        Some(false),
        "warmup should be false for measured samples"
    );
}