mod common;
use common::perfgate_cmd;
use std::fs;
use tempfile::tempdir;
#[cfg(unix)]
fn success_command() -> Vec<&'static str> {
    // Minimal command that exits 0 on any Unix system.
    ["true"].to_vec()
}
#[cfg(windows)]
fn success_command() -> Vec<&'static str> {
    // `cmd /c exit 0` terminates immediately with a success status.
    ["cmd", "/c", "exit", "0"].to_vec()
}
#[cfg(unix)]
fn cpu_work_command() -> Vec<&'static str> {
    // Short shell loop that burns a small, measurable amount of CPU time.
    let script = "for i in $(seq 1 1000); do echo $i > /dev/null; done";
    vec!["sh", "-c", script]
}
#[allow(dead_code)]
#[cfg(windows)]
fn cpu_work_command() -> Vec<&'static str> {
    // No portable busy-loop is used on Windows; a trivial exit keeps the
    // file compiling there while the CPU-time tests remain Unix-only.
    ["cmd", "/c", "exit", "0"].to_vec()
}
#[cfg(unix)]
#[test]
fn test_run_samples_include_cpu_ms_on_unix() {
    // Runs a short CPU-bound command and checks that every recorded sample
    // carries a plausible `cpu_ms` measurement on Unix platforms.
    let dir = tempdir().expect("failed to create temp dir");
    let out_file = dir.path().join("output.json");
    let mut run_cmd = perfgate_cmd();
    run_cmd
        .arg("run")
        .arg("--name")
        .arg("cpu-time-test")
        .arg("--repeat")
        .arg("2")
        .arg("--out")
        .arg(&out_file)
        .arg("--")
        .args(cpu_work_command());
    run_cmd.assert().success();
    let raw = fs::read_to_string(&out_file).expect("failed to read output file");
    let receipt: serde_json::Value =
        serde_json::from_str(&raw).expect("output should be valid JSON");
    let samples = receipt["samples"]
        .as_array()
        .expect("samples should be an array");
    assert!(!samples.is_empty(), "should have samples");
    for (i, sample) in samples.iter().enumerate() {
        assert!(
            sample.get("cpu_ms").is_some(),
            "sample {} should have cpu_ms field on Unix",
            i
        );
        let cpu_ms = sample["cpu_ms"]
            .as_u64()
            .expect("cpu_ms should be a valid u64");
        assert!(
            cpu_ms < 60_000,
            "cpu_ms should be reasonable (< 60 seconds), got {}",
            cpu_ms
        );
    }
}
#[cfg(unix)]
#[test]
fn test_run_stats_include_cpu_ms_summary_on_unix() {
    // Verifies the aggregated stats block contains an internally consistent
    // cpu_ms summary (min <= median <= max) after a multi-sample run.
    let dir = tempdir().expect("failed to create temp dir");
    let out_file = dir.path().join("output.json");
    let mut run_cmd = perfgate_cmd();
    run_cmd
        .arg("run")
        .arg("--name")
        .arg("cpu-stats-test")
        .arg("--repeat")
        .arg("3")
        .arg("--out")
        .arg(&out_file)
        .arg("--")
        .args(cpu_work_command());
    run_cmd.assert().success();
    let raw = fs::read_to_string(&out_file).expect("failed to read output file");
    let receipt: serde_json::Value =
        serde_json::from_str(&raw).expect("output should be valid JSON");
    let cpu_stats = &receipt["stats"]["cpu_ms"];
    assert!(
        cpu_stats.is_object(),
        "stats should contain cpu_ms summary on Unix"
    );
    assert!(
        cpu_stats["median"].is_u64(),
        "cpu_ms stats should have median"
    );
    assert!(cpu_stats["min"].is_u64(), "cpu_ms stats should have min");
    assert!(cpu_stats["max"].is_u64(), "cpu_ms stats should have max");
    let (min, median, max) = (
        cpu_stats["min"].as_u64().unwrap(),
        cpu_stats["median"].as_u64().unwrap(),
        cpu_stats["max"].as_u64().unwrap(),
    );
    assert!(
        min <= median,
        "min ({}) should be <= median ({})",
        min,
        median
    );
    assert!(
        median <= max,
        "median ({}) should be <= max ({})",
        median,
        max
    );
}
#[test]
fn test_export_csv_includes_cpu_ms_column() {
    // End-to-end: record a run, export it as CSV, and confirm the header
    // row advertises the cpu_ms_median column.
    let dir = tempdir().expect("failed to create temp dir");
    let run_file = dir.path().join("run.json");
    let csv_file = dir.path().join("export.csv");
    let mut run_cmd = perfgate_cmd();
    run_cmd
        .arg("run")
        .arg("--name")
        .arg("csv-export-test")
        .arg("--repeat")
        .arg("2")
        .arg("--out")
        .arg(&run_file)
        .arg("--")
        .args(success_command());
    run_cmd.assert().success();
    let mut export_cmd = perfgate_cmd();
    export_cmd
        .arg("export")
        .arg("--run")
        .arg(&run_file)
        .arg("--format")
        .arg("csv")
        .arg("--out")
        .arg(&csv_file);
    export_cmd.assert().success();
    let content = fs::read_to_string(&csv_file).expect("failed to read export file");
    let header = content.lines().next().expect("CSV should have header");
    assert!(
        header.contains("cpu_ms_median"),
        "CSV header should contain cpu_ms_median column. Header: {}",
        header
    );
}
#[test]
fn test_export_jsonl_includes_cpu_ms_when_present() {
    // End-to-end: record a run, export it as JSONL, and confirm the first
    // record exposes a cpu_ms_median field.
    let temp_dir = tempdir().expect("failed to create temp dir");
    let run_path = temp_dir.path().join("run.json");
    let export_path = temp_dir.path().join("export.jsonl");
    let mut cmd = perfgate_cmd();
    cmd.arg("run")
        .arg("--name")
        .arg("jsonl-export-test")
        .arg("--repeat")
        .arg("2")
        .arg("--out")
        .arg(&run_path)
        .arg("--");
    for arg in success_command() {
        cmd.arg(arg);
    }
    cmd.assert().success();
    let mut export_cmd = perfgate_cmd();
    export_cmd
        .arg("export")
        .arg("--run")
        .arg(&run_path)
        .arg("--format")
        .arg("jsonl")
        .arg("--out")
        .arg(&export_path);
    export_cmd.assert().success();
    let content = fs::read_to_string(&export_path).expect("failed to read export file");
    // BUG FIX: `content.trim().split('\n')` always yields at least one
    // (possibly empty) element, so the old `!lines.is_empty()` assertion was
    // vacuous and `lines[0]` would then feed "" to the JSON parser with a
    // misleading error. Find the first non-blank line explicitly instead.
    let first_line = content
        .lines()
        .find(|line| !line.trim().is_empty())
        .expect("JSONL should have content");
    let parsed: serde_json::Value =
        serde_json::from_str(first_line).expect("should be valid JSON");
    assert!(
        parsed.get("cpu_ms_median").is_some(),
        "JSONL should have cpu_ms_median field"
    );
}
#[test]
fn test_export_handles_missing_cpu_ms_gracefully() {
    // Exports a fixture run that predates cpu_ms support and checks the CSV
    // header still includes the cpu_ms_median column.
    use std::path::PathBuf;
    let dir = tempdir().expect("failed to create temp dir");
    let csv_file = dir.path().join("export.csv");
    let fixtures = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .join("tests")
        .join("fixtures");
    let baseline = fixtures.join("baseline.json");
    let mut export_cmd = perfgate_cmd();
    export_cmd
        .arg("export")
        .arg("--run")
        .arg(&baseline)
        .arg("--format")
        .arg("csv")
        .arg("--out")
        .arg(&csv_file);
    export_cmd.assert().success();
    let content = fs::read_to_string(&csv_file).expect("failed to read export file");
    let header = content.lines().next().expect("CSV should have header");
    assert!(
        header.contains("cpu_ms_median"),
        "CSV header should contain cpu_ms_median even when data is missing"
    );
}
#[test]
fn test_compare_handles_missing_cpu_ms_gracefully() {
    // Compares two fixture runs that lack cpu_ms data and verifies the
    // compare command still succeeds and reports wall_ms deltas.
    use std::path::PathBuf;
    let temp_dir = tempdir().expect("failed to create temp dir");
    let compare_path = temp_dir.path().join("compare.json");
    let baseline = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .join("tests")
        .join("fixtures")
        .join("baseline.json");
    let current = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .join("tests")
        .join("fixtures")
        .join("current_pass.json");
    let mut cmd = perfgate_cmd();
    cmd.arg("compare")
        .arg("--baseline")
        .arg(&baseline)
        .arg("--current")
        // FIX: this argument was mojibake ("¤t") where `&current`
        // belongs — `&curren` was mangled into the `¤` currency sign.
        .arg(&current)
        .arg("--out")
        .arg(&compare_path);
    cmd.assert().success();
    let content = fs::read_to_string(&compare_path).expect("failed to read compare file");
    let receipt: serde_json::Value =
        serde_json::from_str(&content).expect("compare.json should be valid JSON");
    let deltas = &receipt["deltas"];
    assert!(deltas.is_object(), "deltas should be an object");
    assert!(
        deltas.get("wall_ms").is_some(),
        "deltas should contain wall_ms"
    );
}
#[test]
fn test_compare_detects_cpu_ms_budget_violation() {
    // A 100% cpu_ms regression (median 50 -> 100) must trip a 10% metric
    // threshold: compare exits with code 2 and marks cpu_ms as "fail".
    let temp_dir = tempdir().expect("failed to create temp dir");
    let baseline_path = temp_dir.path().join("baseline.json");
    let current_path = temp_dir.path().join("current.json");
    let compare_path = temp_dir.path().join("compare.json");
    let baseline = serde_json::json!({
        "schema": "perfgate.run.v1",
        "tool": {"name": "perfgate", "version": "0.1.0"},
        "run": {
            "id": "baseline",
            "started_at": "2024-01-01T00:00:00Z",
            "ended_at": "2024-01-01T00:01:00Z",
            "host": {"os": "linux", "arch": "x86_64"}
        },
        "bench": {
            "name": "cpu-budget-test",
            "command": ["echo", "hello"],
            "repeat": 2,
            "warmup": 0
        },
        "samples": [
            {"wall_ms": 100, "exit_code": 0, "warmup": false, "timed_out": false, "cpu_ms": 50},
            {"wall_ms": 100, "exit_code": 0, "warmup": false, "timed_out": false, "cpu_ms": 50}
        ],
        "stats": {
            "wall_ms": {"median": 100, "min": 100, "max": 100},
            "cpu_ms": {"median": 50, "min": 50, "max": 50}
        }
    });
    let current = serde_json::json!({
        "schema": "perfgate.run.v1",
        "tool": {"name": "perfgate", "version": "0.1.0"},
        "run": {
            "id": "current",
            "started_at": "2024-01-02T00:00:00Z",
            "ended_at": "2024-01-02T00:01:00Z",
            "host": {"os": "linux", "arch": "x86_64"}
        },
        "bench": {
            "name": "cpu-budget-test",
            "command": ["echo", "hello"],
            "repeat": 2,
            "warmup": 0
        },
        "samples": [
            {"wall_ms": 100, "exit_code": 0, "warmup": false, "timed_out": false, "cpu_ms": 100},
            {"wall_ms": 100, "exit_code": 0, "warmup": false, "timed_out": false, "cpu_ms": 100}
        ],
        "stats": {
            "wall_ms": {"median": 100, "min": 100, "max": 100},
            "cpu_ms": {"median": 100, "min": 100, "max": 100}
        }
    });
    fs::write(
        &baseline_path,
        serde_json::to_string_pretty(&baseline).unwrap(),
    )
    .expect("write baseline");
    // FIX: the references below were mojibake ("¤t_path" / "¤t")
    // where `&current_path` / `&current` belong; the file did not compile.
    fs::write(
        &current_path,
        serde_json::to_string_pretty(&current).unwrap(),
    )
    .expect("write current");
    let mut cmd = perfgate_cmd();
    cmd.arg("compare")
        .arg("--baseline")
        .arg(&baseline_path)
        .arg("--current")
        .arg(&current_path)
        .arg("--metric-threshold")
        .arg("cpu_ms=0.10")
        .arg("--out")
        .arg(&compare_path);
    // Exit code 2 is the budget-violation signal.
    cmd.assert().code(2);
    let content = fs::read_to_string(&compare_path).expect("failed to read compare file");
    let receipt: serde_json::Value =
        serde_json::from_str(&content).expect("compare.json should be valid JSON");
    let cpu_delta = &receipt["deltas"]["cpu_ms"];
    assert!(cpu_delta.is_object(), "deltas should contain cpu_ms");
    assert_eq!(
        cpu_delta["status"].as_str(),
        Some("fail"),
        "cpu_ms status should be fail due to 100% regression exceeding 10% threshold"
    );
    let regression = cpu_delta["regression"].as_f64().unwrap();
    assert!(
        (regression - 1.0).abs() < 0.01,
        "regression should be ~100% (1.0), got {}",
        regression
    );
}
#[test]
fn test_compare_cpu_ms_budget_passes_within_threshold() {
    // A 5% cpu_ms regression (median 100 -> 105) stays within a 20% metric
    // threshold: compare succeeds and marks cpu_ms as "pass".
    let temp_dir = tempdir().expect("failed to create temp dir");
    let baseline_path = temp_dir.path().join("baseline.json");
    let current_path = temp_dir.path().join("current.json");
    let compare_path = temp_dir.path().join("compare.json");
    let baseline = serde_json::json!({
        "schema": "perfgate.run.v1",
        "tool": {"name": "perfgate", "version": "0.1.0"},
        "run": {
            "id": "baseline",
            "started_at": "2024-01-01T00:00:00Z",
            "ended_at": "2024-01-01T00:01:00Z",
            "host": {"os": "linux", "arch": "x86_64"}
        },
        "bench": {
            "name": "cpu-budget-pass-test",
            "command": ["echo", "hello"],
            "repeat": 2,
            "warmup": 0
        },
        "samples": [
            {"wall_ms": 100, "exit_code": 0, "warmup": false, "timed_out": false, "cpu_ms": 100},
            {"wall_ms": 100, "exit_code": 0, "warmup": false, "timed_out": false, "cpu_ms": 100}
        ],
        "stats": {
            "wall_ms": {"median": 100, "min": 100, "max": 100},
            "cpu_ms": {"median": 100, "min": 100, "max": 100}
        }
    });
    let current = serde_json::json!({
        "schema": "perfgate.run.v1",
        "tool": {"name": "perfgate", "version": "0.1.0"},
        "run": {
            "id": "current",
            "started_at": "2024-01-02T00:00:00Z",
            "ended_at": "2024-01-02T00:01:00Z",
            "host": {"os": "linux", "arch": "x86_64"}
        },
        "bench": {
            "name": "cpu-budget-pass-test",
            "command": ["echo", "hello"],
            "repeat": 2,
            "warmup": 0
        },
        "samples": [
            {"wall_ms": 100, "exit_code": 0, "warmup": false, "timed_out": false, "cpu_ms": 105},
            {"wall_ms": 100, "exit_code": 0, "warmup": false, "timed_out": false, "cpu_ms": 105}
        ],
        "stats": {
            "wall_ms": {"median": 100, "min": 100, "max": 100},
            "cpu_ms": {"median": 105, "min": 105, "max": 105}
        }
    });
    fs::write(
        &baseline_path,
        serde_json::to_string_pretty(&baseline).unwrap(),
    )
    .expect("write baseline");
    // FIX: the references below were mojibake ("¤t_path" / "¤t")
    // where `&current_path` / `&current` belong; the file did not compile.
    fs::write(
        &current_path,
        serde_json::to_string_pretty(&current).unwrap(),
    )
    .expect("write current");
    let mut cmd = perfgate_cmd();
    cmd.arg("compare")
        .arg("--baseline")
        .arg(&baseline_path)
        .arg("--current")
        .arg(&current_path)
        .arg("--metric-threshold")
        .arg("cpu_ms=0.20")
        .arg("--out")
        .arg(&compare_path);
    cmd.assert().success();
    let content = fs::read_to_string(&compare_path).expect("failed to read compare file");
    let receipt: serde_json::Value =
        serde_json::from_str(&content).expect("compare.json should be valid JSON");
    let cpu_delta = &receipt["deltas"]["cpu_ms"];
    assert!(cpu_delta.is_object(), "deltas should contain cpu_ms");
    assert_eq!(
        cpu_delta["status"].as_str(),
        Some("pass"),
        "cpu_ms status should be pass (5% regression within 20% threshold)"
    );
}
#[test]
fn test_md_includes_cpu_ms_when_present() {
    // Full pipeline: compare two runs that both carry cpu_ms stats, render
    // the compare receipt to Markdown, and verify the metric appears there.
    let temp_dir = tempdir().expect("failed to create temp dir");
    let baseline_path = temp_dir.path().join("baseline.json");
    let current_path = temp_dir.path().join("current.json");
    let compare_path = temp_dir.path().join("compare.json");
    let md_path = temp_dir.path().join("comment.md");
    let baseline = serde_json::json!({
        "schema": "perfgate.run.v1",
        "tool": {"name": "perfgate", "version": "0.1.0"},
        "run": {
            "id": "baseline",
            "started_at": "2024-01-01T00:00:00Z",
            "ended_at": "2024-01-01T00:01:00Z",
            "host": {"os": "linux", "arch": "x86_64"}
        },
        "bench": {
            "name": "md-cpu-test",
            "command": ["echo", "hello"],
            "repeat": 2,
            "warmup": 0
        },
        "samples": [
            {"wall_ms": 100, "exit_code": 0, "warmup": false, "timed_out": false, "cpu_ms": 80},
            {"wall_ms": 100, "exit_code": 0, "warmup": false, "timed_out": false, "cpu_ms": 80}
        ],
        "stats": {
            "wall_ms": {"median": 100, "min": 100, "max": 100},
            "cpu_ms": {"median": 80, "min": 80, "max": 80}
        }
    });
    let current = serde_json::json!({
        "schema": "perfgate.run.v1",
        "tool": {"name": "perfgate", "version": "0.1.0"},
        "run": {
            "id": "current",
            "started_at": "2024-01-02T00:00:00Z",
            "ended_at": "2024-01-02T00:01:00Z",
            "host": {"os": "linux", "arch": "x86_64"}
        },
        "bench": {
            "name": "md-cpu-test",
            "command": ["echo", "hello"],
            "repeat": 2,
            "warmup": 0
        },
        "samples": [
            {"wall_ms": 100, "exit_code": 0, "warmup": false, "timed_out": false, "cpu_ms": 85},
            {"wall_ms": 100, "exit_code": 0, "warmup": false, "timed_out": false, "cpu_ms": 85}
        ],
        "stats": {
            "wall_ms": {"median": 100, "min": 100, "max": 100},
            "cpu_ms": {"median": 85, "min": 85, "max": 85}
        }
    });
    fs::write(
        &baseline_path,
        serde_json::to_string_pretty(&baseline).unwrap(),
    )
    .expect("write baseline");
    // FIX: the references below were mojibake ("¤t_path" / "¤t")
    // where `&current_path` / `&current` belong; the file did not compile.
    fs::write(
        &current_path,
        serde_json::to_string_pretty(&current).unwrap(),
    )
    .expect("write current");
    let mut compare_cmd = perfgate_cmd();
    compare_cmd
        .arg("compare")
        .arg("--baseline")
        .arg(&baseline_path)
        .arg("--current")
        .arg(&current_path)
        .arg("--metric-threshold")
        .arg("cpu_ms=0.20")
        .arg("--out")
        .arg(&compare_path);
    compare_cmd.assert().success();
    let mut md_cmd = perfgate_cmd();
    md_cmd
        .arg("md")
        .arg("--compare")
        .arg(&compare_path)
        .arg("--out")
        .arg(&md_path);
    md_cmd.assert().success();
    let md_content = fs::read_to_string(&md_path).expect("failed to read markdown file");
    assert!(
        md_content.contains("cpu_ms"),
        "Markdown output should contain cpu_ms metric"
    );
}