use serde_json::Value;
use std::fs;
use tempfile::tempdir;
mod common;
use common::perfgate_cmd;
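// Integration tests for `perfgate check --mode cockpit`: they exercise the
// sensor.report.v1 output contract, the artifact layout under --out-dir, and
// the exit-code behavior in error and regression scenarios.

// Platform-specific helper commands: one that exits successfully right away,
// and one that sleeps for ~50 ms so a regression can be forced against a
// deliberately fast baseline.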
#[cfg(unix)]
fn success_command() -> Vec<&'static str> {
vec!["true"]
}
#[cfg(windows)]
fn success_command() -> Vec<&'static str> {
vec!["cmd", "/c", "exit", "0"]
}
#[cfg(unix)]
fn slow_command() -> Vec<&'static str> {
vec!["sh", "-c", "sleep 0.05"]
}
#[cfg(windows)]
fn slow_command() -> Vec<&'static str> {
vec!["powershell", "-Command", "Start-Sleep -Milliseconds 50"]
}
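// Writes a minimal TOML config with a single benchmark that runs the fast
// success command (repeat = 2, no warmup, 20% threshold).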
fn create_config_file(temp_dir: &std::path::Path, bench_name: &str) -> std::path::PathBuf {
let config_path = temp_dir.join("perfgate.toml");
let success_cmd = success_command();
let cmd_str = success_cmd
.iter()
.map(|s| format!("\"{}\"", s))
.collect::<Vec<_>>()
.join(", ");
let config_content = format!(
r#"
[defaults]
repeat = 2
warmup = 0
threshold = 0.20
[[bench]]
name = "{}"
command = [{}]
"#,
bench_name, cmd_str
);
fs::write(&config_path, config_content).expect("Failed to write config file");
config_path
}
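// Same shape as `create_config_file`, but the benchmark runs the ~50 ms sleep
// command so it can regress against a fast baseline.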
fn create_slow_config_file(temp_dir: &std::path::Path, bench_name: &str) -> std::path::PathBuf {
let config_path = temp_dir.join("perfgate_slow.toml");
let slow_cmd = slow_command();
let cmd_str = slow_cmd
.iter()
.map(|s| format!("\"{}\"", s))
.collect::<Vec<_>>()
.join(", ");
let config_content = format!(
r#"
[defaults]
repeat = 2
warmup = 0
threshold = 0.20
[[bench]]
name = "{}"
command = [{}]
"#,
bench_name, cmd_str
);
fs::write(&config_path, config_content).expect("Failed to write config file");
config_path
}
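// Writes the equivalent config as JSON, used to check that non-TOML configs
// are accepted.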
fn create_json_config_file(temp_dir: &std::path::Path, bench_name: &str) -> std::path::PathBuf {
let config_path = temp_dir.join("perfgate.json");
let cmd = success_command();
let config = serde_json::json!({
"defaults": {
"repeat": 1,
"warmup": 0,
"threshold": 0.20
},
"bench": [{
"name": bench_name,
"command": cmd
}]
});
fs::write(&config_path, serde_json::to_string_pretty(&config).unwrap())
.expect("Failed to write config file");
config_path
}
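// Writes a synthetic perfgate.run.v1 baseline receipt under baselines/ with a
// ~10 s median, so the fast success command comfortably beats it.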
fn create_baseline_receipt(temp_dir: &std::path::Path, bench_name: &str) -> std::path::PathBuf {
let baselines_dir = temp_dir.join("baselines");
fs::create_dir_all(&baselines_dir).expect("Failed to create baselines dir");
let baseline_path = baselines_dir.join(format!("{}.json", bench_name));
let receipt = serde_json::json!({
"schema": "perfgate.run.v1",
"tool": {
"name": "perfgate",
"version": "0.1.0"
},
"run": {
"id": "baseline-run-id",
"started_at": "2024-01-01T00:00:00Z",
"ended_at": "2024-01-01T00:01:00Z",
"host": {
"os": "linux",
"arch": "x86_64"
}
},
"bench": {
"name": bench_name,
"command": ["echo", "hello"],
"repeat": 2,
"warmup": 0
},
"samples": [
{"wall_ms": 10000, "exit_code": 0, "warmup": false, "timed_out": false},
{"wall_ms": 10200, "exit_code": 0, "warmup": false, "timed_out": false}
],
"stats": {
"wall_ms": {
"median": 10100,
"min": 10000,
"max": 10200
}
}
});
fs::write(
&baseline_path,
serde_json::to_string_pretty(&receipt).unwrap(),
)
.expect("Failed to write baseline");
baseline_path
}
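// Cockpit mode should write a sensor.report.v1 document to <out-dir>/report.json.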
#[test]
fn test_cockpit_mode_produces_sensor_report_schema() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = create_config_file(temp_dir.path(), "test-bench");
let mut cmd = perfgate_cmd();
cmd.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--bench")
.arg("test-bench")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(
output.status.success(),
"cockpit mode should exit 0: stderr: {}",
String::from_utf8_lossy(&output.stderr)
);
let report_path = out_dir.join("report.json");
assert!(report_path.exists(), "report.json should exist at root");
let report_content = fs::read_to_string(&report_path).expect("failed to read report");
let report: Value = serde_json::from_str(&report_content).expect("failed to parse report");
assert_eq!(
report["schema"], "sensor.report.v1",
"schema should be sensor.report.v1"
);
}
#[test]
fn test_cockpit_mode_artifact_layout() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = create_config_file(temp_dir.path(), "test-bench");
let _baseline_path = create_baseline_receipt(temp_dir.path(), "test-bench");
let mut cmd = perfgate_cmd();
cmd.current_dir(temp_dir.path())
.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--bench")
.arg("test-bench")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(
output.status.success(),
"cockpit mode should exit 0: stderr: {}",
String::from_utf8_lossy(&output.stderr)
);
assert!(out_dir.join("report.json").exists(), "report.json at root");
assert!(out_dir.join("comment.md").exists(), "comment.md at root");
assert!(out_dir.join("extras").is_dir(), "extras/ directory");
assert!(
out_dir.join("extras/perfgate.run.v1.json").exists(),
"extras/perfgate.run.v1.json"
);
assert!(
out_dir.join("extras/perfgate.compare.v1.json").exists(),
"extras/perfgate.compare.v1.json (baseline present)"
);
assert!(
out_dir.join("extras/perfgate.report.v1.json").exists(),
"extras/perfgate.report.v1.json"
);
let root_report: Value =
serde_json::from_str(&fs::read_to_string(out_dir.join("report.json")).unwrap()).unwrap();
assert_eq!(root_report["schema"], "sensor.report.v1");
let native_report: Value = serde_json::from_str(
&fs::read_to_string(out_dir.join("extras/perfgate.report.v1.json")).unwrap(),
)
.unwrap();
assert_eq!(native_report["report_type"], "perfgate.report.v1");
}
#[test]
fn test_cockpit_mode_emit_repair_context_writes_extras_artifact() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = create_config_file(temp_dir.path(), "test-bench");
let mut cmd = perfgate_cmd();
cmd.current_dir(temp_dir.path())
.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--bench")
.arg("test-bench")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit")
.arg("--emit-repair-context");
let output = cmd.output().expect("failed to execute check");
assert!(
output.status.success(),
"cockpit mode should exit 0: stderr: {}",
String::from_utf8_lossy(&output.stderr)
);
assert!(
out_dir.join("extras/repair_context.json").exists(),
"extras/repair_context.json should exist when --emit-repair-context is set"
);
}
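// The ~50 ms benchmark is compared against an inlined 1 ms baseline, so the
// comparison regresses; cockpit mode must still exit 0 and record the
// fail/warn verdict in the report instead.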
#[test]
fn test_cockpit_mode_exits_zero_on_fail() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = create_slow_config_file(temp_dir.path(), "test-bench");
let baselines_dir = temp_dir.path().join("baselines");
fs::create_dir_all(&baselines_dir).expect("Failed to create baselines dir");
let baseline_path = baselines_dir.join("test-bench.json");
let receipt = serde_json::json!({
"schema": "perfgate.run.v1",
"tool": { "name": "perfgate", "version": "0.1.0" },
"run": {
"id": "baseline-run-id",
"started_at": "2024-01-01T00:00:00Z",
"ended_at": "2024-01-01T00:01:00Z",
"host": { "os": "linux", "arch": "x86_64" }
},
"bench": {
"name": "test-bench",
"command": ["echo", "hello"],
"repeat": 2,
"warmup": 0
},
"samples": [
{"wall_ms": 1, "exit_code": 0, "warmup": false, "timed_out": false},
{"wall_ms": 1, "exit_code": 0, "warmup": false, "timed_out": false}
],
"stats": {
"wall_ms": { "median": 1, "min": 1, "max": 1 }
}
});
fs::write(
&baseline_path,
serde_json::to_string_pretty(&receipt).unwrap(),
)
.expect("Failed to write baseline");
let mut cmd = perfgate_cmd();
cmd.current_dir(temp_dir.path())
.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--bench")
.arg("test-bench")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(
output.status.success(),
"cockpit mode should exit 0 even on fail: exit code {:?}, stderr: {}",
output.status.code(),
String::from_utf8_lossy(&output.stderr)
);
let report_path = out_dir.join("report.json");
let report: Value =
serde_json::from_str(&fs::read_to_string(&report_path).expect("read report"))
.expect("parse report");
let verdict_status = report["verdict"]["status"].as_str().unwrap();
assert!(
verdict_status == "fail" || verdict_status == "warn",
"verdict should be fail or warn, got: {}",
verdict_status
);
}
#[test]
fn test_cockpit_mode_report_structure() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = create_config_file(temp_dir.path(), "test-bench");
let _baseline_path = create_baseline_receipt(temp_dir.path(), "test-bench");
let mut cmd = perfgate_cmd();
cmd.current_dir(temp_dir.path())
.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--bench")
.arg("test-bench")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(output.status.success());
let report_path = out_dir.join("report.json");
let report: Value =
serde_json::from_str(&fs::read_to_string(&report_path).expect("read report"))
.expect("parse report");
assert!(report.get("schema").is_some(), "schema field missing");
assert!(report.get("tool").is_some(), "tool field missing");
assert!(report.get("run").is_some(), "run field missing");
assert!(report.get("verdict").is_some(), "verdict field missing");
assert!(report.get("findings").is_some(), "findings field missing");
assert!(report.get("data").is_some(), "data field missing");
assert_eq!(report["tool"]["name"], "perfgate");
let run = &report["run"];
assert!(run.get("started_at").is_some(), "started_at missing");
assert!(run.get("ended_at").is_some(), "ended_at missing");
assert!(run.get("duration_ms").is_some(), "duration_ms missing");
assert!(run.get("capabilities").is_some(), "capabilities missing");
assert_eq!(run["capabilities"]["baseline"]["status"], "available");
let verdict = &report["verdict"];
assert!(verdict.get("status").is_some(), "verdict.status missing");
assert!(verdict.get("counts").is_some(), "verdict.counts missing");
assert!(verdict.get("reasons").is_some(), "verdict.reasons missing");
let counts = &verdict["counts"];
assert!(counts.get("info").is_some(), "counts.info missing");
assert!(counts.get("warn").is_some(), "counts.warn missing");
assert!(counts.get("error").is_some(), "counts.error missing");
let data = &report["data"];
assert!(data.get("summary").is_some(), "data.summary missing");
assert!(
data.get("compare").is_none(),
"data should not have compare key"
);
}
#[test]
fn test_cockpit_mode_no_baseline_capability() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = create_config_file(temp_dir.path(), "no-baseline-bench");
let mut cmd = perfgate_cmd();
cmd.current_dir(temp_dir.path())
.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--bench")
.arg("no-baseline-bench")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(
output.status.success(),
"cockpit mode should exit 0: stderr: {}",
String::from_utf8_lossy(&output.stderr)
);
let report_path = out_dir.join("report.json");
let report: Value =
serde_json::from_str(&fs::read_to_string(&report_path).expect("read report"))
.expect("parse report");
assert_eq!(
report["run"]["capabilities"]["baseline"]["status"],
"unavailable"
);
let reason = report["run"]["capabilities"]["baseline"]["reason"]
.as_str()
.unwrap_or("");
assert_eq!(
reason, "no_baseline",
"reason should be 'no_baseline' token, got: {}",
reason
);
}
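// An unparseable config must not crash cockpit mode: it should still exit 0
// and emit a fail report with a tool_error reason and a tool.runtime finding.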
#[test]
fn test_cockpit_mode_config_error_produces_report() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = temp_dir.path().join("invalid.toml");
fs::write(&config_path, "this is not valid toml {{{").expect("write invalid config");
let mut cmd = perfgate_cmd();
cmd.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--bench")
.arg("test-bench")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(
output.status.success(),
"cockpit mode should exit 0 on config error: exit code {:?}, stderr: {}",
output.status.code(),
String::from_utf8_lossy(&output.stderr)
);
let report_path = out_dir.join("report.json");
assert!(
report_path.exists(),
"report.json should exist even on error"
);
let report: Value =
serde_json::from_str(&fs::read_to_string(&report_path).expect("read report"))
.expect("parse report");
assert_eq!(report["schema"], "sensor.report.v1");
assert_eq!(report["verdict"]["status"], "fail");
assert_eq!(report["verdict"]["reasons"][0], "tool_error");
let findings = report["findings"].as_array().expect("findings array");
assert!(!findings.is_empty(), "should have at least one finding");
assert_eq!(findings[0]["severity"], "error");
assert_eq!(findings[0]["check_id"], "tool.runtime");
assert_eq!(findings[0]["code"], "runtime_error");
let finding_data = &findings[0]["data"];
assert!(
finding_data.get("stage").is_some(),
"finding should have stage"
);
assert!(
finding_data.get("error_kind").is_some(),
"finding should have error_kind"
);
}
#[test]
fn test_cockpit_mode_json_config_parsing() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = create_json_config_file(temp_dir.path(), "json-bench");
let mut cmd = perfgate_cmd();
cmd.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--bench")
.arg("json-bench")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(
output.status.success(),
"cockpit mode should succeed with JSON config: stderr: {}",
String::from_utf8_lossy(&output.stderr)
);
assert!(
out_dir.join("report.json").exists(),
"report.json should exist"
);
}
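// A regular file already occupies the extras/ path, so the extras directory
// cannot be created; cockpit mode should tolerate this and still write
// report.json.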
#[test]
fn test_cockpit_mode_extras_dir_creation_error() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = create_config_file(temp_dir.path(), "extras-error");
fs::create_dir_all(&out_dir).expect("create out_dir");
fs::write(out_dir.join("extras"), "not a dir").expect("write extras file");
let mut cmd = perfgate_cmd();
cmd.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--bench")
.arg("extras-error")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(
output.status.success(),
"cockpit mode should exit 0 on extras dir error: stderr: {}",
String::from_utf8_lossy(&output.stderr)
);
let report_path = out_dir.join("report.json");
assert!(report_path.exists(), "report.json should exist");
}
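// When --out-dir itself is a regular file, even the error report cannot be
// written; this is the one case where cockpit mode is expected to exit 1.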
#[test]
fn test_cockpit_mode_catastrophic_report_write_failure() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("not-a-dir");
fs::write(&out_dir, "not a directory").expect("write out_dir file");
let missing_config = temp_dir.path().join("missing.toml");
let mut cmd = perfgate_cmd();
cmd.arg("check")
.arg("--config")
.arg(&missing_config)
.arg("--bench")
.arg("bench")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(
!output.status.success(),
"cockpit mode should fail when report write fails"
);
assert_eq!(output.status.code(), Some(1));
}
#[test]
fn test_standard_mode_still_works() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts");
let config_path = create_config_file(temp_dir.path(), "test-bench");
let mut cmd = perfgate_cmd();
cmd.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--bench")
.arg("test-bench")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("standard");
let output = cmd.output().expect("failed to execute check");
assert!(output.status.success());
let report_path = out_dir.join("report.json");
assert!(report_path.exists());
let report: Value =
serde_json::from_str(&fs::read_to_string(&report_path).expect("read report"))
.expect("parse report");
assert_eq!(
report["report_type"], "perfgate.report.v1",
"standard mode should produce perfgate.report.v1"
);
}
#[test]
fn test_default_mode_is_standard() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts");
let config_path = create_config_file(temp_dir.path(), "test-bench");
let mut cmd = perfgate_cmd();
cmd.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--bench")
.arg("test-bench")
.arg("--out-dir")
.arg(&out_dir);
let output = cmd.output().expect("failed to execute check");
assert!(output.status.success());
let report_path = out_dir.join("report.json");
let report: Value =
serde_json::from_str(&fs::read_to_string(&report_path).expect("read report"))
.expect("parse report");
assert_eq!(
report["report_type"], "perfgate.report.v1",
"default mode should produce perfgate.report.v1"
);
}
#[test]
fn test_cockpit_mode_missing_bench_produces_error_report() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = create_config_file(temp_dir.path(), "real-bench");
let mut cmd = perfgate_cmd();
cmd.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--bench")
.arg("nonexistent-bench") .arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(
output.status.success(),
"cockpit mode should exit 0 on missing bench: stderr: {}",
String::from_utf8_lossy(&output.stderr)
);
let report_path = out_dir.join("report.json");
assert!(report_path.exists(), "report.json should exist");
let report: Value =
serde_json::from_str(&fs::read_to_string(&report_path).expect("read report"))
.expect("parse report");
assert_eq!(report["schema"], "sensor.report.v1");
assert_eq!(report["verdict"]["status"], "fail");
}
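// A bench name containing a path traversal component ("../evil") is rejected
// during config parsing; cockpit mode still exits 0 and reports it as a
// tool_error with stage = config_parse.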
#[test]
fn test_cockpit_mode_rejects_path_traversal_bench_name() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = temp_dir.path().join("perfgate.toml");
let success_cmd = success_command();
let cmd_str = success_cmd
.iter()
.map(|s| format!("\"{}\"", s))
.collect::<Vec<_>>()
.join(", ");
let config_content = format!(
r#"
[defaults]
repeat = 2
warmup = 0
threshold = 0.20
[[bench]]
name = "../evil"
command = [{}]
"#,
cmd_str
);
fs::write(&config_path, config_content).expect("write config");
let mut cmd = perfgate_cmd();
cmd.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--bench")
.arg("../evil")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(
output.status.success(),
"cockpit mode should exit 0 on validation error: exit code {:?}, stderr: {}",
output.status.code(),
String::from_utf8_lossy(&output.stderr)
);
let report_path = out_dir.join("report.json");
assert!(
report_path.exists(),
"report.json should exist even on validation error"
);
let report: Value =
serde_json::from_str(&fs::read_to_string(&report_path).expect("read report"))
.expect("parse report");
assert_eq!(report["schema"], "sensor.report.v1");
assert_eq!(report["verdict"]["status"], "fail");
assert_eq!(report["verdict"]["reasons"][0], "tool_error");
let findings = report["findings"].as_array().expect("findings array");
assert!(!findings.is_empty(), "should have at least one finding");
assert_eq!(findings[0]["check_id"], "tool.runtime");
assert_eq!(findings[0]["code"], "runtime_error");
let finding_data = &findings[0]["data"];
assert_eq!(finding_data["stage"], "config_parse");
}
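// Writes a TOML config containing one [[bench]] entry per name, all running
// the fast success command.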
fn create_multi_bench_config(
temp_dir: &std::path::Path,
bench_names: &[&str],
) -> std::path::PathBuf {
let config_path = temp_dir.join("perfgate.toml");
let success_cmd = success_command();
let cmd_str = success_cmd
.iter()
.map(|s| format!("\"{}\"", s))
.collect::<Vec<_>>()
.join(", ");
let mut config_content = String::from(
r#"
[defaults]
repeat = 2
warmup = 0
threshold = 0.20
"#,
);
for name in bench_names {
config_content.push_str(&format!(
r#"
[[bench]]
name = "{}"
command = [{}]
"#,
name, cmd_str
));
}
fs::write(&config_path, config_content).expect("Failed to write config file");
config_path
}
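// Multi-bench variant of the slow config: every bench runs the ~50 ms sleep
// command.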
fn create_slow_multi_bench_config(
temp_dir: &std::path::Path,
bench_names: &[&str],
) -> std::path::PathBuf {
let config_path = temp_dir.join("perfgate_slow_multi.toml");
let slow_cmd = slow_command();
let cmd_str = slow_cmd
.iter()
.map(|s| format!("\"{}\"", s))
.collect::<Vec<_>>()
.join(", ");
let mut config_content = String::from(
r#"
[defaults]
repeat = 2
warmup = 0
threshold = 0.20
"#,
);
for name in bench_names {
config_content.push_str(&format!(
r#"
[[bench]]
name = "{}"
command = [{}]
"#,
name, cmd_str
));
}
fs::write(&config_path, config_content).expect("Failed to write config file");
config_path
}
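// Compiles the vendored sensor.report.v1 JSON Schema from the repository's
// contracts directory.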
fn load_vendored_schema_validator() -> jsonschema::Validator {
let schema_path = std::path::Path::new(env!("CARGO_MANIFEST_DIR"))
.join("../../contracts/schemas/sensor.report.v1.schema.json");
let schema_content = fs::read_to_string(&schema_path).expect("read schema");
let schema_value: Value = serde_json::from_str(&schema_content).expect("parse schema");
jsonschema::validator_for(&schema_value).expect("compile schema")
}
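// Runs `perfgate check --all` in cockpit mode over the given benches
// (optionally seeding a baseline receipt for each) and returns the parsed
// report.json together with the output directory.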
fn run_cockpit_multi_bench(
temp_dir: &tempfile::TempDir,
bench_names: &[&str],
create_baselines: bool,
) -> (Value, std::path::PathBuf) {
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = create_multi_bench_config(temp_dir.path(), bench_names);
if create_baselines {
for name in bench_names {
create_baseline_receipt(temp_dir.path(), name);
}
}
let mut cmd = perfgate_cmd();
cmd.current_dir(temp_dir.path())
.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--all")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(
output.status.success(),
"cockpit multi-bench should exit 0: stderr: {}",
String::from_utf8_lossy(&output.stderr)
);
let report_path = out_dir.join("report.json");
let content = fs::read_to_string(&report_path).expect("read report.json");
let report: Value = serde_json::from_str(&content).expect("parse report.json");
(report, out_dir)
}
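// Without baselines, each bench gets its own extras/<name>/ directory with run
// and report receipts but no compare receipt.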
#[test]
fn test_cockpit_multi_bench_artifact_layout() {
let temp_dir = tempdir().expect("failed to create temp dir");
let (_report, out_dir) = run_cockpit_multi_bench(&temp_dir, &["bench-a", "bench-b"], false);
assert!(out_dir.join("report.json").exists(), "report.json at root");
assert!(out_dir.join("comment.md").exists(), "comment.md at root");
for name in &["bench-a", "bench-b"] {
let prefix = out_dir.join("extras").join(name);
assert!(prefix.is_dir(), "extras/{} directory", name);
assert!(
prefix.join("perfgate.run.v1.json").exists(),
"extras/{}/perfgate.run.v1.json",
name
);
assert!(
prefix.join("perfgate.report.v1.json").exists(),
"extras/{}/perfgate.report.v1.json",
name
);
assert!(
!prefix.join("perfgate.compare.v1.json").exists(),
"extras/{}/perfgate.compare.v1.json should NOT exist without baseline",
name
);
}
}
#[test]
fn test_cockpit_multi_bench_artifact_layout_with_baselines() {
let temp_dir = tempdir().expect("failed to create temp dir");
let (_report, out_dir) = run_cockpit_multi_bench(&temp_dir, &["bench-a", "bench-b"], true);
for name in &["bench-a", "bench-b"] {
let prefix = out_dir.join("extras").join(name);
assert!(
prefix.join("perfgate.compare.v1.json").exists(),
"extras/{}/perfgate.compare.v1.json should exist with baseline",
name
);
}
}
#[test]
fn test_cockpit_multi_bench_schema_validation() {
let temp_dir = tempdir().expect("failed to create temp dir");
let (report, _out_dir) = run_cockpit_multi_bench(&temp_dir, &["bench-a", "bench-b"], false);
let validator = load_vendored_schema_validator();
let errors: Vec<_> = validator.iter_errors(&report).collect();
assert!(
errors.is_empty(),
"multi-bench report failed schema validation:\n{}",
errors
.iter()
.map(|e| format!(" - {}", e))
.collect::<Vec<_>>()
.join("\n")
);
}
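// Only bench-a gets a fast (1 ms) baseline while both benches sleep ~50 ms, so
// at least one bench regresses; the aggregated verdict should take the worst
// per-bench outcome.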
#[test]
fn test_cockpit_multi_bench_verdict_worst_wins() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = create_slow_multi_bench_config(temp_dir.path(), &["bench-a", "bench-b"]);
let baselines_dir = temp_dir.path().join("baselines");
fs::create_dir_all(&baselines_dir).expect("create baselines dir");
let fast_baseline = serde_json::json!({
"schema": "perfgate.run.v1",
"tool": { "name": "perfgate", "version": "0.1.0" },
"run": {
"id": "baseline-id",
"started_at": "2024-01-01T00:00:00Z",
"ended_at": "2024-01-01T00:01:00Z",
"host": { "os": "linux", "arch": "x86_64" }
},
"bench": {
"name": "bench-a",
"command": ["echo", "hello"],
"repeat": 2,
"warmup": 0
},
"samples": [
{"wall_ms": 1, "exit_code": 0, "warmup": false, "timed_out": false},
{"wall_ms": 1, "exit_code": 0, "warmup": false, "timed_out": false}
],
"stats": { "wall_ms": { "median": 1, "min": 1, "max": 1 } }
});
fs::write(
baselines_dir.join("bench-a.json"),
serde_json::to_string_pretty(&fast_baseline).unwrap(),
)
.expect("write bench-a baseline");
let mut cmd = perfgate_cmd();
cmd.current_dir(temp_dir.path())
.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--all")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(output.status.success());
let report: Value =
serde_json::from_str(&fs::read_to_string(out_dir.join("report.json")).unwrap()).unwrap();
let verdict_status = report["verdict"]["status"].as_str().unwrap();
assert!(
verdict_status == "fail" || verdict_status == "warn",
"aggregated verdict should be fail or warn (worst-of), got: {}",
verdict_status
);
}
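// With no baselines, each bench contributes one warn, so the aggregated counts
// should sum to warn = 2 and data.summary.total_count should match.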
#[test]
fn test_cockpit_multi_bench_counts_summed() {
let temp_dir = tempdir().expect("failed to create temp dir");
let (report, _out_dir) = run_cockpit_multi_bench(&temp_dir, &["bench-a", "bench-b"], false);
let counts = &report["verdict"]["counts"];
let warn = counts["warn"].as_u64().unwrap();
let info = counts["info"].as_u64().unwrap();
let error = counts["error"].as_u64().unwrap();
assert_eq!(warn, 2, "warn counts should be summed");
assert_eq!(info, 0, "info counts should be summed");
assert_eq!(error, 0, "error counts should be summed");
let summary = &report["data"]["summary"];
assert_eq!(
summary["total_count"].as_u64().unwrap(),
info + warn + error,
"total_count should be sum of counts"
);
}
#[test]
fn test_cockpit_mode_output_github_writes_outputs() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = create_config_file(temp_dir.path(), "gh-cockpit-bench");
let github_output = temp_dir.path().join("github_output.txt");
let mut cmd = perfgate_cmd();
cmd.current_dir(temp_dir.path())
.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--bench")
.arg("gh-cockpit-bench")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit")
.arg("--output-github")
.env("GITHUB_OUTPUT", &github_output);
let output = cmd.output().expect("failed to execute check");
assert!(
output.status.success(),
"cockpit mode should exit 0: stderr: {}",
String::from_utf8_lossy(&output.stderr)
);
let content = fs::read_to_string(&github_output).expect("read GITHUB_OUTPUT");
assert!(content.contains("verdict="));
assert!(content.contains("pass_count="));
assert!(content.contains("warn_count="));
assert!(content.contains("fail_count="));
assert!(content.contains("bench_count=1"));
}
#[test]
fn test_cockpit_mode_md_template_customizes_comment() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = create_config_file(temp_dir.path(), "template-bench");
let _baseline_path = create_baseline_receipt(temp_dir.path(), "template-bench");
let template_path = temp_dir.path().join("comment.hbs");
fs::write(
&template_path,
r#"bench={{bench.name}}
{{#each rows}}metric={{metric}} status={{status}}
{{/each}}
"#,
)
.expect("write template");
let mut cmd = perfgate_cmd();
cmd.current_dir(temp_dir.path())
.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--bench")
.arg("template-bench")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit")
.arg("--md-template")
.arg(&template_path);
let output = cmd.output().expect("failed to execute check");
assert!(
output.status.success(),
"cockpit mode should succeed: stderr: {}",
String::from_utf8_lossy(&output.stderr)
);
let content = fs::read_to_string(out_dir.join("comment.md")).expect("read comment.md");
assert!(content.contains("bench=template-bench"));
assert!(content.contains("metric=wall_ms"));
}
#[test]
fn test_cockpit_multi_bench_findings_prefixed() {
let temp_dir = tempdir().expect("failed to create temp dir");
let (report, _out_dir) = run_cockpit_multi_bench(&temp_dir, &["bench-a", "bench-b"], false);
let findings = report["findings"].as_array().expect("findings array");
assert!(findings.len() >= 2, "should have at least 2 findings");
let has_bench_a_prefix = findings
.iter()
.any(|f| f["message"].as_str().unwrap_or("").starts_with("[bench-a]"));
let has_bench_b_prefix = findings
.iter()
.any(|f| f["message"].as_str().unwrap_or("").starts_with("[bench-b]"));
assert!(has_bench_a_prefix, "should have [bench-a] prefix");
assert!(has_bench_b_prefix, "should have [bench-b] prefix");
for finding in findings {
let data = finding.get("data");
if let Some(data) = data {
assert!(
data.get("bench_name").is_some(),
"finding data should have bench_name"
);
}
}
}
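// Every finding carries a fingerprint that should be 64 lowercase hex
// characters and unique across the report.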
#[test]
fn test_cockpit_multi_bench_fingerprints_unique() {
let temp_dir = tempdir().expect("failed to create temp dir");
let (report, _out_dir) = run_cockpit_multi_bench(&temp_dir, &["bench-a", "bench-b"], false);
let findings = report["findings"].as_array().expect("findings array");
let mut fingerprints: Vec<&str> = Vec::new();
for finding in findings {
let fp = finding["fingerprint"]
.as_str()
.expect("finding should have fingerprint");
assert_eq!(fp.len(), 64, "fingerprint should be 64-char hex: {}", fp);
assert!(
fp.chars()
.all(|c| c.is_ascii_hexdigit() && !c.is_ascii_uppercase()),
"fingerprint should be lowercase hex: {}",
fp
);
assert!(
!fingerprints.contains(&fp),
"fingerprints should be unique: {} seen twice",
fp
);
fingerprints.push(fp);
}
}
#[test]
fn test_cockpit_multi_bench_reasons_deduped() {
let temp_dir = tempdir().expect("failed to create temp dir");
let (report, _out_dir) = run_cockpit_multi_bench(&temp_dir, &["bench-a", "bench-b"], false);
let reasons = report["verdict"]["reasons"]
.as_array()
.expect("reasons array");
let no_baseline_count = reasons
.iter()
.filter(|r| r.as_str() == Some("no_baseline"))
.count();
assert_eq!(
no_baseline_count, 1,
"no_baseline should appear exactly once in reasons"
);
}
#[test]
fn test_cockpit_multi_bench_baseline_all_available() {
let temp_dir = tempdir().expect("failed to create temp dir");
let (report, _out_dir) = run_cockpit_multi_bench(&temp_dir, &["bench-a", "bench-b"], true);
assert_eq!(
report["run"]["capabilities"]["baseline"]["status"], "available",
"all baselines → status = available"
);
}
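// Only bench-a has a baseline: the aggregated baseline capability should be
// reported as unavailable, but without a single machine-readable reason.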
#[test]
fn test_cockpit_multi_bench_baseline_partial() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = create_multi_bench_config(temp_dir.path(), &["bench-a", "bench-b"]);
create_baseline_receipt(temp_dir.path(), "bench-a");
let mut cmd = perfgate_cmd();
cmd.current_dir(temp_dir.path())
.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--all")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(output.status.success());
let report: Value =
serde_json::from_str(&fs::read_to_string(out_dir.join("report.json")).unwrap()).unwrap();
assert_eq!(
report["run"]["capabilities"]["baseline"]["status"], "unavailable",
"partial baselines → status = unavailable"
);
assert!(
report["run"]["capabilities"]["baseline"]["reason"].is_null(),
"partial baselines → reason = null"
);
}
#[test]
fn test_cockpit_multi_bench_baseline_none() {
let temp_dir = tempdir().expect("failed to create temp dir");
let (report, _out_dir) = run_cockpit_multi_bench(&temp_dir, &["bench-a", "bench-b"], false);
assert_eq!(
report["run"]["capabilities"]["baseline"]["status"],
"unavailable"
);
assert_eq!(
report["run"]["capabilities"]["baseline"]["reason"],
"no_baseline"
);
}
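// Both benches get inlined 1 ms baselines, which the spawned success command
// is likely to exceed; whatever the comparison outcome, cockpit mode must
// exit 0.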
#[test]
fn test_cockpit_multi_bench_exits_zero_on_fail() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = create_multi_bench_config(temp_dir.path(), &["bench-a", "bench-b"]);
let baselines_dir = temp_dir.path().join("baselines");
fs::create_dir_all(&baselines_dir).expect("create baselines dir");
for name in &["bench-a", "bench-b"] {
let baseline = serde_json::json!({
"schema": "perfgate.run.v1",
"tool": { "name": "perfgate", "version": "0.1.0" },
"run": {
"id": "baseline-id",
"started_at": "2024-01-01T00:00:00Z",
"ended_at": "2024-01-01T00:01:00Z",
"host": { "os": "linux", "arch": "x86_64" }
},
"bench": {
"name": name,
"command": ["echo", "hello"],
"repeat": 2,
"warmup": 0
},
"samples": [
{"wall_ms": 1, "exit_code": 0, "warmup": false, "timed_out": false},
{"wall_ms": 1, "exit_code": 0, "warmup": false, "timed_out": false}
],
"stats": { "wall_ms": { "median": 1, "min": 1, "max": 1 } }
});
fs::write(
baselines_dir.join(format!("{}.json", name)),
serde_json::to_string_pretty(&baseline).unwrap(),
)
.expect("write baseline");
}
let mut cmd = perfgate_cmd();
cmd.current_dir(temp_dir.path())
.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--all")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(
output.status.success(),
"cockpit multi-bench should exit 0 even on fail: exit {:?}, stderr: {}",
output.status.code(),
String::from_utf8_lossy(&output.stderr)
);
}
#[test]
fn test_cockpit_multi_bench_comment_md_combined() {
let temp_dir = tempdir().expect("failed to create temp dir");
let (_report, out_dir) = run_cockpit_multi_bench(&temp_dir, &["bench-a", "bench-b"], false);
let md = fs::read_to_string(out_dir.join("comment.md")).expect("read comment.md");
assert!(md.contains("bench-a"), "markdown should contain bench-a");
assert!(md.contains("bench-b"), "markdown should contain bench-b");
}
#[test]
fn test_cockpit_multi_bench_artifacts_sorted() {
let temp_dir = tempdir().expect("failed to create temp dir");
let (report, _out_dir) = run_cockpit_multi_bench(&temp_dir, &["bench-a", "bench-b"], true);
let artifacts = report["artifacts"].as_array().expect("artifacts array");
for window in artifacts.windows(2) {
let a_type = window[0]["type"].as_str().unwrap();
let a_path = window[0]["path"].as_str().unwrap();
let b_type = window[1]["type"].as_str().unwrap();
let b_path = window[1]["path"].as_str().unwrap();
assert!(
(a_type, a_path) <= (b_type, b_path),
"artifacts not sorted: ({}, {}) > ({}, {})",
a_type,
a_path,
b_type,
b_path
);
}
}
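// A config with only [defaults] and no [[bench]] entries is a tool error;
// cockpit mode still exits 0 and reports verdict = fail with tool_error.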
#[test]
fn test_cockpit_multi_bench_empty_config_error() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = temp_dir.path().join("perfgate.toml");
fs::write(
&config_path,
r#"
[defaults]
repeat = 2
"#,
)
.expect("write config");
let mut cmd = perfgate_cmd();
cmd.current_dir(temp_dir.path())
.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--all")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(
output.status.success(),
"cockpit should exit 0 even on empty config: stderr: {}",
String::from_utf8_lossy(&output.stderr)
);
let report: Value =
serde_json::from_str(&fs::read_to_string(out_dir.join("report.json")).unwrap()).unwrap();
assert_eq!(report["schema"], "sensor.report.v1");
assert_eq!(report["verdict"]["status"], "fail");
assert_eq!(report["verdict"]["reasons"][0], "tool_error");
}
#[test]
fn test_cockpit_multi_bench_report_structure() {
let temp_dir = tempdir().expect("failed to create temp dir");
let (report, _out_dir) = run_cockpit_multi_bench(&temp_dir, &["bench-a", "bench-b"], true);
assert_eq!(report["schema"], "sensor.report.v1");
assert!(report.get("tool").is_some(), "tool field missing");
assert_eq!(report["tool"]["name"], "perfgate");
assert!(report.get("run").is_some(), "run field missing");
assert!(
report["run"].get("started_at").is_some(),
"started_at missing"
);
assert!(report["run"].get("ended_at").is_some(), "ended_at missing");
assert!(
report["run"].get("duration_ms").is_some(),
"duration_ms missing"
);
assert!(
report["run"].get("capabilities").is_some(),
"capabilities missing"
);
assert!(report.get("verdict").is_some(), "verdict field missing");
assert!(
report["verdict"].get("status").is_some(),
"verdict.status missing"
);
assert!(
report["verdict"].get("counts").is_some(),
"verdict.counts missing"
);
assert!(
report["verdict"]["counts"].get("info").is_some(),
"counts.info missing"
);
assert!(
report["verdict"]["counts"].get("warn").is_some(),
"counts.warn missing"
);
assert!(
report["verdict"]["counts"].get("error").is_some(),
"counts.error missing"
);
assert!(report.get("findings").is_some(), "findings field missing");
assert!(report.get("data").is_some(), "data field missing");
assert!(
report["data"].get("summary").is_some(),
"data.summary missing"
);
}
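// With only two benches the findings stay under any truncation limit, so the
// findings_total / findings_emitted fields should be absent (or null).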
#[test]
fn test_cockpit_multi_bench_no_truncation_fields() {
let temp_dir = tempdir().expect("failed to create temp dir");
let (report, _out_dir) = run_cockpit_multi_bench(&temp_dir, &["bench-a", "bench-b"], false);
assert!(
report["data"].get("findings_total").is_none()
|| report["data"]["findings_total"].is_null(),
"findings_total should be absent when under limit"
);
assert!(
report["data"].get("findings_emitted").is_none()
|| report["data"]["findings_emitted"].is_null(),
"findings_emitted should be absent when under limit"
);
}
#[test]
fn test_cockpit_multi_bench_extras_valid_native() {
let temp_dir = tempdir().expect("failed to create temp dir");
let (_report, out_dir) = run_cockpit_multi_bench(&temp_dir, &["bench-a", "bench-b"], true);
for name in &["bench-a", "bench-b"] {
let prefix = out_dir.join("extras").join(name);
let run_content =
fs::read_to_string(prefix.join("perfgate.run.v1.json")).expect("read run receipt");
let run: Value = serde_json::from_str(&run_content).expect("parse run receipt");
assert_eq!(
run["schema"], "perfgate.run.v1",
"extras/{}/perfgate.run.v1.json should have correct schema",
name
);
let report_content =
fs::read_to_string(prefix.join("perfgate.report.v1.json")).expect("read native report");
let native_report: Value =
serde_json::from_str(&report_content).expect("parse native report");
assert_eq!(
native_report["report_type"], "perfgate.report.v1",
"extras/{}/perfgate.report.v1.json should have correct report_type",
name
);
let compare_content = fs::read_to_string(prefix.join("perfgate.compare.v1.json"))
.expect("read compare receipt");
let compare: Value = serde_json::from_str(&compare_content).expect("parse compare receipt");
assert_eq!(
compare["schema"], "perfgate.compare.v1",
"extras/{}/perfgate.compare.v1.json should have correct schema",
name
);
}
}
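// Even the error report produced for a broken config must validate against the
// vendored sensor.report.v1 schema and carry fingerprints on its findings.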
#[test]
fn test_cockpit_mode_error_report_validates_schema() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let config_path = temp_dir.path().join("broken.toml");
fs::write(&config_path, "not valid toml {{{").expect("write broken config");
let mut cmd = perfgate_cmd();
cmd.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--bench")
.arg("test-bench")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(output.status.success());
let validator = load_vendored_schema_validator();
let report_path = out_dir.join("report.json");
let content = fs::read_to_string(&report_path).expect("read report");
let instance: Value = serde_json::from_str(&content).expect("parse report");
let errors: Vec<_> = validator.iter_errors(&instance).collect();
assert!(
errors.is_empty(),
"error report failed schema validation:\n{}",
errors
.iter()
.map(|e| format!(" - {}", e))
.collect::<Vec<_>>()
.join("\n")
);
let findings = instance["findings"].as_array().expect("findings");
for finding in findings {
assert!(
finding.get("fingerprint").is_some(),
"error finding should have fingerprint"
);
}
}
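// A command name that should not resolve on any PATH, used to force a runtime
// error for a single bench.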
fn nonexistent_command() -> Vec<&'static str> {
vec!["perfgate_nonexistent_command_that_does_not_exist_xyz"]
}
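// Writes a TOML config where each bench gets its own explicit command, so one
// bench can succeed while another fails to spawn.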
fn create_mixed_outcome_config(
temp_dir: &std::path::Path,
benches: &[(&str, &[&str])],
) -> std::path::PathBuf {
let config_path = temp_dir.join("perfgate.toml");
let mut config_content = String::from(
r#"
[defaults]
repeat = 1
warmup = 0
threshold = 0.20
"#,
);
for (name, cmd) in benches {
let cmd_str = cmd
.iter()
.map(|s| format!("\"{}\"", s))
.collect::<Vec<_>>()
.join(", ");
config_content.push_str(&format!(
r#"
[[bench]]
name = "{}"
command = [{}]
"#,
name, cmd_str
));
}
fs::write(&config_path, config_content).expect("Failed to write config file");
config_path
}
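// One bench runs the ~50 ms sleep (warning for lack of a baseline) while the
// other points at a nonexistent command (runtime error): the aggregated report
// must stay schema-valid, fail overall, keep per-bench prefixes and extras for
// the good bench only, and still count both benches in the summary.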
#[test]
fn test_cockpit_multi_bench_mixed_outcome_error_and_warn() {
let temp_dir = tempdir().expect("failed to create temp dir");
let out_dir = temp_dir.path().join("artifacts/perfgate");
let good_cmd = slow_command();
let bad_cmd = nonexistent_command();
let config_path = create_mixed_outcome_config(
temp_dir.path(),
&[("good-bench", &good_cmd), ("bad-bench", &bad_cmd)],
);
let mut cmd = perfgate_cmd();
cmd.current_dir(temp_dir.path())
.arg("check")
.arg("--config")
.arg(&config_path)
.arg("--all")
.arg("--out-dir")
.arg(&out_dir)
.arg("--mode")
.arg("cockpit");
let output = cmd.output().expect("failed to execute check");
assert!(
output.status.success(),
"cockpit mixed-outcome should exit 0: exit code {:?}, stderr: {}",
output.status.code(),
String::from_utf8_lossy(&output.stderr)
);
let report_path = out_dir.join("report.json");
assert!(report_path.exists(), "report.json should exist");
let report: Value =
serde_json::from_str(&fs::read_to_string(&report_path).expect("read report"))
.expect("parse report");
let validator = load_vendored_schema_validator();
let errors: Vec<_> = validator.iter_errors(&report).collect();
assert!(
errors.is_empty(),
"mixed-outcome report failed schema validation:\n{}",
errors
.iter()
.map(|e| format!(" - {}", e))
.collect::<Vec<_>>()
.join("\n")
);
assert_eq!(
report["verdict"]["status"], "fail",
"mixed-outcome verdict should be fail"
);
let error_count = report["verdict"]["counts"]["error"].as_u64().unwrap();
assert!(error_count >= 1, "should have at least 1 error count");
let reasons = report["verdict"]["reasons"]
.as_array()
.expect("reasons array");
let has_tool_error = reasons.iter().any(|r| r.as_str() == Some("tool_error"));
let has_no_baseline = reasons.iter().any(|r| r.as_str() == Some("no_baseline"));
assert!(has_tool_error, "reasons should include tool_error");
assert!(has_no_baseline, "reasons should include no_baseline");
let findings = report["findings"].as_array().expect("findings array");
assert!(
findings.len() >= 2,
"should have at least 2 findings (good-bench warn + bad-bench error)"
);
let has_good_prefix = findings.iter().any(|f| {
f["message"]
.as_str()
.unwrap_or("")
.starts_with("[good-bench]")
});
let has_bad_prefix = findings.iter().any(|f| {
f["message"]
.as_str()
.unwrap_or("")
.starts_with("[bad-bench]")
});
assert!(has_good_prefix, "should have [good-bench] prefixed finding");
assert!(has_bad_prefix, "should have [bad-bench] prefixed finding");
let error_finding = findings
.iter()
.find(|f| f["check_id"].as_str() == Some("tool.runtime"))
.expect("should have tool.runtime finding");
let finding_data = &error_finding["data"];
assert!(
finding_data.get("stage").is_some() && !finding_data["stage"].is_null(),
"error finding should have stage"
);
assert!(
finding_data.get("error_kind").is_some() && !finding_data["error_kind"].is_null(),
"error finding should have error_kind"
);
assert_eq!(
finding_data["bench_name"], "bad-bench",
"error finding should have bench_name = bad-bench"
);
let good_extras = out_dir.join("extras").join("good-bench");
let bad_extras = out_dir.join("extras").join("bad-bench");
assert!(
good_extras.join("perfgate.run.v1.json").exists(),
"good-bench should have extras/good-bench/perfgate.run.v1.json"
);
let bad_has_run = bad_extras.join("perfgate.run.v1.json").exists();
assert!(
!bad_has_run,
"bad-bench should NOT have perfgate.run.v1.json"
);
let artifacts = report["artifacts"].as_array().expect("artifacts array");
let has_good_artifact = artifacts
.iter()
.any(|a| a["path"].as_str().unwrap_or("").contains("good-bench"));
let has_bad_artifact = artifacts
.iter()
.any(|a| a["path"].as_str().unwrap_or("").contains("bad-bench"));
assert!(has_good_artifact, "artifacts should reference good-bench");
assert!(
!has_bad_artifact,
"artifacts should NOT reference bad-bench"
);
assert_eq!(
report["data"]["summary"]["bench_count"], 2,
"bench_count should be 2 (both benches counted)"
);
}