mod common;
use common::perfgate_cmd;
use predicates::prelude::*;
use std::fs;
use tempfile::tempdir;
use common::{fixtures_dir, generate_compare_receipt};
#[test]
fn test_export_run_to_csv() {
    // Exporting a run receipt with --format csv should write a file whose
    // header lists every aggregated metric column, followed by data rows.
    let tmp = tempdir().expect("failed to create temp dir");
    let out_file = tmp.path().join("export.csv");
    let run_receipt = fixtures_dir().join("baseline.json");

    let mut cmd = perfgate_cmd();
    cmd.arg("export")
        .arg("--run")
        .arg(&run_receipt)
        .args(["--format", "csv", "--out"])
        .arg(&out_file);
    cmd.assert().success();

    assert!(out_file.exists(), "export file should exist");
    let csv = fs::read_to_string(&out_file).expect("failed to read export file");

    // The header row is part of the export contract; pin it exactly.
    assert!(
        csv.starts_with("bench_name,wall_ms_median,wall_ms_min,wall_ms_max,binary_bytes_median,cpu_ms_median,ctx_switches_median,max_rss_kb_median,page_faults_median,io_read_bytes_median,io_write_bytes_median,network_packets_median,energy_uj_median,throughput_median,sample_count,timestamp\n"),
        "CSV should have correct header. Got: {}",
        csv.lines().next().unwrap_or("")
    );
    // Spot-check a few values from the baseline fixture.
    assert!(
        csv.contains("test-benchmark"),
        "CSV should contain benchmark name"
    );
    assert!(
        csv.contains("100,98,102"),
        "CSV should contain wall_ms stats (median=100, min=98, max=102)"
    );
    assert!(
        csv.contains("1024"),
        "CSV should contain max_rss_kb median"
    );
}
#[test]
fn test_export_run_to_jsonl() {
    // A run receipt exported as JSONL should produce exactly one JSON object
    // per benchmark (the fixture has a single benchmark).
    let tmp = tempdir().expect("failed to create temp dir");
    let out_file = tmp.path().join("export.jsonl");
    let run_receipt = fixtures_dir().join("baseline.json");

    let mut cmd = perfgate_cmd();
    cmd.arg("export")
        .arg("--run")
        .arg(&run_receipt)
        .args(["--format", "jsonl", "--out"])
        .arg(&out_file);
    cmd.assert().success();

    assert!(out_file.exists(), "export file should exist");
    let body = fs::read_to_string(&out_file).expect("failed to read export file");
    let rows: Vec<&str> = body.trim().split('\n').collect();
    assert_eq!(
        rows.len(),
        1,
        "JSONL should have exactly 1 line for run receipt"
    );

    // Each line must be self-contained valid JSON carrying the fixture's stats.
    let record: serde_json::Value =
        serde_json::from_str(rows[0]).expect("should be valid JSON");
    assert_eq!(record["bench_name"], "test-benchmark");
    assert_eq!(record["wall_ms_median"], 100);
    assert_eq!(record["wall_ms_min"], 98);
    assert_eq!(record["wall_ms_max"], 102);
    assert_eq!(record["sample_count"], 5);
}
#[test]
fn test_export_compare_to_csv() {
    // Exporting a compare receipt as CSV should emit one row per
    // (benchmark, metric) pair with baseline/current values and status.
    let temp_dir = tempdir().expect("failed to create temp dir");
    let compare_receipt_path = temp_dir.path().join("compare.json");
    let export_path = temp_dir.path().join("export.csv");
    let baseline = fixtures_dir().join("baseline.json");
    let current = fixtures_dir().join("current_pass.json");
    // Fix: the reference to `current` was mojibake (`¤t`), which is not
    // valid Rust; it must be a plain borrow.
    generate_compare_receipt(&baseline, &current, &compare_receipt_path)
        .expect("failed to generate compare receipt");
    assert!(
        compare_receipt_path.exists(),
        "compare receipt should exist"
    );
    let mut cmd = perfgate_cmd();
    cmd.arg("export")
        .arg("--compare")
        .arg(&compare_receipt_path)
        .arg("--format")
        .arg("csv")
        .arg("--out")
        .arg(&export_path);
    cmd.assert().success();
    assert!(export_path.exists(), "export file should exist");
    let content = fs::read_to_string(&export_path).expect("failed to read export file");
    // The compare CSV header is part of the export contract.
    assert!(
        content.starts_with(
            "bench_name,metric,baseline_value,current_value,regression_pct,status,threshold\n"
        ),
        "CSV should have correct header"
    );
    assert!(
        content.contains("test-benchmark"),
        "CSV should contain benchmark name"
    );
    assert!(
        content.contains("wall_ms"),
        "CSV should contain wall_ms metric"
    );
}
#[test]
fn test_export_compare_to_jsonl() {
    // Exporting a compare receipt as JSONL should emit one JSON object per
    // metric, each carrying at least `bench_name` and `metric` fields.
    let temp_dir = tempdir().expect("failed to create temp dir");
    let compare_receipt_path = temp_dir.path().join("compare.json");
    let export_path = temp_dir.path().join("export.jsonl");
    let baseline = fixtures_dir().join("baseline.json");
    let current = fixtures_dir().join("current_pass.json");
    // Fix: the reference to `current` was mojibake (`¤t`), which is not
    // valid Rust; it must be a plain borrow.
    generate_compare_receipt(&baseline, &current, &compare_receipt_path)
        .expect("failed to generate compare receipt");
    let mut cmd = perfgate_cmd();
    cmd.arg("export")
        .arg("--compare")
        .arg(&compare_receipt_path)
        .arg("--format")
        .arg("jsonl")
        .arg("--out")
        .arg(&export_path);
    cmd.assert().success();
    assert!(export_path.exists(), "export file should exist");
    let content = fs::read_to_string(&export_path).expect("failed to read export file");
    // Tolerate trailing/blank lines; only non-empty lines are records.
    let lines: Vec<&str> = content
        .trim()
        .split('\n')
        .filter(|s| !s.is_empty())
        .collect();
    assert!(
        !lines.is_empty(),
        "JSONL should have at least 1 line per metric"
    );
    for line in &lines {
        let parsed: serde_json::Value = serde_json::from_str(line).expect("should be valid JSON");
        assert!(
            parsed.get("bench_name").is_some(),
            "each line should have bench_name"
        );
        assert!(
            parsed.get("metric").is_some(),
            "each line should have metric"
        );
    }
}
#[test]
fn test_export_run_to_html() {
    // HTML export of a run receipt should render the benchmark data as a table.
    let tmp = tempdir().expect("failed to create temp dir");
    let out_file = tmp.path().join("export.html");
    let run_receipt = fixtures_dir().join("baseline.json");

    let mut cmd = perfgate_cmd();
    cmd.arg("export")
        .arg("--run")
        .arg(&run_receipt)
        .args(["--format", "html", "--out"])
        .arg(&out_file);
    cmd.assert().success();

    let html = fs::read_to_string(&out_file).expect("failed to read export file");
    // Only check for markers, not the full markup, so styling can evolve.
    assert!(
        html.contains("<table"),
        "HTML export should contain a table"
    );
    assert!(
        html.contains("test-benchmark"),
        "HTML export should include bench name"
    );
}
#[test]
fn test_export_compare_to_prometheus() {
    // Prometheus export of a compare receipt should expose regression
    // percentages as labeled gauge samples.
    let temp_dir = tempdir().expect("failed to create temp dir");
    let compare_receipt_path = temp_dir.path().join("compare.json");
    let export_path = temp_dir.path().join("export.prom");
    let baseline = fixtures_dir().join("baseline.json");
    let current = fixtures_dir().join("current_pass.json");
    // Fix: the reference to `current` was mojibake (`¤t`), which is not
    // valid Rust; it must be a plain borrow.
    generate_compare_receipt(&baseline, &current, &compare_receipt_path)
        .expect("failed to generate compare receipt");
    let mut cmd = perfgate_cmd();
    cmd.arg("export")
        .arg("--compare")
        .arg(&compare_receipt_path)
        .arg("--format")
        .arg("prometheus")
        .arg("--out")
        .arg(&export_path);
    cmd.assert().success();
    let content = fs::read_to_string(&export_path).expect("failed to read export file");
    assert!(
        content.contains("perfgate_compare_regression_pct"),
        "Prometheus export should include regression metric"
    );
    assert!(
        content.contains("metric=\"wall_ms\""),
        "Prometheus export should include wall_ms label"
    );
}
#[test]
fn test_export_default_format_is_csv() {
    // Omitting --format entirely should fall back to CSV output.
    let tmp = tempdir().expect("failed to create temp dir");
    let out_file = tmp.path().join("export.csv");
    let run_receipt = fixtures_dir().join("baseline.json");

    let mut cmd = perfgate_cmd();
    cmd.arg("export")
        .arg("--run")
        .arg(&run_receipt)
        .arg("--out")
        .arg(&out_file);
    cmd.assert().success();

    let body = fs::read_to_string(&out_file).expect("failed to read export file");
    assert!(
        body.starts_with("bench_name,"),
        "default format should be CSV with header"
    );
}
#[test]
fn test_export_invalid_format() {
    // An unrecognized --format value must be rejected with a clear error.
    let tmp = tempdir().expect("failed to create temp dir");
    let out_file = tmp.path().join("export.csv");
    let run_receipt = fixtures_dir().join("baseline.json");

    let mut cmd = perfgate_cmd();
    cmd.arg("export")
        .arg("--run")
        .arg(&run_receipt)
        .args(["--format", "invalid", "--out"])
        .arg(&out_file);

    cmd.assert()
        .failure()
        .stderr(predicate::str::contains("invalid format"));
}
#[test]
fn test_export_run_and_compare_mutually_exclusive() {
    // Supplying both --run and --compare is a usage error; the CLI should
    // refuse the combination rather than pick one arbitrarily.
    let tmp = tempdir().expect("failed to create temp dir");
    let out_file = tmp.path().join("export.csv");
    let receipt = fixtures_dir().join("baseline.json");

    let mut cmd = perfgate_cmd();
    cmd.arg("export")
        .arg("--run")
        .arg(&receipt)
        .arg("--compare")
        .arg(&receipt)
        .arg("--out")
        .arg(&out_file);

    cmd.assert()
        .failure()
        .stderr(predicate::str::contains("cannot be used with"));
}
#[test]
fn test_export_missing_input() {
    // With neither --run nor --compare given, export has no input and must
    // fail, mentioning at least one of the required flags.
    let tmp = tempdir().expect("failed to create temp dir");
    let out_file = tmp.path().join("export.csv");

    let mut cmd = perfgate_cmd();
    cmd.arg("export").arg("--out").arg(&out_file);

    cmd.assert()
        .failure()
        .stderr(predicate::str::contains("--run").or(predicate::str::contains("--compare")));
}
#[test]
fn test_export_missing_input_file() {
    // A --run path that points at a nonexistent file should surface a read error.
    let tmp = tempdir().expect("failed to create temp dir");
    let out_file = tmp.path().join("export.csv");
    let missing = tmp.path().join("nonexistent.json");

    let mut cmd = perfgate_cmd();
    cmd.arg("export")
        .arg("--run")
        .arg(&missing)
        .arg("--out")
        .arg(&out_file);

    cmd.assert()
        .failure()
        .stderr(predicate::str::contains("read"));
}
#[test]
fn test_export_missing_compare_input_file() {
    // A --compare path that does not exist should fail with a read error,
    // and no partial output file should be left behind.
    let tmp = tempdir().expect("failed to create temp dir");
    let out_file = tmp.path().join("export.csv");
    let missing = tmp.path().join("does_not_exist.json");

    let mut cmd = perfgate_cmd();
    cmd.arg("export")
        .arg("--compare")
        .arg(&missing)
        .arg("--out")
        .arg(&out_file);

    cmd.assert()
        .failure()
        .stderr(predicate::str::contains("read"));

    assert!(
        !out_file.exists(),
        "export file should not be created on failure"
    );
}
#[test]
fn test_export_csv_deterministic() {
    // Exporting the same compare receipt twice must yield byte-identical CSV
    // (no timestamps, hash ordering, or other nondeterminism in the output).
    let temp_dir = tempdir().expect("failed to create temp dir");
    let compare_receipt_path = temp_dir.path().join("compare.json");
    let export_path1 = temp_dir.path().join("export1.csv");
    let export_path2 = temp_dir.path().join("export2.csv");
    let baseline = fixtures_dir().join("baseline.json");
    let current = fixtures_dir().join("current_pass.json");
    // Fix: the reference to `current` was mojibake (`¤t`), which is not
    // valid Rust; it must be a plain borrow.
    generate_compare_receipt(&baseline, &current, &compare_receipt_path)
        .expect("failed to generate compare receipt");
    // Run the identical export twice into two separate files.
    for export_path in [&export_path1, &export_path2] {
        let mut cmd = perfgate_cmd();
        cmd.arg("export")
            .arg("--compare")
            .arg(&compare_receipt_path)
            .arg("--format")
            .arg("csv")
            .arg("--out")
            .arg(export_path);
        cmd.assert().success();
    }
    let content1 = fs::read_to_string(&export_path1).expect("failed to read export1");
    let content2 = fs::read_to_string(&export_path2).expect("failed to read export2");
    assert_eq!(content1, content2, "CSV export should be deterministic");
}
#[test]
fn test_export_compare_metrics_sorted() {
    // Metric rows in the compare CSV should appear in alphabetical order
    // (max_rss_kb before wall_ms), which also supports determinism.
    let temp_dir = tempdir().expect("failed to create temp dir");
    let compare_receipt_path = temp_dir.path().join("compare.json");
    let export_path = temp_dir.path().join("export.csv");
    let baseline = fixtures_dir().join("baseline.json");
    let current = fixtures_dir().join("current_pass.json");
    // Fix: the reference to `current` was mojibake (`¤t`), which is not
    // valid Rust; it must be a plain borrow.
    generate_compare_receipt(&baseline, &current, &compare_receipt_path)
        .expect("failed to generate compare receipt");
    let mut cmd = perfgate_cmd();
    cmd.arg("export")
        .arg("--compare")
        .arg(&compare_receipt_path)
        .arg("--format")
        .arg("csv")
        .arg("--out")
        .arg(&export_path);
    cmd.assert().success();
    let content = fs::read_to_string(&export_path).expect("failed to read export file");
    let max_rss_pos = content.find("max_rss_kb");
    let wall_ms_pos = content.find("wall_ms");
    // Lenient on purpose: only check ordering when both metrics are present
    // in the fixture, so this test doesn't couple to the fixture's metric set.
    if let (Some(max_rss), Some(wall_ms)) = (max_rss_pos, wall_ms_pos) {
        assert!(
            max_rss < wall_ms,
            "max_rss_kb should come before wall_ms (alphabetical order)"
        );
    }
}