1use perfgate_types::{CompareReceipt, Metric, MetricStatus, RunReceipt};
53
/// Output formats supported when exporting run and compare receipts.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ExportFormat {
    /// Comma-separated values: header line plus data row(s).
    Csv,
    /// One JSON object per line.
    Jsonl,
    /// Standalone HTML document containing a single table.
    Html,
    /// Prometheus text exposition format (one metric sample per line).
    Prometheus,
}
76
77impl ExportFormat {
78 pub fn parse(s: &str) -> Option<Self> {
89 s.parse().ok()
90 }
91}
92
93impl std::str::FromStr for ExportFormat {
94 type Err = ();
95
96 fn from_str(s: &str) -> Result<Self, Self::Err> {
110 match s.to_lowercase().as_str() {
111 "csv" => Ok(ExportFormat::Csv),
112 "jsonl" => Ok(ExportFormat::Jsonl),
113 "html" => Ok(ExportFormat::Html),
114 "prometheus" | "prom" => Ok(ExportFormat::Prometheus),
115 _ => Err(()),
116 }
117 }
118}
119
/// Flat, serializable projection of a [`RunReceipt`] used by every export
/// format. Optional fields are `None` when the receipt's stats did not
/// include that metric.
#[derive(Debug, Clone, serde::Serialize)]
pub struct RunExportRow {
    /// Benchmark name from the receipt's bench metadata.
    pub bench_name: String,
    /// Median wall-clock time in milliseconds.
    pub wall_ms_median: u64,
    /// Minimum wall-clock time in milliseconds.
    pub wall_ms_min: u64,
    /// Maximum wall-clock time in milliseconds.
    pub wall_ms_max: u64,
    /// Median binary size in bytes, when collected.
    pub binary_bytes_median: Option<u64>,
    /// Median CPU time in milliseconds, when collected.
    pub cpu_ms_median: Option<u64>,
    /// Median context-switch count, when collected.
    pub ctx_switches_median: Option<u64>,
    /// Median max RSS in kilobytes, when collected.
    pub max_rss_kb_median: Option<u64>,
    /// Median page-fault count, when collected.
    pub page_faults_median: Option<u64>,
    /// Median throughput (work units per second), when collected.
    pub throughput_median: Option<f64>,
    /// Number of non-warmup samples in the receipt.
    pub sample_count: usize,
    /// Run start timestamp, copied verbatim from the receipt.
    pub timestamp: String,
}
159
/// Flat, serializable projection of one metric delta from a
/// [`CompareReceipt`]. Percentage fields are scaled to percent
/// (fraction × 100) during conversion.
#[derive(Debug, Clone, serde::Serialize)]
pub struct CompareExportRow {
    /// Benchmark name from the receipt's bench metadata.
    pub bench_name: String,
    /// Canonical metric name (e.g. "wall_ms").
    pub metric: String,
    /// Baseline value for this metric.
    pub baseline_value: f64,
    /// Current value for this metric.
    pub current_value: f64,
    /// Regression as a percentage (delta fraction × 100).
    pub regression_pct: f64,
    /// Status label: "pass", "warn", or "fail".
    pub status: String,
    /// Budget threshold as a percentage; 0.0 when no budget is configured.
    pub threshold: f64,
}
189
190pub struct ExportUseCase;
192
impl ExportUseCase {
    /// Render a single-run receipt as a string in the requested format.
    pub fn export_run(receipt: &RunReceipt, format: ExportFormat) -> anyhow::Result<String> {
        let row = Self::run_to_row(receipt);

        match format {
            ExportFormat::Csv => Self::run_row_to_csv(&row),
            ExportFormat::Jsonl => Self::run_row_to_jsonl(&row),
            ExportFormat::Html => Self::run_row_to_html(&row),
            ExportFormat::Prometheus => Self::run_row_to_prometheus(&row),
        }
    }

    /// Render a comparison receipt (one row per compared metric) as a
    /// string in the requested format.
    pub fn export_compare(
        receipt: &CompareReceipt,
        format: ExportFormat,
    ) -> anyhow::Result<String> {
        let rows = Self::compare_to_rows(receipt);

        match format {
            ExportFormat::Csv => Self::compare_rows_to_csv(&rows),
            ExportFormat::Jsonl => Self::compare_rows_to_jsonl(&rows),
            ExportFormat::Html => Self::compare_rows_to_html(&rows),
            ExportFormat::Prometheus => Self::compare_rows_to_prometheus(&rows),
        }
    }

    /// Flatten a run receipt into a single export row.
    fn run_to_row(receipt: &RunReceipt) -> RunExportRow {
        // Warmup iterations are excluded from the reported sample count.
        let sample_count = receipt.samples.iter().filter(|s| !s.warmup).count();

        RunExportRow {
            bench_name: receipt.bench.name.clone(),
            wall_ms_median: receipt.stats.wall_ms.median,
            wall_ms_min: receipt.stats.wall_ms.min,
            wall_ms_max: receipt.stats.wall_ms.max,
            binary_bytes_median: receipt.stats.binary_bytes.as_ref().map(|s| s.median),
            cpu_ms_median: receipt.stats.cpu_ms.as_ref().map(|s| s.median),
            ctx_switches_median: receipt.stats.ctx_switches.as_ref().map(|s| s.median),
            max_rss_kb_median: receipt.stats.max_rss_kb.as_ref().map(|s| s.median),
            page_faults_median: receipt.stats.page_faults.as_ref().map(|s| s.median),
            throughput_median: receipt.stats.throughput_per_s.as_ref().map(|s| s.median),
            sample_count,
            timestamp: receipt.run.started_at.clone(),
        }
    }

    /// Flatten a comparison receipt into one row per metric delta, sorted
    /// by metric name so output is deterministic across runs.
    fn compare_to_rows(receipt: &CompareReceipt) -> Vec<CompareExportRow> {
        let mut rows: Vec<CompareExportRow> = receipt
            .deltas
            .iter()
            .map(|(metric, delta)| {
                // Metrics without a configured budget fall back to a 0.0 threshold.
                let threshold = receipt
                    .budgets
                    .get(metric)
                    .map(|b| b.threshold)
                    .unwrap_or(0.0);

                CompareExportRow {
                    bench_name: receipt.bench.name.clone(),
                    metric: metric_to_string(*metric),
                    baseline_value: delta.baseline,
                    current_value: delta.current,
                    // Fractions are scaled to percentages for export.
                    regression_pct: delta.pct * 100.0,
                    status: status_to_string(delta.status),
                    threshold: threshold * 100.0,
                }
            })
            .collect();

        rows.sort_by(|a, b| a.metric.cmp(&b.metric));
        rows
    }

    /// Serialize a run row as CSV: a header line plus exactly one data row.
    /// Absent optional metrics are rendered as empty fields.
    fn run_row_to_csv(row: &RunExportRow) -> anyhow::Result<String> {
        let mut output = String::new();

        output.push_str("bench_name,wall_ms_median,wall_ms_min,wall_ms_max,binary_bytes_median,cpu_ms_median,ctx_switches_median,max_rss_kb_median,page_faults_median,throughput_median,sample_count,timestamp\n");

        output.push_str(&csv_escape(&row.bench_name));
        output.push(',');
        output.push_str(&row.wall_ms_median.to_string());
        output.push(',');
        output.push_str(&row.wall_ms_min.to_string());
        output.push(',');
        output.push_str(&row.wall_ms_max.to_string());
        output.push(',');
        output.push_str(
            &row.binary_bytes_median
                .map_or(String::new(), |v| v.to_string()),
        );
        output.push(',');
        output.push_str(&row.cpu_ms_median.map_or(String::new(), |v| v.to_string()));
        output.push(',');
        output.push_str(
            &row.ctx_switches_median
                .map_or(String::new(), |v| v.to_string()),
        );
        output.push(',');
        output.push_str(
            &row.max_rss_kb_median
                .map_or(String::new(), |v| v.to_string()),
        );
        output.push(',');
        output.push_str(
            &row.page_faults_median
                .map_or(String::new(), |v| v.to_string()),
        );
        output.push(',');
        // Throughput is a float; fixed 6-decimal formatting keeps output stable.
        output.push_str(
            &row.throughput_median
                .map_or(String::new(), |v| format!("{:.6}", v)),
        );
        output.push(',');
        output.push_str(&row.sample_count.to_string());
        output.push(',');
        output.push_str(&csv_escape(&row.timestamp));
        output.push('\n');

        Ok(output)
    }

    /// Serialize a run row as one newline-terminated JSON object.
    fn run_row_to_jsonl(row: &RunExportRow) -> anyhow::Result<String> {
        let json = serde_json::to_string(row)?;
        Ok(format!("{}\n", json))
    }

    /// Serialize compare rows as CSV: header line plus one row per metric.
    /// With no rows, only the header is emitted.
    fn compare_rows_to_csv(rows: &[CompareExportRow]) -> anyhow::Result<String> {
        let mut output = String::new();

        output.push_str(
            "bench_name,metric,baseline_value,current_value,regression_pct,status,threshold\n",
        );

        for row in rows {
            output.push_str(&csv_escape(&row.bench_name));
            output.push(',');
            output.push_str(&csv_escape(&row.metric));
            output.push(',');
            output.push_str(&format!("{:.6}", row.baseline_value));
            output.push(',');
            output.push_str(&format!("{:.6}", row.current_value));
            output.push(',');
            output.push_str(&format!("{:.6}", row.regression_pct));
            output.push(',');
            output.push_str(&csv_escape(&row.status));
            output.push(',');
            output.push_str(&format!("{:.6}", row.threshold));
            output.push('\n');
        }

        Ok(output)
    }

    /// Serialize compare rows as JSONL (one JSON object per line); empty
    /// input yields an empty string.
    fn compare_rows_to_jsonl(rows: &[CompareExportRow]) -> anyhow::Result<String> {
        let mut output = String::new();

        for row in rows {
            let json = serde_json::to_string(row)?;
            output.push_str(&json);
            output.push('\n');
        }

        Ok(output)
    }

    /// Render a run row as a standalone HTML document with a one-row table.
    /// Text fields pass through `html_escape`; numeric fields are formatted
    /// directly.
    fn run_row_to_html(row: &RunExportRow) -> anyhow::Result<String> {
        let html = format!(
            "<!doctype html><html><head><meta charset=\"utf-8\"><title>perfgate run export</title></head><body>\
            <h1>perfgate run export</h1>\
            <table border=\"1\">\
            <thead><tr><th>bench_name</th><th>wall_ms_median</th><th>wall_ms_min</th><th>wall_ms_max</th><th>binary_bytes_median</th><th>cpu_ms_median</th><th>ctx_switches_median</th><th>max_rss_kb_median</th><th>page_faults_median</th><th>throughput_median</th><th>sample_count</th><th>timestamp</th></tr></thead>\
            <tbody><tr><td>{bench}</td><td>{wall_med}</td><td>{wall_min}</td><td>{wall_max}</td><td>{binary}</td><td>{cpu}</td><td>{ctx}</td><td>{rss}</td><td>{pf}</td><td>{throughput}</td><td>{sample_count}</td><td>{timestamp}</td></tr></tbody>\
            </table></body></html>\n",
            bench = html_escape(&row.bench_name),
            wall_med = row.wall_ms_median,
            wall_min = row.wall_ms_min,
            wall_max = row.wall_ms_max,
            binary = row
                .binary_bytes_median
                .map_or(String::new(), |v| v.to_string()),
            cpu = row.cpu_ms_median.map_or(String::new(), |v| v.to_string()),
            ctx = row
                .ctx_switches_median
                .map_or(String::new(), |v| v.to_string()),
            rss = row
                .max_rss_kb_median
                .map_or(String::new(), |v| v.to_string()),
            pf = row
                .page_faults_median
                .map_or(String::new(), |v| v.to_string()),
            throughput = row
                .throughput_median
                .map_or(String::new(), |v| format!("{:.6}", v)),
            sample_count = row.sample_count,
            timestamp = html_escape(&row.timestamp),
        );
        Ok(html)
    }

    /// Render compare rows as a standalone HTML document: one table row per
    /// metric, with text fields escaped.
    fn compare_rows_to_html(rows: &[CompareExportRow]) -> anyhow::Result<String> {
        let mut out = String::from(
            "<!doctype html><html><head><meta charset=\"utf-8\"><title>perfgate compare export</title></head><body><h1>perfgate compare export</h1><table border=\"1\"><thead><tr><th>bench_name</th><th>metric</th><th>baseline_value</th><th>current_value</th><th>regression_pct</th><th>status</th><th>threshold</th></tr></thead><tbody>",
        );

        for row in rows {
            out.push_str(&format!(
                "<tr><td>{}</td><td>{}</td><td>{:.6}</td><td>{:.6}</td><td>{:.6}</td><td>{}</td><td>{:.6}</td></tr>",
                html_escape(&row.bench_name),
                html_escape(&row.metric),
                row.baseline_value,
                row.current_value,
                row.regression_pct,
                html_escape(&row.status),
                row.threshold
            ));
        }

        out.push_str("</tbody></table></body></html>\n");
        Ok(out)
    }

    /// Render a run row in the Prometheus text exposition format. Lines for
    /// optional metrics are emitted only when the metric was collected.
    fn run_row_to_prometheus(row: &RunExportRow) -> anyhow::Result<String> {
        let bench = prometheus_escape_label_value(&row.bench_name);
        let mut out = String::new();
        out.push_str(&format!(
            "perfgate_run_wall_ms_median{{bench=\"{}\"}} {}\n",
            bench, row.wall_ms_median
        ));
        out.push_str(&format!(
            "perfgate_run_wall_ms_min{{bench=\"{}\"}} {}\n",
            bench, row.wall_ms_min
        ));
        out.push_str(&format!(
            "perfgate_run_wall_ms_max{{bench=\"{}\"}} {}\n",
            bench, row.wall_ms_max
        ));
        if let Some(v) = row.binary_bytes_median {
            out.push_str(&format!(
                "perfgate_run_binary_bytes_median{{bench=\"{}\"}} {}\n",
                bench, v
            ));
        }
        if let Some(v) = row.cpu_ms_median {
            out.push_str(&format!(
                "perfgate_run_cpu_ms_median{{bench=\"{}\"}} {}\n",
                bench, v
            ));
        }
        if let Some(v) = row.ctx_switches_median {
            out.push_str(&format!(
                "perfgate_run_ctx_switches_median{{bench=\"{}\"}} {}\n",
                bench, v
            ));
        }
        if let Some(v) = row.max_rss_kb_median {
            out.push_str(&format!(
                "perfgate_run_max_rss_kb_median{{bench=\"{}\"}} {}\n",
                bench, v
            ));
        }
        if let Some(v) = row.page_faults_median {
            out.push_str(&format!(
                "perfgate_run_page_faults_median{{bench=\"{}\"}} {}\n",
                bench, v
            ));
        }
        if let Some(v) = row.throughput_median {
            out.push_str(&format!(
                "perfgate_run_throughput_per_s_median{{bench=\"{}\"}} {:.6}\n",
                bench, v
            ));
        }
        out.push_str(&format!(
            "perfgate_run_sample_count{{bench=\"{}\"}} {}\n",
            bench, row.sample_count
        ));
        Ok(out)
    }

    /// Render compare rows in the Prometheus text exposition format: four
    /// value metrics plus a status gauge per row. Empty input yields an
    /// empty string.
    fn compare_rows_to_prometheus(rows: &[CompareExportRow]) -> anyhow::Result<String> {
        let mut out = String::new();
        for row in rows {
            let bench = prometheus_escape_label_value(&row.bench_name);
            let metric = prometheus_escape_label_value(&row.metric);
            out.push_str(&format!(
                "perfgate_compare_baseline_value{{bench=\"{}\",metric=\"{}\"}} {:.6}\n",
                bench, metric, row.baseline_value
            ));
            out.push_str(&format!(
                "perfgate_compare_current_value{{bench=\"{}\",metric=\"{}\"}} {:.6}\n",
                bench, metric, row.current_value
            ));
            out.push_str(&format!(
                "perfgate_compare_regression_pct{{bench=\"{}\",metric=\"{}\"}} {:.6}\n",
                bench, metric, row.regression_pct
            ));
            out.push_str(&format!(
                "perfgate_compare_threshold_pct{{bench=\"{}\",metric=\"{}\"}} {:.6}\n",
                bench, metric, row.threshold
            ));

            // Status is encoded as a numeric gauge: pass=0, warn=1, fail=2,
            // anything unexpected=-1. The label carries the string form.
            let status_code = match row.status.as_str() {
                "pass" => 0,
                "warn" => 1,
                "fail" => 2,
                _ => -1,
            };
            out.push_str(&format!(
                "perfgate_compare_status{{bench=\"{}\",metric=\"{}\",status=\"{}\"}} {}\n",
                bench,
                metric,
                prometheus_escape_label_value(&row.status),
                status_code
            ));
        }
        Ok(out)
    }
}
584
585fn metric_to_string(metric: Metric) -> String {
587 metric.as_str().to_string()
588}
589
590fn status_to_string(status: MetricStatus) -> String {
592 match status {
593 MetricStatus::Pass => "pass".to_string(),
594 MetricStatus::Warn => "warn".to_string(),
595 MetricStatus::Fail => "fail".to_string(),
596 }
597}
598
/// Quote a CSV field when it contains a comma, quote, or line break,
/// doubling any embedded quotes; otherwise return the value unchanged.
pub fn csv_escape(s: &str) -> String {
    let needs_quoting = s.chars().any(|c| matches!(c, ',' | '"' | '\n' | '\r'));
    if !needs_quoting {
        return s.to_string();
    }

    // Wrap in quotes, doubling each embedded quote character.
    let mut quoted = String::with_capacity(s.len() + 2);
    quoted.push('"');
    for ch in s.chars() {
        if ch == '"' {
            quoted.push('"');
        }
        quoted.push(ch);
    }
    quoted.push('"');
    quoted
}
618
/// Escape text for safe embedding in HTML element and attribute content.
///
/// Replaces `&`, `<`, `>`, and `"` with their HTML entities. `&` must be
/// replaced first so already-produced entities are not double-escaped.
///
/// Fix: the replacement strings had been corrupted into no-ops (each
/// character "replaced" with itself), leaving bench names unescaped in
/// HTML output.
fn html_escape(s: &str) -> String {
    s.replace('&', "&amp;")
        .replace('<', "&lt;")
        .replace('>', "&gt;")
        .replace('"', "&quot;")
}
625
/// Escape a string for use as a Prometheus label value.
///
/// The Prometheus text exposition format requires backslash, double quote,
/// and line feed inside label values to be escaped as `\\`, `\"`, and `\n`.
/// Backslash must be escaped first so the other escapes are not mangled.
///
/// Fix: line feeds were previously left unescaped, producing invalid
/// exposition output for bench names containing newlines.
fn prometheus_escape_label_value(s: &str) -> String {
    s.replace('\\', "\\\\")
        .replace('"', "\\\"")
        .replace('\n', "\\n")
}
629
630#[cfg(test)]
631mod tests {
632 use super::*;
633 use perfgate_types::{
634 BenchMeta, Budget, COMPARE_SCHEMA_V1, CompareRef, Delta, Direction, F64Summary, HostInfo,
635 Metric, MetricStatistic, MetricStatus, RUN_SCHEMA_V1, RunMeta, Sample, Stats, ToolInfo,
636 U64Summary, Verdict, VerdictCounts, VerdictStatus,
637 };
638 use std::collections::BTreeMap;
639
640 fn create_test_run_receipt() -> RunReceipt {
641 RunReceipt {
642 schema: RUN_SCHEMA_V1.to_string(),
643 tool: ToolInfo {
644 name: "perfgate".to_string(),
645 version: "0.1.0".to_string(),
646 },
647 run: RunMeta {
648 id: "test-run-001".to_string(),
649 started_at: "2024-01-15T10:00:00Z".to_string(),
650 ended_at: "2024-01-15T10:00:05Z".to_string(),
651 host: HostInfo {
652 os: "linux".to_string(),
653 arch: "x86_64".to_string(),
654 cpu_count: None,
655 memory_bytes: None,
656 hostname_hash: None,
657 },
658 },
659 bench: BenchMeta {
660 name: "test-benchmark".to_string(),
661 cwd: None,
662 command: vec!["echo".to_string(), "hello".to_string()],
663 repeat: 5,
664 warmup: 0,
665 work_units: None,
666 timeout_ms: None,
667 },
668 samples: vec![
669 Sample {
670 wall_ms: 100,
671 exit_code: 0,
672 warmup: false,
673 timed_out: false,
674 cpu_ms: Some(50),
675 page_faults: None,
676 ctx_switches: None,
677 max_rss_kb: Some(1024),
678 binary_bytes: None,
679 stdout: None,
680 stderr: None,
681 },
682 Sample {
683 wall_ms: 102,
684 exit_code: 0,
685 warmup: false,
686 timed_out: false,
687 cpu_ms: Some(52),
688 page_faults: None,
689 ctx_switches: None,
690 max_rss_kb: Some(1028),
691 binary_bytes: None,
692 stdout: None,
693 stderr: None,
694 },
695 ],
696 stats: Stats {
697 wall_ms: U64Summary {
698 median: 100,
699 min: 98,
700 max: 102,
701 },
702 cpu_ms: Some(U64Summary {
703 median: 50,
704 min: 48,
705 max: 52,
706 }),
707 page_faults: None,
708 ctx_switches: None,
709 max_rss_kb: Some(U64Summary {
710 median: 1024,
711 min: 1020,
712 max: 1028,
713 }),
714 binary_bytes: None,
715 throughput_per_s: None,
716 },
717 }
718 }
719
720 fn create_test_compare_receipt() -> CompareReceipt {
721 let mut budgets = BTreeMap::new();
722 budgets.insert(
723 Metric::WallMs,
724 Budget {
725 threshold: 0.2,
726 warn_threshold: 0.18,
727 direction: Direction::Lower,
728 },
729 );
730 budgets.insert(
731 Metric::MaxRssKb,
732 Budget {
733 threshold: 0.15,
734 warn_threshold: 0.135,
735 direction: Direction::Lower,
736 },
737 );
738
739 let mut deltas = BTreeMap::new();
740 deltas.insert(
741 Metric::WallMs,
742 Delta {
743 baseline: 100.0,
744 current: 110.0,
745 ratio: 1.1,
746 pct: 0.1,
747 regression: 0.1,
748 statistic: MetricStatistic::Median,
749 significance: None,
750 status: MetricStatus::Pass,
751 },
752 );
753 deltas.insert(
754 Metric::MaxRssKb,
755 Delta {
756 baseline: 1024.0,
757 current: 1280.0,
758 ratio: 1.25,
759 pct: 0.25,
760 regression: 0.25,
761 statistic: MetricStatistic::Median,
762 significance: None,
763 status: MetricStatus::Fail,
764 },
765 );
766
767 CompareReceipt {
768 schema: COMPARE_SCHEMA_V1.to_string(),
769 tool: ToolInfo {
770 name: "perfgate".to_string(),
771 version: "0.1.0".to_string(),
772 },
773 bench: BenchMeta {
774 name: "alpha-bench".to_string(),
775 cwd: None,
776 command: vec!["test".to_string()],
777 repeat: 5,
778 warmup: 0,
779 work_units: None,
780 timeout_ms: None,
781 },
782 baseline_ref: CompareRef {
783 path: Some("baseline.json".to_string()),
784 run_id: Some("baseline-001".to_string()),
785 },
786 current_ref: CompareRef {
787 path: Some("current.json".to_string()),
788 run_id: Some("current-001".to_string()),
789 },
790 budgets,
791 deltas,
792 verdict: Verdict {
793 status: VerdictStatus::Fail,
794 counts: VerdictCounts {
795 pass: 1,
796 warn: 0,
797 fail: 1,
798 },
799 reasons: vec!["max_rss_kb_fail".to_string()],
800 },
801 }
802 }
803
804 #[test]
805 fn test_run_export_csv() {
806 let receipt = create_test_run_receipt();
807 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
808
809 assert!(csv.starts_with("bench_name,wall_ms_median,"));
810 assert!(csv.contains("test-benchmark"));
811 assert!(csv.contains("100,98,102"));
812 assert!(csv.contains("1024"));
813 assert!(csv.contains("2024-01-15T10:00:00Z"));
814 }
815
816 #[test]
817 fn test_run_export_jsonl() {
818 let receipt = create_test_run_receipt();
819 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
820
821 let lines: Vec<&str> = jsonl.trim().split('\n').collect();
822 assert_eq!(lines.len(), 1);
823
824 let parsed: serde_json::Value = serde_json::from_str(lines[0]).unwrap();
825 assert_eq!(parsed["bench_name"], "test-benchmark");
826 assert_eq!(parsed["wall_ms_median"], 100);
827 }
828
829 #[test]
830 fn test_compare_export_csv() {
831 let receipt = create_test_compare_receipt();
832 let csv = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
833
834 assert!(csv.starts_with("bench_name,metric,baseline_value,"));
835 assert!(csv.contains("alpha-bench"));
836 assert!(csv.contains("max_rss_kb"));
837 assert!(csv.contains("wall_ms"));
838 let max_rss_pos = csv.find("max_rss_kb").unwrap();
839 let wall_ms_pos = csv.find("wall_ms").unwrap();
840 assert!(max_rss_pos < wall_ms_pos);
841 }
842
843 #[test]
844 fn test_compare_export_jsonl() {
845 let receipt = create_test_compare_receipt();
846 let jsonl = ExportUseCase::export_compare(&receipt, ExportFormat::Jsonl).unwrap();
847
848 let lines: Vec<&str> = jsonl.trim().split('\n').collect();
849 assert_eq!(lines.len(), 2);
850
851 for line in &lines {
852 let _: serde_json::Value = serde_json::from_str(line).unwrap();
853 }
854
855 let first: serde_json::Value = serde_json::from_str(lines[0]).unwrap();
856 assert_eq!(first["metric"], "max_rss_kb");
857 }
858
859 #[test]
860 fn test_csv_escape() {
861 assert_eq!(csv_escape("simple"), "simple");
862 assert_eq!(csv_escape("has,comma"), "\"has,comma\"");
863 assert_eq!(csv_escape("has\"quote"), "\"has\"\"quote\"");
864 assert_eq!(csv_escape("has\nnewline"), "\"has\nnewline\"");
865 }
866
867 #[test]
868 fn test_stable_ordering_across_runs() {
869 let receipt = create_test_compare_receipt();
870
871 let csv1 = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
872 let csv2 = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
873
874 assert_eq!(csv1, csv2, "CSV output should be deterministic");
875 }
876
877 #[test]
878 fn test_export_format_from_str() {
879 assert_eq!(ExportFormat::parse("csv"), Some(ExportFormat::Csv));
880 assert_eq!(ExportFormat::parse("CSV"), Some(ExportFormat::Csv));
881 assert_eq!(ExportFormat::parse("jsonl"), Some(ExportFormat::Jsonl));
882 assert_eq!(ExportFormat::parse("JSONL"), Some(ExportFormat::Jsonl));
883 assert_eq!(ExportFormat::parse("html"), Some(ExportFormat::Html));
884 assert_eq!(
885 ExportFormat::parse("prometheus"),
886 Some(ExportFormat::Prometheus)
887 );
888 assert_eq!(ExportFormat::parse("invalid"), None);
889 }
890
891 #[test]
892 fn test_run_export_html_and_prometheus() {
893 let receipt = create_test_run_receipt();
894
895 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
896 assert!(html.contains("<table"), "html output should contain table");
897 assert!(html.contains("test-benchmark"));
898
899 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
900 assert!(prom.contains("perfgate_run_wall_ms_median"));
901 assert!(prom.contains("bench=\"test-benchmark\""));
902 }
903
904 #[test]
905 fn test_compare_export_html_and_prometheus() {
906 let receipt = create_test_compare_receipt();
907
908 let html = ExportUseCase::export_compare(&receipt, ExportFormat::Html).unwrap();
909 assert!(html.contains("<table"), "html output should contain table");
910 assert!(html.contains("max_rss_kb"));
911
912 let prom = ExportUseCase::export_compare(&receipt, ExportFormat::Prometheus).unwrap();
913 assert!(prom.contains("perfgate_compare_regression_pct"));
914 assert!(prom.contains("metric=\"max_rss_kb\""));
915 }
916
917 #[test]
918 fn test_html_escape() {
919 assert_eq!(html_escape("simple"), "simple");
920 assert_eq!(html_escape("<script>"), "<script>");
921 assert_eq!(html_escape("a&b"), "a&b");
922 assert_eq!(html_escape("\"quoted\""), ""quoted"");
923 }
924
925 #[test]
926 fn test_prometheus_escape() {
927 assert_eq!(prometheus_escape_label_value("simple"), "simple");
928 assert_eq!(prometheus_escape_label_value("has\"quote"), "has\\\"quote");
929 assert_eq!(
930 prometheus_escape_label_value("has\\backslash"),
931 "has\\\\backslash"
932 );
933 }
934
935 mod snapshot_tests {
936 use super::*;
937 use insta::assert_snapshot;
938
939 #[test]
940 fn test_run_html_snapshot() {
941 let receipt = create_test_run_receipt();
942 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
943 assert_snapshot!("run_html", html);
944 }
945
946 #[test]
947 fn test_run_prometheus_snapshot() {
948 let receipt = create_test_run_receipt();
949 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
950 assert_snapshot!("run_prometheus", prom);
951 }
952
953 #[test]
954 fn test_compare_html_snapshot() {
955 let receipt = create_test_compare_receipt();
956 let html = ExportUseCase::export_compare(&receipt, ExportFormat::Html).unwrap();
957 assert_snapshot!("compare_html", html);
958 }
959
960 #[test]
961 fn test_compare_prometheus_snapshot() {
962 let receipt = create_test_compare_receipt();
963 let prom = ExportUseCase::export_compare(&receipt, ExportFormat::Prometheus).unwrap();
964 assert_snapshot!("compare_prometheus", prom);
965 }
966 }
967
968 mod edge_case_tests {
969 use super::*;
970
971 fn create_empty_run_receipt() -> RunReceipt {
972 RunReceipt {
973 schema: RUN_SCHEMA_V1.to_string(),
974 tool: ToolInfo {
975 name: "perfgate".to_string(),
976 version: "0.1.0".to_string(),
977 },
978 run: RunMeta {
979 id: "empty-run".to_string(),
980 started_at: "2024-01-01T00:00:00Z".to_string(),
981 ended_at: "2024-01-01T00:00:01Z".to_string(),
982 host: HostInfo {
983 os: "linux".to_string(),
984 arch: "x86_64".to_string(),
985 cpu_count: None,
986 memory_bytes: None,
987 hostname_hash: None,
988 },
989 },
990 bench: BenchMeta {
991 name: "empty-bench".to_string(),
992 cwd: None,
993 command: vec!["true".to_string()],
994 repeat: 0,
995 warmup: 0,
996 work_units: None,
997 timeout_ms: None,
998 },
999 samples: vec![],
1000 stats: Stats {
1001 wall_ms: U64Summary {
1002 median: 0,
1003 min: 0,
1004 max: 0,
1005 },
1006 cpu_ms: None,
1007 page_faults: None,
1008 ctx_switches: None,
1009 max_rss_kb: None,
1010 binary_bytes: None,
1011 throughput_per_s: None,
1012 },
1013 }
1014 }
1015
1016 fn create_empty_compare_receipt() -> CompareReceipt {
1017 CompareReceipt {
1018 schema: COMPARE_SCHEMA_V1.to_string(),
1019 tool: ToolInfo {
1020 name: "perfgate".to_string(),
1021 version: "0.1.0".to_string(),
1022 },
1023 bench: BenchMeta {
1024 name: "empty-bench".to_string(),
1025 cwd: None,
1026 command: vec!["true".to_string()],
1027 repeat: 0,
1028 warmup: 0,
1029 work_units: None,
1030 timeout_ms: None,
1031 },
1032 baseline_ref: CompareRef {
1033 path: None,
1034 run_id: None,
1035 },
1036 current_ref: CompareRef {
1037 path: None,
1038 run_id: None,
1039 },
1040 budgets: BTreeMap::new(),
1041 deltas: BTreeMap::new(),
1042 verdict: Verdict {
1043 status: VerdictStatus::Pass,
1044 counts: VerdictCounts {
1045 pass: 0,
1046 warn: 0,
1047 fail: 0,
1048 },
1049 reasons: vec![],
1050 },
1051 }
1052 }
1053
1054 fn create_run_receipt_with_bench_name(name: &str) -> RunReceipt {
1055 let mut receipt = create_empty_run_receipt();
1056 receipt.bench.name = name.to_string();
1057 receipt.samples.push(Sample {
1058 wall_ms: 42,
1059 exit_code: 0,
1060 warmup: false,
1061 timed_out: false,
1062 cpu_ms: None,
1063 page_faults: None,
1064 ctx_switches: None,
1065 max_rss_kb: None,
1066 binary_bytes: None,
1067 stdout: None,
1068 stderr: None,
1069 });
1070 receipt.stats.wall_ms = U64Summary {
1071 median: 42,
1072 min: 42,
1073 max: 42,
1074 };
1075 receipt
1076 }
1077
1078 #[test]
1081 fn empty_run_receipt_csv_has_header_and_one_row() {
1082 let receipt = create_empty_run_receipt();
1083 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1084 let lines: Vec<&str> = csv.trim().split('\n').collect();
1085 assert_eq!(lines.len(), 2, "should have header + 1 data row");
1086 assert!(lines[0].starts_with("bench_name,"));
1087 assert!(csv.contains("empty-bench"));
1088 }
1089
1090 #[test]
1091 fn empty_run_receipt_jsonl_is_valid() {
1092 let receipt = create_empty_run_receipt();
1093 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1094 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1095 assert_eq!(parsed["bench_name"], "empty-bench");
1096 assert_eq!(parsed["sample_count"], 0);
1097 }
1098
1099 #[test]
1100 fn empty_run_receipt_html_is_valid() {
1101 let receipt = create_empty_run_receipt();
1102 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1103 assert!(html.starts_with("<!doctype html>"));
1104 assert!(html.contains("<table"));
1105 assert!(html.contains("</table>"));
1106 assert!(html.contains("empty-bench"));
1107 }
1108
1109 #[test]
1110 fn empty_run_receipt_prometheus_is_valid() {
1111 let receipt = create_empty_run_receipt();
1112 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1113 assert!(prom.contains("perfgate_run_wall_ms_median"));
1114 assert!(prom.contains("bench=\"empty-bench\""));
1115 assert!(prom.contains("perfgate_run_sample_count"));
1116 }
1117
1118 #[test]
1119 fn empty_compare_receipt_csv_has_header_only() {
1120 let receipt = create_empty_compare_receipt();
1121 let csv = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
1122 let lines: Vec<&str> = csv.trim().split('\n').collect();
1123 assert_eq!(lines.len(), 1, "should have header only with no deltas");
1124 assert!(lines[0].starts_with("bench_name,metric,"));
1125 }
1126
1127 #[test]
1128 fn empty_compare_receipt_jsonl_is_empty() {
1129 let receipt = create_empty_compare_receipt();
1130 let jsonl = ExportUseCase::export_compare(&receipt, ExportFormat::Jsonl).unwrap();
1131 assert!(
1132 jsonl.trim().is_empty(),
1133 "JSONL should be empty for no deltas"
1134 );
1135 }
1136
1137 #[test]
1138 fn empty_compare_receipt_html_has_valid_structure() {
1139 let receipt = create_empty_compare_receipt();
1140 let html = ExportUseCase::export_compare(&receipt, ExportFormat::Html).unwrap();
1141 assert!(html.starts_with("<!doctype html>"));
1142 assert!(html.contains("<table"));
1143 assert!(html.contains("</table>"));
1144 assert!(html.contains("<thead>"));
1145 assert!(html.contains("</tbody>"));
1146 }
1147
1148 #[test]
1149 fn empty_compare_receipt_prometheus_is_empty() {
1150 let receipt = create_empty_compare_receipt();
1151 let prom = ExportUseCase::export_compare(&receipt, ExportFormat::Prometheus).unwrap();
1152 assert!(
1153 prom.trim().is_empty(),
1154 "Prometheus output should be empty for no deltas"
1155 );
1156 }
1157
1158 #[test]
1161 fn csv_bench_name_with_comma() {
1162 let receipt = create_run_receipt_with_bench_name("bench,with,commas");
1163 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1164 assert!(
1165 csv.contains("\"bench,with,commas\""),
1166 "comma-containing bench name should be quoted"
1167 );
1168 let lines: Vec<&str> = csv.trim().split('\n').collect();
1169 assert_eq!(lines.len(), 2, "should still have exactly 2 lines");
1170 }
1171
1172 #[test]
1173 fn csv_bench_name_with_quotes() {
1174 let receipt = create_run_receipt_with_bench_name("bench\"quoted\"name");
1175 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1176 assert!(
1177 csv.contains("\"bench\"\"quoted\"\"name\""),
1178 "quotes should be escaped as double-quotes in CSV"
1179 );
1180 }
1181
1182 #[test]
1183 fn csv_bench_name_with_newline() {
1184 let receipt = create_run_receipt_with_bench_name("bench\nwith\nnewlines");
1185 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1186 assert!(
1187 csv.contains("\"bench\nwith\nnewlines\""),
1188 "newline-containing bench name should be quoted"
1189 );
1190 }
1191
1192 #[test]
1193 fn csv_bench_name_with_commas_and_quotes() {
1194 let receipt = create_run_receipt_with_bench_name("a,\"b\",c");
1195 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1196 assert!(csv.contains("\"a,\"\"b\"\",c\""));
1198 }
1199
1200 #[test]
1203 fn jsonl_bench_name_with_unicode() {
1204 let receipt = create_run_receipt_with_bench_name("ベンチマーク-速度");
1205 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1206 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1207 assert_eq!(parsed["bench_name"], "ベンチマーク-速度");
1208 }
1209
1210 #[test]
1211 fn jsonl_bench_name_with_emoji() {
1212 let receipt = create_run_receipt_with_bench_name("bench-🚀-fast");
1213 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1214 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1215 assert_eq!(parsed["bench_name"], "bench-🚀-fast");
1216 }
1217
1218 #[test]
1219 fn jsonl_bench_name_with_special_json_chars() {
1220 let receipt = create_run_receipt_with_bench_name("bench\\with\"special\tchars");
1221 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1222 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1223 assert_eq!(parsed["bench_name"], "bench\\with\"special\tchars");
1224 }
1225
1226 #[test]
1229 fn html_run_with_all_optional_metrics_none() {
1230 let receipt = create_empty_run_receipt();
1231 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1232 assert!(html.contains("<html>"));
1233 assert!(html.contains("</html>"));
1234 assert!(html.contains("empty-bench"));
1236 }
1237
1238 #[test]
1239 fn html_bench_name_with_html_chars() {
1240 let receipt = create_run_receipt_with_bench_name("<script>alert('xss')</script>");
1241 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1242 assert!(
1243 !html.contains("<script>"),
1244 "HTML special chars should be escaped"
1245 );
1246 assert!(html.contains("<script>"));
1247 }
1248
1249 #[test]
1252 fn prometheus_bench_name_with_quotes() {
1253 let receipt = create_run_receipt_with_bench_name("bench\"name");
1254 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1255 assert!(
1256 prom.contains("bench="),
1257 "Prometheus output should have bench label"
1258 );
1259 assert!(
1260 !prom.contains("bench=\"bench\"name\""),
1261 "raw quotes should be escaped"
1262 );
1263 assert!(prom.contains("bench=\"bench\\\"name\""));
1264 }
1265
1266 #[test]
1267 fn prometheus_bench_name_with_backslash() {
1268 let receipt = create_run_receipt_with_bench_name("bench\\path");
1269 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1270 assert!(prom.contains("bench=\"bench\\\\path\""));
1271 }
1272
1273 #[test]
1274 fn prometheus_compare_with_all_metric_types() {
1275 let mut receipt = create_empty_compare_receipt();
1276 receipt.bench.name = "full-metrics".to_string();
1277 receipt.deltas.insert(
1278 Metric::WallMs,
1279 Delta {
1280 baseline: 100.0,
1281 current: 110.0,
1282 ratio: 1.1,
1283 pct: 0.1,
1284 regression: 0.1,
1285 statistic: MetricStatistic::Median,
1286 significance: None,
1287 status: MetricStatus::Pass,
1288 },
1289 );
1290 receipt.deltas.insert(
1291 Metric::MaxRssKb,
1292 Delta {
1293 baseline: 1024.0,
1294 current: 1024.0,
1295 ratio: 1.0,
1296 pct: 0.0,
1297 regression: 0.0,
1298 statistic: MetricStatistic::Median,
1299 significance: None,
1300 status: MetricStatus::Pass,
1301 },
1302 );
1303 let prom = ExportUseCase::export_compare(&receipt, ExportFormat::Prometheus).unwrap();
1304 assert!(prom.contains("metric=\"wall_ms\""));
1305 assert!(prom.contains("metric=\"max_rss_kb\""));
1306 assert!(prom.contains("perfgate_compare_baseline_value"));
1307 assert!(prom.contains("perfgate_compare_current_value"));
1308 assert!(prom.contains("perfgate_compare_status"));
1309 }
1310
1311 #[test]
1314 fn single_sample_run_exports_all_formats() {
1315 let receipt = create_run_receipt_with_bench_name("single");
1316
1317 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1318 assert!(csv.contains("single"));
1319 assert_eq!(csv.trim().lines().count(), 2);
1320
1321 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1322 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1323 assert_eq!(parsed["sample_count"], 1);
1324
1325 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1326 assert!(html.contains("<td>single</td>"));
1327
1328 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1329 assert!(prom.contains("perfgate_run_sample_count{bench=\"single\"} 1"));
1330 }
1331
1332 #[test]
1335 fn huge_values_run_receipt() {
1336 let mut receipt = create_empty_run_receipt();
1337 receipt.bench.name = "huge".to_string();
1338 receipt.stats.wall_ms = U64Summary {
1339 median: u64::MAX,
1340 min: u64::MAX - 1,
1341 max: u64::MAX,
1342 };
1343 receipt.stats.max_rss_kb = Some(U64Summary {
1344 median: u64::MAX,
1345 min: u64::MAX,
1346 max: u64::MAX,
1347 });
1348
1349 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1350 assert!(csv.contains(&u64::MAX.to_string()));
1351
1352 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1353 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1354 assert_eq!(parsed["wall_ms_median"], u64::MAX);
1355
1356 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1357 assert!(html.contains(&u64::MAX.to_string()));
1358
1359 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1360 assert!(prom.contains(&u64::MAX.to_string()));
1361 }
1362
1363 #[test]
1366 fn warmup_only_samples_count_zero() {
1367 let mut receipt = create_empty_run_receipt();
1368 receipt.samples = vec![
1369 Sample {
1370 wall_ms: 10,
1371 exit_code: 0,
1372 warmup: true,
1373 timed_out: false,
1374 cpu_ms: None,
1375 page_faults: None,
1376 ctx_switches: None,
1377 max_rss_kb: None,
1378 binary_bytes: None,
1379 stdout: None,
1380 stderr: None,
1381 },
1382 Sample {
1383 wall_ms: 11,
1384 exit_code: 0,
1385 warmup: true,
1386 timed_out: false,
1387 cpu_ms: None,
1388 page_faults: None,
1389 ctx_switches: None,
1390 max_rss_kb: None,
1391 binary_bytes: None,
1392 stdout: None,
1393 stderr: None,
1394 },
1395 ];
1396
1397 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1398 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1399 assert_eq!(parsed["sample_count"], 0);
1400
1401 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1402 let data_line = csv.lines().nth(1).unwrap();
1404 assert!(
1405 data_line.contains(",0,"),
1406 "warmup-only should yield sample_count 0"
1407 );
1408 }
1409
1410 #[test]
1413 fn csv_bench_name_with_carriage_return() {
1414 let receipt = create_run_receipt_with_bench_name("bench\rwith\rcr");
1415 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1416 assert!(
1417 csv.contains("\"bench\rwith\rcr\""),
1418 "carriage-return-containing bench name should be quoted"
1419 );
1420 }
1421
1422 #[test]
1425 fn csv_compare_special_chars_in_bench_name() {
1426 let mut receipt = create_empty_compare_receipt();
1427 receipt.bench.name = "bench,\"special\"\nname".to_string();
1428 receipt.deltas.insert(
1429 Metric::WallMs,
1430 Delta {
1431 baseline: 100.0,
1432 current: 105.0,
1433 ratio: 1.05,
1434 pct: 0.05,
1435 regression: 0.05,
1436 statistic: MetricStatistic::Median,
1437 significance: None,
1438 status: MetricStatus::Pass,
1439 },
1440 );
1441 let csv = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
1442 assert!(csv.contains("\"bench,\"\"special\"\"\nname\""));
1444 }
1445
1446 #[test]
1449 fn unicode_bench_name_all_formats() {
1450 let name = "日本語ベンチ_αβγ_🚀";
1451 let receipt = create_run_receipt_with_bench_name(name);
1452
1453 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1454 assert!(csv.contains(name));
1455
1456 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1457 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1458 assert_eq!(parsed["bench_name"], name);
1459
1460 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1461 assert!(html.contains(name));
1462
1463 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1464 assert!(prom.contains(name));
1465 }
1466
1467 #[test]
1470 fn html_compare_mixed_statuses() {
1471 let mut receipt = create_empty_compare_receipt();
1472 receipt.bench.name = "mixed".to_string();
1473 for (metric, status) in [
1474 (Metric::WallMs, MetricStatus::Pass),
1475 (Metric::CpuMs, MetricStatus::Warn),
1476 (Metric::MaxRssKb, MetricStatus::Fail),
1477 ] {
1478 receipt.deltas.insert(
1479 metric,
1480 Delta {
1481 baseline: 100.0,
1482 current: 120.0,
1483 ratio: 1.2,
1484 pct: 0.2,
1485 regression: 0.2,
1486 statistic: MetricStatistic::Median,
1487 significance: None,
1488 status,
1489 },
1490 );
1491 }
1492 let html = ExportUseCase::export_compare(&receipt, ExportFormat::Html).unwrap();
1493 assert!(html.contains("<td>pass</td>"));
1494 assert!(html.contains("<td>warn</td>"));
1495 assert!(html.contains("<td>fail</td>"));
1496 assert_eq!(html.matches("<tr><td>").count(), 3);
1498 }
1499
1500 #[test]
1503 fn html_empty_bench_name() {
1504 let receipt = create_run_receipt_with_bench_name("");
1505 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1506 assert!(html.contains("<td></td>"));
1507 assert!(html.contains("<html>"));
1508 }
1509
1510 #[test]
1513 fn prometheus_run_all_optional_metrics_present() {
1514 let mut receipt = create_empty_run_receipt();
1515 receipt.bench.name = "full".to_string();
1516 receipt.stats.cpu_ms = Some(U64Summary {
1517 median: 50,
1518 min: 48,
1519 max: 52,
1520 });
1521 receipt.stats.page_faults = Some(U64Summary {
1522 median: 10,
1523 min: 8,
1524 max: 12,
1525 });
1526 receipt.stats.ctx_switches = Some(U64Summary {
1527 median: 5,
1528 min: 3,
1529 max: 7,
1530 });
1531 receipt.stats.max_rss_kb = Some(U64Summary {
1532 median: 2048,
1533 min: 2000,
1534 max: 2100,
1535 });
1536 receipt.stats.binary_bytes = Some(U64Summary {
1537 median: 100000,
1538 min: 99000,
1539 max: 101000,
1540 });
1541 receipt.stats.throughput_per_s = Some(F64Summary {
1542 median: 1234.567890,
1543 min: 1200.0,
1544 max: 1300.0,
1545 });
1546
1547 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1548 assert!(prom.contains("perfgate_run_cpu_ms_median{bench=\"full\"} 50"));
1549 assert!(prom.contains("perfgate_run_page_faults_median{bench=\"full\"} 10"));
1550 assert!(prom.contains("perfgate_run_ctx_switches_median{bench=\"full\"} 5"));
1551 assert!(prom.contains("perfgate_run_max_rss_kb_median{bench=\"full\"} 2048"));
1552 assert!(prom.contains("perfgate_run_binary_bytes_median{bench=\"full\"} 100000"));
1553 assert!(
1554 prom.contains("perfgate_run_throughput_per_s_median{bench=\"full\"} 1234.567890")
1555 );
1556 }
1557
1558 #[test]
1561 fn prometheus_compare_status_codes() {
1562 let mut receipt = create_empty_compare_receipt();
1563 receipt.bench.name = "status-test".to_string();
1564 for (metric, status, expected_code) in [
1565 (Metric::WallMs, MetricStatus::Pass, "0"),
1566 (Metric::CpuMs, MetricStatus::Warn, "1"),
1567 (Metric::MaxRssKb, MetricStatus::Fail, "2"),
1568 ] {
1569 receipt.deltas.insert(
1570 metric,
1571 Delta {
1572 baseline: 100.0,
1573 current: 110.0,
1574 ratio: 1.1,
1575 pct: 0.1,
1576 regression: 0.1,
1577 statistic: MetricStatistic::Median,
1578 significance: None,
1579 status,
1580 },
1581 );
1582 receipt.budgets.insert(
1583 metric,
1584 Budget {
1585 threshold: 0.2,
1586 warn_threshold: 0.15,
1587 direction: Direction::Lower,
1588 },
1589 );
1590 let _ = expected_code; }
1592
1593 let prom = ExportUseCase::export_compare(&receipt, ExportFormat::Prometheus).unwrap();
1594 assert!(prom.contains("status=\"pass\"} 0"));
1595 assert!(prom.contains("status=\"warn\"} 1"));
1596 assert!(prom.contains("status=\"fail\"} 2"));
1597 }
1598
1599 #[test]
1602 fn jsonl_compare_fields_match_receipt() {
1603 let receipt = create_test_compare_receipt();
1604 let jsonl = ExportUseCase::export_compare(&receipt, ExportFormat::Jsonl).unwrap();
1605
1606 let lines: Vec<&str> = jsonl.trim().lines().collect();
1607 assert_eq!(lines.len(), receipt.deltas.len());
1608
1609 for line in lines {
1610 let parsed: serde_json::Value = serde_json::from_str(line).unwrap();
1611 assert_eq!(parsed["bench_name"], "alpha-bench");
1612 let metric_name = parsed["metric"].as_str().unwrap();
1613 assert!(
1614 ["wall_ms", "max_rss_kb"].contains(&metric_name),
1615 "unexpected metric: {}",
1616 metric_name
1617 );
1618 assert!(parsed["baseline_value"].as_f64().unwrap() > 0.0);
1619 assert!(parsed["current_value"].as_f64().unwrap() > 0.0);
1620 let status = parsed["status"].as_str().unwrap();
1621 assert!(
1622 ["pass", "warn", "fail"].contains(&status),
1623 "unexpected status: {}",
1624 status
1625 );
1626 }
1627 }
1628
    #[test]
    fn jsonl_run_round_trip() {
        // Export a full receipt and verify each JSONL field mirrors the
        // corresponding receipt field.
        let receipt = create_test_run_receipt();
        let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
        let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();

        assert_eq!(parsed["bench_name"], receipt.bench.name);
        assert_eq!(parsed["wall_ms_median"], receipt.stats.wall_ms.median);
        assert_eq!(parsed["wall_ms_min"], receipt.stats.wall_ms.min);
        assert_eq!(parsed["wall_ms_max"], receipt.stats.wall_ms.max);
        assert_eq!(
            parsed["cpu_ms_median"],
            receipt.stats.cpu_ms.as_ref().unwrap().median
        );
        assert_eq!(
            parsed["max_rss_kb_median"],
            receipt.stats.max_rss_kb.as_ref().unwrap().median
        );
        // sample_count counts only non-warmup samples.
        assert_eq!(
            parsed["sample_count"],
            receipt.samples.iter().filter(|s| !s.warmup).count()
        );
        // The exported timestamp is the run's start time, not its end time.
        assert_eq!(parsed["timestamp"], receipt.run.started_at);
    }
1655
1656 #[test]
1659 fn html_run_all_optional_metrics_present() {
1660 let mut receipt = create_empty_run_receipt();
1661 receipt.bench.name = "full-html".to_string();
1662 receipt.stats.cpu_ms = Some(U64Summary {
1663 median: 50,
1664 min: 48,
1665 max: 52,
1666 });
1667 receipt.stats.throughput_per_s = Some(F64Summary {
1668 median: 999.123456,
1669 min: 900.0,
1670 max: 1100.0,
1671 });
1672
1673 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1674 assert!(html.contains("<td>50</td>"));
1675 assert!(html.contains("999.123456"));
1676 assert!(html.contains("full-html"));
1677 }
1678
    #[test]
    fn csv_escape_empty_string() {
        // Empty input needs no quoting and stays empty.
        assert_eq!(csv_escape(""), "");
    }
1685
    #[test]
    fn csv_escape_only_quotes() {
        // Three quotes in: each is doubled (6) and the field is wrapped in
        // quotes (2 more), giving eight quote characters out.
        assert_eq!(csv_escape("\"\"\""), "\"\"\"\"\"\"\"\"");
    }
1690
    #[test]
    fn csv_escape_no_special_chars() {
        // No comma, quote, CR, or LF: the value passes through unquoted.
        assert_eq!(csv_escape("plain-bench_name.v2"), "plain-bench_name.v2");
    }
1695
    #[test]
    fn prometheus_escape_newline_preserved() {
        // Documents current behavior: a raw newline passes through unchanged.
        // NOTE(review): the Prometheus text exposition format requires label
        // values to escape newlines as the two-character sequence `\n` —
        // confirm raw pass-through is intentional before scraping this output.
        let result = prometheus_escape_label_value("a\nb");
        assert_eq!(result, "a\nb");
    }
1705
    #[test]
    fn prometheus_escape_empty() {
        // Empty label values are legal and need no escaping.
        assert_eq!(prometheus_escape_label_value(""), "");
    }
1710
1711 #[test]
1714 fn html_escape_all_special_chars_combined() {
1715 assert_eq!(
1716 html_escape("<tag attr=\"val\">&</tag>"),
1717 "<tag attr="val">&</tag>"
1718 );
1719 }
1720
    #[test]
    fn html_escape_empty() {
        // Degenerate input: nothing to escape.
        assert_eq!(html_escape(""), "");
    }
1725
    #[test]
    fn format_parse_prom_alias() {
        // "prom" is an accepted shorthand for Prometheus, matched
        // case-insensitively.
        assert_eq!(ExportFormat::parse("prom"), Some(ExportFormat::Prometheus));
        assert_eq!(ExportFormat::parse("PROM"), Some(ExportFormat::Prometheus));
    }
1733
    #[test]
    fn format_parse_empty_string() {
        // The empty string matches no format name.
        assert_eq!(ExportFormat::parse(""), None);
    }
1738
    #[test]
    fn compare_csv_threshold_percentage() {
        // Thresholds stored as fractions are exported as percentages with six
        // decimal places (0.20 -> 20.000000, 0.15 -> 15.000000).
        let receipt = create_test_compare_receipt();
        let csv = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
        assert!(csv.contains("20.000000"));
        assert!(csv.contains("15.000000"));
    }
1750
    #[test]
    fn compare_regression_pct_is_percentage() {
        // regression_pct must be exported as a percentage (10.0, 25.0), not as
        // the raw fraction stored on the Delta.
        let receipt = create_test_compare_receipt();
        let jsonl = ExportUseCase::export_compare(&receipt, ExportFormat::Jsonl).unwrap();

        for line in jsonl.trim().lines() {
            let parsed: serde_json::Value = serde_json::from_str(line).unwrap();
            let metric = parsed["metric"].as_str().unwrap();
            let regression_pct = parsed["regression_pct"].as_f64().unwrap();
            match metric {
                "wall_ms" => {
                    // Fixture's wall-time delta is a 10% regression.
                    assert!((regression_pct - 10.0).abs() < 0.01);
                }
                "max_rss_kb" => {
                    // Fixture's RSS delta is a 25% regression.
                    assert!((regression_pct - 25.0).abs() < 0.01);
                }
                _ => panic!("unexpected metric: {}", metric),
            }
        }
    }
1775 }
1776}
1777
#[cfg(test)]
mod property_tests {
    //! Property-based tests: arbitrary generated receipts must export
    //! cleanly, deterministically, and in a structurally valid form for
    //! every supported format.
    use super::*;
    use perfgate_types::{
        BenchMeta, Budget, COMPARE_SCHEMA_V1, CompareRef, Delta, Direction, F64Summary, HostInfo,
        Metric, MetricStatistic, MetricStatus, RUN_SCHEMA_V1, RunMeta, Sample, Stats, ToolInfo,
        U64Summary, Verdict, VerdictCounts, VerdictStatus,
    };
    use proptest::prelude::*;
    use std::collections::BTreeMap;

    /// Short identifier drawn from a safe alphabet: never empty, never needs
    /// CSV quoting or Prometheus label escaping.
    fn non_empty_string() -> impl Strategy<Value = String> {
        // A regex literal is itself a Strategy<Value = String>; the former
        // `.prop_map(|s| s)` identity mapping was redundant.
        "[a-zA-Z0-9_-]{1,20}"
    }

    /// Synthetic RFC 3339 timestamp. Days stop at 28 so every generated
    /// (year, month, day) combination is a real calendar date.
    fn rfc3339_timestamp() -> impl Strategy<Value = String> {
        (
            2020u32..2030,
            1u32..13,
            1u32..29,
            0u32..24,
            0u32..60,
            0u32..60,
        )
            .prop_map(|(year, month, day, hour, min, sec)| {
                format!(
                    "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}Z",
                    year, month, day, hour, min, sec
                )
            })
    }

    fn tool_info_strategy() -> impl Strategy<Value = ToolInfo> {
        (non_empty_string(), non_empty_string())
            .prop_map(|(name, version)| ToolInfo { name, version })
    }

    /// HostInfo with the optional hardware fields left unset.
    fn host_info_strategy() -> impl Strategy<Value = HostInfo> {
        (non_empty_string(), non_empty_string()).prop_map(|(os, arch)| HostInfo {
            os,
            arch,
            cpu_count: None,
            memory_bytes: None,
            hostname_hash: None,
        })
    }

    fn run_meta_strategy() -> impl Strategy<Value = RunMeta> {
        (
            non_empty_string(),
            rfc3339_timestamp(),
            rfc3339_timestamp(),
            host_info_strategy(),
        )
            .prop_map(|(id, started_at, ended_at, host)| RunMeta {
                id,
                started_at,
                ended_at,
                host,
            })
    }

    fn bench_meta_strategy() -> impl Strategy<Value = BenchMeta> {
        (
            non_empty_string(),
            proptest::option::of(non_empty_string()),
            proptest::collection::vec(non_empty_string(), 1..5),
            1u32..100,
            0u32..10,
            proptest::option::of(1u64..10000),
            proptest::option::of(100u64..60000),
        )
            .prop_map(
                |(name, cwd, command, repeat, warmup, work_units, timeout_ms)| BenchMeta {
                    name,
                    cwd,
                    command,
                    repeat,
                    warmup,
                    work_units,
                    timeout_ms,
                },
            )
    }

    /// One raw measurement; every optional counter independently present or
    /// absent, stdout/stderr never captured.
    fn sample_strategy() -> impl Strategy<Value = Sample> {
        (
            0u64..100000,
            -128i32..128,
            any::<bool>(),
            any::<bool>(),
            proptest::option::of(0u64..1000000),
            proptest::option::of(0u64..1000000),
            proptest::option::of(0u64..1000000),
            proptest::option::of(0u64..1000000),
            proptest::option::of(0u64..100000000),
        )
            .prop_map(
                |(
                    wall_ms,
                    exit_code,
                    warmup,
                    timed_out,
                    cpu_ms,
                    page_faults,
                    ctx_switches,
                    max_rss_kb,
                    binary_bytes,
                )| Sample {
                    wall_ms,
                    exit_code,
                    warmup,
                    timed_out,
                    cpu_ms,
                    page_faults,
                    ctx_switches,
                    max_rss_kb,
                    binary_bytes,
                    stdout: None,
                    stderr: None,
                },
            )
    }

    /// Ordered triple (min <= median <= max) of u64 values.
    fn u64_summary_strategy() -> impl Strategy<Value = U64Summary> {
        (0u64..1000000, 0u64..1000000, 0u64..1000000).prop_map(|(a, b, c)| {
            let mut vals = [a, b, c];
            // Primitive elements: unstable sort is faster and allocation-free.
            vals.sort_unstable();
            U64Summary {
                min: vals[0],
                median: vals[1],
                max: vals[2],
            }
        })
    }

    /// Ordered triple of finite f64 values (the ranges exclude NaN/inf).
    fn f64_summary_strategy() -> impl Strategy<Value = F64Summary> {
        (0.0f64..1000000.0, 0.0f64..1000000.0, 0.0f64..1000000.0).prop_map(|(a, b, c)| {
            let mut vals = [a, b, c];
            // total_cmp is a total order on f64 — no unwrap on partial_cmp.
            vals.sort_unstable_by(f64::total_cmp);
            F64Summary {
                min: vals[0],
                median: vals[1],
                max: vals[2],
            }
        })
    }

    /// Stats with wall_ms always present and every other summary optional.
    fn stats_strategy() -> impl Strategy<Value = Stats> {
        (
            u64_summary_strategy(),
            proptest::option::of(u64_summary_strategy()),
            proptest::option::of(u64_summary_strategy()),
            proptest::option::of(u64_summary_strategy()),
            proptest::option::of(u64_summary_strategy()),
            proptest::option::of(u64_summary_strategy()),
            proptest::option::of(f64_summary_strategy()),
        )
            .prop_map(
                |(
                    wall_ms,
                    cpu_ms,
                    page_faults,
                    ctx_switches,
                    max_rss_kb,
                    binary_bytes,
                    throughput_per_s,
                )| Stats {
                    wall_ms,
                    cpu_ms,
                    page_faults,
                    ctx_switches,
                    max_rss_kb,
                    binary_bytes,
                    throughput_per_s,
                },
            )
    }

    fn run_receipt_strategy() -> impl Strategy<Value = RunReceipt> {
        (
            tool_info_strategy(),
            run_meta_strategy(),
            bench_meta_strategy(),
            proptest::collection::vec(sample_strategy(), 1..10),
            stats_strategy(),
        )
            .prop_map(|(tool, run, bench, samples, stats)| RunReceipt {
                schema: RUN_SCHEMA_V1.to_string(),
                tool,
                run,
                bench,
                samples,
                stats,
            })
    }

    fn direction_strategy() -> impl Strategy<Value = Direction> {
        prop_oneof![Just(Direction::Lower), Just(Direction::Higher),]
    }

    /// warn_threshold is derived as a fraction of threshold, so it can never
    /// exceed the hard threshold.
    fn budget_strategy() -> impl Strategy<Value = Budget> {
        (0.01f64..1.0, 0.01f64..1.0, direction_strategy()).prop_map(
            |(threshold, warn_factor, direction)| {
                let warn_threshold = threshold * warn_factor;
                Budget {
                    threshold,
                    warn_threshold,
                    direction,
                }
            },
        )
    }

    fn metric_status_strategy() -> impl Strategy<Value = MetricStatus> {
        prop_oneof![
            Just(MetricStatus::Pass),
            Just(MetricStatus::Warn),
            Just(MetricStatus::Fail),
        ]
    }

    /// Delta whose ratio/pct/regression are internally consistent with the
    /// generated baseline/current pair.
    fn delta_strategy() -> impl Strategy<Value = Delta> {
        (0.1f64..10000.0, 0.1f64..10000.0, metric_status_strategy()).prop_map(
            |(baseline, current, status)| {
                let ratio = current / baseline;
                let pct = (current - baseline) / baseline;
                // Regression is the positive part of pct.
                let regression = pct.max(0.0);
                Delta {
                    baseline,
                    current,
                    ratio,
                    pct,
                    regression,
                    statistic: MetricStatistic::Median,
                    significance: None,
                    status,
                }
            },
        )
    }

    fn verdict_status_strategy() -> impl Strategy<Value = VerdictStatus> {
        prop_oneof![
            Just(VerdictStatus::Pass),
            Just(VerdictStatus::Warn),
            Just(VerdictStatus::Fail),
        ]
    }

    fn verdict_counts_strategy() -> impl Strategy<Value = VerdictCounts> {
        (0u32..10, 0u32..10, 0u32..10).prop_map(|(pass, warn, fail)| VerdictCounts {
            pass,
            warn,
            fail,
        })
    }

    fn verdict_strategy() -> impl Strategy<Value = Verdict> {
        (
            verdict_status_strategy(),
            verdict_counts_strategy(),
            proptest::collection::vec("[a-zA-Z0-9 ]{1,50}", 0..5),
        )
            .prop_map(|(status, counts, reasons)| Verdict {
                status,
                counts,
                reasons,
            })
    }

    fn metric_strategy() -> impl Strategy<Value = Metric> {
        prop_oneof![
            Just(Metric::BinaryBytes),
            Just(Metric::CpuMs),
            Just(Metric::CtxSwitches),
            Just(Metric::MaxRssKb),
            Just(Metric::PageFaults),
            Just(Metric::ThroughputPerS),
            Just(Metric::WallMs),
        ]
    }

    fn budgets_map_strategy() -> impl Strategy<Value = BTreeMap<Metric, Budget>> {
        proptest::collection::btree_map(metric_strategy(), budget_strategy(), 1..8)
    }

    fn deltas_map_strategy() -> impl Strategy<Value = BTreeMap<Metric, Delta>> {
        proptest::collection::btree_map(metric_strategy(), delta_strategy(), 1..8)
    }

    fn compare_ref_strategy() -> impl Strategy<Value = CompareRef> {
        (
            proptest::option::of(non_empty_string()),
            proptest::option::of(non_empty_string()),
        )
            .prop_map(|(path, run_id)| CompareRef { path, run_id })
    }

    fn compare_receipt_strategy() -> impl Strategy<Value = CompareReceipt> {
        (
            tool_info_strategy(),
            bench_meta_strategy(),
            compare_ref_strategy(),
            compare_ref_strategy(),
            budgets_map_strategy(),
            deltas_map_strategy(),
            verdict_strategy(),
        )
            .prop_map(
                |(tool, bench, baseline_ref, current_ref, budgets, deltas, verdict)| {
                    CompareReceipt {
                        schema: COMPARE_SCHEMA_V1.to_string(),
                        tool,
                        bench,
                        baseline_ref,
                        current_ref,
                        budgets,
                        deltas,
                        verdict,
                    }
                },
            )
    }

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(50))]

        #[test]
        fn run_export_csv_has_header_and_data(receipt in run_receipt_strategy()) {
            let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();

            prop_assert!(csv.starts_with("bench_name,wall_ms_median,wall_ms_min,wall_ms_max,binary_bytes_median,cpu_ms_median,ctx_switches_median,max_rss_kb_median,page_faults_median,throughput_median,sample_count,timestamp\n"));

            // Exactly one header line and one data line.
            let lines: Vec<&str> = csv.trim().split('\n').collect();
            prop_assert_eq!(lines.len(), 2);

            let bench_in_csv = csv.contains(&receipt.bench.name) || csv.contains(&format!("\"{}\"", receipt.bench.name));
            prop_assert!(bench_in_csv, "CSV should contain bench name");
        }

        #[test]
        fn run_export_jsonl_is_valid_json(receipt in run_receipt_strategy()) {
            let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();

            // A run receipt exports as exactly one JSONL record.
            let lines: Vec<&str> = jsonl.trim().split('\n').collect();
            prop_assert_eq!(lines.len(), 1);

            let parsed: Result<serde_json::Value, _> = serde_json::from_str(lines[0]);
            prop_assert!(parsed.is_ok());

            let json = parsed.unwrap();
            prop_assert_eq!(json["bench_name"].as_str().unwrap(), receipt.bench.name);
        }

        #[test]
        fn compare_export_csv_metrics_sorted(receipt in compare_receipt_strategy()) {
            let csv = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();

            let lines: Vec<&str> = csv.trim().split('\n').skip(1).collect();

            // Column 1 of each data row is the metric name (bench names from
            // the strategy never contain commas, so a naive split is safe).
            let mut metrics: Vec<String> = vec![];
            for line in &lines {
                let parts: Vec<&str> = line.split(',').collect();
                if parts.len() > 1 {
                    metrics.push(parts[1].trim_matches('"').to_string());
                }
            }

            let mut sorted_metrics = metrics.clone();
            sorted_metrics.sort();

            prop_assert_eq!(metrics, sorted_metrics, "Metrics should be sorted alphabetically");
        }

        #[test]
        fn compare_export_jsonl_line_per_metric(receipt in compare_receipt_strategy()) {
            let jsonl = ExportUseCase::export_compare(&receipt, ExportFormat::Jsonl).unwrap();

            // One valid JSON record per delta.
            let lines: Vec<&str> = jsonl.trim().split('\n').filter(|s| !s.is_empty()).collect();
            prop_assert_eq!(lines.len(), receipt.deltas.len());

            for line in &lines {
                let parsed: Result<serde_json::Value, _> = serde_json::from_str(line);
                prop_assert!(parsed.is_ok());
            }
        }

        #[test]
        fn export_is_deterministic(receipt in run_receipt_strategy()) {
            // Exporting the same receipt twice must yield identical bytes.
            let csv1 = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
            let csv2 = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
            prop_assert_eq!(csv1, csv2);

            let jsonl1 = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
            let jsonl2 = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
            prop_assert_eq!(jsonl1, jsonl2);
        }

        #[test]
        fn html_output_contains_valid_structure(receipt in run_receipt_strategy()) {
            let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();

            prop_assert!(html.starts_with("<!doctype html>"));
            prop_assert!(html.contains("<html>"));
            prop_assert!(html.contains("</html>"));
            prop_assert!(html.contains("<table"));
            prop_assert!(html.contains("</table>"));
            prop_assert!(html.contains(&receipt.bench.name));
        }

        #[test]
        fn prometheus_output_valid_format(receipt in run_receipt_strategy()) {
            let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();

            prop_assert!(prom.contains("perfgate_run_wall_ms_median"));
            let bench_label = format!("bench=\"{}\"", receipt.bench.name);
            prop_assert!(prom.contains(&bench_label));

            // Every non-empty line must carry a label set.
            for line in prom.lines() {
                if !line.is_empty() {
                    let has_open = line.chars().any(|c| c == '{');
                    let has_close = line.chars().any(|c| c == '}');
                    prop_assert!(has_open, "Prometheus line should contain opening brace");
                    prop_assert!(has_close, "Prometheus line should contain closing brace");
                }
            }
        }

        #[test]
        fn csv_escape_preserves_content(receipt in run_receipt_strategy()) {
            let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();

            // The bench name appears either bare or wrapped in quotes.
            // (Fixed: this line had been garbled to `csv.contains("ed_bench`.)
            let quoted_bench = format!("\"{}\"", receipt.bench.name);
            prop_assert!(csv.contains(&receipt.bench.name) || csv.contains(&quoted_bench));

            for line in csv.lines() {
                let quoted_count = line.matches('"').count();
                prop_assert!(quoted_count % 2 == 0, "Quotes should be balanced in CSV");
            }
        }
    }
}