1use perfgate_types::{CompareReceipt, Metric, MetricStatus, RunReceipt};
56
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ExportFormat {
    /// Comma-separated values: fixed header plus data row(s).
    Csv,
    /// JSON Lines: one serialized row object per line.
    Jsonl,
    /// Standalone HTML document containing a single table.
    Html,
    /// Prometheus text exposition format gauges.
    Prometheus,
    /// JUnit XML test report (also parsed from the alias `xml`).
    JUnit,
}
81
82impl ExportFormat {
83 pub fn parse(s: &str) -> Option<Self> {
94 s.parse().ok()
95 }
96}
97
98impl std::str::FromStr for ExportFormat {
99 type Err = ();
100
101 fn from_str(s: &str) -> Result<Self, Self::Err> {
115 match s.to_lowercase().as_str() {
116 "csv" => Ok(ExportFormat::Csv),
117 "jsonl" => Ok(ExportFormat::Jsonl),
118 "html" => Ok(ExportFormat::Html),
119 "prometheus" | "prom" => Ok(ExportFormat::Prometheus),
120 "junit" | "xml" => Ok(ExportFormat::JUnit),
121 _ => Err(()),
122 }
123 }
124}
125
/// Flattened, export-friendly view of a single benchmark run.
///
/// Optional fields correspond to metrics that are only collected on some
/// platforms or configurations; `None` renders as an empty cell in the
/// CSV/HTML exports and is omitted from the Prometheus export.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct RunExportRow {
    /// Benchmark name copied from the receipt.
    pub bench_name: String,
    /// Median wall-clock time in milliseconds across non-warmup samples.
    pub wall_ms_median: u64,
    pub wall_ms_min: u64,
    pub wall_ms_max: u64,
    pub binary_bytes_median: Option<u64>,
    pub cpu_ms_median: Option<u64>,
    pub ctx_switches_median: Option<u64>,
    pub energy_uj_median: Option<u64>,
    pub max_rss_kb_median: Option<u64>,
    pub page_faults_median: Option<u64>,
    pub io_read_bytes_median: Option<u64>,
    pub io_write_bytes_median: Option<u64>,
    pub network_packets_median: Option<u64>,
    pub throughput_median: Option<f64>,
    /// Number of samples excluding warmup iterations.
    pub sample_count: usize,
    /// Run start time, copied verbatim from the receipt's `started_at`.
    pub timestamp: String,
}
173
/// One exported comparison result: a single metric of a single benchmark.
///
/// All percentage fields (`regression_pct`, `threshold`, `warn_threshold`,
/// `cv`, `noise_threshold`) are stored as 0-100 percentages; the receipt's
/// ratio values are multiplied by 100 during conversion.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct CompareExportRow {
    pub bench_name: String,
    /// Canonical snake_case metric name (e.g. `wall_ms`).
    pub metric: String,
    pub baseline_value: f64,
    pub current_value: f64,
    pub regression_pct: f64,
    /// Lowercase status token: `pass`, `warn`, `fail`, or `skip`.
    pub status: String,
    /// Budget threshold as a percentage; 0.0 when no budget was configured.
    pub threshold: f64,
    pub warn_threshold: Option<f64>,
    pub cv: Option<f64>,
    pub noise_threshold: Option<f64>,
}
209
/// Stateless namespace for export operations; all methods are associated
/// functions taking the receipt to export.
pub struct ExportUseCase;
212
213impl ExportUseCase {
214 pub fn export_run(receipt: &RunReceipt, format: ExportFormat) -> anyhow::Result<String> {
253 let row = Self::run_to_row(receipt);
254
255 match format {
256 ExportFormat::Csv => Self::run_row_to_csv(&row),
257 ExportFormat::Jsonl => Self::run_row_to_jsonl(&row),
258 ExportFormat::Html => Self::run_row_to_html(&row),
259 ExportFormat::Prometheus => Self::run_row_to_prometheus(&row),
260 ExportFormat::JUnit => Self::run_row_to_junit_run(receipt, &row),
261 }
262 }
263
264 fn run_row_to_junit_run(receipt: &RunReceipt, _row: &RunExportRow) -> anyhow::Result<String> {
265 let mut out = String::new();
266 out.push_str("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
267 out.push_str("<testsuites name=\"perfgate\">\n");
268 out.push_str(&format!(
269 " <testsuite name=\"{}\" tests=\"1\" failures=\"0\" errors=\"0\">\n",
270 html_escape(&receipt.bench.name)
271 ));
272 out.push_str(&format!(
273 " <testcase name=\"execution\" classname=\"perfgate.{}\" time=\"{}\">\n",
274 html_escape(&receipt.bench.name),
275 receipt.stats.wall_ms.median as f64 / 1000.0
276 ));
277 out.push_str(" </testcase>\n");
278 out.push_str(" </testsuite>\n");
279 out.push_str("</testsuites>\n");
280 Ok(out)
281 }
282
283 pub fn export_compare(
316 receipt: &CompareReceipt,
317 format: ExportFormat,
318 ) -> anyhow::Result<String> {
319 let rows = Self::compare_to_rows(receipt);
320
321 match format {
322 ExportFormat::Csv => Self::compare_rows_to_csv(&rows),
323 ExportFormat::Jsonl => Self::compare_rows_to_jsonl(&rows),
324 ExportFormat::Html => Self::compare_rows_to_html(&rows),
325 ExportFormat::Prometheus => Self::compare_rows_to_prometheus(&rows),
326 ExportFormat::JUnit => Self::compare_rows_to_junit(receipt, &rows),
327 }
328 }
329
330 fn run_to_row(receipt: &RunReceipt) -> RunExportRow {
332 let sample_count = receipt.samples.iter().filter(|s| !s.warmup).count();
333
334 RunExportRow {
335 bench_name: receipt.bench.name.clone(),
336 wall_ms_median: receipt.stats.wall_ms.median,
337 wall_ms_min: receipt.stats.wall_ms.min,
338 wall_ms_max: receipt.stats.wall_ms.max,
339 binary_bytes_median: receipt.stats.binary_bytes.as_ref().map(|s| s.median),
340 cpu_ms_median: receipt.stats.cpu_ms.as_ref().map(|s| s.median),
341 ctx_switches_median: receipt.stats.ctx_switches.as_ref().map(|s| s.median),
342 energy_uj_median: receipt.stats.energy_uj.as_ref().map(|s| s.median),
343 max_rss_kb_median: receipt.stats.max_rss_kb.as_ref().map(|s| s.median),
344 page_faults_median: receipt.stats.page_faults.as_ref().map(|s| s.median),
345 io_read_bytes_median: receipt.stats.io_read_bytes.as_ref().map(|s| s.median),
346 io_write_bytes_median: receipt.stats.io_write_bytes.as_ref().map(|s| s.median),
347 network_packets_median: receipt.stats.network_packets.as_ref().map(|s| s.median),
348 throughput_median: receipt.stats.throughput_per_s.as_ref().map(|s| s.median),
349 sample_count,
350 timestamp: receipt.run.started_at.clone(),
351 }
352 }
353
354 fn compare_to_rows(receipt: &CompareReceipt) -> Vec<CompareExportRow> {
356 let mut rows: Vec<CompareExportRow> = receipt
357 .deltas
358 .iter()
359 .map(|(metric, delta)| {
360 let budget = receipt.budgets.get(metric);
361 let threshold = budget.map(|b| b.threshold).unwrap_or(0.0);
362 let warn_threshold = budget.map(|b| b.warn_threshold);
363
364 CompareExportRow {
365 bench_name: receipt.bench.name.clone(),
366 metric: metric_to_string(*metric),
367 baseline_value: delta.baseline,
368 current_value: delta.current,
369 regression_pct: delta.pct * 100.0,
370 status: status_to_string(delta.status),
371 threshold: threshold * 100.0,
372 warn_threshold: warn_threshold.map(|t| t * 100.0),
373 cv: delta.cv.map(|cv| cv * 100.0),
374 noise_threshold: delta.noise_threshold.map(|t| t * 100.0),
375 }
376 })
377 .collect();
378
379 rows.sort_by(|a, b| a.metric.cmp(&b.metric));
380 rows
381 }
382
383 fn run_row_to_csv(row: &RunExportRow) -> anyhow::Result<String> {
384 let mut output = String::new();
385
386 output.push_str("bench_name,wall_ms_median,wall_ms_min,wall_ms_max,binary_bytes_median,cpu_ms_median,ctx_switches_median,max_rss_kb_median,page_faults_median,io_read_bytes_median,io_write_bytes_median,network_packets_median,energy_uj_median,throughput_median,sample_count,timestamp\n");
387
388 output.push_str(&csv_escape(&row.bench_name));
389 output.push(',');
390 output.push_str(&row.wall_ms_median.to_string());
391 output.push(',');
392 output.push_str(&row.wall_ms_min.to_string());
393 output.push(',');
394 output.push_str(&row.wall_ms_max.to_string());
395 output.push(',');
396 output.push_str(
397 &row.binary_bytes_median
398 .map_or(String::new(), |v| v.to_string()),
399 );
400 output.push(',');
401 output.push_str(&row.cpu_ms_median.map_or(String::new(), |v| v.to_string()));
402 output.push(',');
403 output.push_str(
404 &row.ctx_switches_median
405 .map_or(String::new(), |v| v.to_string()),
406 );
407 output.push(',');
408 output.push_str(
409 &row.max_rss_kb_median
410 .map_or(String::new(), |v| v.to_string()),
411 );
412 output.push(',');
413 output.push_str(
414 &row.page_faults_median
415 .map_or(String::new(), |v| v.to_string()),
416 );
417 output.push(',');
418 output.push_str(
419 &row.io_read_bytes_median
420 .map_or(String::new(), |v| v.to_string()),
421 );
422 output.push(',');
423 output.push_str(
424 &row.io_write_bytes_median
425 .map_or(String::new(), |v| v.to_string()),
426 );
427 output.push(',');
428 output.push_str(
429 &row.network_packets_median
430 .map_or(String::new(), |v| v.to_string()),
431 );
432 output.push(',');
433 output.push_str(
434 &row.energy_uj_median
435 .map_or(String::new(), |v| v.to_string()),
436 );
437 output.push(',');
438 output.push_str(
439 &row.throughput_median
440 .map_or(String::new(), |v| format!("{:.6}", v)),
441 );
442 output.push(',');
443 output.push_str(&row.sample_count.to_string());
444 output.push(',');
445 output.push_str(&csv_escape(&row.timestamp));
446 output.push('\n');
447
448 Ok(output)
449 }
450
451 fn run_row_to_jsonl(row: &RunExportRow) -> anyhow::Result<String> {
453 let json = serde_json::to_string(row)?;
454 Ok(format!("{}\n", json))
455 }
456
457 fn compare_rows_to_csv(rows: &[CompareExportRow]) -> anyhow::Result<String> {
459 let mut output = String::new();
460
461 output.push_str(
462 "bench_name,metric,baseline_value,current_value,regression_pct,status,threshold\n",
463 );
464
465 for row in rows {
466 output.push_str(&csv_escape(&row.bench_name));
467 output.push(',');
468 output.push_str(&csv_escape(&row.metric));
469 output.push(',');
470 output.push_str(&format!("{:.6}", row.baseline_value));
471 output.push(',');
472 output.push_str(&format!("{:.6}", row.current_value));
473 output.push(',');
474 output.push_str(&format!("{:.6}", row.regression_pct));
475 output.push(',');
476 output.push_str(&csv_escape(&row.status));
477 output.push(',');
478 output.push_str(&format!("{:.6}", row.threshold));
479 output.push('\n');
480 }
481
482 Ok(output)
483 }
484
485 fn compare_rows_to_jsonl(rows: &[CompareExportRow]) -> anyhow::Result<String> {
487 let mut output = String::new();
488
489 for row in rows {
490 let json = serde_json::to_string(row)?;
491 output.push_str(&json);
492 output.push('\n');
493 }
494
495 Ok(output)
496 }
497
498 fn run_row_to_html(row: &RunExportRow) -> anyhow::Result<String> {
499 let html = format!(
500 "<!doctype html><html><head><meta charset=\"utf-8\"><title>perfgate run export</title></head><body>\
501 <h1>perfgate run export</h1>\
502 <table border=\"1\">\
503 <thead><tr><th>bench_name</th><th>wall_ms_median</th><th>wall_ms_min</th><th>wall_ms_max</th><th>binary_bytes_median</th><th>cpu_ms_median</th><th>ctx_switches_median</th><th>max_rss_kb_median</th><th>page_faults_median</th><th>io_read_bytes_median</th><th>io_write_bytes_median</th><th>network_packets_median</th><th>energy_uj_median</th><th>throughput_median</th><th>sample_count</th><th>timestamp</th></tr></thead>\
504 <tbody><tr><td>{bench}</td><td>{wall_med}</td><td>{wall_min}</td><td>{wall_max}</td><td>{binary}</td><td>{cpu}</td><td>{ctx}</td><td>{rss}</td><td>{pf}</td><td>{io_read}</td><td>{io_write}</td><td>{net}</td><td>{energy}</td><td>{throughput}</td><td>{sample_count}</td><td>{timestamp}</td></tr></tbody>\
505 </table></body></html>\n",
506 bench = html_escape(&row.bench_name),
507 wall_med = row.wall_ms_median,
508 wall_min = row.wall_ms_min,
509 wall_max = row.wall_ms_max,
510 binary = row
511 .binary_bytes_median
512 .map_or(String::new(), |v| v.to_string()),
513 cpu = row.cpu_ms_median.map_or(String::new(), |v| v.to_string()),
514 ctx = row
515 .ctx_switches_median
516 .map_or(String::new(), |v| v.to_string()),
517 rss = row
518 .max_rss_kb_median
519 .map_or(String::new(), |v| v.to_string()),
520 pf = row
521 .page_faults_median
522 .map_or(String::new(), |v| v.to_string()),
523 io_read = row
524 .io_read_bytes_median
525 .map_or(String::new(), |v| v.to_string()),
526 io_write = row
527 .io_write_bytes_median
528 .map_or(String::new(), |v| v.to_string()),
529 net = row
530 .network_packets_median
531 .map_or(String::new(), |v| v.to_string()),
532 energy = row
533 .energy_uj_median
534 .map_or(String::new(), |v| v.to_string()),
535 throughput = row
536 .throughput_median
537 .map_or(String::new(), |v| format!("{:.6}", v)),
538 sample_count = row.sample_count,
539 timestamp = html_escape(&row.timestamp),
540 );
541 Ok(html)
542 }
543
544 fn compare_rows_to_html(rows: &[CompareExportRow]) -> anyhow::Result<String> {
545 let mut out = String::from(
546 "<!doctype html><html><head><meta charset=\"utf-8\"><title>perfgate compare export</title></head><body><h1>perfgate compare export</h1><table border=\"1\"><thead><tr><th>bench_name</th><th>metric</th><th>baseline_value</th><th>current_value</th><th>regression_pct</th><th>status</th><th>threshold</th></tr></thead><tbody>",
547 );
548
549 for row in rows {
550 out.push_str(&format!(
551 "<tr><td>{}</td><td>{}</td><td>{:.6}</td><td>{:.6}</td><td>{:.6}</td><td>{}</td><td>{:.6}</td></tr>",
552 html_escape(&row.bench_name),
553 html_escape(&row.metric),
554 row.baseline_value,
555 row.current_value,
556 row.regression_pct,
557 html_escape(&row.status),
558 row.threshold
559 ));
560 }
561
562 out.push_str("</tbody></table></body></html>\n");
563 Ok(out)
564 }
565
566 fn run_row_to_prometheus(row: &RunExportRow) -> anyhow::Result<String> {
567 let bench = prometheus_escape_label_value(&row.bench_name);
568 let mut out = String::new();
569 out.push_str(&format!(
570 "perfgate_run_wall_ms_median{{bench=\"{}\"}} {}\n",
571 bench, row.wall_ms_median
572 ));
573 out.push_str(&format!(
574 "perfgate_run_wall_ms_min{{bench=\"{}\"}} {}\n",
575 bench, row.wall_ms_min
576 ));
577 out.push_str(&format!(
578 "perfgate_run_wall_ms_max{{bench=\"{}\"}} {}\n",
579 bench, row.wall_ms_max
580 ));
581 if let Some(v) = row.binary_bytes_median {
582 out.push_str(&format!(
583 "perfgate_run_binary_bytes_median{{bench=\"{}\"}} {}\n",
584 bench, v
585 ));
586 }
587 if let Some(v) = row.cpu_ms_median {
588 out.push_str(&format!(
589 "perfgate_run_cpu_ms_median{{bench=\"{}\"}} {}\n",
590 bench, v
591 ));
592 }
593 if let Some(v) = row.ctx_switches_median {
594 out.push_str(&format!(
595 "perfgate_run_ctx_switches_median{{bench=\"{}\"}} {}\n",
596 bench, v
597 ));
598 }
599 if let Some(v) = row.max_rss_kb_median {
600 out.push_str(&format!(
601 "perfgate_run_max_rss_kb_median{{bench=\"{}\"}} {}\n",
602 bench, v
603 ));
604 }
605 if let Some(v) = row.page_faults_median {
606 out.push_str(&format!(
607 "perfgate_run_page_faults_median{{bench=\"{}\"}} {}\n",
608 bench, v
609 ));
610 }
611 if let Some(v) = row.io_read_bytes_median {
612 out.push_str(&format!(
613 "perfgate_run_io_read_bytes_median{{bench=\"{}\"}} {}\n",
614 bench, v
615 ));
616 }
617 if let Some(v) = row.io_write_bytes_median {
618 out.push_str(&format!(
619 "perfgate_run_io_write_bytes_median{{bench=\"{}\"}} {}\n",
620 bench, v
621 ));
622 }
623 if let Some(v) = row.network_packets_median {
624 out.push_str(&format!(
625 "perfgate_run_network_packets_median{{bench=\"{}\"}} {}\n",
626 bench, v
627 ));
628 }
629 if let Some(v) = row.energy_uj_median {
630 out.push_str(&format!(
631 "perfgate_run_energy_uj_median{{bench=\"{}\"}} {}\n",
632 bench, v
633 ));
634 }
635 if let Some(v) = row.throughput_median {
636 out.push_str(&format!(
637 "perfgate_run_throughput_per_s_median{{bench=\"{}\"}} {:.6}\n",
638 bench, v
639 ));
640 }
641 out.push_str(&format!(
642 "perfgate_run_sample_count{{bench=\"{}\"}} {}\n",
643 bench, row.sample_count
644 ));
645 Ok(out)
646 }
647
648 fn compare_rows_to_junit(
649 receipt: &CompareReceipt,
650 rows: &[CompareExportRow],
651 ) -> anyhow::Result<String> {
652 let mut out = String::new();
653 let total = rows.len();
654 let failures = rows.iter().filter(|r| r.status == "fail").count();
655 let errors = rows.iter().filter(|r| r.status == "error").count();
656
657 out.push_str("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
658 out.push_str(&format!(
659 "<testsuites name=\"perfgate\" tests=\"{}\" failures=\"{}\" errors=\"{}\">\n",
660 total, failures, errors
661 ));
662
663 out.push_str(&format!(
664 " <testsuite name=\"{}\" tests=\"{}\" failures=\"{}\" errors=\"{}\">\n",
665 html_escape(&receipt.bench.name),
666 total,
667 failures,
668 errors
669 ));
670
671 for row in rows {
672 let classname = format!("perfgate.{}", html_escape(&receipt.bench.name));
673 out.push_str(&format!(
674 " <testcase name=\"{}\" classname=\"{}\" time=\"0.0\">\n",
675 html_escape(&row.metric),
676 classname
677 ));
678
679 if row.status == "fail" {
680 out.push_str(&format!(
681 " <failure message=\"Performance regression detected for {}\">",
682 html_escape(&row.metric)
683 ));
684 out.push_str(&format!(
685 "Metric: {}\nBaseline: {:.6}\nCurrent: {:.6}\nRegression: {:.2}%\nThreshold: {:.2}%",
686 row.metric, row.baseline_value, row.current_value, row.regression_pct, row.threshold
687 ));
688 out.push_str("</failure>\n");
689 } else if row.status == "error" {
690 out.push_str(&format!(
691 " <error message=\"Error occurred during performance check for {}\">",
692 html_escape(&row.metric)
693 ));
694 out.push_str("</error>\n");
695 }
696
697 out.push_str(" </testcase>\n");
698 }
699
700 out.push_str(" </testsuite>\n");
701 out.push_str("</testsuites>\n");
702
703 Ok(out)
704 }
705
706 fn compare_rows_to_prometheus(rows: &[CompareExportRow]) -> anyhow::Result<String> {
707 let mut out = String::new();
708 for row in rows {
709 let bench = prometheus_escape_label_value(&row.bench_name);
710 let metric = prometheus_escape_label_value(&row.metric);
711 out.push_str(&format!(
712 "perfgate_compare_baseline_value{{bench=\"{}\",metric=\"{}\"}} {:.6}\n",
713 bench, metric, row.baseline_value
714 ));
715 out.push_str(&format!(
716 "perfgate_compare_current_value{{bench=\"{}\",metric=\"{}\"}} {:.6}\n",
717 bench, metric, row.current_value
718 ));
719 out.push_str(&format!(
720 "perfgate_compare_regression_pct{{bench=\"{}\",metric=\"{}\"}} {:.6}\n",
721 bench, metric, row.regression_pct
722 ));
723 out.push_str(&format!(
724 "perfgate_compare_threshold_pct{{bench=\"{}\",metric=\"{}\"}} {:.6}\n",
725 bench, metric, row.threshold
726 ));
727
728 let status_code = match row.status.as_str() {
729 "pass" => 0,
730 "warn" => 1,
731 "fail" => 2,
732 _ => -1,
733 };
734 out.push_str(&format!(
735 "perfgate_compare_status{{bench=\"{}\",metric=\"{}\",status=\"{}\"}} {}\n",
736 bench,
737 metric,
738 prometheus_escape_label_value(&row.status),
739 status_code
740 ));
741 }
742 Ok(out)
743 }
744}
745
746fn metric_to_string(metric: Metric) -> String {
748 metric.as_str().to_string()
749}
750
751fn status_to_string(status: MetricStatus) -> String {
753 match status {
754 MetricStatus::Pass => "pass".to_string(),
755 MetricStatus::Warn => "warn".to_string(),
756 MetricStatus::Fail => "fail".to_string(),
757 MetricStatus::Skip => "skip".to_string(),
758 }
759}
760
/// Quote a CSV field only when it contains a delimiter, a double quote, or
/// a line break; embedded double quotes are doubled per RFC 4180.
pub fn csv_escape(s: &str) -> String {
    let needs_quoting = s.chars().any(|c| matches!(c, ',' | '"' | '\n' | '\r'));
    if !needs_quoting {
        return s.to_string();
    }
    let mut out = String::with_capacity(s.len() + 2);
    out.push('"');
    for c in s.chars() {
        if c == '"' {
            out.push('"');
        }
        out.push(c);
    }
    out.push('"');
    out
}
780
/// Escape the XML/HTML special characters `&`, `<`, `>`, and `"`.
///
/// `&` must be replaced first so the entities introduced for the other
/// characters are not themselves re-escaped. Used for HTML table cells and
/// for JUnit XML attributes/content (single quotes are not escaped because
/// all generated attributes use double quotes).
///
/// The previous implementation replaced each character with itself (a
/// no-op), leaving HTML/XML output malformed and unescaped.
fn html_escape(s: &str) -> String {
    s.replace('&', "&amp;")
        .replace('<', "&lt;")
        .replace('>', "&gt;")
        .replace('"', "&quot;")
}
787
/// Escape a Prometheus label value for the text exposition format.
///
/// The format requires `\`, `"`, and line feed to be escaped as `\\`,
/// `\"`, and `\n` inside label values; the newline case was previously
/// missing, which would break the line-oriented format. Backslash is
/// replaced first so later escapes are not double-escaped.
fn prometheus_escape_label_value(s: &str) -> String {
    s.replace('\\', "\\\\")
        .replace('"', "\\\"")
        .replace('\n', "\\n")
}
791
792#[cfg(test)]
793mod tests {
794 use super::*;
795 use perfgate_types::{
796 BenchMeta, Budget, COMPARE_SCHEMA_V1, CompareRef, Delta, Direction, F64Summary, HostInfo,
797 Metric, MetricStatistic, MetricStatus, RUN_SCHEMA_V1, RunMeta, Sample, Stats, ToolInfo,
798 U64Summary, Verdict, VerdictCounts, VerdictStatus,
799 };
800 use std::collections::BTreeMap;
801
    // Fixture: a fully-populated run receipt with two non-warmup samples
    // (wall/cpu/rss collected) and matching summary stats.
    fn create_test_run_receipt() -> RunReceipt {
        RunReceipt {
            schema: RUN_SCHEMA_V1.to_string(),
            tool: ToolInfo {
                name: "perfgate".to_string(),
                version: "0.1.0".to_string(),
            },
            run: RunMeta {
                id: "test-run-001".to_string(),
                started_at: "2024-01-15T10:00:00Z".to_string(),
                ended_at: "2024-01-15T10:00:05Z".to_string(),
                host: HostInfo {
                    os: "linux".to_string(),
                    arch: "x86_64".to_string(),
                    cpu_count: None,
                    memory_bytes: None,
                    hostname_hash: None,
                },
            },
            bench: BenchMeta {
                name: "test-benchmark".to_string(),
                cwd: None,
                command: vec!["echo".to_string(), "hello".to_string()],
                repeat: 5,
                warmup: 0,
                work_units: None,
                timeout_ms: None,
            },
            samples: vec![
                Sample {
                    wall_ms: 100,
                    exit_code: 0,
                    warmup: false,
                    timed_out: false,
                    cpu_ms: Some(50),
                    page_faults: None,
                    ctx_switches: None,
                    max_rss_kb: Some(1024),
                    io_read_bytes: None,
                    io_write_bytes: None,
                    network_packets: None,
                    energy_uj: None,
                    binary_bytes: None,
                    stdout: None,
                    stderr: None,
                },
                Sample {
                    wall_ms: 102,
                    exit_code: 0,
                    warmup: false,
                    timed_out: false,
                    cpu_ms: Some(52),
                    page_faults: None,
                    ctx_switches: None,
                    max_rss_kb: Some(1028),
                    io_read_bytes: None,
                    io_write_bytes: None,
                    network_packets: None,
                    energy_uj: None,
                    binary_bytes: None,
                    stdout: None,
                    stderr: None,
                },
            ],
            stats: Stats {
                // Summaries are (median, min, max).
                wall_ms: U64Summary::new(100, 98, 102),
                cpu_ms: Some(U64Summary::new(50, 48, 52)),
                page_faults: None,
                ctx_switches: None,
                max_rss_kb: Some(U64Summary::new(1024, 1020, 1028)),
                io_read_bytes: None,
                io_write_bytes: None,
                network_packets: None,
                energy_uj: None,
                binary_bytes: None,
                throughput_per_s: None,
            },
        }
    }
881
882 fn create_test_compare_receipt() -> CompareReceipt {
883 let mut budgets = BTreeMap::new();
884 budgets.insert(Metric::WallMs, Budget::new(0.2, 0.18, Direction::Lower));
885 budgets.insert(Metric::MaxRssKb, Budget::new(0.15, 0.135, Direction::Lower));
886
887 let mut deltas = BTreeMap::new();
888 deltas.insert(
889 Metric::WallMs,
890 Delta {
891 baseline: 100.0,
892 current: 110.0,
893 ratio: 1.1,
894 pct: 0.1,
895 regression: 0.1,
896 cv: None,
897 noise_threshold: None,
898 statistic: MetricStatistic::Median,
899 significance: None,
900 status: MetricStatus::Pass,
901 },
902 );
903 deltas.insert(
904 Metric::MaxRssKb,
905 Delta {
906 baseline: 1024.0,
907 current: 1280.0,
908 ratio: 1.25,
909 pct: 0.25,
910 regression: 0.25,
911 cv: None,
912 noise_threshold: None,
913 statistic: MetricStatistic::Median,
914 significance: None,
915 status: MetricStatus::Fail,
916 },
917 );
918
919 CompareReceipt {
920 schema: COMPARE_SCHEMA_V1.to_string(),
921 tool: ToolInfo {
922 name: "perfgate".to_string(),
923 version: "0.1.0".to_string(),
924 },
925 bench: BenchMeta {
926 name: "alpha-bench".to_string(),
927 cwd: None,
928 command: vec!["test".to_string()],
929 repeat: 5,
930 warmup: 0,
931 work_units: None,
932 timeout_ms: None,
933 },
934 baseline_ref: CompareRef {
935 path: Some("baseline.json".to_string()),
936 run_id: Some("baseline-001".to_string()),
937 },
938 current_ref: CompareRef {
939 path: Some("current.json".to_string()),
940 run_id: Some("current-001".to_string()),
941 },
942 budgets,
943 deltas,
944 verdict: Verdict {
945 status: VerdictStatus::Fail,
946 counts: VerdictCounts {
947 pass: 1,
948 warn: 0,
949 fail: 0,
950 skip: 0,
951 },
952 reasons: vec!["max_rss_kb_fail".to_string()],
953 },
954 }
955 }
956
    // CSV export of a run: header row plus the flattened median/min/max
    // values and the timestamp.
    #[test]
    fn test_run_export_csv() {
        let receipt = create_test_run_receipt();
        let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();

        assert!(csv.starts_with("bench_name,wall_ms_median,"));
        assert!(csv.contains("test-benchmark"));
        // wall_ms_median,wall_ms_min,wall_ms_max appear adjacent in the row.
        assert!(csv.contains("100,98,102"));
        assert!(csv.contains("1024"));
        assert!(csv.contains("2024-01-15T10:00:00Z"));
    }

    // JSONL export of a run: exactly one line that parses as JSON.
    #[test]
    fn test_run_export_jsonl() {
        let receipt = create_test_run_receipt();
        let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();

        let lines: Vec<&str> = jsonl.trim().split('\n').collect();
        assert_eq!(lines.len(), 1);

        let parsed: serde_json::Value = serde_json::from_str(lines[0]).unwrap();
        assert_eq!(parsed["bench_name"], "test-benchmark");
        assert_eq!(parsed["wall_ms_median"], 100);
    }
981
    // Compare CSV: rows are sorted by metric name, so max_rss_kb precedes
    // wall_ms.
    #[test]
    fn test_compare_export_csv() {
        let receipt = create_test_compare_receipt();
        let csv = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();

        assert!(csv.starts_with("bench_name,metric,baseline_value,"));
        assert!(csv.contains("alpha-bench"));
        assert!(csv.contains("max_rss_kb"));
        assert!(csv.contains("wall_ms"));
        let max_rss_pos = csv.find("max_rss_kb").unwrap();
        let wall_ms_pos = csv.find("wall_ms").unwrap();
        assert!(max_rss_pos < wall_ms_pos);
    }

    // Compare JSONL: one valid JSON object per delta, in sorted metric order.
    #[test]
    fn test_compare_export_jsonl() {
        let receipt = create_test_compare_receipt();
        let jsonl = ExportUseCase::export_compare(&receipt, ExportFormat::Jsonl).unwrap();

        let lines: Vec<&str> = jsonl.trim().split('\n').collect();
        assert_eq!(lines.len(), 2);

        for line in &lines {
            let _: serde_json::Value = serde_json::from_str(line).unwrap();
        }

        let first: serde_json::Value = serde_json::from_str(lines[0]).unwrap();
        assert_eq!(first["metric"], "max_rss_kb");
    }
1011
    // csv_escape quotes only when a delimiter, quote, or newline is present,
    // and doubles embedded quotes.
    #[test]
    fn test_csv_escape() {
        assert_eq!(csv_escape("simple"), "simple");
        assert_eq!(csv_escape("has,comma"), "\"has,comma\"");
        assert_eq!(csv_escape("has\"quote"), "\"has\"\"quote\"");
        assert_eq!(csv_escape("has\nnewline"), "\"has\nnewline\"");
    }

    // Exporting the same receipt twice must yield identical CSV (BTreeMap
    // iteration plus explicit sort keep ordering deterministic).
    #[test]
    fn test_stable_ordering_across_runs() {
        let receipt = create_test_compare_receipt();

        let csv1 = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
        let csv2 = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();

        assert_eq!(csv1, csv2, "CSV output should be deterministic");
    }

    // Format parsing is case-insensitive; unknown names yield None.
    #[test]
    fn test_export_format_from_str() {
        assert_eq!(ExportFormat::parse("csv"), Some(ExportFormat::Csv));
        assert_eq!(ExportFormat::parse("CSV"), Some(ExportFormat::Csv));
        assert_eq!(ExportFormat::parse("jsonl"), Some(ExportFormat::Jsonl));
        assert_eq!(ExportFormat::parse("JSONL"), Some(ExportFormat::Jsonl));
        assert_eq!(ExportFormat::parse("html"), Some(ExportFormat::Html));
        assert_eq!(
            ExportFormat::parse("prometheus"),
            Some(ExportFormat::Prometheus)
        );
        assert_eq!(ExportFormat::parse("invalid"), None);
    }
1043
    // HTML and Prometheus run exports contain the expected structure and
    // bench labels.
    #[test]
    fn test_run_export_html_and_prometheus() {
        let receipt = create_test_run_receipt();

        let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
        assert!(html.contains("<table"), "html output should contain table");
        assert!(html.contains("test-benchmark"));

        let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
        assert!(prom.contains("perfgate_run_wall_ms_median"));
        assert!(prom.contains("bench=\"test-benchmark\""));
    }

    // Compare Prometheus export emits per-metric labeled gauges.
    #[test]
    fn test_compare_export_prometheus() {
        let receipt = create_test_compare_receipt();
        let prom = ExportUseCase::export_compare(&receipt, ExportFormat::Prometheus).unwrap();
        assert!(prom.contains("perfgate_compare_regression_pct"));
        assert!(prom.contains("metric=\"max_rss_kb\""));
    }

    // Compare JUnit export: one testcase per metric, with a <failure>
    // element carrying the details of the failing max_rss_kb delta.
    #[test]
    fn test_compare_export_junit() {
        let receipt = create_test_compare_receipt();
        let junit = ExportUseCase::export_compare(&receipt, ExportFormat::JUnit).unwrap();

        assert!(junit.contains("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"));
        assert!(junit.contains("<testsuites name=\"perfgate\""));
        assert!(junit.contains("testsuite name=\"alpha-bench\""));
        assert!(junit.contains("testcase name=\"wall_ms\""));
        assert!(junit.contains("testcase name=\"max_rss_kb\""));
        assert!(
            junit.contains("<failure message=\"Performance regression detected for max_rss_kb\">")
        );
        assert!(junit.contains("Baseline: 1024.000000"));
        assert!(junit.contains("Current: 1280.000000"));
    }
1081
1082 #[test]
1083 fn test_html_escape() {
1084 assert_eq!(html_escape("simple"), "simple");
1085 assert_eq!(html_escape("<script>"), "<script>");
1086 assert_eq!(html_escape("a&b"), "a&b");
1087 assert_eq!(html_escape("\"quoted\""), ""quoted"");
1088 }
1089
    // Label values escape backslashes and quotes for the Prometheus text
    // exposition format.
    #[test]
    fn test_prometheus_escape() {
        assert_eq!(prometheus_escape_label_value("simple"), "simple");
        assert_eq!(prometheus_escape_label_value("has\"quote"), "has\\\"quote");
        assert_eq!(
            prometheus_escape_label_value("has\\backslash"),
            "has\\\\backslash"
        );
    }
1099
    // Golden-file tests (insta): guard the exact output format. Any
    // intentional format change requires reviewing the updated snapshots.
    mod snapshot_tests {
        use super::*;
        use insta::assert_snapshot;

        #[test]
        fn test_run_html_snapshot() {
            let receipt = create_test_run_receipt();
            let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
            assert_snapshot!("run_html", html);
        }

        #[test]
        fn test_run_prometheus_snapshot() {
            let receipt = create_test_run_receipt();
            let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
            assert_snapshot!("run_prometheus", prom);
        }

        #[test]
        fn test_compare_html_snapshot() {
            let receipt = create_test_compare_receipt();
            let html = ExportUseCase::export_compare(&receipt, ExportFormat::Html).unwrap();
            assert_snapshot!("compare_html", html);
        }

        #[test]
        fn test_compare_prometheus_snapshot() {
            let receipt = create_test_compare_receipt();
            let prom = ExportUseCase::export_compare(&receipt, ExportFormat::Prometheus).unwrap();
            assert_snapshot!("compare_prometheus", prom);
        }
    }
1132
1133 mod edge_case_tests {
1134 use super::*;
1135
        // Fixture: a run receipt with zero samples and all optional stats
        // absent — exercises empty-cell rendering in every format.
        fn create_empty_run_receipt() -> RunReceipt {
            RunReceipt {
                schema: RUN_SCHEMA_V1.to_string(),
                tool: ToolInfo {
                    name: "perfgate".to_string(),
                    version: "0.1.0".to_string(),
                },
                run: RunMeta {
                    id: "empty-run".to_string(),
                    started_at: "2024-01-01T00:00:00Z".to_string(),
                    ended_at: "2024-01-01T00:00:01Z".to_string(),
                    host: HostInfo {
                        os: "linux".to_string(),
                        arch: "x86_64".to_string(),
                        cpu_count: None,
                        memory_bytes: None,
                        hostname_hash: None,
                    },
                },
                bench: BenchMeta {
                    name: "empty-bench".to_string(),
                    cwd: None,
                    command: vec!["true".to_string()],
                    repeat: 0,
                    warmup: 0,
                    work_units: None,
                    timeout_ms: None,
                },
                samples: vec![],
                stats: Stats {
                    wall_ms: U64Summary::new(0, 0, 0),
                    cpu_ms: None,
                    page_faults: None,
                    ctx_switches: None,
                    max_rss_kb: None,
                    io_read_bytes: None,
                    io_write_bytes: None,
                    network_packets: None,
                    energy_uj: None,
                    binary_bytes: None,
                    throughput_per_s: None,
                },
            }
        }
1180
        // Fixture: a compare receipt with no budgets and no deltas —
        // exercises header-only/empty-body rendering.
        fn create_empty_compare_receipt() -> CompareReceipt {
            CompareReceipt {
                schema: COMPARE_SCHEMA_V1.to_string(),
                tool: ToolInfo {
                    name: "perfgate".to_string(),
                    version: "0.1.0".to_string(),
                },
                bench: BenchMeta {
                    name: "empty-bench".to_string(),
                    cwd: None,
                    command: vec!["true".to_string()],
                    repeat: 0,
                    warmup: 0,
                    work_units: None,
                    timeout_ms: None,
                },
                baseline_ref: CompareRef {
                    path: None,
                    run_id: None,
                },
                current_ref: CompareRef {
                    path: None,
                    run_id: None,
                },
                budgets: BTreeMap::new(),
                deltas: BTreeMap::new(),
                verdict: Verdict {
                    status: VerdictStatus::Pass,
                    counts: VerdictCounts {
                        // NOTE(review): pass is 1 despite there being no
                        // deltas — confirm whether this is intended.
                        pass: 1,
                        warn: 0,
                        fail: 0,
                        skip: 0,
                    },
                    reasons: vec![],
                },
            }
        }
1219
        // Fixture: an otherwise-empty receipt with a custom bench name and
        // exactly one minimal sample — used for name-escaping edge cases.
        fn create_run_receipt_with_bench_name(name: &str) -> RunReceipt {
            let mut receipt = create_empty_run_receipt();
            receipt.bench.name = name.to_string();
            receipt.samples.push(Sample {
                wall_ms: 42,
                exit_code: 0,
                warmup: false,
                timed_out: false,
                cpu_ms: None,
                page_faults: None,
                ctx_switches: None,
                max_rss_kb: None,
                io_read_bytes: None,
                io_write_bytes: None,
                network_packets: None,
                energy_uj: None,
                binary_bytes: None,
                stdout: None,
                stderr: None,
            });
            // Keep stats consistent with the single 42 ms sample.
            receipt.stats.wall_ms = U64Summary::new(42, 42, 42);
            receipt
        }
1243
        // An empty run receipt still produces a header plus one data row
        // (with empty cells for the absent optional metrics).
        #[test]
        fn empty_run_receipt_csv_has_header_and_one_row() {
            let receipt = create_empty_run_receipt();
            let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
            let lines: Vec<&str> = csv.trim().split('\n').collect();
            assert_eq!(lines.len(), 2, "should have header + 1 data row");
            assert!(lines[0].starts_with("bench_name,"));
            assert!(csv.contains("empty-bench"));
        }

        // JSONL of an empty run parses and reports zero samples.
        #[test]
        fn empty_run_receipt_jsonl_is_valid() {
            let receipt = create_empty_run_receipt();
            let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
            let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
            assert_eq!(parsed["bench_name"], "empty-bench");
            assert_eq!(parsed["sample_count"], 0);
        }

        // HTML of an empty run is a complete document with a table.
        #[test]
        fn empty_run_receipt_html_is_valid() {
            let receipt = create_empty_run_receipt();
            let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
            assert!(html.starts_with("<!doctype html>"));
            assert!(html.contains("<table"));
            assert!(html.contains("</table>"));
            assert!(html.contains("empty-bench"));
        }

        // Prometheus output always includes the mandatory wall-time and
        // sample-count gauges, even with no optional metrics.
        #[test]
        fn empty_run_receipt_prometheus_is_valid() {
            let receipt = create_empty_run_receipt();
            let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
            assert!(prom.contains("perfgate_run_wall_ms_median"));
            assert!(prom.contains("bench=\"empty-bench\""));
            assert!(prom.contains("perfgate_run_sample_count"));
        }

        // With no deltas, compare CSV is just the header line.
        #[test]
        fn empty_compare_receipt_csv_has_header_only() {
            let receipt = create_empty_compare_receipt();
            let csv = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
            let lines: Vec<&str> = csv.trim().split('\n').collect();
            assert_eq!(lines.len(), 1, "should have header only with no deltas");
            assert!(lines[0].starts_with("bench_name,metric,"));
        }

        // With no deltas, compare JSONL is empty.
        #[test]
        fn empty_compare_receipt_jsonl_is_empty() {
            let receipt = create_empty_compare_receipt();
            let jsonl = ExportUseCase::export_compare(&receipt, ExportFormat::Jsonl).unwrap();
            assert!(
                jsonl.trim().is_empty(),
                "JSONL should be empty for no deltas"
            );
        }

        // With no deltas, compare HTML still has a complete table skeleton.
        #[test]
        fn empty_compare_receipt_html_has_valid_structure() {
            let receipt = create_empty_compare_receipt();
            let html = ExportUseCase::export_compare(&receipt, ExportFormat::Html).unwrap();
            assert!(html.starts_with("<!doctype html>"));
            assert!(html.contains("<table"));
            assert!(html.contains("</table>"));
            assert!(html.contains("<thead>"));
            assert!(html.contains("</tbody>"));
        }
1313
1314 #[test]
1315 fn empty_compare_receipt_prometheus_is_empty() {
1316 let receipt = create_empty_compare_receipt();
1317 let prom = ExportUseCase::export_compare(&receipt, ExportFormat::Prometheus).unwrap();
1318 assert!(
1319 prom.trim().is_empty(),
1320 "Prometheus output should be empty for no deltas"
1321 );
1322 }
1323
1324 #[test]
1327 fn csv_bench_name_with_comma() {
1328 let receipt = create_run_receipt_with_bench_name("bench,with,commas");
1329 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1330 assert!(
1331 csv.contains("\"bench,with,commas\""),
1332 "comma-containing bench name should be quoted"
1333 );
1334 let lines: Vec<&str> = csv.trim().split('\n').collect();
1335 assert_eq!(lines.len(), 2, "should still have exactly 2 lines");
1336 }
1337
1338 #[test]
1339 fn csv_bench_name_with_quotes() {
1340 let receipt = create_run_receipt_with_bench_name("bench\"quoted\"name");
1341 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1342 assert!(
1343 csv.contains("\"bench\"\"quoted\"\"name\""),
1344 "quotes should be escaped as double-quotes in CSV"
1345 );
1346 }
1347
1348 #[test]
1349 fn csv_bench_name_with_newline() {
1350 let receipt = create_run_receipt_with_bench_name("bench\nwith\nnewlines");
1351 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1352 assert!(
1353 csv.contains("\"bench\nwith\nnewlines\""),
1354 "newline-containing bench name should be quoted"
1355 );
1356 }
1357
1358 #[test]
1359 fn csv_bench_name_with_commas_and_quotes() {
1360 let receipt = create_run_receipt_with_bench_name("a,\"b\",c");
1361 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1362 assert!(csv.contains("\"a,\"\"b\"\",c\""));
1364 }
1365
1366 #[test]
1369 fn jsonl_bench_name_with_unicode() {
1370 let receipt = create_run_receipt_with_bench_name("ベンチマーク-速度");
1371 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1372 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1373 assert_eq!(parsed["bench_name"], "ベンチマーク-速度");
1374 }
1375
1376 #[test]
1377 fn jsonl_bench_name_with_emoji() {
1378 let receipt = create_run_receipt_with_bench_name("bench-🚀-fast");
1379 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1380 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1381 assert_eq!(parsed["bench_name"], "bench-🚀-fast");
1382 }
1383
1384 #[test]
1385 fn jsonl_bench_name_with_special_json_chars() {
1386 let receipt = create_run_receipt_with_bench_name("bench\\with\"special\tchars");
1387 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1388 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1389 assert_eq!(parsed["bench_name"], "bench\\with\"special\tchars");
1390 }
1391
1392 #[test]
1395 fn html_run_with_all_optional_metrics_none() {
1396 let receipt = create_empty_run_receipt();
1397 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1398 assert!(html.contains("<html>"));
1399 assert!(html.contains("</html>"));
1400 assert!(html.contains("empty-bench"));
1402 }
1403
1404 #[test]
1405 fn html_bench_name_with_html_chars() {
1406 let receipt = create_run_receipt_with_bench_name("<script>alert('xss')</script>");
1407 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1408 assert!(
1409 !html.contains("<script>"),
1410 "HTML special chars should be escaped"
1411 );
1412 assert!(html.contains("<script>"));
1413 }
1414
1415 #[test]
1418 fn prometheus_bench_name_with_quotes() {
1419 let receipt = create_run_receipt_with_bench_name("bench\"name");
1420 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1421 assert!(
1422 prom.contains("bench="),
1423 "Prometheus output should have bench label"
1424 );
1425 assert!(
1426 !prom.contains("bench=\"bench\"name\""),
1427 "raw quotes should be escaped"
1428 );
1429 assert!(prom.contains("bench=\"bench\\\"name\""));
1430 }
1431
1432 #[test]
1433 fn prometheus_bench_name_with_backslash() {
1434 let receipt = create_run_receipt_with_bench_name("bench\\path");
1435 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1436 assert!(prom.contains("bench=\"bench\\\\path\""));
1437 }
1438
1439 #[test]
1440 fn prometheus_compare_with_all_metric_types() {
1441 let mut receipt = create_empty_compare_receipt();
1442 receipt.bench.name = "full-metrics".to_string();
1443 receipt.deltas.insert(
1444 Metric::WallMs,
1445 Delta {
1446 baseline: 100.0,
1447 current: 105.0,
1448 ratio: 1.05,
1449 pct: 0.05,
1450 regression: 0.05,
1451 cv: None,
1452 noise_threshold: None,
1453 statistic: MetricStatistic::Median,
1454 significance: None,
1455 status: MetricStatus::Pass,
1456 },
1457 );
1458 receipt.deltas.insert(
1459 Metric::MaxRssKb,
1460 Delta {
1461 baseline: 100.0,
1462 current: 105.0,
1463 ratio: 1.05,
1464 pct: 0.05,
1465 regression: 0.05,
1466 cv: None,
1467 noise_threshold: None,
1468 statistic: MetricStatistic::Median,
1469 significance: None,
1470 status: MetricStatus::Pass,
1471 },
1472 );
1473 let prom = ExportUseCase::export_compare(&receipt, ExportFormat::Prometheus).unwrap();
1474 assert!(prom.contains("metric=\"wall_ms\""));
1475 assert!(prom.contains("metric=\"max_rss_kb\""));
1476 assert!(prom.contains("perfgate_compare_baseline_value"));
1477 assert!(prom.contains("perfgate_compare_current_value"));
1478 assert!(prom.contains("perfgate_compare_status"));
1479 }
1480
1481 #[test]
1484 fn single_sample_run_exports_all_formats() {
1485 let receipt = create_run_receipt_with_bench_name("single");
1486
1487 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1488 assert!(csv.contains("single"));
1489 assert_eq!(csv.trim().lines().count(), 2);
1490
1491 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1492 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1493 assert_eq!(parsed["sample_count"], 1);
1494
1495 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1496 assert!(html.contains("<td>single</td>"));
1497
1498 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1499 assert!(prom.contains("perfgate_run_sample_count{bench=\"single\"} 1"));
1500 }
1501
1502 #[test]
1505 fn huge_values_run_receipt() {
1506 let mut receipt = create_empty_run_receipt();
1507 receipt.bench.name = "huge".to_string();
1508 receipt.stats.wall_ms = U64Summary::new(u64::MAX, u64::MAX - 1, u64::MAX);
1509 receipt.stats.max_rss_kb = Some(U64Summary::new(u64::MAX, u64::MAX, u64::MAX));
1510 receipt.stats.io_read_bytes = Some(U64Summary::new(u64::MAX, u64::MAX, u64::MAX));
1511
1512 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1513 assert!(csv.contains(&u64::MAX.to_string()));
1514
1515 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1516 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1517 assert_eq!(parsed["wall_ms_median"], u64::MAX);
1518
1519 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1520 assert!(html.contains(&u64::MAX.to_string()));
1521
1522 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1523 assert!(prom.contains(&u64::MAX.to_string()));
1524 }
1525
1526 #[test]
1529 fn warmup_only_samples_count_zero() {
1530 let mut receipt = create_empty_run_receipt();
1531 receipt.samples = vec![
1532 Sample {
1533 wall_ms: 10,
1534 exit_code: 0,
1535 warmup: true,
1536 timed_out: false,
1537 cpu_ms: None,
1538 page_faults: None,
1539 ctx_switches: None,
1540 max_rss_kb: None,
1541 io_read_bytes: None,
1542 io_write_bytes: None,
1543 network_packets: None,
1544 energy_uj: None,
1545 binary_bytes: None,
1546 stdout: None,
1547 stderr: None,
1548 },
1549 Sample {
1550 wall_ms: 11,
1551 exit_code: 0,
1552 warmup: true,
1553 timed_out: false,
1554 cpu_ms: None,
1555 page_faults: None,
1556 ctx_switches: None,
1557 max_rss_kb: None,
1558 io_read_bytes: None,
1559 io_write_bytes: None,
1560 network_packets: None,
1561 energy_uj: None,
1562 binary_bytes: None,
1563 stdout: None,
1564 stderr: None,
1565 },
1566 ];
1567
1568 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1569 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1570 assert_eq!(parsed["sample_count"], 0);
1571
1572 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1573 let data_line = csv.lines().nth(1).unwrap();
1575 assert!(
1576 data_line.contains(",0,"),
1577 "warmup-only should yield sample_count 0"
1578 );
1579 }
1580
1581 #[test]
1584 fn csv_bench_name_with_carriage_return() {
1585 let receipt = create_run_receipt_with_bench_name("bench\rwith\rcr");
1586 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1587 assert!(
1588 csv.contains("\"bench\rwith\rcr\""),
1589 "carriage-return-containing bench name should be quoted"
1590 );
1591 }
1592
1593 #[test]
1596 fn csv_compare_special_chars_in_bench_name() {
1597 let mut receipt = create_empty_compare_receipt();
1598 receipt.bench.name = "bench,\"special\"\nname".to_string();
1599 receipt.deltas.insert(
1600 Metric::WallMs,
1601 Delta {
1602 baseline: 100.0,
1603 current: 105.0,
1604 ratio: 1.05,
1605 pct: 0.05,
1606 regression: 0.05,
1607 cv: None,
1608 noise_threshold: None,
1609 statistic: MetricStatistic::Median,
1610 significance: None,
1611 status: MetricStatus::Pass,
1612 },
1613 );
1614 let csv = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
1615 assert!(csv.contains("\"bench,\"\"special\"\"\nname\""));
1617 }
1618
1619 #[test]
1622 fn unicode_bench_name_all_formats() {
1623 let name = "日本語ベンチ_αβγ_🚀";
1624 let receipt = create_run_receipt_with_bench_name(name);
1625
1626 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1627 assert!(csv.contains(name));
1628
1629 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1630 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1631 assert_eq!(parsed["bench_name"], name);
1632
1633 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1634 assert!(html.contains(name));
1635
1636 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1637 assert!(prom.contains(name));
1638 }
1639
1640 #[test]
1643 fn html_compare_mixed_statuses() {
1644 let mut receipt = create_empty_compare_receipt();
1645 receipt.bench.name = "mixed".to_string();
1646 for (metric, status) in [
1647 (Metric::WallMs, MetricStatus::Pass),
1648 (Metric::CpuMs, MetricStatus::Warn),
1649 (Metric::MaxRssKb, MetricStatus::Fail),
1650 ] {
1651 receipt.deltas.insert(
1652 metric,
1653 Delta {
1654 baseline: 100.0,
1655 current: 120.0,
1656 ratio: 1.2,
1657 pct: 0.2,
1658 regression: 0.2,
1659 cv: None,
1660 noise_threshold: None,
1661 statistic: MetricStatistic::Median,
1662 significance: None,
1663 status,
1664 },
1665 );
1666 }
1667 let html = ExportUseCase::export_compare(&receipt, ExportFormat::Html).unwrap();
1668 assert!(html.contains("<td>pass</td>"));
1669 assert!(html.contains("<td>warn</td>"));
1670 assert!(html.contains("<td>fail</td>"));
1671 assert_eq!(html.matches("<tr><td>").count(), 3);
1673 }
1674
1675 #[test]
1678 fn html_empty_bench_name() {
1679 let receipt = create_run_receipt_with_bench_name("");
1680 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1681 assert!(html.contains("<td></td>"));
1682 assert!(html.contains("<html>"));
1683 }
1684
1685 #[test]
1688 fn prometheus_run_all_optional_metrics_present() {
1689 let mut receipt = create_empty_run_receipt();
1690 receipt.bench.name = "full".to_string();
1691 receipt.stats.cpu_ms = Some(U64Summary::new(50, 48, 52));
1692 receipt.stats.page_faults = Some(U64Summary::new(10, 8, 12));
1693 receipt.stats.ctx_switches = Some(U64Summary::new(5, 3, 7));
1694 receipt.stats.max_rss_kb = Some(U64Summary::new(2048, 2000, 2100));
1695 receipt.stats.io_read_bytes = Some(U64Summary::new(1000, 900, 1100));
1696 receipt.stats.io_write_bytes = Some(U64Summary::new(500, 400, 600));
1697 receipt.stats.network_packets = Some(U64Summary::new(10, 8, 12));
1698 receipt.stats.energy_uj = Some(U64Summary::new(1000, 900, 1100));
1699 receipt.stats.binary_bytes = Some(U64Summary::new(100000, 99000, 101000));
1700 receipt.stats.throughput_per_s = Some(F64Summary::new(1234.567890, 1200.0, 1300.0));
1701
1702 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1703 assert!(prom.contains("perfgate_run_cpu_ms_median{bench=\"full\"} 50"));
1704 assert!(prom.contains("perfgate_run_page_faults_median{bench=\"full\"} 10"));
1705 assert!(prom.contains("perfgate_run_ctx_switches_median{bench=\"full\"} 5"));
1706 assert!(prom.contains("perfgate_run_max_rss_kb_median{bench=\"full\"} 2048"));
1707 assert!(prom.contains("perfgate_run_io_read_bytes_median{bench=\"full\"} 1000"));
1708 assert!(prom.contains("perfgate_run_io_write_bytes_median{bench=\"full\"} 500"));
1709 assert!(prom.contains("perfgate_run_network_packets_median{bench=\"full\"} 10"));
1710 assert!(prom.contains("perfgate_run_energy_uj_median{bench=\"full\"} 1000"));
1711 assert!(prom.contains("perfgate_run_binary_bytes_median{bench=\"full\"} 100000"));
1712 assert!(
1713 prom.contains("perfgate_run_throughput_per_s_median{bench=\"full\"} 1234.567890")
1714 );
1715 }
1716
1717 #[test]
1720 fn prometheus_compare_status_codes() {
1721 let mut receipt = create_empty_compare_receipt();
1722 receipt.bench.name = "status-test".to_string();
1723 for (metric, status, expected_code) in [
1724 (Metric::WallMs, MetricStatus::Pass, "0"),
1725 (Metric::CpuMs, MetricStatus::Warn, "1"),
1726 (Metric::MaxRssKb, MetricStatus::Fail, "2"),
1727 ] {
1728 receipt.deltas.insert(
1729 metric,
1730 Delta {
1731 baseline: 100.0,
1732 current: 110.0,
1733 ratio: 1.1,
1734 pct: 0.1,
1735 regression: 0.1,
1736 cv: None,
1737 noise_threshold: None,
1738 statistic: MetricStatistic::Median,
1739 significance: None,
1740 status,
1741 },
1742 );
1743 receipt
1744 .budgets
1745 .insert(metric, Budget::new(0.2, 0.15, Direction::Lower));
1746 let _ = expected_code; }
1748
1749 let prom = ExportUseCase::export_compare(&receipt, ExportFormat::Prometheus).unwrap();
1750 assert!(prom.contains("status=\"pass\"} 0"));
1751 assert!(prom.contains("status=\"warn\"} 1"));
1752 assert!(prom.contains("status=\"fail\"} 2"));
1753 }
1754
1755 #[test]
1758 fn jsonl_compare_fields_match_receipt() {
1759 let receipt = create_test_compare_receipt();
1760 let jsonl = ExportUseCase::export_compare(&receipt, ExportFormat::Jsonl).unwrap();
1761
1762 let lines: Vec<&str> = jsonl.trim().lines().collect();
1763 assert_eq!(lines.len(), receipt.deltas.len());
1764
1765 for line in lines {
1766 let parsed: serde_json::Value = serde_json::from_str(line).unwrap();
1767 assert_eq!(parsed["bench_name"], "alpha-bench");
1768 let metric_name = parsed["metric"].as_str().unwrap();
1769 assert!(
1770 ["wall_ms", "max_rss_kb"].contains(&metric_name),
1771 "unexpected metric: {}",
1772 metric_name
1773 );
1774 assert!(parsed["baseline_value"].as_f64().unwrap() > 0.0);
1775 assert!(parsed["current_value"].as_f64().unwrap() > 0.0);
1776 let status = parsed["status"].as_str().unwrap();
1777 assert!(
1778 ["pass", "warn", "fail"].contains(&status),
1779 "unexpected status: {}",
1780 status
1781 );
1782 }
1783 }
1784
1785 #[test]
1788 fn jsonl_run_round_trip() {
1789 let receipt = create_test_run_receipt();
1790 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1791 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1792
1793 assert_eq!(parsed["bench_name"], receipt.bench.name);
1794 assert_eq!(parsed["wall_ms_median"], receipt.stats.wall_ms.median);
1795 assert_eq!(parsed["wall_ms_min"], receipt.stats.wall_ms.min);
1796 assert_eq!(parsed["wall_ms_max"], receipt.stats.wall_ms.max);
1797 assert_eq!(
1798 parsed["cpu_ms_median"],
1799 receipt.stats.cpu_ms.as_ref().unwrap().median
1800 );
1801 assert_eq!(
1802 parsed["max_rss_kb_median"],
1803 receipt.stats.max_rss_kb.as_ref().unwrap().median
1804 );
1805 assert_eq!(
1806 parsed["sample_count"],
1807 receipt.samples.iter().filter(|s| !s.warmup).count()
1808 );
1809 assert_eq!(parsed["timestamp"], receipt.run.started_at);
1810 }
1811
1812 #[test]
1815 fn html_run_all_optional_metrics_present() {
1816 let mut receipt = create_empty_run_receipt();
1817 receipt.bench.name = "full-html".to_string();
1818 receipt.stats.cpu_ms = Some(U64Summary::new(50, 48, 52));
1819 receipt.stats.io_read_bytes = Some(U64Summary::new(1000, 900, 1100));
1820 receipt.stats.throughput_per_s = Some(F64Summary::new(999.123456, 900.0, 1100.0));
1821
1822 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1823 assert!(html.contains("<td>50</td>"));
1824 assert!(html.contains("<td>1000</td>"));
1825 assert!(html.contains("999.123456"));
1826 assert!(html.contains("full-html"));
1827 }
1828
1829 #[test]
1832 fn csv_escape_empty_string() {
1833 assert_eq!(csv_escape(""), "");
1834 }
1835
1836 #[test]
1837 fn csv_escape_only_quotes() {
1838 assert_eq!(csv_escape("\"\"\""), "\"\"\"\"\"\"\"\"");
1839 }
1840
1841 #[test]
1842 fn csv_escape_no_special_chars() {
1843 assert_eq!(csv_escape("plain-bench_name.v2"), "plain-bench_name.v2");
1844 }
1845
    // NOTE(review): the Prometheus text exposition format requires `\n` in a
    // label value to be escaped as the two characters `\` `n`; this test pins
    // the current behavior of passing newlines through verbatim — confirm
    // this is intentional before relying on the exported output upstream.
    #[test]
    fn prometheus_escape_newline_preserved() {
        let result = prometheus_escape_label_value("a\nb");
        assert_eq!(result, "a\nb");
    }
1855
1856 #[test]
1857 fn prometheus_escape_empty() {
1858 assert_eq!(prometheus_escape_label_value(""), "");
1859 }
1860
1861 #[test]
1864 fn html_escape_all_special_chars_combined() {
1865 assert_eq!(
1866 html_escape("<tag attr=\"val\">&</tag>"),
1867 "<tag attr="val">&</tag>"
1868 );
1869 }
1870
1871 #[test]
1872 fn html_escape_empty() {
1873 assert_eq!(html_escape(""), "");
1874 }
1875
1876 #[test]
1879 fn format_parse_prom_alias() {
1880 assert_eq!(ExportFormat::parse("prom"), Some(ExportFormat::Prometheus));
1881 assert_eq!(ExportFormat::parse("PROM"), Some(ExportFormat::Prometheus));
1882 }
1883
1884 #[test]
1885 fn format_parse_empty_string() {
1886 assert_eq!(ExportFormat::parse(""), None);
1887 }
1888
1889 #[test]
1892 fn compare_csv_threshold_percentage() {
1893 let receipt = create_test_compare_receipt();
1894 let csv = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
1895 assert!(csv.contains("20.000000"));
1897 assert!(csv.contains("15.000000"));
1899 }
1900
1901 #[test]
1904 fn compare_regression_pct_is_percentage() {
1905 let receipt = create_test_compare_receipt();
1906 let jsonl = ExportUseCase::export_compare(&receipt, ExportFormat::Jsonl).unwrap();
1907
1908 for line in jsonl.trim().lines() {
1909 let parsed: serde_json::Value = serde_json::from_str(line).unwrap();
1910 let metric = parsed["metric"].as_str().unwrap();
1911 let regression_pct = parsed["regression_pct"].as_f64().unwrap();
1912 match metric {
1913 "wall_ms" => {
1914 assert!((regression_pct - 10.0).abs() < 0.01);
1916 }
1917 "max_rss_kb" => {
1918 assert!((regression_pct - 25.0).abs() < 0.01);
1920 }
1921 _ => panic!("unexpected metric: {}", metric),
1922 }
1923 }
1924 }
1925 }
1926}
1927
1928#[cfg(test)]
1929mod property_tests {
1930 use super::*;
1931 use perfgate_types::{
1932 BenchMeta, Budget, COMPARE_SCHEMA_V1, CompareRef, Delta, Direction, F64Summary, HostInfo,
1933 Metric, MetricStatistic, MetricStatus, RUN_SCHEMA_V1, RunMeta, Sample, Stats, ToolInfo,
1934 U64Summary, Verdict, VerdictCounts, VerdictStatus,
1935 };
1936 use proptest::prelude::*;
1937 use std::collections::BTreeMap;
1938
1939 fn non_empty_string() -> impl Strategy<Value = String> {
1940 "[a-zA-Z0-9_-]{1,20}".prop_map(|s| s)
1941 }
1942
1943 fn rfc3339_timestamp() -> impl Strategy<Value = String> {
1944 (
1945 2020u32..2030,
1946 1u32..13,
1947 1u32..29,
1948 0u32..24,
1949 0u32..60,
1950 0u32..60,
1951 )
1952 .prop_map(|(year, month, day, hour, min, sec)| {
1953 format!(
1954 "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}Z",
1955 year, month, day, hour, min, sec
1956 )
1957 })
1958 }
1959
1960 fn tool_info_strategy() -> impl Strategy<Value = ToolInfo> {
1961 (non_empty_string(), non_empty_string())
1962 .prop_map(|(name, version)| ToolInfo { name, version })
1963 }
1964
1965 fn host_info_strategy() -> impl Strategy<Value = HostInfo> {
1966 (non_empty_string(), non_empty_string()).prop_map(|(os, arch)| HostInfo {
1967 os,
1968 arch,
1969 cpu_count: None,
1970 memory_bytes: None,
1971 hostname_hash: None,
1972 })
1973 }
1974
1975 fn run_meta_strategy() -> impl Strategy<Value = RunMeta> {
1976 (
1977 non_empty_string(),
1978 rfc3339_timestamp(),
1979 rfc3339_timestamp(),
1980 host_info_strategy(),
1981 )
1982 .prop_map(|(id, started_at, ended_at, host)| RunMeta {
1983 id,
1984 started_at,
1985 ended_at,
1986 host,
1987 })
1988 }
1989
1990 fn bench_meta_strategy() -> impl Strategy<Value = BenchMeta> {
1991 (
1992 non_empty_string(),
1993 proptest::option::of(non_empty_string()),
1994 proptest::collection::vec(non_empty_string(), 1..5),
1995 1u32..100,
1996 0u32..10,
1997 proptest::option::of(1u64..10000),
1998 proptest::option::of(100u64..60000),
1999 )
2000 .prop_map(
2001 |(name, cwd, command, repeat, warmup, work_units, timeout_ms)| BenchMeta {
2002 name,
2003 cwd,
2004 command,
2005 repeat,
2006 warmup,
2007 work_units,
2008 timeout_ms,
2009 },
2010 )
2011 }
2012
2013 fn sample_strategy() -> impl Strategy<Value = Sample> {
2014 (
2015 0u64..100000,
2016 -128i32..128,
2017 any::<bool>(),
2018 any::<bool>(),
2019 (
2020 proptest::option::of(0u64..1000000), proptest::option::of(0u64..1000000), proptest::option::of(0u64..1000000), proptest::option::of(0u64..1000000), ),
2025 (
2026 proptest::option::of(0u64..1000000), proptest::option::of(0u64..1000000), proptest::option::of(0u64..1000000), proptest::option::of(0u64..1000000), proptest::option::of(0u64..100000000), ),
2032 )
2033 .prop_map(
2034 |(
2035 wall_ms,
2036 exit_code,
2037 warmup,
2038 timed_out,
2039 (cpu_ms, page_faults, ctx_switches, max_rss_kb),
2040 (io_read_bytes, io_write_bytes, network_packets, energy_uj, binary_bytes),
2041 )| Sample {
2042 wall_ms,
2043 exit_code,
2044 warmup,
2045 timed_out,
2046 cpu_ms,
2047 page_faults,
2048 ctx_switches,
2049 max_rss_kb,
2050 io_read_bytes,
2051 io_write_bytes,
2052 network_packets,
2053 energy_uj,
2054 binary_bytes,
2055 stdout: None,
2056 stderr: None,
2057 },
2058 )
2059 }
2060
2061 fn u64_summary_strategy() -> impl Strategy<Value = U64Summary> {
2062 (0u64..1000000, 0u64..1000000, 0u64..1000000).prop_map(|(a, b, c)| {
2063 let mut vals = [a, b, c];
2064 vals.sort();
2065 U64Summary::new(vals[1], vals[0], vals[2])
2066 })
2067 }
2068
2069 fn f64_summary_strategy() -> impl Strategy<Value = F64Summary> {
2070 (0.0f64..1000000.0, 0.0f64..1000000.0, 0.0f64..1000000.0).prop_map(|(a, b, c)| {
2071 let mut vals = [a, b, c];
2072 vals.sort_by(|x, y| x.partial_cmp(y).unwrap());
2073 F64Summary::new(vals[1], vals[0], vals[2])
2074 })
2075 }
2076
2077 fn stats_strategy() -> impl Strategy<Value = Stats> {
2078 (
2079 u64_summary_strategy(),
2080 (
2081 proptest::option::of(u64_summary_strategy()), proptest::option::of(u64_summary_strategy()), proptest::option::of(u64_summary_strategy()), proptest::option::of(u64_summary_strategy()), ),
2086 (
2087 proptest::option::of(u64_summary_strategy()), proptest::option::of(u64_summary_strategy()), proptest::option::of(u64_summary_strategy()), proptest::option::of(u64_summary_strategy()), proptest::option::of(u64_summary_strategy()), ),
2093 proptest::option::of(f64_summary_strategy()),
2094 )
2095 .prop_map(
2096 |(
2097 wall_ms,
2098 (cpu_ms, page_faults, ctx_switches, max_rss_kb),
2099 (io_read_bytes, io_write_bytes, network_packets, energy_uj, binary_bytes),
2100 throughput_per_s,
2101 )| Stats {
2102 wall_ms,
2103 cpu_ms,
2104 page_faults,
2105 ctx_switches,
2106 max_rss_kb,
2107 io_read_bytes,
2108 io_write_bytes,
2109 network_packets,
2110 energy_uj,
2111 binary_bytes,
2112 throughput_per_s,
2113 },
2114 )
2115 }
2116
2117 fn run_receipt_strategy() -> impl Strategy<Value = RunReceipt> {
2118 (
2119 tool_info_strategy(),
2120 run_meta_strategy(),
2121 bench_meta_strategy(),
2122 proptest::collection::vec(sample_strategy(), 1..10),
2123 stats_strategy(),
2124 )
2125 .prop_map(|(tool, run, bench, samples, stats)| RunReceipt {
2126 schema: RUN_SCHEMA_V1.to_string(),
2127 tool,
2128 run,
2129 bench,
2130 samples,
2131 stats,
2132 })
2133 }
2134
    /// Strategy choosing either budget direction with equal weight.
    fn direction_strategy() -> impl Strategy<Value = Direction> {
        prop_oneof![Just(Direction::Lower), Just(Direction::Higher),]
    }
2138
2139 fn budget_strategy() -> impl Strategy<Value = Budget> {
2140 (0.01f64..1.0, 0.01f64..1.0, direction_strategy()).prop_map(
2141 |(threshold, warn_factor, direction)| {
2142 let warn_threshold = threshold * warn_factor;
2143 Budget {
2144 noise_threshold: None,
2145 noise_policy: perfgate_types::NoisePolicy::Ignore,
2146 threshold,
2147 warn_threshold,
2148 direction,
2149 }
2150 },
2151 )
2152 }
2153
    /// Strategy choosing any of the four per-metric statuses, equally weighted.
    fn metric_status_strategy() -> impl Strategy<Value = MetricStatus> {
        prop_oneof![
            Just(MetricStatus::Pass),
            Just(MetricStatus::Warn),
            Just(MetricStatus::Fail),
            Just(MetricStatus::Skip),
        ]
    }
2162
2163 fn delta_strategy() -> impl Strategy<Value = Delta> {
2164 (0.1f64..10000.0, 0.1f64..10000.0, metric_status_strategy()).prop_map(
2165 |(baseline, current, status)| {
2166 let ratio = current / baseline;
2167 let pct = (current - baseline) / baseline;
2168 let regression = if pct > 0.0 { pct } else { 0.0 };
2169 Delta {
2170 baseline,
2171 current,
2172 ratio,
2173 pct,
2174 regression,
2175 cv: None,
2176 noise_threshold: None,
2177 statistic: MetricStatistic::Median,
2178 significance: None,
2179 status,
2180 }
2181 },
2182 )
2183 }
2184
    /// Strategy choosing any of the four overall verdict statuses, equally
    /// weighted.
    fn verdict_status_strategy() -> impl Strategy<Value = VerdictStatus> {
        prop_oneof![
            Just(VerdictStatus::Pass),
            Just(VerdictStatus::Warn),
            Just(VerdictStatus::Fail),
            Just(VerdictStatus::Skip),
        ]
    }
2193
2194 fn verdict_counts_strategy() -> impl Strategy<Value = VerdictCounts> {
2195 (0u32..10, 0u32..10, 0u32..10, 0u32..10).prop_map(|(pass, warn, fail, skip)| {
2196 VerdictCounts {
2197 pass,
2198 warn,
2199 fail,
2200 skip,
2201 }
2202 })
2203 }
2204
2205 fn verdict_strategy() -> impl Strategy<Value = Verdict> {
2206 (
2207 verdict_status_strategy(),
2208 verdict_counts_strategy(),
2209 proptest::collection::vec("[a-zA-Z0-9 ]{1,50}", 0..5),
2210 )
2211 .prop_map(|(status, counts, reasons)| Verdict {
2212 status,
2213 counts,
2214 reasons,
2215 })
2216 }
2217
    /// Strategy choosing any one of the ten metric kinds, equally weighted.
    fn metric_strategy() -> impl Strategy<Value = Metric> {
        prop_oneof![
            Just(Metric::BinaryBytes),
            Just(Metric::CpuMs),
            Just(Metric::CtxSwitches),
            Just(Metric::IoReadBytes),
            Just(Metric::IoWriteBytes),
            Just(Metric::MaxRssKb),
            Just(Metric::NetworkPackets),
            Just(Metric::PageFaults),
            Just(Metric::ThroughputPerS),
            Just(Metric::WallMs),
        ]
    }
2232
    /// Strategy producing 1–7 metric→budget entries; `BTreeMap` keeps keys
    /// sorted, matching the receipt's map type.
    fn budgets_map_strategy() -> impl Strategy<Value = BTreeMap<Metric, Budget>> {
        proptest::collection::btree_map(metric_strategy(), budget_strategy(), 1..8)
    }
2236
    /// Strategy producing 1–7 metric→delta entries; `BTreeMap` keeps keys
    /// sorted, matching the receipt's map type.
    fn deltas_map_strategy() -> impl Strategy<Value = BTreeMap<Metric, Delta>> {
        proptest::collection::btree_map(metric_strategy(), delta_strategy(), 1..8)
    }
2240
2241 fn compare_ref_strategy() -> impl Strategy<Value = CompareRef> {
2242 (
2243 proptest::option::of(non_empty_string()),
2244 proptest::option::of(non_empty_string()),
2245 )
2246 .prop_map(|(path, run_id)| CompareRef { path, run_id })
2247 }
2248
2249 fn compare_receipt_strategy() -> impl Strategy<Value = CompareReceipt> {
2250 (
2251 tool_info_strategy(),
2252 bench_meta_strategy(),
2253 compare_ref_strategy(),
2254 compare_ref_strategy(),
2255 budgets_map_strategy(),
2256 deltas_map_strategy(),
2257 verdict_strategy(),
2258 )
2259 .prop_map(
2260 |(tool, bench, baseline_ref, current_ref, budgets, deltas, verdict)| {
2261 CompareReceipt {
2262 schema: COMPARE_SCHEMA_V1.to_string(),
2263 tool,
2264 bench,
2265 baseline_ref,
2266 current_ref,
2267 budgets,
2268 deltas,
2269 verdict,
2270 }
2271 },
2272 )
2273 }
2274
2275 proptest! {
2276 #![proptest_config(ProptestConfig::with_cases(50))]
2277
2278 #[test]
2279 fn run_export_csv_has_header_and_data(receipt in run_receipt_strategy()) {
2280 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
2281
2282 prop_assert!(csv.starts_with("bench_name,wall_ms_median,wall_ms_min,wall_ms_max,binary_bytes_median,cpu_ms_median,ctx_switches_median,max_rss_kb_median,page_faults_median,io_read_bytes_median,io_write_bytes_median,network_packets_median,energy_uj_median,throughput_median,sample_count,timestamp\n"));
2283
2284 let lines: Vec<&str> = csv.trim().split('\n').collect();
2285 prop_assert_eq!(lines.len(), 2);
2286
2287 let bench_in_csv = csv.contains(&receipt.bench.name) || csv.contains(&format!("\"{}\"", receipt.bench.name));
2288 prop_assert!(bench_in_csv, "CSV should contain bench name");
2289 }
2290
2291 #[test]
2292 fn run_export_jsonl_is_valid_json(receipt in run_receipt_strategy()) {
2293 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
2294
2295 let lines: Vec<&str> = jsonl.trim().split('\n').collect();
2296 prop_assert_eq!(lines.len(), 1);
2297
2298 let parsed: Result<serde_json::Value, _> = serde_json::from_str(lines[0]);
2299 prop_assert!(parsed.is_ok());
2300
2301 let json = parsed.unwrap();
2302 prop_assert_eq!(json["bench_name"].as_str().unwrap(), receipt.bench.name);
2303 }
2304
2305 #[test]
2306 fn compare_export_csv_metrics_sorted(receipt in compare_receipt_strategy()) {
2307 let csv = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
2308
2309 let lines: Vec<&str> = csv.trim().split('\n').skip(1).collect();
2310
2311 let mut metrics: Vec<String> = vec![];
2312 for line in &lines {
2313 let parts: Vec<&str> = line.split(',').collect();
2314 if parts.len() > 1 {
2315 metrics.push(parts[1].trim_matches('"').to_string());
2316 }
2317 }
2318
2319 let mut sorted_metrics = metrics.clone();
2320 sorted_metrics.sort();
2321
2322 prop_assert_eq!(metrics, sorted_metrics, "Metrics should be sorted alphabetically");
2323 }
2324
2325 #[test]
2326 fn compare_export_jsonl_line_per_metric(receipt in compare_receipt_strategy()) {
2327 let jsonl = ExportUseCase::export_compare(&receipt, ExportFormat::Jsonl).unwrap();
2328
2329 let lines: Vec<&str> = jsonl.trim().split('\n').filter(|s| !s.is_empty()).collect();
2330 prop_assert_eq!(lines.len(), receipt.deltas.len());
2331
2332 for line in &lines {
2333 let parsed: Result<serde_json::Value, _> = serde_json::from_str(line);
2334 prop_assert!(parsed.is_ok());
2335 }
2336 }
2337
2338 #[test]
2339 fn export_is_deterministic(receipt in run_receipt_strategy()) {
2340 let csv1 = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
2341 let csv2 = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
2342 prop_assert_eq!(csv1, csv2);
2343
2344 let jsonl1 = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
2345 let jsonl2 = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
2346 prop_assert_eq!(jsonl1, jsonl2);
2347 }
2348
2349 #[test]
2350 fn html_output_contains_valid_structure(receipt in run_receipt_strategy()) {
2351 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
2352
2353 prop_assert!(html.starts_with("<!doctype html>"));
2354 prop_assert!(html.contains("<html>"));
2355 prop_assert!(html.contains("</html>"));
2356 prop_assert!(html.contains("<table"));
2357 prop_assert!(html.contains("</table>"));
2358 prop_assert!(html.contains(&receipt.bench.name));
2359 }
2360
2361 #[test]
2362 fn prometheus_output_valid_format(receipt in run_receipt_strategy()) {
2363 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
2364
2365 prop_assert!(prom.contains("perfgate_run_wall_ms_median"));
2366 let bench_label = format!("bench=\"{}\"", receipt.bench.name);
2367 prop_assert!(prom.contains(&bench_label));
2368
2369 for line in prom.lines() {
2370 if !line.is_empty() {
2371 let has_open = line.chars().any(|c| c == '{');
2372 let has_close = line.chars().any(|c| c == '}');
2373 prop_assert!(has_open, "Prometheus line should contain opening brace");
2374 prop_assert!(has_close, "Prometheus line should contain closing brace");
2375 }
2376 }
2377 }
2378
2379 #[test]
2380 fn csv_escape_preserves_content(receipt in run_receipt_strategy()) {
2381 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
2382
2383 let quoted_bench = format!("\"{}\"", receipt.bench.name);
2384 prop_assert!(csv.contains(&receipt.bench.name) || csv.contains("ed_bench));
2385
2386 for line in csv.lines() {
2387 let quoted_count = line.matches('"').count();
2388 prop_assert!(quoted_count % 2 == 0, "Quotes should be balanced in CSV");
2389 }
2390 }
2391 }
2392}