1use perfgate_types::{CompareReceipt, Metric, MetricStatus, RunReceipt};
58
/// Output formats supported for run/compare receipt exports.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ExportFormat {
    /// Comma-separated values with a fixed header row.
    Csv,
    /// One JSON object per line (JSON Lines).
    Jsonl,
    /// A standalone HTML page containing a single table.
    Html,
    /// Prometheus text exposition format (one sample per line).
    Prometheus,
    /// JUnit-style XML test report.
    JUnit,
}
83
84impl ExportFormat {
85 pub fn parse(s: &str) -> Option<Self> {
96 s.parse().ok()
97 }
98}
99
100impl std::str::FromStr for ExportFormat {
101 type Err = ();
102
103 fn from_str(s: &str) -> Result<Self, Self::Err> {
117 match s.to_lowercase().as_str() {
118 "csv" => Ok(ExportFormat::Csv),
119 "jsonl" => Ok(ExportFormat::Jsonl),
120 "html" => Ok(ExportFormat::Html),
121 "prometheus" | "prom" => Ok(ExportFormat::Prometheus),
122 "junit" | "xml" => Ok(ExportFormat::JUnit),
123 _ => Err(()),
124 }
125 }
126}
127
/// Flattened, export-friendly view of a `RunReceipt`: summary statistics
/// for one benchmark run, ready to serialize into any `ExportFormat`.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct RunExportRow {
    /// Benchmark name from the receipt metadata.
    pub bench_name: String,
    /// Wall-clock time summary in milliseconds.
    pub wall_ms_median: u64,
    pub wall_ms_min: u64,
    pub wall_ms_max: u64,
    // The following medians are `None` when the receipt did not record
    // the corresponding metric.
    pub binary_bytes_median: Option<u64>,
    pub cpu_ms_median: Option<u64>,
    pub ctx_switches_median: Option<u64>,
    pub energy_uj_median: Option<u64>,
    pub max_rss_kb_median: Option<u64>,
    pub page_faults_median: Option<u64>,
    pub io_read_bytes_median: Option<u64>,
    pub io_write_bytes_median: Option<u64>,
    pub network_packets_median: Option<u64>,
    pub throughput_median: Option<f64>,
    /// Number of non-warmup samples in the run.
    pub sample_count: usize,
    /// The run's `started_at` timestamp, copied verbatim.
    pub timestamp: String,
}
175
/// One metric's comparison result, flattened for export.
///
/// All ratio-like fields (`regression_pct`, `threshold`, `warn_threshold`,
/// `cv`, `noise_threshold`) are stored as percentages — the fractional
/// values from the receipt are multiplied by 100 in `compare_to_rows`.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct CompareExportRow {
    pub bench_name: String,
    /// Metric identifier (e.g. "wall_ms"), from `Metric::as_str`.
    pub metric: String,
    pub baseline_value: f64,
    pub current_value: f64,
    /// Relative change, as a percentage.
    pub regression_pct: f64,
    /// "pass" / "warn" / "fail" / "skip", from `status_to_string`.
    pub status: String,
    /// Budget threshold as a percentage; 0.0 when no budget is configured.
    pub threshold: f64,
    pub warn_threshold: Option<f64>,
    pub cv: Option<f64>,
    pub noise_threshold: Option<f64>,
}
211
/// Stateless namespace for exporting run and compare receipts.
pub struct ExportUseCase;
214
impl ExportUseCase {
    /// Export a run receipt as a single row in the requested `format`.
    pub fn export_run(receipt: &RunReceipt, format: ExportFormat) -> anyhow::Result<String> {
        let row = Self::run_to_row(receipt);

        match format {
            ExportFormat::Csv => Self::run_row_to_csv(&row),
            ExportFormat::Jsonl => Self::run_row_to_jsonl(&row),
            ExportFormat::Html => Self::run_row_to_html(&row),
            ExportFormat::Prometheus => Self::run_row_to_prometheus(&row),
            // JUnit also needs the receipt itself (bench name, wall time).
            ExportFormat::JUnit => Self::run_row_to_junit_run(receipt, &row),
        }
    }

    /// Render a run receipt as a JUnit XML report containing a single,
    /// always-passing "execution" testcase.
    fn run_row_to_junit_run(receipt: &RunReceipt, _row: &RunExportRow) -> anyhow::Result<String> {
        let mut out = String::new();
        out.push_str("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
        out.push_str("<testsuites name=\"perfgate\">\n");
        out.push_str(&format!(
            "  <testsuite name=\"{}\" tests=\"1\" failures=\"0\" errors=\"0\">\n",
            html_escape(&receipt.bench.name)
        ));
        out.push_str(&format!(
            "    <testcase name=\"execution\" classname=\"perfgate.{}\" time=\"{}\">\n",
            html_escape(&receipt.bench.name),
            // JUnit `time` is in seconds; wall time is tracked in milliseconds.
            receipt.stats.wall_ms.median as f64 / 1000.0
        ));
        out.push_str("    </testcase>\n");
        out.push_str("  </testsuite>\n");
        out.push_str("</testsuites>\n");
        Ok(out)
    }

    /// Export a compare receipt (one row per metric) in the requested `format`.
    pub fn export_compare(
        receipt: &CompareReceipt,
        format: ExportFormat,
    ) -> anyhow::Result<String> {
        let rows = Self::compare_to_rows(receipt);

        match format {
            ExportFormat::Csv => Self::compare_rows_to_csv(&rows),
            ExportFormat::Jsonl => Self::compare_rows_to_jsonl(&rows),
            ExportFormat::Html => Self::compare_rows_to_html(&rows),
            ExportFormat::Prometheus => Self::compare_rows_to_prometheus(&rows),
            ExportFormat::JUnit => Self::compare_rows_to_junit(receipt, &rows),
        }
    }

    /// Flatten a run receipt into an export row of median/min/max statistics.
    fn run_to_row(receipt: &RunReceipt) -> RunExportRow {
        // Warmup iterations are excluded from the reported sample count.
        let sample_count = receipt.samples.iter().filter(|s| !s.warmup).count();

        RunExportRow {
            bench_name: receipt.bench.name.clone(),
            wall_ms_median: receipt.stats.wall_ms.median,
            wall_ms_min: receipt.stats.wall_ms.min,
            wall_ms_max: receipt.stats.wall_ms.max,
            binary_bytes_median: receipt.stats.binary_bytes.as_ref().map(|s| s.median),
            cpu_ms_median: receipt.stats.cpu_ms.as_ref().map(|s| s.median),
            ctx_switches_median: receipt.stats.ctx_switches.as_ref().map(|s| s.median),
            energy_uj_median: receipt.stats.energy_uj.as_ref().map(|s| s.median),
            max_rss_kb_median: receipt.stats.max_rss_kb.as_ref().map(|s| s.median),
            page_faults_median: receipt.stats.page_faults.as_ref().map(|s| s.median),
            io_read_bytes_median: receipt.stats.io_read_bytes.as_ref().map(|s| s.median),
            io_write_bytes_median: receipt.stats.io_write_bytes.as_ref().map(|s| s.median),
            network_packets_median: receipt.stats.network_packets.as_ref().map(|s| s.median),
            throughput_median: receipt.stats.throughput_per_s.as_ref().map(|s| s.median),
            sample_count,
            timestamp: receipt.run.started_at.clone(),
        }
    }

    /// Convert compare deltas into export rows, sorted by metric name so
    /// output is deterministic regardless of map iteration details.
    fn compare_to_rows(receipt: &CompareReceipt) -> Vec<CompareExportRow> {
        let mut rows: Vec<CompareExportRow> = receipt
            .deltas
            .iter()
            .map(|(metric, delta)| {
                // A metric may have a delta without a configured budget;
                // in that case the exported threshold defaults to 0.
                let budget = receipt.budgets.get(metric);
                let threshold = budget.map(|b| b.threshold).unwrap_or(0.0);
                let warn_threshold = budget.map(|b| b.warn_threshold);

                CompareExportRow {
                    bench_name: receipt.bench.name.clone(),
                    metric: metric_to_string(*metric),
                    baseline_value: delta.baseline,
                    current_value: delta.current,
                    // Receipt stores fractions; export as percentages.
                    regression_pct: delta.pct * 100.0,
                    status: status_to_string(delta.status),
                    threshold: threshold * 100.0,
                    warn_threshold: warn_threshold.map(|t| t * 100.0),
                    cv: delta.cv.map(|cv| cv * 100.0),
                    noise_threshold: delta.noise_threshold.map(|t| t * 100.0),
                }
            })
            .collect();

        rows.sort_by(|a, b| a.metric.cmp(&b.metric));
        rows
    }

    /// Serialize a run row as CSV: fixed header line plus one data line.
    /// Missing optional metrics become empty cells so the column layout
    /// stays stable.
    fn run_row_to_csv(row: &RunExportRow) -> anyhow::Result<String> {
        let mut output = String::new();

        output.push_str("bench_name,wall_ms_median,wall_ms_min,wall_ms_max,binary_bytes_median,cpu_ms_median,ctx_switches_median,max_rss_kb_median,page_faults_median,io_read_bytes_median,io_write_bytes_median,network_packets_median,energy_uj_median,throughput_median,sample_count,timestamp\n");

        output.push_str(&csv_escape(&row.bench_name));
        output.push(',');
        output.push_str(&row.wall_ms_median.to_string());
        output.push(',');
        output.push_str(&row.wall_ms_min.to_string());
        output.push(',');
        output.push_str(&row.wall_ms_max.to_string());
        output.push(',');
        output.push_str(
            &row.binary_bytes_median
                .map_or(String::new(), |v| v.to_string()),
        );
        output.push(',');
        output.push_str(&row.cpu_ms_median.map_or(String::new(), |v| v.to_string()));
        output.push(',');
        output.push_str(
            &row.ctx_switches_median
                .map_or(String::new(), |v| v.to_string()),
        );
        output.push(',');
        output.push_str(
            &row.max_rss_kb_median
                .map_or(String::new(), |v| v.to_string()),
        );
        output.push(',');
        output.push_str(
            &row.page_faults_median
                .map_or(String::new(), |v| v.to_string()),
        );
        output.push(',');
        output.push_str(
            &row.io_read_bytes_median
                .map_or(String::new(), |v| v.to_string()),
        );
        output.push(',');
        output.push_str(
            &row.io_write_bytes_median
                .map_or(String::new(), |v| v.to_string()),
        );
        output.push(',');
        output.push_str(
            &row.network_packets_median
                .map_or(String::new(), |v| v.to_string()),
        );
        output.push(',');
        output.push_str(
            &row.energy_uj_median
                .map_or(String::new(), |v| v.to_string()),
        );
        output.push(',');
        // Throughput is the only float column; fixed precision keeps it stable.
        output.push_str(
            &row.throughput_median
                .map_or(String::new(), |v| format!("{:.6}", v)),
        );
        output.push(',');
        output.push_str(&row.sample_count.to_string());
        output.push(',');
        output.push_str(&csv_escape(&row.timestamp));
        output.push('\n');

        Ok(output)
    }

    /// Serialize a run row as a single JSON line.
    fn run_row_to_jsonl(row: &RunExportRow) -> anyhow::Result<String> {
        let json = serde_json::to_string(row)?;
        Ok(format!("{}\n", json))
    }

    /// Serialize compare rows as CSV. Only the seven core columns are
    /// emitted; warn_threshold/cv/noise_threshold are not exported here.
    fn compare_rows_to_csv(rows: &[CompareExportRow]) -> anyhow::Result<String> {
        let mut output = String::new();

        output.push_str(
            "bench_name,metric,baseline_value,current_value,regression_pct,status,threshold\n",
        );

        for row in rows {
            output.push_str(&csv_escape(&row.bench_name));
            output.push(',');
            output.push_str(&csv_escape(&row.metric));
            output.push(',');
            output.push_str(&format!("{:.6}", row.baseline_value));
            output.push(',');
            output.push_str(&format!("{:.6}", row.current_value));
            output.push(',');
            output.push_str(&format!("{:.6}", row.regression_pct));
            output.push(',');
            output.push_str(&csv_escape(&row.status));
            output.push(',');
            output.push_str(&format!("{:.6}", row.threshold));
            output.push('\n');
        }

        Ok(output)
    }

    /// Serialize compare rows as JSON Lines, one object per row.
    /// Produces an empty string when there are no rows.
    fn compare_rows_to_jsonl(rows: &[CompareExportRow]) -> anyhow::Result<String> {
        let mut output = String::new();

        for row in rows {
            let json = serde_json::to_string(row)?;
            output.push_str(&json);
            output.push('\n');
        }

        Ok(output)
    }

    /// Render a run row as a standalone HTML page with a one-row table.
    fn run_row_to_html(row: &RunExportRow) -> anyhow::Result<String> {
        let html = format!(
            "<!doctype html><html><head><meta charset=\"utf-8\"><title>perfgate run export</title></head><body>\
            <h1>perfgate run export</h1>\
            <table border=\"1\">\
            <thead><tr><th>bench_name</th><th>wall_ms_median</th><th>wall_ms_min</th><th>wall_ms_max</th><th>binary_bytes_median</th><th>cpu_ms_median</th><th>ctx_switches_median</th><th>max_rss_kb_median</th><th>page_faults_median</th><th>io_read_bytes_median</th><th>io_write_bytes_median</th><th>network_packets_median</th><th>energy_uj_median</th><th>throughput_median</th><th>sample_count</th><th>timestamp</th></tr></thead>\
            <tbody><tr><td>{bench}</td><td>{wall_med}</td><td>{wall_min}</td><td>{wall_max}</td><td>{binary}</td><td>{cpu}</td><td>{ctx}</td><td>{rss}</td><td>{pf}</td><td>{io_read}</td><td>{io_write}</td><td>{net}</td><td>{energy}</td><td>{throughput}</td><td>{sample_count}</td><td>{timestamp}</td></tr></tbody>\
            </table></body></html>\n",
            bench = html_escape(&row.bench_name),
            wall_med = row.wall_ms_median,
            wall_min = row.wall_ms_min,
            wall_max = row.wall_ms_max,
            binary = row
                .binary_bytes_median
                .map_or(String::new(), |v| v.to_string()),
            cpu = row.cpu_ms_median.map_or(String::new(), |v| v.to_string()),
            ctx = row
                .ctx_switches_median
                .map_or(String::new(), |v| v.to_string()),
            rss = row
                .max_rss_kb_median
                .map_or(String::new(), |v| v.to_string()),
            pf = row
                .page_faults_median
                .map_or(String::new(), |v| v.to_string()),
            io_read = row
                .io_read_bytes_median
                .map_or(String::new(), |v| v.to_string()),
            io_write = row
                .io_write_bytes_median
                .map_or(String::new(), |v| v.to_string()),
            net = row
                .network_packets_median
                .map_or(String::new(), |v| v.to_string()),
            energy = row
                .energy_uj_median
                .map_or(String::new(), |v| v.to_string()),
            throughput = row
                .throughput_median
                .map_or(String::new(), |v| format!("{:.6}", v)),
            sample_count = row.sample_count,
            timestamp = html_escape(&row.timestamp),
        );
        Ok(html)
    }

    /// Render compare rows as a standalone HTML page with one table row
    /// per metric.
    fn compare_rows_to_html(rows: &[CompareExportRow]) -> anyhow::Result<String> {
        let mut out = String::from(
            "<!doctype html><html><head><meta charset=\"utf-8\"><title>perfgate compare export</title></head><body><h1>perfgate compare export</h1><table border=\"1\"><thead><tr><th>bench_name</th><th>metric</th><th>baseline_value</th><th>current_value</th><th>regression_pct</th><th>status</th><th>threshold</th></tr></thead><tbody>",
        );

        for row in rows {
            out.push_str(&format!(
                "<tr><td>{}</td><td>{}</td><td>{:.6}</td><td>{:.6}</td><td>{:.6}</td><td>{}</td><td>{:.6}</td></tr>",
                html_escape(&row.bench_name),
                html_escape(&row.metric),
                row.baseline_value,
                row.current_value,
                row.regression_pct,
                html_escape(&row.status),
                row.threshold
            ));
        }

        out.push_str("</tbody></table></body></html>\n");
        Ok(out)
    }

    /// Render a run row in Prometheus text exposition format.
    /// Optional metrics only emit a sample when present.
    fn run_row_to_prometheus(row: &RunExportRow) -> anyhow::Result<String> {
        let bench = prometheus_escape_label_value(&row.bench_name);
        let mut out = String::new();
        out.push_str(&format!(
            "perfgate_run_wall_ms_median{{bench=\"{}\"}} {}\n",
            bench, row.wall_ms_median
        ));
        out.push_str(&format!(
            "perfgate_run_wall_ms_min{{bench=\"{}\"}} {}\n",
            bench, row.wall_ms_min
        ));
        out.push_str(&format!(
            "perfgate_run_wall_ms_max{{bench=\"{}\"}} {}\n",
            bench, row.wall_ms_max
        ));
        if let Some(v) = row.binary_bytes_median {
            out.push_str(&format!(
                "perfgate_run_binary_bytes_median{{bench=\"{}\"}} {}\n",
                bench, v
            ));
        }
        if let Some(v) = row.cpu_ms_median {
            out.push_str(&format!(
                "perfgate_run_cpu_ms_median{{bench=\"{}\"}} {}\n",
                bench, v
            ));
        }
        if let Some(v) = row.ctx_switches_median {
            out.push_str(&format!(
                "perfgate_run_ctx_switches_median{{bench=\"{}\"}} {}\n",
                bench, v
            ));
        }
        if let Some(v) = row.max_rss_kb_median {
            out.push_str(&format!(
                "perfgate_run_max_rss_kb_median{{bench=\"{}\"}} {}\n",
                bench, v
            ));
        }
        if let Some(v) = row.page_faults_median {
            out.push_str(&format!(
                "perfgate_run_page_faults_median{{bench=\"{}\"}} {}\n",
                bench, v
            ));
        }
        if let Some(v) = row.io_read_bytes_median {
            out.push_str(&format!(
                "perfgate_run_io_read_bytes_median{{bench=\"{}\"}} {}\n",
                bench, v
            ));
        }
        if let Some(v) = row.io_write_bytes_median {
            out.push_str(&format!(
                "perfgate_run_io_write_bytes_median{{bench=\"{}\"}} {}\n",
                bench, v
            ));
        }
        if let Some(v) = row.network_packets_median {
            out.push_str(&format!(
                "perfgate_run_network_packets_median{{bench=\"{}\"}} {}\n",
                bench, v
            ));
        }
        if let Some(v) = row.energy_uj_median {
            out.push_str(&format!(
                "perfgate_run_energy_uj_median{{bench=\"{}\"}} {}\n",
                bench, v
            ));
        }
        if let Some(v) = row.throughput_median {
            out.push_str(&format!(
                "perfgate_run_throughput_per_s_median{{bench=\"{}\"}} {:.6}\n",
                bench, v
            ));
        }
        out.push_str(&format!(
            "perfgate_run_sample_count{{bench=\"{}\"}} {}\n",
            bench, row.sample_count
        ));
        Ok(out)
    }

    /// Render compare rows as a JUnit XML report: one testcase per metric,
    /// with `<failure>` / `<error>` elements for failing rows.
    fn compare_rows_to_junit(
        receipt: &CompareReceipt,
        rows: &[CompareExportRow],
    ) -> anyhow::Result<String> {
        let mut out = String::new();
        let total = rows.len();
        // NOTE(review): `status_to_string` only ever yields
        // pass/warn/fail/skip, so the "error" branch below is currently
        // latent/defensive; `errors` is always 0.
        let failures = rows.iter().filter(|r| r.status == "fail").count();
        let errors = rows.iter().filter(|r| r.status == "error").count();

        out.push_str("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
        out.push_str(&format!(
            "<testsuites name=\"perfgate\" tests=\"{}\" failures=\"{}\" errors=\"{}\">\n",
            total, failures, errors
        ));

        out.push_str(&format!(
            "  <testsuite name=\"{}\" tests=\"{}\" failures=\"{}\" errors=\"{}\">\n",
            html_escape(&receipt.bench.name),
            total,
            failures,
            errors
        ));

        for row in rows {
            let classname = format!("perfgate.{}", html_escape(&receipt.bench.name));
            out.push_str(&format!(
                "    <testcase name=\"{}\" classname=\"{}\" time=\"0.0\">\n",
                html_escape(&row.metric),
                classname
            ));

            if row.status == "fail" {
                out.push_str(&format!(
                    "      <failure message=\"Performance regression detected for {}\">",
                    html_escape(&row.metric)
                ));
                out.push_str(&format!(
                    "Metric: {}\nBaseline: {:.6}\nCurrent: {:.6}\nRegression: {:.2}%\nThreshold: {:.2}%",
                    row.metric, row.baseline_value, row.current_value, row.regression_pct, row.threshold
                ));
                out.push_str("</failure>\n");
            } else if row.status == "error" {
                out.push_str(&format!(
                    "      <error message=\"Error occurred during performance check for {}\">",
                    html_escape(&row.metric)
                ));
                out.push_str("</error>\n");
            }

            out.push_str("    </testcase>\n");
        }

        out.push_str("  </testsuite>\n");
        out.push_str("</testsuites>\n");

        Ok(out)
    }

    /// Render compare rows in Prometheus text exposition format.
    fn compare_rows_to_prometheus(rows: &[CompareExportRow]) -> anyhow::Result<String> {
        let mut out = String::new();
        for row in rows {
            let bench = prometheus_escape_label_value(&row.bench_name);
            let metric = prometheus_escape_label_value(&row.metric);
            out.push_str(&format!(
                "perfgate_compare_baseline_value{{bench=\"{}\",metric=\"{}\"}} {:.6}\n",
                bench, metric, row.baseline_value
            ));
            out.push_str(&format!(
                "perfgate_compare_current_value{{bench=\"{}\",metric=\"{}\"}} {:.6}\n",
                bench, metric, row.current_value
            ));
            out.push_str(&format!(
                "perfgate_compare_regression_pct{{bench=\"{}\",metric=\"{}\"}} {:.6}\n",
                bench, metric, row.regression_pct
            ));
            out.push_str(&format!(
                "perfgate_compare_threshold_pct{{bench=\"{}\",metric=\"{}\"}} {:.6}\n",
                bench, metric, row.threshold
            ));

            // Numeric encoding of status: pass=0, warn=1, fail=2,
            // anything else (e.g. "skip") = -1.
            let status_code = match row.status.as_str() {
                "pass" => 0,
                "warn" => 1,
                "fail" => 2,
                _ => -1,
            };
            out.push_str(&format!(
                "perfgate_compare_status{{bench=\"{}\",metric=\"{}\",status=\"{}\"}} {}\n",
                bench,
                metric,
                prometheus_escape_label_value(&row.status),
                status_code
            ));
        }
        Ok(out)
    }
}
747
748fn metric_to_string(metric: Metric) -> String {
750 metric.as_str().to_string()
751}
752
753fn status_to_string(status: MetricStatus) -> String {
755 match status {
756 MetricStatus::Pass => "pass".to_string(),
757 MetricStatus::Warn => "warn".to_string(),
758 MetricStatus::Fail => "fail".to_string(),
759 MetricStatus::Skip => "skip".to_string(),
760 }
761}
762
/// Quote a CSV field when it contains a delimiter, quote, or line break.
///
/// Per RFC 4180 conventions: the field is wrapped in double quotes and any
/// embedded `"` is doubled. Fields without special characters pass through
/// unchanged.
pub fn csv_escape(s: &str) -> String {
    let needs_quoting = s.chars().any(|c| matches!(c, ',' | '"' | '\n' | '\r'));
    if !needs_quoting {
        return s.to_string();
    }

    let mut escaped = String::with_capacity(s.len() + 2);
    escaped.push('"');
    for c in s.chars() {
        if c == '"' {
            // Embedded quotes are escaped by doubling.
            escaped.push('"');
        }
        escaped.push(c);
    }
    escaped.push('"');
    escaped
}
782
/// Escape the characters that are unsafe in HTML/XML text and attribute
/// values: `&`, `<`, `>`, and `"`.
///
/// `&` must be replaced first so already-produced entities are not
/// double-escaped. (The previous replacement literals had been corrupted
/// into identity replacements by HTML-entity decoding; this restores them.)
fn html_escape(s: &str) -> String {
    s.replace('&', "&amp;")
        .replace('<', "&lt;")
        .replace('>', "&gt;")
        .replace('"', "&quot;")
}
789
/// Escape a Prometheus label value for the text exposition format.
///
/// The exposition format requires escaping backslash (`\\`), double quote
/// (`\"`), and line feed (`\n`) inside label values; the previous version
/// omitted the line-feed escape, which would produce an unparseable
/// exposition line for values containing a newline. Backslash is replaced
/// first so later replacements are not re-escaped.
fn prometheus_escape_label_value(s: &str) -> String {
    s.replace('\\', "\\\\")
        .replace('"', "\\\"")
        .replace('\n', "\\n")
}
793
794#[cfg(test)]
795mod tests {
796 use super::*;
797 use perfgate_types::{
798 BenchMeta, Budget, COMPARE_SCHEMA_V1, CompareRef, Delta, Direction, F64Summary, HostInfo,
799 Metric, MetricStatistic, MetricStatus, RUN_SCHEMA_V1, RunMeta, Sample, Stats, ToolInfo,
800 U64Summary, Verdict, VerdictCounts, VerdictStatus,
801 };
802 use std::collections::BTreeMap;
803
804 fn create_test_run_receipt() -> RunReceipt {
805 RunReceipt {
806 schema: RUN_SCHEMA_V1.to_string(),
807 tool: ToolInfo {
808 name: "perfgate".to_string(),
809 version: "0.1.0".to_string(),
810 },
811 run: RunMeta {
812 id: "test-run-001".to_string(),
813 started_at: "2024-01-15T10:00:00Z".to_string(),
814 ended_at: "2024-01-15T10:00:05Z".to_string(),
815 host: HostInfo {
816 os: "linux".to_string(),
817 arch: "x86_64".to_string(),
818 cpu_count: None,
819 memory_bytes: None,
820 hostname_hash: None,
821 },
822 },
823 bench: BenchMeta {
824 name: "test-benchmark".to_string(),
825 cwd: None,
826 command: vec!["echo".to_string(), "hello".to_string()],
827 repeat: 5,
828 warmup: 0,
829 work_units: None,
830 timeout_ms: None,
831 },
832 samples: vec![
833 Sample {
834 wall_ms: 100,
835 exit_code: 0,
836 warmup: false,
837 timed_out: false,
838 cpu_ms: Some(50),
839 page_faults: None,
840 ctx_switches: None,
841 max_rss_kb: Some(1024),
842 io_read_bytes: None,
843 io_write_bytes: None,
844 network_packets: None,
845 energy_uj: None,
846 binary_bytes: None,
847 stdout: None,
848 stderr: None,
849 },
850 Sample {
851 wall_ms: 102,
852 exit_code: 0,
853 warmup: false,
854 timed_out: false,
855 cpu_ms: Some(52),
856 page_faults: None,
857 ctx_switches: None,
858 max_rss_kb: Some(1028),
859 io_read_bytes: None,
860 io_write_bytes: None,
861 network_packets: None,
862 energy_uj: None,
863 binary_bytes: None,
864 stdout: None,
865 stderr: None,
866 },
867 ],
868 stats: Stats {
869 wall_ms: U64Summary::new(100, 98, 102),
870 cpu_ms: Some(U64Summary::new(50, 48, 52)),
871 page_faults: None,
872 ctx_switches: None,
873 max_rss_kb: Some(U64Summary::new(1024, 1020, 1028)),
874 io_read_bytes: None,
875 io_write_bytes: None,
876 network_packets: None,
877 energy_uj: None,
878 binary_bytes: None,
879 throughput_per_s: None,
880 },
881 }
882 }
883
884 fn create_test_compare_receipt() -> CompareReceipt {
885 let mut budgets = BTreeMap::new();
886 budgets.insert(Metric::WallMs, Budget::new(0.2, 0.18, Direction::Lower));
887 budgets.insert(Metric::MaxRssKb, Budget::new(0.15, 0.135, Direction::Lower));
888
889 let mut deltas = BTreeMap::new();
890 deltas.insert(
891 Metric::WallMs,
892 Delta {
893 baseline: 100.0,
894 current: 110.0,
895 ratio: 1.1,
896 pct: 0.1,
897 regression: 0.1,
898 cv: None,
899 noise_threshold: None,
900 statistic: MetricStatistic::Median,
901 significance: None,
902 status: MetricStatus::Pass,
903 },
904 );
905 deltas.insert(
906 Metric::MaxRssKb,
907 Delta {
908 baseline: 1024.0,
909 current: 1280.0,
910 ratio: 1.25,
911 pct: 0.25,
912 regression: 0.25,
913 cv: None,
914 noise_threshold: None,
915 statistic: MetricStatistic::Median,
916 significance: None,
917 status: MetricStatus::Fail,
918 },
919 );
920
921 CompareReceipt {
922 schema: COMPARE_SCHEMA_V1.to_string(),
923 tool: ToolInfo {
924 name: "perfgate".to_string(),
925 version: "0.1.0".to_string(),
926 },
927 bench: BenchMeta {
928 name: "alpha-bench".to_string(),
929 cwd: None,
930 command: vec!["test".to_string()],
931 repeat: 5,
932 warmup: 0,
933 work_units: None,
934 timeout_ms: None,
935 },
936 baseline_ref: CompareRef {
937 path: Some("baseline.json".to_string()),
938 run_id: Some("baseline-001".to_string()),
939 },
940 current_ref: CompareRef {
941 path: Some("current.json".to_string()),
942 run_id: Some("current-001".to_string()),
943 },
944 budgets,
945 deltas,
946 verdict: Verdict {
947 status: VerdictStatus::Fail,
948 counts: VerdictCounts {
949 pass: 1,
950 warn: 0,
951 fail: 0,
952 skip: 0,
953 },
954 reasons: vec!["max_rss_kb_fail".to_string()],
955 },
956 }
957 }
958
959 #[test]
960 fn test_run_export_csv() {
961 let receipt = create_test_run_receipt();
962 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
963
964 assert!(csv.starts_with("bench_name,wall_ms_median,"));
965 assert!(csv.contains("test-benchmark"));
966 assert!(csv.contains("100,98,102"));
967 assert!(csv.contains("1024"));
968 assert!(csv.contains("2024-01-15T10:00:00Z"));
969 }
970
971 #[test]
972 fn test_run_export_jsonl() {
973 let receipt = create_test_run_receipt();
974 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
975
976 let lines: Vec<&str> = jsonl.trim().split('\n').collect();
977 assert_eq!(lines.len(), 1);
978
979 let parsed: serde_json::Value = serde_json::from_str(lines[0]).unwrap();
980 assert_eq!(parsed["bench_name"], "test-benchmark");
981 assert_eq!(parsed["wall_ms_median"], 100);
982 }
983
984 #[test]
985 fn test_compare_export_csv() {
986 let receipt = create_test_compare_receipt();
987 let csv = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
988
989 assert!(csv.starts_with("bench_name,metric,baseline_value,"));
990 assert!(csv.contains("alpha-bench"));
991 assert!(csv.contains("max_rss_kb"));
992 assert!(csv.contains("wall_ms"));
993 let max_rss_pos = csv.find("max_rss_kb").unwrap();
994 let wall_ms_pos = csv.find("wall_ms").unwrap();
995 assert!(max_rss_pos < wall_ms_pos);
996 }
997
998 #[test]
999 fn test_compare_export_jsonl() {
1000 let receipt = create_test_compare_receipt();
1001 let jsonl = ExportUseCase::export_compare(&receipt, ExportFormat::Jsonl).unwrap();
1002
1003 let lines: Vec<&str> = jsonl.trim().split('\n').collect();
1004 assert_eq!(lines.len(), 2);
1005
1006 for line in &lines {
1007 let _: serde_json::Value = serde_json::from_str(line).unwrap();
1008 }
1009
1010 let first: serde_json::Value = serde_json::from_str(lines[0]).unwrap();
1011 assert_eq!(first["metric"], "max_rss_kb");
1012 }
1013
1014 #[test]
1015 fn test_csv_escape() {
1016 assert_eq!(csv_escape("simple"), "simple");
1017 assert_eq!(csv_escape("has,comma"), "\"has,comma\"");
1018 assert_eq!(csv_escape("has\"quote"), "\"has\"\"quote\"");
1019 assert_eq!(csv_escape("has\nnewline"), "\"has\nnewline\"");
1020 }
1021
1022 #[test]
1023 fn test_stable_ordering_across_runs() {
1024 let receipt = create_test_compare_receipt();
1025
1026 let csv1 = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
1027 let csv2 = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
1028
1029 assert_eq!(csv1, csv2, "CSV output should be deterministic");
1030 }
1031
1032 #[test]
1033 fn test_export_format_from_str() {
1034 assert_eq!(ExportFormat::parse("csv"), Some(ExportFormat::Csv));
1035 assert_eq!(ExportFormat::parse("CSV"), Some(ExportFormat::Csv));
1036 assert_eq!(ExportFormat::parse("jsonl"), Some(ExportFormat::Jsonl));
1037 assert_eq!(ExportFormat::parse("JSONL"), Some(ExportFormat::Jsonl));
1038 assert_eq!(ExportFormat::parse("html"), Some(ExportFormat::Html));
1039 assert_eq!(
1040 ExportFormat::parse("prometheus"),
1041 Some(ExportFormat::Prometheus)
1042 );
1043 assert_eq!(ExportFormat::parse("invalid"), None);
1044 }
1045
1046 #[test]
1047 fn test_run_export_html_and_prometheus() {
1048 let receipt = create_test_run_receipt();
1049
1050 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1051 assert!(html.contains("<table"), "html output should contain table");
1052 assert!(html.contains("test-benchmark"));
1053
1054 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1055 assert!(prom.contains("perfgate_run_wall_ms_median"));
1056 assert!(prom.contains("bench=\"test-benchmark\""));
1057 }
1058
1059 #[test]
1060 fn test_compare_export_prometheus() {
1061 let receipt = create_test_compare_receipt();
1062 let prom = ExportUseCase::export_compare(&receipt, ExportFormat::Prometheus).unwrap();
1063 assert!(prom.contains("perfgate_compare_regression_pct"));
1064 assert!(prom.contains("metric=\"max_rss_kb\""));
1065 }
1066
1067 #[test]
1068 fn test_compare_export_junit() {
1069 let receipt = create_test_compare_receipt();
1070 let junit = ExportUseCase::export_compare(&receipt, ExportFormat::JUnit).unwrap();
1071
1072 assert!(junit.contains("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"));
1073 assert!(junit.contains("<testsuites name=\"perfgate\""));
1074 assert!(junit.contains("testsuite name=\"alpha-bench\""));
1075 assert!(junit.contains("testcase name=\"wall_ms\""));
1076 assert!(junit.contains("testcase name=\"max_rss_kb\""));
1077 assert!(
1078 junit.contains("<failure message=\"Performance regression detected for max_rss_kb\">")
1079 );
1080 assert!(junit.contains("Baseline: 1024.000000"));
1081 assert!(junit.contains("Current: 1280.000000"));
1082 }
1083
1084 #[test]
1085 fn test_html_escape() {
1086 assert_eq!(html_escape("simple"), "simple");
1087 assert_eq!(html_escape("<script>"), "<script>");
1088 assert_eq!(html_escape("a&b"), "a&b");
1089 assert_eq!(html_escape("\"quoted\""), ""quoted"");
1090 }
1091
1092 #[test]
1093 fn test_prometheus_escape() {
1094 assert_eq!(prometheus_escape_label_value("simple"), "simple");
1095 assert_eq!(prometheus_escape_label_value("has\"quote"), "has\\\"quote");
1096 assert_eq!(
1097 prometheus_escape_label_value("has\\backslash"),
1098 "has\\\\backslash"
1099 );
1100 }
1101
1102 mod snapshot_tests {
1103 use super::*;
1104 use insta::assert_snapshot;
1105
1106 #[test]
1107 fn test_run_html_snapshot() {
1108 let receipt = create_test_run_receipt();
1109 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1110 assert_snapshot!("run_html", html);
1111 }
1112
1113 #[test]
1114 fn test_run_prometheus_snapshot() {
1115 let receipt = create_test_run_receipt();
1116 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1117 assert_snapshot!("run_prometheus", prom);
1118 }
1119
1120 #[test]
1121 fn test_compare_html_snapshot() {
1122 let receipt = create_test_compare_receipt();
1123 let html = ExportUseCase::export_compare(&receipt, ExportFormat::Html).unwrap();
1124 assert_snapshot!("compare_html", html);
1125 }
1126
1127 #[test]
1128 fn test_compare_prometheus_snapshot() {
1129 let receipt = create_test_compare_receipt();
1130 let prom = ExportUseCase::export_compare(&receipt, ExportFormat::Prometheus).unwrap();
1131 assert_snapshot!("compare_prometheus", prom);
1132 }
1133 }
1134
1135 mod edge_case_tests {
1136 use super::*;
1137
1138 fn create_empty_run_receipt() -> RunReceipt {
1139 RunReceipt {
1140 schema: RUN_SCHEMA_V1.to_string(),
1141 tool: ToolInfo {
1142 name: "perfgate".to_string(),
1143 version: "0.1.0".to_string(),
1144 },
1145 run: RunMeta {
1146 id: "empty-run".to_string(),
1147 started_at: "2024-01-01T00:00:00Z".to_string(),
1148 ended_at: "2024-01-01T00:00:01Z".to_string(),
1149 host: HostInfo {
1150 os: "linux".to_string(),
1151 arch: "x86_64".to_string(),
1152 cpu_count: None,
1153 memory_bytes: None,
1154 hostname_hash: None,
1155 },
1156 },
1157 bench: BenchMeta {
1158 name: "empty-bench".to_string(),
1159 cwd: None,
1160 command: vec!["true".to_string()],
1161 repeat: 0,
1162 warmup: 0,
1163 work_units: None,
1164 timeout_ms: None,
1165 },
1166 samples: vec![],
1167 stats: Stats {
1168 wall_ms: U64Summary::new(0, 0, 0),
1169 cpu_ms: None,
1170 page_faults: None,
1171 ctx_switches: None,
1172 max_rss_kb: None,
1173 io_read_bytes: None,
1174 io_write_bytes: None,
1175 network_packets: None,
1176 energy_uj: None,
1177 binary_bytes: None,
1178 throughput_per_s: None,
1179 },
1180 }
1181 }
1182
1183 fn create_empty_compare_receipt() -> CompareReceipt {
1184 CompareReceipt {
1185 schema: COMPARE_SCHEMA_V1.to_string(),
1186 tool: ToolInfo {
1187 name: "perfgate".to_string(),
1188 version: "0.1.0".to_string(),
1189 },
1190 bench: BenchMeta {
1191 name: "empty-bench".to_string(),
1192 cwd: None,
1193 command: vec!["true".to_string()],
1194 repeat: 0,
1195 warmup: 0,
1196 work_units: None,
1197 timeout_ms: None,
1198 },
1199 baseline_ref: CompareRef {
1200 path: None,
1201 run_id: None,
1202 },
1203 current_ref: CompareRef {
1204 path: None,
1205 run_id: None,
1206 },
1207 budgets: BTreeMap::new(),
1208 deltas: BTreeMap::new(),
1209 verdict: Verdict {
1210 status: VerdictStatus::Pass,
1211 counts: VerdictCounts {
1212 pass: 1,
1213 warn: 0,
1214 fail: 0,
1215 skip: 0,
1216 },
1217 reasons: vec![],
1218 },
1219 }
1220 }
1221
1222 fn create_run_receipt_with_bench_name(name: &str) -> RunReceipt {
1223 let mut receipt = create_empty_run_receipt();
1224 receipt.bench.name = name.to_string();
1225 receipt.samples.push(Sample {
1226 wall_ms: 42,
1227 exit_code: 0,
1228 warmup: false,
1229 timed_out: false,
1230 cpu_ms: None,
1231 page_faults: None,
1232 ctx_switches: None,
1233 max_rss_kb: None,
1234 io_read_bytes: None,
1235 io_write_bytes: None,
1236 network_packets: None,
1237 energy_uj: None,
1238 binary_bytes: None,
1239 stdout: None,
1240 stderr: None,
1241 });
1242 receipt.stats.wall_ms = U64Summary::new(42, 42, 42);
1243 receipt
1244 }
1245
1246 #[test]
1249 fn empty_run_receipt_csv_has_header_and_one_row() {
1250 let receipt = create_empty_run_receipt();
1251 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1252 let lines: Vec<&str> = csv.trim().split('\n').collect();
1253 assert_eq!(lines.len(), 2, "should have header + 1 data row");
1254 assert!(lines[0].starts_with("bench_name,"));
1255 assert!(csv.contains("empty-bench"));
1256 }
1257
1258 #[test]
1259 fn empty_run_receipt_jsonl_is_valid() {
1260 let receipt = create_empty_run_receipt();
1261 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1262 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1263 assert_eq!(parsed["bench_name"], "empty-bench");
1264 assert_eq!(parsed["sample_count"], 0);
1265 }
1266
1267 #[test]
1268 fn empty_run_receipt_html_is_valid() {
1269 let receipt = create_empty_run_receipt();
1270 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1271 assert!(html.starts_with("<!doctype html>"));
1272 assert!(html.contains("<table"));
1273 assert!(html.contains("</table>"));
1274 assert!(html.contains("empty-bench"));
1275 }
1276
1277 #[test]
1278 fn empty_run_receipt_prometheus_is_valid() {
1279 let receipt = create_empty_run_receipt();
1280 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1281 assert!(prom.contains("perfgate_run_wall_ms_median"));
1282 assert!(prom.contains("bench=\"empty-bench\""));
1283 assert!(prom.contains("perfgate_run_sample_count"));
1284 }
1285
1286 #[test]
1287 fn empty_compare_receipt_csv_has_header_only() {
1288 let receipt = create_empty_compare_receipt();
1289 let csv = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
1290 let lines: Vec<&str> = csv.trim().split('\n').collect();
1291 assert_eq!(lines.len(), 1, "should have header only with no deltas");
1292 assert!(lines[0].starts_with("bench_name,metric,"));
1293 }
1294
1295 #[test]
1296 fn empty_compare_receipt_jsonl_is_empty() {
1297 let receipt = create_empty_compare_receipt();
1298 let jsonl = ExportUseCase::export_compare(&receipt, ExportFormat::Jsonl).unwrap();
1299 assert!(
1300 jsonl.trim().is_empty(),
1301 "JSONL should be empty for no deltas"
1302 );
1303 }
1304
1305 #[test]
1306 fn empty_compare_receipt_html_has_valid_structure() {
1307 let receipt = create_empty_compare_receipt();
1308 let html = ExportUseCase::export_compare(&receipt, ExportFormat::Html).unwrap();
1309 assert!(html.starts_with("<!doctype html>"));
1310 assert!(html.contains("<table"));
1311 assert!(html.contains("</table>"));
1312 assert!(html.contains("<thead>"));
1313 assert!(html.contains("</tbody>"));
1314 }
1315
1316 #[test]
1317 fn empty_compare_receipt_prometheus_is_empty() {
1318 let receipt = create_empty_compare_receipt();
1319 let prom = ExportUseCase::export_compare(&receipt, ExportFormat::Prometheus).unwrap();
1320 assert!(
1321 prom.trim().is_empty(),
1322 "Prometheus output should be empty for no deltas"
1323 );
1324 }
1325
1326 #[test]
1329 fn csv_bench_name_with_comma() {
1330 let receipt = create_run_receipt_with_bench_name("bench,with,commas");
1331 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1332 assert!(
1333 csv.contains("\"bench,with,commas\""),
1334 "comma-containing bench name should be quoted"
1335 );
1336 let lines: Vec<&str> = csv.trim().split('\n').collect();
1337 assert_eq!(lines.len(), 2, "should still have exactly 2 lines");
1338 }
1339
1340 #[test]
1341 fn csv_bench_name_with_quotes() {
1342 let receipt = create_run_receipt_with_bench_name("bench\"quoted\"name");
1343 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1344 assert!(
1345 csv.contains("\"bench\"\"quoted\"\"name\""),
1346 "quotes should be escaped as double-quotes in CSV"
1347 );
1348 }
1349
1350 #[test]
1351 fn csv_bench_name_with_newline() {
1352 let receipt = create_run_receipt_with_bench_name("bench\nwith\nnewlines");
1353 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1354 assert!(
1355 csv.contains("\"bench\nwith\nnewlines\""),
1356 "newline-containing bench name should be quoted"
1357 );
1358 }
1359
1360 #[test]
1361 fn csv_bench_name_with_commas_and_quotes() {
1362 let receipt = create_run_receipt_with_bench_name("a,\"b\",c");
1363 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1364 assert!(csv.contains("\"a,\"\"b\"\",c\""));
1366 }
1367
1368 #[test]
1371 fn jsonl_bench_name_with_unicode() {
1372 let receipt = create_run_receipt_with_bench_name("ベンチマーク-速度");
1373 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1374 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1375 assert_eq!(parsed["bench_name"], "ベンチマーク-速度");
1376 }
1377
1378 #[test]
1379 fn jsonl_bench_name_with_emoji() {
1380 let receipt = create_run_receipt_with_bench_name("bench-🚀-fast");
1381 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1382 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1383 assert_eq!(parsed["bench_name"], "bench-🚀-fast");
1384 }
1385
1386 #[test]
1387 fn jsonl_bench_name_with_special_json_chars() {
1388 let receipt = create_run_receipt_with_bench_name("bench\\with\"special\tchars");
1389 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1390 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1391 assert_eq!(parsed["bench_name"], "bench\\with\"special\tchars");
1392 }
1393
1394 #[test]
1397 fn html_run_with_all_optional_metrics_none() {
1398 let receipt = create_empty_run_receipt();
1399 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1400 assert!(html.contains("<html>"));
1401 assert!(html.contains("</html>"));
1402 assert!(html.contains("empty-bench"));
1404 }
1405
1406 #[test]
1407 fn html_bench_name_with_html_chars() {
1408 let receipt = create_run_receipt_with_bench_name("<script>alert('xss')</script>");
1409 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1410 assert!(
1411 !html.contains("<script>"),
1412 "HTML special chars should be escaped"
1413 );
1414 assert!(html.contains("<script>"));
1415 }
1416
1417 #[test]
1420 fn prometheus_bench_name_with_quotes() {
1421 let receipt = create_run_receipt_with_bench_name("bench\"name");
1422 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1423 assert!(
1424 prom.contains("bench="),
1425 "Prometheus output should have bench label"
1426 );
1427 assert!(
1428 !prom.contains("bench=\"bench\"name\""),
1429 "raw quotes should be escaped"
1430 );
1431 assert!(prom.contains("bench=\"bench\\\"name\""));
1432 }
1433
1434 #[test]
1435 fn prometheus_bench_name_with_backslash() {
1436 let receipt = create_run_receipt_with_bench_name("bench\\path");
1437 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1438 assert!(prom.contains("bench=\"bench\\\\path\""));
1439 }
1440
1441 #[test]
1442 fn prometheus_compare_with_all_metric_types() {
1443 let mut receipt = create_empty_compare_receipt();
1444 receipt.bench.name = "full-metrics".to_string();
1445 receipt.deltas.insert(
1446 Metric::WallMs,
1447 Delta {
1448 baseline: 100.0,
1449 current: 105.0,
1450 ratio: 1.05,
1451 pct: 0.05,
1452 regression: 0.05,
1453 cv: None,
1454 noise_threshold: None,
1455 statistic: MetricStatistic::Median,
1456 significance: None,
1457 status: MetricStatus::Pass,
1458 },
1459 );
1460 receipt.deltas.insert(
1461 Metric::MaxRssKb,
1462 Delta {
1463 baseline: 100.0,
1464 current: 105.0,
1465 ratio: 1.05,
1466 pct: 0.05,
1467 regression: 0.05,
1468 cv: None,
1469 noise_threshold: None,
1470 statistic: MetricStatistic::Median,
1471 significance: None,
1472 status: MetricStatus::Pass,
1473 },
1474 );
1475 let prom = ExportUseCase::export_compare(&receipt, ExportFormat::Prometheus).unwrap();
1476 assert!(prom.contains("metric=\"wall_ms\""));
1477 assert!(prom.contains("metric=\"max_rss_kb\""));
1478 assert!(prom.contains("perfgate_compare_baseline_value"));
1479 assert!(prom.contains("perfgate_compare_current_value"));
1480 assert!(prom.contains("perfgate_compare_status"));
1481 }
1482
1483 #[test]
1486 fn single_sample_run_exports_all_formats() {
1487 let receipt = create_run_receipt_with_bench_name("single");
1488
1489 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1490 assert!(csv.contains("single"));
1491 assert_eq!(csv.trim().lines().count(), 2);
1492
1493 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1494 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1495 assert_eq!(parsed["sample_count"], 1);
1496
1497 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1498 assert!(html.contains("<td>single</td>"));
1499
1500 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1501 assert!(prom.contains("perfgate_run_sample_count{bench=\"single\"} 1"));
1502 }
1503
1504 #[test]
1507 fn huge_values_run_receipt() {
1508 let mut receipt = create_empty_run_receipt();
1509 receipt.bench.name = "huge".to_string();
1510 receipt.stats.wall_ms = U64Summary::new(u64::MAX, u64::MAX - 1, u64::MAX);
1511 receipt.stats.max_rss_kb = Some(U64Summary::new(u64::MAX, u64::MAX, u64::MAX));
1512 receipt.stats.io_read_bytes = Some(U64Summary::new(u64::MAX, u64::MAX, u64::MAX));
1513
1514 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1515 assert!(csv.contains(&u64::MAX.to_string()));
1516
1517 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1518 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1519 assert_eq!(parsed["wall_ms_median"], u64::MAX);
1520
1521 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1522 assert!(html.contains(&u64::MAX.to_string()));
1523
1524 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1525 assert!(prom.contains(&u64::MAX.to_string()));
1526 }
1527
1528 #[test]
1531 fn warmup_only_samples_count_zero() {
1532 let mut receipt = create_empty_run_receipt();
1533 receipt.samples = vec![
1534 Sample {
1535 wall_ms: 10,
1536 exit_code: 0,
1537 warmup: true,
1538 timed_out: false,
1539 cpu_ms: None,
1540 page_faults: None,
1541 ctx_switches: None,
1542 max_rss_kb: None,
1543 io_read_bytes: None,
1544 io_write_bytes: None,
1545 network_packets: None,
1546 energy_uj: None,
1547 binary_bytes: None,
1548 stdout: None,
1549 stderr: None,
1550 },
1551 Sample {
1552 wall_ms: 11,
1553 exit_code: 0,
1554 warmup: true,
1555 timed_out: false,
1556 cpu_ms: None,
1557 page_faults: None,
1558 ctx_switches: None,
1559 max_rss_kb: None,
1560 io_read_bytes: None,
1561 io_write_bytes: None,
1562 network_packets: None,
1563 energy_uj: None,
1564 binary_bytes: None,
1565 stdout: None,
1566 stderr: None,
1567 },
1568 ];
1569
1570 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1571 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1572 assert_eq!(parsed["sample_count"], 0);
1573
1574 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1575 let data_line = csv.lines().nth(1).unwrap();
1577 assert!(
1578 data_line.contains(",0,"),
1579 "warmup-only should yield sample_count 0"
1580 );
1581 }
1582
1583 #[test]
1586 fn csv_bench_name_with_carriage_return() {
1587 let receipt = create_run_receipt_with_bench_name("bench\rwith\rcr");
1588 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1589 assert!(
1590 csv.contains("\"bench\rwith\rcr\""),
1591 "carriage-return-containing bench name should be quoted"
1592 );
1593 }
1594
        #[test]
        fn csv_compare_special_chars_in_bench_name() {
            // Compare CSV must quote/escape bench names exactly like run CSV:
            // wrap the field in quotes and double any embedded quotes.
            let mut receipt = create_empty_compare_receipt();
            receipt.bench.name = "bench,\"special\"\nname".to_string();
            // One delta so the export produces at least one data row.
            receipt.deltas.insert(
                Metric::WallMs,
                Delta {
                    baseline: 100.0,
                    current: 105.0,
                    ratio: 1.05,
                    pct: 0.05,
                    regression: 0.05,
                    cv: None,
                    noise_threshold: None,
                    statistic: MetricStatistic::Median,
                    significance: None,
                    status: MetricStatus::Pass,
                },
            );
            let csv = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
            assert!(csv.contains("\"bench,\"\"special\"\"\nname\""));
        }
1620
1621 #[test]
1624 fn unicode_bench_name_all_formats() {
1625 let name = "日本語ベンチ_αβγ_🚀";
1626 let receipt = create_run_receipt_with_bench_name(name);
1627
1628 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
1629 assert!(csv.contains(name));
1630
1631 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
1632 let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();
1633 assert_eq!(parsed["bench_name"], name);
1634
1635 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1636 assert!(html.contains(name));
1637
1638 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1639 assert!(prom.contains(name));
1640 }
1641
1642 #[test]
1645 fn html_compare_mixed_statuses() {
1646 let mut receipt = create_empty_compare_receipt();
1647 receipt.bench.name = "mixed".to_string();
1648 for (metric, status) in [
1649 (Metric::WallMs, MetricStatus::Pass),
1650 (Metric::CpuMs, MetricStatus::Warn),
1651 (Metric::MaxRssKb, MetricStatus::Fail),
1652 ] {
1653 receipt.deltas.insert(
1654 metric,
1655 Delta {
1656 baseline: 100.0,
1657 current: 120.0,
1658 ratio: 1.2,
1659 pct: 0.2,
1660 regression: 0.2,
1661 cv: None,
1662 noise_threshold: None,
1663 statistic: MetricStatistic::Median,
1664 significance: None,
1665 status,
1666 },
1667 );
1668 }
1669 let html = ExportUseCase::export_compare(&receipt, ExportFormat::Html).unwrap();
1670 assert!(html.contains("<td>pass</td>"));
1671 assert!(html.contains("<td>warn</td>"));
1672 assert!(html.contains("<td>fail</td>"));
1673 assert_eq!(html.matches("<tr><td>").count(), 3);
1675 }
1676
1677 #[test]
1680 fn html_empty_bench_name() {
1681 let receipt = create_run_receipt_with_bench_name("");
1682 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1683 assert!(html.contains("<td></td>"));
1684 assert!(html.contains("<html>"));
1685 }
1686
1687 #[test]
1690 fn prometheus_run_all_optional_metrics_present() {
1691 let mut receipt = create_empty_run_receipt();
1692 receipt.bench.name = "full".to_string();
1693 receipt.stats.cpu_ms = Some(U64Summary::new(50, 48, 52));
1694 receipt.stats.page_faults = Some(U64Summary::new(10, 8, 12));
1695 receipt.stats.ctx_switches = Some(U64Summary::new(5, 3, 7));
1696 receipt.stats.max_rss_kb = Some(U64Summary::new(2048, 2000, 2100));
1697 receipt.stats.io_read_bytes = Some(U64Summary::new(1000, 900, 1100));
1698 receipt.stats.io_write_bytes = Some(U64Summary::new(500, 400, 600));
1699 receipt.stats.network_packets = Some(U64Summary::new(10, 8, 12));
1700 receipt.stats.energy_uj = Some(U64Summary::new(1000, 900, 1100));
1701 receipt.stats.binary_bytes = Some(U64Summary::new(100000, 99000, 101000));
1702 receipt.stats.throughput_per_s = Some(F64Summary::new(1234.567890, 1200.0, 1300.0));
1703
1704 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
1705 assert!(prom.contains("perfgate_run_cpu_ms_median{bench=\"full\"} 50"));
1706 assert!(prom.contains("perfgate_run_page_faults_median{bench=\"full\"} 10"));
1707 assert!(prom.contains("perfgate_run_ctx_switches_median{bench=\"full\"} 5"));
1708 assert!(prom.contains("perfgate_run_max_rss_kb_median{bench=\"full\"} 2048"));
1709 assert!(prom.contains("perfgate_run_io_read_bytes_median{bench=\"full\"} 1000"));
1710 assert!(prom.contains("perfgate_run_io_write_bytes_median{bench=\"full\"} 500"));
1711 assert!(prom.contains("perfgate_run_network_packets_median{bench=\"full\"} 10"));
1712 assert!(prom.contains("perfgate_run_energy_uj_median{bench=\"full\"} 1000"));
1713 assert!(prom.contains("perfgate_run_binary_bytes_median{bench=\"full\"} 100000"));
1714 assert!(
1715 prom.contains("perfgate_run_throughput_per_s_median{bench=\"full\"} 1234.567890")
1716 );
1717 }
1718
1719 #[test]
1722 fn prometheus_compare_status_codes() {
1723 let mut receipt = create_empty_compare_receipt();
1724 receipt.bench.name = "status-test".to_string();
1725 for (metric, status, expected_code) in [
1726 (Metric::WallMs, MetricStatus::Pass, "0"),
1727 (Metric::CpuMs, MetricStatus::Warn, "1"),
1728 (Metric::MaxRssKb, MetricStatus::Fail, "2"),
1729 ] {
1730 receipt.deltas.insert(
1731 metric,
1732 Delta {
1733 baseline: 100.0,
1734 current: 110.0,
1735 ratio: 1.1,
1736 pct: 0.1,
1737 regression: 0.1,
1738 cv: None,
1739 noise_threshold: None,
1740 statistic: MetricStatistic::Median,
1741 significance: None,
1742 status,
1743 },
1744 );
1745 receipt
1746 .budgets
1747 .insert(metric, Budget::new(0.2, 0.15, Direction::Lower));
1748 let _ = expected_code; }
1750
1751 let prom = ExportUseCase::export_compare(&receipt, ExportFormat::Prometheus).unwrap();
1752 assert!(prom.contains("status=\"pass\"} 0"));
1753 assert!(prom.contains("status=\"warn\"} 1"));
1754 assert!(prom.contains("status=\"fail\"} 2"));
1755 }
1756
1757 #[test]
1760 fn jsonl_compare_fields_match_receipt() {
1761 let receipt = create_test_compare_receipt();
1762 let jsonl = ExportUseCase::export_compare(&receipt, ExportFormat::Jsonl).unwrap();
1763
1764 let lines: Vec<&str> = jsonl.trim().lines().collect();
1765 assert_eq!(lines.len(), receipt.deltas.len());
1766
1767 for line in lines {
1768 let parsed: serde_json::Value = serde_json::from_str(line).unwrap();
1769 assert_eq!(parsed["bench_name"], "alpha-bench");
1770 let metric_name = parsed["metric"].as_str().unwrap();
1771 assert!(
1772 ["wall_ms", "max_rss_kb"].contains(&metric_name),
1773 "unexpected metric: {}",
1774 metric_name
1775 );
1776 assert!(parsed["baseline_value"].as_f64().unwrap() > 0.0);
1777 assert!(parsed["current_value"].as_f64().unwrap() > 0.0);
1778 let status = parsed["status"].as_str().unwrap();
1779 assert!(
1780 ["pass", "warn", "fail"].contains(&status),
1781 "unexpected status: {}",
1782 status
1783 );
1784 }
1785 }
1786
        #[test]
        fn jsonl_run_round_trip() {
            // Every exported JSONL field must mirror the receipt it came from.
            let receipt = create_test_run_receipt();
            let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
            let parsed: serde_json::Value = serde_json::from_str(jsonl.trim()).unwrap();

            assert_eq!(parsed["bench_name"], receipt.bench.name);
            assert_eq!(parsed["wall_ms_median"], receipt.stats.wall_ms.median);
            assert_eq!(parsed["wall_ms_min"], receipt.stats.wall_ms.min);
            assert_eq!(parsed["wall_ms_max"], receipt.stats.wall_ms.max);
            assert_eq!(
                parsed["cpu_ms_median"],
                receipt.stats.cpu_ms.as_ref().unwrap().median
            );
            assert_eq!(
                parsed["max_rss_kb_median"],
                receipt.stats.max_rss_kb.as_ref().unwrap().median
            );
            // sample_count covers measured (non-warmup) samples only.
            assert_eq!(
                parsed["sample_count"],
                receipt.samples.iter().filter(|s| !s.warmup).count()
            );
            // The exported timestamp is the run's start time.
            assert_eq!(parsed["timestamp"], receipt.run.started_at);
        }
1813
1814 #[test]
1817 fn html_run_all_optional_metrics_present() {
1818 let mut receipt = create_empty_run_receipt();
1819 receipt.bench.name = "full-html".to_string();
1820 receipt.stats.cpu_ms = Some(U64Summary::new(50, 48, 52));
1821 receipt.stats.io_read_bytes = Some(U64Summary::new(1000, 900, 1100));
1822 receipt.stats.throughput_per_s = Some(F64Summary::new(999.123456, 900.0, 1100.0));
1823
1824 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
1825 assert!(html.contains("<td>50</td>"));
1826 assert!(html.contains("<td>1000</td>"));
1827 assert!(html.contains("999.123456"));
1828 assert!(html.contains("full-html"));
1829 }
1830
        #[test]
        fn csv_escape_empty_string() {
            // The empty string contains no special characters: no quoting.
            assert_eq!(csv_escape(""), "");
        }
1837
1838 #[test]
1839 fn csv_escape_only_quotes() {
1840 assert_eq!(csv_escape("\"\"\""), "\"\"\"\"\"\"\"\"");
1841 }
1842
        #[test]
        fn csv_escape_no_special_chars() {
            // Names without commas/quotes/newlines pass through unchanged.
            assert_eq!(csv_escape("plain-bench_name.v2"), "plain-bench_name.v2");
        }
1847
        #[test]
        fn prometheus_escape_newline_preserved() {
            // NOTE(review): the Prometheus text exposition format requires a
            // newline in a label value to be escaped as the two characters
            // `\` `n`. This test pins newline pass-through instead — confirm
            // that is intentional before relying on scraped output, since an
            // unescaped newline would break the exposition line format.
            let result = prometheus_escape_label_value("a\nb");
            assert_eq!(result, "a\nb");
        }
1857
        #[test]
        fn prometheus_escape_empty() {
            // Escaping the empty label value yields the empty string.
            assert_eq!(prometheus_escape_label_value(""), "");
        }
1862
1863 #[test]
1866 fn html_escape_all_special_chars_combined() {
1867 assert_eq!(
1868 html_escape("<tag attr=\"val\">&</tag>"),
1869 "<tag attr="val">&</tag>"
1870 );
1871 }
1872
        #[test]
        fn html_escape_empty() {
            // Escaping the empty string yields the empty string.
            assert_eq!(html_escape(""), "");
        }
1877
1878 #[test]
1881 fn format_parse_prom_alias() {
1882 assert_eq!(ExportFormat::parse("prom"), Some(ExportFormat::Prometheus));
1883 assert_eq!(ExportFormat::parse("PROM"), Some(ExportFormat::Prometheus));
1884 }
1885
        #[test]
        fn format_parse_empty_string() {
            // No format matches the empty string.
            assert_eq!(ExportFormat::parse(""), None);
        }
1890
        #[test]
        fn compare_csv_threshold_percentage() {
            // Thresholds are exported as percentages with six decimal places.
            // NOTE(review): 20%/15% presumably correspond to fail/warn
            // thresholds of 0.20/0.15 set by create_test_compare_receipt
            // (defined elsewhere in this file) — verify against the fixture.
            let receipt = create_test_compare_receipt();
            let csv = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
            assert!(csv.contains("20.000000"));
            assert!(csv.contains("15.000000"));
        }
1902
        #[test]
        fn compare_regression_pct_is_percentage() {
            // regression_pct must be exported on a 0-100 scale, not as a
            // 0-1 fraction. Expected values (10% wall_ms, 25% max_rss_kb)
            // presumably come from create_test_compare_receipt's deltas —
            // defined elsewhere in this file; verify against the fixture.
            let receipt = create_test_compare_receipt();
            let jsonl = ExportUseCase::export_compare(&receipt, ExportFormat::Jsonl).unwrap();

            for line in jsonl.trim().lines() {
                let parsed: serde_json::Value = serde_json::from_str(line).unwrap();
                let metric = parsed["metric"].as_str().unwrap();
                let regression_pct = parsed["regression_pct"].as_f64().unwrap();
                match metric {
                    "wall_ms" => {
                        assert!((regression_pct - 10.0).abs() < 0.01);
                    }
                    "max_rss_kb" => {
                        assert!((regression_pct - 25.0).abs() < 0.01);
                    }
                    _ => panic!("unexpected metric: {}", metric),
                }
            }
        }
1927 }
1928}
1929
1930#[cfg(test)]
1931mod property_tests {
1932 use super::*;
1933 use perfgate_types::{
1934 BenchMeta, Budget, COMPARE_SCHEMA_V1, CompareRef, Delta, Direction, F64Summary, HostInfo,
1935 Metric, MetricStatistic, MetricStatus, RUN_SCHEMA_V1, RunMeta, Sample, Stats, ToolInfo,
1936 U64Summary, Verdict, VerdictCounts, VerdictStatus,
1937 };
1938 use proptest::prelude::*;
1939 use std::collections::BTreeMap;
1940
1941 fn non_empty_string() -> impl Strategy<Value = String> {
1942 "[a-zA-Z0-9_-]{1,20}".prop_map(|s| s)
1943 }
1944
    /// Strategy producing a syntactically valid RFC 3339 UTC timestamp.
    /// Days stop at 28 so every generated (year, month, day) combination
    /// is a real calendar date regardless of month or leap years.
    fn rfc3339_timestamp() -> impl Strategy<Value = String> {
        (
            2020u32..2030,
            1u32..13,
            1u32..29,
            0u32..24,
            0u32..60,
            0u32..60,
        )
            .prop_map(|(year, month, day, hour, min, sec)| {
                format!(
                    "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}Z",
                    year, month, day, hour, min, sec
                )
            })
    }
1961
1962 fn tool_info_strategy() -> impl Strategy<Value = ToolInfo> {
1963 (non_empty_string(), non_empty_string())
1964 .prop_map(|(name, version)| ToolInfo { name, version })
1965 }
1966
1967 fn host_info_strategy() -> impl Strategy<Value = HostInfo> {
1968 (non_empty_string(), non_empty_string()).prop_map(|(os, arch)| HostInfo {
1969 os,
1970 arch,
1971 cpu_count: None,
1972 memory_bytes: None,
1973 hostname_hash: None,
1974 })
1975 }
1976
1977 fn run_meta_strategy() -> impl Strategy<Value = RunMeta> {
1978 (
1979 non_empty_string(),
1980 rfc3339_timestamp(),
1981 rfc3339_timestamp(),
1982 host_info_strategy(),
1983 )
1984 .prop_map(|(id, started_at, ended_at, host)| RunMeta {
1985 id,
1986 started_at,
1987 ended_at,
1988 host,
1989 })
1990 }
1991
    /// Strategy for benchmark metadata: at least one command token, at
    /// least one repeat, warmup may be zero, and cwd / work_units /
    /// timeout_ms are independently optional.
    fn bench_meta_strategy() -> impl Strategy<Value = BenchMeta> {
        (
            non_empty_string(),
            proptest::option::of(non_empty_string()),
            proptest::collection::vec(non_empty_string(), 1..5),
            1u32..100,
            0u32..10,
            proptest::option::of(1u64..10000),
            proptest::option::of(100u64..60000),
        )
            .prop_map(
                |(name, cwd, command, repeat, warmup, work_units, timeout_ms)| BenchMeta {
                    name,
                    cwd,
                    command,
                    repeat,
                    warmup,
                    work_units,
                    timeout_ms,
                },
            )
    }
2014
2015 fn sample_strategy() -> impl Strategy<Value = Sample> {
2016 (
2017 0u64..100000,
2018 -128i32..128,
2019 any::<bool>(),
2020 any::<bool>(),
2021 (
2022 proptest::option::of(0u64..1000000), proptest::option::of(0u64..1000000), proptest::option::of(0u64..1000000), proptest::option::of(0u64..1000000), ),
2027 (
2028 proptest::option::of(0u64..1000000), proptest::option::of(0u64..1000000), proptest::option::of(0u64..1000000), proptest::option::of(0u64..1000000), proptest::option::of(0u64..100000000), ),
2034 )
2035 .prop_map(
2036 |(
2037 wall_ms,
2038 exit_code,
2039 warmup,
2040 timed_out,
2041 (cpu_ms, page_faults, ctx_switches, max_rss_kb),
2042 (io_read_bytes, io_write_bytes, network_packets, energy_uj, binary_bytes),
2043 )| Sample {
2044 wall_ms,
2045 exit_code,
2046 warmup,
2047 timed_out,
2048 cpu_ms,
2049 page_faults,
2050 ctx_switches,
2051 max_rss_kb,
2052 io_read_bytes,
2053 io_write_bytes,
2054 network_packets,
2055 energy_uj,
2056 binary_bytes,
2057 stdout: None,
2058 stderr: None,
2059 },
2060 )
2061 }
2062
2063 fn u64_summary_strategy() -> impl Strategy<Value = U64Summary> {
2064 (0u64..1000000, 0u64..1000000, 0u64..1000000).prop_map(|(a, b, c)| {
2065 let mut vals = [a, b, c];
2066 vals.sort();
2067 U64Summary::new(vals[1], vals[0], vals[2])
2068 })
2069 }
2070
2071 fn f64_summary_strategy() -> impl Strategy<Value = F64Summary> {
2072 (0.0f64..1000000.0, 0.0f64..1000000.0, 0.0f64..1000000.0).prop_map(|(a, b, c)| {
2073 let mut vals = [a, b, c];
2074 vals.sort_by(|x, y| x.partial_cmp(y).unwrap());
2075 F64Summary::new(vals[1], vals[0], vals[2])
2076 })
2077 }
2078
2079 fn stats_strategy() -> impl Strategy<Value = Stats> {
2080 (
2081 u64_summary_strategy(),
2082 (
2083 proptest::option::of(u64_summary_strategy()), proptest::option::of(u64_summary_strategy()), proptest::option::of(u64_summary_strategy()), proptest::option::of(u64_summary_strategy()), ),
2088 (
2089 proptest::option::of(u64_summary_strategy()), proptest::option::of(u64_summary_strategy()), proptest::option::of(u64_summary_strategy()), proptest::option::of(u64_summary_strategy()), proptest::option::of(u64_summary_strategy()), ),
2095 proptest::option::of(f64_summary_strategy()),
2096 )
2097 .prop_map(
2098 |(
2099 wall_ms,
2100 (cpu_ms, page_faults, ctx_switches, max_rss_kb),
2101 (io_read_bytes, io_write_bytes, network_packets, energy_uj, binary_bytes),
2102 throughput_per_s,
2103 )| Stats {
2104 wall_ms,
2105 cpu_ms,
2106 page_faults,
2107 ctx_switches,
2108 max_rss_kb,
2109 io_read_bytes,
2110 io_write_bytes,
2111 network_packets,
2112 energy_uj,
2113 binary_bytes,
2114 throughput_per_s,
2115 },
2116 )
2117 }
2118
    /// Strategy for a complete run receipt with 1-9 samples; the schema tag
    /// is fixed to the current run-schema version.
    fn run_receipt_strategy() -> impl Strategy<Value = RunReceipt> {
        (
            tool_info_strategy(),
            run_meta_strategy(),
            bench_meta_strategy(),
            proptest::collection::vec(sample_strategy(), 1..10),
            stats_strategy(),
        )
            .prop_map(|(tool, run, bench, samples, stats)| RunReceipt {
                schema: RUN_SCHEMA_V1.to_string(),
                tool,
                run,
                bench,
                samples,
                stats,
            })
    }
2136
    /// Strategy picking either budget direction with equal probability.
    fn direction_strategy() -> impl Strategy<Value = Direction> {
        prop_oneof![Just(Direction::Lower), Just(Direction::Higher),]
    }
2140
    /// Strategy for budgets. warn_threshold = threshold * factor with
    /// factor < 1, so the warn level always sits below the fail threshold.
    fn budget_strategy() -> impl Strategy<Value = Budget> {
        (0.01f64..1.0, 0.01f64..1.0, direction_strategy()).prop_map(
            |(threshold, warn_factor, direction)| {
                let warn_threshold = threshold * warn_factor;
                Budget {
                    noise_threshold: None,
                    noise_policy: perfgate_types::NoisePolicy::Ignore,
                    threshold,
                    warn_threshold,
                    direction,
                }
            },
        )
    }
2155
    /// Strategy covering all four per-metric outcomes.
    fn metric_status_strategy() -> impl Strategy<Value = MetricStatus> {
        prop_oneof![
            Just(MetricStatus::Pass),
            Just(MetricStatus::Warn),
            Just(MetricStatus::Fail),
            Just(MetricStatus::Skip),
        ]
    }
2164
2165 fn delta_strategy() -> impl Strategy<Value = Delta> {
2166 (0.1f64..10000.0, 0.1f64..10000.0, metric_status_strategy()).prop_map(
2167 |(baseline, current, status)| {
2168 let ratio = current / baseline;
2169 let pct = (current - baseline) / baseline;
2170 let regression = if pct > 0.0 { pct } else { 0.0 };
2171 Delta {
2172 baseline,
2173 current,
2174 ratio,
2175 pct,
2176 regression,
2177 cv: None,
2178 noise_threshold: None,
2179 statistic: MetricStatistic::Median,
2180 significance: None,
2181 status,
2182 }
2183 },
2184 )
2185 }
2186
    /// Strategy covering all four overall verdict outcomes.
    fn verdict_status_strategy() -> impl Strategy<Value = VerdictStatus> {
        prop_oneof![
            Just(VerdictStatus::Pass),
            Just(VerdictStatus::Warn),
            Just(VerdictStatus::Fail),
            Just(VerdictStatus::Skip),
        ]
    }
2195
2196 fn verdict_counts_strategy() -> impl Strategy<Value = VerdictCounts> {
2197 (0u32..10, 0u32..10, 0u32..10, 0u32..10).prop_map(|(pass, warn, fail, skip)| {
2198 VerdictCounts {
2199 pass,
2200 warn,
2201 fail,
2202 skip,
2203 }
2204 })
2205 }
2206
    /// Strategy for a verdict: status, counts, and 0-4 short free-text
    /// reason strings.
    fn verdict_strategy() -> impl Strategy<Value = Verdict> {
        (
            verdict_status_strategy(),
            verdict_counts_strategy(),
            proptest::collection::vec("[a-zA-Z0-9 ]{1,50}", 0..5),
        )
            .prop_map(|(status, counts, reasons)| Verdict {
                status,
                counts,
                reasons,
            })
    }
2219
    /// Strategy picking any one of the exportable metrics.
    fn metric_strategy() -> impl Strategy<Value = Metric> {
        prop_oneof![
            Just(Metric::BinaryBytes),
            Just(Metric::CpuMs),
            Just(Metric::CtxSwitches),
            Just(Metric::IoReadBytes),
            Just(Metric::IoWriteBytes),
            Just(Metric::MaxRssKb),
            Just(Metric::NetworkPackets),
            Just(Metric::PageFaults),
            Just(Metric::ThroughputPerS),
            Just(Metric::WallMs),
        ]
    }
2234
    /// Strategy for 1-7 distinct metrics, each with its own budget.
    fn budgets_map_strategy() -> impl Strategy<Value = BTreeMap<Metric, Budget>> {
        proptest::collection::btree_map(metric_strategy(), budget_strategy(), 1..8)
    }
2238
    /// Strategy for 1-7 distinct metrics, each with its own delta.
    fn deltas_map_strategy() -> impl Strategy<Value = BTreeMap<Metric, Delta>> {
        proptest::collection::btree_map(metric_strategy(), delta_strategy(), 1..8)
    }
2242
2243 fn compare_ref_strategy() -> impl Strategy<Value = CompareRef> {
2244 (
2245 proptest::option::of(non_empty_string()),
2246 proptest::option::of(non_empty_string()),
2247 )
2248 .prop_map(|(path, run_id)| CompareRef { path, run_id })
2249 }
2250
    /// Strategy for a complete compare receipt. Budgets and deltas are
    /// sampled independently, so a delta may lack a matching budget and
    /// vice versa; the schema tag is fixed to the current compare version.
    fn compare_receipt_strategy() -> impl Strategy<Value = CompareReceipt> {
        (
            tool_info_strategy(),
            bench_meta_strategy(),
            compare_ref_strategy(),
            compare_ref_strategy(),
            budgets_map_strategy(),
            deltas_map_strategy(),
            verdict_strategy(),
        )
            .prop_map(
                |(tool, bench, baseline_ref, current_ref, budgets, deltas, verdict)| {
                    CompareReceipt {
                        schema: COMPARE_SCHEMA_V1.to_string(),
                        tool,
                        bench,
                        baseline_ref,
                        current_ref,
                        budgets,
                        deltas,
                        verdict,
                    }
                },
            )
    }
2276
2277 proptest! {
2278 #![proptest_config(ProptestConfig::with_cases(50))]
2279
2280 #[test]
2281 fn run_export_csv_has_header_and_data(receipt in run_receipt_strategy()) {
2282 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
2283
2284 prop_assert!(csv.starts_with("bench_name,wall_ms_median,wall_ms_min,wall_ms_max,binary_bytes_median,cpu_ms_median,ctx_switches_median,max_rss_kb_median,page_faults_median,io_read_bytes_median,io_write_bytes_median,network_packets_median,energy_uj_median,throughput_median,sample_count,timestamp\n"));
2285
2286 let lines: Vec<&str> = csv.trim().split('\n').collect();
2287 prop_assert_eq!(lines.len(), 2);
2288
2289 let bench_in_csv = csv.contains(&receipt.bench.name) || csv.contains(&format!("\"{}\"", receipt.bench.name));
2290 prop_assert!(bench_in_csv, "CSV should contain bench name");
2291 }
2292
2293 #[test]
2294 fn run_export_jsonl_is_valid_json(receipt in run_receipt_strategy()) {
2295 let jsonl = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
2296
2297 let lines: Vec<&str> = jsonl.trim().split('\n').collect();
2298 prop_assert_eq!(lines.len(), 1);
2299
2300 let parsed: Result<serde_json::Value, _> = serde_json::from_str(lines[0]);
2301 prop_assert!(parsed.is_ok());
2302
2303 let json = parsed.unwrap();
2304 prop_assert_eq!(json["bench_name"].as_str().unwrap(), receipt.bench.name);
2305 }
2306
2307 #[test]
2308 fn compare_export_csv_metrics_sorted(receipt in compare_receipt_strategy()) {
2309 let csv = ExportUseCase::export_compare(&receipt, ExportFormat::Csv).unwrap();
2310
2311 let lines: Vec<&str> = csv.trim().split('\n').skip(1).collect();
2312
2313 let mut metrics: Vec<String> = vec![];
2314 for line in &lines {
2315 let parts: Vec<&str> = line.split(',').collect();
2316 if parts.len() > 1 {
2317 metrics.push(parts[1].trim_matches('"').to_string());
2318 }
2319 }
2320
2321 let mut sorted_metrics = metrics.clone();
2322 sorted_metrics.sort();
2323
2324 prop_assert_eq!(metrics, sorted_metrics, "Metrics should be sorted alphabetically");
2325 }
2326
2327 #[test]
2328 fn compare_export_jsonl_line_per_metric(receipt in compare_receipt_strategy()) {
2329 let jsonl = ExportUseCase::export_compare(&receipt, ExportFormat::Jsonl).unwrap();
2330
2331 let lines: Vec<&str> = jsonl.trim().split('\n').filter(|s| !s.is_empty()).collect();
2332 prop_assert_eq!(lines.len(), receipt.deltas.len());
2333
2334 for line in &lines {
2335 let parsed: Result<serde_json::Value, _> = serde_json::from_str(line);
2336 prop_assert!(parsed.is_ok());
2337 }
2338 }
2339
2340 #[test]
2341 fn export_is_deterministic(receipt in run_receipt_strategy()) {
2342 let csv1 = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
2343 let csv2 = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
2344 prop_assert_eq!(csv1, csv2);
2345
2346 let jsonl1 = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
2347 let jsonl2 = ExportUseCase::export_run(&receipt, ExportFormat::Jsonl).unwrap();
2348 prop_assert_eq!(jsonl1, jsonl2);
2349 }
2350
2351 #[test]
2352 fn html_output_contains_valid_structure(receipt in run_receipt_strategy()) {
2353 let html = ExportUseCase::export_run(&receipt, ExportFormat::Html).unwrap();
2354
2355 prop_assert!(html.starts_with("<!doctype html>"));
2356 prop_assert!(html.contains("<html>"));
2357 prop_assert!(html.contains("</html>"));
2358 prop_assert!(html.contains("<table"));
2359 prop_assert!(html.contains("</table>"));
2360 prop_assert!(html.contains(&receipt.bench.name));
2361 }
2362
2363 #[test]
2364 fn prometheus_output_valid_format(receipt in run_receipt_strategy()) {
2365 let prom = ExportUseCase::export_run(&receipt, ExportFormat::Prometheus).unwrap();
2366
2367 prop_assert!(prom.contains("perfgate_run_wall_ms_median"));
2368 let bench_label = format!("bench=\"{}\"", receipt.bench.name);
2369 prop_assert!(prom.contains(&bench_label));
2370
2371 for line in prom.lines() {
2372 if !line.is_empty() {
2373 let has_open = line.chars().any(|c| c == '{');
2374 let has_close = line.chars().any(|c| c == '}');
2375 prop_assert!(has_open, "Prometheus line should contain opening brace");
2376 prop_assert!(has_close, "Prometheus line should contain closing brace");
2377 }
2378 }
2379 }
2380
2381 #[test]
2382 fn csv_escape_preserves_content(receipt in run_receipt_strategy()) {
2383 let csv = ExportUseCase::export_run(&receipt, ExportFormat::Csv).unwrap();
2384
2385 let quoted_bench = format!("\"{}\"", receipt.bench.name);
2386 prop_assert!(csv.contains(&receipt.bench.name) || csv.contains("ed_bench));
2387
2388 for line in csv.lines() {
2389 let quoted_count = line.matches('"').count();
2390 prop_assert!(quoted_count % 2 == 0, "Quotes should be balanced in CSV");
2391 }
2392 }
2393 }
2394}