1use crate::Clock;
9use perfgate_domain::compute_stats;
10use perfgate_types::{BenchMeta, HostInfo, RUN_SCHEMA_V1, RunMeta, RunReceipt, Sample, ToolInfo};
11use std::path::{Path, PathBuf};
12
/// A single benchmark measurement extracted either from criterion's
/// `estimates.json` files or from libtest console output.
#[derive(Debug, Clone, PartialEq)]
pub struct ParsedBenchmark {
    /// Benchmark identifier. For criterion this is the directory path relative
    /// to the criterion dir with `/` separators; for libtest it is the test name.
    pub name: String,
    /// Point estimate of the benchmark time, in nanoseconds per iteration.
    pub estimate_ns: f64,
    /// Measurement error in nanoseconds (criterion standard error, or the
    /// libtest `+/-` deviation); `None` when the source reported none.
    pub error_ns: Option<f64>,
    /// Which harness produced this measurement.
    pub source: BenchSource,
}
25
/// The benchmark harness a measurement came from.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BenchSource {
    /// Parsed from criterion's `<bench>/new/estimates.json` files on disk.
    Criterion,
    /// Parsed from `test <name> ... bench: N ns/iter` lines of libtest output.
    Libtest,
}
32
/// Deserialized subset of criterion's `estimates.json`.
///
/// Each statistic may be `null` or absent in the file, hence the `Option`s.
/// Selection priority when reading: slope, then mean, then median
/// (see `parse_criterion_estimates`).
#[derive(Debug, serde::Deserialize)]
pub struct CriterionEstimates {
    pub mean: Option<CriterionEstimate>,
    pub median: Option<CriterionEstimate>,
    pub slope: Option<CriterionEstimate>,
}
40
/// One statistic entry (mean/median/slope) from criterion's `estimates.json`.
#[derive(Debug, serde::Deserialize)]
pub struct CriterionEstimate {
    /// Confidence interval around the point estimate.
    pub confidence_interval: CriterionConfidenceInterval,
    /// Point estimate; consumed as nanoseconds by `parse_criterion_estimates`.
    pub point_estimate: f64,
    /// Standard error of the estimate, in the same units.
    pub standard_error: f64,
}
50
/// Confidence interval as serialized by criterion.
#[derive(Debug, serde::Deserialize)]
pub struct CriterionConfidenceInterval {
    /// Confidence level of the interval (e.g. 0.95).
    pub confidence_level: f64,
    pub lower_bound: f64,
    pub upper_bound: f64,
}
57
/// Options for a `cargo bench` capture run.
///
/// NOTE(review): only `bench_target`/`extra_args` are consumed by code visible
/// in this module (`build_cargo_bench_command`); the remaining fields appear to
/// be consumed by the command runner elsewhere — descriptions for those are
/// inferred from the field names and should be confirmed against the caller.
#[derive(Debug, Clone, Default)]
pub struct CargoBenchRequest {
    /// Specific bench target (`cargo bench --bench <name>`); all targets when `None`.
    pub bench_target: Option<String>,
    /// Extra arguments forwarded to the bench binary after `--`.
    pub extra_args: Vec<String>,
    /// Presumably: where to write the resulting receipt(s) — confirm.
    pub out: Option<PathBuf>,
    /// Presumably: baseline receipt/path to compare against — confirm.
    pub compare_baseline: Option<PathBuf>,
    /// Presumably: pretty-print serialized output — confirm.
    pub pretty: bool,
    /// Override for the cargo target directory (else see `detect_target_dir`).
    pub target_dir: Option<PathBuf>,
    /// Presumably: include a hashed hostname in `HostInfo` — confirm.
    pub include_hostname_hash: bool,
}
76
/// Result of a `cargo bench` capture.
#[derive(Debug, Clone)]
pub struct CargoBenchOutcome {
    /// Receipts produced from the parsed benchmarks.
    pub receipts: Vec<RunReceipt>,
    /// Harness the measurements were parsed from.
    pub source: BenchSource,
    /// Number of individual benchmarks that were parsed.
    pub bench_count: usize,
}
87
88pub fn scan_criterion_dir(criterion_dir: &Path) -> anyhow::Result<Vec<ParsedBenchmark>> {
99 let mut results = Vec::new();
100 scan_criterion_recursive(criterion_dir, criterion_dir, &mut results)?;
101 results.sort_by(|a, b| a.name.cmp(&b.name));
102 Ok(results)
103}
104
105fn scan_criterion_recursive(
106 base_dir: &Path,
107 current_dir: &Path,
108 results: &mut Vec<ParsedBenchmark>,
109) -> anyhow::Result<()> {
110 let estimates_path = current_dir.join("new").join("estimates.json");
111 if estimates_path.is_file() {
112 let relative = current_dir
113 .strip_prefix(base_dir)
114 .unwrap_or(current_dir)
115 .to_string_lossy()
116 .replace('\\', "/");
117
118 if let Ok(parsed) = parse_criterion_estimates(&estimates_path) {
119 results.push(ParsedBenchmark {
120 name: relative,
121 estimate_ns: parsed.estimate_ns,
122 error_ns: parsed.error_ns,
123 source: BenchSource::Criterion,
124 });
125 }
126 return Ok(());
127 }
128
129 if current_dir.is_dir() {
130 let entries = std::fs::read_dir(current_dir)?;
131 for entry in entries {
132 let entry = entry?;
133 let path = entry.path();
134 if path.is_dir() {
135 let name = entry.file_name();
137 let name_str = name.to_string_lossy();
138 if name_str == "report" || name_str.starts_with('.') {
139 continue;
140 }
141 scan_criterion_recursive(base_dir, &path, results)?;
142 }
143 }
144 }
145
146 Ok(())
147}
148
/// Intermediate result of parsing a single `estimates.json`.
struct ParsedEstimate {
    // Point estimate in nanoseconds.
    estimate_ns: f64,
    // Standard error in nanoseconds, when available.
    error_ns: Option<f64>,
}
153
154fn parse_criterion_estimates(path: &Path) -> anyhow::Result<ParsedEstimate> {
155 let content = std::fs::read_to_string(path)?;
156 let estimates: CriterionEstimates = serde_json::from_str(&content)?;
157
158 let est = estimates
160 .slope
161 .as_ref()
162 .or(estimates.mean.as_ref())
163 .or(estimates.median.as_ref())
164 .ok_or_else(|| anyhow::anyhow!("no estimate found in {}", path.display()))?;
165
166 Ok(ParsedEstimate {
167 estimate_ns: est.point_estimate,
168 error_ns: Some(est.standard_error),
169 })
170}
171
172pub fn parse_libtest_output(output: &str) -> Vec<ParsedBenchmark> {
180 let mut results = Vec::new();
181
182 for line in output.lines() {
183 if let Some(parsed) = parse_libtest_line(line) {
184 results.push(parsed);
185 }
186 }
187
188 results.sort_by(|a, b| a.name.cmp(&b.name));
189 results
190}
191
192fn parse_libtest_line(line: &str) -> Option<ParsedBenchmark> {
193 let line = line.trim();
195 if !line.starts_with("test ") {
196 return None;
197 }
198
199 let rest = &line["test ".len()..];
200
201 let bench_marker = "... bench:";
203 let bench_idx = rest.find(bench_marker)?;
204 let name = rest[..bench_idx].trim().to_string();
205 let after_bench = &rest[bench_idx + bench_marker.len()..];
206
207 let after_bench = after_bench.trim();
209
210 let ns_iter_idx = after_bench.find("ns/iter")?;
212 let ns_str = after_bench[..ns_iter_idx].trim().replace(',', "");
213 let estimate_ns: f64 = ns_str.parse().ok()?;
214
215 let error_ns = if let Some(paren_start) = after_bench.find("(+/- ") {
217 let after_paren = &after_bench[paren_start + "(+/- ".len()..];
218 if let Some(paren_end) = after_paren.find(')') {
219 let error_str = after_paren[..paren_end].trim().replace(',', "");
220 error_str.parse().ok()
221 } else {
222 None
223 }
224 } else {
225 None
226 };
227
228 Some(ParsedBenchmark {
229 name,
230 estimate_ns,
231 error_ns,
232 source: BenchSource::Libtest,
233 })
234}
235
236pub fn detect_criterion(target_dir: &Path) -> bool {
243 let criterion_dir = target_dir.join("criterion");
244 if !criterion_dir.is_dir() {
245 return false;
246 }
247 has_estimates_json(&criterion_dir)
249}
250
/// Recursively check whether `dir` (or any non-`report`, non-hidden
/// subdirectory) contains a `new/estimates.json` file. Unreadable directories
/// are treated as containing nothing.
fn has_estimates_json(dir: &Path) -> bool {
    if dir.join("new").join("estimates.json").is_file() {
        return true;
    }
    match std::fs::read_dir(dir) {
        Ok(entries) => entries.flatten().any(|entry| {
            let path = entry.path();
            if !path.is_dir() {
                return false;
            }
            let file_name = entry.file_name();
            let dir_name = file_name.to_string_lossy();
            // Skip criterion's HTML "report" dir and dot-directories.
            dir_name != "report" && !dir_name.starts_with('.') && has_estimates_json(&path)
        }),
        Err(_) => false,
    }
}
270
271pub fn benchmarks_to_receipt(
278 benchmarks: &[ParsedBenchmark],
279 name: &str,
280 tool: &ToolInfo,
281 host: &HostInfo,
282 clock: &dyn Clock,
283 command: &[String],
284) -> anyhow::Result<RunReceipt> {
285 if benchmarks.is_empty() {
286 anyhow::bail!("no benchmarks found");
287 }
288
289 let run_id = uuid::Uuid::new_v4().to_string();
290 let started_at = clock.now_rfc3339();
291
292 let mut samples: Vec<Sample> = Vec::new();
295
296 for bench in benchmarks {
297 let wall_ms = (bench.estimate_ns / 1_000_000.0).round().max(1.0) as u64;
298 samples.push(Sample {
299 wall_ms,
300 exit_code: 0,
301 warmup: false,
302 timed_out: false,
303 cpu_ms: None,
304 page_faults: None,
305 ctx_switches: None,
306 max_rss_kb: None,
307 io_read_bytes: None,
308 io_write_bytes: None,
309 network_packets: None,
310 energy_uj: None,
311 binary_bytes: None,
312 stdout: None,
313 stderr: None,
314 });
315 }
316
317 let stats = compute_stats(&samples, None)?;
318 let ended_at = clock.now_rfc3339();
319
320 Ok(RunReceipt {
321 schema: RUN_SCHEMA_V1.to_string(),
322 tool: tool.clone(),
323 run: RunMeta {
324 id: run_id,
325 started_at,
326 ended_at,
327 host: host.clone(),
328 },
329 bench: BenchMeta {
330 name: name.to_string(),
331 cwd: None,
332 command: command.to_vec(),
333 repeat: benchmarks.len() as u32,
334 warmup: 0,
335 work_units: None,
336 timeout_ms: None,
337 },
338 samples,
339 stats,
340 })
341}
342
343pub fn benchmarks_to_individual_receipts(
345 benchmarks: &[ParsedBenchmark],
346 tool: &ToolInfo,
347 host: &HostInfo,
348 clock: &dyn Clock,
349 command: &[String],
350) -> anyhow::Result<Vec<RunReceipt>> {
351 let mut receipts = Vec::new();
352
353 for bench in benchmarks {
354 let wall_ms = (bench.estimate_ns / 1_000_000.0).round().max(1.0) as u64;
355
356 let sample = Sample {
357 wall_ms,
358 exit_code: 0,
359 warmup: false,
360 timed_out: false,
361 cpu_ms: None,
362 page_faults: None,
363 ctx_switches: None,
364 max_rss_kb: None,
365 io_read_bytes: None,
366 io_write_bytes: None,
367 network_packets: None,
368 energy_uj: None,
369 binary_bytes: None,
370 stdout: None,
371 stderr: None,
372 };
373
374 let run_id = uuid::Uuid::new_v4().to_string();
375 let ts = clock.now_rfc3339();
376 let stats = compute_stats(std::slice::from_ref(&sample), None)?;
377
378 receipts.push(RunReceipt {
379 schema: RUN_SCHEMA_V1.to_string(),
380 tool: tool.clone(),
381 run: RunMeta {
382 id: run_id,
383 started_at: ts.clone(),
384 ended_at: ts,
385 host: host.clone(),
386 },
387 bench: BenchMeta {
388 name: bench.name.clone(),
389 cwd: None,
390 command: command.to_vec(),
391 repeat: 1,
392 warmup: 0,
393 work_units: None,
394 timeout_ms: None,
395 },
396 samples: vec![sample],
397 stats,
398 });
399 }
400
401 Ok(receipts)
402}
403
/// Assemble the argv for a `cargo bench` invocation.
///
/// `bench_target` adds `--bench <target>`; `extra_args`, when non-empty, are
/// appended after a `--` separator so cargo forwards them to the bench binary.
pub fn build_cargo_bench_command(bench_target: Option<&str>, extra_args: &[String]) -> Vec<String> {
    let mut argv: Vec<String> = ["cargo", "bench"].iter().map(|s| s.to_string()).collect();

    if let Some(target) = bench_target {
        argv.push("--bench".to_string());
        argv.push(target.to_string());
    }

    match extra_args {
        [] => {}
        args => {
            argv.push("--".to_string());
            argv.extend(args.iter().cloned());
        }
    }

    argv
}
420
/// Resolve the cargo target directory.
///
/// Honors `CARGO_TARGET_DIR` when it is set to a non-empty value; otherwise
/// falls back to the conventional `target` directory relative to the current
/// working directory.
///
/// An empty `CARGO_TARGET_DIR` is treated as unset: previously it produced
/// `PathBuf::from("")`, an unusable empty path (cargo itself rejects an empty
/// value for this variable).
pub fn detect_target_dir() -> PathBuf {
    if let Ok(dir) = std::env::var("CARGO_TARGET_DIR") {
        if !dir.is_empty() {
            return PathBuf::from(dir);
        }
    }

    PathBuf::from("target")
}
431
#[cfg(test)]
mod tests {
    use super::*;

    // --- estimate selection: slope > mean > median ---

    #[test]
    fn parse_criterion_estimates_with_slope() {
        let json = r#"{
            "mean": {
                "confidence_interval": {"confidence_level": 0.95, "lower_bound": 100.0, "upper_bound": 200.0},
                "point_estimate": 150.0,
                "standard_error": 5.0
            },
            "median": {
                "confidence_interval": {"confidence_level": 0.95, "lower_bound": 90.0, "upper_bound": 180.0},
                "point_estimate": 140.0,
                "standard_error": 4.0
            },
            "slope": {
                "confidence_interval": {"confidence_level": 0.95, "lower_bound": 95.0, "upper_bound": 190.0},
                "point_estimate": 145.0,
                "standard_error": 3.0
            }
        }"#;

        let estimates: CriterionEstimates = serde_json::from_str(json).unwrap();
        // Mirrors the selection chain in parse_criterion_estimates; slope
        // must win when all three statistics are present.
        let est = estimates
            .slope
            .as_ref()
            .or(estimates.mean.as_ref())
            .or(estimates.median.as_ref())
            .unwrap();
        assert!((est.point_estimate - 145.0).abs() < f64::EPSILON);
        assert!((est.standard_error - 3.0).abs() < f64::EPSILON);
    }

    #[test]
    fn parse_criterion_estimates_fallback_to_mean() {
        // With slope null, mean is the next preference.
        let json = r#"{
            "mean": {
                "confidence_interval": {"confidence_level": 0.95, "lower_bound": 100.0, "upper_bound": 200.0},
                "point_estimate": 150.0,
                "standard_error": 5.0
            },
            "median": {
                "confidence_interval": {"confidence_level": 0.95, "lower_bound": 90.0, "upper_bound": 180.0},
                "point_estimate": 140.0,
                "standard_error": 4.0
            },
            "slope": null
        }"#;

        let estimates: CriterionEstimates = serde_json::from_str(json).unwrap();
        let est = estimates
            .slope
            .as_ref()
            .or(estimates.mean.as_ref())
            .or(estimates.median.as_ref())
            .unwrap();
        assert!((est.point_estimate - 150.0).abs() < f64::EPSILON);
    }

    // --- libtest line parsing ---

    #[test]
    fn parse_libtest_basic_line() {
        let line = "test bench_sort ... bench: 5,000 ns/iter (+/- 150)";
        let result = parse_libtest_line(line).unwrap();
        assert_eq!(result.name, "bench_sort");
        assert!((result.estimate_ns - 5000.0).abs() < f64::EPSILON);
        assert!((result.error_ns.unwrap() - 150.0).abs() < f64::EPSILON);
        assert_eq!(result.source, BenchSource::Libtest);
    }

    #[test]
    fn parse_libtest_no_comma() {
        let line = "test bench_add ... bench: 100 ns/iter (+/- 10)";
        let result = parse_libtest_line(line).unwrap();
        assert_eq!(result.name, "bench_add");
        assert!((result.estimate_ns - 100.0).abs() < f64::EPSILON);
        assert!((result.error_ns.unwrap() - 10.0).abs() < f64::EPSILON);
    }

    #[test]
    fn parse_libtest_large_number() {
        // Thousands separators must be stripped before parsing.
        let line = "test bench_heavy ... bench: 1,234,567 ns/iter (+/- 12,345)";
        let result = parse_libtest_line(line).unwrap();
        assert_eq!(result.name, "bench_heavy");
        assert!((result.estimate_ns - 1_234_567.0).abs() < f64::EPSILON);
        assert!((result.error_ns.unwrap() - 12_345.0).abs() < f64::EPSILON);
    }

    #[test]
    fn parse_libtest_ignores_non_bench_lines() {
        // Harness chatter, passing tests, and summaries all yield None.
        assert!(parse_libtest_line("running 3 tests").is_none());
        assert!(parse_libtest_line("test bench_ok ... ok").is_none());
        assert!(parse_libtest_line("").is_none());
        assert!(parse_libtest_line("test result: ok").is_none());
    }

    #[test]
    fn parse_libtest_output_multiple_lines() {
        let output = r#"
running 3 tests
test bench_add ... bench: 100 ns/iter (+/- 10)
test bench_mul ... bench: 200 ns/iter (+/- 20)
test bench_sort ... bench: 5,000 ns/iter (+/- 150)

test result: ok. 0 passed; 0 failed; 0 ignored; 3 measured; 0 filtered out
"#;
        let results = parse_libtest_output(output);
        assert_eq!(results.len(), 3);
        // Output is sorted by name.
        assert_eq!(results[0].name, "bench_add");
        assert_eq!(results[1].name, "bench_mul");
        assert_eq!(results[2].name, "bench_sort");
    }

    #[test]
    fn parse_libtest_empty_output() {
        let output = "running 0 tests\n\ntest result: ok.\n";
        let results = parse_libtest_output(output);
        assert!(results.is_empty());
    }

    // --- cargo bench command construction ---

    #[test]
    fn build_command_no_args() {
        let cmd = build_cargo_bench_command(None, &[]);
        assert_eq!(cmd, vec!["cargo", "bench"]);
    }

    #[test]
    fn build_command_with_bench_target() {
        let cmd = build_cargo_bench_command(Some("my_bench"), &[]);
        assert_eq!(cmd, vec!["cargo", "bench", "--bench", "my_bench"]);
    }

    #[test]
    fn build_command_with_extra_args() {
        let cmd =
            build_cargo_bench_command(None, &["--features".to_string(), "my-feature".to_string()]);
        assert_eq!(
            cmd,
            vec!["cargo", "bench", "--", "--features", "my-feature"]
        );
    }

    #[test]
    fn build_command_with_both() {
        let cmd = build_cargo_bench_command(Some("my_bench"), &["--nocapture".to_string()]);
        assert_eq!(
            cmd,
            vec!["cargo", "bench", "--bench", "my_bench", "--", "--nocapture"]
        );
    }

    // --- detection helpers ---

    #[test]
    fn detect_criterion_returns_false_on_missing_dir() {
        assert!(!detect_criterion(Path::new("/nonexistent/target")));
    }

    #[test]
    fn detect_target_dir_default() {
        // Ensure the env override is absent so the default applies.
        unsafe { std::env::remove_var("CARGO_TARGET_DIR") };
        let dir = detect_target_dir();
        assert_eq!(dir, PathBuf::from("target"));
    }

    // --- receipt construction ---

    #[test]
    fn benchmarks_to_receipt_empty_fails() {
        // Deterministic clock stub for receipt timestamps.
        struct FakeClock;
        impl Clock for FakeClock {
            fn now_rfc3339(&self) -> String {
                "2024-01-01T00:00:00Z".to_string()
            }
        }

        let tool = ToolInfo {
            name: "perfgate".into(),
            version: "0.1.0".into(),
        };
        let host = HostInfo {
            os: "linux".into(),
            arch: "x86_64".into(),
            cpu_count: None,
            memory_bytes: None,
            hostname_hash: None,
        };
        let clock = FakeClock;

        let result = benchmarks_to_receipt(&[], "test", &tool, &host, &clock, &["cargo".into()]);
        assert!(result.is_err());
    }

    #[test]
    fn benchmarks_to_receipt_creates_valid_receipt() {
        struct FakeClock;
        impl Clock for FakeClock {
            fn now_rfc3339(&self) -> String {
                "2024-01-01T00:00:00Z".to_string()
            }
        }

        let benchmarks = vec![
            ParsedBenchmark {
                name: "bench_a".into(),
                estimate_ns: 5_000_000.0, error_ns: Some(100_000.0),
                source: BenchSource::Criterion,
            },
            ParsedBenchmark {
                name: "bench_b".into(),
                estimate_ns: 10_000_000.0, error_ns: Some(200_000.0),
                source: BenchSource::Criterion,
            },
        ];

        let tool = ToolInfo {
            name: "perfgate".into(),
            version: "0.1.0".into(),
        };
        let host = HostInfo {
            os: "linux".into(),
            arch: "x86_64".into(),
            cpu_count: None,
            memory_bytes: None,
            hostname_hash: None,
        };
        let clock = FakeClock;

        let receipt = benchmarks_to_receipt(
            &benchmarks,
            "cargo-bench",
            &tool,
            &host,
            &clock,
            &["cargo".into(), "bench".into()],
        )
        .unwrap();

        assert_eq!(receipt.schema, "perfgate.run.v1");
        assert_eq!(receipt.bench.name, "cargo-bench");
        assert_eq!(receipt.samples.len(), 2);
        assert_eq!(receipt.bench.repeat, 2);
        // 5 ms and 10 ms converted from the ns estimates.
        assert_eq!(receipt.samples[0].wall_ms, 5);
        assert_eq!(receipt.samples[1].wall_ms, 10);
    }

    #[test]
    fn benchmarks_to_individual_receipts_creates_one_per_bench() {
        struct FakeClock;
        impl Clock for FakeClock {
            fn now_rfc3339(&self) -> String {
                "2024-01-01T00:00:00Z".to_string()
            }
        }

        let benchmarks = vec![
            ParsedBenchmark {
                name: "bench_a".into(),
                estimate_ns: 5_000_000.0,
                error_ns: Some(100_000.0),
                source: BenchSource::Libtest,
            },
            ParsedBenchmark {
                name: "bench_b".into(),
                estimate_ns: 10_000_000.0,
                error_ns: None,
                source: BenchSource::Libtest,
            },
        ];

        let tool = ToolInfo {
            name: "perfgate".into(),
            version: "0.1.0".into(),
        };
        let host = HostInfo {
            os: "linux".into(),
            arch: "x86_64".into(),
            cpu_count: None,
            memory_bytes: None,
            hostname_hash: None,
        };
        let clock = FakeClock;

        let receipts = benchmarks_to_individual_receipts(
            &benchmarks,
            &tool,
            &host,
            &clock,
            &["cargo".into(), "bench".into()],
        )
        .unwrap();

        assert_eq!(receipts.len(), 2);
        assert_eq!(receipts[0].bench.name, "bench_a");
        assert_eq!(receipts[0].samples[0].wall_ms, 5);
        assert_eq!(receipts[1].bench.name, "bench_b");
        assert_eq!(receipts[1].samples[0].wall_ms, 10);
    }

    // --- criterion directory scanning ---

    #[test]
    fn scan_criterion_dir_on_tempdir() {
        let tmp = tempfile::tempdir().unwrap();
        let criterion_dir = tmp.path().join("criterion");

        // Nested group/bench layout, as criterion produces for grouped benches.
        let bench_dir = criterion_dir.join("my_group").join("my_bench").join("new");
        std::fs::create_dir_all(&bench_dir).unwrap();
        std::fs::write(
            bench_dir.join("estimates.json"),
            r#"{
                "mean": {
                    "confidence_interval": {"confidence_level": 0.95, "lower_bound": 100.0, "upper_bound": 200.0},
                    "point_estimate": 150.0,
                    "standard_error": 5.0
                },
                "median": {
                    "confidence_interval": {"confidence_level": 0.95, "lower_bound": 90.0, "upper_bound": 180.0},
                    "point_estimate": 140.0,
                    "standard_error": 4.0
                },
                "slope": null
            }"#,
        )
        .unwrap();

        let results = scan_criterion_dir(&criterion_dir).unwrap();
        assert_eq!(results.len(), 1);
        // Name is the path relative to the criterion dir, '/'-separated.
        assert_eq!(results[0].name, "my_group/my_bench");
        assert!((results[0].estimate_ns - 150.0).abs() < f64::EPSILON);
        assert_eq!(results[0].source, BenchSource::Criterion);
    }

    #[test]
    fn scan_criterion_dir_multiple_benches() {
        let tmp = tempfile::tempdir().unwrap();
        let criterion_dir = tmp.path().join("criterion");

        // Minimal estimates.json with a parameterized mean estimate.
        let estimates_json = |ns: f64| {
            format!(
                r#"{{"mean": {{
                    "confidence_interval": {{"confidence_level": 0.95, "lower_bound": 0.0, "upper_bound": 1000.0}},
                    "point_estimate": {},
                    "standard_error": 1.0
                }}, "median": null, "slope": null}}"#,
                ns
            )
        };

        for (name, ns) in &[("bench_a", 100.0), ("bench_b", 200.0)] {
            let dir = criterion_dir.join(name).join("new");
            std::fs::create_dir_all(&dir).unwrap();
            std::fs::write(dir.join("estimates.json"), estimates_json(*ns)).unwrap();
        }

        let results = scan_criterion_dir(&criterion_dir).unwrap();
        assert_eq!(results.len(), 2);
        assert_eq!(results[0].name, "bench_a");
        assert_eq!(results[1].name, "bench_b");
    }

    #[test]
    fn scan_criterion_dir_skips_report_dir() {
        let tmp = tempfile::tempdir().unwrap();
        let criterion_dir = tmp.path().join("criterion");

        // A "report" dir with an estimates.json must NOT be picked up.
        let report_dir = criterion_dir.join("report").join("new");
        std::fs::create_dir_all(&report_dir).unwrap();
        std::fs::write(report_dir.join("estimates.json"), "{}").unwrap();

        let results = scan_criterion_dir(&criterion_dir).unwrap();
        assert!(results.is_empty());
    }

    #[test]
    fn scan_criterion_dir_empty() {
        let tmp = tempfile::tempdir().unwrap();
        let criterion_dir = tmp.path().join("criterion");
        std::fs::create_dir_all(&criterion_dir).unwrap();

        let results = scan_criterion_dir(&criterion_dir).unwrap();
        assert!(results.is_empty());
    }

    #[test]
    fn detect_criterion_with_results() {
        let tmp = tempfile::tempdir().unwrap();

        let bench_dir = tmp.path().join("criterion").join("my_bench").join("new");
        std::fs::create_dir_all(&bench_dir).unwrap();
        std::fs::write(
            bench_dir.join("estimates.json"),
            r#"{"mean": null, "median": null, "slope": null}"#,
        )
        .unwrap();

        assert!(detect_criterion(tmp.path()));
    }

    #[test]
    fn detect_criterion_without_results() {
        // criterion/ exists but holds no estimates — not detected.
        let tmp = tempfile::tempdir().unwrap();
        std::fs::create_dir_all(tmp.path().join("criterion")).unwrap();
        assert!(!detect_criterion(tmp.path()));
    }

    #[test]
    fn sub_millisecond_bench_rounds_to_1ms_minimum() {
        struct FakeClock;
        impl Clock for FakeClock {
            fn now_rfc3339(&self) -> String {
                "2024-01-01T00:00:00Z".to_string()
            }
        }

        // 500 ns rounds to 0 ms, which must be clamped up to 1 ms.
        let benchmarks = vec![ParsedBenchmark {
            name: "fast_bench".into(),
            estimate_ns: 500.0, error_ns: None,
            source: BenchSource::Libtest,
        }];

        let tool = ToolInfo {
            name: "perfgate".into(),
            version: "0.1.0".into(),
        };
        let host = HostInfo {
            os: "linux".into(),
            arch: "x86_64".into(),
            cpu_count: None,
            memory_bytes: None,
            hostname_hash: None,
        };
        let clock = FakeClock;

        let receipt = benchmarks_to_receipt(
            &benchmarks,
            "test",
            &tool,
            &host,
            &clock,
            &["cargo".into(), "bench".into()],
        )
        .unwrap();

        assert_eq!(receipt.samples[0].wall_ms, 1);
    }
}