// oxilean_kernel/bench_support/functions.rs
//! Auto-generated module
//!
//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)

use super::types::{
    AdaptiveWarmup, BatchTimer, BenchAnnotationSet, BenchConfig, BenchEventLog, BenchFilter,
    BenchGroup, BenchHarnessExt, BenchHarnessV2, BenchHistogram, BenchMatrix, BenchPlan,
    BenchProfiler, BenchRegistry, BenchReporter, BenchResult, BenchResultExt, BenchSuite,
    BenchSummary, BenchTimer, ColdCacheSimulator, CompareResult, ConfidenceInterval, CpuPinner,
    FuzzInput, HdrHistogram, IterationPolicy, LatencyPercentile, MetricSet, MovingAverage,
    MultiArmBandit, OlsRegression, ProgressBar, RegressionTest, SampleBuffer, ScalingTest,
    StabilityChecker, ThroughputTracker, ThroughputUnit, TimeSlice,
};

#[cfg(test)]
mod tests {
    use super::*;

    /// Starting a timer must not panic and yields a usable value.
    #[test]
    fn test_bench_timer_start() {
        let _timer = BenchTimer::start();
    }

    /// Elapsed readings are non-negative and mutually consistent.
    #[test]
    fn test_bench_timer_elapsed() {
        let timer = BenchTimer::start();
        // Burn a few cycles so some wall time can pass before we read.
        let busy = (0..1000u64).fold(0u64, |acc, i| acc.wrapping_add(i));
        let _ = busy;
        let ms = timer.elapsed_ms();
        let us = timer.elapsed_us();
        assert!(ms >= 0.0, "elapsed_ms should be non-negative");
        assert!(us >= 0.0, "elapsed_us should be non-negative");
        assert!(
            us >= ms - 1.0,
            "elapsed_us should be >= elapsed_ms (roughly)"
        );
    }

    /// Field values and derived averages of a freshly built result.
    #[test]
    fn test_bench_result_creation() {
        let result = BenchResult::new("my_bench", 100.0, 1000);
        assert_eq!(result.name, "my_bench");
        assert_eq!(result.duration_ms, 100.0);
        assert_eq!(result.iterations, 1000);
        assert!((result.avg_ms() - 0.1).abs() < 1e-10);
        assert!((result.avg_us() - 100.0).abs() < 1e-10);
        assert!(result.iters_per_sec() > 0.0);
        // Zero iterations must not divide by zero: averages collapse to 0.
        let zero_iters = BenchResult::new("zero", 50.0, 0);
        assert_eq!(zero_iters.avg_ms(), 0.0);
        assert_eq!(zero_iters.avg_us(), 0.0);
        // The Display rendering should mention the benchmark name.
        let rendered = format!("{}", result);
        assert!(rendered.contains("my_bench"));
    }

    /// A brand-new suite holds no results.
    #[test]
    fn test_bench_suite_new() {
        let suite = BenchSuite::new();
        assert!(suite.is_empty());
        assert_eq!(suite.len(), 0);
        assert!(suite.results().is_empty());
    }

    /// Running closures records one result per call, in call order.
    #[test]
    fn test_bench_suite_run() {
        let mut suite = BenchSuite::new();
        suite.run("noop", 10, || {});
        suite.run("sum", 100, || {
            let total = (0..10u64).fold(0u64, |acc, i| acc.wrapping_add(i));
            let _ = total;
        });
        assert_eq!(suite.len(), 2);
        let results = suite.results();
        assert_eq!(results[0].name, "noop");
        assert_eq!(results[0].iterations, 10);
        assert_eq!(results[1].name, "sum");
        assert_eq!(results[1].iterations, 100);
    }

    /// The textual report carries the header plus each benchmark name.
    #[test]
    fn test_bench_suite_report() {
        let mut suite = BenchSuite::new();
        suite.run("alpha", 5, || {});
        let report = suite.report();
        assert!(report.contains("alpha"));
        assert!(report.contains("Benchmark Suite Report"));
    }

    /// Constructor arguments and `Default` values for the harness.
    #[test]
    fn test_bench_harness_new() {
        let harness = BenchHarnessV2::new(5, 50);
        assert_eq!(harness.warmup_count(), 5);
        assert_eq!(harness.iteration_count(), 50);
        let default_harness = BenchHarnessV2::default();
        assert_eq!(default_harness.warmup_count(), 10);
        assert_eq!(default_harness.iteration_count(), 100);
        // A benchmark run reports its own name and iteration count.
        let result = harness.bench("simple", || {});
        assert_eq!(result.name, "simple");
        assert_eq!(result.iterations, 50);
        assert!(result.duration_ms >= 0.0);
    }

    /// Throughput is positive for real work and infinite for zero duration.
    #[test]
    fn test_bench_harness_throughput() {
        let harness = BenchHarnessV2::new(0, 1000);
        let result = harness.bench("throughput_test", || {
            let acc = (0..100u64).fold(0u64, |a, i| a.wrapping_add(i));
            let _ = acc;
        });
        let tp = harness.throughput(&result, 100);
        assert!(tp > 0.0, "throughput should be positive, got {}", tp);
        // A zero-duration result divides out to infinity, not a panic.
        let zero_dur = BenchResult::new("instant", 0.0, 100);
        let inf_tp = harness.throughput(&zero_dur, 10);
        assert!(inf_tp.is_infinite());
    }
}
#[cfg(test)]
mod tests_bench_extra {
    use super::*;

    /// In-range samples are counted; an out-of-range one hits overflow.
    #[test]
    fn test_histogram() {
        let mut hist = BenchHistogram::new(0.0, 1000.0, 10);
        for &sample in &[50.0, 150.0, 50.0] {
            hist.record(sample);
        }
        assert_eq!(hist.total_samples(), 3);
        hist.record(2000.0); // above the configured upper bound
        assert_eq!(hist.overflow_count(), 1);
    }

    /// First update seeds the average; with α = 0.5 the second lands halfway.
    #[test]
    fn test_moving_average() {
        let mut avg = MovingAverage::new(0.5);
        assert!(avg.current().is_none());
        avg.update(100.0);
        let first = avg.current().expect("current should succeed");
        assert!((first - 100.0).abs() < 1e-9);
        avg.update(200.0);
        let second = avg.current().expect("current should succeed");
        assert!((second - 150.0).abs() < 1e-9);
    }

    /// Enough identical samples should mark the warmup as complete.
    #[test]
    fn test_adaptive_warmup() {
        let mut warmup = AdaptiveWarmup::new(5, 0.10);
        for _ in 0..10 {
            warmup.record(100.0);
        }
        assert!(warmup.is_warmed());
    }

    /// Smoke test: recording items and querying the rate must not panic.
    #[test]
    fn test_throughput_tracker() {
        let mut tracker = ThroughputTracker::new(1000.0);
        tracker.record(100);
        tracker.record(100);
        let _ips = tracker.items_per_sec();
    }

    /// Cells round-trip through set/get and labels appear in the markdown.
    #[test]
    fn test_bench_matrix() {
        let mut matrix = BenchMatrix::new();
        let row = matrix.add_row("case_a");
        let col_a = matrix.add_col("metric_1");
        let col_b = matrix.add_col("metric_2");
        matrix.set(row, col_a, 1.23);
        matrix.set(row, col_b, 4.56);
        let cell = matrix.get(row, col_a).expect("element at r0, c0 should exist");
        assert!((cell - 1.23).abs() < 1e-9);
        let markdown = matrix.to_markdown();
        assert!(markdown.contains("case_a"));
        assert!(markdown.contains("metric_1"));
    }

    /// Percentile summary over 1..=100 must be monotone in the percentile.
    #[test]
    fn test_latency_percentile() {
        let mut latency = LatencyPercentile::new();
        (1..=100).for_each(|i| latency.record(i as f64));
        let (p50, p90, p99) = latency.summary().expect("summary should succeed");
        assert!(p50 <= p90);
        assert!(p90 <= p99);
    }

    /// 10% margin over a 100 µs baseline: 105 passes, 115 regresses.
    #[test]
    fn test_regression_test() {
        let rt = RegressionTest::new("foo", 100.0, 1.10);
        assert!(!rt.is_regression(105.0));
        assert!(rt.is_regression(115.0));
        assert_eq!(rt.verdict(105.0), "PASS");
        assert_eq!(rt.verdict(115.0), "REGRESSION");
    }

    /// Totals add up and a recorded regression makes the summary non-passing.
    #[test]
    fn test_bench_summary() {
        let mut summary = BenchSummary::new("suite_a");
        summary.passed = 5;
        summary.regressions = 1;
        summary.skipped = 2;
        summary.total_ms = 500.0;
        assert_eq!(summary.total(), 8);
        assert!(!summary.all_passed());
        let line = summary.result_line();
        assert!(line.contains("REGRESSION") || line.contains("regression"));
    }

    /// The simulator echoes its configured buffer size; flush must not panic.
    #[test]
    fn test_cold_cache_simulator() {
        let mut simulator = ColdCacheSimulator::new(1024 * 1024);
        assert_eq!(simulator.buffer_size(), 1024 * 1024);
        simulator.flush();
    }

    /// Key/value annotations: hits, a miss, and the stored count.
    #[test]
    fn test_bench_annotation_set() {
        let mut annotations = BenchAnnotationSet::new();
        annotations.add("os", "linux");
        annotations.add("arch", "x86_64");
        assert_eq!(annotations.len(), 2);
        assert_eq!(annotations.get("os"), Some("linux"));
        assert_eq!(annotations.get("arch"), Some("x86_64"));
        assert_eq!(annotations.get("cpu"), None);
    }

    /// Minimum-iteration accessors across the policy variants.
    #[test]
    fn test_iteration_policy() {
        assert_eq!(IterationPolicy::Fixed(10).min_iters(), 10);
        assert!(IterationPolicy::TimeBounded(100).is_time_bounded());
        let adaptive = IterationPolicy::Adaptive { min: 5, max: 100 };
        assert_eq!(adaptive.min_iters(), 5);
    }

    /// A fixed policy collects exactly the requested number of samples.
    #[test]
    fn test_bench_harness_fixed() {
        let mut harness = BenchHarnessExt::new("noop", IterationPolicy::Fixed(10));
        harness.run(|| {
            std::hint::black_box(42u64);
        });
        assert_eq!(harness.num_samples(), 10);
        assert!(harness.median_us().is_some());
    }
}
#[cfg(test)]
mod tests_bench_metric {
    use super::*;

    /// Metrics round-trip by name and appear in the combined display string.
    #[test]
    fn test_metric_set() {
        let mut metrics = MetricSet::new();
        metrics.add("latency", 12.5, "µs");
        metrics.add("throughput", 1e6, "op/s");
        assert_eq!(metrics.len(), 2);
        let latency = metrics
            .get("latency")
            .expect("element at 'latency' should exist");
        assert!((latency - 12.5).abs() < 1e-9);
        let rendered = metrics.display_all();
        assert!(rendered.contains("latency"));
    }
}
249/// Compares `new_us` against `baseline_us` with a given threshold fraction.
250#[allow(dead_code)]
251pub fn compare_timings(baseline_us: f64, new_us: f64, threshold: f64) -> CompareResult {
252    if baseline_us < f64::EPSILON {
253        return CompareResult::Neutral;
254    }
255    let ratio = new_us / baseline_us;
256    if ratio < 1.0 - threshold {
257        CompareResult::Improvement
258    } else if ratio > 1.0 + threshold {
259        CompareResult::Regression
260    } else {
261        CompareResult::Neutral
262    }
263}
#[cfg(test)]
mod tests_bench_extra2 {
    use super::*;

    /// Two recorded events are counted; the elapsed query is non-negative.
    #[test]
    fn test_bench_event_log() {
        let mut log = BenchEventLog::new();
        for &label in &["start", "end"] {
            log.record(label);
        }
        assert_eq!(log.count(), 2);
        assert!(log.since_last_ms() >= 0.0);
    }

    /// Perfectly linear data (time ∝ size) should fit an exponent of ~1.
    #[test]
    fn test_scaling_test() {
        let mut scaling = ScalingTest::new();
        scaling.add_point(100, 100.0);
        scaling.add_point(1000, 1000.0);
        let exp = scaling.scaling_exponent().expect("exp should be present");
        assert!((exp - 1.0).abs() < 0.1, "expected ~1.0, got {}", exp);
        assert!(scaling.is_at_most_order(1.1));
    }

    /// Only names matching the include pattern and not the exclude pass.
    #[test]
    fn test_bench_filter() {
        let mut filter = BenchFilter::new();
        filter.include("fast");
        filter.exclude("slow");
        assert!(filter.accepts("fast_sort"));
        assert!(!filter.accepts("slow_sort"));
        assert!(!filter.accepts("bubble_sort"));
    }

    /// Groups aggregate benchmarks; a benchmark name resolves to its group.
    #[test]
    fn test_bench_registry() {
        let mut registry = BenchRegistry::new();
        let mut group = BenchGroup::new("group_a");
        group.add("bench_1");
        group.add("bench_2");
        registry.add_group(group);
        assert_eq!(registry.total_benchmarks(), 2);
        assert!(registry.all_benchmark_names().contains(&"bench_1"));
        let found = registry.find_group("bench_2");
        assert!(found.is_some());
        assert_eq!(found.expect("found should be valid").name, "group_a");
    }

    /// Pushing past capacity caps the buffer length at its capacity.
    #[test]
    fn test_sample_buffer() {
        let mut buffer = SampleBuffer::new(5);
        for i in 1..=7 {
            buffer.push(i as f64);
        }
        assert_eq!(buffer.len(), 5);
        assert!(buffer.mean().expect("mean should be present") > 0.0);
    }

    /// Fraction, render text, and completion as the bar steps 0 → 10.
    #[test]
    fn test_progress_bar() {
        let mut bar = ProgressBar::new(10, 20);
        assert_eq!(bar.fraction(), 0.0);
        (0..5).for_each(|_| bar.step());
        assert!((bar.fraction() - 0.5).abs() < 1e-9);
        assert!(bar.render().contains("5/10"));
        (0..5).for_each(|_| bar.step());
        assert!(bar.is_complete());
    }

    /// A ±10% band: −15% improves, exactly equal is neutral, +15% regresses.
    #[test]
    fn test_compare_timings() {
        assert_eq!(
            compare_timings(100.0, 85.0, 0.10),
            CompareResult::Improvement
        );
        assert_eq!(compare_timings(100.0, 100.0, 0.10), CompareResult::Neutral);
        assert_eq!(
            compare_timings(100.0, 115.0, 0.10),
            CompareResult::Regression
        );
    }

    /// The pinner remembers its CPU id and reports a successful pin.
    #[test]
    fn test_cpu_pinner() {
        let pinner = CpuPinner::new(0);
        assert_eq!(pinner.cpu(), 0);
        assert!(pinner.pin());
    }
}
#[cfg(test)]
mod tests_bench_result {
    use super::*;

    /// Mean, iteration count, CSV prefix, and stability for a small sample set.
    #[test]
    fn test_bench_result_from_samples() {
        let samples = [10.0, 12.0, 11.0, 10.5, 11.5];
        let result =
            BenchResultExt::from_samples("my_bench", &samples).expect("r should be present");
        assert!((result.mean_us - 11.0).abs() < 1.0);
        assert_eq!(result.iterations, 5);
        assert!(result.to_csv().starts_with("my_bench,"));
        assert!(result.is_stable(0.5));
    }

    /// An empty slice yields no result at all.
    #[test]
    fn test_bench_result_empty() {
        assert!(BenchResultExt::from_samples("empty", &[]).is_none());
    }
}
#[cfg(test)]
mod tests_bench_profiler {
    use super::*;

    /// Start/stop round-trip: the label is kept and elapsed is non-negative.
    #[test]
    fn test_bench_profiler() {
        let profiler = BenchProfiler::start("my_op");
        assert_eq!(profiler.label(), "my_op");
        assert!(profiler.stop() >= 0.0);
    }
}
#[cfg(test)]
mod tests_bench_final {
    use super::*;

    /// `push` reports stability only once the 5-sample window is full.
    #[test]
    fn test_stability_checker() {
        let mut checker = StabilityChecker::new(5, 0.02);
        for _ in 0..4 {
            assert!(!checker.push(100.0));
        }
        assert!(checker.push(100.0));
        assert_eq!(checker.count(), 5);
    }

    /// The arm fed the highest reward wins; its estimate converges to 1.0.
    #[test]
    fn test_multi_arm_bandit() {
        let mut bandit = MultiArmBandit::new(3, 0.1);
        for _ in 0..30 {
            bandit.update(0, 1.0);
            bandit.update(1, 0.5);
            bandit.update(2, 0.2);
        }
        assert_eq!(bandit.best_arm(), 0);
        assert!((bandit.estimate(0) - 1.0).abs() < 0.01);
    }

    /// A 100 ms slice: consuming past the budget flips `consume` to true
    /// and drives utilisation above 1.0.
    #[test]
    fn test_time_slice() {
        let mut slice = TimeSlice::new("phase_1", 100.0);
        assert!(!slice.consume(30.0));
        assert!(!slice.consume(50.0));
        assert!(slice.consume(30.0));
        assert!(slice.remaining_ms() == 0.0);
        assert!(slice.utilisation() > 1.0);
    }

    /// The reporter counts results, ranks fastest/slowest, and emits CSV.
    #[test]
    fn test_bench_reporter() {
        let mut reporter = BenchReporter::new("suite");
        let alpha = BenchResultExt::from_samples("alpha", &[10.0, 11.0, 10.5])
            .expect("value should be present");
        reporter.add(alpha);
        let beta = BenchResultExt::from_samples("beta", &[20.0, 21.0, 20.5])
            .expect("value should be present");
        reporter.add(beta);
        assert_eq!(reporter.count(), 2);
        assert_eq!(
            reporter.fastest().expect("fastest should succeed").name,
            "alpha"
        );
        assert_eq!(
            reporter.slowest().expect("slowest should succeed").name,
            "beta"
        );
        assert!(reporter.to_csv().contains("alpha"));
    }

    /// Seeded fuzz input: distinct u64s, bounded usize, unit-interval f64,
    /// and a byte fill that actually writes something.
    #[test]
    fn test_fuzz_input() {
        let mut fuzz = FuzzInput::new(12345);
        let first = fuzz.next_u64();
        let second = fuzz.next_u64();
        assert_ne!(first, second);
        assert!(fuzz.next_usize(10) < 10);
        let unit = fuzz.next_f64();
        assert!((0.0..1.0).contains(&unit));
        let mut bytes = [0u8; 16];
        fuzz.fill_bytes(&mut bytes);
        assert!(bytes.iter().any(|&b| b != 0));
    }

    /// Default-plan settings plus ordering: `reverse` flips the run order.
    #[test]
    fn test_bench_plan() {
        let mut plan = BenchPlan::default_plan();
        for &name in &["bench_a", "bench_b", "bench_c"] {
            plan.add(name);
        }
        assert_eq!(plan.len(), 3);
        plan.reverse();
        assert_eq!(plan.order[0], "bench_c");
        assert_eq!(plan.warmup_iters, 3);
        assert_eq!(plan.measure_iters, 10);
    }
}
#[cfg(test)]
mod tests_bench_final3 {
    use super::*;

    /// 95% CI over 1..=100: estimate near 50.5, proper bounds and width.
    #[test]
    fn test_confidence_interval() {
        let samples: Vec<f64> = (1..=100).map(f64::from).collect();
        let ci = ConfidenceInterval::compute_95(&samples).expect("ci should be present");
        assert!((ci.estimate - 50.5).abs() < 1.0);
        assert!(ci.upper > ci.lower);
        assert!(ci.half_width() > 0.0);
        assert!(ci.display().contains("CI"));
    }

    /// Exact recovery of y = 2x + 1 from noiseless points, plus prediction.
    #[test]
    fn test_ols_regression() {
        let mut regression = OlsRegression::new();
        for i in 0..=10 {
            let x = i as f64;
            regression.add(x, 2.0 * x + 1.0);
        }
        let (intercept, slope) = regression.fit().expect("fit should succeed");
        assert!((intercept - 1.0).abs() < 1e-9, "intercept: {}", intercept);
        assert!((slope - 2.0).abs() < 1e-9, "slope: {}", slope);
        let pred = regression.predict(5.0).expect("pred should be present");
        assert!((pred - 11.0).abs() < 1e-9);
    }

    /// All samples are counted; percentile values are monotone.
    #[test]
    fn test_hdr_histogram() {
        let mut hist = HdrHistogram::new(1000.0, 100);
        for i in 0..100 {
            hist.record(f64::from(i) * 10.0);
        }
        assert_eq!(hist.total_count(), 100);
        let p50 = hist.value_at_percentile(50.0);
        let p99 = hist.value_at_percentile(99.0);
        assert!(p50 <= p99);
    }

    /// Accessors must be callable and non-negative right after start.
    #[test]
    fn test_batch_timer() {
        let timer = BatchTimer::start(1000);
        let _elapsed = timer.total_us();
        assert!(timer.ns_per_item() >= 0.0);
    }
}
501/// Calculates throughput given byte count and elapsed microseconds.
502#[allow(dead_code)]
503pub fn calc_throughput(bytes: u64, elapsed_us: f64, unit: ThroughputUnit) -> f64 {
504    if elapsed_us < f64::EPSILON {
505        return 0.0;
506    }
507    let bps = bytes as f64 / (elapsed_us * 1e-6);
508    unit.from_bytes_per_sec(bps)
509}
#[cfg(test)]
mod tests_throughput_unit {
    use super::*;

    /// One GB transferred in one second should read as ~1.0 GB/s.
    #[test]
    fn test_calc_throughput() {
        let tp = calc_throughput(1_000_000_000, 1_000_000.0, ThroughputUnit::GBPerSec);
        assert!((tp - 1.0).abs() < 0.01, "expected 1.0 GB/s, got {}", tp);
    }

    /// Human-readable labels for two of the unit variants.
    #[test]
    fn test_throughput_unit_label() {
        assert_eq!(ThroughputUnit::MBPerSec.label(), "MB/s");
        assert_eq!(ThroughputUnit::OpsPerSec.label(), "ops/s");
    }
}
#[cfg(test)]
mod tests_bench_config {
    use super::*;

    /// Default config: quiet, 3 warmups, 10 measures, no fuzzing.
    #[test]
    fn test_default_config() {
        let config = BenchConfig::default_config();
        assert!(!config.verbose);
        assert_eq!(config.warmup_iters, 3);
        assert_eq!(config.measure_iters, 10);
        assert!(!config.has_fuzz());
    }

    /// CI config: JSON output enabled with fuzzing seeded at 42.
    #[test]
    fn test_ci_config() {
        let config = BenchConfig::ci_config();
        assert!(config.json_output);
        assert!(config.has_fuzz());
        assert_eq!(config.fuzz_seed, Some(42));
    }
}