BenchmarkRunner

Struct BenchmarkRunner 

pub struct BenchmarkRunner {
    pub iterations: usize,
    pub measure_memory: bool,
    pub warmup_iterations: usize,
}

Benchmark runner for dataset operations
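
A minimal usage sketch built only from the methods documented on this page; the iteration counts, the benchmark name, and the reported sample/feature counts are illustrative, and the crate import for BenchmarkRunner is omitted:

use std::collections::HashMap;

// Configure a runner via the builder-style methods.
let runner = BenchmarkRunner::new()
    .with_iterations(10)             // measured iterations per benchmark
    .with_warmup(3)                  // warmup iterations run before the measured ones
    .with_memory_measurement(false);

// Run one named benchmark; the closure reports (n_samples, n_features) on success.
let result = runner.run_benchmark("noop", HashMap::new(), || Ok((100, 4)));
println!("success: {}, time: {}", result.success, result.formatted_duration());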

Fields§

§iterations: usize

Number of iterations for each benchmark

§measure_memory: bool

Whether to include memory measurements

§warmup_iterations: usize

Warmup iterations before actual benchmarks

Implementations§

impl BenchmarkRunner

pub fn new() -> Self

Create a new benchmark runner

Examples found in repository
examples/scikit_learn_benchmark.rs (line 25)
fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🚀 SciRS2 vs Scikit-Learn Performance Benchmarks");
    println!("================================================\n");

    let runner = BenchmarkRunner::new()
        .with_iterations(5)
        .with_warmup(2)
        .with_memory_measurement(false);

    // Run comprehensive SciRS2 benchmarks
    let scirs2suites = runner.run_comprehensive_benchmarks();

    println!("{}", "\n".to_owned() + &"=".repeat(60));
    println!("DETAILED ANALYSIS");
    println!("{}", "=".repeat(60));

    // Analyze toy dataset performance
    analyze_toy_dataset_performance(&scirs2suites);

    // Analyze data generation performance
    analyze_data_generation_performance(&scirs2suites);

    // Run Python comparison benchmarks (if available)
    run_python_comparison_benchmarks();

    // Generate performance report
    generate_performance_report(&scirs2suites);

    println!("\n🎉 Benchmark suite completed successfully!");
    println!("Check the generated performance report for detailed analysis.");

    Ok(())
}

examples/real_world_datasets.rs (line 358)
fn demonstrate_performance_comparison() -> Result<(), Box<dyn std::error::Error>> {
    println!("⚡ PERFORMANCE COMPARISON");
    println!("{}", "-".repeat(40));

    let runner = BenchmarkRunner::new().with_iterations(3).with_warmup(1);

    // Benchmark real-world dataset loading
    println!("Benchmarking real-world dataset operations...");

    // Titanic loading benchmark
    let titanic_params = HashMap::from([("dataset".to_string(), "titanic".to_string())]);
    let titanic_result =
        runner.run_benchmark("load_titanic", titanic_params, || match load_titanic() {
            Ok(dataset) => Ok((dataset.n_samples(), dataset.n_features())),
            Err(e) => Err(format!("Failed to load Titanic: {e}")),
        });

    // California Housing loading benchmark
    let housing_params = HashMap::from([("dataset".to_string(), "california_housing".to_string())]);
    let housing_result = runner.run_benchmark("load_california_housing", housing_params, || {
        match load_california_housing() {
            Ok(dataset) => Ok((dataset.n_samples(), dataset.n_features())),
            Err(e) => Err(format!("Failed to load California Housing: {e}")),
        }
    });

    // Heart Disease loading benchmark
    let heart_params = HashMap::from([("dataset".to_string(), "heart_disease".to_string())]);
    let heart_result =
        runner.run_benchmark(
            "load_heart_disease",
            heart_params,
            || match load_heart_disease() {
                Ok(dataset) => Ok((dataset.n_samples(), dataset.n_features())),
                Err(e) => Err(format!("Failed to load Heart Disease: {e}")),
            },
        );

    // Display results
    println!("\nReal-world dataset loading performance:");

    let results = vec![
        ("Titanic", &titanic_result),
        ("California Housing", &housing_result),
        ("Heart Disease", &heart_result),
    ];

    for (name, result) in results {
        if result.success {
            println!(
                "  {}: {} ({} samples, {} features, {:.1} samples/s)",
                name,
                result.formatted_duration(),
                result.samples,
                result.features,
                result.throughput
            );
        } else {
            println!(
                "  {}: Failed - {}",
                name,
                result
                    .error
                    .as_ref()
                    .unwrap_or(&"Unknown error".to_string())
            );
        }
    }

    // Memory usage estimation
    let total_samples = titanic_result.samples + housing_result.samples + heart_result.samples;
    let total_features = titanic_result.features + housing_result.features + heart_result.features;
    let estimated_memory_mb = (total_samples * total_features * 8) as f64 / (1024.0 * 1024.0);

    println!("\nMemory usage estimate:");
    println!("  Total samples: {total_samples}");
    println!("  Total features: {total_features}");
    println!("  Estimated memory: {estimated_memory_mb:.1} MB");

    // Performance recommendations
    println!("\nPerformance recommendations:");
    if estimated_memory_mb > 100.0 {
        println!("  • Consider using streaming for large datasets");
        println!("  • Enable caching for frequently accessed datasets");
    }
    println!("  • Use train/test splitting to reduce memory usage");
    println!("  • Apply feature selection to reduce dimensionality");

    println!();
    Ok(())
}

pub fn with_iterations(self, iterations: usize) -> Self

Set number of iterations

Examples found in repository

examples/scikit_learn_benchmark.rs (line 26) and examples/real_world_datasets.rs (line 358); see the full listings under new above.

pub fn with_memory_measurement(self, measure: bool) -> Self

Enable or disable memory measurement

Examples found in repository

examples/scikit_learn_benchmark.rs (line 28); see the full listing under new above.

pub fn with_warmup(self, warmupiterations: usize) -> Self

Set warmup iterations

Examples found in repository

examples/scikit_learn_benchmark.rs (line 27) and examples/real_world_datasets.rs (line 358); see the full listings under new above.

pub fn run_benchmark<F>(&self, name: &str, parameters: HashMap<String, String>, benchmark_fn: F) -> BenchmarkResult
where
    F: FnMut() -> Result<(usize, usize), String>,

Run a benchmark function multiple times and return the average result

Examples found in repository

examples/real_world_datasets.rs (lines 366-369); see the full listing under new above.

pub fn benchmark_toy_datasets(&self) -> BenchmarkSuite

Benchmark toy dataset loading

pub fn benchmark_data_generation(&self) -> BenchmarkSuite

Benchmark synthetic data generation
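
A minimal sketch of running the built-in suites; what a BenchmarkSuite exposes is not shown on this page, so the results are only bound here:

let runner = BenchmarkRunner::new().with_iterations(5).with_warmup(1);

// Each call runs a predefined group of benchmarks and returns a BenchmarkSuite.
let _toy_suite = runner.benchmark_toy_datasets();
let _generation_suite = runner.benchmark_data_generation();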

pub fn benchmark_csv_loading<P: AsRef<Path>>(&self, csvpath: P) -> BenchmarkSuite

Benchmark CSV loading performance
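
A minimal sketch, assuming a CSV file exists at the placeholder path shown (any type implementing AsRef<Path> is accepted):

use std::path::Path;

let runner = BenchmarkRunner::new().with_iterations(3);
// "data/example.csv" is a hypothetical path used only for illustration.
let _csv_suite = runner.benchmark_csv_loading(Path::new("data/example.csv"));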

pub fn run_comprehensive_benchmarks(&self) -> Vec<BenchmarkSuite>

Run comprehensive benchmarks comparing SciRS2 performance

Examples found in repository

examples/scikit_learn_benchmark.rs (line 31); see the full listing under new above.

Trait Implementations§

Source§

impl Default for BenchmarkRunner

Source§

fn default() -> Self

Returns the “default value” for a type. Read more
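
A minimal sketch; default() is assumed to yield a runner with the struct's default settings, which can then be adjusted with the builder methods above:

let runner = BenchmarkRunner::default().with_iterations(10);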

Auto Trait Implementations§

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> IntoEither for T

Source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

impl<T> Pointable for T

Source§

const ALIGN: usize

The alignment of pointer.
Source§

type Init = T

The type for initializers.
Source§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a with the given initializer. Read more
Source§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
Source§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
Source§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
Source§

impl<T> Same for T

Source§

type Output = T

Should always be Self
Source§

impl<SS, SP> SupersetOf<SS> for SP
where SS: SubsetOf<SP>,

Source§

fn to_subset(&self) -> Option<SS>

The inverse inclusion map: attempts to construct self from the equivalent element of its superset. Read more
Source§

fn is_in_subset(&self) -> bool

Checks if self is actually part of its subset T (and can be converted to it).
Source§

fn to_subset_unchecked(&self) -> SS

Use with care! Same as self.to_subset but without any property checks. Always succeeds.
Source§

fn from_subset(element: &SS) -> SP

The inclusion map: converts self to the equivalent element of its superset.
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V