// optirs_bench/lib.rs
1//! # OptiRS Bench - Benchmarking and Performance Analysis
2//!
3//! **Version:** 0.3.1
4//! **Status:** Available
5//!
6//! This crate provides comprehensive benchmarking, profiling, performance analysis, and regression
7//! detection tools for ML optimization algorithms in the OptiRS ecosystem.
8//!
9//! ## Dependencies
10//!
11//! - `scirs2-core` 0.1.1 - Required foundation
12//! - `optirs-core` 0.1.0 - Core optimizers
13//!
14//! ## Features
15//!
16//! - **Performance Benchmarking**: Compare optimizers across standard test functions
17//! - **Gradient Flow Analysis**: Monitor optimization dynamics and convergence patterns
18//! - **Memory Profiling**: Track memory usage, detect leaks, and optimize allocation
19//! - **Regression Detection**: Detect performance regressions across different versions
20//! - **Cross-Platform Testing**: Validate optimizers across different hardware and OS
21//! - **Security Auditing**: Scan for security vulnerabilities and compliance issues
22//! - **CI/CD Integration**: Automated testing and reporting for continuous integration
23//! - **Visualization Tools**: Generate plots and reports for optimization analysis
24//!
25//! ## Architecture
26//!
27//! The crate is organized into several main modules:
28//!
29//! - `benchmarking`: Core benchmarking functionality and test suites
30//! - `memory`: Memory profiling, leak detection, and optimization
31//! - `regression`: Performance regression detection and alerting
32//! - `security`: Security auditing and vulnerability scanning
33//! - `visualization`: Plotting and reporting tools
34//! - `ci_cd_automation`: Continuous integration and deployment automation
35//! - `cross_platform`: Cross-platform testing and validation
36//!
37//! ## Usage
38//!
39//! ```rust
40//! use optirs_bench::{
41//! OptimizerBenchmark, GradientFlowAnalyzer,
42//! visualization::OptimizerStateVisualizer,
43//! };
44//! use scirs2_core::ndarray::{Array1, Ix1};
45//!
46//! // Create a benchmark suite
47//! let mut benchmark = OptimizerBenchmark::<f64>::new();
48//! benchmark.add_standard_test_functions();
49//!
50//! // Set up gradient flow analysis
51//! let mut analyzer = GradientFlowAnalyzer::<f64, Ix1>::new(1000);
52//!
53//! // Set up state visualization
54//! let mut visualizer = OptimizerStateVisualizer::<f64, Ix1>::new(500);
55//! ```
56
57// Re-export error types from optirs-core for consistency
58pub use optirs_core::error::{OptimError, Result};
59
60// Re-export key types for external users
pub mod error {
    //! Error types for the OptiRS benchmarking crate.
    //!
    //! These are re-exports of `optirs_core`'s error types so downstream
    //! users can rely on a single, consistent error surface when working
    //! with `optirs_bench`.
    pub use optirs_core::error::{OptimError, OptimizerError, Result};
}
64
65// Core benchmarking and analysis functionality
66mod mod_impl;
67
68// Re-export the main types and functions
69pub use mod_impl::*;
70
71// Advanced modules for specific functionality
72pub mod advanced_cross_platform_orchestrator;
73pub mod advanced_leak_detectors;
74pub mod advanced_memory_leak_detector;
75pub mod advanced_pattern_detection;
76pub mod automated_test_runners;
77pub mod ci_cd_automation;
78pub mod comprehensive_security_auditor;
79pub mod cross_framework;
80pub mod cross_platform_tester;
81pub mod documentation_analyzer;
82pub mod enhanced_memory_monitor;
83pub mod memory_leak_detector;
84pub mod memory_optimizer;
85pub mod performance_profiler;
86pub mod performance_regression_detector;
87pub mod regression_tester;
88pub mod security_auditor;
89
90// Re-export common types for convenience
91pub use mod_impl::{
92 BenchmarkReport, BenchmarkResult, GradientFlowAnalyzer, GradientFlowStats, GradientFunction,
93 ObjectiveFunction, OptimizerBenchmark, OptimizerComparison, OptimizerPerformance,
94 ParameterGroupStats, TestFunction, VisualizationData,
95};
96
97// Re-export visualization types
98pub use mod_impl::visualization::{
99 ComparisonMetric, OptimizerDashboard, OptimizerStateSnapshot, OptimizerStateVisualizer,
100 VisualizationExport,
101};
102
/// Prelude module for common imports.
///
/// Brings the most frequently used benchmarking, analysis, and visualization
/// types into scope with a single `use optirs_bench::prelude::*;`.
pub mod prelude {
    // Core benchmarking/analysis types plus the crate-wide error types.
    pub use crate::{
        BenchmarkReport, BenchmarkResult, GradientFlowAnalyzer, GradientFlowStats,
        GradientFunction, ObjectiveFunction, OptimError, OptimizerBenchmark, OptimizerComparison,
        OptimizerPerformance, ParameterGroupStats, Result, TestFunction, VisualizationData,
    };

    // Visualization helpers for inspecting optimizer state and dashboards.
    pub use crate::visualization::{
        ComparisonMetric, OptimizerDashboard, OptimizerStateSnapshot, OptimizerStateVisualizer,
        VisualizationExport,
    };

    // Commonly used numerics re-exported via scirs2-core so callers do not
    // need a direct ndarray/rand dependency of their own.
    pub use scirs2_core::ndarray::{Array, Array1, Array2, ArrayView, ArrayViewMut};
    pub use scirs2_core::random::{thread_rng, Rng};
}
119
#[cfg(test)]
mod tests {
    use super::*;
    // NOTE: the previous `use approx::assert_relative_eq;` was removed — it
    // was never used and only pulled an unneeded third-party dependency into
    // the test build.

    /// Smoke test: all major components can be instantiated together and
    /// start in an empty/zeroed state.
    #[test]
    fn test_library_integration() {
        let mut benchmark = OptimizerBenchmark::<f64>::new();
        benchmark.add_standard_test_functions();

        let analyzer = GradientFlowAnalyzer::<f64, scirs2_core::ndarray::Ix1>::new(10);
        let visualizer =
            visualization::OptimizerStateVisualizer::<f64, scirs2_core::ndarray::Ix1>::new(10);

        // Freshly created components have recorded no steps yet.
        assert_eq!(analyzer.step_count(), 0);
        assert_eq!(visualizer.step_count(), 0);

        // Registering test functions does not run any benchmarks, so no
        // results should exist yet. (The previous assertion here —
        // `!is_empty() || is_empty()` — was a tautology that verified
        // nothing.)
        assert!(benchmark.get_results().is_empty());
    }

    /// Error types are properly re-exported and render a useful message.
    #[test]
    fn test_error_types() {
        let error = OptimError::InvalidConfig("test".to_string());
        let result: Result<()> = Err(error);

        assert!(result.is_err());
        if let Err(e) = result {
            assert!(e.to_string().contains("Invalid configuration"));
        }
    }

    /// The prelude exposes the core types via a single glob import.
    #[test]
    fn test_prelude_imports() {
        use crate::prelude::*;

        let benchmark = OptimizerBenchmark::<f64>::new();
        let analyzer = GradientFlowAnalyzer::<f64, scirs2_core::ndarray::Ix1>::new(5);

        assert_eq!(analyzer.step_count(), 0);
        assert!(benchmark.get_results().is_empty());
    }
}
164}