scirs2_optimize/lib.rs
#![allow(clippy::all)]
#![allow(dead_code)]
#![allow(unreachable_patterns)]
#![allow(unused_assignments)]
#![allow(unused_variables)]
#![allow(private_interfaces)]
#![allow(clippy::field_reassign_with_default)]
#![recursion_limit = "512"]
// Allow common mathematical conventions in optimization code
#![allow(clippy::many_single_char_names)] // x, f, g, h, n, m, etc. are standard in optimization
#![allow(clippy::similar_names)] // x_pp, x_pm, x_mp, x_mm are standard for finite differences
//! # SciRS2 Optimize - Mathematical Optimization for Rust
//!
//! **scirs2-optimize** provides comprehensive optimization algorithms modeled after SciPy's
//! `optimize` module, offering everything from simple function minimization to complex
//! constrained optimization and global search.
//!
//! ## 🎯 Key Features
//!
//! - **Unconstrained Optimization**: BFGS, CG, Nelder-Mead, Powell
//! - **Constrained Optimization**: SLSQP, Trust-region methods
//! - **Global Optimization**: Differential Evolution, Basin-hopping, Simulated Annealing
//! - **Least Squares**: Levenberg-Marquardt, robust fitting, bounded problems
//! - **Root Finding**: Newton, Brent, Bisection methods
//! - **Scalar Optimization**: Brent, Golden section search
//! - **Bounds Support**: Box constraints for all major algorithms
//!
//! ## 📦 Module Overview
//!
//! | Module | Description | SciPy Equivalent |
//! |--------|-------------|------------------|
//! | [`unconstrained`] | Unconstrained minimization (BFGS, CG, Powell) | `scipy.optimize.minimize` |
//! | [`constrained`] | Constrained optimization (SLSQP, Trust-region) | `scipy.optimize.minimize` with constraints |
//! | [`global`] | Global optimization (DE, Basin-hopping) | `scipy.optimize.differential_evolution` |
//! | [`mod@least_squares`] | Nonlinear least squares (LM, robust methods) | `scipy.optimize.least_squares` |
//! | [`roots`] | Root finding algorithms | `scipy.optimize.root` |
//! | [`scalar`] | 1-D minimization | `scipy.optimize.minimize_scalar` |
//!
//! ## 🚀 Quick Start
//!
//! ### Installation
//!
//! ```toml
//! [dependencies]
//! scirs2-optimize = "0.4.0"
//! ```
//!
//! ### Unconstrained Minimization (Rosenbrock Function)
//!
//! ```rust
//! use scirs2_optimize::unconstrained::{minimize, Method};
//! use scirs2_core::ndarray::ArrayView1;
//!
//! // Rosenbrock function: (1-x)² + 100(y-x²)²
//! fn rosenbrock(x: &ArrayView1<f64>) -> f64 {
//!     let x0 = x[0];
//!     let x1 = x[1];
//!     (1.0 - x0).powi(2) + 100.0 * (x1 - x0.powi(2)).powi(2)
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let initial_guess = [0.0, 0.0];
//! let result = minimize(rosenbrock, &initial_guess, Method::BFGS, None)?;
//!
//! println!("Minimum at: {:?}", result.x);
//! println!("Function value: {}", result.fun);
//! println!("Converged: {}", result.success);
//! # Ok(())
//! # }
//! ```
//!
//! ### Optimization with Bounds
//!
//! Constrain variables to specific ranges:
//!
//! ```rust
//! use scirs2_optimize::{Bounds, unconstrained::{minimize, Method, Options}};
//! use scirs2_core::ndarray::ArrayView1;
//!
//! fn objective(x: &ArrayView1<f64>) -> f64 {
//!     (x[0] + 1.0).powi(2) + (x[1] + 1.0).powi(2)
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Constrain to the positive quadrant: x >= 0, y >= 0
//! let bounds = Bounds::new(&[
//!     (Some(0.0), None), // x >= 0
//!     (Some(0.0), None), // y >= 0
//! ]);
//!
//! let mut options = Options::default();
//! options.bounds = Some(bounds);
//!
//! let result = minimize(objective, &[0.5, 0.5], Method::Powell, Some(options))?;
//! println!("Constrained minimum: {:?}", result.x); // [0.0, 0.0]
//! # Ok(())
//! # }
//! ```
//!
//! ### Optimization with User-Provided Jacobian (Gradient)
//!
//! Supply an analytic gradient via the `Jacobian` enum for faster, more accurate convergence:
//!
//! ```rust
//! use scirs2_optimize::unconstrained::{Options, minimize_bfgs_with_jacobian};
//! use scirs2_optimize::Jacobian;
//! use scirs2_core::ndarray::{array, Array1, ArrayView1};
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Rosenbrock function
//! let rosenbrock = |x: &ArrayView1<f64>| -> f64 {
//!     (1.0 - x[0]).powi(2) + 100.0 * (x[1] - x[0].powi(2)).powi(2)
//! };
//!
//! // Analytic gradient of the Rosenbrock function
//! let jac = Jacobian::Function(Box::new(|x: &ArrayView1<f64>| {
//!     array![
//!         -2.0 * (1.0 - x[0]) - 400.0 * x[0] * (x[1] - x[0].powi(2)),
//!         200.0 * (x[1] - x[0].powi(2))
//!     ]
//! }));
//!
//! let x0 = Array1::from_vec(vec![0.0, 0.0]);
//! let mut options = Options::default();
//! options.max_iter = 2000;
//!
//! let result: scirs2_optimize::unconstrained::OptimizeResult<f64> =
//!     minimize_bfgs_with_jacobian(rosenbrock, x0, Some(&jac), &options)?;
//!
//! assert!(result.success);
//! println!("Minimum at: ({:.4}, {:.4})", result.x[0], result.x[1]);
//! # Ok(())
//! # }
//! ```
//!
//! ### Robust Least Squares
//!
//! Fit data with outliers using robust loss functions:
//!
//! ```rust
//! use scirs2_optimize::least_squares::{robust_least_squares, HuberLoss};
//! use scirs2_core::ndarray::{array, Array1};
//!
//! // Linear model residual: y - (a + b*x)
//! fn residual(params: &[f64], data: &[f64]) -> Array1<f64> {
//!     let n = data.len() / 2;
//!     let x = &data[0..n];
//!     let y = &data[n..];
//!
//!     let mut res = Array1::zeros(n);
//!     for i in 0..n {
//!         res[i] = y[i] - (params[0] + params[1] * x[i]);
//!     }
//!     res
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Data: x = [0,1,2,3,4], y = [0.1,0.9,2.1,2.9,10.0] (the last point is an outlier)
//! let data = array![0., 1., 2., 3., 4., 0.1, 0.9, 2.1, 2.9, 10.0];
//!
//! let huber = HuberLoss::new(1.0); // Robust to outliers
//! let x0 = array![0.0, 0.0];
//! let result = robust_least_squares(
//!     residual,
//!     &x0,
//!     huber,
//!     None::<fn(&[f64], &[f64]) -> scirs2_core::ndarray::Array2<f64>>,
//!     &data,
//!     None,
//! )?;
//!
//! println!("Robust fit: y = {:.3} + {:.3}x", result.x[0], result.x[1]);
//! # Ok(())
//! # }
//! ```
//!
//! ### Global Optimization
//!
//! Find the global minimum of multi-modal functions:
//!
//! ```rust,no_run
//! use scirs2_optimize::global::{differential_evolution, DifferentialEvolutionOptions};
//! use scirs2_core::ndarray::ArrayView1;
//!
//! // Rastrigin function (many local minima)
//! fn rastrigin(x: &ArrayView1<f64>) -> f64 {
//!     let n = x.len() as f64;
//!     10.0 * n + x.iter().map(|xi| xi.powi(2) - 10.0 * (2.0 * std::f64::consts::PI * xi).cos()).sum::<f64>()
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let bounds = vec![(-5.12, 5.12); 5]; // 5-dimensional search space
//! let options = Some(DifferentialEvolutionOptions::default());
//!
//! let result = differential_evolution(rastrigin, bounds, options, None)?;
//! println!("Global minimum: {:?}", result.x);
//! # Ok(())
//! # }
//! ```
//!
//! ### Root Finding
//!
//! Solve equations f(x) = 0:
//!
//! ```rust,no_run
//! use scirs2_optimize::roots::{root, Method};
//! use scirs2_core::ndarray::{array, Array1};
//!
//! // Find root of x² - 2 = 0 (i.e., √2)
//! fn f(x: &[f64]) -> Array1<f64> {
//!     array![x[0] * x[0] - 2.0]
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let x0 = array![1.5]; // Initial guess
//! let result = root(f, &x0, Method::Hybr, None::<fn(&[f64]) -> scirs2_core::ndarray::Array2<f64>>, None)?;
//! println!("√2 ≈ {:.10}", result.x[0]); // 1.4142135624
//! # Ok(())
//! # }
//! ```
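//!
//! The `None::<fn(&[f64]) -> Array2<f64>>` turbofish above shows the expected Jacobian
//! signature. A sketch supplying an analytic Jacobian instead; the `Some(...)` wrapping is
//! an assumption, so the block is marked `ignore`:
//!
//! ```rust,ignore
//! use scirs2_optimize::roots::{root, Method};
//! use scirs2_core::ndarray::{array, Array1, Array2};
//!
//! fn f(x: &[f64]) -> Array1<f64> {
//!     array![x[0] * x[0] - 2.0]
//! }
//!
//! // Analytic Jacobian: d/dx (x² - 2) = 2x
//! fn jac(x: &[f64]) -> Array2<f64> {
//!     array![[2.0 * x[0]]]
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let x0 = array![1.5];
//! let result = root(f, &x0, Method::Hybr, Some(jac as fn(&[f64]) -> Array2<f64>), None)?;
//! println!("√2 ≈ {:.10}", result.x[0]);
//! # Ok(())
//! # }
//! ```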
//!
//! ## Submodules
//!
//! * `unconstrained`: Unconstrained optimization algorithms
//! * `constrained`: Constrained optimization algorithms
//! * `least_squares`: Least squares minimization (including robust methods)
//! * `roots`: Root finding algorithms
//! * `scalar`: Scalar (univariate) optimization algorithms
//! * `global`: Global optimization algorithms
//!
//! ## Optimization Methods
//!
//! The following optimization methods are currently implemented:
//!
//! ### Unconstrained:
//! - **Nelder-Mead**: A derivative-free, simplex-based method
//! - **Powell**: Derivative-free method using conjugate directions
//! - **BFGS**: Quasi-Newton method with BFGS update
//! - **CG**: Nonlinear conjugate gradient method
//!
//! ### Constrained:
//! - **SLSQP**: Sequential Least SQuares Programming
//! - **TrustConstr**: Trust-region constrained optimizer
//!
//! ### Scalar (Univariate) Optimization:
//! - **Brent**: Combines parabolic interpolation with golden section search (sketched below)
//! - **Bounded**: Brent's method with bounds constraints
//! - **Golden**: Golden section search
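//!
//! A minimal `minimize_scalar` sketch using Brent's method. The exact call shape is an
//! assumption modeled on `scipy.optimize.minimize_scalar`, so the block is marked `ignore`;
//! see the [`scalar`] module docs for the real signature:
//!
//! ```rust,ignore
//! use scirs2_optimize::scalar::{minimize_scalar, Method};
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Minimize (x - 2)²; the minimum is at x = 2
//! let f = |x: f64| (x - 2.0).powi(2);
//! let result = minimize_scalar(f, Method::Brent, None)?;
//! println!("Minimum at x = {:.6}", result.x);
//! # Ok(())
//! # }
//! ```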
//!
//! ### Global:
//! - **Differential Evolution**: Stochastic global optimization method
//! - **Basin-hopping**: Random perturbations combined with local minimization
//! - **Dual Annealing**: Generalized simulated annealing coupled with local search
//! - **Particle Swarm**: Population-based optimization inspired by swarm behavior
//! - **Simulated Annealing**: Probabilistic optimization with a cooling schedule
//!
//! ### Least Squares:
//! - **Levenberg-Marquardt**: Trust-region algorithm for nonlinear least squares (sketched below)
//! - **Trust Region Reflective**: Bounds-constrained least squares
//! - **Robust Least Squares**: M-estimators for outlier-resistant regression
//!   - Huber loss: Reduces the influence of moderate outliers
//!   - Bisquare loss: Completely rejects extreme outliers
//!   - Cauchy loss: Provides very strong outlier resistance
//! - **Weighted Least Squares**: Handles heteroscedastic data (varying variance)
//! - **Bounded Least Squares**: Box constraints on parameters
//! - **Separable Least Squares**: Variable projection for partially linear models
//! - **Total Least Squares**: Errors-in-variables regression
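//!
//! A standard (non-robust) Levenberg-Marquardt sketch. The argument order and the
//! `Method::LevenbergMarquardt` variant name are assumptions modeled on the
//! `robust_least_squares` examples in this doc, so the block is marked `ignore`:
//!
//! ```rust,ignore
//! use scirs2_optimize::least_squares::{least_squares, Method};
//! use scirs2_core::ndarray::{array, Array1, Array2};
//!
//! // Residuals of the linear model y = a + b*x
//! fn residual(params: &[f64], data: &[f64]) -> Array1<f64> {
//!     let n = data.len() / 2;
//!     Array1::from_iter((0..n).map(|i| data[n + i] - (params[0] + params[1] * data[i])))
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let data = array![0., 1., 2., 3., 4., 0.1, 0.9, 2.1, 2.9, 4.1];
//! let x0 = array![0.0, 0.0];
//! let result = least_squares(
//!     residual,
//!     &x0,
//!     Method::LevenbergMarquardt,
//!     None::<fn(&[f64], &[f64]) -> Array2<f64>>,
//!     &data,
//!     None,
//! )?;
//! println!("Fit: y = {:.3} + {:.3}x", result.x[0], result.x[1]);
//! # Ok(())
//! # }
//! ```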
//!
//! ## Bounds Support
//!
//! The `unconstrained` module supports bounds constraints on variables.
//! You can specify lower and upper bounds for each variable, and the optimizer
//! will ensure that all iterates remain within these bounds.
//!
//! The following methods support bounds constraints:
//! - Powell
//! - Nelder-Mead
//! - BFGS
//! - CG (Conjugate Gradient)
//!
//! ## Examples
//!
//! ### Basic Optimization
//!
//! ```
//! // Example of minimizing a function using BFGS
//! use scirs2_core::ndarray::{array, ArrayView1};
//! use scirs2_optimize::unconstrained::{minimize, Method};
//!
//! fn rosenbrock(x: &ArrayView1<f64>) -> f64 {
//!     let a = 1.0;
//!     let b = 100.0;
//!     let x0 = x[0];
//!     let x1 = x[1];
//!     (a - x0).powi(2) + b * (x1 - x0.powi(2)).powi(2)
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let initial_guess = [0.0, 0.0];
//! let result = minimize(rosenbrock, &initial_guess, Method::BFGS, None)?;
//!
//! println!("Solution: {:?}", result.x);
//! println!("Function value at solution: {}", result.fun);
//! println!("Iterations: {}", result.nit);
//! println!("Success: {}", result.success);
//! # Ok(())
//! # }
//! ```
//!
//! ### Optimization with Bounds
//!
//! ```
//! // Example of minimizing a function with bounds constraints
//! use scirs2_core::ndarray::{array, ArrayView1};
//! use scirs2_optimize::{Bounds, unconstrained::{minimize, Method, Options}};
//!
//! // A function with its unconstrained minimum at (-1, -1)
//! fn func(x: &ArrayView1<f64>) -> f64 {
//!     (x[0] + 1.0).powi(2) + (x[1] + 1.0).powi(2)
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Create bounds: x >= 0, y >= 0.
//! // This constrains the optimization to the positive quadrant.
//! let bounds = Bounds::new(&[(Some(0.0), None), (Some(0.0), None)]);
//!
//! let initial_guess = [0.5, 0.5];
//! let mut options = Options::default();
//! options.bounds = Some(bounds);
//!
//! // Use Powell's method, which supports bounds
//! let result = minimize(func, &initial_guess, Method::Powell, Some(options))?;
//!
//! // The constrained minimum is at [0, 0] with value 2.0
//! println!("Solution: {:?}", result.x);
//! println!("Function value at solution: {}", result.fun);
//! # Ok(())
//! # }
//! ```
//!
//! ### Bounds Creation Options
//!
//! ```
//! use scirs2_optimize::Bounds;
//!
//! // Create bounds from pairs
//! // Format: [(min_x1, max_x1), (min_x2, max_x2), ...] where None = unbounded
//! let bounds1 = Bounds::new(&[
//!     (Some(0.0), Some(1.0)), // 0 <= x[0] <= 1
//!     (Some(-1.0), None),     // x[1] >= -1, no upper bound
//!     (None, Some(10.0)),     // x[2] <= 10, no lower bound
//!     (None, None),           // x[3] is completely unbounded
//! ]);
//!
//! // Alternative: create from separate lower and upper bound vectors
//! let lb = vec![Some(0.0), Some(-1.0), None, None];
//! let ub = vec![Some(1.0), None, Some(10.0), None];
//! let bounds2 = Bounds::from_vecs(lb, ub).expect("valid input");
//! ```
//!
//! ### Robust Least Squares Example
//!
//! ```
//! use scirs2_core::ndarray::{array, Array1, Array2};
//! use scirs2_optimize::least_squares::{robust_least_squares, HuberLoss};
//!
//! // Define the residual function for linear regression
//! fn residual(params: &[f64], data: &[f64]) -> Array1<f64> {
//!     let n = data.len() / 2;
//!     let x_vals = &data[0..n];
//!     let y_vals = &data[n..];
//!
//!     let mut res = Array1::zeros(n);
//!     for i in 0..n {
//!         res[i] = y_vals[i] - (params[0] + params[1] * x_vals[i]);
//!     }
//!     res
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Data with outliers
//! let data = array![0., 1., 2., 3., 4., 0.1, 0.9, 2.1, 2.9, 10.0];
//! let x0 = array![0.0, 0.0];
//!
//! // Use Huber loss for robustness
//! let huber_loss = HuberLoss::new(1.0);
//! let result = robust_least_squares(
//!     residual,
//!     &x0,
//!     huber_loss,
//!     None::<fn(&[f64], &[f64]) -> Array2<f64>>,
//!     &data,
//!     None
//! )?;
//!
//! println!("Robust solution: intercept={:.3}, slope={:.3}",
//!     result.x[0], result.x[1]);
//! # Ok(())
//! # }
//! ```
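//!
//! Supplying an analytic Jacobian (row i holds ∂rᵢ/∂params) in place of the `None` above can
//! speed up convergence. A sketch reusing the same linear model; the `Some(...)` wrapping is
//! an assumption inferred from the `None::<fn(...)>` turbofish, so the block is marked `ignore`:
//!
//! ```rust,ignore
//! use scirs2_core::ndarray::{array, Array1, Array2};
//! use scirs2_optimize::least_squares::{robust_least_squares, HuberLoss};
//!
//! fn residual(params: &[f64], data: &[f64]) -> Array1<f64> {
//!     let n = data.len() / 2;
//!     Array1::from_iter((0..n).map(|i| data[n + i] - (params[0] + params[1] * data[i])))
//! }
//!
//! // Jacobian of the residuals: ∂rᵢ/∂intercept = -1, ∂rᵢ/∂slope = -xᵢ
//! fn jacobian(_params: &[f64], data: &[f64]) -> Array2<f64> {
//!     let n = data.len() / 2;
//!     let mut jac = Array2::zeros((n, 2));
//!     for i in 0..n {
//!         jac[[i, 0]] = -1.0;
//!         jac[[i, 1]] = -data[i];
//!     }
//!     jac
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let data = array![0., 1., 2., 3., 4., 0.1, 0.9, 2.1, 2.9, 10.0];
//! let x0 = array![0.0, 0.0];
//! let result = robust_least_squares(residual, &x0, HuberLoss::new(1.0), Some(jacobian), &data, None)?;
//! println!("Fit with analytic Jacobian: y = {:.3} + {:.3}x", result.x[0], result.x[1]);
//! # Ok(())
//! # }
//! ```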

// BLAS backend linking handled through scirs2-core

// Export error types
pub mod error;
pub use error::{OptimizeError, OptimizeResult};

// Python API wrappers
// Note: python_api module not yet implemented
// #[cfg(feature = "python")]
// pub mod python_api;

// Module structure (used by other modules, must be unconditional)
pub mod advanced_coordinator;
#[cfg(feature = "async")]
pub mod async_parallel;
pub mod automatic_differentiation;
pub mod bayesian;
pub mod benchmarking;
pub mod constrained;
pub mod distributed;
pub mod distributed_gpu;
pub mod global;
pub mod gpu;
pub mod jit_optimization;
pub mod learned_optimizers;
pub mod least_squares;
pub mod ml_optimizers;
pub mod multi_objective;
pub mod neural_integration;
pub mod neuromorphic;
pub mod parallel;
pub mod quantum_inspired;
pub mod reinforcement_learning;
pub mod roots;
pub mod roots_anderson;
pub mod roots_krylov;
pub mod scalar;
pub mod self_tuning;
pub mod simd_ops;
pub mod sparse_numdiff; // Refactored into a module with submodules
pub mod stochastic;
pub mod streaming;
pub mod unconstrained;
pub mod unified_pipeline;
pub mod visualization;

// Coordinate descent methods
pub mod coordinate_descent;
// DARTS differentiable NAS
pub mod darts;
// Differentiable optimization (OptNet)
pub mod differentiable_optimization;
// Distributed ADMM/PDMM/EXTRA
pub mod distributed_admm;
// Distributionally robust optimization
pub mod dro;
// Hardware-aware NAS
pub mod hardware_nas;
// High-dimensional optimization (randomized SVD, stochastic coordinate descent)
pub mod high_dimensional;
// Integer programming (knapsack, CDCL, column generation, lattice, lift-and-project)
pub mod integer;
// Kaczmarz and randomized projection methods
pub mod kaczmarz;
// Multi-fidelity optimization (Hyperband)
pub mod multi_fidelity;
// Quantum-classical hybrid (QAOA, VQE, MPS)
pub mod quantum_classical;
// Second-order methods (L-BFGS-B, SR1, SLBFGS)
pub mod second_order;
// Sketched optimization
pub mod sketched;
// Subspace embedding
pub mod subspace_embed;

// Common optimization result structure
pub mod result;
pub use result::OptimizeResults;

// Convenience re-exports for common functions
pub use advanced_coordinator::{
    advanced_optimize, AdvancedConfig, AdvancedCoordinator, AdvancedStats, AdvancedStrategy,
    StrategyPerformance,
};
#[cfg(feature = "async")]
pub use async_parallel::{
    AsyncDifferentialEvolution, AsyncOptimizationConfig, AsyncOptimizationStats,
    SlowEvaluationStrategy,
};
pub use automatic_differentiation::{
    autodiff, create_ad_gradient, create_ad_hessian, optimize_ad_mode, ADMode, ADResult,
    AutoDiffFunction, AutoDiffOptions,
};
pub use bayesian::{
    optimize as bayesian_optimize_advanced, AcquisitionFn, AcquisitionType, BayesianOptResult,
    BayesianOptimizer as AdvancedBayesianOptimizer, BayesianOptimizerConfig, GpSurrogate,
    GpSurrogateConfig, MaternKernel, MaternVariant, RbfKernel, SamplingConfig, SamplingStrategy,
    SurrogateKernel,
};
pub use benchmarking::{
    benchmark_suites, test_functions, AlgorithmRanking, BenchmarkConfig, BenchmarkResults,
    BenchmarkRun, BenchmarkSummary, BenchmarkSystem, ProblemCharacteristics, RuntimeStats,
    TestProblem,
};
pub use constrained::minimize_constrained;
pub use distributed::{
    algorithms::{DistributedDifferentialEvolution, DistributedParticleSwarm},
    DistributedConfig, DistributedOptimizationContext, DistributedStats, DistributionStrategy,
    MPIInterface, WorkAssignment,
};
pub use distributed_gpu::{
    DistributedGpuConfig, DistributedGpuOptimizer, DistributedGpuResults, DistributedGpuStats,
    GpuCommunicationStrategy, IterationStats,
};
pub use global::{
    basinhopping, bayesian_optimization, differential_evolution, dual_annealing,
    generate_diverse_start_points, multi_start, multi_start_with_clustering, particle_swarm,
    simulated_annealing,
};
pub use gpu::{
    acceleration::{
        AccelerationConfig, AccelerationManager, AccelerationStrategy, PerformanceStats,
    },
    algorithms::{GpuDifferentialEvolution, GpuParticleSwarm},
    GpuFunction, GpuOptimizationConfig, GpuOptimizationContext, GpuPrecision,
};
pub use jit_optimization::{optimize_function, FunctionPattern, JitCompiler, JitOptions, JitStats};
pub use learned_optimizers::{
    learned_optimize, ActivationType, AdaptationStatistics, AdaptiveNASSystem,
    AdaptiveTransformerOptimizer, FewShotLearningOptimizer, LearnedHyperparameterTuner,
    LearnedOptimizationConfig, LearnedOptimizer, MetaOptimizerState, NeuralAdaptiveOptimizer,
    OptimizationNetwork, OptimizationProblem, ParameterDistribution, ProblemEncoder, TrainingTask,
};
pub use least_squares::{
    bounded_least_squares, least_squares, robust_least_squares, separable_least_squares,
    total_least_squares, weighted_least_squares, BisquareLoss, CauchyLoss, HuberLoss,
};
pub use ml_optimizers::{
    ml_problems, ADMMOptimizer, CoordinateDescentOptimizer, ElasticNetOptimizer,
    GroupLassoOptimizer, LassoOptimizer,
};
pub use multi_objective::{
    MultiObjectiveConfig, MultiObjectiveResult, MultiObjectiveSolution, NSGAII, NSGAIII,
};
pub use neural_integration::{optimizers, NeuralOptimizer, NeuralParameters, NeuralTrainer};
pub use neuromorphic::{
    neuromorphic_optimize, BasicNeuromorphicOptimizer, NeuromorphicConfig, NeuromorphicNetwork,
    NeuromorphicOptimizer, NeuronState, SpikeEvent,
};
pub use quantum_inspired::{
    quantum_optimize, quantum_particle_swarm_optimize, Complex, CoolingSchedule,
    QuantumAnnealingSchedule, QuantumInspiredOptimizer, QuantumOptimizationStats, QuantumState,
};
pub use reinforcement_learning::{
    actor_critic_optimize, bandit_optimize, evolutionary_optimize, meta_learning_optimize,
    policy_gradient_optimize, BanditOptimizer, EvolutionaryStrategy, Experience,
    MetaLearningOptimizer, OptimizationAction, OptimizationState, QLearningOptimizer,
    RLOptimizationConfig, RLOptimizer,
};
pub use roots::root;
pub use scalar::minimize_scalar;
pub use self_tuning::{
    presets, AdaptationResult, AdaptationStrategy, ParameterChange, ParameterValue,
    PerformanceMetrics, SelfTuningConfig, SelfTuningOptimizer, TunableParameter,
};
pub use sparse_numdiff::{sparse_hessian, sparse_jacobian, SparseFiniteDiffOptions};
pub use stochastic::{
    minimize_adam, minimize_adamw, minimize_rmsprop, minimize_sgd, minimize_sgd_momentum,
    minimize_stochastic, AdamOptions, AdamWOptions, DataProvider, InMemoryDataProvider,
    LearningRateSchedule, MomentumOptions, RMSPropOptions, SGDOptions, StochasticGradientFunction,
    StochasticMethod, StochasticOptions,
};
pub use streaming::{
    exponentially_weighted_rls, incremental_bfgs, incremental_lbfgs,
    incremental_lbfgs_linear_regression, kalman_filter_estimator, online_gradient_descent,
    online_linear_regression, online_logistic_regression, real_time_linear_regression,
    recursive_least_squares, rolling_window_gradient_descent, rolling_window_least_squares,
    rolling_window_linear_regression, rolling_window_weighted_least_squares,
    streaming_trust_region_linear_regression, streaming_trust_region_logistic_regression,
    IncrementalNewton, IncrementalNewtonMethod, LinearRegressionObjective,
    LogisticRegressionObjective, RealTimeEstimator, RealTimeMethod, RollingWindowOptimizer,
    StreamingConfig, StreamingDataPoint, StreamingObjective, StreamingOptimizer, StreamingStats,
    StreamingTrustRegion,
};
pub use unconstrained::{
    cauchy_point, dogleg_step, minimize, solve_trust_subproblem, trust_region_minimize, Bounds,
    Jacobian, TrustRegionConfig, TrustRegionResult,
};
pub use unified_pipeline::{
    presets as unified_presets, UnifiedOptimizationConfig, UnifiedOptimizationResults,
    UnifiedOptimizer,
};
pub use visualization::{
    tracking::TrajectoryTracker, ColorScheme, OptimizationTrajectory, OptimizationVisualizer,
    OutputFormat, VisualizationConfig,
};

/// Prelude module for convenient one-line imports of the most commonly used items.
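///
/// A minimal usage sketch; `minimize` and the `UnconstrainedMethod` alias come from the
/// re-exports below:
///
/// ```rust
/// use scirs2_optimize::prelude::{minimize, UnconstrainedMethod};
/// use scirs2_core::ndarray::ArrayView1;
///
/// fn sphere(x: &ArrayView1<f64>) -> f64 {
///     x[0].powi(2) + x[1].powi(2)
/// }
///
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let result = minimize(sphere, &[1.0, 1.0], UnconstrainedMethod::BFGS, None)?;
/// println!("Minimum at: {:?}", result.x);
/// # Ok(())
/// # }
/// ```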
pub mod prelude {
    pub use crate::advanced_coordinator::{
        advanced_optimize, AdvancedConfig, AdvancedCoordinator, AdvancedStats, AdvancedStrategy,
        StrategyPerformance,
    };
    #[cfg(feature = "async")]
    pub use crate::async_parallel::{
        AsyncDifferentialEvolution, AsyncOptimizationConfig, AsyncOptimizationStats,
        SlowEvaluationStrategy,
    };
    pub use crate::automatic_differentiation::{
        autodiff, create_ad_gradient, create_ad_hessian, optimize_ad_mode, ADMode, ADResult,
        AutoDiffFunction, AutoDiffOptions, Dual, DualNumber,
    };
    pub use crate::bayesian::{
        optimize as bayesian_optimize_advanced, AcquisitionFn, AcquisitionType, BayesianOptResult,
        BayesianOptimizer as AdvancedBayesianOptimizer, BayesianOptimizerConfig, GpSurrogate,
        GpSurrogateConfig, MaternKernel, MaternVariant, RbfKernel, SamplingConfig,
        SamplingStrategy, SurrogateKernel,
    };
    pub use crate::benchmarking::{
        benchmark_suites, test_functions, AlgorithmRanking, BenchmarkConfig, BenchmarkResults,
        BenchmarkRun, BenchmarkSummary, BenchmarkSystem, ProblemCharacteristics, RuntimeStats,
        TestProblem,
    };
    pub use crate::constrained::{minimize_constrained, Method as ConstrainedMethod};
    pub use crate::distributed::{
        algorithms::{DistributedDifferentialEvolution, DistributedParticleSwarm},
        DistributedConfig, DistributedOptimizationContext, DistributedStats, DistributionStrategy,
        MPIInterface, WorkAssignment,
    };
    pub use crate::distributed_gpu::{
        DistributedGpuConfig, DistributedGpuOptimizer, DistributedGpuResults, DistributedGpuStats,
        GpuCommunicationStrategy, IterationStats,
    };
    pub use crate::error::{OptimizeError, OptimizeResult};
    pub use crate::global::{
        basinhopping, bayesian_optimization, differential_evolution, dual_annealing,
        generate_diverse_start_points, multi_start_with_clustering, particle_swarm,
        simulated_annealing, AcquisitionFunctionType, BasinHoppingOptions,
        BayesianOptimizationOptions, BayesianOptimizer, ClusterCentroid, ClusteringAlgorithm,
        ClusteringOptions, ClusteringResult, DifferentialEvolutionOptions, DualAnnealingOptions,
        InitialPointGenerator, KernelType, LocalMinimum, Parameter, ParticleSwarmOptions,
        SimulatedAnnealingOptions, Space, StartPointStrategy,
    };
    pub use crate::gpu::{
        acceleration::{
            AccelerationConfig, AccelerationManager, AccelerationStrategy, PerformanceStats,
        },
        algorithms::{GpuDifferentialEvolution, GpuParticleSwarm},
        GpuFunction, GpuOptimizationConfig, GpuOptimizationContext, GpuPrecision,
    };
    pub use crate::jit_optimization::{
        optimize_function, FunctionPattern, JitCompiler, JitOptions, JitStats,
    };
    pub use crate::learned_optimizers::{
        learned_optimize, ActivationType, AdaptationStatistics, AdaptiveNASSystem,
        AdaptiveTransformerOptimizer, FewShotLearningOptimizer, LearnedHyperparameterTuner,
        LearnedOptimizationConfig, LearnedOptimizer, MetaOptimizerState, NeuralAdaptiveOptimizer,
        OptimizationNetwork, OptimizationProblem, ParameterDistribution, ProblemEncoder,
        TrainingTask,
    };
    pub use crate::least_squares::{
        bounded_least_squares, least_squares, robust_least_squares, separable_least_squares,
        total_least_squares, weighted_least_squares, BisquareLoss, BoundedOptions, CauchyLoss,
        HuberLoss, LinearSolver, Method as LeastSquaresMethod, RobustLoss, RobustOptions,
        SeparableOptions, SeparableResult, TLSMethod, TotalLeastSquaresOptions,
        TotalLeastSquaresResult, WeightedOptions,
    };
    pub use crate::ml_optimizers::{
        ml_problems, ADMMOptimizer, CoordinateDescentOptimizer, ElasticNetOptimizer,
        GroupLassoOptimizer, LassoOptimizer,
    };
    pub use crate::multi_objective::{
        MultiObjectiveConfig, MultiObjectiveResult, MultiObjectiveSolution, NSGAII, NSGAIII,
    };
    pub use crate::neural_integration::{
        optimizers, NeuralOptimizer, NeuralParameters, NeuralTrainer,
    };
    pub use crate::neuromorphic::{
        neuromorphic_optimize, BasicNeuromorphicOptimizer, NeuromorphicConfig, NeuromorphicNetwork,
        NeuromorphicOptimizer, NeuronState, SpikeEvent,
    };
    pub use crate::parallel::{
        parallel_evaluate_batch, parallel_finite_diff_gradient, ParallelOptions,
    };
    pub use crate::quantum_inspired::{
        quantum_optimize, quantum_particle_swarm_optimize, Complex, CoolingSchedule,
        QuantumAnnealingSchedule, QuantumInspiredOptimizer, QuantumOptimizationStats, QuantumState,
    };
    pub use crate::reinforcement_learning::{
        bandit_optimize, evolutionary_optimize, meta_learning_optimize, policy_gradient_optimize,
        BanditOptimizer, EvolutionaryStrategy, Experience, MetaLearningOptimizer,
        OptimizationAction, OptimizationState, QLearningOptimizer, RLOptimizationConfig,
        RLOptimizer,
    };
    pub use crate::result::OptimizeResults;
    pub use crate::roots::{root, Method as RootMethod};
    pub use crate::scalar::{
        minimize_scalar, Method as ScalarMethod, Options as ScalarOptions, ScalarOptimizeResult,
    };
    pub use crate::self_tuning::{
        presets, AdaptationResult, AdaptationStrategy, ParameterChange, ParameterValue,
        PerformanceMetrics, SelfTuningConfig, SelfTuningOptimizer, TunableParameter,
    };
    pub use crate::sparse_numdiff::{sparse_hessian, sparse_jacobian, SparseFiniteDiffOptions};
    pub use crate::streaming::{
        exponentially_weighted_rls, incremental_bfgs, incremental_lbfgs,
        incremental_lbfgs_linear_regression, kalman_filter_estimator, online_gradient_descent,
        online_linear_regression, online_logistic_regression, real_time_linear_regression,
        recursive_least_squares, rolling_window_gradient_descent, rolling_window_least_squares,
        rolling_window_linear_regression, rolling_window_weighted_least_squares,
        streaming_trust_region_linear_regression, streaming_trust_region_logistic_regression,
        IncrementalNewton, IncrementalNewtonMethod, LinearRegressionObjective,
        LogisticRegressionObjective, RealTimeEstimator, RealTimeMethod, RollingWindowOptimizer,
        StreamingConfig, StreamingDataPoint, StreamingObjective, StreamingOptimizer,
        StreamingStats, StreamingTrustRegion,
    };
    pub use crate::unconstrained::{
        cauchy_point, dogleg_step, minimize, solve_trust_subproblem, trust_region_minimize, Bounds,
        Jacobian, Method as UnconstrainedMethod, Options, TrustRegionConfig, TrustRegionResult,
    };
    pub use crate::unified_pipeline::{
        presets as unified_presets, UnifiedOptimizationConfig, UnifiedOptimizationResults,
        UnifiedOptimizer,
    };
    pub use crate::visualization::{
        tracking::TrajectoryTracker, ColorScheme, OptimizationTrajectory, OptimizationVisualizer,
        OutputFormat, VisualizationConfig,
    };
}

#[cfg(test)]
mod tests {
    #[test]
    fn it_works() {
        assert_eq!(2 + 2, 4);
    }
}