// scirs2-optimize 0.4.2 — Optimization module for SciRS2 (crate root)
#![allow(clippy::all)]
#![allow(dead_code)]
#![allow(unreachable_patterns)]
#![allow(unused_assignments)]
#![allow(unused_variables)]
#![allow(private_interfaces)]
//! # SciRS2 Optimize - Mathematical Optimization for Rust
//!
//! **scirs2-optimize** provides comprehensive optimization algorithms modeled after SciPy's
//! `optimize` module, offering everything from simple function minimization to complex
//! constrained optimization and global search.
//!
//! ## 🎯 Key Features
//!
//! - **Unconstrained Optimization**: BFGS, CG, Nelder-Mead, Powell
//! - **Constrained Optimization**: SLSQP, Trust-region methods
//! - **Global Optimization**: Differential Evolution, Basin-hopping, Simulated Annealing
//! - **Least Squares**: Levenberg-Marquardt, robust fitting, bounded problems
//! - **Root Finding**: Newton, Brent, Bisection methods
//! - **Scalar Optimization**: Brent, Golden section search
//! - **Bounds Support**: Box constraints for all major algorithms

#![allow(clippy::field_reassign_with_default)]
#![recursion_limit = "512"]
// Allow common mathematical conventions in optimization code
#![allow(clippy::many_single_char_names)] // x, f, g, h, n, m etc. are standard in optimization
#![allow(clippy::similar_names)] // x_pp, x_pm, x_mp, x_mm are standard for finite differences
//!
//! ## 📦 Module Overview
//!
//! | Module | Description | SciPy Equivalent |
//! |--------|-------------|------------------|
//! | [`unconstrained`] | Unconstrained minimization (BFGS, CG, Powell) | `scipy.optimize.minimize` |
//! | [`constrained`] | Constrained optimization (SLSQP, Trust-region) | `scipy.optimize.minimize` with constraints |
//! | [`global`] | Global optimization (DE, Basin-hopping) | `scipy.optimize.differential_evolution` |
//! | [`mod@least_squares`] | Nonlinear least squares (LM, robust methods) | `scipy.optimize.least_squares` |
//! | [`roots`] | Root finding algorithms | `scipy.optimize.root` |
//! | [`scalar`] | 1-D minimization | `scipy.optimize.minimize_scalar` |
//!
//! ## 🚀 Quick Start
//!
//! ### Installation
//!
//! ```toml
//! [dependencies]
//! scirs2-optimize = "0.4.2"
//! ```
//!
//! ### Unconstrained Minimization (Rosenbrock Function)
//!
//! ```rust
//! use scirs2_optimize::unconstrained::{minimize, Method};
//! use scirs2_core::ndarray::ArrayView1;
//!
//! // Rosenbrock function: (1-x)² + 100(y-x²)²
//! fn rosenbrock(x: &ArrayView1<f64>) -> f64 {
//!     let x0 = x[0];
//!     let x1 = x[1];
//!     (1.0 - x0).powi(2) + 100.0 * (x1 - x0.powi(2)).powi(2)
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let initial_guess = [0.0, 0.0];
//! let result = minimize(rosenbrock, &initial_guess, Method::BFGS, None)?;
//!
//! println!("Minimum at: {:?}", result.x);
//! println!("Function value: {}", result.fun);
//! println!("Converged: {}", result.success);
//! # Ok(())
//! # }
//! ```
//!
//! ### Optimization with Bounds
//!
//! Constrain variables to specific ranges:
//!
//! ```rust
//! use scirs2_optimize::{Bounds, unconstrained::{minimize, Method, Options}};
//! use scirs2_core::ndarray::ArrayView1;
//!
//! fn objective(x: &ArrayView1<f64>) -> f64 {
//!     (x[0] + 1.0).powi(2) + (x[1] + 1.0).powi(2)
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Constrain to positive quadrant: x >= 0, y >= 0
//! let bounds = Bounds::new(&[
//!     (Some(0.0), None),  // x >= 0
//!     (Some(0.0), None),  // y >= 0
//! ]);
//!
//! let mut options = Options::default();
//! options.bounds = Some(bounds);
//!
//! let result = minimize(objective, &[0.5, 0.5], Method::Powell, Some(options))?;
//! println!("Constrained minimum: {:?}", result.x);  // [0.0, 0.0]
//! # Ok(())
//! # }
//! ```
//!
//! ### Optimization with User-Provided Jacobian (Gradient)
//!
//! Supply an analytic gradient via the `Jacobian` enum for faster, more accurate convergence:
//!
//! ```rust
//! use scirs2_optimize::unconstrained::{Options, minimize_bfgs_with_jacobian};
//! use scirs2_optimize::Jacobian;
//! use scirs2_core::ndarray::{array, Array1, ArrayView1};
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Rosenbrock function
//! let rosenbrock = |x: &ArrayView1<f64>| -> f64 {
//!     (1.0 - x[0]).powi(2) + 100.0 * (x[1] - x[0].powi(2)).powi(2)
//! };
//!
//! // Analytic gradient of Rosenbrock
//! let jac = Jacobian::Function(Box::new(|x: &ArrayView1<f64>| {
//!     array![
//!         -2.0 * (1.0 - x[0]) - 400.0 * x[0] * (x[1] - x[0].powi(2)),
//!         200.0 * (x[1] - x[0].powi(2))
//!     ]
//! }));
//!
//! let x0 = Array1::from_vec(vec![0.0, 0.0]);
//! let mut options = Options::default();
//! options.max_iter = 2000;
//!
//! let result: scirs2_optimize::unconstrained::OptimizeResult<f64> =
//!     minimize_bfgs_with_jacobian(rosenbrock, x0, Some(&jac), &options)?;
//!
//! assert!(result.success);
//! println!("Minimum at: ({:.4}, {:.4})", result.x[0], result.x[1]);
//! # Ok(())
//! # }
//! ```
//!
//! ### Robust Least Squares
//!
//! Fit data with outliers using robust loss functions:
//!
//! ```rust
//! use scirs2_optimize::least_squares::{robust_least_squares, HuberLoss};
//! use scirs2_core::ndarray::{array, Array1};
//!
//! // Linear model residual: y - (a + b*x)
//! fn residual(params: &[f64], data: &[f64]) -> Array1<f64> {
//!     let n = data.len() / 2;
//!     let x = &data[0..n];
//!     let y = &data[n..];
//!
//!     let mut res = Array1::zeros(n);
//!     for i in 0..n {
//!         res[i] = y[i] - (params[0] + params[1] * x[i]);
//!     }
//!     res
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Data: x = [0,1,2,3,4], y = [0.1,0.9,2.1,2.9,10.0] (last point is outlier)
//! let data = array![0.,1.,2.,3.,4., 0.1,0.9,2.1,2.9,10.0];
//!
//! let huber = HuberLoss::new(1.0);  // Robust to outliers
//! let x0 = array![0.0, 0.0];
//! let result = robust_least_squares(
//!     residual, &x0, huber, None::<fn(&[f64], &[f64]) -> scirs2_core::ndarray::Array2<f64>>, &data, None
//! )?;
//!
//! println!("Robust fit: y = {:.3} + {:.3}x", result.x[0], result.x[1]);
//! # Ok(())
//! # }
//! ```
//!
//! ### Global Optimization
//!
//! Find global minimum of multi-modal functions:
//!
//! ```rust,no_run
//! use scirs2_optimize::global::{differential_evolution, DifferentialEvolutionOptions};
//! use scirs2_core::ndarray::ArrayView1;
//!
//! // Rastrigin function (multiple local minima)
//! fn rastrigin(x: &ArrayView1<f64>) -> f64 {
//!     let n = x.len() as f64;
//!     10.0 * n + x.iter().map(|xi| xi.powi(2) - 10.0 * (2.0 * std::f64::consts::PI * xi).cos()).sum::<f64>()
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let bounds = vec![(-5.12, 5.12); 5];  // 5-dimensional search space
//! let options = Some(DifferentialEvolutionOptions::default());
//!
//! let result = differential_evolution(rastrigin, bounds, options, None)?;
//! println!("Global minimum: {:?}", result.x);
//! # Ok(())
//! # }
//! ```
//!
//! ### Root Finding
//!
//! Solve equations f(x) = 0:
//!
//! ```rust,no_run
//! use scirs2_optimize::roots::{root, Method};
//! use scirs2_core::ndarray::{array, Array1};
//!
//! // Find root of x² - 2 = 0 (i.e., √2)
//! fn f(x: &[f64]) -> Array1<f64> {
//!     array![x[0] * x[0] - 2.0]
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let x0 = array![1.5];  // Initial guess
//! let result = root(f, &x0, Method::Hybr, None::<fn(&[f64]) -> scirs2_core::ndarray::Array2<f64>>, None)?;
//! println!("√2 ≈ {:.10}", result.x[0]);  // 1.4142135624
//! # Ok(())
//! # }
//! ```
//!
//! ## Submodules
//!
//! * `unconstrained`: Unconstrained optimization algorithms
//! * `constrained`: Constrained optimization algorithms
//! * `least_squares`: Least squares minimization (including robust methods)
//! * `roots`: Root finding algorithms
//! * `scalar`: Scalar (univariate) optimization algorithms
//! * `global`: Global optimization algorithms
//!
//! ## Optimization Methods
//!
//! The following optimization methods are currently implemented:
//!
//! ### Unconstrained:
//! - **Nelder-Mead**: A derivative-free method using simplex-based approach
//! - **Powell**: Derivative-free method using conjugate directions
//! - **BFGS**: Quasi-Newton method with BFGS update
//! - **CG**: Nonlinear conjugate gradient method
//!
//! ### Constrained:
//! - **SLSQP**: Sequential Least SQuares Programming
//! - **TrustConstr**: Trust-region constrained optimizer
//!
//! ### Scalar (Univariate) Optimization:
//! - **Brent**: Combines parabolic interpolation with golden section search
//! - **Bounded**: Brent's method with bounds constraints
//! - **Golden**: Golden section search
//!
//! ### Global:
//! - **Differential Evolution**: Stochastic global optimization method
//! - **Basin-hopping**: Random perturbations with local minimization
//! - **Dual Annealing**: Simulated annealing with fast annealing
//! - **Particle Swarm**: Population-based optimization inspired by swarm behavior
//! - **Simulated Annealing**: Probabilistic optimization with cooling schedule
//!
//! ### Least Squares:
//! - **Levenberg-Marquardt**: Trust-region algorithm for nonlinear least squares
//! - **Trust Region Reflective**: Bounds-constrained least squares
//! - **Robust Least Squares**: M-estimators for outlier-resistant regression
//!   - Huber loss: Reduces influence of moderate outliers
//!   - Bisquare loss: Completely rejects extreme outliers
//!   - Cauchy loss: Provides very strong outlier resistance
//! - **Weighted Least Squares**: Handles heteroscedastic data (varying variance)
//! - **Bounded Least Squares**: Box constraints on parameters
//! - **Separable Least Squares**: Variable projection for partially linear models
//! - **Total Least Squares**: Errors-in-variables regression
//!
//! ## Bounds Support
//!
//! The `unconstrained` module supports bounds constraints for variables.
//! You can specify lower and upper bounds for each variable, and the optimizer
//! will ensure that all iterates remain within these bounds.
//!
//! The following methods support bounds constraints:
//! - Powell
//! - Nelder-Mead
//! - BFGS
//! - CG (Conjugate Gradient)
//!
//! ## Examples
//!
//! ### Basic Optimization
//!
//! ```
//! // Example of minimizing a function using BFGS
//! use scirs2_core::ndarray::{array, ArrayView1};
//! use scirs2_optimize::unconstrained::{minimize, Method};
//!
//! fn rosenbrock(x: &ArrayView1<f64>) -> f64 {
//!     let a = 1.0;
//!     let b = 100.0;
//!     let x0 = x[0];
//!     let x1 = x[1];
//!     (a - x0).powi(2) + b * (x1 - x0.powi(2)).powi(2)
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let initial_guess = [0.0, 0.0];
//! let result = minimize(rosenbrock, &initial_guess, Method::BFGS, None)?;
//!
//! println!("Solution: {:?}", result.x);
//! println!("Function value at solution: {}", result.fun);
//! println!("Number of nit: {}", result.nit);
//! println!("Success: {}", result.success);
//! # Ok(())
//! # }
//! ```
//!
//! ### Optimization with Bounds
//!
//! ```
//! // Example of minimizing a function with bounds constraints
//! use scirs2_core::ndarray::{array, ArrayView1};
//! use scirs2_optimize::{Bounds, unconstrained::{minimize, Method, Options}};
//!
//! // A function with minimum at (-1, -1)
//! fn func(x: &ArrayView1<f64>) -> f64 {
//!     (x[0] + 1.0).powi(2) + (x[1] + 1.0).powi(2)
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Create bounds: x >= 0, y >= 0
//! // This will constrain the optimization to the positive quadrant
//! let bounds = Bounds::new(&[(Some(0.0), None), (Some(0.0), None)]);
//!
//! let initial_guess = [0.5, 0.5];
//! let mut options = Options::default();
//! options.bounds = Some(bounds);
//!
//! // Use Powell's method which supports bounds
//! let result = minimize(func, &initial_guess, Method::Powell, Some(options))?;
//!
//! // The constrained minimum should be at [0, 0] with value 2.0
//! println!("Solution: {:?}", result.x);
//! println!("Function value at solution: {}", result.fun);
//! # Ok(())
//! # }
//! ```
//!
//! ### Bounds Creation Options
//!
//! ```
//! use scirs2_optimize::Bounds;
//!
//! // Create bounds from pairs
//! // Format: [(min_x1, max_x1), (min_x2, max_x2), ...] where None = unbounded
//! let bounds1 = Bounds::new(&[
//!     (Some(0.0), Some(1.0)),  // 0 <= x[0] <= 1
//!     (Some(-1.0), None),      // x[1] >= -1, no upper bound
//!     (None, Some(10.0)),      // x[2] <= 10, no lower bound
//!     (None, None)             // x[3] is completely unbounded
//! ]);
//!
//! // Alternative: create from separate lower and upper bound vectors
//! let lb = vec![Some(0.0), Some(-1.0), None, None];
//! let ub = vec![Some(1.0), None, Some(10.0), None];
//! let bounds2 = Bounds::from_vecs(lb, ub).expect("valid input");
//! ```
//!
//! ### Robust Least Squares Example
//!
//! ```
//! use scirs2_core::ndarray::{array, Array1, Array2};
//! use scirs2_optimize::least_squares::{robust_least_squares, HuberLoss};
//!
//! // Define residual function for linear regression
//! fn residual(params: &[f64], data: &[f64]) -> Array1<f64> {
//!     let n = data.len() / 2;
//!     let x_vals = &data[0..n];
//!     let y_vals = &data[n..];
//!     
//!     let mut res = Array1::zeros(n);
//!     for i in 0..n {
//!         res[i] = y_vals[i] - (params[0] + params[1] * x_vals[i]);
//!     }
//!     res
//! }
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Data with outliers
//! let data = array![0., 1., 2., 3., 4., 0.1, 0.9, 2.1, 2.9, 10.0];
//! let x0 = array![0.0, 0.0];
//!
//! // Use Huber loss for robustness
//! let huber_loss = HuberLoss::new(1.0);
//! let result = robust_least_squares(
//!     residual,
//!     &x0,
//!     huber_loss,
//!     None::<fn(&[f64], &[f64]) -> Array2<f64>>,
//!     &data,
//!     None
//! )?;
//!
//! println!("Robust solution: intercept={:.3}, slope={:.3}",
//!          result.x[0], result.x[1]);
//! # Ok(())
//! # }
//! ```

// BLAS backend linking is handled through scirs2-core; no link attributes are needed here.

// Error types shared by every optimizer in this crate.
pub mod error;
pub use error::{OptimizeError, OptimizeResult};

// Python API wrappers
// Note: python_api module not yet implemented
// #[cfg(feature = "python")]
// pub mod python_api;

// Module structure (used by other modules, must be unconditional)
pub mod advanced_coordinator;
#[cfg(feature = "async")]
pub mod async_parallel;
pub mod automatic_differentiation;
pub mod bayesian;
pub mod benchmarking;
pub mod constrained;
pub mod distributed;
pub mod distributed_gpu;
pub mod global;
pub mod gpu;
pub mod jit_optimization;
pub mod learned_optimizers;
pub mod least_squares;
pub mod ml_optimizers;
pub mod multi_objective;
pub mod neural_integration;
pub mod neuromorphic;
pub mod parallel;
pub mod quantum_inspired;
pub mod reinforcement_learning;
pub mod roots;
pub mod roots_anderson;
pub mod roots_krylov;
pub mod scalar;
pub mod self_tuning;
pub mod simd_ops;
pub mod sparse_numdiff; // Sparse finite differences; refactored into a module with submodules
pub mod stochastic;
pub mod streaming;
pub mod unconstrained;
pub mod unified_pipeline;
pub mod visualization;

// Coordinate descent methods
pub mod coordinate_descent;
// DARTS differentiable NAS
pub mod darts;
// Differentiable optimization (OptNet)
pub mod differentiable_optimization;
// Distributed ADMM/PDMM/EXTRA
pub mod distributed_admm;
// Distributionally robust optimization
pub mod dro;
// Hardware-aware NAS
pub mod hardware_nas;
// Neural Architecture Search (random, evolutionary, differentiable, AutoML)
pub mod nas;
// High-dimensional optimization (randomized SVD, stochastic coordinate)
pub mod high_dimensional;
// Integer programming (knapsack, CDCL, column generation, lattice, lift-project)
pub mod integer;
// Kaczmarz and randomized projection
pub mod kaczmarz;
// Multi-fidelity optimization (Hyperband)
pub mod multi_fidelity;
// Quantum-classical hybrid (QAOA, VQE, MPS)
pub mod quantum_classical;
// Second-order methods (L-BFGS-B, SR1, SLBFGS)
pub mod second_order;
// Sketched optimization
pub mod sketched;
// Subspace embedding
pub mod subspace_embed;
pub mod subspace_embedding;

// Common result structure (`OptimizeResults`) returned by most solvers in this crate.
pub mod result;
pub use result::OptimizeResults;

// Convenience re-exports for common functions
pub use advanced_coordinator::{
    advanced_optimize, AdvancedConfig, AdvancedCoordinator, AdvancedStats, AdvancedStrategy,
    StrategyPerformance,
};
#[cfg(feature = "async")]
pub use async_parallel::{
    AsyncDifferentialEvolution, AsyncOptimizationConfig, AsyncOptimizationStats,
    SlowEvaluationStrategy,
};
pub use automatic_differentiation::{
    autodiff, create_ad_gradient, create_ad_hessian, optimize_ad_mode, ADMode, ADResult,
    AutoDiffFunction, AutoDiffOptions,
};
pub use bayesian::{
    optimize as bayesian_optimize_advanced, AcquisitionFn, AcquisitionType, BayesianOptResult,
    BayesianOptimizer as AdvancedBayesianOptimizer, BayesianOptimizerConfig, GpSurrogate,
    GpSurrogateConfig, MaternKernel, MaternVariant, RbfKernel, SamplingConfig, SamplingStrategy,
    SurrogateKernel,
};
pub use benchmarking::{
    benchmark_suites, test_functions, AlgorithmRanking, BenchmarkConfig, BenchmarkResults,
    BenchmarkRun, BenchmarkSummary, BenchmarkSystem, ProblemCharacteristics, RuntimeStats,
    TestProblem,
};
pub use constrained::minimize_constrained;
pub use distributed::{
    algorithms::{DistributedDifferentialEvolution, DistributedParticleSwarm},
    DistributedConfig, DistributedOptimizationContext, DistributedStats, DistributionStrategy,
    MPIInterface, WorkAssignment,
};
pub use distributed_gpu::{
    DistributedGpuConfig, DistributedGpuOptimizer, DistributedGpuResults, DistributedGpuStats,
    GpuCommunicationStrategy, IterationStats,
};
pub use global::{
    basinhopping, bayesian_optimization, differential_evolution, dual_annealing,
    generate_diverse_start_points, multi_start, multi_start_with_clustering, particle_swarm,
    simulated_annealing,
};
pub use gpu::{
    acceleration::{
        AccelerationConfig, AccelerationManager, AccelerationStrategy, PerformanceStats,
    },
    algorithms::{GpuDifferentialEvolution, GpuParticleSwarm},
    GpuFunction, GpuOptimizationConfig, GpuOptimizationContext, GpuPrecision,
};
pub use jit_optimization::{optimize_function, FunctionPattern, JitCompiler, JitOptions, JitStats};
pub use learned_optimizers::{
    learned_optimize, ActivationType, AdaptationStatistics, AdaptiveNASSystem,
    AdaptiveTransformerOptimizer, FewShotLearningOptimizer, LearnedHyperparameterTuner,
    LearnedOptimizationConfig, LearnedOptimizer, MetaOptimizerState, NeuralAdaptiveOptimizer,
    OptimizationNetwork, OptimizationProblem, ParameterDistribution, ProblemEncoder, TrainingTask,
};
pub use least_squares::{
    bounded_least_squares, least_squares, robust_least_squares, separable_least_squares,
    total_least_squares, weighted_least_squares, BisquareLoss, CauchyLoss, HuberLoss,
};
pub use ml_optimizers::{
    ml_problems, ADMMOptimizer, CoordinateDescentOptimizer, ElasticNetOptimizer,
    GroupLassoOptimizer, LassoOptimizer,
};
pub use multi_objective::{
    MultiObjectiveConfig, MultiObjectiveResult, MultiObjectiveSolution, NSGAII, NSGAIII,
};
pub use neural_integration::{optimizers, NeuralOptimizer, NeuralParameters, NeuralTrainer};
pub use neuromorphic::{
    neuromorphic_optimize, BasicNeuromorphicOptimizer, NeuromorphicConfig, NeuromorphicNetwork,
    NeuromorphicOptimizer, NeuronState, SpikeEvent,
};
pub use quantum_inspired::{
    quantum_optimize, quantum_particle_swarm_optimize, Complex, CoolingSchedule,
    QuantumAnnealingSchedule, QuantumInspiredOptimizer, QuantumOptimizationStats, QuantumState,
};
pub use reinforcement_learning::{
    actor_critic_optimize, bandit_optimize, evolutionary_optimize, meta_learning_optimize,
    policy_gradient_optimize, BanditOptimizer, EvolutionaryStrategy, Experience,
    MetaLearningOptimizer, OptimizationAction, OptimizationState, QLearningOptimizer,
    RLOptimizationConfig, RLOptimizer,
};
pub use roots::root;
pub use scalar::minimize_scalar;
pub use self_tuning::{
    presets, AdaptationResult, AdaptationStrategy, ParameterChange, ParameterValue,
    PerformanceMetrics, SelfTuningConfig, SelfTuningOptimizer, TunableParameter,
};
pub use sparse_numdiff::{sparse_hessian, sparse_jacobian, SparseFiniteDiffOptions};
pub use stochastic::{
    minimize_adam, minimize_adamw, minimize_rmsprop, minimize_sgd, minimize_sgd_momentum,
    minimize_stochastic, AdamOptions, AdamWOptions, DataProvider, InMemoryDataProvider,
    LearningRateSchedule, MomentumOptions, RMSPropOptions, SGDOptions, StochasticGradientFunction,
    StochasticMethod, StochasticOptions,
};
pub use streaming::{
    exponentially_weighted_rls, incremental_bfgs, incremental_lbfgs,
    incremental_lbfgs_linear_regression, kalman_filter_estimator, online_gradient_descent,
    online_linear_regression, online_logistic_regression, real_time_linear_regression,
    recursive_least_squares, rolling_window_gradient_descent, rolling_window_least_squares,
    rolling_window_linear_regression, rolling_window_weighted_least_squares,
    streaming_trust_region_linear_regression, streaming_trust_region_logistic_regression,
    IncrementalNewton, IncrementalNewtonMethod, LinearRegressionObjective,
    LogisticRegressionObjective, RealTimeEstimator, RealTimeMethod, RollingWindowOptimizer,
    StreamingConfig, StreamingDataPoint, StreamingObjective, StreamingOptimizer, StreamingStats,
    StreamingTrustRegion,
};
pub use unconstrained::{
    cauchy_point, dogleg_step, minimize, solve_trust_subproblem, trust_region_minimize, Bounds,
    Jacobian, TrustRegionConfig, TrustRegionResult,
};
pub use unified_pipeline::{
    presets as unified_presets, UnifiedOptimizationConfig, UnifiedOptimizationResults,
    UnifiedOptimizer,
};
pub use visualization::{
    tracking::TrajectoryTracker, ColorScheme, OptimizationTrajectory, OptimizationVisualizer,
    OutputFormat, VisualizationConfig,
};

/// Prelude module for convenient imports.
///
/// Glob-import it (`use scirs2_optimize::prelude::*;`) to bring the most
/// commonly used solver entry points, option structs, and result types into
/// scope in one line. Ambiguous names are disambiguated with `as` renames
/// (e.g. `Method as UnconstrainedMethod`, `Method as RootMethod`).
pub mod prelude {
    pub use crate::advanced_coordinator::{
        advanced_optimize, AdvancedConfig, AdvancedCoordinator, AdvancedStats, AdvancedStrategy,
        StrategyPerformance,
    };
    #[cfg(feature = "async")]
    pub use crate::async_parallel::{
        AsyncDifferentialEvolution, AsyncOptimizationConfig, AsyncOptimizationStats,
        SlowEvaluationStrategy,
    };
    pub use crate::automatic_differentiation::{
        autodiff, create_ad_gradient, create_ad_hessian, optimize_ad_mode, ADMode, ADResult,
        AutoDiffFunction, AutoDiffOptions, Dual, DualNumber,
    };
    pub use crate::bayesian::{
        optimize as bayesian_optimize_advanced, AcquisitionFn, AcquisitionType, BayesianOptResult,
        BayesianOptimizer as AdvancedBayesianOptimizer, BayesianOptimizerConfig, GpSurrogate,
        GpSurrogateConfig, MaternKernel, MaternVariant, RbfKernel, SamplingConfig,
        SamplingStrategy, SurrogateKernel,
    };
    pub use crate::benchmarking::{
        benchmark_suites, test_functions, AlgorithmRanking, BenchmarkConfig, BenchmarkResults,
        BenchmarkRun, BenchmarkSummary, BenchmarkSystem, ProblemCharacteristics, RuntimeStats,
        TestProblem,
    };
    pub use crate::constrained::{minimize_constrained, Method as ConstrainedMethod};
    pub use crate::distributed::{
        algorithms::{DistributedDifferentialEvolution, DistributedParticleSwarm},
        DistributedConfig, DistributedOptimizationContext, DistributedStats, DistributionStrategy,
        MPIInterface, WorkAssignment,
    };
    pub use crate::distributed_gpu::{
        DistributedGpuConfig, DistributedGpuOptimizer, DistributedGpuResults, DistributedGpuStats,
        GpuCommunicationStrategy, IterationStats,
    };
    pub use crate::error::{OptimizeError, OptimizeResult};
    pub use crate::global::{
        basinhopping, bayesian_optimization, differential_evolution, dual_annealing,
        generate_diverse_start_points, multi_start_with_clustering, particle_swarm,
        simulated_annealing, AcquisitionFunctionType, BasinHoppingOptions,
        BayesianOptimizationOptions, BayesianOptimizer, ClusterCentroid, ClusteringAlgorithm,
        ClusteringOptions, ClusteringResult, DifferentialEvolutionOptions, DualAnnealingOptions,
        InitialPointGenerator, KernelType, LocalMinimum, Parameter, ParticleSwarmOptions,
        SimulatedAnnealingOptions, Space, StartPointStrategy,
    };
    pub use crate::gpu::{
        acceleration::{
            AccelerationConfig, AccelerationManager, AccelerationStrategy, PerformanceStats,
        },
        algorithms::{GpuDifferentialEvolution, GpuParticleSwarm},
        GpuFunction, GpuOptimizationConfig, GpuOptimizationContext, GpuPrecision,
    };
    pub use crate::jit_optimization::{
        optimize_function, FunctionPattern, JitCompiler, JitOptions, JitStats,
    };
    pub use crate::learned_optimizers::{
        learned_optimize, ActivationType, AdaptationStatistics, AdaptiveNASSystem,
        AdaptiveTransformerOptimizer, FewShotLearningOptimizer, LearnedHyperparameterTuner,
        LearnedOptimizationConfig, LearnedOptimizer, MetaOptimizerState, NeuralAdaptiveOptimizer,
        OptimizationNetwork, OptimizationProblem, ParameterDistribution, ProblemEncoder,
        TrainingTask,
    };
    pub use crate::least_squares::{
        bounded_least_squares, least_squares, robust_least_squares, separable_least_squares,
        total_least_squares, weighted_least_squares, BisquareLoss, BoundedOptions, CauchyLoss,
        HuberLoss, LinearSolver, Method as LeastSquaresMethod, RobustLoss, RobustOptions,
        SeparableOptions, SeparableResult, TLSMethod, TotalLeastSquaresOptions,
        TotalLeastSquaresResult, WeightedOptions,
    };
    pub use crate::ml_optimizers::{
        ml_problems, ADMMOptimizer, CoordinateDescentOptimizer, ElasticNetOptimizer,
        GroupLassoOptimizer, LassoOptimizer,
    };
    pub use crate::multi_objective::{
        MultiObjectiveConfig, MultiObjectiveResult, MultiObjectiveSolution, NSGAII, NSGAIII,
    };
    pub use crate::neural_integration::{
        optimizers, NeuralOptimizer, NeuralParameters, NeuralTrainer,
    };
    pub use crate::neuromorphic::{
        neuromorphic_optimize, BasicNeuromorphicOptimizer, NeuromorphicConfig, NeuromorphicNetwork,
        NeuromorphicOptimizer, NeuronState, SpikeEvent,
    };
    pub use crate::parallel::{
        parallel_evaluate_batch, parallel_finite_diff_gradient, ParallelOptions,
    };
    pub use crate::quantum_inspired::{
        quantum_optimize, quantum_particle_swarm_optimize, Complex, CoolingSchedule,
        QuantumAnnealingSchedule, QuantumInspiredOptimizer, QuantumOptimizationStats, QuantumState,
    };
    pub use crate::reinforcement_learning::{
        bandit_optimize, evolutionary_optimize, meta_learning_optimize, policy_gradient_optimize,
        BanditOptimizer, EvolutionaryStrategy, Experience, MetaLearningOptimizer,
        OptimizationAction, OptimizationState, QLearningOptimizer, RLOptimizationConfig,
        RLOptimizer,
    };
    pub use crate::result::OptimizeResults;
    pub use crate::roots::{root, Method as RootMethod};
    pub use crate::scalar::{
        minimize_scalar, Method as ScalarMethod, Options as ScalarOptions, ScalarOptimizeResult,
    };
    pub use crate::self_tuning::{
        presets, AdaptationResult, AdaptationStrategy, ParameterChange, ParameterValue,
        PerformanceMetrics, SelfTuningConfig, SelfTuningOptimizer, TunableParameter,
    };
    pub use crate::sparse_numdiff::{sparse_hessian, sparse_jacobian, SparseFiniteDiffOptions};
    pub use crate::streaming::{
        exponentially_weighted_rls, incremental_bfgs, incremental_lbfgs,
        incremental_lbfgs_linear_regression, kalman_filter_estimator, online_gradient_descent,
        online_linear_regression, online_logistic_regression, real_time_linear_regression,
        recursive_least_squares, rolling_window_gradient_descent, rolling_window_least_squares,
        rolling_window_linear_regression, rolling_window_weighted_least_squares,
        streaming_trust_region_linear_regression, streaming_trust_region_logistic_regression,
        IncrementalNewton, IncrementalNewtonMethod, LinearRegressionObjective,
        LogisticRegressionObjective, RealTimeEstimator, RealTimeMethod, RollingWindowOptimizer,
        StreamingConfig, StreamingDataPoint, StreamingObjective, StreamingOptimizer,
        StreamingStats, StreamingTrustRegion,
    };
    pub use crate::unconstrained::{
        cauchy_point, dogleg_step, minimize, solve_trust_subproblem, trust_region_minimize, Bounds,
        Jacobian, Method as UnconstrainedMethod, Options, TrustRegionConfig, TrustRegionResult,
    };
    pub use crate::unified_pipeline::{
        presets as unified_presets, UnifiedOptimizationConfig, UnifiedOptimizationResults,
        UnifiedOptimizer,
    };
    pub use crate::visualization::{
        tracking::TrajectoryTracker, ColorScheme, OptimizationTrajectory, OptimizationVisualizer,
        OutputFormat, VisualizationConfig,
    };
}

#[cfg(test)]
mod tests {
    // Placeholder smoke test: only confirms that the crate compiles under the
    // test harness. Real coverage lives in the individual solver submodules.
    #[test]
    fn it_works() {
        assert_eq!(2 + 2, 4);
    }
}