//! Optimization toolkit: scalar, unconstrained, constrained, least-squares,
//! global, stochastic, and streaming solvers, plus GPU, distributed, and
//! experimental (quantum-inspired, neuromorphic, learned) optimizers. See the
//! individual modules for details; `prelude` offers a glob-importable surface.
//
// Crate-wide lint configuration.
//
// NOTE(review): the blanket allows below (especially `clippy::all`) silence
// every clippy diagnostic for the whole crate — confirm this is intentional
// rather than scoping the allows to the specific modules that need them.
#![allow(clippy::all)]
#![allow(dead_code)]
#![allow(unreachable_patterns)]
#![allow(unused_assignments)]
#![allow(unused_variables)]
#![allow(private_interfaces)]
#![allow(clippy::field_reassign_with_default)]
// NOTE(review): raised from the default of 128 — presumably required by deeply
// recursive generic/macro expansion somewhere in the crate; confirm.
#![recursion_limit = "512"]
#![allow(clippy::many_single_char_names)]
#![allow(clippy::similar_names)]
pub mod error;
pub use error::{OptimizeError, OptimizeResult};
pub mod advanced_coordinator;
#[cfg(feature = "async")]
pub mod async_parallel;
pub mod automatic_differentiation;
pub mod bayesian;
pub mod benchmarking;
pub mod constrained;
pub mod distributed;
pub mod distributed_gpu;
pub mod global;
pub mod gpu;
pub mod jit_optimization;
pub mod learned_optimizers;
pub mod least_squares;
pub mod ml_optimizers;
pub mod multi_objective;
pub mod neural_integration;
pub mod neuromorphic;
pub mod parallel;
pub mod quantum_inspired;
pub mod reinforcement_learning;
pub mod roots;
pub mod roots_anderson;
pub mod roots_krylov;
pub mod scalar;
pub mod self_tuning;
pub mod simd_ops;
pub mod sparse_numdiff; pub mod stochastic;
pub mod streaming;
pub mod unconstrained;
pub mod unified_pipeline;
pub mod visualization;
pub mod coordinate_descent;
pub mod darts;
pub mod differentiable_optimization;
pub mod distributed_admm;
pub mod dro;
pub mod hardware_nas;
pub mod nas;
pub mod high_dimensional;
pub mod integer;
pub mod kaczmarz;
pub mod multi_fidelity;
pub mod quantum_classical;
pub mod second_order;
pub mod sketched;
pub mod subspace_embed;
pub mod subspace_embedding;
pub mod result;
pub use result::OptimizeResults;
// ---------------------------------------------------------------------------
// Crate-root re-exports: the most commonly used items from each solver module,
// so callers can reach them without spelling out deep module paths. The
// `prelude` module re-exports a superset of these.
// ---------------------------------------------------------------------------
pub use advanced_coordinator::{
advanced_optimize, AdvancedConfig, AdvancedCoordinator, AdvancedStats, AdvancedStrategy,
StrategyPerformance,
};
// Only available when the crate is built with the "async" feature.
#[cfg(feature = "async")]
pub use async_parallel::{
AsyncDifferentialEvolution, AsyncOptimizationConfig, AsyncOptimizationStats,
SlowEvaluationStrategy,
};
pub use automatic_differentiation::{
autodiff, create_ad_gradient, create_ad_hessian, optimize_ad_mode, ADMode, ADResult,
AutoDiffFunction, AutoDiffOptions,
};
// `optimize` is renamed to avoid clashing with other `optimize` entry points.
pub use bayesian::{
optimize as bayesian_optimize_advanced, AcquisitionFn, AcquisitionType, BayesianOptResult,
BayesianOptimizer as AdvancedBayesianOptimizer, BayesianOptimizerConfig, GpSurrogate,
GpSurrogateConfig, MaternKernel, MaternVariant, RbfKernel, SamplingConfig, SamplingStrategy,
SurrogateKernel,
};
pub use benchmarking::{
benchmark_suites, test_functions, AlgorithmRanking, BenchmarkConfig, BenchmarkResults,
BenchmarkRun, BenchmarkSummary, BenchmarkSystem, ProblemCharacteristics, RuntimeStats,
TestProblem,
};
pub use constrained::minimize_constrained;
pub use distributed::{
algorithms::{DistributedDifferentialEvolution, DistributedParticleSwarm},
DistributedConfig, DistributedOptimizationContext, DistributedStats, DistributionStrategy,
MPIInterface, WorkAssignment,
};
pub use distributed_gpu::{
DistributedGpuConfig, DistributedGpuOptimizer, DistributedGpuResults, DistributedGpuStats,
GpuCommunicationStrategy, IterationStats,
};
pub use global::{
basinhopping, bayesian_optimization, differential_evolution, dual_annealing,
generate_diverse_start_points, multi_start, multi_start_with_clustering, particle_swarm,
simulated_annealing,
};
pub use gpu::{
acceleration::{
AccelerationConfig, AccelerationManager, AccelerationStrategy, PerformanceStats,
},
algorithms::{GpuDifferentialEvolution, GpuParticleSwarm},
GpuFunction, GpuOptimizationConfig, GpuOptimizationContext, GpuPrecision,
};
pub use jit_optimization::{optimize_function, FunctionPattern, JitCompiler, JitOptions, JitStats};
pub use learned_optimizers::{
learned_optimize, ActivationType, AdaptationStatistics, AdaptiveNASSystem,
AdaptiveTransformerOptimizer, FewShotLearningOptimizer, LearnedHyperparameterTuner,
LearnedOptimizationConfig, LearnedOptimizer, MetaOptimizerState, NeuralAdaptiveOptimizer,
OptimizationNetwork, OptimizationProblem, ParameterDistribution, ProblemEncoder, TrainingTask,
};
pub use least_squares::{
bounded_least_squares, least_squares, robust_least_squares, separable_least_squares,
total_least_squares, weighted_least_squares, BisquareLoss, CauchyLoss, HuberLoss,
};
pub use ml_optimizers::{
ml_problems, ADMMOptimizer, CoordinateDescentOptimizer, ElasticNetOptimizer,
GroupLassoOptimizer, LassoOptimizer,
};
pub use multi_objective::{
MultiObjectiveConfig, MultiObjectiveResult, MultiObjectiveSolution, NSGAII, NSGAIII,
};
pub use neural_integration::{optimizers, NeuralOptimizer, NeuralParameters, NeuralTrainer};
pub use neuromorphic::{
neuromorphic_optimize, BasicNeuromorphicOptimizer, NeuromorphicConfig, NeuromorphicNetwork,
NeuromorphicOptimizer, NeuronState, SpikeEvent,
};
pub use quantum_inspired::{
quantum_optimize, quantum_particle_swarm_optimize, Complex, CoolingSchedule,
QuantumAnnealingSchedule, QuantumInspiredOptimizer, QuantumOptimizationStats, QuantumState,
};
pub use reinforcement_learning::{
actor_critic_optimize, bandit_optimize, evolutionary_optimize, meta_learning_optimize,
policy_gradient_optimize, BanditOptimizer, EvolutionaryStrategy, Experience,
MetaLearningOptimizer, OptimizationAction, OptimizationState, QLearningOptimizer,
RLOptimizationConfig, RLOptimizer,
};
pub use roots::root;
pub use scalar::minimize_scalar;
pub use self_tuning::{
presets, AdaptationResult, AdaptationStrategy, ParameterChange, ParameterValue,
PerformanceMetrics, SelfTuningConfig, SelfTuningOptimizer, TunableParameter,
};
pub use sparse_numdiff::{sparse_hessian, sparse_jacobian, SparseFiniteDiffOptions};
pub use stochastic::{
minimize_adam, minimize_adamw, minimize_rmsprop, minimize_sgd, minimize_sgd_momentum,
minimize_stochastic, AdamOptions, AdamWOptions, DataProvider, InMemoryDataProvider,
LearningRateSchedule, MomentumOptions, RMSPropOptions, SGDOptions, StochasticGradientFunction,
StochasticMethod, StochasticOptions,
};
pub use streaming::{
exponentially_weighted_rls, incremental_bfgs, incremental_lbfgs,
incremental_lbfgs_linear_regression, kalman_filter_estimator, online_gradient_descent,
online_linear_regression, online_logistic_regression, real_time_linear_regression,
recursive_least_squares, rolling_window_gradient_descent, rolling_window_least_squares,
rolling_window_linear_regression, rolling_window_weighted_least_squares,
streaming_trust_region_linear_regression, streaming_trust_region_logistic_regression,
IncrementalNewton, IncrementalNewtonMethod, LinearRegressionObjective,
LogisticRegressionObjective, RealTimeEstimator, RealTimeMethod, RollingWindowOptimizer,
StreamingConfig, StreamingDataPoint, StreamingObjective, StreamingOptimizer, StreamingStats,
StreamingTrustRegion,
};
pub use unconstrained::{
cauchy_point, dogleg_step, minimize, solve_trust_subproblem, trust_region_minimize, Bounds,
Jacobian, TrustRegionConfig, TrustRegionResult,
};
// `presets` is renamed to avoid clashing with `self_tuning::presets` above.
pub use unified_pipeline::{
presets as unified_presets, UnifiedOptimizationConfig, UnifiedOptimizationResults,
UnifiedOptimizer,
};
pub use visualization::{
tracking::TrajectoryTracker, ColorScheme, OptimizationTrajectory, OptimizationVisualizer,
OutputFormat, VisualizationConfig,
};
/// Convenience prelude intended for glob import by downstream code
/// (`use <this_crate>::prelude::*;`).
///
/// Re-exports the crate's commonly used functions, config/option types, and
/// statistics types. It is a superset of the crate-root re-exports: it also
/// surfaces items not exported at the root, such as the per-module `Method`
/// enums under renamed aliases (`ConstrainedMethod`, `RootMethod`,
/// `ScalarMethod`, `UnconstrainedMethod`, `LeastSquaresMethod`) and the
/// module-specific options structs.
pub mod prelude {
pub use crate::advanced_coordinator::{
advanced_optimize, AdvancedConfig, AdvancedCoordinator, AdvancedStats, AdvancedStrategy,
StrategyPerformance,
};
// Only available when the crate is built with the "async" feature.
#[cfg(feature = "async")]
pub use crate::async_parallel::{
AsyncDifferentialEvolution, AsyncOptimizationConfig, AsyncOptimizationStats,
SlowEvaluationStrategy,
};
pub use crate::automatic_differentiation::{
autodiff, create_ad_gradient, create_ad_hessian, optimize_ad_mode, ADMode, ADResult,
AutoDiffFunction, AutoDiffOptions, Dual, DualNumber,
};
pub use crate::bayesian::{
optimize as bayesian_optimize_advanced, AcquisitionFn, AcquisitionType, BayesianOptResult,
BayesianOptimizer as AdvancedBayesianOptimizer, BayesianOptimizerConfig, GpSurrogate,
GpSurrogateConfig, MaternKernel, MaternVariant, RbfKernel, SamplingConfig,
SamplingStrategy, SurrogateKernel,
};
pub use crate::benchmarking::{
benchmark_suites, test_functions, AlgorithmRanking, BenchmarkConfig, BenchmarkResults,
BenchmarkRun, BenchmarkSummary, BenchmarkSystem, ProblemCharacteristics, RuntimeStats,
TestProblem,
};
pub use crate::constrained::{minimize_constrained, Method as ConstrainedMethod};
pub use crate::distributed::{
algorithms::{DistributedDifferentialEvolution, DistributedParticleSwarm},
DistributedConfig, DistributedOptimizationContext, DistributedStats, DistributionStrategy,
MPIInterface, WorkAssignment,
};
pub use crate::distributed_gpu::{
DistributedGpuConfig, DistributedGpuOptimizer, DistributedGpuResults, DistributedGpuStats,
GpuCommunicationStrategy, IterationStats,
};
pub use crate::error::{OptimizeError, OptimizeResult};
pub use crate::global::{
basinhopping, bayesian_optimization, differential_evolution, dual_annealing,
generate_diverse_start_points, multi_start_with_clustering, particle_swarm,
simulated_annealing, AcquisitionFunctionType, BasinHoppingOptions,
BayesianOptimizationOptions, BayesianOptimizer, ClusterCentroid, ClusteringAlgorithm,
ClusteringOptions, ClusteringResult, DifferentialEvolutionOptions, DualAnnealingOptions,
InitialPointGenerator, KernelType, LocalMinimum, Parameter, ParticleSwarmOptions,
SimulatedAnnealingOptions, Space, StartPointStrategy,
};
pub use crate::gpu::{
acceleration::{
AccelerationConfig, AccelerationManager, AccelerationStrategy, PerformanceStats,
},
algorithms::{GpuDifferentialEvolution, GpuParticleSwarm},
GpuFunction, GpuOptimizationConfig, GpuOptimizationContext, GpuPrecision,
};
pub use crate::jit_optimization::{
optimize_function, FunctionPattern, JitCompiler, JitOptions, JitStats,
};
pub use crate::learned_optimizers::{
learned_optimize, ActivationType, AdaptationStatistics, AdaptiveNASSystem,
AdaptiveTransformerOptimizer, FewShotLearningOptimizer, LearnedHyperparameterTuner,
LearnedOptimizationConfig, LearnedOptimizer, MetaOptimizerState, NeuralAdaptiveOptimizer,
OptimizationNetwork, OptimizationProblem, ParameterDistribution, ProblemEncoder,
TrainingTask,
};
pub use crate::least_squares::{
bounded_least_squares, least_squares, robust_least_squares, separable_least_squares,
total_least_squares, weighted_least_squares, BisquareLoss, BoundedOptions, CauchyLoss,
HuberLoss, LinearSolver, Method as LeastSquaresMethod, RobustLoss, RobustOptions,
SeparableOptions, SeparableResult, TLSMethod, TotalLeastSquaresOptions,
TotalLeastSquaresResult, WeightedOptions,
};
pub use crate::ml_optimizers::{
ml_problems, ADMMOptimizer, CoordinateDescentOptimizer, ElasticNetOptimizer,
GroupLassoOptimizer, LassoOptimizer,
};
pub use crate::multi_objective::{
MultiObjectiveConfig, MultiObjectiveResult, MultiObjectiveSolution, NSGAII, NSGAIII,
};
pub use crate::neural_integration::{
optimizers, NeuralOptimizer, NeuralParameters, NeuralTrainer,
};
pub use crate::neuromorphic::{
neuromorphic_optimize, BasicNeuromorphicOptimizer, NeuromorphicConfig, NeuromorphicNetwork,
NeuromorphicOptimizer, NeuronState, SpikeEvent,
};
pub use crate::parallel::{
parallel_evaluate_batch, parallel_finite_diff_gradient, ParallelOptions,
};
pub use crate::quantum_inspired::{
quantum_optimize, quantum_particle_swarm_optimize, Complex, CoolingSchedule,
QuantumAnnealingSchedule, QuantumInspiredOptimizer, QuantumOptimizationStats, QuantumState,
};
pub use crate::reinforcement_learning::{
bandit_optimize, evolutionary_optimize, meta_learning_optimize, policy_gradient_optimize,
BanditOptimizer, EvolutionaryStrategy, Experience, MetaLearningOptimizer,
OptimizationAction, OptimizationState, QLearningOptimizer, RLOptimizationConfig,
RLOptimizer,
};
pub use crate::result::OptimizeResults;
pub use crate::roots::{root, Method as RootMethod};
pub use crate::scalar::{
minimize_scalar, Method as ScalarMethod, Options as ScalarOptions, ScalarOptimizeResult,
};
pub use crate::self_tuning::{
presets, AdaptationResult, AdaptationStrategy, ParameterChange, ParameterValue,
PerformanceMetrics, SelfTuningConfig, SelfTuningOptimizer, TunableParameter,
};
pub use crate::sparse_numdiff::{sparse_hessian, sparse_jacobian, SparseFiniteDiffOptions};
pub use crate::streaming::{
exponentially_weighted_rls, incremental_bfgs, incremental_lbfgs,
incremental_lbfgs_linear_regression, kalman_filter_estimator, online_gradient_descent,
online_linear_regression, online_logistic_regression, real_time_linear_regression,
recursive_least_squares, rolling_window_gradient_descent, rolling_window_least_squares,
rolling_window_linear_regression, rolling_window_weighted_least_squares,
streaming_trust_region_linear_regression, streaming_trust_region_logistic_regression,
IncrementalNewton, IncrementalNewtonMethod, LinearRegressionObjective,
LogisticRegressionObjective, RealTimeEstimator, RealTimeMethod, RollingWindowOptimizer,
StreamingConfig, StreamingDataPoint, StreamingObjective, StreamingOptimizer,
StreamingStats, StreamingTrustRegion,
};
pub use crate::unconstrained::{
cauchy_point, dogleg_step, minimize, solve_trust_subproblem, trust_region_minimize, Bounds,
Jacobian, Method as UnconstrainedMethod, Options, TrustRegionConfig, TrustRegionResult,
};
// `presets` is renamed to avoid clashing with `self_tuning::presets` above.
pub use crate::unified_pipeline::{
presets as unified_presets, UnifiedOptimizationConfig, UnifiedOptimizationResults,
UnifiedOptimizer,
};
pub use crate::visualization::{
tracking::TrajectoryTracker, ColorScheme, OptimizationTrajectory, OptimizationVisualizer,
OutputFormat, VisualizationConfig,
};
}
#[cfg(test)]
mod tests {
    /// Placeholder smoke test confirming the crate's test harness builds
    /// and runs.
    #[test]
    fn it_works() {
        let sum = 2 + 2;
        assert_eq!(sum, 4);
    }
}