pub mod bfgs;
pub mod conjugate_gradient;
pub mod genetic;
pub mod gradient_descent;
pub mod interior_point;
pub mod lbfgs;
pub mod nelder_mead;
pub mod newton;
pub mod simplex;
pub mod simulated_annealing;
use num_traits::{Float, Zero};
use std::fmt::Debug;
pub use bfgs::minimize as bfgs_minimize;
pub use conjugate_gradient::minimize as conjugate_gradient_minimize;
pub use genetic::minimize as genetic_minimize;
pub use gradient_descent::minimize as gradient_descent_minimize;
pub use interior_point::minimize as interior_point_minimize;
pub use lbfgs::minimize as lbfgs_minimize;
pub use nelder_mead::minimize as nelder_mead_minimize;
pub use newton::minimize as newton_minimize;
pub use simplex::minimize as simplex_minimize;
pub use simulated_annealing::minimize as simulated_annealing_minimize;
/// A scalar objective function `f: R^n -> R` to be minimized by the
/// solvers re-exported from this module.
///
/// Implementors must provide `evaluate`; `gradient` and `hessian` are
/// optional analytic derivatives that default to `None`. Solvers that
/// require derivatives presumably fall back to another strategy (e.g.
/// numerical differentiation) when these return `None` — confirm in the
/// individual solver modules.
pub trait ObjectiveFunction<T>
where
T: Float + Debug,
{
/// Returns the objective value at `point`.
fn evaluate(&self, point: &[T]) -> T;
/// Analytic gradient at `point`, if available. Defaults to `None`.
fn gradient(&self, _point: &[T]) -> Option<Vec<T>> {
None
}
/// Analytic Hessian at `point`, if available, as a nested
/// `Vec<Vec<T>>` matrix. Defaults to `None`.
fn hessian(&self, _point: &[T]) -> Option<Vec<Vec<T>>> {
None
}
}
/// Shared tuning parameters for the iterative solvers in this module.
///
/// Trait bounds are deliberately omitted from the struct definition
/// (per Rust API guidelines, C-STRUCT-BOUNDS): the derives generate
/// their own `T: Debug` / `T: Clone` bounds, and the impl blocks that
/// actually need `Float` state it themselves. This is strictly
/// backward-compatible — every previously valid `T` still works.
#[derive(Debug, Clone)]
pub struct OptimizationConfig<T> {
    /// Hard cap on the number of solver iterations.
    pub max_iterations: usize,
    /// Convergence threshold; its exact interpretation is solver-specific.
    pub tolerance: T,
    /// Step size / learning rate used by gradient-based methods.
    pub learning_rate: T,
}
/// Default configuration: 1000 iterations, tolerance `1e-6`,
/// learning rate `0.01`.
///
/// The previous explicit `Zero` bound was redundant — `Float` already
/// implies `Zero` via `Float: Num: Zero` in `num_traits` — so only
/// `Float + Debug` is required here. The set of accepted types is
/// unchanged.
impl<T> Default for OptimizationConfig<T>
where
    T: Float + Debug,
{
    fn default() -> Self {
        Self {
            max_iterations: 1000,
            // `T::from` (NumCast) returns Option; a failure to convert
            // these small finite f64 constants would indicate a
            // non-conforming `Float` implementation, so state that
            // invariant with `expect` instead of a bare `unwrap`.
            tolerance: T::from(1e-6).expect("Float type must convert from the f64 constant 1e-6"),
            learning_rate: T::from(0.01).expect("Float type must convert from the f64 constant 0.01"),
        }
    }
}
/// Outcome of a single optimization run.
///
/// Trait bounds are deliberately omitted from the struct definition
/// (per Rust API guidelines, C-STRUCT-BOUNDS); the derives supply their
/// own bounds, and any impl that needs `Float` states it itself. This
/// is strictly backward-compatible.
#[derive(Debug, Clone)]
pub struct OptimizationResult<T> {
    /// Best point found by the solver.
    pub optimal_point: Vec<T>,
    /// Objective value at `optimal_point`.
    pub optimal_value: T,
    /// Number of iterations actually performed.
    pub iterations: usize,
    /// Whether the solver reported convergence (criterion is
    /// solver-specific).
    pub converged: bool,
}