Newton

Struct Newton 

Source
pub struct Newton { /* private fields */ }

Implementations§

Source§

impl Newton

Auto-generated by derive_getters::Getters.

Source

pub fn tol(&self) -> &Floating

Get field tol from instance of Newton.

Source

pub fn decrement_squared(&self) -> &Option<Floating>

Get field decrement_squared from instance of Newton.

Examples found in repository§
examples/newton_example.rs (line 70)
4fn main() {
5    // Setting up logging
6    std::env::set_var("RUST_LOG", "info");
7    let _ = Tracer::default().with_normal_stdout_layer().build();
8
9    // Convex function: f(x,y) = x^2 + y^2 + exp(x^2 + y^2)
10    // This function is convex and has a unique minimum at (0, 0)
11    let f_and_g = |x: &DVector<f64>| -> FuncEvalMultivariate {
12        let x1 = x[0];
13        let x2 = x[1];
14
15        // Function value
16        let f = x1.powi(2) + x2.powi(2) + (x1.powi(2) + x2.powi(2)).exp();
17
18        // Gradient: ∇f = [2x + 2x*exp(x^2+y^2), 2y + 2y*exp(x^2+y^2)]
19        let exp_term = (x1.powi(2) + x2.powi(2)).exp();
20        let g1 = 2.0 * x1 * (1.0 + exp_term);
21        let g2 = 2.0 * x2 * (1.0 + exp_term);
22        let g = DVector::from_vec(vec![g1, g2]);
23
24        // Hessian: ∇²f = [[2(1+exp) + 4x^2*exp, 4xy*exp], [4xy*exp, 2(1+exp) + 4y^2*exp]]
25        let h11 = 2.0 * (1.0 + exp_term) + 4.0 * x1.powi(2) * exp_term;
26        let h12 = 4.0 * x1 * x2 * exp_term;
27        let h21 = h12;
28        let h22 = 2.0 * (1.0 + exp_term) + 4.0 * x2.powi(2) * exp_term;
29        let hessian = DMatrix::from_vec(2, 2, vec![h11, h21, h12, h22]);
30
31        FuncEvalMultivariate::new(f, g).with_hessian(hessian)
32    };
33
34    // Setting up the line search (Moré–Thuente line search)
35    let mut ls = MoreThuente::default();
36
37    // Setting up the solver
38    let tol = 1e-6;
39    let x0 = DVector::from_vec(vec![1.0, 1.0]); // Starting point
40    let mut solver = Newton::new(tol, x0.clone());
41
42    // Running the solver
43    let max_iter_solver = 20;
44    let max_iter_line_search = 20;
45
46    println!("=== Newton's Method Example ===");
47    println!("Objective: f(x,y) = x^2 + y^2 + exp(x^2 + y^2) (convex)");
48    println!("Global minimum: (0, 0) with f(0,0) = 1");
49    println!("Starting point: {:?}", x0);
50    println!("Tolerance: {}", tol);
51    println!();
52
53    match solver.minimize(
54        &mut ls,
55        f_and_g,
56        max_iter_solver,
57        max_iter_line_search,
58        None,
59    ) {
60        Ok(()) => {
61            let x = solver.x();
62            let eval = f_and_g(x);
63            println!("✅ Optimization completed successfully!");
64            println!("Final iterate: {:?}", x);
65            println!("Function value: {:.6}", eval.f());
66            println!("Gradient norm: {:.6}", eval.g().norm());
67            println!("Iterations: {}", solver.k());
68
69            // Show Newton decrement
70            if let Some(decrement_squared) = solver.decrement_squared() {
71                println!("Newton decrement squared: {:.6}", decrement_squared);
72                println!("Newton decrement: {:.6}", decrement_squared.sqrt());
73            }
74
75            // Check if we're close to the known minimum
76            let true_min = DVector::from_vec(vec![0.0, 0.0]);
77            let distance_to_min = (x - true_min).norm();
78            println!("Distance to true minimum: {:.6}", distance_to_min);
79            println!("Expected function value: 1.0");
80        }
81        Err(e) => {
82            println!("❌ Optimization failed: {:?}", e);
83        }
84    }
85}
Source

pub fn x(&self) -> &DVector<Floating>

Get field x from instance of Newton.

Examples found in repository§
examples/newton_example.rs (line 61)
4fn main() {
5    // Setting up logging
6    std::env::set_var("RUST_LOG", "info");
7    let _ = Tracer::default().with_normal_stdout_layer().build();
8
9    // Convex function: f(x,y) = x^2 + y^2 + exp(x^2 + y^2)
10    // This function is convex and has a unique minimum at (0, 0)
11    let f_and_g = |x: &DVector<f64>| -> FuncEvalMultivariate {
12        let x1 = x[0];
13        let x2 = x[1];
14
15        // Function value
16        let f = x1.powi(2) + x2.powi(2) + (x1.powi(2) + x2.powi(2)).exp();
17
18        // Gradient: ∇f = [2x + 2x*exp(x^2+y^2), 2y + 2y*exp(x^2+y^2)]
19        let exp_term = (x1.powi(2) + x2.powi(2)).exp();
20        let g1 = 2.0 * x1 * (1.0 + exp_term);
21        let g2 = 2.0 * x2 * (1.0 + exp_term);
22        let g = DVector::from_vec(vec![g1, g2]);
23
24        // Hessian: ∇²f = [[2(1+exp) + 4x^2*exp, 4xy*exp], [4xy*exp, 2(1+exp) + 4y^2*exp]]
25        let h11 = 2.0 * (1.0 + exp_term) + 4.0 * x1.powi(2) * exp_term;
26        let h12 = 4.0 * x1 * x2 * exp_term;
27        let h21 = h12;
28        let h22 = 2.0 * (1.0 + exp_term) + 4.0 * x2.powi(2) * exp_term;
29        let hessian = DMatrix::from_vec(2, 2, vec![h11, h21, h12, h22]);
30
31        FuncEvalMultivariate::new(f, g).with_hessian(hessian)
32    };
33
34    // Setting up the line search (Moré–Thuente line search)
35    let mut ls = MoreThuente::default();
36
37    // Setting up the solver
38    let tol = 1e-6;
39    let x0 = DVector::from_vec(vec![1.0, 1.0]); // Starting point
40    let mut solver = Newton::new(tol, x0.clone());
41
42    // Running the solver
43    let max_iter_solver = 20;
44    let max_iter_line_search = 20;
45
46    println!("=== Newton's Method Example ===");
47    println!("Objective: f(x,y) = x^2 + y^2 + exp(x^2 + y^2) (convex)");
48    println!("Global minimum: (0, 0) with f(0,0) = 1");
49    println!("Starting point: {:?}", x0);
50    println!("Tolerance: {}", tol);
51    println!();
52
53    match solver.minimize(
54        &mut ls,
55        f_and_g,
56        max_iter_solver,
57        max_iter_line_search,
58        None,
59    ) {
60        Ok(()) => {
61            let x = solver.x();
62            let eval = f_and_g(x);
63            println!("✅ Optimization completed successfully!");
64            println!("Final iterate: {:?}", x);
65            println!("Function value: {:.6}", eval.f());
66            println!("Gradient norm: {:.6}", eval.g().norm());
67            println!("Iterations: {}", solver.k());
68
69            // Show Newton decrement
70            if let Some(decrement_squared) = solver.decrement_squared() {
71                println!("Newton decrement squared: {:.6}", decrement_squared);
72                println!("Newton decrement: {:.6}", decrement_squared.sqrt());
73            }
74
75            // Check if we're close to the known minimum
76            let true_min = DVector::from_vec(vec![0.0, 0.0]);
77            let distance_to_min = (x - true_min).norm();
78            println!("Distance to true minimum: {:.6}", distance_to_min);
79            println!("Expected function value: 1.0");
80        }
81        Err(e) => {
82            println!("❌ Optimization failed: {:?}", e);
83        }
84    }
85}
Source

pub fn k(&self) -> &usize

Get field k from instance of Newton.

Examples found in repository§
examples/newton_example.rs (line 67)
4fn main() {
5    // Setting up logging
6    std::env::set_var("RUST_LOG", "info");
7    let _ = Tracer::default().with_normal_stdout_layer().build();
8
9    // Convex function: f(x,y) = x^2 + y^2 + exp(x^2 + y^2)
10    // This function is convex and has a unique minimum at (0, 0)
11    let f_and_g = |x: &DVector<f64>| -> FuncEvalMultivariate {
12        let x1 = x[0];
13        let x2 = x[1];
14
15        // Function value
16        let f = x1.powi(2) + x2.powi(2) + (x1.powi(2) + x2.powi(2)).exp();
17
18        // Gradient: ∇f = [2x + 2x*exp(x^2+y^2), 2y + 2y*exp(x^2+y^2)]
19        let exp_term = (x1.powi(2) + x2.powi(2)).exp();
20        let g1 = 2.0 * x1 * (1.0 + exp_term);
21        let g2 = 2.0 * x2 * (1.0 + exp_term);
22        let g = DVector::from_vec(vec![g1, g2]);
23
24        // Hessian: ∇²f = [[2(1+exp) + 4x^2*exp, 4xy*exp], [4xy*exp, 2(1+exp) + 4y^2*exp]]
25        let h11 = 2.0 * (1.0 + exp_term) + 4.0 * x1.powi(2) * exp_term;
26        let h12 = 4.0 * x1 * x2 * exp_term;
27        let h21 = h12;
28        let h22 = 2.0 * (1.0 + exp_term) + 4.0 * x2.powi(2) * exp_term;
29        let hessian = DMatrix::from_vec(2, 2, vec![h11, h21, h12, h22]);
30
31        FuncEvalMultivariate::new(f, g).with_hessian(hessian)
32    };
33
34    // Setting up the line search (Moré–Thuente line search)
35    let mut ls = MoreThuente::default();
36
37    // Setting up the solver
38    let tol = 1e-6;
39    let x0 = DVector::from_vec(vec![1.0, 1.0]); // Starting point
40    let mut solver = Newton::new(tol, x0.clone());
41
42    // Running the solver
43    let max_iter_solver = 20;
44    let max_iter_line_search = 20;
45
46    println!("=== Newton's Method Example ===");
47    println!("Objective: f(x,y) = x^2 + y^2 + exp(x^2 + y^2) (convex)");
48    println!("Global minimum: (0, 0) with f(0,0) = 1");
49    println!("Starting point: {:?}", x0);
50    println!("Tolerance: {}", tol);
51    println!();
52
53    match solver.minimize(
54        &mut ls,
55        f_and_g,
56        max_iter_solver,
57        max_iter_line_search,
58        None,
59    ) {
60        Ok(()) => {
61            let x = solver.x();
62            let eval = f_and_g(x);
63            println!("✅ Optimization completed successfully!");
64            println!("Final iterate: {:?}", x);
65            println!("Function value: {:.6}", eval.f());
66            println!("Gradient norm: {:.6}", eval.g().norm());
67            println!("Iterations: {}", solver.k());
68
69            // Show Newton decrement
70            if let Some(decrement_squared) = solver.decrement_squared() {
71                println!("Newton decrement squared: {:.6}", decrement_squared);
72                println!("Newton decrement: {:.6}", decrement_squared.sqrt());
73            }
74
75            // Check if we're close to the known minimum
76            let true_min = DVector::from_vec(vec![0.0, 0.0]);
77            let distance_to_min = (x - true_min).norm();
78            println!("Distance to true minimum: {:.6}", distance_to_min);
79            println!("Expected function value: 1.0");
80        }
81        Err(e) => {
82            println!("❌ Optimization failed: {:?}", e);
83        }
84    }
85}
Source§

impl Newton

Source

pub fn new(tol: Floating, x0: DVector<Floating>) -> Self

Examples found in repository§
examples/newton_example.rs (line 40)
4fn main() {
5    // Setting up logging
6    std::env::set_var("RUST_LOG", "info");
7    let _ = Tracer::default().with_normal_stdout_layer().build();
8
9    // Convex function: f(x,y) = x^2 + y^2 + exp(x^2 + y^2)
10    // This function is convex and has a unique minimum at (0, 0)
11    let f_and_g = |x: &DVector<f64>| -> FuncEvalMultivariate {
12        let x1 = x[0];
13        let x2 = x[1];
14
15        // Function value
16        let f = x1.powi(2) + x2.powi(2) + (x1.powi(2) + x2.powi(2)).exp();
17
18        // Gradient: ∇f = [2x + 2x*exp(x^2+y^2), 2y + 2y*exp(x^2+y^2)]
19        let exp_term = (x1.powi(2) + x2.powi(2)).exp();
20        let g1 = 2.0 * x1 * (1.0 + exp_term);
21        let g2 = 2.0 * x2 * (1.0 + exp_term);
22        let g = DVector::from_vec(vec![g1, g2]);
23
24        // Hessian: ∇²f = [[2(1+exp) + 4x^2*exp, 4xy*exp], [4xy*exp, 2(1+exp) + 4y^2*exp]]
25        let h11 = 2.0 * (1.0 + exp_term) + 4.0 * x1.powi(2) * exp_term;
26        let h12 = 4.0 * x1 * x2 * exp_term;
27        let h21 = h12;
28        let h22 = 2.0 * (1.0 + exp_term) + 4.0 * x2.powi(2) * exp_term;
29        let hessian = DMatrix::from_vec(2, 2, vec![h11, h21, h12, h22]);
30
31        FuncEvalMultivariate::new(f, g).with_hessian(hessian)
32    };
33
34    // Setting up the line search (Moré–Thuente line search)
35    let mut ls = MoreThuente::default();
36
37    // Setting up the solver
38    let tol = 1e-6;
39    let x0 = DVector::from_vec(vec![1.0, 1.0]); // Starting point
40    let mut solver = Newton::new(tol, x0.clone());
41
42    // Running the solver
43    let max_iter_solver = 20;
44    let max_iter_line_search = 20;
45
46    println!("=== Newton's Method Example ===");
47    println!("Objective: f(x,y) = x^2 + y^2 + exp(x^2 + y^2) (convex)");
48    println!("Global minimum: (0, 0) with f(0,0) = 1");
49    println!("Starting point: {:?}", x0);
50    println!("Tolerance: {}", tol);
51    println!();
52
53    match solver.minimize(
54        &mut ls,
55        f_and_g,
56        max_iter_solver,
57        max_iter_line_search,
58        None,
59    ) {
60        Ok(()) => {
61            let x = solver.x();
62            let eval = f_and_g(x);
63            println!("✅ Optimization completed successfully!");
64            println!("Final iterate: {:?}", x);
65            println!("Function value: {:.6}", eval.f());
66            println!("Gradient norm: {:.6}", eval.g().norm());
67            println!("Iterations: {}", solver.k());
68
69            // Show Newton decrement
70            if let Some(decrement_squared) = solver.decrement_squared() {
71                println!("Newton decrement squared: {:.6}", decrement_squared);
72                println!("Newton decrement: {:.6}", decrement_squared.sqrt());
73            }
74
75            // Check if we're close to the known minimum
76            let true_min = DVector::from_vec(vec![0.0, 0.0]);
77            let distance_to_min = (x - true_min).norm();
78            println!("Distance to true minimum: {:.6}", distance_to_min);
79            println!("Expected function value: 1.0");
80        }
81        Err(e) => {
82            println!("❌ Optimization failed: {:?}", e);
83        }
84    }
85}

Trait Implementations§

Source§

impl ComputeDirection for Newton

Source§

impl LineSearchSolver for Newton

Source§

fn xk(&self) -> &DVector<Floating>

Source§

fn k(&self) -> &usize

Source§

fn xk_mut(&mut self) -> &mut DVector<Floating>

Source§

fn k_mut(&mut self) -> &mut usize

Source§

fn has_converged(&self, _: &FuncEvalMultivariate) -> bool

Source§

fn setup(&mut self)

Source§

fn evaluate_x_k( &mut self, oracle: &mut impl FnMut(&DVector<Floating>) -> FuncEvalMultivariate, ) -> Result<FuncEvalMultivariate, SolverError>

Source§

fn update_next_iterate<LS: LineSearch>( &mut self, line_search: &mut LS, eval_x_k: &FuncEvalMultivariate, oracle: &mut impl FnMut(&DVector<Floating>) -> FuncEvalMultivariate, direction: &DVector<Floating>, max_iter_line_search: usize, ) -> Result<(), SolverError>

Source§

fn minimize<LS: LineSearch>( &mut self, line_search: &mut LS, oracle: impl FnMut(&DVector<Floating>) -> FuncEvalMultivariate, max_iter_solver: usize, max_iter_line_search: usize, callback: Option<&mut dyn FnMut(&Self)>, ) -> Result<(), SolverError>

Auto Trait Implementations§

§

impl Freeze for Newton

§

impl RefUnwindSafe for Newton

§

impl Send for Newton

§

impl Sync for Newton

§

impl Unpin for Newton

§

impl UnwindSafe for Newton

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T> Instrument for T

Source§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper. Read more
Source§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> Same for T

Source§

type Output = T

Should always be Self
Source§

impl<SS, SP> SupersetOf<SS> for SP
where SS: SubsetOf<SP>,

Source§

fn to_subset(&self) -> Option<SS>

The inverse inclusion map: attempts to construct self from the equivalent element of its superset. Read more
Source§

fn is_in_subset(&self) -> bool

Checks if self is actually part of its subset T (and can be converted to it).
Source§

fn to_subset_unchecked(&self) -> SS

Use with care! Same as self.to_subset but without any property checks. Always succeeds.
Source§

fn from_subset(element: &SS) -> SP

The inclusion map: converts self to the equivalent element of its superset.
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V

Source§

impl<T> WithSubscriber for T

Source§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper. Read more