pub struct GradientDescent {
pub grad_tol: Floating,
pub x: DVector<Floating>,
pub k: usize,
}

Fields§

grad_tol: Floating — the convergence tolerance on the gradient norm.
x: DVector<Floating> — the current iterate.
k: usize — the iteration counter.

Implementations§
impl GradientDescent

Auto-generated by derive_getters::Getters.
pub fn x(&self) -> &DVector<Floating>
Get field x from instance of GradientDescent.
Examples found in repository: examples/quadratic_with_plots.rs (line 28)
fn main() {
    // Setting up log verbosity and the tracer.
    std::env::set_var("RUST_LOG", "debug");
    let _ = Tracer::default().with_normal_stdout_layer().build();
    // Setting up the oracle
    let matrix = DMatrix::from_vec(2, 2, vec![100., 0., 0., 100.]);
    let mut f_and_g = |x: &DVector<f64>| -> FuncEvalMultivariate {
        let f = x.dot(&(&matrix * x));
        let g = 2. * &matrix * x;
        FuncEvalMultivariate::new(f, g)
    };
    // Setting up the line search
    let armijo_factr = 1e-4;
    let beta = 0.5; // beta in (0, 1); notice that beta = 0.5 corresponds to bisection
    let mut ls = BackTracking::new(armijo_factr, beta);
    // Setting up the main solver, with its parameters and the initial guess
    let tol = 1e-6;
    let x0 = DVector::from_vec(vec![10., 10.]);
    let mut solver = GradientDescent::new(tol, x0);
    // We define a callback to store iterates and function evaluations
    let mut iterates = vec![];
    let mut solver_callback = |s: &GradientDescent| {
        iterates.push(s.x().clone());
    };
    // Running the solver
    let max_iter_solver = 100;
    let max_iter_line_search = 10;

    solver
        .minimize(
            &mut ls,
            f_and_g,
            max_iter_solver,
            max_iter_line_search,
            Some(&mut solver_callback),
        )
        .unwrap();
    // Printing the result
    let x = solver.x();
    let eval = f_and_g(x);
    println!("x: {:?}", x);
    println!("f(x): {}", eval.f());
    println!("g(x): {:?}", eval.g());

    // Plotting the iterates
    let n = 50;
    let start = -5.0;
    let end = 5.0;
    let plotter = Plotter3d::new(start, end, start, end, n)
        .append_plot(&mut f_and_g, "Objective function", 0.5)
        .append_scatter_points(&mut f_and_g, &iterates, "Iterates")
        .set_layout_size(1600, 1000);
    plotter.build("quadratic.html");
}

More examples
examples/gradient_descent_example.rs (line 57)
fn main() {
    // Setting up logging
    std::env::set_var("RUST_LOG", "info");
    let _ = Tracer::default().with_normal_stdout_layer().build();

    // Convex quadratic function: f(x,y) = x^2 + 2y^2
    // Global minimum at (0, 0) with f(0,0) = 0
    let f_and_g = |x: &DVector<f64>| -> FuncEvalMultivariate {
        let x1 = x[0];
        let x2 = x[1];

        // Function value
        let f = x1.powi(2) + 2.0 * x2.powi(2);

        // Gradient
        let g1 = 2.0 * x1;
        let g2 = 4.0 * x2;
        let g = DVector::from_vec(vec![g1, g2]);

        FuncEvalMultivariate::new(f, g)
    };

    // Setting up the line search (backtracking with Armijo condition)
    let armijo_factor = 1e-4;
    let beta = 0.5;
    let mut ls = BackTracking::new(armijo_factor, beta);

    // Setting up the solver
    let tol = 1e-6;
    let x0 = DVector::from_vec(vec![2.0, 1.0]); // Starting point
    let mut solver = GradientDescent::new(tol, x0.clone());

    // Running the solver
    let max_iter_solver = 100;
    let max_iter_line_search = 20;

    println!("=== Gradient Descent Example ===");
    println!("Objective: f(x,y) = x^2 + 2y^2 (convex quadratic)");
    println!("Global minimum: (0, 0) with f(0,0) = 0");
    println!("Starting point: {:?}", x0);
    println!("Tolerance: {}", tol);
    println!();

    match solver.minimize(
        &mut ls,
        f_and_g,
        max_iter_solver,
        max_iter_line_search,
        None,
    ) {
        Ok(()) => {
            let x = solver.x();
            let eval = f_and_g(x);
            println!("✅ Optimization completed successfully!");
            println!("Final iterate: {:?}", x);
            println!("Function value: {:.6}", eval.f());
            println!("Gradient norm: {:.6}", eval.g().norm());
            println!("Iterations: {}", solver.k());

            // Check if we're close to the known minimum
            let true_min = DVector::from_vec(vec![0.0, 0.0]);
            let distance_to_min = (x - true_min).norm();
            println!("Distance to true minimum: {:.6}", distance_to_min);
            println!("Expected function value: 0.0");
        }
        Err(e) => {
            println!("❌ Optimization failed: {:?}", e);
        }
    }
}

pub fn k(&self) -> &usize
Get field k from instance of GradientDescent.
Examples found in repository: examples/gradient_descent_example.rs (line 63) — see the full listing under x() above.

impl GradientDescent
pub fn new(grad_tol: Floating, x0: DVector<Floating>) -> Self

Creates a new solver from the gradient tolerance grad_tol and the initial guess x0.
Examples found in repository: examples/quadratic_with_plots.rs (line 24) and examples/gradient_descent_example.rs (line 36) — see the full listings under x() above.

Trait Implementations§
impl ComputeDirection for GradientDescent
fn compute_direction(&mut self, eval: &FuncEvalMultivariate) -> Result<DVector<Floating>, SolverError>
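No description is attached to this method, but for plain gradient descent the search direction is by definition the steepest-descent direction, i.e. the negative of the gradient returned by the oracle. The snippet below is a hypothetical, self-contained illustration of that rule; steepest_descent_direction is an assumed helper, not the crate's implementation.

use nalgebra::DVector;

// Hypothetical helper (not part of the crate): steepest descent moves
// against the gradient, which is what GradientDescent's compute_direction
// is expected to return.
fn steepest_descent_direction(gradient: &DVector<f64>) -> DVector<f64> {
    -gradient
}

fn main() {
    let g = DVector::from_vec(vec![4.0, 2.0]);
    let d = steepest_descent_direction(&g);
    // A descent direction d satisfies g · d < 0; here g · d = -‖g‖².
    assert!(g.dot(&d) < 0.0);
    assert_eq!(d, DVector::from_vec(vec![-4.0, -2.0]));
}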
impl LineSearchSolver for GradientDescent
fn xk(&self) -> &DVector<Floating>

fn xk_mut(&mut self) -> &mut DVector<Floating>

fn k(&self) -> &usize

fn k_mut(&mut self) -> &mut usize

fn has_converged(&self, eval: &FuncEvalMultivariate) -> bool

fn update_next_iterate<LS: LineSearch>(
    &mut self,
    line_search: &mut LS,
    eval_x_k: &FuncEvalMultivariate,
    oracle: &mut impl FnMut(&DVector<Floating>) -> FuncEvalMultivariate,
    direction: &DVector<Floating>,
    max_iter_line_search: usize,
) -> Result<(), SolverError>

fn setup(&mut self)

fn evaluate_x_k(
    &mut self,
    oracle: &mut impl FnMut(&DVector<Floating>) -> FuncEvalMultivariate,
) -> Result<FuncEvalMultivariate, SolverError>

fn minimize<LS: LineSearch>(
    &mut self,
    line_search: &mut LS,
    oracle: impl FnMut(&DVector<Floating>) -> FuncEvalMultivariate,
    max_iter_solver: usize,
    max_iter_line_search: usize,
    callback: Option<&mut dyn FnMut(&Self)>,
) -> Result<(), SolverError>
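Taken together, these methods suggest the shape of the driver loop behind minimize: setup resets state, evaluate_x_k queries the oracle at the current iterate, has_converged tests the stopping rule (presumably the gradient norm against grad_tol), compute_direction picks the step direction, and update_next_iterate advances along it via the line search. The sketch below mirrors that control flow under stated assumptions; it is not the crate's minimize, and a fixed step size alpha stands in for the backtracking line search.

use nalgebra::DVector;

fn main() {
    // Gradient of f(x) = ‖x‖², the same kind of convex quadratic used
    // in the repository examples.
    let grad = |x: &DVector<f64>| 2.0 * x;

    let grad_tol = 1e-6; // stopping rule on the gradient norm
    let alpha = 0.25;    // assumed fixed step; the crate uses backtracking
    let mut x = DVector::from_vec(vec![10.0, 10.0]);
    let mut k = 0usize;

    for _ in 0..100 {
        let g = grad(&x); // evaluate_x_k: query the oracle at x_k
        if g.norm() <= grad_tol {
            break; // has_converged: gradient norm below tolerance
        }
        let direction = -&g; // compute_direction: steepest descent
        x += alpha * direction; // update_next_iterate (line search elided)
        k += 1; // k_mut: advance the iteration counter
    }
    println!("x = {:?} after {} iterations", x, k);
}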
Auto Trait Implementations§
impl Freeze for GradientDescent
impl RefUnwindSafe for GradientDescent
impl Send for GradientDescent
impl Sync for GradientDescent
impl Unpin for GradientDescent
impl UnwindSafe for GradientDescent
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.

impl<T> Instrument for T

fn instrument(self, span: Span) -> Instrumented<Self>

fn in_current_span(self) -> Instrumented<Self>

impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,

fn to_subset(&self) -> Option<SS>

The inverse inclusion map: attempts to construct self from the equivalent element of its superset.

fn is_in_subset(&self) -> bool

Checks if self is actually part of its subset T (and can be converted to it).

fn to_subset_unchecked(&self) -> SS

Use with care! Same as self.to_subset but without any property checks. Always succeeds.

fn from_subset(element: &SS) -> SP

The inclusion map: converts self to the equivalent element of its superset.