pnorm_descent_example/
pnorm_descent_example.rs

use nalgebra::{DMatrix, DVector};
use optimization_solvers::{
    BackTracking, FuncEvalMultivariate, LineSearchSolver, PnormDescent, Tracer,
};

fn main() {
    // Setting up logging
    std::env::set_var("RUST_LOG", "info");
    let _ = Tracer::default().with_normal_stdout_layer().build();

    // Convex quadratic function: f(x,y) = x^2 + 4y^2
    // This function has a minimum at (0, 0)
    let f_and_g = |x: &DVector<f64>| -> FuncEvalMultivariate {
        let x1 = x[0];
        let x2 = x[1];

        // Function value
        let f = x1.powi(2) + 4.0 * x2.powi(2);

        // Gradient
        let g1 = 2.0 * x1;
        let g2 = 8.0 * x2;
        let g = DVector::from_vec(vec![g1, g2]);

        FuncEvalMultivariate::new(f, g)
    };
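
    // Optional sanity check (not part of the original example, easy to remove):
    // compare the analytic gradient against a central finite-difference estimate,
    // using only the FuncEvalMultivariate accessors (.f(), .g()) already used below.
    let x_check = DVector::from_vec(vec![1.5, -0.5]);
    let g_analytic = f_and_g(&x_check).g().clone();
    let h = 1e-6;
    for i in 0..2 {
        let mut xp = x_check.clone();
        let mut xm = x_check.clone();
        xp[i] += h;
        xm[i] -= h;
        let (ep, em) = (f_and_g(&xp), f_and_g(&xm));
        let fd = (ep.f() - em.f()) / (2.0 * h);
        assert!((fd - g_analytic[i]).abs() < 1e-4);
    }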

    // Setting up the line search (backtracking)
    let armijo_factor = 1e-4;
    let beta = 0.5;
    let mut ls = BackTracking::new(armijo_factor, beta);
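
    // The Armijo (sufficient-decrease) backtracking rule, in its standard form:
    // starting from a trial step length t, accept t once
    //     f(x + t*d) <= f(x) + armijo_factor * t * grad_f(x)^T d,
    // otherwise shrink t <- beta * t and retry. This sketches the usual rule;
    // the crate's BackTracking implementation may differ in minor details.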

    // Setting up the solver with a diagonal preconditioner
    let tol = 1e-6;
    let x0 = DVector::from_vec(vec![2.0, 1.0]); // Starting point

    // Diagonal preconditioner P = diag(1, 4), proportional to the Hessian diag(2, 8).
    // The solver is given its inverse, P^-1 = diag(1, 1/4), to improve conditioning.
    let inverse_p = DMatrix::from_vec(2, 2, vec![1.0, 0.0, 0.0, 0.25]);
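
    // Illustration (not in the original example): the P-norm descent direction at a
    // point x is -P^-1 * grad_f(x). At x0 = (2, 1) the gradient is (4, 8); applying
    // P^-1 = diag(1, 1/4) rescales it to (4, 2), so the step points straight at the
    // minimum (0, 0) instead of the zig-zag path plain gradient descent would take
    // on this elongated bowl.
    let g0 = f_and_g(&x0).g().clone();
    let dir0 = -(&inverse_p * &g0);
    assert_eq!(dir0, DVector::from_vec(vec![-4.0, -2.0]));
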
    let mut solver = PnormDescent::new(tol, x0.clone(), inverse_p);

    // Running the solver
    let max_iter_solver = 50;
    let max_iter_line_search = 20;

    println!("=== P-Norm Descent Example ===");
    println!("Objective: f(x,y) = x^2 + 4y^2 (convex quadratic)");
    println!("Global minimum: (0, 0) with f(0,0) = 0");
    println!("Preconditioner: P = diag(1, 4), supplied as P^-1 = diag(1, 1/4)");
    println!("Starting point: {:?}", x0);
    println!("Tolerance: {}", tol);
    println!();

    match solver.minimize(
        &mut ls,
        f_and_g,
        max_iter_solver,
        max_iter_line_search,
        None,
    ) {
        Ok(()) => {
            let x = solver.x();
            let eval = f_and_g(x);
            println!("✅ Optimization completed successfully!");
            println!("Final iterate: {:?}", x);
            println!("Function value: {:.6}", eval.f());
            println!("Gradient norm: {:.6}", eval.g().norm());
            println!("Iterations: {}", solver.k());

            // Check if we're close to the known minimum
            let true_min = DVector::from_vec(vec![0.0, 0.0]);
            let distance_to_min = (x - true_min).norm();
            println!("Distance to true minimum: {:.6}", distance_to_min);
            println!("Expected function value: 0.0");

            // Verify optimality conditions
            let gradient_at_solution = eval.g();
            println!("Gradient at solution: {:?}", gradient_at_solution);
            println!(
                "Gradient norm should be close to 0: {}",
                gradient_at_solution.norm()
            );

            // Show some properties of P-norm descent
            println!("P-norm descent properties:");
            println!("  - Uses a preconditioner P to improve convergence");
            println!("  - Reduces to steepest (gradient) descent when P is the identity");
            println!("  - A good preconditioner can significantly improve the convergence rate");
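
            // Why the preconditioner helps here (added note, not printed by the example):
            // the Hessian of f is diag(2, 8), with condition number 4. Preconditioning
            // with P = diag(1, 4) gives an effective Hessian P^(-1/2) H P^(-1/2) = diag(2, 2),
            // condition number 1, i.e. the preconditioned problem is a perfectly round bowl.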
        }
        Err(e) => {
            println!("❌ Optimization failed: {:?}", e);
        }
    }
}