#![allow(clippy::disallowed_methods)]
#![allow(non_snake_case)]
use aprender::optim::{AugmentedLagrangian, InteriorPoint, ProjectedGradientDescent};
use aprender::primitives::{Matrix, Vector};
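/// Example 1: minimize ½‖x - target‖² subject to x ≥ 0 using projected
/// gradient descent. The unconstrained minimizer is `target` itself, so the
/// constrained optimum is its projection onto the non-negative orthant
/// (every negative component clamped to zero).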
fn nonnegative_quadratic_pgd() {
    println!("=== Example 1: Non-Negative Quadratic with Projected GD ===\n");
    let n = 8;
    let target = Vector::from_slice(&[1.5, -0.8, 2.3, -1.2, 0.5, 1.8, -0.3, 0.9]);
    println!("Quadratic Minimization Problem:");
    println!("minimize ½‖x - target‖²");
    println!("subject to: x ≥ 0 (non-negativity)");
    println!("Problem size: {n} variables\n");
    let objective = |x: &Vector<f32>| -> f32 {
        let diff = x - &target;
        0.5 * diff.dot(&diff)
    };
    let gradient = |x: &Vector<f32>| -> Vector<f32> { x - &target };
    // Projection onto the feasible set {x : x ≥ 0}: clamp each component at zero.
    let project = |x: &Vector<f32>| -> Vector<f32> {
        let mut x_proj = Vector::zeros(n);
        for i in 0..n {
            x_proj[i] = x[i].max(0.0);
        }
        x_proj
    };
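    // Projected gradient descent alternates a gradient step with a projection
    // back onto the feasible set: x_{k+1} = P(x_k - α ∇f(x_k)).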
    let x0 = Vector::zeros(n);
    let mut pgd = ProjectedGradientDescent::new(500, 0.1, 1e-6).with_line_search(0.5);
    println!("Running Projected Gradient Descent...\n");
    let result = pgd.minimize(objective, gradient, project, x0);
    println!("Convergence status: {:?}", result.status);
    println!("Iterations: {}", result.iterations);
    println!("Final objective: {:.6}", result.objective_value);
    println!("Gradient norm: {:.6}", result.gradient_norm);
    println!("Elapsed time: {:?}", result.elapsed_time);
    println!("\nOptimal solution:");
    print!(" x = [");
    for i in 0..n {
        print!("{:.3}", result.solution[i]);
        if i < n - 1 {
            print!(", ");
        }
    }
    println!("]");
    println!("\nExpected (projection of target onto x ≥ 0):");
    print!(" [");
    for i in 0..n {
        print!("{:.3}", target[i].max(0.0));
        if i < n - 1 {
            print!(", ");
        }
    }
    println!("]");
    let mut min_val = f32::INFINITY;
    for i in 0..n {
        if result.solution[i] < min_val {
            min_val = result.solution[i];
        }
    }
    println!("\nConstraint satisfaction:");
    println!(" Minimum value: {min_val:.6} (should be ≥ 0.0)");
    println!();
}
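/// Example 2: minimize ½‖Ax - b‖² subject to Cx = d, where A is a 20×10
/// design matrix and C encodes three linear equality constraints. The
/// augmented Lagrangian method enforces the equalities by combining a
/// quadratic penalty on ‖Cx - d‖ with running Lagrange multiplier estimates.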
fn equality_constrained_least_squares() {
    println!("=== Example 2: Equality-Constrained Least Squares ===\n");
    let n = 10;
    let m = 20;
    let p = 3;
    // Dense 20x10 design matrix A with a smooth, deterministic fill pattern.
    let mut a_data = vec![0.0; m * n];
    for i in 0..m {
        for j in 0..n {
            a_data[i * n + j] = ((i as f32 + 1.0) * (j as f32 + 1.0) * 0.2).sin();
        }
    }
    let A = Matrix::from_vec(m, n, a_data).expect("Valid matrix");
    let mut b_data = vec![0.0; m];
    for (i, item) in b_data.iter_mut().enumerate().take(m) {
        *item = (i as f32 * 0.3).cos();
    }
    let b = Vector::from_slice(&b_data);
    // Constraint matrix C (3x10): each row encodes one of the equalities below.
    let mut c_data = vec![0.0; p * n];
    // Row 0: x₀ + x₁ + x₂ = 1.0
    c_data[0] = 1.0;
    c_data[1] = 1.0;
    c_data[2] = 1.0;
    // Row 1: x₃ + x₄ = 0.5
    c_data[n + 3] = 1.0;
    c_data[n + 4] = 1.0;
    // Row 2: x₅ - x₆ = 0.0
    c_data[2 * n + 5] = 1.0;
    c_data[2 * n + 6] = -1.0;
    let C = Matrix::from_vec(p, n, c_data).expect("Valid constraint matrix");
    let d = Vector::from_slice(&[1.0, 0.5, 0.0]);
    println!("Equality-Constrained Least Squares:");
    println!("Variables: {n}");
    println!("Observations: {m}");
    println!("Equality constraints: {p}");
    println!("Constraints:");
    println!(" x₀ + x₁ + x₂ = 1.0");
    println!(" x₃ + x₄ = 0.5");
    println!(" x₅ - x₆ = 0.0\n");
    let objective = |x: &Vector<f32>| -> f32 {
        let ax = A.matvec(x).expect("Matrix-vector multiplication");
        let residual = &ax - &b;
        0.5 * residual.dot(&residual)
    };
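    // Gradient of ½‖Ax - b‖² with respect to x: ∇f(x) = Aᵀ(Ax - b).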
    let gradient = |x: &Vector<f32>| -> Vector<f32> {
        let ax = A.matvec(x).expect("Matrix-vector multiplication");
        let residual = &ax - &b;
        A.transpose()
            .matvec(&residual)
            .expect("Matrix-vector multiplication")
    };
    let equality = |x: &Vector<f32>| -> Vector<f32> {
        let cx = C.matvec(x).expect("Matrix-vector multiplication");
        &cx - &d
    };
    let equality_jac = |_x: &Vector<f32>| -> Vec<Vector<f32>> {
        let mut jac = Vec::with_capacity(p);
        for i in 0..p {
            let mut row = Vector::zeros(n);
            for j in 0..n {
                row[j] = C.get(i, j);
            }
            jac.push(row);
        }
        jac
    };
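    // Starting point chosen to satisfy all three equalities exactly:
    // 0.3 + 0.3 + 0.4 = 1.0, 0.2 + 0.3 = 0.5, 0.1 - 0.1 = 0.0.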
    let x0 = Vector::from_slice(&[0.3, 0.3, 0.4, 0.2, 0.3, 0.1, 0.1, 0.0, 0.0, 0.0]);
    let mut auglag = AugmentedLagrangian::new(100, 1e-4, 0.1);
    println!("Running Augmented Lagrangian Method...\n");
    let result = auglag.minimize_equality(objective, gradient, equality, equality_jac, x0);
    println!("Convergence status: {:?}", result.status);
    println!("Iterations: {}", result.iterations);
    println!("Final objective: {:.6}", result.objective_value);
    println!("Elapsed time: {:?}", result.elapsed_time);
    let h_final = equality(&result.solution);
    println!("\nConstraint satisfaction (h(x) should be ≈ 0):");
    for i in 0..p {
        println!(" h[{}] = {:.6}", i, h_final[i]);
    }
    let constraint_violation = h_final.norm();
    println!(" ‖h(x)‖ = {constraint_violation:.6}");
    println!("\nVerifying constraints:");
    let sum_012 = result.solution[0] + result.solution[1] + result.solution[2];
    println!(" x₀ + x₁ + x₂ = {sum_012:.6} (target: 1.0)");
    let sum_34 = result.solution[3] + result.solution[4];
    println!(" x₃ + x₄ = {sum_34:.6} (target: 0.5)");
    let diff_56 = result.solution[5] - result.solution[6];
    println!(" x₅ - x₆ = {diff_56:.6} (target: 0.0)");
    let y_pred = A
        .matvec(&result.solution)
        .expect("Matrix-vector multiplication");
    let mut residual_norm = 0.0;
    for i in 0..m {
        let diff = y_pred[i] - b[i];
        residual_norm += diff * diff;
    }
    residual_norm = residual_norm.sqrt();
    println!("\nLeast squares residual ‖Ax - b‖: {residual_norm:.6}");
    println!();
}
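/// Example 3: a small linear program,
///   minimize   -2x₀ - 3x₁
///   subject to x₀ + 2x₁ ≤ 8, 3x₀ + 2x₁ ≤ 12, x₀ ≥ 0, x₁ ≥ 0,
/// solved with an interior point method; all constraints are supplied in the
/// form g(x) ≤ 0.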
fn linear_programming_interior_point() {
    println!("=== Example 3: Linear Programming with Interior Point ===\n");
    println!("Linear Programming Problem:");
    println!("minimize -2x₀ - 3x₁");
    println!("subject to:");
    println!(" x₀ + 2x₁ ≤ 8");
    println!(" 3x₀ + 2x₁ ≤ 12");
    println!(" x₀ ≥ 0, x₁ ≥ 0\n");
    let c = Vector::from_slice(&[-2.0, -3.0]);
    let objective = |x: &Vector<f32>| -> f32 { c.dot(x) };
    let gradient = |_x: &Vector<f32>| -> Vector<f32> { c.clone() };
    // All four constraints rewritten in the standard form g(x) ≤ 0.
    let inequality = |x: &Vector<f32>| -> Vector<f32> {
        let g0 = x[0] + 2.0 * x[1] - 8.0;
        let g1 = 3.0 * x[0] + 2.0 * x[1] - 12.0;
        let g2 = -x[0];
        let g3 = -x[1];
        Vector::from_slice(&[g0, g1, g2, g3])
    };
    // Jacobian of g: one gradient row per constraint (constant for linear constraints).
    let inequality_jac = |_x: &Vector<f32>| -> Vec<Vector<f32>> {
        vec![
            Vector::from_slice(&[1.0, 2.0]),
            Vector::from_slice(&[3.0, 2.0]),
            Vector::from_slice(&[-1.0, 0.0]),
            Vector::from_slice(&[0.0, -1.0]),
        ]
    };
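    // Interior point methods keep the iterates strictly inside the feasible region,
    // typically by adding a log-barrier term -μ Σ ln(-gᵢ(x)) to the objective and
    // driving μ toward zero. The start x0 = (1, 2) is strictly feasible.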
    let x0 = Vector::from_slice(&[1.0, 2.0]);
    let mut interior_point = InteriorPoint::new(100, 1e-5, 0.1);
    println!("Running Interior Point Method...\n");
    let result = interior_point.minimize(objective, gradient, inequality, inequality_jac, x0);
    println!("Convergence status: {:?}", result.status);
    println!("Iterations: {}", result.iterations);
    println!("Final objective: {:.6}", result.objective_value);
    println!(
        "Optimal solution: x₀ = {:.6}, x₁ = {:.6}",
        result.solution[0], result.solution[1]
    );
    println!("Elapsed time: {:?}", result.elapsed_time);
    let g_final = inequality(&result.solution);
    println!("\nConstraint satisfaction (g(x) ≤ 0):");
    println!(" x₀ + 2x₁ - 8 = {:.6} (≤ 0)", g_final[0]);
    println!(" 3x₀ + 2x₁ - 12 = {:.6} (≤ 0)", g_final[1]);
    println!(" -x₀ = {:.6} (≤ 0, i.e., x₀ ≥ 0)", g_final[2]);
    println!(" -x₁ = {:.6} (≤ 0, i.e., x₁ ≥ 0)", g_final[3]);
    println!("\nActive constraints (g(x) ≈ 0):");
    let constraint_names = ["x₀ + 2x₁ ≤ 8", "3x₀ + 2x₁ ≤ 12", "x₀ ≥ 0", "x₁ ≥ 0"];
    for i in 0..4 {
        if g_final[i].abs() < 0.1 {
            println!(" {} (active)", constraint_names[i]);
        }
    }
    println!();
}
include!("includes/constrained_optimization_include_01.rs");