aprender-core 0.31.2

Next-generation machine learning library in pure Rust

// CONTRACT: lbfgs-kernel-v1.yaml
// HASH: sha256:a7b8c9d0e1f23456
// Generated by: pv probar --binding
// DO NOT EDIT — regenerate with `pv probar --binding`

use aprender::optim::{Optimizer, LBFGS};
use aprender::primitives::Vector;
use proptest::prelude::*;

proptest! {
    /// Obligation: Objective decrease on convex function (monotonicity)
    /// Formal: f(x_{k+1}) < f(x_k) on strictly convex functions
    #[test]
    fn prop_objective_decrease(
        x0_val in -5.0f32..5.0,
        y0_val in -5.0f32..5.0
    ) {
        // Quadratic: f(x,y) = x^2 + y^2 (convex, minimum at origin)
        let f = |x: &Vector<f32>| x[0] * x[0] + x[1] * x[1];
        let grad = |x: &Vector<f32>| Vector::from_slice(&[2.0 * x[0], 2.0 * x[1]]);

        let x0 = Vector::from_slice(&[x0_val, y0_val]);
        let f_initial = f(&x0);

        // LBFGS::new(max_iters, tolerance, history_size m) — argument order
        // as used consistently by the other bindings in this file
        let mut opt = LBFGS::new(50, 1e-6, 5);
        let result = opt.minimize(f, grad, x0);

        // The tolerance admits the degenerate draw x0 ≈ origin, where
        // f_initial is already ~0 and cannot strictly decrease.
        prop_assert!(
            result.objective_value <= f_initial + 1e-6,
            "objective didn't decrease: final={} > initial={}",
            result.objective_value, f_initial
        );
    }
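
    /// Supporting sketch, not part of the generated contract: the decrease
    /// obligation above ultimately rests on the line search. A plain Armijo
    /// backtracking step along -gradient on the same quadratic always finds
    /// a strictly decreasing step; this pins down the mechanism the
    /// end-to-end property exercises through `minimize`.
    #[test]
    fn prop_armijo_backtracking_decreases(
        x_val in 0.5f32..5.0,
        y_val in 0.5f32..5.0
    ) {
        let f = |x: f32, y: f32| x * x + y * y;
        let f0 = f(x_val, y_val);
        let (gx, gy) = (2.0 * x_val, 2.0 * y_val); // analytic gradient
        let gtp = -(gx * gx + gy * gy); // g^T p for p = -g; negative away from origin
        let (c1, mut alpha) = (1e-4f32, 1.0f32);
        // Halve the step until the sufficient-decrease (Armijo) condition holds.
        while f(x_val - alpha * gx, y_val - alpha * gy) > f0 + c1 * alpha * gtp {
            alpha *= 0.5;
        }
        prop_assert!(
            f(x_val - alpha * gx, y_val - alpha * gy) < f0,
            "Armijo step failed to decrease f from {}", f0
        );
    }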

    /// Obligation: History bounded by m (invariant)
    /// Formal: stored pairs <= m
    ///
    /// Verified indirectly: L-BFGS with a small m must still converge,
    /// exercising eviction of old (s, y) pairs from the bounded history;
    /// the stored pair count itself is not observable from outside.
    #[test]
    fn prop_history_bounded(
        m in 2usize..10
    ) {
        let f = |x: &Vector<f32>| x[0] * x[0] + x[1] * x[1];
        let grad = |x: &Vector<f32>| Vector::from_slice(&[2.0 * x[0], 2.0 * x[1]]);

        let x0 = Vector::from_slice(&[3.0, 4.0]);
        let mut opt = LBFGS::new(100, 1e-8, m);
        let result = opt.minimize(f, grad, x0);

        // A direct check would need access to the internal (s, y) store.
        // Converging with a small m at least exercises the eviction path
        // and is consistent with the bounded-history invariant.
        prop_assert!(
            result.objective_value < 1.0,
            "LBFGS with m={} didn't converge: obj={}",
            m, result.objective_value
        );
    }
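
    /// Supporting sketch, not part of the generated contract and not
    /// aprender's actual internals: standard L-BFGS implementations keep the
    /// (s, y) history in a deque and evict the oldest pair once m are stored.
    /// This spells out the invariant the property above checks indirectly.
    #[test]
    fn prop_deque_history_respects_bound(
        m in 2usize..10,
        iters in 1usize..100
    ) {
        use std::collections::VecDeque;
        let mut history: VecDeque<(f32, f32)> = VecDeque::with_capacity(m);
        for k in 0..iters {
            if history.len() == m {
                history.pop_front(); // evict the oldest (s, y) pair
            }
            history.push_back((k as f32, -(k as f32)));
            prop_assert!(history.len() <= m, "history grew past m={}", m);
        }
    }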

    /// Obligation: Curvature condition (invariant)
    /// Formal: y^T s > 0 for stored pairs on convex functions
    ///
    /// Verified indirectly: on a convex quadratic, L-BFGS converges
    /// to the minimum. Convergence requires y^T s > 0 for the stored pairs:
    /// rho = 1/(y^T s) blows up as y^T s approaches zero, and a negative
    /// value breaks positive definiteness of the implicit Hessian estimate.
    #[test]
    fn prop_curvature_condition(
        x0_val in 1.0f32..5.0,
        y0_val in 1.0f32..5.0
    ) {
        let f = |x: &Vector<f32>| x[0] * x[0] + x[1] * x[1];
        let grad = |x: &Vector<f32>| Vector::from_slice(&[2.0 * x[0], 2.0 * x[1]]);

        let x0 = Vector::from_slice(&[x0_val, y0_val]);
        let mut opt = LBFGS::new(50, 1e-8, 5);
        let result = opt.minimize(f, grad, x0);

        // On this quadratic, y^T s > 0 holds for every nonzero step (see the
        // sketch below), so convergence confirms the two-loop recursion
        // consumed well-curved pairs without blowing up
        prop_assert!(
            result.objective_value < 1e-3,
            "LBFGS didn't converge (obj={}), curvature condition may have failed",
            result.objective_value
        );
        // Solution should be near origin
        prop_assert!(
            result.solution[0].abs() < 0.1 && result.solution[1].abs() < 0.1,
            "solution not near origin: [{}, {}]",
            result.solution[0], result.solution[1]
        );
    }
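
    /// Worked sketch, not part of the generated contract: on f(x) = ||x||^2
    /// the gradient is linear, g(x) = 2x, so any step s = x_{k+1} - x_k gives
    /// y = g_{k+1} - g_k = 2s and hence y^T s = 2 ||s||^2 > 0. This identity
    /// is why convergence on this quadratic implies the curvature condition.
    #[test]
    fn prop_quadratic_curvature_identity(
        sx in -3.0f32..3.0,
        sy in -3.0f32..3.0
    ) {
        prop_assume!(sx.abs() > 1e-3 || sy.abs() > 1e-3); // require a nonzero step
        let (yx, yy) = (2.0 * sx, 2.0 * sy); // y = 2s for this quadratic
        let yts = yx * sx + yy * sy;
        prop_assert!(yts > 0.0, "y^T s = {} is not positive", yts);
    }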

    /// Obligation: First iteration uses steepest descent (equivalence)
    /// Formal: empty history → direction = -gradient
    ///
    /// Verified indirectly: a single-iteration L-BFGS improves the objective,
    /// consistent with one step along -gradient (the direction itself is not
    /// observable from outside the optimizer).
    #[test]
    fn prop_first_iteration_steepest(
        x0_val in 1.0f32..10.0,
        y0_val in 1.0f32..10.0
    ) {
        let f = |x: &Vector<f32>| x[0] * x[0] + x[1] * x[1];
        let grad = |x: &Vector<f32>| Vector::from_slice(&[2.0 * x[0], 2.0 * x[1]]);

        let x0 = Vector::from_slice(&[x0_val, y0_val]);
        let f0 = f(&x0);

        // One iteration should improve objective (steepest descent step)
        let mut opt = LBFGS::new(1, 1e-12, 5);
        let result = opt.minimize(f, grad, x0);

        prop_assert!(
            result.objective_value <= f0,
            "first step didn't improve: f0={}, f1={}",
            f0, result.objective_value
        );
    }
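
    /// Supporting sketch, not part of the generated contract: a reference
    /// two-loop recursion (Nocedal & Wright, Algorithm 7.4, with H0 = I) run
    /// on an empty history skips both loops and returns d = -g exactly. This
    /// is the equivalence the property above can only observe through the
    /// objective value.
    #[test]
    fn prop_two_loop_empty_history_is_neg_gradient(
        gx in -10.0f32..10.0,
        gy in -10.0f32..10.0
    ) {
        fn two_loop(g: [f32; 2], hist: &[([f32; 2], [f32; 2])]) -> [f32; 2] {
            let dot = |a: [f32; 2], b: [f32; 2]| a[0] * b[0] + a[1] * b[1];
            let mut q = g;
            let mut alphas = Vec::with_capacity(hist.len());
            for &(s, y) in hist.iter().rev() { // newest pair first
                let rho = 1.0 / dot(y, s);
                let a = rho * dot(s, q);
                q = [q[0] - a * y[0], q[1] - a * y[1]];
                alphas.push((a, rho));
            }
            let mut r = q; // r = H0 * q with H0 = I
            for (&(s, y), &(a, rho)) in hist.iter().zip(alphas.iter().rev()) {
                let b = rho * dot(y, r);
                r = [r[0] + (a - b) * s[0], r[1] + (a - b) * s[1]];
            }
            [-r[0], -r[1]]
        }
        let d = two_loop([gx, gy], &[]);
        // Exact comparison is safe: negating a finite f32 is exact.
        prop_assert!(d[0] == -gx && d[1] == -gy, "direction was not -g");
    }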

    /// Obligation: SIMD matches scalar within ULP (equivalence)
    #[test]
    #[ignore = "SIMD equivalence — trueno domain"]
    fn prop_simd_equivalence(
        _x in proptest::collection::vec(-10.0f32..10.0, 1..32usize)
    ) {
        // SIMD equivalence testing is trueno's responsibility; the ULP
        // comparison it implies is sketched after this block
    }
}
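
// Supporting sketch, not part of the generated contract and not trueno's
// actual harness: "within ULP" above means comparing IEEE-754 bit patterns.
// One common formulation maps each f32 onto a monotonically ordered integer
// line so that adjacent representable floats differ by exactly 1 (NaN
// handling omitted for brevity).
fn ulp_distance(a: f32, b: f32) -> u64 {
    fn ordered(x: f32) -> i64 {
        let bits = x.to_bits() as i32;
        // Flip negative values so integer order matches float order;
        // +0.0 and -0.0 both map to 0.
        if bits < 0 {
            (i32::MIN as i64) - (bits as i64)
        } else {
            bits as i64
        }
    }
    (ordered(a) - ordered(b)).unsigned_abs()
}

#[test]
fn ulp_distance_sanity() {
    let next_up = f32::from_bits(1.0f32.to_bits() + 1);
    assert_eq!(ulp_distance(1.0, next_up), 1);
    assert_eq!(ulp_distance(-0.0, 0.0), 0);
}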