smartcore/linear/lasso_optimizer.rs

//! An Interior-Point Method for Large-Scale l1-Regularized Least Squares
//!
//! This is a specialized interior-point method for solving large-scale l1-regularized LSPs that uses the
//! preconditioned conjugate gradients algorithm to compute the search direction.
//!
//! The interior-point method can solve large sparse problems, with a million variables and observations, in a few tens of minutes on a PC.
//! It can efficiently solve large dense problems that arise in sparse signal recovery with orthogonal transforms by exploiting fast algorithms for these transforms.
//!
//! ## References:
//! * ["An Interior-Point Method for Large-Scale l1-Regularized Least Squares", K. Koh, M. Lustig, S. Boyd, D. Gorinevsky](https://web.stanford.edu/~boyd/papers/pdf/l1_ls.pdf)
//! * [Simple Matlab Solver for l1-regularized Least Squares Problems](https://web.stanford.edu/~boyd/l1_ls/)
//!
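//! ## Example (sketch)
//!
//! A minimal, illustrative usage sketch (not compiled): the `DenseMatrix` constructor and the
//! parameter values below are assumptions for illustration only; in practice this optimizer is
//! driven internally by the crate's `Lasso` estimator rather than called directly.
//!
//! ```ignore
//! use crate::linalg::basic::matrix::DenseMatrix;
//! use crate::linear::lasso_optimizer::InteriorPointOptimizer;
//!
//! // 3 observations, 2 variables
//! let x = DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.]]);
//! let y = vec![1., 2., 3.];
//!
//! let mut optimizer = InteriorPointOptimizer::new(&x, 2);
//! // lambda = 0.5, at most 1000 iterations, duality-gap tolerance 1e-4
//! let w = optimizer.optimize(&x, &y, 0.5, 1000, 1e-4)?;
//! ```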

use crate::error::Failed;
use crate::linalg::basic::arrays::{Array1, Array2, ArrayView1, MutArray, MutArrayView1};
use crate::linear::bg_solver::BiconjugateGradientSolver;
use crate::numbers::floatnum::FloatNumber;

/// Interior Point Optimizer
pub struct InteriorPointOptimizer<T: FloatNumber, X: Array2<T>> {
    /// Cached Gram matrix `A^T * A`
    ata: X,
    /// Diagonal of the Hessian block `D1` of the barrier terms, scaled by `1/t`
    d1: Vec<T>,
    /// Diagonal of the off-diagonal Hessian block `D2` of the barrier terms, scaled by `1/t`
    d2: Vec<T>,
    /// Diagonal entries `2 + d1` of the preconditioner
    prb: Vec<T>,
    /// Determinants `prb * d1 - d2^2` of the 2x2 preconditioner blocks
    prs: Vec<T>,
}

impl<T: FloatNumber, X: Array2<T>> InteriorPointOptimizer<T, X> {
    /// Initialize a new Interior Point Optimizer for the design matrix `a`
    /// with `n` variables (columns), precomputing `A^T * A`.
    pub fn new(a: &X, n: usize) -> InteriorPointOptimizer<T, X> {
        InteriorPointOptimizer {
            ata: a.ab(true, a, false),
            d1: vec![T::zero(); n],
            d2: vec![T::zero(); n],
            prb: vec![T::zero(); n],
            prs: vec![T::zero(); n],
        }
    }

    /// Run the optimization: solve the l1-regularized least squares problem for the
    /// design matrix `x`, targets `y`, regularization parameter `lambda`, iteration
    /// limit `max_iter` and relative duality-gap tolerance `tol`, returning the
    /// coefficient vector.
    pub fn optimize(
        &mut self,
        x: &X,
        y: &Vec<T>,
        lambda: T,
        max_iter: usize,
        tol: T,
    ) -> Result<Vec<T>, Failed> {
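        // The problem solved here (following the referenced paper) is
        //     minimize ||x * w - y||^2 + lambda * ||w||_1,
        // rewritten with bound variables u (|w_i| <= u_i) and a logarithmic barrier
        // for the constraints w_i - u_i < 0 and -w_i - u_i < 0.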
        let (n, p) = x.shape();
        let p_f64 = T::from_usize(p).unwrap();

        let lambda = lambda.max(T::epsilon());

        // parameters
        let pcgmaxi = 5000;
        let min_pcgtol = T::from_f64(0.1).unwrap();
        let eta = T::from_f64(1E-3).unwrap();
        let alpha = T::from_f64(0.01).unwrap();
        let beta = T::from_f64(0.5).unwrap();
        let gamma = T::from_f64(-0.25).unwrap();
        let mu = T::two();

        // let y = M::from_row_vector(y.sub_scalar(y.mean_by())).transpose();
        let y = y.sub_scalar(T::from_f64(y.mean_by()).unwrap());

        let max_ls_iter = 100;
        let mut pitr = 0;
        let mut w = Vec::zeros(p);
        let mut neww = w.clone();
        let mut u = Vec::ones(p);
        let mut newu = u.clone();

        let mut f = X::fill(p, 2, -T::one());
        let mut newf = f.clone();

        let mut q1 = vec![T::zero(); p];
        let mut q2 = vec![T::zero(); p];

        let mut dx = Vec::zeros(p);
        let mut du = Vec::zeros(p);
        let mut dxu = Vec::zeros(2 * p);
        let mut grad = Vec::zeros(2 * p);

        let mut nu = Vec::zeros(n);
        let mut dobj = T::zero();
        let mut s = T::infinity();
        let mut t = T::one()
            .max(T::one() / lambda)
            .min(T::two() * p_f64 / T::from(1e-3).unwrap());

        let lambda_f64 = lambda.to_f64().unwrap();

        for ntiter in 0..max_iter {
            let mut z = w.xa(true, x);

            for i in 0..n {
                z[i] -= y[i];
                nu[i] = T::two() * z[i];
            }

            // CALCULATE DUALITY GAP
            let xnu = nu.xa(false, x);
            let max_xnu = xnu.norm(f64::INFINITY);
            if max_xnu > lambda_f64 {
                let lnu = T::from_f64(lambda_f64 / max_xnu).unwrap();
                nu.mul_scalar_mut(lnu);
            }

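            // nu = 2 * (x * w - y), scaled so that ||x^T * nu||_inf <= lambda, is dual
            // feasible; it yields the lower bound -1/4 * ||nu||^2 - nu^T * y on the primal
            // objective, and the duality gap pobj - dobj bounds the suboptimality of w.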
            let pobj = z.dot(&z) + lambda * T::from_f64(w.norm(1f64)).unwrap();
            dobj = dobj.max(gamma * nu.dot(&nu) - nu.dot(&y));

            let gap = pobj - dobj;

            // STOPPING CRITERION
            if gap / dobj < tol {
                break;
            }

            // UPDATE t
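            // Increase the barrier parameter (by at most a factor of mu) only after the
            // previous line search took a large step (s >= 1/2), targeting t ~ 2*p*mu/gap.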
            if s >= T::half() {
                t = t.max((T::two() * p_f64 * mu / gap).min(mu * t));
            }

            // CALCULATE NEWTON STEP
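            // q1 = 1/(u + w), q2 = 1/(u - w); d1 and d2 are the diagonal and off-diagonal
            // entries of the Hessian blocks of the barrier terms, scaled by 1/t.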
            for i in 0..p {
                let q1i = T::one() / (u[i] + w[i]);
                let q2i = T::one() / (u[i] - w[i]);
                q1[i] = q1i;
                q2[i] = q2i;
                self.d1[i] = (q1i * q1i + q2i * q2i) / t;
                self.d2[i] = (q1i * q1i - q2i * q2i) / t;
            }

            let mut gradphi = z.xa(false, x);

            for i in 0..p {
                let g1 = T::two() * gradphi[i] - (q1[i] - q2[i]) / t;
                let g2 = lambda - (q1[i] + q2[i]) / t;
                gradphi[i] = g1;
                grad[i] = -g1;
                grad[i + p] = -g2;
            }

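            // Preconditioner for PCG: the Hessian block 2 * x^T * x is approximated by
            // 2 * I, so each coordinate pair is preconditioned by the 2x2 block
            // [prb_i, d2_i; d2_i, d1_i] with determinant prs_i = prb_i * d1_i - d2_i^2.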
            for i in 0..p {
                self.prb[i] = T::two() + self.d1[i];
                self.prs[i] = self.prb[i] * self.d1[i] - self.d2[i].powi(2);
            }

            let normg = T::from_f64(grad.norm2()).unwrap();
            let mut pcgtol = min_pcgtol.min(eta * gap / T::one().min(normg));
            if ntiter != 0 && pitr == 0 {
                pcgtol *= min_pcgtol;
            }

            let error = self.solve_mut(x, &grad, &mut dxu, pcgtol, pcgmaxi)?;
            if error > pcgtol {
                pitr = pcgmaxi;
            }

            dx[..p].copy_from_slice(&dxu[..p]);
            du[..p].copy_from_slice(&dxu[p..(p + p)]);

            // BACKTRACKING LINE SEARCH
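            // Backtrack until the new iterate is strictly feasible (all entries of newf
            // are negative) and satisfies the sufficient-decrease condition
            // newphi - phi <= alpha * s * gdx.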
            let phi = z.dot(&z) + lambda * u.sum() - Self::sumlogneg(&f) / t;
            s = T::one();
            let gdx = grad.dot(&dxu);

            let mut lsiter = 0;
            while lsiter < max_ls_iter {
                for i in 0..p {
                    neww[i] = w[i] + s * dx[i];
                    newu[i] = u[i] + s * du[i];
                    newf.set((i, 0), neww[i] - newu[i]);
                    newf.set((i, 1), -neww[i] - newu[i]);
                }

                if newf
                    .iterator(0)
                    .fold(T::neg_infinity(), |max, v| v.max(max))
                    < T::zero()
                {
                    let mut newz = neww.xa(true, x);
                    for i in 0..n {
                        newz[i] -= y[i];
                    }

                    let newphi = newz.dot(&newz) + lambda * newu.sum() - Self::sumlogneg(&newf) / t;
                    if newphi - phi <= alpha * s * gdx {
                        break;
                    }
                }
                s = beta * s;
                lsiter += 1;
            }

            if lsiter == max_ls_iter {
                return Err(Failed::fit(
                    "Exceeded maximum number of iterations for interior point optimizer",
                ));
            }

            w.copy_from(&neww);
            u.copy_from(&newu);
            f.copy_from(&newf);
        }

        Ok(w)
    }

    /// Sum of `ln(-f)` over both columns of `f`, i.e. the value of the logarithmic
    /// barrier for the constraints `w - u < 0` and `-w - u < 0`.
    fn sumlogneg(f: &X) -> T {
        let (n, _) = f.shape();
        let mut sum = T::zero();
        for i in 0..n {
            sum += (-*f.get((i, 0))).ln();
            sum += (-*f.get((i, 1))).ln();
        }
        sum
    }
}

impl<'a, T: FloatNumber, X: Array2<T>> BiconjugateGradientSolver<'a, T, X>
    for InteriorPointOptimizer<T, X>
{
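    // Apply the inverse of the 2x2 block-diagonal preconditioner to b, writing the
    // result into x; each block [prb_i, d2_i; d2_i, d1_i] is inverted in closed form.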
    fn solve_preconditioner(&self, a: &'a X, b: &[T], x: &mut [T]) {
        let (_, p) = a.shape();

        for i in 0..p {
            x[i] = (self.d1[i] * b[i] - self.d2[i] * b[i + p]) / self.prs[i];
            x[i + p] = (-self.d2[i] * b[i] + self.prb[i] * b[i + p]) / self.prs[i];
        }
    }

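    // Multiply the Hessian [2 * A^T * A + D1, D2; D2, D1] by the stacked vector
    // x = [dx; du] without forming the full 2p x 2p matrix.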
    fn mat_vec_mul(&self, _: &X, x: &Vec<T>, y: &mut Vec<T>) {
        let (_, p) = self.ata.shape();
        let x_slice = Vec::from_slice(x.slice(0..p).as_ref());
        let atax = x_slice.xa(true, &self.ata);

        for i in 0..p {
            y[i] = T::two() * atax[i] + self.d1[i] * x[i] + self.d2[i] * x[i + p];
            y[i + p] = self.d2[i] * x[i] + self.d1[i] * x[i + p];
        }
    }

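    // The system matrix is symmetric, so the transposed product is identical to mat_vec_mul.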
    fn mat_t_vec_mul(&self, a: &X, x: &Vec<T>, y: &mut Vec<T>) {
        self.mat_vec_mul(a, x, y);
    }
}