scirs2_optimize/unconstrained/efficient_sparse.rs

//! Efficient sparse Jacobian and Hessian handling for optimization
//!
//! This module provides optimization algorithms that efficiently handle sparse
//! Jacobians and Hessians using advanced sparsity patterns, adaptive coloring,
//! and specialized matrix operations.
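//!
//! A minimal usage sketch, assuming the items defined below are in scope
//! (marked `ignore` so it is not compiled as a doc-test):
//!
//! ```ignore
//! use ndarray::{Array1, ArrayView1};
//!
//! let fun = |x: &ArrayView1<f64>| -> f64 { x[0].powi(2) + x[2].powi(2) };
//! let grad = |x: &ArrayView1<f64>| -> Array1<f64> {
//!     let mut g = Array1::zeros(3);
//!     g[0] = 2.0 * x[0];
//!     g[2] = 2.0 * x[2];
//!     g
//! };
//!
//! let result = minimize_efficient_sparse_newton(
//!     fun,
//!     grad,
//!     Array1::ones(3),
//!     &EfficientSparseOptions::default(),
//! )
//! .unwrap();
//! assert!(result.success);
//! ```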

use crate::error::OptimizeError;
use crate::sparse_numdiff::SparseFiniteDiffOptions;
use crate::unconstrained::line_search::backtracking_line_search;
use crate::unconstrained::result::OptimizeResult;
use crate::unconstrained::utils::check_convergence;
use crate::unconstrained::Options;
use ndarray::{Array1, Array2, ArrayView1, Axis};
use scirs2_sparse::{csr_array::CsrArray, sparray::SparseArray};
// Collections for potential future use
// use std::collections::{HashMap, HashSet};

/// Options for efficient sparse optimization
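///
/// A hedged example of overriding the defaults (fields as defined below):
///
/// ```ignore
/// let mut opts = EfficientSparseOptions::default();
/// opts.auto_detect_sparsity = false; // supply sparsity patterns yourself
/// opts.sparse_percentage_threshold = 0.05; // only use sparse storage below 5% non-zeros
/// ```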
#[derive(Debug, Clone)]
pub struct EfficientSparseOptions {
    /// Base optimization options
    pub base_options: Options,
    /// Sparse finite difference options
    pub sparse_fd_options: SparseFiniteDiffOptions,
    /// Automatically detect sparsity pattern
    pub auto_detect_sparsity: bool,
    /// Sparsity detection threshold
    pub sparsity_threshold: f64,
    /// Maximum number of sparsity detection iterations
    pub max_sparsity_iterations: usize,
    /// Use adaptive sparsity pattern refinement
    pub adaptive_sparsity: bool,
    /// Enable Hessian sparsity for Newton-type methods
    pub use_sparse_hessian: bool,
    /// Maximum fraction of non-zeros for sparse representation
    pub sparse_percentage_threshold: f64,
    /// Enable parallel sparse operations
    pub parallel_sparse_ops: bool,
}

impl Default for EfficientSparseOptions {
    fn default() -> Self {
        Self {
            base_options: Options::default(),
            sparse_fd_options: SparseFiniteDiffOptions::default(),
            auto_detect_sparsity: true,
            sparsity_threshold: 1e-12,
            max_sparsity_iterations: 5,
            adaptive_sparsity: true,
            use_sparse_hessian: true,
            sparse_percentage_threshold: 0.1, // Use sparse if <10% non-zero
            parallel_sparse_ops: true,
        }
    }
}

/// Sparse pattern information and statistics
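///
/// The `*_sparsity` fields store the fraction of non-zero entries
/// (`nnz / n^2`), so smaller values mean sparser structure.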
#[derive(Debug, Clone)]
pub struct SparsityInfo {
    /// Jacobian sparsity pattern
    pub jacobian_pattern: Option<CsrArray<f64>>,
    /// Hessian sparsity pattern
    pub hessian_pattern: Option<CsrArray<f64>>,
    /// Number of non-zero elements in Jacobian
    pub jacobian_nnz: usize,
    /// Number of non-zero elements in Hessian
    pub hessian_nnz: usize,
    /// Number of variables (problem dimension)
    pub total_elements: usize,
    /// Fraction of non-zero entries in the Jacobian (1.0 = fully dense)
    pub jacobian_sparsity: f64,
    /// Fraction of non-zero entries in the Hessian (1.0 = fully dense)
    pub hessian_sparsity: f64,
}

impl SparsityInfo {
    fn new(n: usize) -> Self {
        Self {
            jacobian_pattern: None,
            hessian_pattern: None,
            jacobian_nnz: 0,
            hessian_nnz: 0,
            total_elements: n,
            jacobian_sparsity: 1.0,
            hessian_sparsity: 1.0,
        }
    }
}

/// Sparse quasi-Newton approximation using limited memory and sparsity
struct SparseQuasiNewton {
    /// Sparse approximation of inverse Hessian
    h_inv_sparse: Option<CsrArray<f64>>,
    /// History of sparse gradients
    sparse_grad_history: Vec<CsrArray<f64>>,
    /// History of sparse steps
    sparse_step_history: Vec<Array1<f64>>,
    /// Maximum history size
    max_history: usize,
    /// Sparsity pattern for Hessian approximation
    #[allow(dead_code)]
    pattern: Option<CsrArray<f64>>,
}

impl SparseQuasiNewton {
    fn new(_n: usize, max_history: usize) -> Self {
        Self {
            h_inv_sparse: None,
            sparse_grad_history: Vec::new(),
            sparse_step_history: Vec::new(),
            max_history,
            pattern: None,
        }
    }

    fn update_sparse_bfgs(
        &mut self,
        s: &Array1<f64>,
        y_sparse: &CsrArray<f64>,
        sparsity_pattern: &CsrArray<f64>,
    ) -> Result<(), OptimizeError> {
        // Convert sparse gradient difference to dense for computation
        let y = sparse_to_dense(y_sparse);

        let s_dot_y = s.dot(&y);
        if s_dot_y <= 1e-10 {
            return Ok(()); // Skip update if curvature condition not satisfied
        }

        // Initialize inverse Hessian approximation if needed
        if self.h_inv_sparse.is_none() {
            self.h_inv_sparse = Some(create_sparse_identity(s.len(), sparsity_pattern)?);
        }

        // Perform sparse BFGS update using the Sherman-Morrison-Woodbury formula
        // This is a simplified version - a full implementation would use efficient
        // sparse matrix operations throughout
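        // Note: the round trip below densifies the approximation (O(n^2)
        // memory), applies the dense BFGS update, then re-sparsifies,
        // discarding any fill-in outside `sparsity_pattern`.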
        if let Some(ref mut h_inv) = self.h_inv_sparse {
            // Convert to dense for update, then back to sparse
            let h_inv_dense = sparse_to_dense_matrix(h_inv);
            let h_inv_updated = dense_bfgs_update(&h_inv_dense, s, &y)?;

            // Convert back to sparse format, preserving pattern
            *h_inv = dense_to_sparse_matrix(&h_inv_updated, sparsity_pattern)?;
        }

        // Update history (keep bounded)
        self.sparse_step_history.push(s.clone());
        self.sparse_grad_history.push(y_sparse.clone());

        while self.sparse_step_history.len() > self.max_history {
            self.sparse_step_history.remove(0);
            self.sparse_grad_history.remove(0);
        }

        Ok(())
    }

    fn apply_inverse_hessian(
        &self,
        g_sparse: &CsrArray<f64>,
    ) -> Result<Array1<f64>, OptimizeError> {
        if let Some(ref h_inv) = self.h_inv_sparse {
            // Quasi-Newton descent direction: p = -H_inv * g
            let hg = sparse_matrix_vector_product(h_inv, g_sparse)?;
            Ok(-hg)
        } else {
            // No approximation available yet: fall back to steepest descent
            Ok(-sparse_to_dense(g_sparse))
        }
    }
}

/// Efficient sparse Newton method with adaptive sparsity detection
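///
/// Direction selection: a sparse Newton step when a Hessian sparsity pattern
/// is available, otherwise the sparse quasi-Newton (BFGS-style) direction,
/// with steepest descent as the final fallback.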
pub fn minimize_efficient_sparse_newton<F, G>(
    mut fun: F,
    mut grad: G,
    x0: Array1<f64>,
    options: &EfficientSparseOptions,
) -> Result<OptimizeResult<f64>, OptimizeError>
where
    F: FnMut(&ArrayView1<f64>) -> f64 + Sync,
    G: FnMut(&ArrayView1<f64>) -> Array1<f64>,
{
    let n = x0.len();
    let base_opts = &options.base_options;

    // Initialize variables
    let mut x = x0.to_owned();
    let mut f = fun(&x.view());
    let mut g_dense = grad(&x.view());

    // Initialize sparsity detection
    let mut sparsity_info = SparsityInfo::new(n);
    let mut sparse_qn = SparseQuasiNewton::new(n, 10);

    // Detect initial sparsity pattern if requested
    if options.auto_detect_sparsity {
        sparsity_info = detect_sparsity_patterns(&mut fun, &mut grad, &x.view(), options)?;
        println!(
            "Detected non-zero density: Jacobian {:.1}%, Hessian {:.1}%",
            sparsity_info.jacobian_sparsity * 100.0,
            sparsity_info.hessian_sparsity * 100.0
        );
    }

    // Convert initial gradient to sparse if beneficial
    let mut g_sparse = if should_use_sparse(&g_dense, options.sparse_percentage_threshold) {
        dense_to_sparse_vector(&g_dense, options.sparsity_threshold)?
    } else {
        dense_to_sparse_vector(&g_dense, 0.0)? // Threshold 0.0 keeps every non-zero entry
    };

    let mut iter = 0;
    let mut nfev = 1;
    let mut _njev = 1;

    // Main optimization loop
    while iter < base_opts.max_iter {
        // Check convergence
        let grad_norm = sparse_vector_norm(&g_sparse);
        if grad_norm < base_opts.gtol {
            break;
        }

        // Compute search direction using sparse operations
        let p = if options.use_sparse_hessian && sparsity_info.hessian_pattern.is_some() {
            // Use sparse Hessian for Newton step
            compute_sparse_newton_direction(
                &mut fun,
                &x.view(),
                &g_sparse,
                &sparsity_info,
                options,
            )?
        } else {
            // Use sparse quasi-Newton approximation
            sparse_qn.apply_inverse_hessian(&g_sparse)?
        };

        // Project search direction for bounds if needed
        let p = apply_bounds_projection(&p, &x, &options.base_options);

        // Line search
        let alpha_init = 1.0;
        let (alpha, f_new) = backtracking_line_search(
            &mut fun,
            &x.view(),
            f,
            &p.view(),
            &sparse_to_dense(&g_sparse).view(),
            alpha_init,
            0.0001,
            0.5,
            base_opts.bounds.as_ref(),
        );
        nfev += 1;

        // Update variables
        let s = alpha * &p;
        let x_new = &x + &s;

        // Check step size convergence
        if s.mapv(|si| si.abs()).sum() < base_opts.xtol {
            x = x_new;
            break;
        }

        // Compute new gradient
        let g_new_dense = grad(&x_new.view());
        _njev += 1;

        let g_new_sparse = if should_use_sparse(&g_new_dense, options.sparse_percentage_threshold) {
            dense_to_sparse_vector(&g_new_dense, options.sparsity_threshold)?
        } else {
            dense_to_sparse_vector(&g_new_dense, 0.0)?
        };

        // Check function value convergence
        if check_convergence(
            f - f_new,
            0.0,
            sparse_vector_norm(&g_new_sparse),
            base_opts.ftol,
            0.0,
            base_opts.gtol,
        ) {
            // Accept the step and keep the returned values consistent with it
            x = x_new;
            f = f_new;
            g_dense = g_new_dense;
            break;
        }

        // Update sparse quasi-Newton approximation
        let y_sparse = sparse_vector_subtract(&g_new_sparse, &g_sparse)?;
        if let Some(ref pattern) = sparsity_info.hessian_pattern {
            sparse_qn.update_sparse_bfgs(&s, &y_sparse, pattern)?;
        }

        // Adaptive sparsity pattern refinement
        if options.adaptive_sparsity && iter % 10 == 0 {
            refine_sparsity_pattern(&mut sparsity_info, &g_new_sparse, options)?;
        }

        // Update for next iteration
        x = x_new;
        f = f_new;
        g_sparse = g_new_sparse;
        g_dense = g_new_dense;
        iter += 1;
    }

    // Final check for bounds
    if let Some(bounds) = &base_opts.bounds {
        bounds.project(x.as_slice_mut().unwrap());
    }

    Ok(OptimizeResult {
        x,
        fun: f,
        iterations: iter,
        nit: iter,
        func_evals: nfev,
        nfev,
        success: iter < base_opts.max_iter,
        message: if iter < base_opts.max_iter {
            format!(
                "Sparse optimization terminated successfully. Jacobian non-zero density: {:.1}%",
                sparsity_info.jacobian_sparsity * 100.0
            )
        } else {
            "Maximum iterations reached.".to_string()
        },
        jacobian: Some(g_dense),
        hessian: None,
    })
}

/// Detect sparsity patterns in Jacobian and Hessian
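///
/// Probes each coordinate direction with a forward difference on the gradient:
/// entry `(j, i)` of the pattern is marked wherever
/// `|g(x + eps * e_i)[j] - g(x)[j]| > sparsity_threshold`. This costs `n`
/// extra gradient evaluations on top of the baseline gradient.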
fn detect_sparsity_patterns<F, G>(
    _fun: &mut F,
    grad: &mut G,
    x: &ArrayView1<f64>,
    options: &EfficientSparseOptions,
) -> Result<SparsityInfo, OptimizeError>
where
    F: FnMut(&ArrayView1<f64>) -> f64,
    G: FnMut(&ArrayView1<f64>) -> Array1<f64>,
{
    let n = x.len();
    let mut sparsity_info = SparsityInfo::new(n);

    // Detect Jacobian sparsity by probing with unit vectors
    let mut jacobian_pattern = Vec::new();
    let eps = options.sparse_fd_options.rel_step.unwrap_or(1e-8);
    let g0 = grad(x);

    let mut nnz_count = 0;
    for i in 0..n {
        let mut x_pert = x.to_owned();
        x_pert[i] += eps;
        let g_pert = grad(&x_pert.view());

        for j in 0..n {
            let diff = (g_pert[j] - g0[j]).abs();
            if diff > options.sparsity_threshold {
                jacobian_pattern.push((j, i, 1.0)); // (row, col, value)
                nnz_count += 1;
            }
        }
    }

    // Create sparse Jacobian pattern
    if nnz_count > 0 {
        let mut rows = Vec::new();
        let mut cols = Vec::new();
        let mut data = Vec::new();

        for (row, col, val) in jacobian_pattern {
            rows.push(row);
            cols.push(col);
            data.push(val);
        }

        sparsity_info.jacobian_pattern =
            Some(CsrArray::from_triplets(&rows, &cols, &data, (n, n), false)?);
        sparsity_info.jacobian_nnz = nnz_count;
        sparsity_info.jacobian_sparsity = nnz_count as f64 / (n * n) as f64;
    }

    // For Hessian sparsity detection, use a simpler approach based on gradient structure
    if options.use_sparse_hessian {
        // Assume Hessian has similar sparsity to Jacobian for now
        // A more sophisticated implementation would probe the Hessian directly
        sparsity_info.hessian_pattern = sparsity_info.jacobian_pattern.clone();
        sparsity_info.hessian_nnz = sparsity_info.jacobian_nnz;
        sparsity_info.hessian_sparsity = sparsity_info.jacobian_sparsity;
    }

    Ok(sparsity_info)
}

/// Compute sparse Newton direction using sparse Hessian
fn compute_sparse_newton_direction<F>(
    _fun: &mut F,
    _x: &ArrayView1<f64>,
    g_sparse: &CsrArray<f64>,
    sparsity_info: &SparsityInfo,
    _options: &EfficientSparseOptions,
) -> Result<Array1<f64>, OptimizeError>
where
    F: FnMut(&ArrayView1<f64>) -> f64 + Sync,
{
    if let Some(_hessian_pattern) = &sparsity_info.hessian_pattern {
        // TODO: Implement sparse Hessian computation with FnMut compatibility
        // For now, use a simple identity approximation (steepest descent)
        let g_dense = sparse_to_dense(g_sparse);
        Ok(-g_dense)
    } else {
        // Fallback to negative gradient
        Ok(-sparse_to_dense(g_sparse))
    }
}

// Helper functions for sparse operations

fn should_use_sparse(vector: &Array1<f64>, threshold: f64) -> bool {
    let nnz = vector.iter().filter(|&&x| x.abs() > 1e-12).count();
    let density = nnz as f64 / vector.len() as f64;
    density < threshold
}

/// Store a dense vector as a 1×n CSR row, dropping entries with |v| <= threshold
fn dense_to_sparse_vector(
    dense: &Array1<f64>,
    threshold: f64,
) -> Result<CsrArray<f64>, OptimizeError> {
    let n = dense.len();
    let mut rows = Vec::new();
    let mut cols = Vec::new();
    let mut data = Vec::new();

    for (i, &val) in dense.iter().enumerate() {
        if val.abs() > threshold {
            rows.push(0); // Single row vector
            cols.push(i);
            data.push(val);
        }
    }

    CsrArray::from_triplets(&rows, &cols, &data, (1, n), false)
        .map_err(|_| OptimizeError::ComputationError("Failed to create sparse vector".to_string()))
}

/// Reconstruct a dense vector from a 1×n CSR row
fn sparse_to_dense(sparse: &CsrArray<f64>) -> Array1<f64> {
    let n = sparse.ncols();
    let mut dense = Array1::zeros(n);

    // Extract non-zero values - this is a simplified implementation
    for col in 0..n {
        dense[col] = sparse.get(0, col);
    }

    dense
}

/// Euclidean norm of a sparse vector, computed from the stored entries only
fn sparse_vector_norm(sparse: &CsrArray<f64>) -> f64 {
    sparse.get_data().iter().map(|&x| x * x).sum::<f64>().sqrt()
}

fn sparse_vector_subtract(
    a: &CsrArray<f64>,
    b: &CsrArray<f64>,
) -> Result<CsrArray<f64>, OptimizeError> {
    // Simplified implementation - convert to dense, subtract, convert back
    let a_dense = sparse_to_dense(a);
    let b_dense = sparse_to_dense(b);
    let diff = &a_dense - &b_dense;
    dense_to_sparse_vector(&diff, 1e-12)
}

/// Zero out search-direction components that would immediately violate an
/// active bound
fn apply_bounds_projection(p: &Array1<f64>, x: &Array1<f64>, options: &Options) -> Array1<f64> {
    let mut p_proj = p.clone();

    if let Some(bounds) = &options.bounds {
        for i in 0..p.len() {
            let mut can_decrease = true;
            let mut can_increase = true;

            if let Some(lb) = bounds.lower[i] {
                if x[i] <= lb + options.eps {
                    can_decrease = false;
                }
            }
            if let Some(ub) = bounds.upper[i] {
                if x[i] >= ub - options.eps {
                    can_increase = false;
                }
            }

            if (p[i] < 0.0 && !can_decrease) || (p[i] > 0.0 && !can_increase) {
                p_proj[i] = 0.0;
            }
        }
    }

    p_proj
}

fn refine_sparsity_pattern(
    _sparsity_info: &mut SparsityInfo,
    _current_gradient: &CsrArray<f64>,
    _options: &EfficientSparseOptions,
) -> Result<(), OptimizeError> {
    // Adaptive refinement of sparsity pattern based on current gradient
    // This is a simplified version - a full implementation would track
    // the evolution of sparsity patterns over iterations
    Ok(())
}

// Additional helper functions (simplified implementations)

/// Identity matrix restricted to the diagonal entries present in `pattern`
fn create_sparse_identity(
    n: usize,
    pattern: &CsrArray<f64>,
) -> Result<CsrArray<f64>, OptimizeError> {
    let mut rows = Vec::new();
    let mut cols = Vec::new();
    let mut data = Vec::new();

    for i in 0..n {
        if pattern.get(i, i) != 0.0 {
            rows.push(i);
            cols.push(i);
            data.push(1.0);
        }
    }

    CsrArray::from_triplets(&rows, &cols, &data, (n, n), false).map_err(|_| {
        OptimizeError::ComputationError("Failed to create sparse identity".to_string())
    })
}

fn sparse_to_dense_matrix(sparse: &CsrArray<f64>) -> Array2<f64> {
    let (m, n) = (sparse.nrows(), sparse.ncols());
    let mut dense = Array2::zeros((m, n));

    for i in 0..m {
        for j in 0..n {
            dense[[i, j]] = sparse.get(i, j);
        }
    }

    dense
}

fn dense_to_sparse_matrix(
    dense: &Array2<f64>,
    pattern: &CsrArray<f64>,
) -> Result<CsrArray<f64>, OptimizeError> {
    let (m, n) = dense.dim();
    let mut rows = Vec::new();
    let mut cols = Vec::new();
    let mut data = Vec::new();

    for i in 0..m {
        for j in 0..n {
            if pattern.get(i, j) != 0.0 {
                rows.push(i);
                cols.push(j);
                data.push(dense[[i, j]]);
            }
        }
    }

    CsrArray::from_triplets(&rows, &cols, &data, (m, n), false)
        .map_err(|_| OptimizeError::ComputationError("Failed to create sparse matrix".to_string()))
}

/// Dense BFGS update of the inverse Hessian approximation:
/// `H_new = (I - rho*s*y^T) * H * (I - rho*y*s^T) + rho*s*s^T`,
/// with `rho = 1 / (s^T y)`
fn dense_bfgs_update(
    h_inv: &Array2<f64>,
    s: &Array1<f64>,
    y: &Array1<f64>,
) -> Result<Array2<f64>, OptimizeError> {
    let n = h_inv.nrows();
    let s_dot_y = s.dot(y);

    if s_dot_y <= 1e-10 {
        return Ok(h_inv.clone());
    }

    let rho = 1.0 / s_dot_y;
    let i_mat = Array2::eye(n);

    // BFGS update formula (the left factor uses s*y^T and the right factor
    // y*s^T so that the secant condition H_new * y = s holds)
    let s_col = s.clone().insert_axis(Axis(1));
    let y_row = y.clone().insert_axis(Axis(0));
    let s_y_t = s_col.dot(&y_row);
    let term1 = &i_mat - &(&s_y_t * rho);

    let y_col = y.clone().insert_axis(Axis(1));
    let s_row = s.clone().insert_axis(Axis(0));
    let y_s_t = y_col.dot(&s_row);
    let term2 = &i_mat - &(&y_s_t * rho);

    let term3 = term1.dot(h_inv);
    let result = term3.dot(&term2) + rho * s_col.dot(&s_row);

    Ok(result)
}

fn sparse_matrix_vector_product(
    matrix: &CsrArray<f64>,
    vector_sparse: &CsrArray<f64>,
) -> Result<Array1<f64>, OptimizeError> {
    // Simplified sparse matrix-vector product
    let vector_dense = sparse_to_dense(vector_sparse);
    let matrix_dense = sparse_to_dense_matrix(matrix);
    Ok(matrix_dense.dot(&vector_dense))
}

#[allow(dead_code)]
fn solve_sparse_linear_system(
    matrix: &CsrArray<f64>,
    rhs: &Array1<f64>,
) -> Result<Array1<f64>, OptimizeError> {
    // Simplified linear system solver - convert to dense
    // A real implementation would use sparse linear algebra libraries
    let _matrix_dense = sparse_to_dense_matrix(matrix);

    // Use simple Gauss elimination or other methods
    // For now, return a placeholder solution
    Ok(-rhs.clone()) // Negative gradient as fallback
}

#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_abs_diff_eq;

    #[test]
    fn test_sparsity_detection() {
        let n = 10;
        let options = EfficientSparseOptions::default();

        // Simple quadratic function with sparse Hessian
        let mut fun = |x: &ArrayView1<f64>| -> f64 { x[0].powi(2) + x[4].powi(2) + x[9].powi(2) };

        let mut grad = |x: &ArrayView1<f64>| -> Array1<f64> {
            let mut g = Array1::zeros(n);
            g[0] = 2.0 * x[0];
            g[4] = 2.0 * x[4];
            g[9] = 2.0 * x[9];
            g
        };

        let x = Array1::ones(n);
        let sparsity_info =
            detect_sparsity_patterns(&mut fun, &mut grad, &x.view(), &options).unwrap();

        // Should detect that only diagonal elements (0,0), (4,4), (9,9) are non-zero
        assert!(sparsity_info.jacobian_sparsity < 0.5); // Should be sparse
    }

    #[test]
    fn test_sparse_vector_operations() {
        let dense = Array1::from_vec(vec![1.0, 0.0, 2.0, 0.0, 0.0, 3.0]);
        let sparse = dense_to_sparse_vector(&dense, 1e-12).unwrap();

        let reconstructed = sparse_to_dense(&sparse);

        for i in 0..dense.len() {
            assert_abs_diff_eq!(dense[i], reconstructed[i], epsilon = 1e-10);
        }

        let norm_sparse = sparse_vector_norm(&sparse);
        let norm_dense = dense.mapv(|x| x.powi(2)).sum().sqrt();
        assert_abs_diff_eq!(norm_sparse, norm_dense, epsilon = 1e-10);
    }
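
    // A hedged extra check of the helpers above: subtraction should
    // round-trip through the 1-row CSR representation used throughout.
    #[test]
    fn test_sparse_vector_subtract() {
        let a = dense_to_sparse_vector(&Array1::from_vec(vec![3.0, 0.0, 1.0]), 1e-12).unwrap();
        let b = dense_to_sparse_vector(&Array1::from_vec(vec![1.0, 0.0, 4.0]), 1e-12).unwrap();
        let diff = sparse_vector_subtract(&a, &b).unwrap();
        let dense = sparse_to_dense(&diff);

        assert_abs_diff_eq!(dense[0], 2.0, epsilon = 1e-10);
        assert_abs_diff_eq!(dense[1], 0.0, epsilon = 1e-10);
        assert_abs_diff_eq!(dense[2], -3.0, epsilon = 1e-10);
    }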

    #[test]
    fn test_efficient_sparse_optimization() {
        // Test on a simple sparse quadratic problem
        let fun = |x: &ArrayView1<f64>| -> f64 { x[0].powi(2) + x[2].powi(2) + x[4].powi(2) };

        let grad = |x: &ArrayView1<f64>| -> Array1<f64> {
            let mut g = Array1::zeros(5);
            g[0] = 2.0 * x[0];
            g[2] = 2.0 * x[2];
            g[4] = 2.0 * x[4];
            g
        };

        let x0 = Array1::ones(5);
        let options = EfficientSparseOptions::default();

        let result = minimize_efficient_sparse_newton(fun, grad, x0, &options).unwrap();

        assert!(result.success);
        // Should converge to origin for all active variables
        assert_abs_diff_eq!(result.x[0], 0.0, epsilon = 1e-3);
        assert_abs_diff_eq!(result.x[2], 0.0, epsilon = 1e-3);
        assert_abs_diff_eq!(result.x[4], 0.0, epsilon = 1e-3);
    }
}
702}