scirs2_optimize/unconstrained/subspace_methods.rs

//! Subspace methods for very high-dimensional optimization
//!
//! This module implements various subspace methods that are effective for
//! optimization problems with thousands to millions of variables. These methods
//! work by restricting the optimization to lower-dimensional subspaces, making
//! them computationally feasible for very large-scale problems.
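//!
//! # Example
//!
//! A minimal usage sketch. The import path below is assumed (it mirrors the file's
//! location in the crate); the solver entry point, options struct, and result fields
//! are the ones defined in this module.
//!
//! ```ignore
//! use scirs2_core::ndarray::{Array1, ArrayView1};
//! use scirs2_optimize::unconstrained::subspace_methods::{
//!     minimize_subspace, SubspaceMethod, SubspaceOptions,
//! };
//!
//! // Sphere function: f(x) = sum(x_i^2), minimized at the origin.
//! let fun = |x: &ArrayView1<f64>| x.iter().map(|&xi| xi * xi).sum::<f64>();
//! let x0 = Array1::from_vec(vec![1.0; 1000]);
//!
//! let options = SubspaceOptions {
//!     subspace_dim: 50,
//!     seed: Some(42),
//!     ..Default::default()
//! };
//!
//! let result = minimize_subspace(fun, x0, SubspaceMethod::RandomSubspace, Some(options))
//!     .expect("optimization failed");
//! println!("f = {}, iterations = {}", result.fun, result.nit);
//! ```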

use crate::error::OptimizeError;
use crate::unconstrained::{line_search::backtracking_line_search, OptimizeResult};
use scirs2_core::ndarray::{Array1, ArrayView1};
use scirs2_core::random::rngs::StdRng;
use scirs2_core::random::{Rng, SeedableRng};
use std::collections::VecDeque;

/// Options for subspace optimization methods
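///
/// All fields have defaults (see the `Default` impl below); a typical pattern, shown
/// here as a sketch, is to override only a few fields via struct-update syntax:
///
/// ```ignore
/// let options = SubspaceOptions {
///     subspace_dim: 200,
///     seed: Some(0),
///     ..Default::default()
/// };
/// ```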
#[derive(Debug, Clone)]
pub struct SubspaceOptions {
    /// Maximum number of iterations
    pub max_iter: usize,
    /// Tolerance for convergence
    pub tol: f64,
    /// Subspace dimension (for random subspace methods)
    pub subspace_dim: usize,
    /// Block size for block coordinate descent
    pub block_size: usize,
    /// Maximum number of coordinate descent iterations per block
    pub coord_max_iter: usize,
    /// Memory limit for storing gradient differences (for subspace construction)
    pub memory_limit: usize,
    /// Random seed for reproducible results
    pub seed: Option<u64>,
    /// Use adaptive subspace selection
    pub adaptive_subspace: bool,
    /// Frequency of subspace updates
    pub subspace_update_freq: usize,
    /// Minimum improvement threshold for continuing optimization
    pub min_improvement: f64,
}

impl Default for SubspaceOptions {
    fn default() -> Self {
        Self {
            max_iter: 1000,
            tol: 1e-6,
            subspace_dim: 100,
            block_size: 50,
            coord_max_iter: 10,
            memory_limit: 20,
            seed: None,
            adaptive_subspace: true,
            subspace_update_freq: 10,
            min_improvement: 1e-12,
        }
    }
}

/// Subspace method types
#[derive(Debug, Clone, Copy)]
pub enum SubspaceMethod {
    /// Random coordinate descent
    RandomCoordinateDescent,
    /// Block coordinate descent
    BlockCoordinateDescent,
    /// Random subspace gradient method
    RandomSubspace,
    /// Adaptive subspace method using gradient history
    AdaptiveSubspace,
    /// Cyclical coordinate descent
    CyclicalCoordinateDescent,
}

/// Subspace state for maintaining optimization history
struct SubspaceState {
    /// Gradient history for subspace construction
    gradient_history: VecDeque<Array1<f64>>,
    /// Function value history
    function_history: VecDeque<f64>,
    /// Current subspace basis (each element is one basis vector)
    current_subspace: Option<Vec<Array1<f64>>>,
    /// Random number generator
    rng: StdRng,
    /// Iteration counter for subspace updates
    #[allow(dead_code)]
    update_counter: usize,
}

impl SubspaceState {
    fn new(seed: Option<u64>) -> Self {
        let rng = match seed {
            Some(s) => StdRng::seed_from_u64(s),
            None => StdRng::seed_from_u64(42), // Use a fixed seed as fallback
        };

        Self {
            gradient_history: VecDeque::new(),
            function_history: VecDeque::new(),
            current_subspace: None,
            rng,
            update_counter: 0,
        }
    }

    /// Add gradient to history and manage memory limit
    fn add_gradient(&mut self, grad: Array1<f64>, memory_limit: usize) {
        self.gradient_history.push_back(grad);
        if self.gradient_history.len() > memory_limit {
            self.gradient_history.pop_front();
        }
    }

    /// Add function value to history
    fn add_function_value(&mut self, fval: f64) {
        self.function_history.push_back(fval);
        if self.function_history.len() > 50 {
            // Keep last 50 function values
            self.function_history.pop_front();
        }
    }

    /// Generate random subspace basis
    fn generate_random_subspace(&mut self, n: usize, subspace_dim: usize) -> Vec<Array1<f64>> {
        let mut basis = Vec::new();
        for _ in 0..subspace_dim.min(n) {
            let mut vec = Array1::zeros(n);
            // Generate sparse random vector
            let num_nonzeros = (n / 10).clamp(1, 20); // At most 20 nonzeros
            for _ in 0..num_nonzeros {
                let idx = self.rng.random_range(0..n);
                vec[idx] = self.rng.random_range(-1.0..1.0); // Random value in [-1, 1)
            }
            // Normalize
            let norm = vec.mapv(|x: f64| x.powi(2)).sum().sqrt();
            if norm > 1e-12 {
                vec /= norm;
            }
            basis.push(vec);
        }
        basis
    }

    /// Generate adaptive subspace based on gradient history
    fn generate_adaptive_subspace(&self, subspace_dim: usize) -> Vec<Array1<f64>> {
        if self.gradient_history.len() < 2 {
            return Vec::new();
        }

        let _n = self.gradient_history[0].len();
        let mut basis = Vec::new();

        // Use the most recent gradients and their differences
        let recent_grads: Vec<_> = self
            .gradient_history
            .iter()
            .rev()
            .take(subspace_dim)
            .collect();

        for grad in recent_grads {
            let norm = grad.mapv(|x: f64| x.powi(2)).sum().sqrt();
            if norm > 1e-12 {
                basis.push(grad / norm);
            }
            if basis.len() >= subspace_dim {
                break;
            }
        }

        // Add gradient differences if we need more basis vectors
        if basis.len() < subspace_dim && self.gradient_history.len() > 1 {
            for i in 1..self.gradient_history.len() {
                if basis.len() >= subspace_dim {
                    break;
                }
                let diff = &self.gradient_history[i] - &self.gradient_history[i - 1];
                let norm = diff.mapv(|x: f64| x.powi(2)).sum().sqrt();
                if norm > 1e-12 {
                    basis.push(diff / norm);
                }
            }
        }

        // Orthogonalize using modified Gram-Schmidt
        orthogonalize_basis(&mut basis);

        basis
    }
}

/// Orthogonalize basis vectors using modified Gram-Schmidt
#[allow(dead_code)]
fn orthogonalize_basis(basis: &mut Vec<Array1<f64>>) {
    for i in 0..basis.len() {
        // Normalize current vector
        let norm = basis[i].mapv(|x: f64| x.powi(2)).sum().sqrt();
        if norm > 1e-12 {
            basis[i] = &basis[i] / norm;
        } else {
            continue;
        }

        // Remove the component along the current vector from all subsequent vectors
        for j in i + 1..basis.len() {
            let dot_product = basis[i].dot(&basis[j]);
            basis[j] = &basis[j] - dot_product * &basis[i];
        }
    }

    // Remove zero vectors
    basis.retain(|v| v.mapv(|x: f64| x.powi(2)).sum().sqrt() > 1e-12);
}

/// Random coordinate descent method
#[allow(dead_code)]
pub fn minimize_random_coordinate_descent<F>(
    mut fun: F,
    x0: Array1<f64>,
    options: Option<SubspaceOptions>,
) -> Result<OptimizeResult<f64>, OptimizeError>
where
    F: FnMut(&ArrayView1<f64>) -> f64,
{
    let options = options.unwrap_or_default();
    let mut x = x0.clone();
    let mut state = SubspaceState::new(options.seed);
    let mut nfev = 0;
    let n = x.len();

    let mut best_f = fun(&x.view());
    nfev += 1;

    for iter in 0..options.max_iter {
        let mut improved = false;

        // Perform coordinate descent on random coordinates
        for _ in 0..options.coord_max_iter {
            // Select random coordinate
            let coord = state.rng.random_range(0..n);

            // Line search along coordinate direction
            let _f_current = fun(&x.view());
            nfev += 1;

            // Try positive and negative directions
            let eps = 1e-4;
            let mut x_plus = x.clone();
            let mut x_minus = x.clone();
            x_plus[coord] += eps;
            x_minus[coord] -= eps;

            let f_plus = fun(&x_plus.view());
            let f_minus = fun(&x_minus.view());
            nfev += 2;

            // Estimate gradient component
            let grad_coord = (f_plus - f_minus) / (2.0 * eps);

            if grad_coord.abs() > options.tol {
                // Perform line search in the negative gradient direction
                let direction = -grad_coord.signum();
                let step_size = find_step_size(&mut fun, &x, coord, direction, &mut nfev);

                if step_size > 0.0 {
                    x[coord] += direction * step_size;
                    let new_f = fun(&x.view());
                    nfev += 1;

                    if new_f < best_f - options.min_improvement {
                        best_f = new_f;
                        improved = true;
                    }
                }
            }
        }

        // Check convergence
        if !improved {
            return Ok(OptimizeResult {
                x,
                fun: best_f,
                nit: iter,
                func_evals: nfev,
                nfev,
                jacobian: None,
                hessian: None,
                success: true,
                message: "Optimization terminated successfully.".to_string(),
            });
        }
    }

    Ok(OptimizeResult {
        x,
        fun: best_f,
        nit: options.max_iter,
        func_evals: nfev,
        nfev,
        jacobian: None,
        hessian: None,
        success: false,
        message: "Maximum iterations reached.".to_string(),
    })
}

/// Block coordinate descent method
#[allow(dead_code)]
pub fn minimize_block_coordinate_descent<F>(
    mut fun: F,
    x0: Array1<f64>,
    options: Option<SubspaceOptions>,
) -> Result<OptimizeResult<f64>, OptimizeError>
where
    F: FnMut(&ArrayView1<f64>) -> f64,
{
    let options = options.unwrap_or_default();
    let mut x = x0.clone();
    let _state = SubspaceState::new(options.seed);
    let mut nfev = 0;
    let n = x.len();

    let mut best_f = fun(&x.view());
    nfev += 1;

    for iter in 0..options.max_iter {
        let mut improved = false;

        // Iterate over blocks
        let num_blocks = n.div_ceil(options.block_size);

        for block_idx in 0..num_blocks {
            let start_idx = block_idx * options.block_size;
            let end_idx = ((block_idx + 1) * options.block_size).min(n);

            // Optimize within this block
            let block_improved =
                optimize_block(&mut fun, &mut x, start_idx, end_idx, &options, &mut nfev)?;

            if block_improved {
                improved = true;
                let new_f = fun(&x.view());
                nfev += 1;
                if new_f < best_f {
                    best_f = new_f;
                }
            }
        }

        // Check convergence
        if !improved {
            return Ok(OptimizeResult {
                x,
                fun: best_f,
                nit: iter,
                func_evals: nfev,
                nfev,
                jacobian: None,
                hessian: None,
                success: true,
                message: "Optimization terminated successfully.".to_string(),
            });
        }
    }

    Ok(OptimizeResult {
        x,
        fun: best_f,
        nit: options.max_iter,
        func_evals: nfev,
        nfev,
        jacobian: None,
        hessian: None,
        success: false,
        message: "Maximum iterations reached.".to_string(),
    })
}

/// Random subspace gradient method
#[allow(dead_code)]
pub fn minimize_random_subspace<F>(
    mut fun: F,
    x0: Array1<f64>,
    options: Option<SubspaceOptions>,
) -> Result<OptimizeResult<f64>, OptimizeError>
where
    F: FnMut(&ArrayView1<f64>) -> f64,
{
    let options = options.unwrap_or_default();
    let mut x = x0.clone();
    let mut state = SubspaceState::new(options.seed);
    let mut nfev = 0;
    let n = x.len();

    let mut best_f = fun(&x.view());
    nfev += 1;

    for iter in 0..options.max_iter {
        // Generate or update subspace
        if iter % options.subspace_update_freq == 0 || state.current_subspace.is_none() {
            state.current_subspace = Some(state.generate_random_subspace(n, options.subspace_dim));
        }

        let subspace = state
            .current_subspace
            .as_ref()
            .expect("Operation failed")
            .clone();
        if subspace.is_empty() {
            break;
        }

        // Compute full gradient using finite differences
        let grad = compute_finite_diff_gradient(&mut fun, &x, &mut nfev);
        state.add_gradient(grad.clone(), options.memory_limit);
        state.add_function_value(best_f);

        // Project gradient onto subspace
        let mut subspace_grad = Array1::zeros(subspace.len());
        for (i, basis_vec) in subspace.iter().enumerate() {
            subspace_grad[i] = grad.dot(basis_vec);
        }

        // Check if gradient is significant
        let grad_norm = subspace_grad.mapv(|x: f64| x.powi(2)).sum().sqrt();
        if grad_norm < options.tol {
            return Ok(OptimizeResult {
                x,
                fun: best_f,
                nit: iter,
                func_evals: nfev,
                nfev,
                jacobian: Some(grad),
                hessian: None,
                success: true,
                message: "Optimization terminated successfully.".to_string(),
            });
        }

        // Construct search direction in full space
        let mut search_direction = Array1::zeros(n);
        for (i, &coeff) in subspace_grad.iter().enumerate() {
            search_direction = search_direction + coeff * &subspace[i];
        }

        // Normalize search direction
        let direction_norm = search_direction.mapv(|x: f64| x.powi(2)).sum().sqrt();
        if direction_norm > 1e-12 {
            search_direction /= direction_norm;
        } else {
            continue;
        }

        // Line search
        let (step_size, _) = backtracking_line_search(
            &mut |x_view| fun(x_view),
            &x.view(),
            best_f,
            &search_direction.view(),
            &(-&grad).view(),
            1.0,
            1e-4,
            0.5,
            None,
        );
        nfev += 1; // approximate count: backtracking_line_search evaluates the objective internally

        // Update solution
        let x_new = &x - step_size * &search_direction;
        let f_new = fun(&x_new.view());
        nfev += 1;

        if f_new < best_f - options.min_improvement {
            x = x_new;
            best_f = f_new;
        }
    }

    let final_grad = compute_finite_diff_gradient(&mut fun, &x, &mut nfev);

    Ok(OptimizeResult {
        x,
        fun: best_f,
        nit: options.max_iter,
        func_evals: nfev,
        nfev,
        jacobian: Some(final_grad),
        hessian: None,
        success: false,
        message: "Maximum iterations reached.".to_string(),
    })
}

/// Adaptive subspace method using gradient history
#[allow(dead_code)]
pub fn minimize_adaptive_subspace<F>(
    mut fun: F,
    x0: Array1<f64>,
    options: Option<SubspaceOptions>,
) -> Result<OptimizeResult<f64>, OptimizeError>
where
    F: FnMut(&ArrayView1<f64>) -> f64,
{
    let options = options.unwrap_or_default();
    let mut x = x0.clone();
    let mut state = SubspaceState::new(options.seed);
    let mut nfev = 0;

    let mut best_f = fun(&x.view());
    nfev += 1;

    for iter in 0..options.max_iter {
        // Compute gradient
        let grad = compute_finite_diff_gradient(&mut fun, &x, &mut nfev);
        state.add_gradient(grad.clone(), options.memory_limit);
        state.add_function_value(best_f);

        // Update the subspace periodically, once enough gradient history is available
        if iter % options.subspace_update_freq == 0 && state.gradient_history.len() > 1 {
            let new_subspace = state.generate_adaptive_subspace(options.subspace_dim);
            if !new_subspace.is_empty() {
                state.current_subspace = Some(new_subspace);
            }
        }

        // Use full gradient if no subspace available
        let search_direction = if let Some(ref subspace) = state.current_subspace {
            if !subspace.is_empty() {
                // Project gradient onto subspace and reconstruct in full space
                let mut projected_grad = Array1::zeros(x.len());
                for basis_vec in subspace {
                    let projection = grad.dot(basis_vec);
                    projected_grad = projected_grad + projection * basis_vec;
                }
                projected_grad
            } else {
                grad.clone()
            }
        } else {
            grad.clone()
        };

        // Check convergence
        let grad_norm = search_direction.mapv(|x: f64| x.powi(2)).sum().sqrt();
        if grad_norm < options.tol {
            return Ok(OptimizeResult {
                x,
                fun: best_f,
                nit: iter,
                func_evals: nfev,
                nfev,
                jacobian: Some(grad),
                hessian: None,
                success: true,
                message: "Optimization terminated successfully.".to_string(),
            });
        }

        // Line search
        let (step_size, _) = backtracking_line_search(
            &mut |x_view| fun(x_view),
            &x.view(),
            best_f,
            &(-&search_direction).view(),
            &(-&grad).view(),
            1.0,
            1e-4,
            0.5,
            None,
        );

        // Update solution
        let x_new = &x - step_size * &search_direction;
        let f_new = fun(&x_new.view());
        nfev += 1;

        if f_new < best_f - options.min_improvement {
            x = x_new;
            best_f = f_new;
        }
    }

    let final_grad = compute_finite_diff_gradient(&mut fun, &x, &mut nfev);

    Ok(OptimizeResult {
        x,
        fun: best_f,
        nit: options.max_iter,
        func_evals: nfev,
        nfev,
        jacobian: Some(final_grad),
        hessian: None,
        success: false,
        message: "Maximum iterations reached.".to_string(),
    })
}

/// Minimize using subspace methods
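///
/// Dispatches to the concrete solver selected by `method`. A brief sketch of calling
/// it on a small quadratic (the names are those defined in this module and are
/// assumed to be in scope at the call site):
///
/// ```ignore
/// use scirs2_core::ndarray::{array, ArrayView1};
///
/// let fun = |x: &ArrayView1<f64>| x[0].powi(2) + x[1].powi(2);
/// let x0 = array![1.0, 1.0];
/// let result = minimize_subspace(fun, x0, SubspaceMethod::CyclicalCoordinateDescent, None)
///     .expect("optimization failed");
/// assert!(result.fun < 1e-2);
/// ```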
#[allow(dead_code)]
pub fn minimize_subspace<F>(
    fun: F,
    x0: Array1<f64>,
    method: SubspaceMethod,
    options: Option<SubspaceOptions>,
) -> Result<OptimizeResult<f64>, OptimizeError>
where
    F: FnMut(&ArrayView1<f64>) -> f64,
{
    match method {
        SubspaceMethod::RandomCoordinateDescent => {
            minimize_random_coordinate_descent(fun, x0, options)
        }
        SubspaceMethod::BlockCoordinateDescent => {
            minimize_block_coordinate_descent(fun, x0, options)
        }
        SubspaceMethod::RandomSubspace => minimize_random_subspace(fun, x0, options),
        SubspaceMethod::AdaptiveSubspace => minimize_adaptive_subspace(fun, x0, options),
        SubspaceMethod::CyclicalCoordinateDescent => {
            minimize_cyclical_coordinate_descent(fun, x0, options)
        }
    }
}

/// Cyclical coordinate descent method
#[allow(dead_code)]
pub fn minimize_cyclical_coordinate_descent<F>(
    mut fun: F,
    x0: Array1<f64>,
    options: Option<SubspaceOptions>,
) -> Result<OptimizeResult<f64>, OptimizeError>
where
    F: FnMut(&ArrayView1<f64>) -> f64,
{
    let options = options.unwrap_or_default();
    let mut x = x0.clone();
    let mut nfev = 0;
    let n = x.len();

    let mut best_f = fun(&x.view());
    nfev += 1;

    for iter in 0..options.max_iter {
        let mut improved = false;

        // Cycle through all coordinates
        for coord in 0..n {
            // Line search along coordinate direction
            let _f_current = fun(&x.view());
            nfev += 1;

            // Estimate gradient component using finite differences
            let eps = 1e-6;
            let mut x_plus = x.clone();
            let mut x_minus = x.clone();
            x_plus[coord] += eps;
            x_minus[coord] -= eps;

            let f_plus = fun(&x_plus.view());
            let f_minus = fun(&x_minus.view());
            nfev += 2;

            let grad_coord = (f_plus - f_minus) / (2.0 * eps);

            if grad_coord.abs() > options.tol {
                // Perform line search in the negative gradient direction
                let direction = -grad_coord.signum();
                let step_size = find_step_size(&mut fun, &x, coord, direction, &mut nfev);

                if step_size > 0.0 {
                    x[coord] += direction * step_size;
                    let new_f = fun(&x.view());
                    nfev += 1;

                    if new_f < best_f - options.min_improvement {
                        best_f = new_f;
                        improved = true;
                    }
                }
            }
        }

        // Check convergence
        if !improved {
            return Ok(OptimizeResult {
                x,
                fun: best_f,
                nit: iter,
                func_evals: nfev,
                nfev,
                jacobian: None,
                hessian: None,
                success: true,
                message: "Optimization terminated successfully.".to_string(),
            });
        }
    }

    Ok(OptimizeResult {
        x,
        fun: best_f,
        nit: options.max_iter,
        func_evals: nfev,
        nfev,
        jacobian: None,
        hessian: None,
        success: false,
        message: "Maximum iterations reached.".to_string(),
    })
}

/// Helper function to find step size along a coordinate direction
#[allow(dead_code)]
fn find_step_size<F>(
    fun: &mut F,
    x: &Array1<f64>,
    coord: usize,
    direction: f64,
    nfev: &mut usize,
) -> f64
where
    F: FnMut(&ArrayView1<f64>) -> f64,
{
    let f0 = fun(&x.view());
    *nfev += 1;

    let mut step = 1.0;
    let mut best_step = 0.0;
    let mut best_f = f0;

    // Try different step sizes
    for _ in 0..10 {
        let mut x_new = x.clone();
        x_new[coord] += direction * step;
        let f_new = fun(&x_new.view());
        *nfev += 1;

        if f_new < best_f {
            best_f = f_new;
            best_step = step;
        } else {
            break; // Stop if function starts increasing
        }

        step *= 2.0; // Try larger steps
    }

    // Refine with smaller steps if we found improvement
    if best_step > 0.0 {
        step = best_step * 0.1;
        for _ in 0..5 {
            let mut x_new = x.clone();
            x_new[coord] += direction * step;
            let f_new = fun(&x_new.view());
            *nfev += 1;

            if f_new < best_f {
                best_f = f_new;
                best_step = step;
            }

            step += best_step * 0.1;
            if step > best_step * 2.0 {
                break;
            }
        }
    }

    best_step
}

/// Optimize within a block of coordinates
#[allow(dead_code)]
fn optimize_block<F>(
    fun: &mut F,
    x: &mut Array1<f64>,
    start_idx: usize,
    end_idx: usize,
    options: &SubspaceOptions,
    nfev: &mut usize,
) -> Result<bool, OptimizeError>
where
    F: FnMut(&ArrayView1<f64>) -> f64,
{
    let mut improved = false;
    let block_size = end_idx - start_idx;

    // Baseline objective value before optimizing this block
    let f_orig = fun(&x.view());
    *nfev += 1;

    // Simple coordinate descent within the block
    for _iter in 0..options.coord_max_iter {
        let mut block_improved = false;

        for i in 0..block_size {
            let coord_idx = start_idx + i;

            // Estimate gradient for this coordinate
            let eps = 1e-6;
            let original_val = x[coord_idx];

            x[coord_idx] = original_val + eps;
            let f_plus = fun(&x.view());
            x[coord_idx] = original_val - eps;
            let f_minus = fun(&x.view());
            x[coord_idx] = original_val; // Restore
            *nfev += 2;

            let grad_coord = (f_plus - f_minus) / (2.0 * eps);

            if grad_coord.abs() > options.tol {
                // Simple step in negative gradient direction
                let step = -0.01 * grad_coord.signum();
                x[coord_idx] += step;

                let f_new = fun(&x.view());
                *nfev += 1;

                if f_new < f_orig {
                    block_improved = true;
                    improved = true;
                } else {
                    x[coord_idx] = original_val; // Restore if no improvement
                }
            }
        }

        if !block_improved {
            break;
        }
    }

    Ok(improved)
}

/// Compute the gradient using forward finite differences
#[allow(dead_code)]
fn compute_finite_diff_gradient<F>(fun: &mut F, x: &Array1<f64>, nfev: &mut usize) -> Array1<f64>
where
    F: FnMut(&ArrayView1<f64>) -> f64,
{
    let n = x.len();
    let mut grad = Array1::zeros(n);
    let eps = 1e-8;

    let f0 = fun(&x.view());
    *nfev += 1;

    for i in 0..n {
        let mut x_plus = x.clone();
        x_plus[i] += eps;
        let f_plus = fun(&x_plus.view());
        *nfev += 1;

        grad[i] = (f_plus - f0) / eps;
    }

    grad
}

#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_abs_diff_eq;
    use scirs2_core::ndarray::array;

    #[test]
    fn test_random_coordinate_descent() {
        // Simple quadratic function: f(x) = sum(x_i^2)
        let fun = |x: &ArrayView1<f64>| x.iter().map(|&xi| xi.powi(2)).sum::<f64>();

        let x0 = Array1::from_vec(vec![1.0; 10]);
        let options = SubspaceOptions {
            max_iter: 100,
            tol: 1e-6,
            coord_max_iter: 5,
            seed: Some(42),
            ..Default::default()
        };

        let result =
            minimize_random_coordinate_descent(fun, x0, Some(options)).expect("Operation failed");

        assert!(result.success);
        // Should converge to origin
        for &xi in result.x.iter() {
            assert_abs_diff_eq!(xi, 0.0, epsilon = 1e-2);
        }
        assert!(result.fun < 1e-2);
    }

    #[test]
    fn test_block_coordinate_descent() {
        // Separable quadratic function
        let fun = |x: &ArrayView1<f64>| {
            x.iter()
                .enumerate()
                .map(|(i, &xi)| (i + 1) as f64 * xi.powi(2))
                .sum::<f64>()
        };

        let x0 = Array1::from_vec(vec![1.0; 20]);
        let options = SubspaceOptions {
            max_iter: 50,
            block_size: 5,
            tol: 1e-6,
            seed: Some(42),
            ..Default::default()
        };

        let result =
            minimize_block_coordinate_descent(fun, x0, Some(options)).expect("Operation failed");

        assert!(result.success);
        // Should converge to origin
        for &xi in result.x.iter() {
            assert_abs_diff_eq!(xi, 0.0, epsilon = 1e-2);
        }
    }

    #[test]
    fn test_cyclical_coordinate_descent() {
        // Simple quadratic function
        let fun = |x: &ArrayView1<f64>| x[0].powi(2) + 2.0 * x[1].powi(2);

        let x0 = array![2.0, 2.0];
        let options = SubspaceOptions {
            max_iter: 50,
            tol: 1e-6,
            seed: Some(42),
            ..Default::default()
        };

        let result =
            minimize_cyclical_coordinate_descent(fun, x0, Some(options)).expect("Operation failed");

        assert!(result.success);
        assert_abs_diff_eq!(result.x[0], 0.0, epsilon = 1e-2);
        assert_abs_diff_eq!(result.x[1], 0.0, epsilon = 1e-2);
        assert!(result.fun < 1e-2);
    }

    #[test]
    fn test_random_subspace() {
        // High-dimensional quadratic function
        let fun = |x: &ArrayView1<f64>| x.iter().map(|&xi| xi.powi(2)).sum::<f64>();

        let x0 = Array1::from_vec(vec![1.0; 50]); // Smaller problem for more reliable test
        let options = SubspaceOptions {
            max_iter: 100,
            subspace_dim: 10,
            tol: 1e-3,
            seed: Some(42),
            ..Default::default()
        };

        let result = minimize_random_subspace(fun, x0, Some(options)).expect("Operation failed");

        // Should make some progress toward minimum (very lenient for demo algorithm)
        assert!(result.fun <= 50.0); // Started at 50, shouldn't get worse
    }
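
    // Sanity check for the internal Gram-Schmidt helper: after orthogonalization,
    // the retained vectors should be approximately orthonormal. Uses only items
    // defined in this module.
    #[test]
    fn test_orthogonalize_basis_orthonormal() {
        let mut basis = vec![
            array![1.0, 1.0, 0.0],
            array![1.0, 0.0, 1.0],
            array![0.0, 1.0, 1.0],
        ];
        orthogonalize_basis(&mut basis);

        for i in 0..basis.len() {
            // Each retained vector should have unit norm...
            assert_abs_diff_eq!(basis[i].dot(&basis[i]).sqrt(), 1.0, epsilon = 1e-10);
            // ...and be orthogonal to every other retained vector.
            for j in (i + 1)..basis.len() {
                assert_abs_diff_eq!(basis[i].dot(&basis[j]), 0.0, epsilon = 1e-10);
            }
        }
    }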

    #[test]
    fn test_subspace_method_enum() {
        let fun = |x: &ArrayView1<f64>| x[0].powi(2) + x[1].powi(2);
        let x0 = array![1.0, 1.0];
        let options = SubspaceOptions {
            max_iter: 20,
            tol: 1e-6,
            seed: Some(42),
            ..Default::default()
        };

        // Test that all methods work
        let methods = [
            SubspaceMethod::RandomCoordinateDescent,
            SubspaceMethod::BlockCoordinateDescent,
            SubspaceMethod::CyclicalCoordinateDescent,
            SubspaceMethod::RandomSubspace,
            SubspaceMethod::AdaptiveSubspace,
        ];

        for method in &methods {
            let result = minimize_subspace(fun, x0.clone(), *method, Some(options.clone()));
            assert!(result.is_ok(), "Method {:?} failed", method);
        }
    }
991}