// scirs2_optimize/parallel/mod.rs

//! Parallel computation utilities for optimization algorithms
//!
//! This module provides parallel implementations of computationally intensive
//! operations used in optimization algorithms, including:
//! - Parallel function evaluations
//! - Parallel gradient approximations
//! - Multi-start optimization with parallel workers
//!
//! # Example
//!
//! ```
//! use scirs2_optimize::parallel::{parallel_finite_diff_gradient, ParallelOptions};
//! use ndarray::{array, ArrayView1};
//!
//! fn objective(x: &ArrayView1<f64>) -> f64 {
//!     x.iter().map(|&xi| xi.powi(2)).sum()
//! }
//!
//! let x = array![1.0, 2.0, 3.0];
//! let options = ParallelOptions::default();
//! let gradient = parallel_finite_diff_gradient(objective, x.view(), &options);
//! ```

use ndarray::{Array1, ArrayView1};
use scirs2_core::parallel_ops::*;

/// Options for parallel computation
///
/// Controls when and how the parallel code paths in this module are taken.
#[derive(Debug, Clone)]
pub struct ParallelOptions {
    /// Number of worker threads (None = use core parallel default)
    pub num_workers: Option<usize>,

    /// Minimum problem size to enable parallelization
    /// (interpreted per routine: dimension, batch length, number of starts,
    /// or number of candidate step sizes)
    pub min_parallel_size: usize,

    /// Chunk size for parallel operations
    // NOTE(review): not consulted by any routine visible in this file —
    // presumably reserved for future chunked scheduling; confirm before relying on it.
    pub chunk_size: usize,

    /// Enable parallel function evaluations
    pub parallel_evaluations: bool,

    /// Enable parallel gradient computation
    pub parallel_gradient: bool,
}
45
46impl Default for ParallelOptions {
47    fn default() -> Self {
48        ParallelOptions {
49            num_workers: None,
50            min_parallel_size: 10,
51            chunk_size: 1,
52            parallel_evaluations: true,
53            parallel_gradient: true,
54        }
55    }
56}
57
58/// Parallel finite difference gradient approximation
59///
60/// Computes the gradient using parallel function evaluations when
61/// the dimension is large enough to benefit from parallelization.
62pub fn parallel_finite_diff_gradient<F>(
63    f: F,
64    x: ArrayView1<f64>,
65    options: &ParallelOptions,
66) -> Array1<f64>
67where
68    F: Fn(&ArrayView1<f64>) -> f64 + Sync,
69{
70    let n = x.len();
71    let eps = 1e-8;
72
73    // For small problems, use sequential computation
74    if n < options.min_parallel_size || !options.parallel_gradient {
75        return sequential_finite_diff_gradient(f, x, eps);
76    }
77
78    // Use core parallel abstractions
79    compute_parallel_gradient(&f, x, eps)
80}
81
82/// Sequential finite difference gradient (fallback)
83fn sequential_finite_diff_gradient<F>(f: F, x: ArrayView1<f64>, eps: f64) -> Array1<f64>
84where
85    F: Fn(&ArrayView1<f64>) -> f64,
86{
87    let n = x.len();
88    let mut grad = Array1::zeros(n);
89    let fx = f(&x);
90
91    for i in 0..n {
92        let mut x_plus = x.to_owned();
93        x_plus[i] += eps;
94        let fx_plus = f(&x_plus.view());
95        grad[i] = (fx_plus - fx) / eps;
96    }
97
98    grad
99}
100
101/// Compute gradient in parallel
102fn compute_parallel_gradient<F>(f: &F, x: ArrayView1<f64>, eps: f64) -> Array1<f64>
103where
104    F: Fn(&ArrayView1<f64>) -> f64 + Sync,
105{
106    let n = x.len();
107    let fx = f(&x);
108
109    // Parallel computation of gradient components
110    let grad_vec: Vec<f64> = (0..n)
111        .into_par_iter()
112        .map(|i| {
113            let mut x_plus = x.to_owned();
114            x_plus[i] += eps;
115            let fx_plus = f(&x_plus.view());
116            (fx_plus - fx) / eps
117        })
118        .collect();
119
120    Array1::from_vec(grad_vec)
121}
122
123/// Parallel evaluation of multiple points
124///
125/// Evaluates the objective function at multiple points in parallel.
126pub fn parallel_evaluate_batch<F>(
127    f: F,
128    points: &[Array1<f64>],
129    options: &ParallelOptions,
130) -> Vec<f64>
131where
132    F: Fn(&ArrayView1<f64>) -> f64 + Sync,
133{
134    if points.len() < options.min_parallel_size || !options.parallel_evaluations {
135        // Sequential evaluation for small batches
136        points.iter().map(|x| f(&x.view())).collect()
137    } else {
138        // Parallel evaluation
139        points.par_iter().map(|x| f(&x.view())).collect()
140    }
141}
142
/// Parallel multi-start optimization
///
/// Runs multiple optimization instances from different starting points in parallel.
#[allow(dead_code)] // Bounds will be used in future implementation
pub struct ParallelMultiStart<F> {
    /// Objective function shared (by reference) across all workers.
    objective: F,
    /// Per-dimension (lower, upper) bounds; stored but not yet consulted
    /// (see the `dead_code` allowance above).
    bounds: Vec<(f64, f64)>,
    /// Parallelization settings (thresholds, worker count, ...).
    options: ParallelOptions,
}
152
153impl<F> ParallelMultiStart<F>
154where
155    F: Fn(&ArrayView1<f64>) -> f64 + Sync + Send,
156{
157    /// Create a new parallel multi-start optimizer
158    pub fn new(objective: F, bounds: Vec<(f64, f64)>, options: ParallelOptions) -> Self {
159        ParallelMultiStart {
160            objective,
161            bounds,
162            options,
163        }
164    }
165
166    /// Run optimization from multiple starting points
167    pub fn run<O>(&self, starting_points: Vec<Array1<f64>>, optimizer: O) -> Vec<OptimizationResult>
168    where
169        O: Fn(&Array1<f64>, &F) -> OptimizationResult + Sync,
170    {
171        if starting_points.len() < self.options.min_parallel_size {
172            // Sequential execution for small number of starts
173            starting_points
174                .iter()
175                .map(|x0| optimizer(x0, &self.objective))
176                .collect()
177        } else {
178            // Parallel execution
179            starting_points
180                .par_iter()
181                .map(|x0| optimizer(x0, &self.objective))
182                .collect()
183        }
184    }
185
186    /// Find the best result among all runs
187    pub fn best_result(results: &[OptimizationResult]) -> Option<&OptimizationResult> {
188        results
189            .iter()
190            .min_by(|a, b| a.function_value.partial_cmp(&b.function_value).unwrap())
191    }
192}
193
/// Result from an optimization run
#[derive(Debug, Clone)]
pub struct OptimizationResult {
    /// Final parameter vector.
    pub x: Array1<f64>,
    /// Objective value at `x`.
    pub function_value: f64,
    /// Number of iterations the optimizer performed.
    pub iterations: usize,
    /// Whether the optimizer reported success/convergence.
    pub success: bool,
}
202
203/// Parallel computation of Hessian matrix using finite differences
204pub fn parallel_finite_diff_hessian<F>(
205    f: &F,
206    x: ArrayView1<f64>,
207    gradient: Option<&Array1<f64>>,
208    options: &ParallelOptions,
209) -> Array1<f64>
210// Returns upper triangle in row-major order
211where
212    F: Fn(&ArrayView1<f64>) -> f64 + Sync,
213{
214    let n = x.len();
215    let eps = 1e-8;
216
217    // For small problems, use sequential computation
218    if n < options.min_parallel_size {
219        return sequential_finite_diff_hessian(f, x, gradient, eps);
220    }
221
222    // Compute gradient if not provided
223    let _grad = match gradient {
224        Some(g) => g.clone(),
225        None => parallel_finite_diff_gradient(f, x, options),
226    };
227
228    // Number of elements in upper triangle (including diagonal)
229    let num_elements = n * (n + 1) / 2;
230
231    // Parallel computation of Hessian elements
232    let hessian_elements: Vec<f64> = (0..num_elements)
233        .into_par_iter()
234        .map(|idx| {
235            // Convert linear index to (i, j) coordinates
236            let (i, j) = index_to_upper_triangle(idx, n);
237
238            if i == j {
239                // Diagonal element: ∂²f/∂xᵢ²
240                let mut x_plus = x.to_owned();
241                let mut x_minus = x.to_owned();
242                x_plus[i] += eps;
243                x_minus[i] -= eps;
244
245                let f_plus = f(&x_plus.view());
246                let f_minus = f(&x_minus.view());
247                let f_center = f(&x);
248
249                (f_plus - 2.0 * f_center + f_minus) / (eps * eps)
250            } else {
251                // Off-diagonal element: ∂²f/∂xᵢ∂xⱼ
252                let mut x_pp = x.to_owned();
253                let mut x_pm = x.to_owned();
254                let mut x_mp = x.to_owned();
255                let mut x_mm = x.to_owned();
256
257                x_pp[i] += eps;
258                x_pp[j] += eps;
259                x_pm[i] += eps;
260                x_pm[j] -= eps;
261                x_mp[i] -= eps;
262                x_mp[j] += eps;
263                x_mm[i] -= eps;
264                x_mm[j] -= eps;
265
266                let f_pp = f(&x_pp.view());
267                let f_pm = f(&x_pm.view());
268                let f_mp = f(&x_mp.view());
269                let f_mm = f(&x_mm.view());
270
271                (f_pp - f_pm - f_mp + f_mm) / (4.0 * eps * eps)
272            }
273        })
274        .collect();
275
276    Array1::from_vec(hessian_elements)
277}
278
279/// Sequential Hessian computation (fallback)
280fn sequential_finite_diff_hessian<F>(
281    f: &F,
282    x: ArrayView1<f64>,
283    _gradient: Option<&Array1<f64>>,
284    eps: f64,
285) -> Array1<f64>
286where
287    F: Fn(&ArrayView1<f64>) -> f64,
288{
289    let n = x.len();
290    let num_elements = n * (n + 1) / 2;
291    let mut hessian = Array1::zeros(num_elements);
292
293    for idx in 0..num_elements {
294        let (i, j) = index_to_upper_triangle(idx, n);
295
296        if i == j {
297            // Diagonal element
298            let mut x_plus = x.to_owned();
299            let mut x_minus = x.to_owned();
300            x_plus[i] += eps;
301            x_minus[i] -= eps;
302
303            let f_plus = f(&x_plus.view());
304            let f_minus = f(&x_minus.view());
305            let f_center = f(&x);
306
307            hessian[idx] = (f_plus - 2.0 * f_center + f_minus) / (eps * eps);
308        } else {
309            // Off-diagonal element
310            let mut x_pp = x.to_owned();
311            let mut x_pm = x.to_owned();
312            let mut x_mp = x.to_owned();
313            let mut x_mm = x.to_owned();
314
315            x_pp[i] += eps;
316            x_pp[j] += eps;
317            x_pm[i] += eps;
318            x_pm[j] -= eps;
319            x_mp[i] -= eps;
320            x_mp[j] += eps;
321            x_mm[i] -= eps;
322            x_mm[j] -= eps;
323
324            let f_pp = f(&x_pp.view());
325            let f_pm = f(&x_pm.view());
326            let f_mp = f(&x_mp.view());
327            let f_mm = f(&x_mm.view());
328
329            hessian[idx] = (f_pp - f_pm - f_mp + f_mm) / (4.0 * eps * eps);
330        }
331    }
332
333    hessian
334}
335
/// Convert linear index to (row, col) coordinates in the upper triangle.
///
/// The upper triangle (row <= col) is enumerated column by column:
/// idx 0 -> (0,0), 1 -> (0,1), 2 -> (1,1), 3 -> (0,2), 4 -> (1,2), ...
/// Column `c` holds `c + 1` entries and starts at linear index c*(c+1)/2.
fn index_to_upper_triangle(idx: usize, _n: usize) -> (usize, usize) {
    // Find the column: advance while the next column would still start at
    // or before `idx`, leaving the largest `col` with c*(c+1)/2 <= idx.
    let mut col = 0;
    while (col + 1) * (col + 2) / 2 <= idx {
        col += 1;
    }
    // The offset within the column is the row index.
    let row = idx - col * (col + 1) / 2;
    (row, col)
}
350
351/// Parallel line search
352///
353/// Evaluates multiple step sizes in parallel to find the best one.
354pub fn parallel_line_search<F>(
355    f: F,
356    x: &Array1<f64>,
357    direction: &Array1<f64>,
358    step_sizes: &[f64],
359    options: &ParallelOptions,
360) -> (f64, f64)
361// Returns (best_step, best_value)
362where
363    F: Fn(&ArrayView1<f64>) -> f64 + Sync,
364{
365    let evaluations: Vec<(f64, f64)> = if step_sizes.len() < options.min_parallel_size {
366        // Sequential evaluation
367        step_sizes
368            .iter()
369            .map(|&alpha| {
370                let x_new = x + alpha * direction;
371                let value = f(&x_new.view());
372                (alpha, value)
373            })
374            .collect()
375    } else {
376        // Parallel evaluation
377        step_sizes
378            .par_iter()
379            .map(|&alpha| {
380                let x_new = x + alpha * direction;
381                let value = f(&x_new.view());
382                (alpha, value)
383            })
384            .collect()
385    };
386
387    // Find the best step size
388    evaluations
389        .into_iter()
390        .min_by(|(_, v1), (_, v2)| v1.partial_cmp(v2).unwrap())
391        .unwrap_or((0.0, f64::INFINITY))
392}
393
394#[cfg(test)]
395mod tests {
396    use super::*;
397    use ndarray::array;
398
399    fn quadratic(x: &ArrayView1<f64>) -> f64 {
400        x.iter().map(|&xi| xi * xi).sum()
401    }
402
403    #[test]
404    fn test_parallel_gradient() {
405        let x = array![1.0, 2.0, 3.0, 4.0, 5.0];
406        let options = ParallelOptions::default();
407
408        let grad_parallel = parallel_finite_diff_gradient(quadratic, x.view(), &options);
409        let grad_sequential = sequential_finite_diff_gradient(quadratic, x.view(), 1e-8);
410
411        // Check that parallel and sequential give similar results
412        for i in 0..x.len() {
413            assert!((grad_parallel[i] - grad_sequential[i]).abs() < 1e-10);
414            // For quadratic function, gradient should be 2*x
415            assert!((grad_parallel[i] - 2.0 * x[i]).abs() < 1e-6);
416        }
417    }
418
419    #[test]
420    fn test_parallel_batch_evaluation() {
421        let points = vec![
422            array![1.0, 2.0],
423            array![3.0, 4.0],
424            array![5.0, 6.0],
425            array![7.0, 8.0],
426        ];
427
428        let options = ParallelOptions {
429            min_parallel_size: 2,
430            ..Default::default()
431        };
432
433        let values = parallel_evaluate_batch(quadratic, &points, &options);
434
435        // Verify results
436        for (i, point) in points.iter().enumerate() {
437            let expected = quadratic(&point.view());
438            assert_eq!(values[i], expected);
439        }
440    }
441
442    #[test]
443    fn test_parallel_line_search() {
444        let x = array![1.0, 1.0];
445        let direction = array![-1.0, -1.0];
446        let step_sizes: Vec<f64> = (0..20).map(|i| i as f64 * 0.1).collect();
447
448        let options = ParallelOptions::default();
449        let (best_step, best_value) =
450            parallel_line_search(quadratic, &x, &direction, &step_sizes, &options);
451
452        // For quadratic function starting at (1,1) going in direction (-1,-1),
453        // the minimum should be at step size 1.0
454        assert!((best_step - 1.0).abs() < 0.1);
455        assert!(best_value < 0.1);
456    }
457}