1use crate::common::IntegrateFloat;
34use crate::error::{IntegrateError, IntegrateResult};
35use scirs2_core::ndarray::{Array1, Array2, ArrayView1};
36use std::collections::VecDeque;
37
/// Configuration options for Anderson acceleration.
#[derive(Debug, Clone)]
pub struct AcceleratorOptions<F: IntegrateFloat> {
    /// Maximum number of previous iterates kept in each history buffer
    /// (the Anderson depth "m").
    pub memory_depth: usize,
    /// Tikhonov regularization added to the diagonal of the normal-equations
    /// matrix to keep the least-squares solve well-conditioned.
    pub regularization: F,
    /// Minimum number of stored iterates required before an accelerated
    /// update is attempted; until then plain g(x) is returned.
    pub min_iterates: usize,
    /// Prefer the QR-based least-squares path when the system is tall enough.
    pub use_qr: bool,
    /// Damping factor beta used to mix histories:
    /// x_new = (1 - beta)·x_acc + beta·g_acc. beta = 1 means undamped.
    pub damping: F,
    /// If `Some(p)`, clear all history every `p` iterations to avoid
    /// stagnation from stale iterates.
    pub restart_period: Option<usize>,
}
54
55impl<F: IntegrateFloat> Default for AcceleratorOptions<F> {
56 fn default() -> Self {
57 Self {
58 memory_depth: 5,
59 regularization: F::from_f64(1e-12)
60 .unwrap_or_else(|| F::from(1e-12).expect("Failed to convert constant to float")),
61 min_iterates: 2,
62 use_qr: true,
63 damping: F::one(),
64 restart_period: Some(20),
65 }
66 }
67}
68
69#[derive(Debug)]
75pub struct AndersonAccelerator<F: IntegrateFloat> {
76 dimension: usize,
78 options: AcceleratorOptions<F>,
80 x_history: VecDeque<Array1<F>>,
82 g_history: VecDeque<Array1<F>>,
84 residual_history: VecDeque<Array1<F>>,
86 iteration_count: usize,
88 is_active: bool,
90}
91
92impl<F: IntegrateFloat> AndersonAccelerator<F> {
93 pub fn new(dimension: usize, options: AcceleratorOptions<F>) -> Self {
95 Self {
96 dimension,
97 options,
98 x_history: VecDeque::new(),
99 g_history: VecDeque::new(),
100 residual_history: VecDeque::new(),
101 iteration_count: 0,
102 is_active: false,
103 }
104 }
105
106 pub fn with_memory_depth(_dimension: usize, memorydepth: usize) -> Self {
108 let options = AcceleratorOptions {
109 memory_depth: memorydepth,
110 ..Default::default()
111 };
112 Self::new(_dimension, options)
113 }
114
115 pub fn accelerate(
117 &mut self,
118 x_current: ArrayView1<F>,
119 g_x_current: ArrayView1<F>,
120 ) -> Option<Array1<F>> {
121 if x_current.len() != self.dimension || g_x_current.len() != self.dimension {
122 return None;
123 }
124
125 let residual = &g_x_current.to_owned() - &x_current.to_owned();
127
128 self.x_history.push_back(x_current.to_owned());
130 self.g_history.push_back(g_x_current.to_owned());
131 self.residual_history.push_back(residual);
132
133 while self.x_history.len() > self.options.memory_depth {
135 self.x_history.pop_front();
136 self.g_history.pop_front();
137 self.residual_history.pop_front();
138 }
139
140 self.iteration_count += 1;
141
142 if let Some(restart_period) = self.options.restart_period {
144 if self.iteration_count.is_multiple_of(restart_period) {
145 self.restart();
146 return Some(g_x_current.to_owned());
147 }
148 }
149
150 if self.residual_history.len() < self.options.min_iterates {
152 return Some(g_x_current.to_owned());
153 }
154
155 self.is_active = true;
157 match self.compute_anderson_update() {
158 Ok(x_accelerated) => Some(x_accelerated),
159 Err(_) => {
160 self.restart();
162 Some(g_x_current.to_owned())
163 }
164 }
165 }
166
    /// Compute one Anderson(m) update from the stored histories.
    ///
    /// Solves the regularized least-squares problem min_alpha ||ΔR·alpha - r_m||
    /// over consecutive residual differences, then forms the new iterate as a
    /// damped combination of the correspondingly weighted x- and g-histories.
    fn compute_anderson_update(&self) -> IntegrateResult<Array1<F>> {
        // Number of residual differences available. Caller guarantees the
        // history is non-empty, so this subtraction cannot underflow.
        let m = self.residual_history.len() - 1;
        if m == 0 {
            // Only a single iterate stored: nothing to extrapolate from,
            // return the latest g(x) unchanged.
            return self
                .g_history
                .back()
                .ok_or_else(|| {
                    IntegrateError::ComputationError(
                        "No history available for Anderson acceleration".to_string(),
                    )
                })
                .cloned();
        }

        // Matrix of consecutive residual differences:
        // ΔR[:, j] = r_{j+1} - r_j, shape (dimension x m).
        let mut delta_r = Array2::zeros((self.dimension, m));
        for j in 0..m {
            let r_diff = &self.residual_history[j + 1] - &self.residual_history[j];
            for i in 0..self.dimension {
                delta_r[[i, j]] = r_diff[i];
            }
        }

        // Latest residual r_m is the right-hand side of the LS problem.
        let r_m = self.residual_history.back().ok_or_else(|| {
            IntegrateError::ComputationError(
                "No residual history available for Anderson acceleration".to_string(),
            )
        })?;
        let alpha = self.solve_least_squares(&delta_r, r_m.view())?;

        let mut x_accelerated = Array1::zeros(self.dimension);
        let mut g_accelerated = Array1::zeros(self.dimension);

        // Mixing weights: the newest iterate receives 1 - sum(alpha); each
        // older iterate j receives its alpha_j from the LS solve.
        let alpha_sum: F = alpha.sum();
        let weight_m = F::one() - alpha_sum;

        let x_m = self.x_history.back().ok_or_else(|| {
            IntegrateError::ComputationError(
                "No x history available for Anderson acceleration".to_string(),
            )
        })?;
        let g_m = self.g_history.back().ok_or_else(|| {
            IntegrateError::ComputationError(
                "No g history available for Anderson acceleration".to_string(),
            )
        })?;

        x_accelerated = &x_accelerated + &(x_m * weight_m);
        g_accelerated = &g_accelerated + &(g_m * weight_m);

        for (j, alpha_j) in alpha.iter().enumerate() {
            x_accelerated = &x_accelerated + &(&self.x_history[j] * *alpha_j);
            g_accelerated = &g_accelerated + &(&self.g_history[j] * *alpha_j);
        }

        // Damped mixing: beta = damping. With the default beta = 1 the result
        // is the pure accelerated g-combination.
        let beta = self.options.damping;
        let x_new = &x_accelerated * (F::one() - beta) + &g_accelerated * beta;

        Ok(x_new)
    }
238
239 fn solve_least_squares(&self, a: &Array2<F>, b: ArrayView1<F>) -> IntegrateResult<Array1<F>> {
241 let (n, m) = a.dim();
242
243 if self.options.use_qr && n >= m {
244 self.solve_qr(a, b)
245 } else {
246 self.solve_normal_equations(a, b)
247 }
248 }
249
    /// Solve the least-squares problem via QR factorization.
    ///
    /// NOTE(review): a dedicated QR path is not implemented yet — this
    /// currently delegates to the regularized normal equations, so the
    /// `use_qr` option selects a code path name but not different numerics.
    fn solve_qr(&self, a: &Array2<F>, b: ArrayView1<F>) -> IntegrateResult<Array1<F>> {
        self.solve_normal_equations(a, b)
    }
256
257 fn solve_normal_equations(
259 &self,
260 a: &Array2<F>,
261 b: ArrayView1<F>,
262 ) -> IntegrateResult<Array1<F>> {
263 let (n, m) = a.dim();
264
265 let mut ata = Array2::zeros((m, m));
267 for i in 0..m {
268 for j in 0..m {
269 let mut sum = F::zero();
270 for k in 0..n {
271 sum += a[[k, i]] * a[[k, j]];
272 }
273 ata[[i, j]] = sum;
274 }
275 }
276
277 for i in 0..m {
279 ata[[i, i]] += self.options.regularization;
280 }
281
282 let mut atb = Array1::zeros(m);
284 for i in 0..m {
285 let mut sum = F::zero();
286 for k in 0..n {
287 sum += a[[k, i]] * b[k];
288 }
289 atb[i] = sum;
290 }
291
292 self.solve_linear_system(ata, atb)
294 }
295
296 fn solve_linear_system(
298 &self,
299 mut a: Array2<F>,
300 mut b: Array1<F>,
301 ) -> IntegrateResult<Array1<F>> {
302 let n = a.nrows();
303
304 for k in 0..n {
306 let mut max_val = a[[k, k]].abs();
308 let mut max_row = k;
309
310 for i in (k + 1)..n {
311 let abs_val = a[[i, k]].abs();
312 if abs_val > max_val {
313 max_val = abs_val;
314 max_row = i;
315 }
316 }
317
318 if max_val < self.options.regularization {
320 return Err(IntegrateError::ComputationError(
321 "Singular matrix in Anderson acceleration".to_string(),
322 ));
323 }
324
325 if max_row != k {
327 for j in 0..n {
328 let temp = a[[k, j]];
329 a[[k, j]] = a[[max_row, j]];
330 a[[max_row, j]] = temp;
331 }
332 let temp = b[k];
333 b[k] = b[max_row];
334 b[max_row] = temp;
335 }
336
337 for i in (k + 1)..n {
339 if a[[k, k]].abs()
341 < F::from_f64(1e-14).unwrap_or_else(|| {
342 F::from(1e-14).expect("Failed to convert constant to float")
343 })
344 {
345 return Err(IntegrateError::ComputationError(
346 "Zero diagonal element in Gaussian elimination".to_string(),
347 ));
348 }
349 let factor = a[[i, k]] / a[[k, k]];
350 for j in (k + 1)..n {
351 a[[i, j]] = a[[i, j]] - factor * a[[k, j]];
352 }
353 b[i] = b[i] - factor * b[k];
354 }
355 }
356
357 let mut x = Array1::zeros(n);
359 for i in (0..n).rev() {
360 let mut sum = F::zero();
361 for j in (i + 1)..n {
362 sum += a[[i, j]] * x[j];
363 }
364 if a[[i, i]].abs()
366 < F::from_f64(1e-14)
367 .unwrap_or_else(|| F::from(1e-14).expect("Failed to convert constant to float"))
368 {
369 return Err(IntegrateError::ComputationError(
370 "Zero diagonal element in back substitution".to_string(),
371 ));
372 }
373 x[i] = (b[i] - sum) / a[[i, i]];
374 }
375
376 Ok(x)
377 }
378
    /// Discard all stored history and mark the accelerator inactive.
    /// Invoked on the configured `restart_period` schedule and after a
    /// numerical failure in the accelerated update.
    pub fn restart(&mut self) {
        self.x_history.clear();
        self.g_history.clear();
        self.residual_history.clear();
        self.is_active = false;
    }
386
    /// Whether an accelerated (rather than plain fixed-point) update has
    /// been attempted since the last restart.
    pub fn is_active(&self) -> bool {
        self.is_active
    }

    /// Number of iterates currently held in the history buffers.
    pub fn memory_usage(&self) -> usize {
        self.x_history.len()
    }

    /// Total number of accepted `accelerate` calls so far (not reset by
    /// `restart`).
    pub fn iteration_count(&self) -> usize {
        self.iteration_count
    }
401}
402
403pub struct AitkenAccelerator<F: IntegrateFloat> {
405 history: VecDeque<F>,
406}
407
408impl<F: IntegrateFloat> AitkenAccelerator<F> {
409 pub fn new() -> Self {
411 Self {
412 history: VecDeque::new(),
413 }
414 }
415
416 pub fn accelerate(&mut self, value: F) -> Option<F> {
418 self.history.push_back(value);
419
420 while self.history.len() > 3 {
422 self.history.pop_front();
423 }
424
425 if self.history.len() == 3 {
426 let x0 = self.history[0];
427 let x1 = self.history[1];
428 let x2 = self.history[2];
429
430 let numerator = (x2 - x1) * (x2 - x1);
432 let two = F::from_f64(2.0)
433 .unwrap_or_else(|| F::from(2).expect("Failed to convert constant to float"));
434 let denominator = x2 - two * x1 + x0;
435 let epsilon = F::from_f64(1e-12)
436 .unwrap_or_else(|| F::from(1e-12).expect("Failed to convert constant to float"));
437
438 if denominator.abs() > epsilon {
439 Some(x2 - numerator / denominator)
440 } else {
441 Some(x2)
442 }
443 } else {
444 Some(value)
445 }
446 }
447
448 pub fn restart(&mut self) {
450 self.history.clear();
451 }
452}
453
454impl<F: IntegrateFloat> Default for AitkenAccelerator<F> {
455 fn default() -> Self {
456 Self::new()
457 }
458}
459
#[cfg(test)]
mod tests {
    use super::*;

    /// Anderson acceleration on the scalar contraction g(x) = 0.5·x + 1,
    /// whose fixed point is x* = 2; ten iterations should land close to it.
    #[test]
    fn test_anderson_accelerator() {
        let mut accelerator = AndersonAccelerator::new(1, AcceleratorOptions::default());

        let mut x = Array1::from_vec(vec![0.0]);

        for _iter in 0..10 {
            let g_x = Array1::from_vec(vec![0.5 * x[0] + 1.0]);

            if let Some(x_new) = accelerator.accelerate(x.view(), g_x.view()) {
                x = x_new;
            }
        }

        assert!((x[0] - 2.0_f64).abs() < 0.1);
    }

    /// Aitken Δ² on the sequence x_k = k/(k+1), which converges to 1; the
    /// accelerated estimate should exceed the raw last value 0.8.
    #[test]
    fn test_aitken_accelerator() {
        let mut accelerator = AitkenAccelerator::new();

        let values = vec![0.0, 0.5, 0.666667, 0.75, 0.8];
        let mut result = 0.0;

        for value in values {
            if let Some(accelerated) = accelerator.accelerate(value) {
                result = accelerated;
            }
        }

        assert!(result > 0.8);
    }

    /// Anderson acceleration on a 2-D linear contraction with an explicit
    /// regularization setting; verifies the iterates stay finite.
    #[test]
    fn test_anderson_with_regularization() {
        let options = AcceleratorOptions {
            regularization: 1e-8,
            memory_depth: 3,
            ..Default::default()
        };

        let mut accelerator = AndersonAccelerator::new(2, options);

        let mut x: Array1<f64> = Array1::from_vec(vec![0.0, 0.0]);

        for _iter in 0..5 {
            let g_x = Array1::from_vec(vec![
                0.3 * x[0] + 0.1 * x[1] + 1.0,
                0.1 * x[0] + 0.4 * x[1] + 0.5,
            ]);

            if let Some(x_new) = accelerator.accelerate(x.view(), g_x.view()) {
                x = x_new;
            }
        }

        assert!(x[0].is_finite() && x[1].is_finite());
    }
}