use std::fmt;

use nabled_core::scalar::NabledReal;
use ndarray::{Array1, Array2};
use num_complex::Complex64;

const DEFAULT_TOLERANCE: f64 = 1.0e-12;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OptimizationError {
    EmptyInput,
    DimensionMismatch,
    NonFiniteInput,
    InvalidConfig,
    MaxIterationsExceeded,
}

impl fmt::Display for OptimizationError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            OptimizationError::EmptyInput => write!(f, "input cannot be empty"),
            OptimizationError::DimensionMismatch => write!(f, "input dimensions are incompatible"),
            OptimizationError::NonFiniteInput => write!(f, "input contains non-finite values"),
            OptimizationError::InvalidConfig => write!(f, "invalid optimizer configuration"),
            OptimizationError::MaxIterationsExceeded => write!(f, "maximum iterations exceeded"),
        }
    }
}

impl std::error::Error for OptimizationError {}

#[derive(Debug, Clone, Copy)]
pub struct LineSearchConfig<T: NabledReal = f64> {
    pub initial_step: T,
    pub contraction: T,
    pub sufficient_decrease: T,
    pub max_iterations: usize,
}

impl<T: NabledReal> Default for LineSearchConfig<T> {
    fn default() -> Self {
        Self {
            initial_step: T::one(),
            contraction: T::from_f64(0.5).unwrap_or(T::epsilon()),
            sufficient_decrease: T::from_f64(1e-4).unwrap_or(T::epsilon()),
            max_iterations: 64,
        }
    }
}

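/// Configuration shared by plain (full-batch) gradient descent and the stochastic variant.
///
/// Fields can be overridden with struct-update syntax; a minimal sketch, with illustrative
/// numbers taken from this module's tests rather than recommended defaults (`ignore`d so it is
/// not compiled as a doctest):
///
/// ```ignore
/// let config = SGDConfig { learning_rate: 5e-2_f64, ..SGDConfig::default() };
/// ```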
#[derive(Debug, Clone, Copy)]
pub struct SGDConfig<T: NabledReal = f64> {
    pub learning_rate: T,
    pub max_iterations: usize,
    pub tolerance: T,
}

impl<T: NabledReal> Default for SGDConfig<T> {
    fn default() -> Self {
        Self {
            learning_rate: T::from_f64(1e-2).unwrap_or(T::epsilon()),
            max_iterations: 10_000,
            tolerance: T::from_f64(1e-8).unwrap_or(T::epsilon()),
        }
    }
}

#[derive(Debug, Clone, Copy)]
pub struct AdamConfig<T: NabledReal = f64> {
    pub learning_rate: T,
    pub beta1: T,
    pub beta2: T,
    pub epsilon: T,
    pub max_iterations: usize,
    pub tolerance: T,
}

impl<T: NabledReal> Default for AdamConfig<T> {
    fn default() -> Self {
        Self {
            learning_rate: T::from_f64(1e-2).unwrap_or(T::epsilon()),
            beta1: T::from_f64(0.9).unwrap_or(T::epsilon()),
            beta2: T::from_f64(0.999).unwrap_or(T::epsilon()),
            epsilon: T::from_f64(1e-8).unwrap_or(T::epsilon()),
            max_iterations: 10_000,
            tolerance: T::from_f64(1e-8).unwrap_or(T::epsilon()),
        }
    }
}

#[derive(Debug, Clone, Copy)]
pub struct MomentumConfig<T: NabledReal = f64> {
    pub learning_rate: T,
    pub momentum: T,
    pub max_iterations: usize,
    pub tolerance: T,
}

impl<T: NabledReal> Default for MomentumConfig<T> {
    fn default() -> Self {
        Self {
            learning_rate: T::from_f64(1e-2).unwrap_or(T::epsilon()),
            momentum: T::from_f64(0.9).unwrap_or(T::epsilon()),
            max_iterations: 10_000,
            tolerance: T::from_f64(1e-8).unwrap_or(T::epsilon()),
        }
    }
}

#[derive(Debug, Clone, Copy)]
pub struct RMSPropConfig<T: NabledReal = f64> {
    pub learning_rate: T,
    pub rho: T,
    pub epsilon: T,
    pub max_iterations: usize,
    pub tolerance: T,
}

impl<T: NabledReal> Default for RMSPropConfig<T> {
    fn default() -> Self {
        Self {
            learning_rate: T::from_f64(1e-2).unwrap_or(T::epsilon()),
            rho: T::from_f64(0.9).unwrap_or(T::epsilon()),
            epsilon: T::from_f64(1e-8).unwrap_or(T::epsilon()),
            max_iterations: 10_000,
            tolerance: T::from_f64(1e-8).unwrap_or(T::epsilon()),
        }
    }
}

#[derive(Debug, Clone, Copy)]
pub struct ProjectedGradientConfig<T: NabledReal = f64> {
    pub learning_rate: T,
    pub max_iterations: usize,
    pub tolerance: T,
}

impl<T: NabledReal> Default for ProjectedGradientConfig<T> {
    fn default() -> Self {
        Self {
            learning_rate: T::from_f64(1e-2).unwrap_or(T::epsilon()),
            max_iterations: 10_000,
            tolerance: T::from_f64(1e-8).unwrap_or(T::epsilon()),
        }
    }
}

#[derive(Debug, Clone, Copy)]
pub struct BFGSConfig<T: NabledReal = f64> {
    pub step_size: T,
    pub max_iterations: usize,
    pub tolerance: T,
    pub curvature_tolerance: T,
}

impl<T: NabledReal> Default for BFGSConfig<T> {
    fn default() -> Self {
        Self {
            step_size: T::one(),
            max_iterations: 2_000,
            tolerance: T::from_f64(1e-8).unwrap_or(T::epsilon()),
            curvature_tolerance: T::from_f64(1e-12).unwrap_or(T::epsilon()),
        }
    }
}

fn l2_norm<T: NabledReal>(vector: &Array1<T>) -> T {
    vector
        .iter()
        .map(|value| *value * *value)
        .fold(T::zero(), |acc, value| acc + value)
        .sqrt()
}

fn l2_norm_complex(vector: &Array1<Complex64>) -> f64 {
    vector.iter().map(Complex64::norm_sqr).sum::<f64>().sqrt()
}

fn validate_vector<T: NabledReal>(vector: &Array1<T>) -> Result<(), OptimizationError> {
    if vector.is_empty() {
        return Err(OptimizationError::EmptyInput);
    }
    if vector.iter().any(|value| !value.is_finite()) {
        return Err(OptimizationError::NonFiniteInput);
    }
    Ok(())
}

fn validate_vector_complex(vector: &Array1<Complex64>) -> Result<(), OptimizationError> {
    if vector.is_empty() {
        return Err(OptimizationError::EmptyInput);
    }
    if vector.iter().any(|value| !value.re.is_finite() || !value.im.is_finite()) {
        return Err(OptimizationError::NonFiniteInput);
    }
    Ok(())
}

fn validate_line_search_config<T: NabledReal>(
    config: &LineSearchConfig<T>,
) -> Result<(), OptimizationError> {
    if config.initial_step <= T::zero()
        || config.contraction <= T::zero()
        || config.contraction >= T::one()
        || config.sufficient_decrease <= T::zero()
        || config.sufficient_decrease >= T::one()
        || config.max_iterations == 0
    {
        return Err(OptimizationError::InvalidConfig);
    }
    Ok(())
}

fn validate_sgd_config<T: NabledReal>(config: &SGDConfig<T>) -> Result<(), OptimizationError> {
    if config.learning_rate <= T::zero()
        || config.max_iterations == 0
        || config.tolerance < T::zero()
    {
        return Err(OptimizationError::InvalidConfig);
    }
    Ok(())
}

fn validate_adam_config<T: NabledReal>(config: &AdamConfig<T>) -> Result<(), OptimizationError> {
    if config.learning_rate <= T::zero()
        || config.beta1 <= T::zero()
        || config.beta1 >= T::one()
        || config.beta2 <= T::zero()
        || config.beta2 >= T::one()
        || config.epsilon <= T::zero()
        || config.max_iterations == 0
        || config.tolerance < T::zero()
    {
        return Err(OptimizationError::InvalidConfig);
    }
    Ok(())
}

fn validate_momentum_config<T: NabledReal>(
    config: &MomentumConfig<T>,
) -> Result<(), OptimizationError> {
    if config.learning_rate <= T::zero()
        || config.momentum < T::zero()
        || config.momentum >= T::one()
        || config.max_iterations == 0
        || config.tolerance < T::zero()
    {
        return Err(OptimizationError::InvalidConfig);
    }
    Ok(())
}

fn validate_rmsprop_config<T: NabledReal>(
    config: &RMSPropConfig<T>,
) -> Result<(), OptimizationError> {
    if config.learning_rate <= T::zero()
        || config.rho <= T::zero()
        || config.rho >= T::one()
        || config.epsilon <= T::zero()
        || config.max_iterations == 0
        || config.tolerance < T::zero()
    {
        return Err(OptimizationError::InvalidConfig);
    }
    Ok(())
}

fn validate_projected_gradient_config<T: NabledReal>(
    config: &ProjectedGradientConfig<T>,
) -> Result<(), OptimizationError> {
    if config.learning_rate <= T::zero()
        || config.max_iterations == 0
        || config.tolerance < T::zero()
    {
        return Err(OptimizationError::InvalidConfig);
    }
    Ok(())
}

fn validate_bfgs_config<T: NabledReal>(config: &BFGSConfig<T>) -> Result<(), OptimizationError> {
    if config.step_size <= T::zero()
        || config.max_iterations == 0
        || config.tolerance < T::zero()
        || config.curvature_tolerance <= T::zero()
    {
        return Err(OptimizationError::InvalidConfig);
    }
    Ok(())
}

fn validate_bounds<T: NabledReal>(
    initial: &Array1<T>,
    lower_bounds: &Array1<T>,
    upper_bounds: &Array1<T>,
) -> Result<(), OptimizationError> {
    if initial.len() != lower_bounds.len() || initial.len() != upper_bounds.len() {
        return Err(OptimizationError::DimensionMismatch);
    }
    for i in 0..initial.len() {
        if !lower_bounds[i].is_finite()
            || !upper_bounds[i].is_finite()
            || lower_bounds[i] > upper_bounds[i]
        {
            return Err(OptimizationError::InvalidConfig);
        }
    }
    Ok(())
}

fn validate_bounds_complex(
    initial: &Array1<Complex64>,
    lower_bounds: &Array1<Complex64>,
    upper_bounds: &Array1<Complex64>,
) -> Result<(), OptimizationError> {
    if initial.len() != lower_bounds.len() || initial.len() != upper_bounds.len() {
        return Err(OptimizationError::DimensionMismatch);
    }
    for i in 0..initial.len() {
        let lower = lower_bounds[i];
        let upper = upper_bounds[i];
        if !lower.re.is_finite()
            || !lower.im.is_finite()
            || !upper.re.is_finite()
            || !upper.im.is_finite()
            || lower.re > upper.re
            || lower.im > upper.im
        {
            return Err(OptimizationError::InvalidConfig);
        }
    }
    Ok(())
}

fn project_to_bounds<T: NabledReal>(
    point: &mut Array1<T>,
    lower_bounds: &Array1<T>,
    upper_bounds: &Array1<T>,
) {
    for i in 0..point.len() {
        point[i] = if point[i] < lower_bounds[i] {
            lower_bounds[i]
        } else if point[i] > upper_bounds[i] {
            upper_bounds[i]
        } else {
            point[i]
        };
    }
}

fn project_to_bounds_complex(
    point: &mut Array1<Complex64>,
    lower_bounds: &Array1<Complex64>,
    upper_bounds: &Array1<Complex64>,
) {
    for i in 0..point.len() {
        let real = point[i].re.clamp(lower_bounds[i].re, upper_bounds[i].re);
        let imag = point[i].im.clamp(lower_bounds[i].im, upper_bounds[i].im);
        point[i] = Complex64::new(real, imag);
    }
}

fn outer_product<T: NabledReal>(left: &Array1<T>, right: &Array1<T>) -> Array2<T> {
    let mut output = Array2::<T>::zeros((left.len(), right.len()));
    for row in 0..left.len() {
        for col in 0..right.len() {
            output[[row, col]] = left[row] * right[col];
        }
    }
    output
}

fn outer_product_complex(left: &Array1<Complex64>, right: &Array1<Complex64>) -> Array2<Complex64> {
    let mut output = Array2::<Complex64>::zeros((left.len(), right.len()));
    for row in 0..left.len() {
        for col in 0..right.len() {
            output[[row, col]] = left[row] * right[col].conj();
        }
    }
    output
}

fn hermitian_transpose(matrix: &Array2<Complex64>) -> Array2<Complex64> {
    matrix.t().mapv(|value| value.conj())
}

fn hermitian_dot(left: &Array1<Complex64>, right: &Array1<Complex64>) -> Complex64 {
    left.iter().zip(right.iter()).map(|(lhs, rhs)| lhs.conj() * rhs).sum()
}

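/// Backtracking (Armijo) line search: starting from `initial_step`, the step is repeatedly
/// multiplied by `contraction` until the sufficient-decrease condition holds.
///
/// A minimal usage sketch, adapted from this module's tests; marked `ignore` so it is not
/// compiled as a doctest, and it assumes this module's items are in scope:
///
/// ```ignore
/// use ndarray::arr1;
///
/// let objective = |x: &ndarray::Array1<f64>| (x[0] - 3.0).powi(2);
/// let gradient = |x: &ndarray::Array1<f64>| arr1(&[2.0 * (x[0] - 3.0)]);
/// let alpha = backtracking_line_search(
///     &arr1(&[0.0_f64]),
///     &arr1(&[1.0_f64]),
///     objective,
///     gradient,
///     &LineSearchConfig::default(),
/// )
/// .unwrap();
/// assert!(alpha > 0.0);
/// ```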
pub fn backtracking_line_search<T, F, G>(
    point: &Array1<T>,
    direction: &Array1<T>,
    objective: F,
    gradient: G,
    config: &LineSearchConfig<T>,
) -> Result<T, OptimizationError>
where
    T: NabledReal,
    F: Fn(&Array1<T>) -> T,
    G: Fn(&Array1<T>) -> Array1<T>,
{
    validate_vector(point)?;
    validate_vector(direction)?;
    if point.len() != direction.len() {
        return Err(OptimizationError::DimensionMismatch);
    }
    validate_line_search_config(config)?;

    let grad = gradient(point);
    if grad.len() != point.len() || grad.iter().any(|value| !value.is_finite()) {
        return Err(OptimizationError::NonFiniteInput);
    }

    let fx = objective(point);
    if !fx.is_finite() {
        return Err(OptimizationError::NonFiniteInput);
    }
    let directional_derivative = grad.dot(direction);

    let mut alpha = config.initial_step;
    let contraction = config.contraction;
    let sufficient_decrease = config.sufficient_decrease;
    for _ in 0..config.max_iterations {
        let candidate = point + &direction.mapv(|value| value * alpha);
        let candidate_value = objective(&candidate);
        if !candidate_value.is_finite() {
            return Err(OptimizationError::NonFiniteInput);
        }
        if candidate_value <= fx + sufficient_decrease * alpha * directional_derivative {
            return Ok(alpha);
        }
        alpha *= contraction;
    }
    Err(OptimizationError::MaxIterationsExceeded)
}

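/// Fixed-step gradient descent that stops once the gradient's `l2` norm drops below the
/// configured tolerance.
///
/// A minimal usage sketch based on this module's tests; marked `ignore` so it is not compiled
/// as a doctest, and it assumes this module's items are in scope:
///
/// ```ignore
/// use ndarray::arr1;
///
/// let objective = |x: &ndarray::Array1<f64>| (x[0] - 3.0).powi(2);
/// let gradient = |x: &ndarray::Array1<f64>| arr1(&[2.0 * (x[0] - 3.0)]);
/// let solution = gradient_descent(&arr1(&[0.0_f64]), objective, gradient, &SGDConfig::default())
///     .unwrap();
/// assert!((solution[0] - 3.0).abs() < 1e-4);
/// ```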
pub fn gradient_descent<T, F, G>(
    initial: &Array1<T>,
    objective: F,
    gradient: G,
    config: &SGDConfig<T>,
) -> Result<Array1<T>, OptimizationError>
where
    T: NabledReal,
    F: Fn(&Array1<T>) -> T,
    G: Fn(&Array1<T>) -> Array1<T>,
{
    validate_vector(initial)?;
    validate_sgd_config(config)?;

    let mut x = initial.clone();
    let _ = objective(&x);
    let tolerance = config.tolerance.max(T::from_f64(DEFAULT_TOLERANCE).unwrap_or_else(T::epsilon));
    let learning_rate = config.learning_rate;

    for _ in 0..config.max_iterations {
        let grad = gradient(&x);
        if grad.len() != x.len() || grad.iter().any(|value| !value.is_finite()) {
            return Err(OptimizationError::NonFiniteInput);
        }
        if l2_norm(&grad) <= tolerance {
            return Ok(x);
        }
        x = &x - &grad.mapv(|value| value * learning_rate);
    }

    Err(OptimizationError::MaxIterationsExceeded)
}

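/// Adam optimizer with bias-corrected first- and second-moment estimates.
///
/// A minimal usage sketch based on this module's tests; marked `ignore` so it is not compiled
/// as a doctest, and it assumes this module's items are in scope:
///
/// ```ignore
/// use ndarray::arr1;
///
/// let objective = |x: &ndarray::Array1<f64>| (x[0] - 3.0).powi(2);
/// let gradient = |x: &ndarray::Array1<f64>| arr1(&[2.0 * (x[0] - 3.0)]);
/// let solution = adam(&arr1(&[-5.0_f64]), objective, gradient, &AdamConfig::default()).unwrap();
/// assert!((solution[0] - 3.0).abs() < 1e-3);
/// ```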
pub fn adam<T, F, G>(
    initial: &Array1<T>,
    objective: F,
    gradient: G,
    config: &AdamConfig<T>,
) -> Result<Array1<T>, OptimizationError>
where
    T: NabledReal,
    F: Fn(&Array1<T>) -> T,
    G: Fn(&Array1<T>) -> Array1<T>,
{
    validate_vector(initial)?;
    validate_adam_config(config)?;

    let mut x = initial.clone();
    let mut m = Array1::<T>::zeros(x.len());
    let mut v = Array1::<T>::zeros(x.len());
    let mut beta1_power = T::one();
    let mut beta2_power = T::one();
    let tolerance = config.tolerance.max(T::from_f64(DEFAULT_TOLERANCE).unwrap_or_else(T::epsilon));
    let beta1 = config.beta1;
    let beta2 = config.beta2;
    let learning_rate = config.learning_rate;
    let epsilon = config.epsilon;

    let _ = objective(&x);
    for _ in 0..config.max_iterations {
        let grad = gradient(&x);
        if grad.len() != x.len() || grad.iter().any(|value| !value.is_finite()) {
            return Err(OptimizationError::NonFiniteInput);
        }
        if l2_norm(&grad) <= tolerance {
            return Ok(x);
        }

        beta1_power *= beta1;
        beta2_power *= beta2;

        for i in 0..x.len() {
            m[i] = beta1 * m[i] + (T::one() - beta1) * grad[i];
            v[i] = beta2 * v[i] + (T::one() - beta2) * grad[i] * grad[i];

            let m_hat = m[i] / (T::one() - beta1_power);
            let v_hat = v[i] / (T::one() - beta2_power);
            x[i] -= learning_rate * m_hat / (v_hat.sqrt() + epsilon);
        }
    }

    Err(OptimizationError::MaxIterationsExceeded)
}

pub fn momentum_descent<T, F, G>(
    initial: &Array1<T>,
    objective: F,
    gradient: G,
    config: &MomentumConfig<T>,
) -> Result<Array1<T>, OptimizationError>
where
    T: NabledReal,
    F: Fn(&Array1<T>) -> T,
    G: Fn(&Array1<T>) -> Array1<T>,
{
    validate_vector(initial)?;
    validate_momentum_config(config)?;

    let mut x = initial.clone();
    let mut velocity = Array1::<T>::zeros(x.len());
    let tolerance = config.tolerance.max(T::from_f64(DEFAULT_TOLERANCE).unwrap_or_else(T::epsilon));
    let learning_rate = config.learning_rate;
    let momentum = config.momentum;

    let _ = objective(&x);
    for _ in 0..config.max_iterations {
        let grad = gradient(&x);
        if grad.len() != x.len() || grad.iter().any(|value| !value.is_finite()) {
            return Err(OptimizationError::NonFiniteInput);
        }
        if l2_norm(&grad) <= tolerance {
            return Ok(x);
        }

        for i in 0..x.len() {
            velocity[i] = momentum * velocity[i] + grad[i];
            x[i] -= learning_rate * velocity[i];
        }
    }

    Err(OptimizationError::MaxIterationsExceeded)
}

pub fn rmsprop<T, F, G>(
    initial: &Array1<T>,
    objective: F,
    gradient: G,
    config: &RMSPropConfig<T>,
) -> Result<Array1<T>, OptimizationError>
where
    T: NabledReal,
    F: Fn(&Array1<T>) -> T,
    G: Fn(&Array1<T>) -> Array1<T>,
{
    validate_vector(initial)?;
    validate_rmsprop_config(config)?;

    let mut x = initial.clone();
    let mut avg_sq = Array1::<T>::zeros(x.len());
    let tolerance = config.tolerance.max(T::from_f64(DEFAULT_TOLERANCE).unwrap_or_else(T::epsilon));
    let learning_rate = config.learning_rate;
    let rho = config.rho;
    let epsilon = config.epsilon;

    let _ = objective(&x);
    for _ in 0..config.max_iterations {
        let grad = gradient(&x);
        if grad.len() != x.len() || grad.iter().any(|value| !value.is_finite()) {
            return Err(OptimizationError::NonFiniteInput);
        }
        if l2_norm(&grad) <= tolerance {
            return Ok(x);
        }

        for i in 0..x.len() {
            avg_sq[i] = rho * avg_sq[i] + (T::one() - rho) * grad[i] * grad[i];
            x[i] -= learning_rate * grad[i] / (avg_sq[i].sqrt() + epsilon);
        }
    }

    Err(OptimizationError::MaxIterationsExceeded)
}

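/// Projected gradient descent for box constraints: each iterate is clipped back into
/// `[lower_bounds, upper_bounds]` component-wise after every gradient step.
///
/// A minimal usage sketch based on this module's tests; marked `ignore` so it is not compiled
/// as a doctest, and it assumes this module's items are in scope:
///
/// ```ignore
/// use ndarray::arr1;
///
/// let objective = |x: &ndarray::Array1<f64>| (x[0] - 3.0).powi(2);
/// let gradient = |x: &ndarray::Array1<f64>| arr1(&[2.0 * (x[0] - 3.0)]);
/// // The unconstrained minimum sits at 3.0, so the solution lands on the upper bound 2.5.
/// let solution = projected_gradient_descent_box(
///     &arr1(&[-10.0_f64]),
///     objective,
///     gradient,
///     &arr1(&[0.0_f64]),
///     &arr1(&[2.5_f64]),
///     &ProjectedGradientConfig::default(),
/// )
/// .unwrap();
/// assert!((solution[0] - 2.5).abs() < 1e-8);
/// ```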
pub fn projected_gradient_descent_box<T, F, G>(
    initial: &Array1<T>,
    objective: F,
    gradient: G,
    lower_bounds: &Array1<T>,
    upper_bounds: &Array1<T>,
    config: &ProjectedGradientConfig<T>,
) -> Result<Array1<T>, OptimizationError>
where
    T: NabledReal,
    F: Fn(&Array1<T>) -> T,
    G: Fn(&Array1<T>) -> Array1<T>,
{
    validate_vector(initial)?;
    validate_vector(lower_bounds)?;
    validate_vector(upper_bounds)?;
    validate_projected_gradient_config(config)?;
    validate_bounds(initial, lower_bounds, upper_bounds)?;

    let mut x = initial.clone();
    project_to_bounds(&mut x, lower_bounds, upper_bounds);
    let _ = objective(&x);
    let tolerance = config.tolerance.max(T::from_f64(DEFAULT_TOLERANCE).unwrap_or_else(T::epsilon));
    let learning_rate = config.learning_rate;

    for _ in 0..config.max_iterations {
        let grad = gradient(&x);
        if grad.len() != x.len() || grad.iter().any(|value| !value.is_finite()) {
            return Err(OptimizationError::NonFiniteInput);
        }
        let previous = x.clone();
        x = &x - &grad.mapv(|value| value * learning_rate);
        project_to_bounds(&mut x, lower_bounds, upper_bounds);
        let step_norm = l2_norm(&(&x - &previous));
        if step_norm <= tolerance || l2_norm(&grad) <= tolerance {
            return Ok(x);
        }
    }

    Err(OptimizationError::MaxIterationsExceeded)
}

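/// Stochastic gradient descent: the caller supplies a gradient estimate that may depend on the
/// iteration index (for example, to select a mini-batch).
///
/// A minimal usage sketch based on this module's tests; marked `ignore` so it is not compiled
/// as a doctest, and it assumes this module's items are in scope:
///
/// ```ignore
/// use ndarray::arr1;
///
/// let solution = stochastic_gradient_descent(
///     &arr1(&[-3.0_f64]),
///     |x, _iteration| arr1(&[2.0 * (x[0] - 3.0)]),
///     &SGDConfig { learning_rate: 5e-2_f64, max_iterations: 2_000, tolerance: 1e-6_f64 },
/// )
/// .unwrap();
/// assert!((solution[0] - 3.0).abs() < 1e-3);
/// ```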
pub fn stochastic_gradient_descent<T, G>(
    initial: &Array1<T>,
    stochastic_gradient: G,
    config: &SGDConfig<T>,
) -> Result<Array1<T>, OptimizationError>
where
    T: NabledReal,
    G: Fn(&Array1<T>, usize) -> Array1<T>,
{
    validate_vector(initial)?;
    validate_sgd_config(config)?;

    let mut x = initial.clone();
    let tolerance = config.tolerance.max(T::from_f64(DEFAULT_TOLERANCE).unwrap_or_else(T::epsilon));
    let learning_rate = config.learning_rate;
    for iteration in 0..config.max_iterations {
        let grad = stochastic_gradient(&x, iteration);
        if grad.len() != x.len() || grad.iter().any(|value| !value.is_finite()) {
            return Err(OptimizationError::NonFiniteInput);
        }
        if l2_norm(&grad) <= tolerance {
            return Ok(x);
        }
        x = &x - &grad.mapv(|value| value * learning_rate);
    }
    Err(OptimizationError::MaxIterationsExceeded)
}

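/// BFGS quasi-Newton method with a fixed step along the approximate Newton direction. The
/// inverse-Hessian estimate is updated with the standard rank-two BFGS formula; updates are
/// skipped when the curvature term `s.y` is too small.
///
/// A minimal usage sketch based on this module's tests; marked `ignore` so it is not compiled
/// as a doctest, and it assumes this module's items are in scope:
///
/// ```ignore
/// use ndarray::arr1;
///
/// let objective = |x: &ndarray::Array1<f64>| (x[0] - 3.0).powi(2);
/// let gradient = |x: &ndarray::Array1<f64>| arr1(&[2.0 * (x[0] - 3.0)]);
/// let solution = bfgs(&arr1(&[-8.0_f64]), objective, gradient, &BFGSConfig::default()).unwrap();
/// assert!((solution[0] - 3.0).abs() < 1e-6);
/// ```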
pub fn bfgs<T, F, G>(
    initial: &Array1<T>,
    objective: F,
    gradient: G,
    config: &BFGSConfig<T>,
) -> Result<Array1<T>, OptimizationError>
where
    T: NabledReal,
    F: Fn(&Array1<T>) -> T,
    G: Fn(&Array1<T>) -> Array1<T>,
{
    validate_vector(initial)?;
    validate_bfgs_config(config)?;

    let dimension = initial.len();
    let mut x = initial.clone();
    let mut h_inv = Array2::<T>::eye(dimension);
    let tolerance = config.tolerance.max(T::from_f64(DEFAULT_TOLERANCE).unwrap_or_else(T::epsilon));
    let step = config.step_size;
    let curvature_tolerance = config.curvature_tolerance;

    let _ = objective(&x);
    for _ in 0..config.max_iterations {
        let grad = gradient(&x);
        if grad.len() != x.len() || grad.iter().any(|value| !value.is_finite()) {
            return Err(OptimizationError::NonFiniteInput);
        }
        if l2_norm(&grad) <= tolerance {
            return Ok(x);
        }

        let direction = -h_inv.dot(&grad);
        let x_next = &x + &direction.mapv(|value| value * step);
        let grad_next = gradient(&x_next);
        if grad_next.len() != x.len() || grad_next.iter().any(|value| !value.is_finite()) {
            return Err(OptimizationError::NonFiniteInput);
        }

        let s = &x_next - &x;
        let y = &grad_next - &grad;
        let ys = y.dot(&s);
        // Only apply the update when the curvature condition y.s > 0 holds (matching the
        // complex variant below); otherwise the inverse-Hessian estimate would lose
        // positive-definiteness.
        if ys > curvature_tolerance {
            let rho = T::one() / ys;
            let identity = Array2::<T>::eye(dimension);
            let sy = outer_product(&s, &y);
            let ys_outer = outer_product(&y, &s);
            let ss = outer_product(&s, &s);

            let scaled_cross_left = sy.mapv(|value| rho * value);
            let scaled_cross_right = ys_outer.mapv(|value| rho * value);
            let scaled_rank_one = ss.mapv(|value| rho * value);
            let left = &identity - &scaled_cross_left;
            let right = &identity - &scaled_cross_right;
            h_inv = left.dot(&h_inv).dot(&right) + scaled_rank_one;
        }

        x = x_next;
    }

    Err(OptimizationError::MaxIterationsExceeded)
}

pub fn backtracking_line_search_complex<F, G>(
    point: &Array1<Complex64>,
    direction: &Array1<Complex64>,
    objective: F,
    gradient: G,
    config: &LineSearchConfig<f64>,
) -> Result<f64, OptimizationError>
where
    F: Fn(&Array1<Complex64>) -> f64,
    G: Fn(&Array1<Complex64>) -> Array1<Complex64>,
{
    validate_vector_complex(point)?;
    validate_vector_complex(direction)?;
    if point.len() != direction.len() {
        return Err(OptimizationError::DimensionMismatch);
    }
    validate_line_search_config(config)?;

    let grad = gradient(point);
    if grad.len() != point.len()
        || grad.iter().any(|value| !value.re.is_finite() || !value.im.is_finite())
    {
        return Err(OptimizationError::NonFiniteInput);
    }

    let fx = objective(point);
    if !fx.is_finite() {
        return Err(OptimizationError::NonFiniteInput);
    }
    let directional_derivative = hermitian_dot(&grad, direction).re;

    let mut alpha = config.initial_step;
    for _ in 0..config.max_iterations {
        let candidate = point + &direction.mapv(|value| value * alpha);
        let candidate_value = objective(&candidate);
        if !candidate_value.is_finite() {
            return Err(OptimizationError::NonFiniteInput);
        }
        if candidate_value <= fx + config.sufficient_decrease * alpha * directional_derivative {
            return Ok(alpha);
        }
        alpha *= config.contraction;
    }
    Err(OptimizationError::MaxIterationsExceeded)
}

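/// Gradient descent over complex parameters: the objective is real-valued, the gradient is a
/// complex vector, and convergence is measured with the complex `l2` norm.
///
/// A minimal usage sketch based on this module's tests; marked `ignore` so it is not compiled
/// as a doctest, and it assumes this module's items are in scope:
///
/// ```ignore
/// use ndarray::arr1;
/// use num_complex::Complex64;
///
/// let target = Complex64::new(3.0, 2.0);
/// let objective = move |x: &ndarray::Array1<Complex64>| (x[0] - target).norm_sqr();
/// let gradient = move |x: &ndarray::Array1<Complex64>| {
///     arr1(&[Complex64::new(2.0, 0.0) * (x[0] - target)])
/// };
/// let solution = gradient_descent_complex(
///     &arr1(&[Complex64::new(0.0, 0.0)]),
///     objective,
///     gradient,
///     &SGDConfig::default(),
/// )
/// .unwrap();
/// assert!((solution[0] - target).norm() < 1e-3);
/// ```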
pub fn gradient_descent_complex<F, G>(
    initial: &Array1<Complex64>,
    objective: F,
    gradient: G,
    config: &SGDConfig<f64>,
) -> Result<Array1<Complex64>, OptimizationError>
where
    F: Fn(&Array1<Complex64>) -> f64,
    G: Fn(&Array1<Complex64>) -> Array1<Complex64>,
{
    validate_vector_complex(initial)?;
    validate_sgd_config(config)?;

    let mut x = initial.clone();
    let _ = objective(&x);
    let tolerance = config.tolerance.max(DEFAULT_TOLERANCE);

    for _ in 0..config.max_iterations {
        let grad = gradient(&x);
        if grad.len() != x.len()
            || grad.iter().any(|value| !value.re.is_finite() || !value.im.is_finite())
        {
            return Err(OptimizationError::NonFiniteInput);
        }
        if l2_norm_complex(&grad) <= tolerance {
            return Ok(x);
        }
        x = &x - &grad.mapv(|value| value * config.learning_rate);
    }

    Err(OptimizationError::MaxIterationsExceeded)
}

pub fn adam_complex<F, G>(
    initial: &Array1<Complex64>,
    objective: F,
    gradient: G,
    config: &AdamConfig<f64>,
) -> Result<Array1<Complex64>, OptimizationError>
where
    F: Fn(&Array1<Complex64>) -> f64,
    G: Fn(&Array1<Complex64>) -> Array1<Complex64>,
{
    validate_vector_complex(initial)?;
    validate_adam_config(config)?;

    let mut x = initial.clone();
    let mut m = Array1::<Complex64>::zeros(x.len());
    let mut v = Array1::<f64>::zeros(x.len());
    let mut beta1_power = 1.0_f64;
    let mut beta2_power = 1.0_f64;
    let tolerance = config.tolerance.max(DEFAULT_TOLERANCE);

    let _ = objective(&x);
    for _ in 0..config.max_iterations {
        let grad = gradient(&x);
        if grad.len() != x.len()
            || grad.iter().any(|value| !value.re.is_finite() || !value.im.is_finite())
        {
            return Err(OptimizationError::NonFiniteInput);
        }
        if l2_norm_complex(&grad) <= tolerance {
            return Ok(x);
        }

        beta1_power *= config.beta1;
        beta2_power *= config.beta2;

        for i in 0..x.len() {
            m[i] = config.beta1 * m[i] + (1.0 - config.beta1) * grad[i];
            v[i] = config.beta2 * v[i] + (1.0 - config.beta2) * grad[i].norm_sqr();

            let m_hat = m[i] / (1.0 - beta1_power);
            let v_hat = v[i] / (1.0 - beta2_power);
            x[i] -= config.learning_rate * m_hat / (v_hat.sqrt() + config.epsilon);
        }
    }

    Err(OptimizationError::MaxIterationsExceeded)
}

pub fn momentum_descent_complex<F, G>(
    initial: &Array1<Complex64>,
    objective: F,
    gradient: G,
    config: &MomentumConfig<f64>,
) -> Result<Array1<Complex64>, OptimizationError>
where
    F: Fn(&Array1<Complex64>) -> f64,
    G: Fn(&Array1<Complex64>) -> Array1<Complex64>,
{
    validate_vector_complex(initial)?;
    validate_momentum_config(config)?;

    let mut x = initial.clone();
    let mut velocity = Array1::<Complex64>::zeros(x.len());
    let tolerance = config.tolerance.max(DEFAULT_TOLERANCE);

    let _ = objective(&x);
    for _ in 0..config.max_iterations {
        let grad = gradient(&x);
        if grad.len() != x.len()
            || grad.iter().any(|value| !value.re.is_finite() || !value.im.is_finite())
        {
            return Err(OptimizationError::NonFiniteInput);
        }
        if l2_norm_complex(&grad) <= tolerance {
            return Ok(x);
        }

        for i in 0..x.len() {
            velocity[i] = config.momentum * velocity[i] + grad[i];
            x[i] -= config.learning_rate * velocity[i];
        }
    }

    Err(OptimizationError::MaxIterationsExceeded)
}

pub fn rmsprop_complex<F, G>(
    initial: &Array1<Complex64>,
    objective: F,
    gradient: G,
    config: &RMSPropConfig<f64>,
) -> Result<Array1<Complex64>, OptimizationError>
where
    F: Fn(&Array1<Complex64>) -> f64,
    G: Fn(&Array1<Complex64>) -> Array1<Complex64>,
{
    validate_vector_complex(initial)?;
    validate_rmsprop_config(config)?;

    let mut x = initial.clone();
    let mut avg_sq = Array1::<f64>::zeros(x.len());
    let tolerance = config.tolerance.max(DEFAULT_TOLERANCE);

    let _ = objective(&x);
    for _ in 0..config.max_iterations {
        let grad = gradient(&x);
        if grad.len() != x.len()
            || grad.iter().any(|value| !value.re.is_finite() || !value.im.is_finite())
        {
            return Err(OptimizationError::NonFiniteInput);
        }
        if l2_norm_complex(&grad) <= tolerance {
            return Ok(x);
        }

        for i in 0..x.len() {
            avg_sq[i] = config.rho * avg_sq[i] + (1.0 - config.rho) * grad[i].norm_sqr();
            x[i] -= config.learning_rate * grad[i] / (avg_sq[i].sqrt() + config.epsilon);
        }
    }

    Err(OptimizationError::MaxIterationsExceeded)
}

pub fn projected_gradient_descent_box_complex<F, G>(
    initial: &Array1<Complex64>,
    objective: F,
    gradient: G,
    lower_bounds: &Array1<Complex64>,
    upper_bounds: &Array1<Complex64>,
    config: &ProjectedGradientConfig<f64>,
) -> Result<Array1<Complex64>, OptimizationError>
where
    F: Fn(&Array1<Complex64>) -> f64,
    G: Fn(&Array1<Complex64>) -> Array1<Complex64>,
{
    validate_vector_complex(initial)?;
    validate_vector_complex(lower_bounds)?;
    validate_vector_complex(upper_bounds)?;
    validate_projected_gradient_config(config)?;
    validate_bounds_complex(initial, lower_bounds, upper_bounds)?;

    let mut x = initial.clone();
    project_to_bounds_complex(&mut x, lower_bounds, upper_bounds);
    let _ = objective(&x);
    let tolerance = config.tolerance.max(DEFAULT_TOLERANCE);

    for _ in 0..config.max_iterations {
        let grad = gradient(&x);
        if grad.len() != x.len()
            || grad.iter().any(|value| !value.re.is_finite() || !value.im.is_finite())
        {
            return Err(OptimizationError::NonFiniteInput);
        }
        let previous = x.clone();
        x = &x - &grad.mapv(|value| value * config.learning_rate);
        project_to_bounds_complex(&mut x, lower_bounds, upper_bounds);
        let step_norm = l2_norm_complex(&(&x - &previous));
        if step_norm <= tolerance || l2_norm_complex(&grad) <= tolerance {
            return Ok(x);
        }
    }

    Err(OptimizationError::MaxIterationsExceeded)
}

pub fn stochastic_gradient_descent_complex<G>(
    initial: &Array1<Complex64>,
    stochastic_gradient: G,
    config: &SGDConfig<f64>,
) -> Result<Array1<Complex64>, OptimizationError>
where
    G: Fn(&Array1<Complex64>, usize) -> Array1<Complex64>,
{
    validate_vector_complex(initial)?;
    validate_sgd_config(config)?;

    let mut x = initial.clone();
    let tolerance = config.tolerance.max(DEFAULT_TOLERANCE);
    for iteration in 0..config.max_iterations {
        let grad = stochastic_gradient(&x, iteration);
        if grad.len() != x.len()
            || grad.iter().any(|value| !value.re.is_finite() || !value.im.is_finite())
        {
            return Err(OptimizationError::NonFiniteInput);
        }
        if l2_norm_complex(&grad) <= tolerance {
            return Ok(x);
        }
        x = &x - &grad.mapv(|value| value * config.learning_rate);
    }
    Err(OptimizationError::MaxIterationsExceeded)
}

pub fn bfgs_complex<F, G>(
    initial: &Array1<Complex64>,
    objective: F,
    gradient: G,
    config: &BFGSConfig<f64>,
) -> Result<Array1<Complex64>, OptimizationError>
where
    F: Fn(&Array1<Complex64>) -> f64,
    G: Fn(&Array1<Complex64>) -> Array1<Complex64>,
{
    validate_vector_complex(initial)?;
    validate_bfgs_config(config)?;

    let dimension = initial.len();
    let mut x = initial.clone();
    let mut h_inv = Array2::<Complex64>::eye(dimension);
    let tolerance = config.tolerance.max(DEFAULT_TOLERANCE);

    let _ = objective(&x);
    for _ in 0..config.max_iterations {
        let grad = gradient(&x);
        if grad.len() != x.len()
            || grad.iter().any(|value| !value.re.is_finite() || !value.im.is_finite())
        {
            return Err(OptimizationError::NonFiniteInput);
        }
        if l2_norm_complex(&grad) <= tolerance {
            return Ok(x);
        }

        let direction = -h_inv.dot(&grad);
        let step = config.step_size;
        let x_next = &x + &direction.mapv(|value| value * step);
        let grad_next = gradient(&x_next);
        if grad_next.len() != x.len()
            || grad_next.iter().any(|value| !value.re.is_finite() || !value.im.is_finite())
        {
            return Err(OptimizationError::NonFiniteInput);
        }

        let s = &x_next - &x;
        let y = &grad_next - &grad;
        let curvature = hermitian_dot(&s, &y).re;
        if curvature > config.curvature_tolerance {
            let rho = 1.0 / curvature;
            let identity = Array2::<Complex64>::eye(dimension);
            let sy = outer_product_complex(&s, &y);
            let ys_outer = outer_product_complex(&y, &s);
            let ss = outer_product_complex(&s, &s);

            let left = &identity - &sy.mapv(|value| value * rho);
            let right = &identity - &ys_outer.mapv(|value| value * rho);
            h_inv =
                left.dot(&h_inv).dot(&hermitian_transpose(&right)) + ss.mapv(|value| value * rho);
        }

        x = x_next;
    }

    Err(OptimizationError::MaxIterationsExceeded)
}

#[cfg(test)]
mod tests {
    use ndarray::arr1;
    use num_complex::Complex64;

    use super::*;

    fn objective(x: &Array1<f64>) -> f64 {
        let delta = x[0] - 3.0_f64;
        delta * delta
    }

    fn gradient(x: &Array1<f64>) -> Array1<f64> { arr1(&[2.0_f64 * (x[0] - 3.0_f64)]) }

    fn objective_complex(x: &Array1<Complex64>) -> f64 {
        let delta = x[0] - Complex64::new(3.0_f64, 2.0_f64);
        delta.norm_sqr()
    }

    fn gradient_complex(x: &Array1<Complex64>) -> Array1<Complex64> {
        arr1(&[Complex64::new(2.0_f64, 0.0_f64) * (x[0] - Complex64::new(3.0_f64, 2.0_f64))])
    }

    #[test]
    fn backtracking_line_search_finds_descent_step() {
        let x = arr1(&[0.0_f64]);
        let direction = arr1(&[1.0_f64]);
        let alpha = backtracking_line_search(
            &x,
            &direction,
            objective,
            gradient,
            &LineSearchConfig::default(),
        )
        .unwrap();
        assert!(alpha > 0.0_f64);
    }

    #[test]
    fn gradient_descent_converges_on_quadratic() {
        let x0 = arr1(&[0.0_f64]);
        let solution = gradient_descent(&x0, objective, gradient, &SGDConfig::default()).unwrap();
        assert!((solution[0] - 3.0_f64).abs() < 1e-4_f64);
    }

    #[test]
    fn adam_converges_on_quadratic() {
        let x0 = arr1(&[-5.0_f64]);
        let solution = adam(&x0, objective, gradient, &AdamConfig::default()).unwrap();
        assert!((solution[0] - 3.0_f64).abs() < 1e-3_f64);
    }

    #[test]
    fn line_search_rejects_invalid_config_and_dimension_mismatch() {
        let x = arr1(&[0.0_f64]);
        let direction = arr1(&[1.0_f64, 2.0_f64]);
        let result = backtracking_line_search(
            &x,
            &direction,
            objective,
            gradient,
            &LineSearchConfig::default(),
        );
        assert!(matches!(result, Err(OptimizationError::DimensionMismatch)));

        let bad_config = LineSearchConfig { contraction: 1.0_f64, ..LineSearchConfig::default() };
        let result =
            backtracking_line_search(&x, &arr1(&[1.0_f64]), objective, gradient, &bad_config);
        assert!(matches!(result, Err(OptimizationError::InvalidConfig)));
    }

    #[test]
    fn gradient_descent_and_adam_cover_error_paths() {
        let x0 = arr1(&[0.0_f64]);

        let bad_gradient = |_x: &Array1<f64>| arr1(&[f64::NAN]);
        let gd_non_finite = gradient_descent(&x0, objective, bad_gradient, &SGDConfig::default());
        assert!(matches!(gd_non_finite, Err(OptimizationError::NonFiniteInput)));

        let gd_stall = gradient_descent(&x0, objective, gradient, &SGDConfig {
            learning_rate: 1e-12_f64,
            max_iterations: 1,
            tolerance: 0.0_f64,
        });
        assert!(matches!(gd_stall, Err(OptimizationError::MaxIterationsExceeded)));

        let bad_adam = AdamConfig { beta1: 1.0_f64, ..AdamConfig::default() };
        let adam_invalid = adam(&x0, objective, gradient, &bad_adam);
        assert!(matches!(adam_invalid, Err(OptimizationError::InvalidConfig)));
    }

    #[test]
    fn momentum_and_rmsprop_converge_on_quadratic() {
        let x0 = arr1(&[-4.0_f64]);

        let momentum_solution =
            momentum_descent(&x0, objective, gradient, &MomentumConfig::default()).unwrap();
        assert!((momentum_solution[0] - 3.0_f64).abs() < 1e-3_f64);

        let rmsprop_solution =
            rmsprop(&x0, objective, gradient, &RMSPropConfig::default()).unwrap();
        assert!((rmsprop_solution[0] - 3.0_f64).abs() < 1e-3_f64);
    }

    #[test]
    fn momentum_and_rmsprop_reject_invalid_config() {
        let x0 = arr1(&[0.0_f64]);

        let bad_momentum = MomentumConfig { momentum: 1.0_f64, ..MomentumConfig::default() };
        let momentum_invalid = momentum_descent(&x0, objective, gradient, &bad_momentum);
        assert!(matches!(momentum_invalid, Err(OptimizationError::InvalidConfig)));

        let bad_rmsprop = RMSPropConfig { rho: 1.0_f64, ..RMSPropConfig::default() };
        let rmsprop_invalid = rmsprop(&x0, objective, gradient, &bad_rmsprop);
        assert!(matches!(rmsprop_invalid, Err(OptimizationError::InvalidConfig)));
    }

    #[test]
    fn projected_gradient_descent_respects_bounds() {
        let x0 = arr1(&[-10.0_f64]);
        let lower = arr1(&[0.0_f64]);
        let upper = arr1(&[2.5_f64]);
        let solution = projected_gradient_descent_box(
            &x0,
            objective,
            gradient,
            &lower,
            &upper,
            &ProjectedGradientConfig::default(),
        )
        .unwrap();
        assert!((solution[0] - 2.5_f64).abs() < 1e-8_f64);
    }

    #[test]
    fn stochastic_gradient_descent_converges_on_quadratic() {
        let x0 = arr1(&[-3.0_f64]);
        let solution = stochastic_gradient_descent(
            &x0,
            |x, _iteration| arr1(&[2.0_f64 * (x[0] - 3.0_f64)]),
            &SGDConfig {
                learning_rate: 5e-2_f64,
                max_iterations: 2_000,
                tolerance: 1e-6_f64,
            },
        )
        .unwrap();
        assert!((solution[0] - 3.0_f64).abs() < 1e-3_f64);
    }

    #[test]
    fn bfgs_converges_on_quadratic() {
        let x0 = arr1(&[-8.0_f64]);
        let solution = bfgs(&x0, objective, gradient, &BFGSConfig::default()).unwrap();
        assert!((solution[0] - 3.0_f64).abs() < 1e-6_f64);
    }

    #[test]
    fn advanced_optimizers_reject_invalid_inputs() {
        let x0 = arr1(&[0.0_f64]);
        let lower = arr1(&[2.0_f64]);
        let upper = arr1(&[1.0_f64]);
        let projected = projected_gradient_descent_box(
            &x0,
            objective,
            gradient,
            &lower,
            &upper,
            &ProjectedGradientConfig::default(),
        );
        assert!(matches!(projected, Err(OptimizationError::InvalidConfig)));

        let sgd_non_finite =
            stochastic_gradient_descent(&x0, |_x, _| arr1(&[f64::NAN]), &SGDConfig::default());
        assert!(matches!(sgd_non_finite, Err(OptimizationError::NonFiniteInput)));

        let bfgs_invalid = bfgs(&x0, objective, gradient, &BFGSConfig {
            step_size: 0.0_f64,
            ..BFGSConfig::default()
        });
        assert!(matches!(bfgs_invalid, Err(OptimizationError::InvalidConfig)));
    }

    #[test]
    fn complex_line_search_and_optimizers_converge() {
        let x = arr1(&[Complex64::new(0.0_f64, 0.0_f64)]);
        let direction = arr1(&[Complex64::new(1.0_f64, 1.0_f64)]);
        let alpha = backtracking_line_search_complex(
            &x,
            &direction,
            objective_complex,
            gradient_complex,
            &LineSearchConfig::default(),
        )
        .unwrap();
        assert!(alpha > 0.0_f64);

        let gd = gradient_descent_complex(
            &x,
            objective_complex,
            gradient_complex,
            &SGDConfig::default(),
        )
        .unwrap();
        assert!((gd[0] - Complex64::new(3.0_f64, 2.0_f64)).norm() < 1e-3_f64);

        let adam_solution =
            adam_complex(&x, objective_complex, gradient_complex, &AdamConfig::default()).unwrap();
        assert!((adam_solution[0] - Complex64::new(3.0_f64, 2.0_f64)).norm() < 1e-3_f64);

        let momentum_solution = momentum_descent_complex(
            &x,
            objective_complex,
            gradient_complex,
            &MomentumConfig::default(),
        )
        .unwrap();
        assert!((momentum_solution[0] - Complex64::new(3.0_f64, 2.0_f64)).norm() < 1e-3_f64);

        let rmsprop_solution =
            rmsprop_complex(&x, objective_complex, gradient_complex, &RMSPropConfig::default())
                .unwrap();
        assert!((rmsprop_solution[0] - Complex64::new(3.0_f64, 2.0_f64)).norm() < 1e-3_f64);

        let bfgs_solution =
            bfgs_complex(&x, objective_complex, gradient_complex, &BFGSConfig::default()).unwrap();
        assert!((bfgs_solution[0] - Complex64::new(3.0_f64, 2.0_f64)).norm() < 1e-6_f64);
    }

    #[test]
    fn complex_projected_and_stochastic_paths_work() {
        let x = arr1(&[Complex64::new(-5.0_f64, -5.0_f64)]);
        let lower = arr1(&[Complex64::new(0.0_f64, 0.0_f64)]);
        let upper = arr1(&[Complex64::new(2.5_f64, 2.5_f64)]);
        let projected = projected_gradient_descent_box_complex(
            &x,
            objective_complex,
            gradient_complex,
            &lower,
            &upper,
            &ProjectedGradientConfig::default(),
        )
        .unwrap();
        assert!((projected[0] - Complex64::new(2.5_f64, 2.0_f64)).norm() < 1e-3_f64);

        let stochastic = stochastic_gradient_descent_complex(
            &arr1(&[Complex64::new(-3.0_f64, -1.0_f64)]),
            |point, _iteration| {
                let grad_sample = Complex64::new(2.0_f64, 0.0_f64)
                    * (point[0] - Complex64::new(3.0_f64, 2.0_f64));
                arr1(&[grad_sample])
            },
            &SGDConfig {
                learning_rate: 5e-2_f64,
                max_iterations: 2_000,
                tolerance: 1e-6_f64,
            },
        )
        .unwrap();
        assert!((stochastic[0] - Complex64::new(3.0_f64, 2.0_f64)).norm() < 1e-3_f64);
    }
}