#![allow(clippy::needless_range_loop)]
#![allow(dead_code)]

use std::f64::consts::PI;

#[derive(Clone)]
pub struct FunctionSpace {
    pub basis: Vec<fn(f64) -> f64>,
}

impl FunctionSpace {
    pub fn new(basis: Vec<fn(f64) -> f64>) -> Self {
        Self { basis }
    }

    pub fn eval(&self, i: usize, x: f64) -> f64 {
        (self.basis[i])(x)
    }

    pub fn dim(&self) -> usize {
        self.basis.len()
    }

    pub fn combine(&self, coeffs: &[f64], x: f64) -> f64 {
        assert_eq!(
            coeffs.len(),
            self.basis.len(),
            "coefficient length mismatch"
        );
        coeffs
            .iter()
            .zip(self.basis.iter())
            .map(|(c, f)| c * f(x))
            .sum()
    }
}

impl std::fmt::Debug for FunctionSpace {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "FunctionSpace {{ dim: {} }}", self.basis.len())
    }
}

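/// Discrete L² inner product: ⟨f, g⟩ ≈ Σᵢ fᵢ·gᵢ·Δx, a rectangle-rule
/// approximation of ∫ f(x) g(x) dx on a uniform grid with spacing `dx`.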
pub fn l2_inner_product(f: &[f64], g: &[f64], dx: f64) -> f64 {
    assert_eq!(f.len(), g.len(), "l2_inner_product: slice length mismatch");
    f.iter().zip(g.iter()).map(|(a, b)| a * b).sum::<f64>() * dx
}

pub fn l2_norm(f: &[f64], dx: f64) -> f64 {
    l2_inner_product(f, f, dx).sqrt()
}

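/// Gram-Schmidt orthonormalization with respect to the discrete L² inner
/// product (each projection uses the running vector, i.e. the modified,
/// numerically stabler variant). Inputs that are numerically dependent on
/// earlier vectors come back as zero vectors rather than being renormalized.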
pub fn gram_schmidt_orthogonalize(basis: &[Vec<f64>], dx: f64) -> Vec<Vec<f64>> {
    let n = basis.len();
    if n == 0 {
        return vec![];
    }
    let m = basis[0].len();
    let mut q: Vec<Vec<f64>> = Vec::with_capacity(n);

    for v in basis.iter() {
        let mut u = v.clone();
        for qi in q.iter() {
            let proj = l2_inner_product(&u, qi, dx);
            for (ui, qij) in u.iter_mut().zip(qi.iter()) {
                *ui -= proj * qij;
            }
        }
        let norm = l2_norm(&u, dx);
        if norm > 1e-14 {
            for ui in u.iter_mut() {
                *ui /= norm;
            }
        } else {
            u = vec![0.0; m];
        }
        q.push(u);
    }
    q
}

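/// Fourier coefficients (aₖ, bₖ) of `f` on [0, L] with L = n·dx, using the
/// convention aₖ = (2/L)·∫₀ᴸ f(x) cos(2πkx/L) dx (and sin for bₖ), evaluated
/// by the midpoint rule. Note that a₀ is therefore *twice* the mean of `f`.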
pub fn fourier_series_coeffs(f: &[f64], dx: f64, n_terms: usize) -> Vec<(f64, f64)> {
    let n = f.len();
    let l = n as f64 * dx;
    let norm = 2.0 / l;
    (0..n_terms)
        .map(|k| {
            let mut a = 0.0_f64;
            let mut b = 0.0_f64;
            for (i, &fi) in f.iter().enumerate() {
                let x = (i as f64 + 0.5) * dx;
                let arg = 2.0 * PI * (k as f64) * x / l;
                a += fi * arg.cos();
                b += fi * arg.sin();
            }
            (a * norm * dx, b * norm * dx)
        })
        .collect()
}

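/// Chebyshev coefficients via a discrete cosine transform. Assumes `f` holds
/// samples taken at the Chebyshev nodes xⱼ = cos(π(j + ½)/n), so that
/// cₖ = (2/n)·Σⱼ f(xⱼ) cos(k·θⱼ), with the k = 0 term halved.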
pub fn chebyshev_expansion(f: &[f64], n_terms: usize) -> Vec<f64> {
    let n = f.len();
    if n == 0 || n_terms == 0 {
        return vec![0.0; n_terms];
    }
    (0..n_terms)
        .map(|k| {
            let sum: f64 = f
                .iter()
                .enumerate()
                .map(|(j, &fj)| {
                    let theta = PI * (j as f64 + 0.5) / n as f64;
                    fj * (k as f64 * theta).cos()
                })
                .sum();
            if k == 0 {
                sum / n as f64
            } else {
                2.0 * sum / n as f64
            }
        })
        .collect()
}

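/// Legendre polynomial Pₙ(x) via the Bonnet recurrence
/// k·Pₖ(x) = (2k − 1)·x·Pₖ₋₁(x) − (k − 1)·Pₖ₋₂(x).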
fn legendre_p(n: usize, x: f64) -> f64 {
    if n == 0 {
        return 1.0;
    }
    if n == 1 {
        return x;
    }
    let mut p_prev = 1.0_f64;
    let mut p_curr = x;
    for k in 2..=n {
        let p_next = ((2 * k - 1) as f64 * x * p_curr - (k - 1) as f64 * p_prev) / k as f64;
        p_prev = p_curr;
        p_curr = p_next;
    }
    p_curr
}

pub fn legendre_expansion(f: &[f64], n_terms: usize) -> Vec<f64> {
    let n = f.len();
    if n == 0 || n_terms == 0 {
        return vec![0.0; n_terms];
    }
    let dx = 2.0 / n as f64;
    (0..n_terms)
        .map(|k| {
            let sum: f64 = f
                .iter()
                .enumerate()
                .map(|(j, &fj)| {
                    let x = -1.0 + (j as f64 + 0.5) * dx;
                    fj * legendre_p(k, x)
                })
                .sum();
            (2 * k + 1) as f64 / 2.0 * sum * dx
        })
        .collect()
}

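/// Orthonormal Haar wavelet transform (pyramid scheme): each pass replaces
/// adjacent pairs (a, b) by the scaled sum and difference (a ± b)/√2.
/// Signal lengths are assumed to be powers of two; otherwise the roundtrip
/// with `wavelet_haar_inverse` is not exact.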
pub fn wavelet_haar_transform(signal: &[f64]) -> Vec<f64> {
    let n = signal.len();
    let mut out = signal.to_vec();
    let mut step = n;
    while step >= 2 {
        step /= 2;
        let mut tmp = vec![0.0; step * 2];
        for i in 0..step {
            let a = out[2 * i];
            let b = out[2 * i + 1];
            tmp[i] = (a + b) * 0.5_f64.sqrt();
            tmp[step + i] = (a - b) * 0.5_f64.sqrt();
        }
        out[..step * 2].copy_from_slice(&tmp);
    }
    out
}

pub fn wavelet_haar_inverse(coeffs: &[f64]) -> Vec<f64> {
    let n = coeffs.len();
    let mut out = coeffs.to_vec();
    let mut step = 1_usize;
    while step < n {
        let mut tmp = vec![0.0; step * 2];
        for i in 0..step {
            let a = out[i];
            let d = out[step + i];
            tmp[2 * i] = (a + d) * 0.5_f64.sqrt();
            tmp[2 * i + 1] = (a - d) * 0.5_f64.sqrt();
        }
        out[..step * 2].copy_from_slice(&tmp);
        step *= 2;
    }
    out
}

pub fn sobolev_norm(f: &[f64], df: &[f64], dx: f64, s: f64) -> f64 {
    assert_eq!(
        f.len(),
        df.len(),
        "sobolev_norm: f and df must have equal length"
    );
    let l2_sq = l2_inner_product(f, f, dx);
    let dl2_sq = l2_inner_product(df, df, dx);
    (l2_sq + s * dl2_sq).sqrt()
}

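/// Estimate of the operator (spectral) norm σ_max of a possibly rectangular
/// matrix, by power iteration on AᵀA: with ‖v‖ = 1, ‖AᵀA·v‖ → σ_max², so
/// σ_max ≈ √‖AᵀA·v‖. The fixed 100 iterations are a heuristic cutoff, not a
/// convergence guarantee.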
pub fn operator_norm_estimate(matrix: &[Vec<f64>]) -> f64 {
    let nrows = matrix.len();
    if nrows == 0 {
        return 0.0;
    }
    let ncols = matrix[0].len();
    if ncols == 0 {
        return 0.0;
    }

    let mut v: Vec<f64> = vec![1.0 / (ncols as f64).sqrt(); ncols];
    let mut sigma = 0.0_f64;

    for _ in 0..100 {
        // w = A·v
        let mut w = vec![0.0_f64; nrows];
        for (i, row) in matrix.iter().enumerate() {
            w[i] = row.iter().zip(v.iter()).map(|(a, x)| a * x).sum();
        }
        // u = Aᵀ·w = AᵀA·v
        let mut u = vec![0.0_f64; ncols];
        for (i, row) in matrix.iter().enumerate() {
            for (j, &aij) in row.iter().enumerate() {
                u[j] += aij * w[i];
            }
        }
        let norm_u: f64 = u.iter().map(|x| x * x).sum::<f64>().sqrt();
        if norm_u < 1e-15 {
            break;
        }
        // With ‖v‖ = 1, ‖AᵀA·v‖ converges to σ_max².
        sigma = norm_u.sqrt();
        for (vi, ui) in v.iter_mut().zip(u.iter()) {
            *vi = *ui / norm_u;
        }
    }
    sigma
}

#[derive(Debug, Clone)]
pub struct HilbertSpace {
    pub dx: f64,
    pub n: usize,
}

impl HilbertSpace {
    pub fn new(n: usize, dx: f64) -> Self {
        Self { n, dx }
    }

    pub fn inner_product(&self, f: &[f64], g: &[f64]) -> f64 {
        l2_inner_product(f, g, self.dx)
    }

    pub fn norm(&self, f: &[f64]) -> f64 {
        l2_norm(f, self.dx)
    }

    pub fn orthonormalize(&self, basis: &[Vec<f64>]) -> Vec<Vec<f64>> {
        gram_schmidt_orthogonalize(basis, self.dx)
    }

    pub fn project(&self, f: &[f64], basis: &[Vec<f64>]) -> Vec<f64> {
        if basis.is_empty() || f.is_empty() {
            return vec![0.0; f.len()];
        }
        let ortho = self.orthonormalize(basis);
        let mut proj = vec![0.0; f.len()];
        for q in &ortho {
            if q.iter().all(|x| x.abs() < 1e-15) {
                continue;
            }
            let coeff = self.inner_product(f, q);
            for (pi, qi) in proj.iter_mut().zip(q.iter()) {
                *pi += coeff * qi;
            }
        }
        proj
    }

    pub fn distance(&self, f: &[f64], g: &[f64]) -> f64 {
        let diff: Vec<f64> = f.iter().zip(g.iter()).map(|(a, b)| a - b).collect();
        self.norm(&diff)
    }

    pub fn are_orthogonal(&self, f: &[f64], g: &[f64], tol: f64) -> bool {
        self.inner_product(f, g).abs() < tol
    }

    pub fn normalize(&self, f: &[f64]) -> Vec<f64> {
        let nm = self.norm(f);
        if nm < 1e-15 {
            return f.to_vec();
        }
        f.iter().map(|x| x / nm).collect()
    }
}

#[derive(Debug, Clone)]
pub struct BanachSpace {
    pub p: f64,
    pub dx: f64,
}

impl BanachSpace {
    pub fn new(p: f64, dx: f64) -> Self {
        Self { p: p.max(1.0), dx }
    }

    pub fn norm(&self, f: &[f64]) -> f64 {
        if self.p == f64::INFINITY || self.p > 1e10 {
            // Sup norm: take the maximum of |x|, so negative entries count.
            return f.iter().map(|x| x.abs()).fold(0.0_f64, f64::max);
        }
        let integral: f64 = f.iter().map(|x| x.abs().powf(self.p)).sum::<f64>() * self.dx;
        integral.powf(1.0 / self.p)
    }

    pub fn dual_norm(&self, g: &[f64]) -> f64 {
        // Hölder-conjugate exponent: 1/p + 1/q = 1, with q = ∞ when p = 1.
        let q = if (self.p - 1.0).abs() < 1e-12 {
            f64::INFINITY
        } else {
            self.p / (self.p - 1.0)
        };
        let dual = BanachSpace::new(q, self.dx);
        dual.norm(g)
    }

    pub fn bounded_linear_functional(&self, f: &[f64], g: &[f64]) -> f64 {
        assert_eq!(f.len(), g.len(), "functional: f and g length mismatch");
        f.iter().zip(g.iter()).map(|(a, b)| a * b).sum::<f64>() * self.dx
    }

    pub fn in_unit_ball(&self, f: &[f64], tol: f64) -> bool {
        self.norm(f) <= 1.0 + tol
    }

    pub fn holder_bound(&self, f: &[f64], g: &[f64]) -> (f64, f64) {
        let inner = self.bounded_linear_functional(f, g).abs();
        let norm_f = self.norm(f);
        let norm_g = self.dual_norm(g);
        (inner, norm_f * norm_g)
    }
}

#[derive(Debug, Clone)]
pub struct OperatorSpectrum {
    pub matrix: Vec<Vec<f64>>,
    pub n: usize,
}

impl OperatorSpectrum {
    pub fn new(matrix: Vec<Vec<f64>>) -> Self {
        let n = matrix.len();
        assert!(
            matrix.iter().all(|row| row.len() == n),
            "matrix must be square"
        );
        Self { matrix, n }
    }

    pub fn spectral_radius(&self, max_iter: usize) -> f64 {
        if self.n == 0 {
            return 0.0;
        }
        let mut v = vec![1.0 / (self.n as f64).sqrt(); self.n];
        let mut lambda = 0.0_f64;
        for _ in 0..max_iter {
            let w = self.matvec(&v);
            let norm: f64 = w.iter().map(|x| x * x).sum::<f64>().sqrt();
            if norm < 1e-15 {
                break;
            }
            // Rayleigh quotient vᵀAv with ‖v‖ = 1: the dominant-eigenvalue estimate.
            lambda = w.iter().zip(v.iter()).map(|(wi, vi)| wi * vi).sum::<f64>();
            for (vi, wi) in v.iter_mut().zip(w.iter()) {
                *vi = wi / norm;
            }
        }
        lambda
    }

    pub fn smallest_eigenvalue(&self, shift: f64, max_iter: usize) -> f64 {
        if self.n == 0 {
            return 0.0;
        }
        // Shifted power iteration: when shift ≥ λ_max, the eigenvalue of
        // A − shift·I with the largest magnitude is λ_min − shift.
        let shifted: Vec<Vec<f64>> = self
            .matrix
            .iter()
            .enumerate()
            .map(|(i, row)| {
                row.iter()
                    .enumerate()
                    .map(|(j, &a)| if i == j { a - shift } else { a })
                    .collect()
            })
            .collect();

        let op = OperatorSpectrum::new(shifted);
        let lam = op.spectral_radius(max_iter);
        lam + shift
    }

    pub fn trace(&self) -> f64 {
        (0..self.n).map(|i| self.matrix[i][i]).sum()
    }

    pub fn frobenius_norm(&self) -> f64 {
        self.matrix
            .iter()
            .flat_map(|row| row.iter())
            .map(|x| x * x)
            .sum::<f64>()
            .sqrt()
    }

    pub fn resolvent_norm(&self, lambda: f64) -> f64 {
        // Cheap heuristic: 1/‖A − λI‖_F, a lower bound for the true
        // resolvent norm ‖(A − λI)⁻¹‖₂ (no inverse is computed here).
        let shifted: Vec<Vec<f64>> = self
            .matrix
            .iter()
            .enumerate()
            .map(|(i, row)| {
                row.iter()
                    .enumerate()
                    .map(|(j, &a)| if i == j { a - lambda } else { a })
                    .collect()
            })
            .collect();
        let frob: f64 = shifted
            .iter()
            .flat_map(|r| r.iter())
            .map(|x| x * x)
            .sum::<f64>()
            .sqrt();
        if frob < 1e-15 {
            return f64::INFINITY;
        }
        1.0 / frob
    }

    pub fn apply(&self, v: &[f64]) -> Vec<f64> {
        self.matvec(v)
    }

    pub fn diagonal(&self) -> Vec<f64> {
        (0..self.n).map(|i| self.matrix[i][i]).collect()
    }

    fn matvec(&self, v: &[f64]) -> Vec<f64> {
        self.matrix
            .iter()
            .map(|row| row.iter().zip(v.iter()).map(|(a, x)| a * x).sum())
            .collect()
    }
}

#[derive(Debug, Clone)]
pub struct SobolevSpace {
    pub n: usize,
    pub dx: f64,
    pub k: usize,
}

impl SobolevSpace {
    pub fn new(n: usize, dx: f64, k: usize) -> Self {
        Self { n, dx, k }
    }

    pub fn norm(&self, f: &[f64]) -> f64 {
        // H^k norm: ‖f‖² = Σ_{m=0..k} ‖f^(m)‖²_{L²}.
        let mut sq = l2_inner_product(f, f, self.dx);
        let mut deriv = f.to_vec();
        for _ in 1..=self.k {
            deriv = Self::weak_derivative(&deriv, self.dx);
            sq += l2_inner_product(&deriv, &deriv, self.dx);
        }
        sq.sqrt()
    }

    pub fn weak_derivative(f: &[f64], dx: f64) -> Vec<f64> {
        let n = f.len();
        if n < 2 {
            return vec![0.0; n];
        }
        let mut d = vec![0.0; n];
        // Centered differences in the interior, one-sided at the endpoints.
        for i in 1..n - 1 {
            d[i] = (f[i + 1] - f[i - 1]) / (2.0 * dx);
        }
        d[0] = (f[1] - f[0]) / dx;
        d[n - 1] = (f[n - 1] - f[n - 2]) / dx;
        d
    }

    pub fn trace(&self, f: &[f64]) -> [f64; 2] {
        if f.is_empty() {
            return [0.0, 0.0];
        }
        [f[0], f[f.len() - 1]]
    }

    pub fn seminorm(&self, f: &[f64]) -> f64 {
        let mut deriv = f.to_vec();
        for _ in 0..self.k {
            deriv = Self::weak_derivative(&deriv, self.dx);
        }
        l2_norm(&deriv, self.dx)
    }

    pub fn is_in_space(&self, f: &[f64]) -> bool {
        self.norm(f).is_finite()
    }

    pub fn inner_product(&self, f: &[f64], g: &[f64]) -> f64 {
        let mut ip = l2_inner_product(f, g, self.dx);
        let mut df = f.to_vec();
        let mut dg = g.to_vec();
        for _ in 1..=self.k {
            df = Self::weak_derivative(&df, self.dx);
            dg = Self::weak_derivative(&dg, self.dx);
            ip += l2_inner_product(&df, &dg, self.dx);
        }
        ip
    }
}

pub struct FunctionalDerivative;

impl FunctionalDerivative {
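    /// Gateaux (directional) derivative of the functional `j` at `f` in the
    /// direction `h`, by central differencing:
    /// dJ(f)[h] ≈ (J(f + εh) − J(f − εh)) / (2ε).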
    pub fn gateaux<F>(j: F, f: &[f64], h: &[f64], eps: f64) -> f64
    where
        F: Fn(&[f64]) -> f64,
    {
        let f_plus: Vec<f64> = f
            .iter()
            .zip(h.iter())
            .map(|(fi, hi)| fi + eps * hi)
            .collect();
        let f_minus: Vec<f64> = f
            .iter()
            .zip(h.iter())
            .map(|(fi, hi)| fi - eps * hi)
            .collect();
        (j(&f_plus) - j(&f_minus)) / (2.0 * eps)
    }

    pub fn frechet_gradient<F>(j: F, f: &[f64], eps: f64) -> Vec<f64>
    where
        F: Fn(&[f64]) -> f64,
    {
        let n = f.len();
        let mut grad = vec![0.0; n];
        for i in 0..n {
            let mut f_plus = f.to_vec();
            let mut f_minus = f.to_vec();
            f_plus[i] += eps;
            f_minus[i] -= eps;
            grad[i] = (j(&f_plus) - j(&f_minus)) / (2.0 * eps);
        }
        grad
    }

    pub fn gradient_descent<F>(
        j: F,
        f0: &[f64],
        step_size: f64,
        max_iter: usize,
        eps: f64,
    ) -> (Vec<f64>, f64)
    where
        F: Fn(&[f64]) -> f64,
    {
        let mut f = f0.to_vec();
        for _ in 0..max_iter {
            let grad = Self::frechet_gradient(&j, &f, eps);
            for (fi, gi) in f.iter_mut().zip(grad.iter()) {
                *fi -= step_size * gi;
            }
        }
        let val = j(&f);
        (f, val)
    }

    pub fn newton_step<F>(j: F, f: &[f64], eps: f64) -> Vec<f64>
    where
        F: Fn(&[f64]) -> f64,
    {
        let n = f.len();
        let grad = Self::frechet_gradient(&j, f, eps);

        // Diagonal Hessian approximation via second differences; entries too
        // close to zero keep the default 1.0 so the step stays finite.
        let mut hess_diag = vec![1.0_f64; n];
        for i in 0..n {
            let mut fp = f.to_vec();
            let mut fm = f.to_vec();
            fp[i] += eps;
            fm[i] -= eps;
            let h = (j(&fp) - 2.0 * j(f) + j(&fm)) / (eps * eps);
            if h.abs() > 1e-15 {
                hess_diag[i] = h;
            }
        }

        f.iter()
            .zip(grad.iter())
            .zip(hess_diag.iter())
            .map(|((fi, gi), hi)| fi - gi / hi)
            .collect()
    }

    pub fn second_gateaux<F>(j: F, f: &[f64], h: &[f64], k: &[f64], eps: f64) -> f64
    where
        F: Fn(&[f64]) -> f64,
    {
        let fph: Vec<f64> = f
            .iter()
            .zip(h.iter())
            .map(|(fi, hi)| fi + eps * hi)
            .collect();
        let fmh: Vec<f64> = f
            .iter()
            .zip(h.iter())
            .map(|(fi, hi)| fi - eps * hi)
            .collect();
        let g_plus = Self::gateaux(&j, &fph, k, eps);
        let g_minus = Self::gateaux(&j, &fmh, k, eps);
        (g_plus - g_minus) / (2.0 * eps)
    }
}

pub struct VariationalProblem;

impl VariationalProblem {
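    /// Pointwise residual of the Euler-Lagrange equation
    /// ∂L/∂u − d/dx(∂L/∂u′) = 0 on the interior of the grid, with every
    /// derivative approximated by central finite differences; the boundary
    /// entries are left at zero.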
    pub fn euler_lagrange_residual<L>(lagrangian: L, u: &[f64], dx: f64) -> Vec<f64>
    where
        L: Fn(f64, f64, f64) -> f64,
    {
        let n = u.len();
        if n < 3 {
            return vec![0.0; n];
        }
        let eps = dx * 1e-5;
        let mut residual = vec![0.0; n];

        for i in 1..n - 1 {
            let x = (i as f64) * dx;
            let up = (u[i + 1] - u[i - 1]) / (2.0 * dx);
            // ∂L/∂u at (x, u[i], u′[i]), by central differencing in u.
            let dlu = (lagrangian(x, u[i] + eps, up) - lagrangian(x, u[i] - eps, up)) / (2.0 * eps);

            // Centered u′ at the neighbouring points i ± 1 (indices clamped
            // at the ends of the grid).
            let up_p1 = (u[(i + 2).min(n - 1)] - u[i]) / (2.0 * dx);
            let up_m1 = (u[i] - u[(i as isize - 2).max(0) as usize]) / (2.0 * dx);

            let dlup_p1 = (lagrangian(x + dx, u[i + 1], up_p1 + eps)
                - lagrangian(x + dx, u[i + 1], up_p1 - eps))
                / (2.0 * eps);
            let dlup_m1 = (lagrangian(x - dx, u[i - 1], up_m1 + eps)
                - lagrangian(x - dx, u[i - 1], up_m1 - eps))
                / (2.0 * eps);

            let d_dlup_dx = (dlup_p1 - dlup_m1) / (2.0 * dx);
            residual[i] = dlu - d_dlup_dx;
        }
        residual
    }

    pub fn augmented_lagrangian<J, G>(
        j: J,
        g: G,
        u0: &[f64],
        rho: f64,
        step: f64,
        max_iter: usize,
    ) -> (Vec<f64>, f64)
    where
        J: Fn(&[f64]) -> f64,
        G: Fn(&[f64]) -> f64,
    {
        let mut u = u0.to_vec();
        let mut lambda = 0.0_f64;
        let eps = step * 1e-4;

        for _ in 0..max_iter {
            let augmented = |v: &[f64]| {
                let gv = g(v);
                j(v) + lambda * gv + rho / 2.0 * gv * gv
            };
            let grad = FunctionalDerivative::frechet_gradient(augmented, &u, eps);
            for (ui, gi) in u.iter_mut().zip(grad.iter()) {
                *ui -= step * gi;
            }
            lambda += rho * g(&u);
        }
        (u, lambda)
    }

    pub fn first_variation(u: &[f64], v: &[f64], f_prime: &[f64], dx: f64) -> f64 {
        assert_eq!(u.len(), v.len());
        assert_eq!(u.len(), f_prime.len());
        f_prime
            .iter()
            .zip(v.iter())
            .map(|(fp, vi)| fp * vi)
            .sum::<f64>()
            * dx
    }

    pub fn steepest_descent<J>(
        j: J,
        u0: &[f64],
        step: f64,
        tol: f64,
        max_iter: usize,
    ) -> (Vec<f64>, Vec<f64>)
    where
        J: Fn(&[f64]) -> f64,
    {
        let eps = step * 1e-4;
        let mut u = u0.to_vec();
        let mut history = Vec::new();
        for _ in 0..max_iter {
            let val = j(&u);
            history.push(val);
            let grad = FunctionalDerivative::frechet_gradient(&j, &u, eps);
            let grad_norm: f64 = grad.iter().map(|x| x * x).sum::<f64>().sqrt();
            if grad_norm < tol {
                break;
            }
            for (ui, gi) in u.iter_mut().zip(grad.iter()) {
                *ui -= step * gi;
            }
        }
        (u, history)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_function_space_eval() {
        let space = FunctionSpace::new(vec![|x: f64| x, |x: f64| x * x]);
        assert!((space.eval(0, 2.0) - 2.0).abs() < 1e-12);
        assert!((space.eval(1, 3.0) - 9.0).abs() < 1e-12);
    }

    #[test]
    fn test_function_space_dim() {
        let space = FunctionSpace::new(vec![|x: f64| x]);
        assert_eq!(space.dim(), 1);
    }

    #[test]
    fn test_function_space_combine() {
        let space = FunctionSpace::new(vec![|x: f64| x, |x: f64| x * x]);
        let val = space.combine(&[2.0, 3.0], 1.0);
        assert!((val - 5.0).abs() < 1e-12);
    }

    #[test]
    fn test_function_space_debug() {
        let space = FunctionSpace::new(vec![|x: f64| x]);
        let s = format!("{space:?}");
        assert!(s.contains("dim: 1"));
    }

    #[test]
    fn test_l2_inner_product_orthogonal() {
        let n = 1000;
        let dx = 2.0 * PI / n as f64;
        let sin_v: Vec<f64> = (0..n).map(|i| ((i as f64 + 0.5) * dx).sin()).collect();
        let cos_v: Vec<f64> = (0..n).map(|i| ((i as f64 + 0.5) * dx).cos()).collect();
        let ip = l2_inner_product(&sin_v, &cos_v, dx);
        assert!(ip.abs() < 1e-10, "sin/cos inner product = {ip}");
    }

    #[test]
    fn test_l2_inner_product_self_is_norm_sq() {
        let f = vec![1.0, 2.0, 3.0];
        let dx = 0.1;
        let ip = l2_inner_product(&f, &f, dx);
        let norm = l2_norm(&f, dx);
        assert!((ip - norm * norm).abs() < 1e-12);
    }

    #[test]
    fn test_l2_norm_constant() {
        let c = 3.0_f64;
        let n = 100;
        let dx = 0.01;
        let f = vec![c; n];
        let expected = c * ((n as f64) * dx).sqrt();
        let got = l2_norm(&f, dx);
        assert!((got - expected).abs() < 1e-10, "norm: {got} vs {expected}");
    }

    #[test]
    fn test_l2_norm_zero_vector() {
        let f = vec![0.0_f64; 50];
        assert!(l2_norm(&f, 0.01).abs() < 1e-12);
    }

    #[test]
    fn test_gram_schmidt_two_vectors() {
        let n = 100;
        let dx = 1.0 / n as f64;
        let v1: Vec<f64> = (0..n).map(|i| (i as f64 + 0.5) * dx).collect();
        let v2: Vec<f64> = (0..n).map(|_| 1.0).collect();
        let q = gram_schmidt_orthogonalize(&[v1, v2], dx);
        assert_eq!(q.len(), 2);
        let ip = l2_inner_product(&q[0], &q[1], dx);
        assert!(ip.abs() < 1e-10, "not orthogonal: {ip}");
        let n0 = l2_norm(&q[0], dx);
        let n1 = l2_norm(&q[1], dx);
        assert!((n0 - 1.0).abs() < 1e-10, "q0 norm: {n0}");
        assert!((n1 - 1.0).abs() < 1e-10, "q1 norm: {n1}");
    }

    #[test]
    fn test_gram_schmidt_empty() {
        let q = gram_schmidt_orthogonalize(&[], 0.01);
        assert!(q.is_empty());
    }

    #[test]
    fn test_gram_schmidt_linearly_dependent() {
        let n = 10;
        let dx = 0.1;
        let v: Vec<f64> = (0..n).map(|i| i as f64).collect();
        let v2 = v.iter().map(|x| 2.0 * x).collect::<Vec<_>>();
        let q = gram_schmidt_orthogonalize(&[v, v2], dx);
        let norm2 = l2_norm(&q[1], dx);
        assert!(norm2 < 1e-12, "dependent vector should be zero: {norm2}");
    }

    #[test]
    fn test_gram_schmidt_three_vectors() {
        let n = 200;
        let dx = 2.0 * PI / n as f64;
        let v1: Vec<f64> = (0..n).map(|i| ((i as f64 + 0.5) * dx).sin()).collect();
        let v2: Vec<f64> = (0..n).map(|i| ((i as f64 + 0.5) * dx).cos()).collect();
        let v3: Vec<f64> = (0..n)
            .map(|i| (2.0 * (i as f64 + 0.5) * dx).sin())
            .collect();
        let q = gram_schmidt_orthogonalize(&[v1, v2, v3], dx);
        for i in 0..3 {
            for j in (i + 1)..3 {
                let ip = l2_inner_product(&q[i], &q[j], dx);
                assert!(ip.abs() < 1e-9, "q[{i}]·q[{j}] = {ip}");
            }
        }
    }

    #[test]
    fn test_fourier_constant_function() {
        let n = 100;
        let dx = 1.0 / n as f64;
        let f = vec![1.0_f64; n];
        let coeffs = fourier_series_coeffs(&f, dx, 3);
        // With the aₖ = (2/L)∫f·cos convention, a0 is twice the mean.
        assert!((coeffs[0].0 - 2.0).abs() < 1e-8, "a0 = {}", coeffs[0].0);
        for (k, &(_, bk)) in coeffs.iter().enumerate() {
            assert!(bk.abs() < 1e-8, "b{k} = {bk}");
        }
    }

    #[test]
    fn test_fourier_sine_function() {
        let n = 1000;
        let dx = 1.0 / n as f64;
        let f: Vec<f64> = (0..n)
            .map(|i| (2.0 * PI * (i as f64 + 0.5) * dx).sin())
            .collect();
        let coeffs = fourier_series_coeffs(&f, dx, 2);
        assert!((coeffs[1].1 - 1.0).abs() < 0.01, "b1 = {}", coeffs[1].1);
    }

    #[test]
    fn test_fourier_n_terms_length() {
        let f = vec![0.0_f64; 64];
        let c = fourier_series_coeffs(&f, 0.1, 5);
        assert_eq!(c.len(), 5);
    }

    #[test]
    fn test_chebyshev_constant() {
        let n = 32;
        // A constant function has the same samples at every Chebyshev node.
        let f = vec![1.0_f64; n];
        let c = chebyshev_expansion(&f, 4);
        assert!((c[0] - 1.0).abs() < 1e-10, "c0 = {}", c[0]);
        for k in 1..4 {
            assert!(c[k].abs() < 1e-10, "c{k} = {}", c[k]);
        }
    }

    #[test]
    fn test_chebyshev_empty_input() {
        let c = chebyshev_expansion(&[], 3);
        assert_eq!(c.len(), 3);
        for ci in c {
            assert!(ci.abs() < 1e-15);
        }
    }

    #[test]
    fn test_chebyshev_n_terms_length() {
        let f = vec![0.5_f64; 16];
        let c = chebyshev_expansion(&f, 6);
        assert_eq!(c.len(), 6);
    }

    #[test]
    fn test_chebyshev_x_polynomial() {
        let n = 64;
        // f(x) = x sampled at the Chebyshev nodes, i.e. f_j = cos(θ_j).
        let f: Vec<f64> = (0..n)
            .map(|k| (PI * (k as f64 + 0.5) / n as f64).cos())
            .collect();
        let c = chebyshev_expansion(&f, 4);
        assert!(c[0].abs() < 1e-10, "c0 = {}", c[0]);
        assert!((c[1] - 1.0).abs() < 1e-10, "c1 = {}", c[1]);
    }

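    // Added check: sampling T₂(x) = 2x² − 1 at the Chebyshev nodes gives
    // f(xⱼ) = cos(2θⱼ), so all the weight should land in c₂.
    #[test]
    fn test_chebyshev_t2_polynomial() {
        let n = 64;
        let f: Vec<f64> = (0..n)
            .map(|k| {
                let x = (PI * (k as f64 + 0.5) / n as f64).cos();
                2.0 * x * x - 1.0
            })
            .collect();
        let c = chebyshev_expansion(&f, 4);
        assert!(c[0].abs() < 1e-10, "c0 = {}", c[0]);
        assert!(c[1].abs() < 1e-10, "c1 = {}", c[1]);
        assert!((c[2] - 1.0).abs() < 1e-10, "c2 = {}", c[2]);
    }
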
    #[test]
    fn test_legendre_constant() {
        let n = 200;
        let f = vec![1.0_f64; n];
        let c = legendre_expansion(&f, 3);
        assert!((c[0] - 1.0).abs() < 1e-2, "c0 = {}", c[0]);
        assert!(c[1].abs() < 1e-4, "c1 = {}", c[1]);
        assert!(c[2].abs() < 1e-3, "c2 = {}", c[2]);
    }

    #[test]
    fn test_legendre_n_terms_length() {
        let f = vec![1.0_f64; 50];
        let c = legendre_expansion(&f, 5);
        assert_eq!(c.len(), 5);
    }

    #[test]
    fn test_legendre_p_values() {
        assert!((legendre_p(0, 0.5) - 1.0).abs() < 1e-12);
        assert!((legendre_p(1, 0.5) - 0.5).abs() < 1e-12);
        let p2 = (3.0 * 0.5_f64.powi(2) - 1.0) / 2.0;
        assert!((legendre_p(2, 0.5) - p2).abs() < 1e-12);
    }

    #[test]
    fn test_legendre_empty_input() {
        let c = legendre_expansion(&[], 3);
        assert_eq!(c.len(), 3);
    }

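    // Added check: expanding f(x) = x should put (almost) all the weight in
    // c₁, since c₁ = (3/2)·∫₋₁¹ x·P₁(x) dx = 1, up to quadrature error.
    #[test]
    fn test_legendre_linear_function() {
        let n = 400;
        let dx = 2.0 / n as f64;
        let f: Vec<f64> = (0..n).map(|j| -1.0 + (j as f64 + 0.5) * dx).collect();
        let c = legendre_expansion(&f, 3);
        assert!(c[0].abs() < 1e-10, "c0 = {}", c[0]);
        assert!((c[1] - 1.0).abs() < 1e-4, "c1 = {}", c[1]);
        assert!(c[2].abs() < 1e-10, "c2 = {}", c[2]);
    }
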
    #[test]
    fn test_haar_transform_inverse_roundtrip() {
        let signal = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
        let coeffs = wavelet_haar_transform(&signal);
        let recovered = wavelet_haar_inverse(&coeffs);
        for (a, b) in signal.iter().zip(recovered.iter()) {
            assert!((a - b).abs() < 1e-10, "roundtrip mismatch: {a} vs {b}");
        }
    }

    #[test]
    fn test_haar_transform_length_preserved() {
        let signal = vec![1.0; 16];
        let c = wavelet_haar_transform(&signal);
        assert_eq!(c.len(), 16);
    }

    #[test]
    fn test_haar_constant_signal() {
        let c = 4.0_f64;
        let signal = vec![c; 8];
        let coeffs = wavelet_haar_transform(&signal);
        assert!(coeffs[0].abs() > 1e-6, "DC coeff should be non-zero");
        for &coef in &coeffs[1..] {
            assert!(coef.abs() < 1e-10, "detail coeff = {coef}");
        }
    }

    #[test]
    fn test_haar_inverse_length_preserved() {
        let coeffs = vec![1.0; 8];
        let out = wavelet_haar_inverse(&coeffs);
        assert_eq!(out.len(), 8);
    }

    #[test]
    fn test_haar_two_point() {
        let signal = vec![2.0, 4.0];
        let c = wavelet_haar_transform(&signal);
        let r = wavelet_haar_inverse(&c);
        assert!((r[0] - 2.0).abs() < 1e-12);
        assert!((r[1] - 4.0).abs() < 1e-12);
    }

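    // Added check: the Haar transform is orthonormal, so it should preserve
    // the discrete energy Σ xᵢ² (a Parseval identity).
    #[test]
    fn test_haar_energy_preserved() {
        let signal = vec![1.0, -2.0, 3.0, 4.0, -5.0, 6.0, 7.0, 8.0];
        let coeffs = wavelet_haar_transform(&signal);
        let e_signal: f64 = signal.iter().map(|x| x * x).sum();
        let e_coeffs: f64 = coeffs.iter().map(|x| x * x).sum();
        assert!(
            (e_signal - e_coeffs).abs() < 1e-10,
            "energy {e_signal} vs {e_coeffs}"
        );
    }
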
    #[test]
    fn test_sobolev_norm_s0_equals_l2() {
        let f = vec![1.0, 2.0, 3.0];
        let df = vec![0.0, 0.0, 0.0];
        let dx = 0.1;
        let sob = sobolev_norm(&f, &df, dx, 0.0);
        let l2 = l2_norm(&f, dx);
        assert!((sob - l2).abs() < 1e-12);
    }

    #[test]
    fn test_sobolev_norm_positive() {
        let f = vec![1.0; 10];
        let df = vec![0.0; 10];
        let s = sobolev_norm(&f, &df, 0.1, 1.0);
        assert!(s > 0.0);
    }

    #[test]
    fn test_sobolev_norm_larger_with_gradient() {
        let f = vec![1.0; 8];
        let df_zero = vec![0.0; 8];
        let df_nonzero = vec![1.0; 8];
        let dx = 0.1;
        let s0 = sobolev_norm(&f, &df_zero, dx, 1.0);
        let s1 = sobolev_norm(&f, &df_nonzero, dx, 1.0);
        assert!(s1 > s0, "nonzero derivative should increase Sobolev norm");
    }

    #[test]
    fn test_operator_norm_identity() {
        let id = vec![
            vec![1.0, 0.0, 0.0],
            vec![0.0, 1.0, 0.0],
            vec![0.0, 0.0, 1.0],
        ];
        let sigma = operator_norm_estimate(&id);
        assert!((sigma - 1.0).abs() < 1e-4, "identity σ_max = {sigma}");
    }

    #[test]
    fn test_operator_norm_scaled_identity() {
        let scale = 5.0_f64;
        let m = vec![vec![scale, 0.0], vec![0.0, scale]];
        let sigma = operator_norm_estimate(&m);
        assert!(
            (sigma - scale).abs() < 1e-3,
            "scaled identity σ_max = {sigma}"
        );
    }

    #[test]
    fn test_operator_norm_empty_matrix() {
        assert_eq!(operator_norm_estimate(&[]), 0.0);
    }

    #[test]
    fn test_operator_norm_single_element() {
        let m = vec![vec![3.0_f64]];
        let sigma = operator_norm_estimate(&m);
        assert!((sigma - 3.0).abs() < 1e-4, "1x1 matrix σ = {sigma}");
    }

    #[test]
    fn test_operator_norm_rectangular() {
        let m = vec![vec![3.0_f64, 4.0]];
        let sigma = operator_norm_estimate(&m);
        assert!((sigma - 5.0).abs() < 1e-4, "σ = {sigma}, expected 5");
    }

    #[test]
    fn test_operator_norm_diagonal() {
        let m = vec![
            vec![2.0, 0.0, 0.0],
            vec![0.0, 5.0, 0.0],
            vec![0.0, 0.0, 3.0],
        ];
        let sigma = operator_norm_estimate(&m);
        assert!(
            (sigma - 5.0).abs() < 0.1,
            "diagonal max singular val = {sigma}"
        );
    }

    #[test]
    fn test_hilbert_inner_product() {
        let hs = HilbertSpace::new(4, 0.25);
        let f = vec![1.0, 2.0, 3.0, 4.0];
        let g = vec![4.0, 3.0, 2.0, 1.0];
        let ip = hs.inner_product(&f, &g);
        assert!((ip - 5.0).abs() < 1e-12, "inner product = {ip}");
    }

    #[test]
    fn test_hilbert_norm_positive() {
        let hs = HilbertSpace::new(4, 0.25);
        let f = vec![1.0; 4];
        assert!(hs.norm(&f) > 0.0);
    }

    #[test]
    fn test_hilbert_normalize_unit_norm() {
        let hs = HilbertSpace::new(4, 0.25);
        let f = vec![3.0, 1.0, 4.0, 1.0];
        let fn_ = hs.normalize(&f);
        let nm = hs.norm(&fn_);
        assert!((nm - 1.0).abs() < 1e-10, "normalized norm = {nm}");
    }

    #[test]
    fn test_hilbert_orthogonality_check() {
        let n = 100;
        let dx = 2.0 * PI / n as f64;
        let hs = HilbertSpace::new(n, dx);
        let sin_v: Vec<f64> = (0..n).map(|i| ((i as f64 + 0.5) * dx).sin()).collect();
        let cos_v: Vec<f64> = (0..n).map(|i| ((i as f64 + 0.5) * dx).cos()).collect();
        assert!(hs.are_orthogonal(&sin_v, &cos_v, 1e-9));
    }

    #[test]
    fn test_hilbert_projection_length() {
        let n = 8;
        let dx = 1.0 / n as f64;
        let hs = HilbertSpace::new(n, dx);
        let f = vec![1.0; n];
        let basis = vec![(0..n).map(|i| i as f64 * dx).collect::<Vec<_>>()];
        let p = hs.project(&f, &basis);
        assert_eq!(p.len(), n);
    }

    #[test]
    fn test_hilbert_distance_zero_self() {
        let hs = HilbertSpace::new(4, 0.25);
        let f = vec![1.0, 2.0, 3.0, 4.0];
        assert!(hs.distance(&f, &f).abs() < 1e-12);
    }

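    // Added check: projecting a vector onto a basis that spans it should
    // reproduce the vector itself.
    #[test]
    fn test_hilbert_project_recovers_member() {
        let hs = HilbertSpace::new(4, 0.25);
        let f = vec![1.0, 2.0, 3.0, 4.0];
        let p = hs.project(&f, &[f.clone()]);
        assert!(hs.distance(&p, &f) < 1e-10, "projection altered f");
    }
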
    #[test]
    fn test_hilbert_orthonormalize_orthogonality() {
        let n = 100;
        let dx = 2.0 * PI / n as f64;
        let hs = HilbertSpace::new(n, dx);
        let v1: Vec<f64> = (0..n).map(|i| ((i as f64 + 0.5) * dx).sin()).collect();
        let v2: Vec<f64> = (0..n).map(|i| ((i as f64 + 0.5) * dx).cos()).collect();
        let q = hs.orthonormalize(&[v1, v2]);
        let ip = hs.inner_product(&q[0], &q[1]);
        assert!(ip.abs() < 1e-9, "orthonormalized inner product = {ip}");
    }

    #[test]
    fn test_banach_l2_norm_matches_hilbert() {
        let dx = 0.25;
        let bs = BanachSpace::new(2.0, dx);
        let hs = HilbertSpace::new(4, dx);
        let f = vec![1.0, 2.0, 3.0, 4.0];
        let bn = bs.norm(&f);
        let hn = hs.norm(&f);
        assert!((bn - hn).abs() < 1e-10, "L2 norms match: {bn} vs {hn}");
    }

    #[test]
    fn test_banach_l1_norm() {
        let dx = 1.0;
        let bs = BanachSpace::new(1.0, dx);
        let f = vec![1.0, -2.0, 3.0];
        let n = bs.norm(&f);
        assert!((n - 6.0).abs() < 1e-12, "L1 norm = {n}");
    }

    #[test]
    fn test_banach_linf_norm() {
        let dx = 0.1;
        let bs = BanachSpace::new(1e12, dx);
        let f = vec![1.0, 5.0, 3.0];
        let n = bs.norm(&f);
        assert!(n > 4.0 && n <= 5.0 * 1.01, "L∞-like norm = {n}");
    }

    #[test]
    fn test_banach_holder_inequality() {
        let dx = 0.1;
        let bs = BanachSpace::new(2.0, dx);
        let f = vec![1.0, 2.0, 3.0, 4.0];
        let g = vec![4.0, 3.0, 2.0, 1.0];
        let (lhs, rhs) = bs.holder_bound(&f, &g);
        assert!(lhs <= rhs + 1e-10, "Hölder: {lhs} <= {rhs}");
    }

    #[test]
    fn test_banach_unit_ball() {
        let dx = 1.0;
        let bs = BanachSpace::new(2.0, dx);
        let f = vec![0.5, 0.0];
        assert!(bs.in_unit_ball(&f, 1e-10));
    }

    #[test]
    fn test_banach_linear_functional() {
        let dx = 0.5;
        let bs = BanachSpace::new(2.0, dx);
        let f = vec![1.0, 1.0];
        let g = vec![2.0, 2.0];
        let val = bs.bounded_linear_functional(&f, &g);
        assert!((val - 2.0).abs() < 1e-12, "functional = {val}");
    }

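    // Added checks: the sup norm must account for signs via |x|, and for
    // p = 2 the Hölder-conjugate exponent is q = 2, so the dual norm
    // coincides with the norm itself.
    #[test]
    fn test_banach_linf_negative_values() {
        let bs = BanachSpace::new(1e12, 0.1);
        let f = vec![1.0, -5.0, 3.0];
        let n = bs.norm(&f);
        assert!((n - 5.0).abs() < 1e-12, "L∞ norm with negative entry = {n}");
    }

    #[test]
    fn test_banach_dual_norm_self_dual_p2() {
        let bs = BanachSpace::new(2.0, 0.1);
        let g = vec![1.0, 2.0, 2.0];
        assert!((bs.dual_norm(&g) - bs.norm(&g)).abs() < 1e-12);
    }
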
    #[test]
    fn test_operator_spectrum_trace() {
        let m = vec![vec![2.0, 1.0], vec![0.0, 3.0]];
        let op = OperatorSpectrum::new(m);
        assert!((op.trace() - 5.0).abs() < 1e-12);
    }

    #[test]
    fn test_operator_spectrum_frobenius() {
        let m = vec![vec![3.0, 4.0], vec![0.0, 0.0]];
        let op = OperatorSpectrum::new(m);
        assert!((op.frobenius_norm() - 5.0).abs() < 1e-12);
    }

    #[test]
    fn test_operator_spectrum_apply() {
        let m = vec![vec![2.0, 0.0], vec![0.0, 3.0]];
        let op = OperatorSpectrum::new(m);
        let v = vec![1.0, 2.0];
        let w = op.apply(&v);
        assert!((w[0] - 2.0).abs() < 1e-12);
        assert!((w[1] - 6.0).abs() < 1e-12);
    }

    #[test]
    fn test_operator_spectrum_spectral_radius() {
        let m = vec![vec![5.0, 0.0], vec![0.0, 2.0]];
        let op = OperatorSpectrum::new(m);
        let rho = op.spectral_radius(200);
        assert!((rho - 5.0).abs() < 0.1, "spectral radius = {rho}");
    }

    #[test]
    fn test_operator_spectrum_diagonal() {
        let m = vec![vec![7.0, 0.0], vec![0.0, 3.0]];
        let op = OperatorSpectrum::new(m);
        let d = op.diagonal();
        assert!((d[0] - 7.0).abs() < 1e-12);
        assert!((d[1] - 3.0).abs() < 1e-12);
    }

    #[test]
    fn test_operator_spectrum_resolvent_finite() {
        let m = vec![vec![1.0, 0.0], vec![0.0, 2.0]];
        let op = OperatorSpectrum::new(m);
        let r = op.resolvent_norm(10.0);
        assert!(r.is_finite() && r > 0.0, "resolvent = {r}");
    }

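    // Added check: with the shift placed above the largest eigenvalue, the
    // shifted power iteration should pick out the smallest eigenvalue of a
    // diagonal matrix.
    #[test]
    fn test_operator_spectrum_smallest_eigenvalue() {
        let m = vec![vec![5.0, 0.0], vec![0.0, 2.0]];
        let op = OperatorSpectrum::new(m);
        let lam = op.smallest_eigenvalue(10.0, 200);
        assert!((lam - 2.0).abs() < 0.1, "smallest eigenvalue = {lam}");
    }
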
    #[test]
    fn test_sobolev_space_h0_equals_l2() {
        let n = 8;
        let dx = 0.125;
        let hs = HilbertSpace::new(n, dx);
        let ss = SobolevSpace::new(n, dx, 0);
        let f = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
        assert!(
            (ss.norm(&f) - hs.norm(&f)).abs() < 1e-10,
            "H^0 = L^2: {} vs {}",
            ss.norm(&f),
            hs.norm(&f)
        );
    }

    #[test]
    fn test_sobolev_space_norm_positive() {
        let ss = SobolevSpace::new(8, 0.125, 1);
        let f = vec![1.0; 8];
        assert!(ss.norm(&f) > 0.0);
    }

    #[test]
    fn test_sobolev_weak_derivative_constant() {
        let f = vec![3.0; 8];
        let d = SobolevSpace::weak_derivative(&f, 0.125);
        for &di in &d[1..d.len() - 1] {
            assert!(di.abs() < 1e-12, "interior deriv of constant: {di}");
        }
    }

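    // Added check: the centered difference (and the one-sided stencils at
    // the endpoints) are exact for a linear function.
    #[test]
    fn test_sobolev_weak_derivative_linear() {
        let dx = 0.125;
        let f: Vec<f64> = (0..8).map(|i| i as f64 * dx).collect();
        let d = SobolevSpace::weak_derivative(&f, dx);
        for &di in &d {
            assert!((di - 1.0).abs() < 1e-10, "derivative of x: {di}");
        }
    }
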
    #[test]
    fn test_sobolev_trace_operator() {
        let ss = SobolevSpace::new(8, 0.125, 1);
        let f: Vec<f64> = (0..8).map(|i| i as f64).collect();
        let tr = ss.trace(&f);
        assert_eq!(tr[0], 0.0);
        assert_eq!(tr[1], 7.0);
    }

    #[test]
    fn test_sobolev_seminorm_nonneg() {
        let ss = SobolevSpace::new(8, 0.125, 1);
        let f: Vec<f64> = (0..8).map(|i| (i as f64 * 0.5).sin()).collect();
        assert!(ss.seminorm(&f) >= 0.0);
    }

    #[test]
    fn test_sobolev_inner_product_positive_definite() {
        let ss = SobolevSpace::new(8, 0.125, 1);
        let f = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
        let ip = ss.inner_product(&f, &f);
        assert!(ip > 0.0, "H^k inner product <f,f> > 0");
    }

    #[test]
    fn test_sobolev_is_in_space() {
        let ss = SobolevSpace::new(8, 0.125, 2);
        let f: Vec<f64> = (0..8).map(|i| (i as f64).sin()).collect();
        assert!(ss.is_in_space(&f));
    }

    #[test]
    fn test_gateaux_derivative_linear() {
        let j = |f: &[f64]| f.iter().sum::<f64>();
        let f = vec![1.0, 2.0, 3.0];
        let h = vec![1.0, 0.0, 0.0];
        let dj = FunctionalDerivative::gateaux(j, &f, &h, 1e-6);
        assert!((dj - 1.0).abs() < 1e-6, "Gateaux derivative = {dj}");
    }

    #[test]
    fn test_frechet_gradient_quadratic() {
        let j = |f: &[f64]| f.iter().map(|x| x * x).sum::<f64>();
        let f = vec![1.0, 2.0, 3.0];
        let grad = FunctionalDerivative::frechet_gradient(j, &f, 1e-5);
        for (i, (&gi, &fi)) in grad.iter().zip(f.iter()).enumerate() {
            assert!(
                (gi - 2.0 * fi).abs() < 1e-4,
                "grad[{i}] = {gi}, expected {}",
                2.0 * fi
            );
        }
    }

    #[test]
    fn test_gradient_descent_minimizes() {
        let j = |f: &[f64]| (f[0] - 1.0).powi(2) + (f[1] - 2.0).powi(2);
        let f0 = vec![0.0, 0.0];
        let (fmin, val) = FunctionalDerivative::gradient_descent(j, &f0, 0.1, 200, 1e-5);
        assert!(val < 0.1, "not minimized: val = {val}");
        assert!((fmin[0] - 1.0).abs() < 0.1, "f[0] = {}", fmin[0]);
        assert!((fmin[1] - 2.0).abs() < 0.1, "f[1] = {}", fmin[1]);
    }

    #[test]
    fn test_newton_step_moves_toward_minimum() {
        let j = |f: &[f64]| (f[0] - 3.0).powi(2);
        let f = vec![1.0];
        let f_new = FunctionalDerivative::newton_step(j, &f, 1e-5);
        assert!(
            (f_new[0] - 3.0).abs() < 0.1,
            "Newton step: f[0] = {}",
            f_new[0]
        );
    }

    #[test]
    fn test_second_gateaux_symmetric() {
        let j = |f: &[f64]| f.iter().map(|x| x * x).sum::<f64>();
        let f = vec![1.0, 1.0];
        let h = vec![1.0, 0.0];
        let k_dir = vec![0.0, 1.0];
        let d2_hk = FunctionalDerivative::second_gateaux(j, &f, &h, &k_dir, 1e-4);
        let d2_kh = FunctionalDerivative::second_gateaux(j, &f, &k_dir, &h, 1e-4);
        assert!(
            (d2_hk - d2_kh).abs() < 1e-4,
            "second Gateaux not symmetric: {d2_hk} vs {d2_kh}"
        );
    }

    #[test]
    fn test_euler_lagrange_residual_length() {
        let u: Vec<f64> = (0..10).map(|i| i as f64 * 0.1).collect();
        let res = VariationalProblem::euler_lagrange_residual(|_x, _u, up| up * up, &u, 0.1);
        assert_eq!(res.len(), u.len());
    }

    #[test]
    fn test_euler_lagrange_linear_minimizer_residual() {
        let n = 10;
        let dx = 1.0 / n as f64;
        let u: Vec<f64> = (0..n).map(|i| i as f64 * dx).collect();
        let res = VariationalProblem::euler_lagrange_residual(|_x, _u, up| up * up, &u, dx);
        // A linear u minimizes ∫ u'² dx, so the residual should vanish away
        // from the boundary-clamped points i = 1 and i = n − 2.
        for (i, &r) in res.iter().enumerate().take(n - 2).skip(2) {
            assert!(r.abs() < 1e-8, "interior residual[{i}] = {r}");
        }
    }

    #[test]
    fn test_steepest_descent_minimizes() {
        let c = 2.0_f64;
        let j = move |f: &[f64]| f.iter().map(|x| (x - c).powi(2)).sum::<f64>();
        let u0 = vec![0.0; 4];
        let (u, hist) = VariationalProblem::steepest_descent(j, &u0, 0.1, 1e-6, 300);
        assert!(!hist.is_empty());
        for &ui in &u {
            assert!((ui - c).abs() < 0.1, "steepest descent: ui = {ui}");
        }
    }

    #[test]
    fn test_first_variation_linear() {
        let u = vec![1.0; 4];
        let v = vec![1.0; 4];
        let fp = vec![2.0; 4];
        let dx = 0.25;
        let dj = VariationalProblem::first_variation(&u, &v, &fp, dx);
        assert!((dj - 2.0).abs() < 1e-12, "first variation = {dj}");
    }

    #[test]
    fn test_augmented_lagrangian_reduces_constraint() {
        let j = |f: &[f64]| f.iter().map(|x| x * x).sum::<f64>();
        let g = |f: &[f64]| f.iter().sum::<f64>() - 1.0;
        let u0 = vec![0.5, 0.5];
        let (u, _lambda) = VariationalProblem::augmented_lagrangian(j, g, &u0, 1.0, 0.05, 100);
        // The iterate should end up (approximately) feasible.
        assert!(g(&u).abs() < 0.05, "constraint residual = {}", g(&u));
    }
}