
// oxiphysics_core/functional_analysis.rs

1#![allow(clippy::needless_range_loop)]
2// Copyright 2026 COOLJAPAN OU (Team KitaSan)
3// SPDX-License-Identifier: Apache-2.0
4
5//! Functional analysis tools for physics simulation.
6//!
7//! Provides Hilbert and Banach spaces, operator spectrum, Sobolev spaces,
8//! functional derivatives (Gâteaux/Fréchet), and variational problems
9//! (Euler-Lagrange, Lagrange multiplier).  Also includes L² inner products,
10//! Gram–Schmidt orthogonalization, Fourier/Chebyshev/Legendre expansions,
11//! Sobolev norms, and operator norm estimation via power iteration.
12
13#![allow(dead_code)]
14
15use std::f64::consts::PI;
16
17// ─────────────────────────────────────────────────────────────────────────────
18// FunctionSpace
19// ─────────────────────────────────────────────────────────────────────────────
20
21/// A finite-dimensional function space spanned by a list of basis functions.
22///
23/// Each basis element is a plain function pointer `fn(f64) -> f64` to avoid
24/// lifetime complications with closures.
25#[derive(Clone)]
26pub struct FunctionSpace {
27    /// Basis functions spanning this space.
28    pub basis: Vec<fn(f64) -> f64>,
29}
30
31impl FunctionSpace {
32    /// Create a new function space from a vector of basis functions.
33    pub fn new(basis: Vec<fn(f64) -> f64>) -> Self {
34        Self { basis }
35    }
36
37    /// Evaluate the `i`-th basis function at `x`.
38    pub fn eval(&self, i: usize, x: f64) -> f64 {
39        (self.basis[i])(x)
40    }
41
42    /// Number of basis functions.
43    pub fn dim(&self) -> usize {
44        self.basis.len()
45    }
46
47    /// Evaluate a linear combination `∑ coeffs[i] * basis[i](x)`.
48    ///
49    /// Panics if `coeffs.len() != self.dim()`.
50    pub fn combine(&self, coeffs: &[f64], x: f64) -> f64 {
51        assert_eq!(
52            coeffs.len(),
53            self.basis.len(),
54            "coefficient length mismatch"
55        );
56        coeffs
57            .iter()
58            .zip(self.basis.iter())
59            .map(|(c, f)| c * f(x))
60            .sum()
61    }
62}
63
64impl std::fmt::Debug for FunctionSpace {
65    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
66        write!(f, "FunctionSpace {{ dim: {} }}", self.basis.len())
67    }
68}
69
70// ─────────────────────────────────────────────────────────────────────────────
71// L² inner product and norm
72// ─────────────────────────────────────────────────────────────────────────────
73
74/// Compute the discrete L² inner product `⟨f, g⟩ = dx · Σ f[i]·g[i]`.
75///
76/// `f` and `g` must have the same length. `dx` is the uniform grid spacing.
77pub fn l2_inner_product(f: &[f64], g: &[f64], dx: f64) -> f64 {
78    assert_eq!(f.len(), g.len(), "l2_inner_product: slice length mismatch");
79    f.iter().zip(g.iter()).map(|(a, b)| a * b).sum::<f64>() * dx
80}
81
82/// Compute the discrete L² norm `‖f‖ = √(⟨f, f⟩)`.
83pub fn l2_norm(f: &[f64], dx: f64) -> f64 {
84    l2_inner_product(f, f, dx).sqrt()
85}
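
// Illustrative sketch (the `example_*` helper below is hypothetical and not part
// of the original API): with midpoint samples, the discrete L² norm approximates
// the continuous one.  For f(x) = x on [0, 1], ‖f‖_{L²} = 1/√3.
fn example_l2_norm_of_linear_function() {
    let n = 1_000;
    let dx = 1.0 / n as f64;
    // Midpoint samples of f(x) = x on [0, 1].
    let f: Vec<f64> = (0..n).map(|i| (i as f64 + 0.5) * dx).collect();
    let expected = (1.0_f64 / 3.0).sqrt();
    assert!((l2_norm(&f, dx) - expected).abs() < 1e-4);
}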
86
87// ─────────────────────────────────────────────────────────────────────────────
88// Gram–Schmidt orthogonalization
89// ─────────────────────────────────────────────────────────────────────────────
90
91/// Orthogonalize a set of sampled basis vectors using the modified Gram–Schmidt
92/// process in the L² inner product.
93///
94/// Each element of `basis` is a sampled function on a uniform grid with spacing
95/// `dx`. Returns an orthonormal set (same length as input) in the same order.
96/// Linearly dependent vectors become zero vectors.
97pub fn gram_schmidt_orthogonalize(basis: &[Vec<f64>], dx: f64) -> Vec<Vec<f64>> {
98    let n = basis.len();
99    if n == 0 {
100        return vec![];
101    }
102    let m = basis[0].len();
103    let mut q: Vec<Vec<f64>> = Vec::with_capacity(n);
104
105    for v in basis.iter() {
106        let mut u = v.clone();
107        // Subtract projections onto already-computed orthonormal vectors
108        for qi in q.iter() {
109            let proj = l2_inner_product(&u, qi, dx);
110            for (ui, qij) in u.iter_mut().zip(qi.iter()) {
111                *ui -= proj * qij;
112            }
113        }
114        let norm = l2_norm(&u, dx);
115        if norm > 1e-14 {
116            for ui in u.iter_mut() {
117                *ui /= norm;
118            }
119        } else {
120            u = vec![0.0; m];
121        }
122        q.push(u);
123    }
124    q
125}
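
// Illustrative sketch (hypothetical helper, not part of the original API):
// orthonormalizing the sampled monomials {1, x} on [0, 1] yields the first two
// shifted-Legendre directions up to sign; here we only check orthonormality.
fn example_gram_schmidt_monomials() {
    let n = 400;
    let dx = 1.0 / n as f64;
    let ones = vec![1.0_f64; n];
    let xs: Vec<f64> = (0..n).map(|i| (i as f64 + 0.5) * dx).collect();
    let q = gram_schmidt_orthogonalize(&[ones, xs], dx);
    assert!((l2_norm(&q[0], dx) - 1.0).abs() < 1e-10);
    assert!((l2_norm(&q[1], dx) - 1.0).abs() < 1e-10);
    assert!(l2_inner_product(&q[0], &q[1], dx).abs() < 1e-10);
}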
126
127// ─────────────────────────────────────────────────────────────────────────────
128// Fourier series coefficients
129// ─────────────────────────────────────────────────────────────────────────────
130
131/// Compute the Fourier series coefficients `(aₙ, bₙ)` for `n = 0, 1, …, n_terms-1`.
132///
/// Assumes `f` is sampled at the cell midpoints `xᵢ = (i + 0.5)·dx` of a
/// uniform grid over one period `[0, L]`, where `L = f.len() * dx`.
/// Returns `n_terms` pairs `(aₙ, bₙ)` where
135/// - `a₀ = (2/L) ∫ f dx` (zeroth cosine term)
136/// - `aₙ = (2/L) ∫ f cos(2πnx/L) dx`
137/// - `bₙ = (2/L) ∫ f sin(2πnx/L) dx`
138pub fn fourier_series_coeffs(f: &[f64], dx: f64, n_terms: usize) -> Vec<(f64, f64)> {
139    let n = f.len();
140    let l = n as f64 * dx;
141    let norm = 2.0 / l;
142    (0..n_terms)
143        .map(|k| {
144            let mut a = 0.0_f64;
145            let mut b = 0.0_f64;
146            for (i, &fi) in f.iter().enumerate() {
147                let x = (i as f64 + 0.5) * dx;
148                let arg = 2.0 * PI * (k as f64) * x / l;
149                a += fi * arg.cos();
150                b += fi * arg.sin();
151            }
152            (a * norm * dx, b * norm * dx)
153        })
154        .collect()
155}
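
// Illustrative sketch (hypothetical helper, not part of the original API): with
// the coefficient convention above, a truncated Fourier sum is reconstructed as
//   f(x) ≈ a₀/2 + Σ_{n≥1} aₙ·cos(2πnx/L) + bₙ·sin(2πnx/L).
fn example_fourier_partial_sum(coeffs: &[(f64, f64)], x: f64, l: f64) -> f64 {
    // The n = 0 pair contributes a₀/2 (its sine part is zero).
    let mut acc = coeffs.first().map_or(0.0, |&(a0, _)| 0.5 * a0);
    for (n, &(a, b)) in coeffs.iter().enumerate().skip(1) {
        let arg = 2.0 * PI * n as f64 * x / l;
        acc += a * arg.cos() + b * arg.sin();
    }
    acc
}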
156
157// ─────────────────────────────────────────────────────────────────────────────
158// Chebyshev expansion
159// ─────────────────────────────────────────────────────────────────────────────
160
161/// Compute the Chebyshev expansion coefficients `cₙ` for a function sampled on
162/// `[-1, 1]` using the discrete cosine approach.
163///
164/// `f` is assumed to be evaluated at Chebyshev nodes
165/// `xₖ = cos(π(k + 0.5)/N)` for `k = 0, …, N-1`.
166/// Returns `n_terms` coefficients `c₀, c₁, …`.
167pub fn chebyshev_expansion(f: &[f64], n_terms: usize) -> Vec<f64> {
168    let n = f.len();
169    if n == 0 || n_terms == 0 {
170        return vec![0.0; n_terms];
171    }
172    (0..n_terms)
173        .map(|k| {
174            let sum: f64 = f
175                .iter()
176                .enumerate()
177                .map(|(j, &fj)| {
178                    let theta = PI * (j as f64 + 0.5) / n as f64;
179                    fj * (k as f64 * theta).cos()
180                })
181                .sum();
182            if k == 0 {
183                sum / n as f64
184            } else {
185                2.0 * sum / n as f64
186            }
187        })
188        .collect()
189}
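
// Illustrative sketch (hypothetical helper, not part of the original API): with
// this convention the truncated series is evaluated directly as Σₙ cₙ·Tₙ(x),
// using the recurrence T₀ = 1, T₁ = x, Tₙ₊₁ = 2x·Tₙ − Tₙ₋₁.
fn example_chebyshev_eval(coeffs: &[f64], x: f64) -> f64 {
    let mut t_prev = 1.0_f64; // T₀
    let mut t_curr = x; // T₁
    let mut acc = 0.0_f64;
    for (n, &c) in coeffs.iter().enumerate() {
        let t_n = match n {
            0 => 1.0,
            1 => x,
            _ => {
                let t_next = 2.0 * x * t_curr - t_prev;
                t_prev = t_curr;
                t_curr = t_next;
                t_curr
            }
        };
        acc += c * t_n;
    }
    acc
}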
190
191// ─────────────────────────────────────────────────────────────────────────────
192// Legendre expansion
193// ─────────────────────────────────────────────────────────────────────────────
194
195/// Evaluate the Legendre polynomial `Pₙ(x)` via the three-term recurrence.
196fn legendre_p(n: usize, x: f64) -> f64 {
197    if n == 0 {
198        return 1.0;
199    }
200    if n == 1 {
201        return x;
202    }
203    let mut p_prev = 1.0_f64;
204    let mut p_curr = x;
205    for k in 2..=n {
206        let p_next = ((2 * k - 1) as f64 * x * p_curr - (k - 1) as f64 * p_prev) / k as f64;
207        p_prev = p_curr;
208        p_curr = p_next;
209    }
210    p_curr
211}
212
213/// Compute the Legendre expansion coefficients for `f` sampled on `[-1, 1]`.
214///
/// `f` is evaluated at the `n = f.len()` cell midpoints `xⱼ = -1 + (j + 0.5)·dx`
/// of a uniform grid on `[-1, 1]`. Returns
216/// `n_terms` coefficients `c₀, …, c_{n_terms-1}` where
217/// `cₖ = (2k+1)/2 · ∫ f(x) Pₖ(x) dx`.
218pub fn legendre_expansion(f: &[f64], n_terms: usize) -> Vec<f64> {
219    let n = f.len();
220    if n == 0 || n_terms == 0 {
221        return vec![0.0; n_terms];
222    }
223    let dx = 2.0 / n as f64; // uniform spacing over [-1, 1]
224    (0..n_terms)
225        .map(|k| {
226            let sum: f64 = f
227                .iter()
228                .enumerate()
229                .map(|(j, &fj)| {
230                    let x = -1.0 + (j as f64 + 0.5) * dx;
231                    fj * legendre_p(k, x)
232                })
233                .sum();
234            (2 * k + 1) as f64 / 2.0 * sum * dx
235        })
236        .collect()
237}
238
239// ─────────────────────────────────────────────────────────────────────────────
240// Haar wavelet transform
241// ─────────────────────────────────────────────────────────────────────────────
242
/// Compute the full multi-level orthonormal Haar wavelet transform of `signal`.
244///
245/// The length of `signal` must be a power of 2. Returns the coefficient vector
246/// (Mallat ordering: coarsest approximation first).
247pub fn wavelet_haar_transform(signal: &[f64]) -> Vec<f64> {
248    let n = signal.len();
249    let mut out = signal.to_vec();
250    let mut step = n;
251    while step >= 2 {
252        step /= 2;
253        let mut tmp = vec![0.0; step * 2];
254        for i in 0..step {
255            let a = out[2 * i];
256            let b = out[2 * i + 1];
257            tmp[i] = (a + b) * 0.5_f64.sqrt();
258            tmp[step + i] = (a - b) * 0.5_f64.sqrt();
259        }
260        out[..step * 2].copy_from_slice(&tmp);
261    }
262    out
263}
264
265/// Compute the inverse Haar wavelet transform.
266///
267/// `coeffs` must have length that is a power of 2.
268pub fn wavelet_haar_inverse(coeffs: &[f64]) -> Vec<f64> {
269    let n = coeffs.len();
270    let mut out = coeffs.to_vec();
271    let mut step = 1_usize;
272    while step < n {
273        let mut tmp = vec![0.0; step * 2];
274        for i in 0..step {
275            let a = out[i];
276            let d = out[step + i];
277            tmp[2 * i] = (a + d) * 0.5_f64.sqrt();
278            tmp[2 * i + 1] = (a - d) * 0.5_f64.sqrt();
279        }
280        out[..step * 2].copy_from_slice(&tmp);
281        step *= 2;
282    }
283    out
284}
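
// Illustrative sketch (hypothetical helper, not part of the original API): a
// typical use of the Haar pair above is coefficient thresholding, i.e. zeroing
// small detail coefficients before inverting, to denoise/compress a signal.
fn example_haar_threshold(signal: &[f64], threshold: f64) -> Vec<f64> {
    let mut coeffs = wavelet_haar_transform(signal);
    // Keep the coarsest approximation coefficient (index 0), threshold details.
    for c in coeffs.iter_mut().skip(1) {
        if c.abs() < threshold {
            *c = 0.0;
        }
    }
    wavelet_haar_inverse(&coeffs)
}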
285
286// ─────────────────────────────────────────────────────────────────────────────
287// Sobolev norm (free function)
288// ─────────────────────────────────────────────────────────────────────────────
289
290/// Compute an approximate H^s Sobolev norm `‖f‖_{H^s}`.
291///
292/// Uses the discrete approximation
293/// `‖f‖²_{H^s} = ‖f‖²_{L²} + s·‖f'‖²_{L²}`
294/// where `f'` is supplied as `df` and `s` is the Sobolev order parameter.
295pub fn sobolev_norm(f: &[f64], df: &[f64], dx: f64, s: f64) -> f64 {
296    assert_eq!(
297        f.len(),
298        df.len(),
299        "sobolev_norm: f and df must have equal length"
300    );
301    let l2_sq = l2_inner_product(f, f, dx);
302    let dl2_sq = l2_inner_product(df, df, dx);
303    (l2_sq + s * dl2_sq).sqrt()
304}
305
306// ─────────────────────────────────────────────────────────────────────────────
307// Operator norm estimate (power iteration)
308// ─────────────────────────────────────────────────────────────────────────────
309
310/// Estimate the operator (spectral) norm of a matrix via power iteration.
311///
312/// Computes the largest singular value using the power method applied to
313/// `Aᵀ A`. The returned value approximates `σ_max(A)`.
314///
/// `matrix` is given as a row-major slice of rows (each inner `Vec<f64>` is
/// a row). Returns `0.0` for an empty matrix.
317pub fn operator_norm_estimate(matrix: &[Vec<f64>]) -> f64 {
318    let nrows = matrix.len();
319    if nrows == 0 {
320        return 0.0;
321    }
322    let ncols = matrix[0].len();
323    if ncols == 0 {
324        return 0.0;
325    }
326
327    // Start with a uniform vector
328    let mut v: Vec<f64> = vec![1.0 / (ncols as f64).sqrt(); ncols];
329    let mut sigma = 0.0_f64;
330
331    for _ in 0..100 {
332        // w = A v
333        let mut w = vec![0.0_f64; nrows];
334        for (i, row) in matrix.iter().enumerate() {
335            w[i] = row.iter().zip(v.iter()).map(|(a, x)| a * x).sum();
336        }
337        // u = Aᵀ w
338        let mut u = vec![0.0_f64; ncols];
339        for (i, row) in matrix.iter().enumerate() {
340            for (j, &aij) in row.iter().enumerate() {
341                u[j] += aij * w[i];
342            }
343        }
344        // Rayleigh quotient
345        let norm_u: f64 = u.iter().map(|x| x * x).sum::<f64>().sqrt();
346        if norm_u < 1e-15 {
347            break;
348        }
349        sigma = norm_u.sqrt();
350        for (vi, ui) in v.iter_mut().zip(u.iter()) {
351            *vi = *ui / norm_u;
352        }
353    }
354    sigma
355}
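
// Illustrative sketch (hypothetical helper, not part of the original API): for a
// rank-1 matrix the spectral norm equals the Frobenius norm, e.g.
// [[3, 4], [6, 8]] has σ_max = √(9 + 16 + 36 + 64) = √125.
fn example_operator_norm_rank_one() {
    let m = vec![vec![3.0, 4.0], vec![6.0, 8.0]];
    let sigma = operator_norm_estimate(&m);
    assert!((sigma - 125.0_f64.sqrt()).abs() < 1e-6);
}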
356
357// ─────────────────────────────────────────────────────────────────────────────
358// HilbertSpace
359// ─────────────────────────────────────────────────────────────────────────────
360
361/// A discrete Hilbert space over a uniform grid with spacing `dx`.
362///
363/// Provides inner product, norm, orthonormalization (Gram-Schmidt), and
364/// projection operations.
365#[derive(Debug, Clone)]
366pub struct HilbertSpace {
367    /// Grid spacing for integration approximation.
368    pub dx: f64,
369    /// Number of grid points.
370    pub n: usize,
371}
372
373impl HilbertSpace {
374    /// Create a new `HilbertSpace` with `n` grid points and spacing `dx`.
375    pub fn new(n: usize, dx: f64) -> Self {
376        Self { n, dx }
377    }
378
379    /// Compute the L² inner product `⟨f, g⟩`.
380    pub fn inner_product(&self, f: &[f64], g: &[f64]) -> f64 {
381        l2_inner_product(f, g, self.dx)
382    }
383
384    /// Compute the L² norm `‖f‖`.
385    pub fn norm(&self, f: &[f64]) -> f64 {
386        l2_norm(f, self.dx)
387    }
388
389    /// Orthonormalize a set of vectors using the modified Gram-Schmidt process.
390    ///
391    /// Returns the orthonormal set in the same order; linearly dependent
392    /// vectors become zero.
393    pub fn orthonormalize(&self, basis: &[Vec<f64>]) -> Vec<Vec<f64>> {
394        gram_schmidt_orthogonalize(basis, self.dx)
395    }
396
397    /// Project `f` onto the subspace spanned by `basis`.
398    ///
399    /// `basis` does **not** need to be orthonormal; a fresh Gram-Schmidt step
400    /// is performed internally.  Returns the projected vector (same length as
401    /// `f`).
402    pub fn project(&self, f: &[f64], basis: &[Vec<f64>]) -> Vec<f64> {
403        if basis.is_empty() || f.is_empty() {
404            return vec![0.0; f.len()];
405        }
406        let ortho = self.orthonormalize(basis);
407        let mut proj = vec![0.0; f.len()];
408        for q in &ortho {
409            if q.iter().all(|x| x.abs() < 1e-15) {
410                continue;
411            }
412            let coeff = self.inner_product(f, q);
413            for (pi, qi) in proj.iter_mut().zip(q.iter()) {
414                *pi += coeff * qi;
415            }
416        }
417        proj
418    }
419
420    /// Compute the distance between two functions `f` and `g`.
421    pub fn distance(&self, f: &[f64], g: &[f64]) -> f64 {
422        let diff: Vec<f64> = f.iter().zip(g.iter()).map(|(a, b)| a - b).collect();
423        self.norm(&diff)
424    }
425
    /// Check if `f` and `g` are orthogonal (`|⟨f, g⟩| < tol`).
427    pub fn are_orthogonal(&self, f: &[f64], g: &[f64], tol: f64) -> bool {
428        self.inner_product(f, g).abs() < tol
429    }
430
    /// Return a copy of `f` scaled to unit L² norm (returned unchanged if its
    /// norm is near zero).
432    pub fn normalize(&self, f: &[f64]) -> Vec<f64> {
433        let nm = self.norm(f);
434        if nm < 1e-15 {
435            return f.to_vec();
436        }
437        f.iter().map(|x| x / nm).collect()
438    }
439}
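
// Illustrative sketch (hypothetical helper, not part of the original API):
// projecting f(x) = x on [0, 1] onto the span of the constant function yields
// its mean value 1/2 at every grid point.
fn example_hilbert_projection_onto_constants() {
    let n = 1_000;
    let dx = 1.0 / n as f64;
    let hs = HilbertSpace::new(n, dx);
    let f: Vec<f64> = (0..n).map(|i| (i as f64 + 0.5) * dx).collect();
    let ones = vec![1.0_f64; n];
    let proj = hs.project(&f, &[ones]);
    for &p in &proj {
        assert!((p - 0.5).abs() < 1e-10);
    }
}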
440
441// ─────────────────────────────────────────────────────────────────────────────
442// BanachSpace
443// ─────────────────────────────────────────────────────────────────────────────
444
445/// A discrete Banach space with Lᵖ norm.
446///
447/// Supports p-norms, dual space estimation, and bounded linear functional
448/// evaluation.
449#[derive(Debug, Clone)]
450pub struct BanachSpace {
451    /// The exponent `p` for the Lᵖ norm (`p >= 1`).
452    pub p: f64,
453    /// Grid spacing for integration approximation.
454    pub dx: f64,
455}
456
457impl BanachSpace {
458    /// Create a new `BanachSpace` with Lᵖ norm and spacing `dx`.
459    pub fn new(p: f64, dx: f64) -> Self {
460        Self { p: p.max(1.0), dx }
461    }
462
463    /// Compute the Lᵖ norm `‖f‖_p = (∫|f|^p dx)^{1/p}`.
464    pub fn norm(&self, f: &[f64]) -> f64 {
465        if self.p == f64::INFINITY || self.p > 1e10 {
            // L∞ norm: maximum of |f|, not the signed maximum.
            return f.iter().fold(0.0_f64, |m, &x| m.max(x.abs()));
467        }
468        let integral: f64 = f.iter().map(|x| x.abs().powf(self.p)).sum::<f64>() * self.dx;
469        integral.powf(1.0 / self.p)
470    }
471
    /// Compute the dual `L^q` norm where `1/p + 1/q = 1`.
473    ///
474    /// For `p = 1` returns the L∞ norm; for `p = ∞` returns the L¹ norm.
475    pub fn dual_norm(&self, g: &[f64]) -> f64 {
476        let q = if (self.p - 1.0).abs() < 1e-12 {
477            f64::INFINITY
478        } else {
479            self.p / (self.p - 1.0)
480        };
481        let dual = BanachSpace::new(q, self.dx);
482        dual.norm(g)
483    }
484
485    /// Evaluate a bounded linear functional `φ(f) = ∫ g(x) f(x) dx`.
486    ///
487    /// `g` is the Riesz representation kernel.
488    pub fn bounded_linear_functional(&self, f: &[f64], g: &[f64]) -> f64 {
489        assert_eq!(f.len(), g.len(), "functional: f and g length mismatch");
490        f.iter().zip(g.iter()).map(|(a, b)| a * b).sum::<f64>() * self.dx
491    }
492
493    /// Check whether `f` lies in the unit ball `‖f‖_p <= 1 + tol`.
494    pub fn in_unit_ball(&self, f: &[f64], tol: f64) -> bool {
495        self.norm(f) <= 1.0 + tol
496    }
497
498    /// Compute the Hölder bound: `|⟨f, g⟩| <= ‖f‖_p * ‖g‖_q`.
499    ///
500    /// Returns `(lhs, rhs)` where `lhs = |⟨f,g⟩|` and `rhs = ‖f‖_p * ‖g‖_q`.
501    pub fn holder_bound(&self, f: &[f64], g: &[f64]) -> (f64, f64) {
502        let inner = self.bounded_linear_functional(f, g).abs();
503        let norm_f = self.norm(f);
504        let norm_g = self.dual_norm(g);
505        (inner, norm_f * norm_g)
506    }
507}
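
// Illustrative sketch (hypothetical helper, not part of the original API):
// Hölder's inequality with p = 3 and its conjugate exponent q = 3/2.
fn example_banach_holder_p3() {
    let bs = BanachSpace::new(3.0, 0.1);
    let f = vec![1.0, -2.0, 0.5, 3.0];
    let g = vec![0.3, 1.0, -4.0, 2.0];
    let (lhs, rhs) = bs.holder_bound(&f, &g);
    assert!(lhs <= rhs + 1e-12, "Hölder: {lhs} <= {rhs}");
}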
508
509// ─────────────────────────────────────────────────────────────────────────────
510// OperatorSpectrum
511// ─────────────────────────────────────────────────────────────────────────────
512
513/// Spectral analysis of compact operators represented as finite matrices.
514///
515/// Provides eigenvalue computation via power iteration, spectral radius
516/// estimation, and resolvent norm.
517#[derive(Debug, Clone)]
518pub struct OperatorSpectrum {
519    /// Matrix representation of the operator (row-major, square).
520    pub matrix: Vec<Vec<f64>>,
521    /// Size of the square matrix.
522    pub n: usize,
523}
524
525impl OperatorSpectrum {
526    /// Create an `OperatorSpectrum` from a square matrix.
527    ///
528    /// Panics if the matrix is not square.
529    pub fn new(matrix: Vec<Vec<f64>>) -> Self {
530        let n = matrix.len();
531        assert!(
532            matrix.iter().all(|row| row.len() == n),
533            "matrix must be square"
534        );
535        Self { matrix, n }
536    }
537
538    /// Estimate the largest eigenvalue (spectral radius) via the power method.
539    ///
540    /// Returns the dominant eigenvalue after `max_iter` iterations.
541    pub fn spectral_radius(&self, max_iter: usize) -> f64 {
542        if self.n == 0 {
543            return 0.0;
544        }
545        let mut v = vec![1.0 / (self.n as f64).sqrt(); self.n];
546        let mut lambda = 0.0_f64;
547        for _ in 0..max_iter {
548            let w = self.matvec(&v);
549            let norm: f64 = w.iter().map(|x| x * x).sum::<f64>().sqrt();
550            if norm < 1e-15 {
551                break;
552            }
553            lambda = w.iter().zip(v.iter()).map(|(wi, vi)| wi * vi).sum::<f64>();
554            for (vi, wi) in v.iter_mut().zip(w.iter()) {
555                *vi = wi / norm;
556            }
557        }
558        lambda
559    }
560
    /// Estimate the smallest eigenvalue via shifted power iteration.
    ///
    /// Applies the power method to `A - shift·I` and adds `shift` back.  This
    /// converges to the smallest eigenvalue only when `shift` lies above the
    /// midpoint of the spectrum (e.g. an upper bound on the largest
    /// eigenvalue); it is not a true inverse iteration.
564    pub fn smallest_eigenvalue(&self, shift: f64, max_iter: usize) -> f64 {
565        if self.n == 0 {
566            return 0.0;
567        }
568        // Shifted matrix: A - shift*I
569        let shifted: Vec<Vec<f64>> = self
570            .matrix
571            .iter()
572            .enumerate()
573            .map(|(i, row)| {
574                row.iter()
575                    .enumerate()
576                    .map(|(j, &a)| if i == j { a - shift } else { a })
577                    .collect()
578            })
579            .collect();
580
581        let op = OperatorSpectrum::new(shifted);
582        // Power iteration on shifted operator
583        let lam = op.spectral_radius(max_iter);
584        lam + shift
585    }
586
587    /// Compute the trace of the matrix (sum of diagonal elements).
588    pub fn trace(&self) -> f64 {
589        (0..self.n).map(|i| self.matrix[i][i]).sum()
590    }
591
592    /// Compute the Frobenius norm `‖A‖_F = √(Σ aᵢⱼ²)`.
593    pub fn frobenius_norm(&self) -> f64 {
594        self.matrix
595            .iter()
596            .flat_map(|row| row.iter())
597            .map(|x| x * x)
598            .sum::<f64>()
599            .sqrt()
600    }
601
    /// Estimate the resolvent norm `‖(A - λI)⁻¹‖` at a regular point `lambda`.
    ///
    /// Returns `1 / ‖A - λI‖_F`, a cheap lower bound on the true resolvent
    /// norm `1 / σ_min(A - λI)`; the smallest singular value is not computed
    /// exactly.
607    pub fn resolvent_norm(&self, lambda: f64) -> f64 {
608        // Build (A - λI)
609        let shifted: Vec<Vec<f64>> = self
610            .matrix
611            .iter()
612            .enumerate()
613            .map(|(i, row)| {
614                row.iter()
615                    .enumerate()
616                    .map(|(j, &a)| if i == j { a - lambda } else { a })
617                    .collect()
618            })
619            .collect();
620        let frob: f64 = shifted
621            .iter()
622            .flat_map(|r| r.iter())
623            .map(|x| x * x)
624            .sum::<f64>()
625            .sqrt();
626        if frob < 1e-15 {
627            return f64::INFINITY;
628        }
629        1.0 / frob
630    }
631
632    /// Apply the operator (matrix-vector multiplication) to `v`.
633    pub fn apply(&self, v: &[f64]) -> Vec<f64> {
634        self.matvec(v)
635    }
636
637    /// Compute all diagonal elements (approximate eigenvalues of a diagonal operator).
638    pub fn diagonal(&self) -> Vec<f64> {
639        (0..self.n).map(|i| self.matrix[i][i]).collect()
640    }
641
642    fn matvec(&self, v: &[f64]) -> Vec<f64> {
643        self.matrix
644            .iter()
645            .map(|row| row.iter().zip(v.iter()).map(|(a, x)| a * x).sum())
646            .collect()
647    }
648}
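
// Illustrative sketch (hypothetical helper, not part of the original API):
// `smallest_eigenvalue` needs a shift above the midpoint of the spectrum, as
// noted in its doc comment.
fn example_smallest_eigenvalue_with_shift() {
    let op = OperatorSpectrum::new(vec![vec![1.0, 0.0], vec![0.0, 4.0]]);
    // Shift 5.0 exceeds the largest eigenvalue 4.0, so the power method on
    // A - 5I locks onto 1 - 5 = -4 and the routine returns -4 + 5 = 1.
    let lam = op.smallest_eigenvalue(5.0, 200);
    assert!((lam - 1.0).abs() < 1e-6);
}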
649
650// ─────────────────────────────────────────────────────────────────────────────
651// SobolevSpace
652// ─────────────────────────────────────────────────────────────────────────────
653
654/// A discrete Sobolev space H^k on a uniform grid.
655///
656/// Provides H^k norms, weak derivative approximation (finite differences),
657/// and trace operator evaluation.
658#[derive(Debug, Clone)]
659pub struct SobolevSpace {
660    /// Number of grid points.
661    pub n: usize,
662    /// Grid spacing.
663    pub dx: f64,
664    /// Sobolev order `k`.
665    pub k: usize,
666}
667
668impl SobolevSpace {
669    /// Create a new `SobolevSpace` with `n` points, spacing `dx`, and order `k`.
670    pub fn new(n: usize, dx: f64, k: usize) -> Self {
671        Self { n, dx, k }
672    }
673
674    /// Compute the H^k norm using finite-difference weak derivatives.
675    ///
676    /// `‖f‖²_{H^k} = Σ_{j=0}^{k} ‖D^j f‖²_{L²}`
677    pub fn norm(&self, f: &[f64]) -> f64 {
678        let mut sq = l2_inner_product(f, f, self.dx);
679        let mut deriv = f.to_vec();
680        for _ in 1..=self.k {
681            deriv = Self::weak_derivative(&deriv, self.dx);
682            sq += l2_inner_product(&deriv, &deriv, self.dx);
683        }
684        sq.sqrt()
685    }
686
687    /// Compute the first-order weak derivative using central differences.
688    ///
689    /// Forward/backward differences are used at the boundary.
690    pub fn weak_derivative(f: &[f64], dx: f64) -> Vec<f64> {
691        let n = f.len();
692        if n < 2 {
693            return vec![0.0; n];
694        }
695        let mut d = vec![0.0; n];
696        // Central differences in interior
697        for i in 1..n - 1 {
698            d[i] = (f[i + 1] - f[i - 1]) / (2.0 * dx);
699        }
700        // Forward/backward at boundaries
701        d[0] = (f[1] - f[0]) / dx;
702        d[n - 1] = (f[n - 1] - f[n - 2]) / dx;
703        d
704    }
705
706    /// Evaluate the trace operator: return the boundary values `\[f(0), f(n-1)\]`.
707    pub fn trace(&self, f: &[f64]) -> [f64; 2] {
708        if f.is_empty() {
709            return [0.0, 0.0];
710        }
711        [f[0], f[f.len() - 1]]
712    }
713
714    /// Compute the seminorm `|f|_{H^k} = ‖D^k f‖_{L²}`.
715    pub fn seminorm(&self, f: &[f64]) -> f64 {
716        let mut deriv = f.to_vec();
717        for _ in 0..self.k {
718            deriv = Self::weak_derivative(&deriv, self.dx);
719        }
720        l2_norm(&deriv, self.dx)
721    }
722
723    /// Check whether `f` lies in H^k (finite H^k norm).
724    pub fn is_in_space(&self, f: &[f64]) -> bool {
725        self.norm(f).is_finite()
726    }
727
728    /// Compute the H^k inner product `⟨f, g⟩_{H^k} = Σ_{j=0}^{k} ⟨D^j f, D^j g⟩`.
729    pub fn inner_product(&self, f: &[f64], g: &[f64]) -> f64 {
730        let mut ip = l2_inner_product(f, g, self.dx);
731        let mut df = f.to_vec();
732        let mut dg = g.to_vec();
733        for _ in 1..=self.k {
734            df = Self::weak_derivative(&df, self.dx);
735            dg = Self::weak_derivative(&dg, self.dx);
736            ip += l2_inner_product(&df, &dg, self.dx);
737        }
738        ip
739    }
740}
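
// Illustrative sketch (hypothetical helper, not part of the original API): the
// H¹ norm dominates the L² norm, since it adds the squared L² norm of the weak
// derivative.
fn example_sobolev_h1_dominates_l2() {
    let n = 256;
    let dx = PI / n as f64;
    let h1 = SobolevSpace::new(n, dx, 1);
    let f: Vec<f64> = (0..n).map(|i| ((i as f64 + 0.5) * dx).sin()).collect();
    assert!(h1.norm(&f) >= l2_norm(&f, dx));
}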
741
742// ─────────────────────────────────────────────────────────────────────────────
743// FunctionalDerivative
744// ─────────────────────────────────────────────────────────────────────────────
745
746/// Gâteaux and Fréchet derivatives of functionals on function spaces.
747///
748/// Enables gradient descent in function space and Newton's method for
749/// operator equations.
750pub struct FunctionalDerivative;
751
752impl FunctionalDerivative {
753    /// Compute the Gâteaux derivative of functional `J` at `f` in direction `h`.
754    ///
    /// `d/dε J(f + εh) |_{ε=0}`, approximated by a central finite difference.
756    pub fn gateaux<F>(j: F, f: &[f64], h: &[f64], eps: f64) -> f64
757    where
758        F: Fn(&[f64]) -> f64,
759    {
760        let f_plus: Vec<f64> = f
761            .iter()
762            .zip(h.iter())
763            .map(|(fi, hi)| fi + eps * hi)
764            .collect();
765        let f_minus: Vec<f64> = f
766            .iter()
767            .zip(h.iter())
768            .map(|(fi, hi)| fi - eps * hi)
769            .collect();
770        (j(&f_plus) - j(&f_minus)) / (2.0 * eps)
771    }
772
773    /// Compute the Fréchet derivative (gradient) of functional `J` at `f`.
774    ///
775    /// Returns the gradient vector `δJ/δf` using component-wise Gâteaux
776    /// derivatives with standard basis directions.
777    pub fn frechet_gradient<F>(j: F, f: &[f64], eps: f64) -> Vec<f64>
778    where
779        F: Fn(&[f64]) -> f64,
780    {
781        let n = f.len();
782        let mut grad = vec![0.0; n];
783        for i in 0..n {
784            let mut f_plus = f.to_vec();
785            let mut f_minus = f.to_vec();
786            f_plus[i] += eps;
787            f_minus[i] -= eps;
788            grad[i] = (j(&f_plus) - j(&f_minus)) / (2.0 * eps);
789        }
790        grad
791    }
792
793    /// Perform gradient descent in function space to minimize functional `J`.
794    ///
795    /// Starting from `f0`, takes `max_iter` steps of size `step_size`.
796    /// Returns the minimizer and the final functional value.
797    pub fn gradient_descent<F>(
798        j: F,
799        f0: &[f64],
800        step_size: f64,
801        max_iter: usize,
802        eps: f64,
803    ) -> (Vec<f64>, f64)
804    where
805        F: Fn(&[f64]) -> f64,
806    {
807        let mut f = f0.to_vec();
808        for _ in 0..max_iter {
809            let grad = Self::frechet_gradient(&j, &f, eps);
810            for (fi, gi) in f.iter_mut().zip(grad.iter()) {
811                *fi -= step_size * gi;
812            }
813        }
814        let val = j(&f);
815        (f, val)
816    }
817
818    /// Newton step in function space: `f_new = f - \[D²J(f)\]⁻¹ DJ(f)`.
819    ///
    /// Approximates only the diagonal of the Hessian via second central
    /// differences and takes a component-wise, diagonally preconditioned
    /// Newton step; no full linear system is solved.
823    pub fn newton_step<F>(j: F, f: &[f64], eps: f64) -> Vec<f64>
824    where
825        F: Fn(&[f64]) -> f64,
826    {
827        let n = f.len();
828        let grad = Self::frechet_gradient(&j, f, eps);
829
830        // Diagonal of Hessian via second central difference
831        let mut hess_diag = vec![1.0_f64; n]; // fallback: identity
832        for i in 0..n {
833            let mut fp = f.to_vec();
834            let mut fm = f.to_vec();
835            fp[i] += eps;
836            fm[i] -= eps;
837            let h = (j(&fp) - 2.0 * j(f) + j(&fm)) / (eps * eps);
838            if h.abs() > 1e-15 {
839                hess_diag[i] = h;
840            }
841        }
842
843        // Newton step: f - H^{-1} g (diagonal approximation)
844        f.iter()
845            .zip(grad.iter())
846            .zip(hess_diag.iter())
847            .map(|((fi, gi), hi)| fi - gi / hi)
848            .collect()
849    }
850
851    /// Compute the second Gâteaux derivative (bilinear form) at `f` in
852    /// directions `h` and `k`.
853    pub fn second_gateaux<F>(j: F, f: &[f64], h: &[f64], k: &[f64], eps: f64) -> f64
854    where
855        F: Fn(&[f64]) -> f64,
856    {
857        let fph: Vec<f64> = f
858            .iter()
859            .zip(h.iter())
860            .map(|(fi, hi)| fi + eps * hi)
861            .collect();
862        let fmh: Vec<f64> = f
863            .iter()
864            .zip(h.iter())
865            .map(|(fi, hi)| fi - eps * hi)
866            .collect();
867        let g_plus = Self::gateaux(&j, &fph, k, eps);
868        let g_minus = Self::gateaux(&j, &fmh, k, eps);
869        (g_plus - g_minus) / (2.0 * eps)
870    }
871}
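
// Illustrative sketch (hypothetical helper, not part of the original API):
// gradient descent on the strictly convex functional J(f) = Σᵢ (fᵢ − 1)² drives
// every component towards 1.
fn example_functional_gradient_descent() {
    let j = |f: &[f64]| f.iter().map(|x| (x - 1.0) * (x - 1.0)).sum::<f64>();
    let f0 = vec![0.0_f64; 4];
    let (f_min, j_min) = FunctionalDerivative::gradient_descent(j, &f0, 0.1, 500, 1e-6);
    assert!(j_min < 1e-8);
    for &fi in &f_min {
        assert!((fi - 1.0).abs() < 1e-4);
    }
}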
872
873// ─────────────────────────────────────────────────────────────────────────────
874// VariationalProblem
875// ─────────────────────────────────────────────────────────────────────────────
876
877/// Variational problem solver: Euler-Lagrange equations and constrained
878/// minimization with Lagrange multipliers.
879pub struct VariationalProblem;
880
881impl VariationalProblem {
882    /// Compute the Euler-Lagrange residual for the functional
883    /// `J\[u\] = ∫ L(x, u, u') dx`.
884    ///
885    /// The Lagrangian `L` takes `(x, u, u_prime)`.  Returns the residual
886    /// `∂L/∂u - d/dx(∂L/∂u')` evaluated at each interior grid point using
887    /// finite differences.
888    pub fn euler_lagrange_residual<L>(lagrangian: L, u: &[f64], dx: f64) -> Vec<f64>
889    where
890        L: Fn(f64, f64, f64) -> f64,
891    {
892        let n = u.len();
893        if n < 3 {
894            return vec![0.0; n];
895        }
896        let eps = dx * 1e-5;
897        let mut residual = vec![0.0; n];
898
899        for i in 1..n - 1 {
900            let x = (i as f64) * dx;
901            let up = (u[i + 1] - u[i - 1]) / (2.0 * dx); // u' at i
902
903            // ∂L/∂u via central difference in u
904            let dlu = (lagrangian(x, u[i] + eps, up) - lagrangian(x, u[i] - eps, up)) / (2.0 * eps);
905
906            // ∂L/∂u' at i+1 and i-1 (for d/dx(∂L/∂u'))
907            let up_p1 = (u[(i + 2).min(n - 1)] - u[i]) / (2.0 * dx);
            // Central difference for u' at i-1, mirroring up_p1 (clamped at the boundary).
            let up_m1 = (u[i] - u[(i as isize - 2).max(0) as usize]) / (2.0 * dx);
909
910            let dlup_p1 = (lagrangian(x + dx, u[i + 1], up_p1 + eps)
911                - lagrangian(x + dx, u[i + 1], up_p1 - eps))
912                / (2.0 * eps);
913            let dlup_m1 = (lagrangian(x - dx, u[i - 1], up_m1 + eps)
914                - lagrangian(x - dx, u[i - 1], up_m1 - eps))
915                / (2.0 * eps);
916
917            let d_dlup_dx = (dlup_p1 - dlup_m1) / (2.0 * dx);
918            residual[i] = dlu - d_dlup_dx;
919        }
920        residual
921    }
922
923    /// Minimize the functional `J\[u\]` subject to equality constraint `G\[u\] = 0`
924    /// using the augmented Lagrangian method.
925    ///
926    /// Returns `(u, lambda)` where `u` is the minimizer and `lambda` is the
927    /// Lagrange multiplier estimate.
928    pub fn augmented_lagrangian<J, G>(
929        j: J,
930        g: G,
931        u0: &[f64],
932        rho: f64,
933        step: f64,
934        max_iter: usize,
935    ) -> (Vec<f64>, f64)
936    where
937        J: Fn(&[f64]) -> f64,
938        G: Fn(&[f64]) -> f64,
939    {
940        let mut u = u0.to_vec();
941        let mut lambda = 0.0_f64;
942        let eps = step * 1e-4;
943
944        for _ in 0..max_iter {
945            // Augmented Lagrangian: L_A(u) = J(u) + λ G(u) + ρ/2 G(u)²
946            let augmented = |v: &[f64]| {
947                let gv = g(v);
948                j(v) + lambda * gv + rho / 2.0 * gv * gv
949            };
950            let grad = FunctionalDerivative::frechet_gradient(augmented, &u, eps);
951            for (ui, gi) in u.iter_mut().zip(grad.iter()) {
952                *ui -= step * gi;
953            }
954            // Update multiplier
955            lambda += rho * g(&u);
956        }
957        (u, lambda)
958    }
959
960    /// Compute the first variation of `J\[u\] = ∫ f(u(x)) dx` at `u` in
961    /// direction `v`.
962    ///
963    /// `f_prime` is the derivative of the integrand w.r.t. `u`.
964    pub fn first_variation(u: &[f64], v: &[f64], f_prime: &[f64], dx: f64) -> f64 {
965        assert_eq!(u.len(), v.len());
966        assert_eq!(u.len(), f_prime.len());
967        f_prime
968            .iter()
969            .zip(v.iter())
970            .map(|(fp, vi)| fp * vi)
971            .sum::<f64>()
972            * dx
973    }
974
975    /// Find a stationary point via steepest descent on the functional gradient.
976    ///
977    /// Returns the stationary point and the history of functional values.
978    pub fn steepest_descent<J>(
979        j: J,
980        u0: &[f64],
981        step: f64,
982        tol: f64,
983        max_iter: usize,
984    ) -> (Vec<f64>, Vec<f64>)
985    where
986        J: Fn(&[f64]) -> f64,
987    {
988        let eps = step * 1e-4;
989        let mut u = u0.to_vec();
990        let mut history = Vec::new();
991        for _ in 0..max_iter {
992            let val = j(&u);
993            history.push(val);
994            let grad = FunctionalDerivative::frechet_gradient(&j, &u, eps);
995            let grad_norm: f64 = grad.iter().map(|x| x * x).sum::<f64>().sqrt();
996            if grad_norm < tol {
997                break;
998            }
999            for (ui, gi) in u.iter_mut().zip(grad.iter()) {
1000                *ui -= step * gi;
1001            }
1002        }
1003        (u, history)
1004    }
1005}
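
// Illustrative sketch (hypothetical helper, not part of the original API):
// steepest descent on the quadratic functional J[u] = ∫ u² dx drives the
// iterate towards u ≡ 0 and records a decreasing history of functional values.
fn example_variational_steepest_descent() {
    let dx = 0.1;
    let j = |u: &[f64]| u.iter().map(|x| x * x).sum::<f64>() * dx;
    let u0 = vec![1.0_f64; 8];
    let (u_min, history) = VariationalProblem::steepest_descent(j, &u0, 0.5, 1e-10, 200);
    assert!(history[history.len() - 1] < history[0]);
    for &ui in &u_min {
        assert!(ui.abs() < 1e-6);
    }
}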
1006
1007// ─────────────────────────────────────────────────────────────────────────────
1008// Tests
1009// ─────────────────────────────────────────────────────────────────────────────
1010
1011#[cfg(test)]
1012mod tests {
1013    use super::*;
1014
1015    // ── FunctionSpace ─────────────────────────────────────────────────────────
1016
1017    #[test]
1018    fn test_function_space_eval() {
1019        let space = FunctionSpace::new(vec![|x: f64| x, |x: f64| x * x]);
1020        assert!((space.eval(0, 2.0) - 2.0).abs() < 1e-12);
1021        assert!((space.eval(1, 3.0) - 9.0).abs() < 1e-12);
1022    }
1023
1024    #[test]
1025    fn test_function_space_dim() {
1026        let space = FunctionSpace::new(vec![|x: f64| x]);
1027        assert_eq!(space.dim(), 1);
1028    }
1029
1030    #[test]
1031    fn test_function_space_combine() {
1032        // f(x) = 2x + 3x²  evaluated at x=1: 2+3=5
1033        let space = FunctionSpace::new(vec![|x: f64| x, |x: f64| x * x]);
1034        let val = space.combine(&[2.0, 3.0], 1.0);
1035        assert!((val - 5.0).abs() < 1e-12);
1036    }
1037
1038    #[test]
1039    fn test_function_space_debug() {
1040        let space = FunctionSpace::new(vec![|x: f64| x]);
1041        let s = format!("{space:?}");
1042        assert!(s.contains("dim: 1"));
1043    }
1044
1045    // ── L² inner product ─────────────────────────────────────────────────────
1046
1047    #[test]
1048    fn test_l2_inner_product_orthogonal() {
1049        // sin and cos are orthogonal on [0, 2π]
1050        let n = 1000;
1051        let dx = 2.0 * PI / n as f64;
1052        let sin_v: Vec<f64> = (0..n).map(|i| ((i as f64 + 0.5) * dx).sin()).collect();
1053        let cos_v: Vec<f64> = (0..n).map(|i| ((i as f64 + 0.5) * dx).cos()).collect();
1054        let ip = l2_inner_product(&sin_v, &cos_v, dx);
1055        assert!(ip.abs() < 1e-10, "sin/cos inner product = {ip}");
1056    }
1057
1058    #[test]
1059    fn test_l2_inner_product_self_is_norm_sq() {
1060        let f = vec![1.0, 2.0, 3.0];
1061        let dx = 0.1;
1062        let ip = l2_inner_product(&f, &f, dx);
1063        let norm = l2_norm(&f, dx);
1064        assert!((ip - norm * norm).abs() < 1e-12);
1065    }
1066
1067    #[test]
1068    fn test_l2_norm_constant() {
1069        // ‖c‖ = c * √(n * dx) for constant c over n points with spacing dx
1070        let c = 3.0_f64;
1071        let n = 100;
1072        let dx = 0.01;
1073        let f = vec![c; n];
1074        let expected = c * ((n as f64) * dx).sqrt();
1075        let got = l2_norm(&f, dx);
1076        assert!((got - expected).abs() < 1e-10, "norm: {got} vs {expected}");
1077    }
1078
1079    #[test]
1080    fn test_l2_norm_zero_vector() {
1081        let f = vec![0.0_f64; 50];
1082        assert!(l2_norm(&f, 0.01).abs() < 1e-12);
1083    }
1084
1085    // ── Gram–Schmidt ──────────────────────────────────────────────────────────
1086
1087    #[test]
1088    fn test_gram_schmidt_two_vectors() {
1089        let n = 100;
1090        let dx = 1.0 / n as f64;
1091        let v1: Vec<f64> = (0..n).map(|i| (i as f64 + 0.5) * dx).collect();
1092        let v2: Vec<f64> = (0..n).map(|_| 1.0).collect();
1093        let q = gram_schmidt_orthogonalize(&[v1, v2], dx);
1094        assert_eq!(q.len(), 2);
1095        // Orthogonality check
1096        let ip = l2_inner_product(&q[0], &q[1], dx);
1097        assert!(ip.abs() < 1e-10, "not orthogonal: {ip}");
1098        // Normality check
1099        let n0 = l2_norm(&q[0], dx);
1100        let n1 = l2_norm(&q[1], dx);
1101        assert!((n0 - 1.0).abs() < 1e-10, "q0 norm: {n0}");
1102        assert!((n1 - 1.0).abs() < 1e-10, "q1 norm: {n1}");
1103    }
1104
1105    #[test]
1106    fn test_gram_schmidt_empty() {
1107        let q = gram_schmidt_orthogonalize(&[], 0.01);
1108        assert!(q.is_empty());
1109    }
1110
1111    #[test]
1112    fn test_gram_schmidt_linearly_dependent() {
1113        let n = 10;
1114        let dx = 0.1;
1115        let v: Vec<f64> = (0..n).map(|i| i as f64).collect();
1116        let v2 = v.iter().map(|x| 2.0 * x).collect::<Vec<_>>();
1117        let q = gram_schmidt_orthogonalize(&[v, v2], dx);
1118        // Second vector is zero (linearly dependent)
1119        let norm2 = l2_norm(&q[1], dx);
1120        assert!(norm2 < 1e-12, "dependent vector should be zero: {norm2}");
1121    }
1122
1123    #[test]
1124    fn test_gram_schmidt_three_vectors() {
1125        let n = 200;
1126        let dx = 2.0 * PI / n as f64;
1127        let v1: Vec<f64> = (0..n).map(|i| ((i as f64 + 0.5) * dx).sin()).collect();
1128        let v2: Vec<f64> = (0..n).map(|i| ((i as f64 + 0.5) * dx).cos()).collect();
1129        let v3: Vec<f64> = (0..n)
1130            .map(|i| (2.0 * (i as f64 + 0.5) * dx).sin())
1131            .collect();
1132        let q = gram_schmidt_orthogonalize(&[v1, v2, v3], dx);
1133        // All pairs must be orthogonal
1134        for i in 0..3 {
1135            for j in (i + 1)..3 {
1136                let ip = l2_inner_product(&q[i], &q[j], dx);
1137                assert!(ip.abs() < 1e-9, "q[{i}]·q[{j}] = {ip}");
1138            }
1139        }
1140    }
1141
1142    // ── Fourier series ────────────────────────────────────────────────────────
1143
1144    #[test]
1145    fn test_fourier_constant_function() {
1146        let n = 100;
1147        let dx = 1.0 / n as f64;
1148        let f = vec![1.0_f64; n];
1149        let coeffs = fourier_series_coeffs(&f, dx, 3);
1150        // a₀ should be 2 (by convention 2/L ∫ f dx = 2)
1151        assert!((coeffs[0].0 - 2.0).abs() < 1e-8, "a0 = {}", coeffs[0].0);
1152        // All b should be ~0
1153        for (k, &(_, bk)) in coeffs.iter().enumerate() {
1154            assert!(bk.abs() < 1e-8, "b{k} = {bk}");
1155        }
1156    }
1157
1158    #[test]
1159    fn test_fourier_sine_function() {
1160        let n = 1000;
1161        let dx = 1.0 / n as f64;
1162        let f: Vec<f64> = (0..n)
1163            .map(|i| (2.0 * PI * (i as f64 + 0.5) * dx).sin())
1164            .collect();
1165        let coeffs = fourier_series_coeffs(&f, dx, 2);
1166        // b₁ should be ~1 for sin(2πx)
1167        assert!((coeffs[1].1 - 1.0).abs() < 0.01, "b1 = {}", coeffs[1].1);
1168    }
1169
1170    #[test]
1171    fn test_fourier_n_terms_length() {
1172        let f = vec![0.0_f64; 64];
1173        let c = fourier_series_coeffs(&f, 0.1, 5);
1174        assert_eq!(c.len(), 5);
1175    }
1176
1177    // ── Chebyshev expansion ───────────────────────────────────────────────────
1178
1179    #[test]
1180    fn test_chebyshev_constant() {
1181        // f(x) = 1 on Chebyshev nodes: c₀ = 1, rest ≈ 0
1182        let n = 32;
1183        let f: Vec<f64> = (0..n)
1184            .map(|k| (PI * (k as f64 + 0.5) / n as f64).cos())
1185            .map(|_x| 1.0)
1186            .collect();
1187        let c = chebyshev_expansion(&f, 4);
1188        assert!((c[0] - 1.0).abs() < 1e-10, "c0 = {}", c[0]);
1189        for k in 1..4 {
1190            assert!(c[k].abs() < 1e-10, "c{k} = {}", c[k]);
1191        }
1192    }
1193
1194    #[test]
1195    fn test_chebyshev_empty_input() {
1196        let c = chebyshev_expansion(&[], 3);
1197        assert_eq!(c.len(), 3);
1198        for ci in c {
1199            assert!(ci.abs() < 1e-15);
1200        }
1201    }
1202
1203    #[test]
1204    fn test_chebyshev_n_terms_length() {
1205        let f = vec![0.5_f64; 16];
1206        let c = chebyshev_expansion(&f, 6);
1207        assert_eq!(c.len(), 6);
1208    }
1209
1210    #[test]
1211    fn test_chebyshev_x_polynomial() {
1212        // For f(x) = x on Chebyshev nodes, c₁ should dominate
1213        let n = 64;
1214        let f: Vec<f64> = (0..n)
1215            .map(|k| (PI * (k as f64 + 0.5) / n as f64).cos())
1216            .collect();
1217        let c = chebyshev_expansion(&f, 4);
1218        // c₀ should be ~0, c₁ should be ~1
1219        assert!(c[0].abs() < 1e-10, "c0 = {}", c[0]);
1220        assert!((c[1] - 1.0).abs() < 1e-10, "c1 = {}", c[1]);
1221    }
1222
1223    // ── Legendre expansion ────────────────────────────────────────────────────
1224
1225    #[test]
1226    fn test_legendre_constant() {
1227        // f(x) = 1: c₀ = 1, rest ≈ 0
1228        let n = 200;
1229        let f = vec![1.0_f64; n];
1230        let c = legendre_expansion(&f, 3);
1231        assert!((c[0] - 1.0).abs() < 1e-2, "c0 = {}", c[0]);
1232        assert!(c[1].abs() < 1e-4, "c1 = {}", c[1]);
        // Midpoint-rule quadrature error for P₂ is O(1/n²): with n=200 expect |c₂| < 1e-3
1234        assert!(c[2].abs() < 1e-3, "c2 = {}", c[2]);
1235    }
1236
1237    #[test]
1238    fn test_legendre_n_terms_length() {
1239        let f = vec![1.0_f64; 50];
1240        let c = legendre_expansion(&f, 5);
1241        assert_eq!(c.len(), 5);
1242    }
1243
1244    #[test]
1245    fn test_legendre_p_values() {
1246        // P₀(x) = 1, P₁(x) = x, P₂(x) = (3x²-1)/2
1247        assert!((legendre_p(0, 0.5) - 1.0).abs() < 1e-12);
1248        assert!((legendre_p(1, 0.5) - 0.5).abs() < 1e-12);
1249        let p2 = (3.0 * 0.5_f64.powi(2) - 1.0) / 2.0;
1250        assert!((legendre_p(2, 0.5) - p2).abs() < 1e-12);
1251    }
1252
1253    #[test]
1254    fn test_legendre_empty_input() {
1255        let c = legendre_expansion(&[], 3);
1256        assert_eq!(c.len(), 3);
1257    }
1258
1259    // ── Haar wavelet transform ────────────────────────────────────────────────
1260
1261    #[test]
1262    fn test_haar_transform_inverse_roundtrip() {
1263        let signal = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
1264        let coeffs = wavelet_haar_transform(&signal);
1265        let recovered = wavelet_haar_inverse(&coeffs);
1266        for (a, b) in signal.iter().zip(recovered.iter()) {
1267            assert!((a - b).abs() < 1e-10, "roundtrip mismatch: {a} vs {b}");
1268        }
1269    }
1270
1271    #[test]
1272    fn test_haar_transform_length_preserved() {
1273        let signal = vec![1.0; 16];
1274        let c = wavelet_haar_transform(&signal);
1275        assert_eq!(c.len(), 16);
1276    }
1277
1278    #[test]
1279    fn test_haar_constant_signal() {
1280        // Constant signal: only the DC coefficient should be non-zero
1281        let c = 4.0_f64;
1282        let signal = vec![c; 8];
1283        let coeffs = wavelet_haar_transform(&signal);
1284        // The very first coefficient captures the mean (up to scaling)
1285        assert!(coeffs[0].abs() > 1e-6, "DC coeff should be non-zero");
1286        // All detail coefficients (non-DC) should be ~0
1287        for &coef in &coeffs[1..] {
1288            assert!(coef.abs() < 1e-10, "detail coeff = {coef}");
1289        }
1290    }
1291
1292    #[test]
1293    fn test_haar_inverse_length_preserved() {
1294        let coeffs = vec![1.0; 8];
1295        let out = wavelet_haar_inverse(&coeffs);
1296        assert_eq!(out.len(), 8);
1297    }
1298
1299    #[test]
1300    fn test_haar_two_point() {
1301        let signal = vec![2.0, 4.0];
1302        let c = wavelet_haar_transform(&signal);
1303        let r = wavelet_haar_inverse(&c);
1304        assert!((r[0] - 2.0).abs() < 1e-12);
1305        assert!((r[1] - 4.0).abs() < 1e-12);
1306    }
1307
1308    // ── Sobolev norm (free function) ──────────────────────────────────────────
1309
1310    #[test]
1311    fn test_sobolev_norm_s0_equals_l2() {
1312        // H⁰ norm = L² norm (when s=0)
1313        let f = vec![1.0, 2.0, 3.0];
1314        let df = vec![0.0, 0.0, 0.0];
1315        let dx = 0.1;
1316        let sob = sobolev_norm(&f, &df, dx, 0.0);
1317        let l2 = l2_norm(&f, dx);
1318        assert!((sob - l2).abs() < 1e-12);
1319    }
1320
1321    #[test]
1322    fn test_sobolev_norm_positive() {
1323        let f = vec![1.0; 10];
1324        let df = vec![0.0; 10];
1325        let s = sobolev_norm(&f, &df, 0.1, 1.0);
1326        assert!(s > 0.0);
1327    }
1328
1329    #[test]
1330    fn test_sobolev_norm_larger_with_gradient() {
1331        let f = vec![1.0; 8];
1332        let df_zero = vec![0.0; 8];
1333        let df_nonzero = vec![1.0; 8];
1334        let dx = 0.1;
1335        let s0 = sobolev_norm(&f, &df_zero, dx, 1.0);
1336        let s1 = sobolev_norm(&f, &df_nonzero, dx, 1.0);
1337        assert!(s1 > s0, "nonzero derivative should increase Sobolev norm");
1338    }
1339
1340    // ── Operator norm ─────────────────────────────────────────────────────────
1341
1342    #[test]
1343    fn test_operator_norm_identity() {
1344        // Identity 3×3 matrix: σ_max = 1
1345        let id = vec![
1346            vec![1.0, 0.0, 0.0],
1347            vec![0.0, 1.0, 0.0],
1348            vec![0.0, 0.0, 1.0],
1349        ];
1350        let sigma = operator_norm_estimate(&id);
1351        assert!((sigma - 1.0).abs() < 1e-4, "identity σ_max = {sigma}");
1352    }
1353
1354    #[test]
1355    fn test_operator_norm_scaled_identity() {
1356        let scale = 5.0_f64;
1357        let m = vec![vec![scale, 0.0], vec![0.0, scale]];
1358        let sigma = operator_norm_estimate(&m);
1359        assert!(
1360            (sigma - scale).abs() < 1e-3,
1361            "scaled identity σ_max = {sigma}"
1362        );
1363    }
1364
1365    #[test]
1366    fn test_operator_norm_empty_matrix() {
1367        assert_eq!(operator_norm_estimate(&[]), 0.0);
1368    }
1369
1370    #[test]
1371    fn test_operator_norm_single_element() {
1372        let m = vec![vec![3.0_f64]];
1373        let sigma = operator_norm_estimate(&m);
1374        assert!((sigma - 3.0).abs() < 1e-4, "1x1 matrix σ = {sigma}");
1375    }
1376
1377    #[test]
1378    fn test_operator_norm_rectangular() {
1379        // 1×2 matrix [3, 4]: σ_max = √(9+16) = 5
1380        let m = vec![vec![3.0_f64, 4.0]];
1381        let sigma = operator_norm_estimate(&m);
1382        assert!((sigma - 5.0).abs() < 1e-4, "σ = {sigma}, expected 5");
1383    }
1384
1385    #[test]
1386    fn test_operator_norm_diagonal() {
1387        // Diagonal 3×3 with values [2, 5, 3]: σ_max = 5
1388        let m = vec![
1389            vec![2.0, 0.0, 0.0],
1390            vec![0.0, 5.0, 0.0],
1391            vec![0.0, 0.0, 3.0],
1392        ];
1393        let sigma = operator_norm_estimate(&m);
1394        assert!(
1395            (sigma - 5.0).abs() < 0.1,
1396            "diagonal max singular val = {sigma}"
1397        );
1398    }
1399
1400    // ── HilbertSpace ──────────────────────────────────────────────────────────
1401
1402    #[test]
1403    fn test_hilbert_inner_product() {
1404        let hs = HilbertSpace::new(4, 0.25);
1405        let f = vec![1.0, 2.0, 3.0, 4.0];
1406        let g = vec![4.0, 3.0, 2.0, 1.0];
1407        let ip = hs.inner_product(&f, &g);
1408        // (1*4 + 2*3 + 3*2 + 4*1) * 0.25 = 20 * 0.25 = 5
1409        assert!((ip - 5.0).abs() < 1e-12, "inner product = {ip}");
1410    }
1411
1412    #[test]
1413    fn test_hilbert_norm_positive() {
1414        let hs = HilbertSpace::new(4, 0.25);
1415        let f = vec![1.0; 4];
1416        assert!(hs.norm(&f) > 0.0);
1417    }
1418
1419    #[test]
1420    fn test_hilbert_normalize_unit_norm() {
1421        let hs = HilbertSpace::new(4, 0.25);
1422        let f = vec![3.0, 1.0, 4.0, 1.0];
1423        let fn_ = hs.normalize(&f);
1424        let nm = hs.norm(&fn_);
1425        assert!((nm - 1.0).abs() < 1e-10, "normalized norm = {nm}");
1426    }
1427
1428    #[test]
1429    fn test_hilbert_orthogonality_check() {
1430        let n = 100;
1431        let dx = 2.0 * PI / n as f64;
1432        let hs = HilbertSpace::new(n, dx);
1433        let sin_v: Vec<f64> = (0..n).map(|i| ((i as f64 + 0.5) * dx).sin()).collect();
1434        let cos_v: Vec<f64> = (0..n).map(|i| ((i as f64 + 0.5) * dx).cos()).collect();
1435        assert!(hs.are_orthogonal(&sin_v, &cos_v, 1e-9));
1436    }
1437
1438    #[test]
1439    fn test_hilbert_projection_length() {
1440        let n = 8;
1441        let dx = 1.0 / n as f64;
1442        let hs = HilbertSpace::new(n, dx);
1443        let f = vec![1.0; n];
1444        let basis = vec![(0..n).map(|i| i as f64 * dx).collect::<Vec<_>>()];
1445        let p = hs.project(&f, &basis);
1446        assert_eq!(p.len(), n);
1447    }
1448
1449    #[test]
1450    fn test_hilbert_distance_zero_self() {
1451        let hs = HilbertSpace::new(4, 0.25);
1452        let f = vec![1.0, 2.0, 3.0, 4.0];
1453        assert!(hs.distance(&f, &f).abs() < 1e-12);
1454    }
1455
1456    #[test]
1457    fn test_hilbert_orthonormalize_orthogonality() {
1458        let n = 100;
1459        let dx = 2.0 * PI / n as f64;
1460        let hs = HilbertSpace::new(n, dx);
1461        let v1: Vec<f64> = (0..n).map(|i| ((i as f64 + 0.5) * dx).sin()).collect();
1462        let v2: Vec<f64> = (0..n).map(|i| ((i as f64 + 0.5) * dx).cos()).collect();
1463        let q = hs.orthonormalize(&[v1, v2]);
1464        let ip = hs.inner_product(&q[0], &q[1]);
1465        assert!(ip.abs() < 1e-9, "orthonormalized inner product = {ip}");
1466    }
1467
1468    // ── BanachSpace ───────────────────────────────────────────────────────────
1469
1470    #[test]
1471    fn test_banach_l2_norm_matches_hilbert() {
1472        let dx = 0.25;
1473        let bs = BanachSpace::new(2.0, dx);
1474        let hs = HilbertSpace::new(4, dx);
1475        let f = vec![1.0, 2.0, 3.0, 4.0];
1476        let bn = bs.norm(&f);
1477        let hn = hs.norm(&f);
1478        assert!((bn - hn).abs() < 1e-10, "L2 norms match: {bn} vs {hn}");
1479    }
1480
1481    #[test]
1482    fn test_banach_l1_norm() {
1483        let dx = 1.0;
1484        let bs = BanachSpace::new(1.0, dx);
1485        let f = vec![1.0, -2.0, 3.0];
1486        // L1 norm = (1 + 2 + 3) * 1.0 = 6
1487        let n = bs.norm(&f);
1488        assert!((n - 6.0).abs() < 1e-12, "L1 norm = {n}");
1489    }
1490
1491    #[test]
1492    fn test_banach_linf_norm() {
1493        let dx = 0.1;
1494        let bs = BanachSpace::new(1e12, dx); // very large p ≈ L∞
1495        let f = vec![1.0, 5.0, 3.0];
1496        let n = bs.norm(&f);
1497        // Should be close to the max: 5.0 (for large p)
1498        assert!(n > 4.0 && n <= 5.0 * 1.01, "L∞-like norm = {n}");
1499    }
1500
1501    #[test]
1502    fn test_banach_holder_inequality() {
1503        let dx = 0.1;
1504        let bs = BanachSpace::new(2.0, dx);
1505        let f = vec![1.0, 2.0, 3.0, 4.0];
1506        let g = vec![4.0, 3.0, 2.0, 1.0];
1507        let (lhs, rhs) = bs.holder_bound(&f, &g);
1508        assert!(lhs <= rhs + 1e-10, "Hölder: {lhs} <= {rhs}");
1509    }
1510
1511    #[test]
1512    fn test_banach_unit_ball() {
1513        let dx = 1.0;
1514        let bs = BanachSpace::new(2.0, dx);
1515        let f = vec![0.5, 0.0];
1516        assert!(bs.in_unit_ball(&f, 1e-10));
1517    }
1518
1519    #[test]
1520    fn test_banach_linear_functional() {
1521        let dx = 0.5;
1522        let bs = BanachSpace::new(2.0, dx);
1523        let f = vec![1.0, 1.0];
1524        let g = vec![2.0, 2.0];
1525        // φ(f) = (1*2 + 1*2) * 0.5 = 2
1526        let val = bs.bounded_linear_functional(&f, &g);
1527        assert!((val - 2.0).abs() < 1e-12, "functional = {val}");
1528    }
1529
1530    // ── OperatorSpectrum ──────────────────────────────────────────────────────
1531
1532    #[test]
1533    fn test_operator_spectrum_trace() {
1534        let m = vec![vec![2.0, 1.0], vec![0.0, 3.0]];
1535        let op = OperatorSpectrum::new(m);
1536        assert!((op.trace() - 5.0).abs() < 1e-12);
1537    }
1538
1539    #[test]
1540    fn test_operator_spectrum_frobenius() {
1541        let m = vec![vec![3.0, 4.0], vec![0.0, 0.0]];
1542        let op = OperatorSpectrum::new(m);
1543        assert!((op.frobenius_norm() - 5.0).abs() < 1e-12);
1544    }
1545
1546    #[test]
1547    fn test_operator_spectrum_apply() {
1548        let m = vec![vec![2.0, 0.0], vec![0.0, 3.0]];
1549        let op = OperatorSpectrum::new(m);
1550        let v = vec![1.0, 2.0];
1551        let w = op.apply(&v);
1552        assert!((w[0] - 2.0).abs() < 1e-12);
1553        assert!((w[1] - 6.0).abs() < 1e-12);
1554    }
1555
1556    #[test]
1557    fn test_operator_spectrum_spectral_radius() {
1558        // Diagonal matrix: spectral radius = max eigenvalue
1559        let m = vec![vec![5.0, 0.0], vec![0.0, 2.0]];
1560        let op = OperatorSpectrum::new(m);
1561        let rho = op.spectral_radius(200);
1562        assert!((rho - 5.0).abs() < 0.1, "spectral radius = {rho}");
1563    }

    #[test]
    fn test_operator_spectrum_diagonal() {
        let m = vec![vec![7.0, 0.0], vec![0.0, 3.0]];
        let op = OperatorSpectrum::new(m);
        let d = op.diagonal();
        assert!((d[0] - 7.0).abs() < 1e-12);
        assert!((d[1] - 3.0).abs() < 1e-12);
    }

    #[test]
    fn test_operator_spectrum_resolvent_finite() {
        let m = vec![vec![1.0, 0.0], vec![0.0, 2.0]];
        let op = OperatorSpectrum::new(m);
        // At a regular point (e.g., λ=10) resolvent should be finite
        let r = op.resolvent_norm(10.0);
        assert!(r.is_finite() && r > 0.0, "resolvent = {r}");
    }
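
    // A tentative follow-up sketch, assuming `resolvent_norm(λ)` returns some norm
    // of (A − λI)⁻¹ as the test above suggests: the resolvent should be much larger
    // near an eigenvalue (λ ≈ 1) than far from the spectrum (λ = 10).
    #[test]
    fn test_operator_spectrum_resolvent_grows_near_spectrum() {
        let m = vec![vec![1.0, 0.0], vec![0.0, 2.0]];
        let op = OperatorSpectrum::new(m);
        let near = op.resolvent_norm(1.001);
        let far = op.resolvent_norm(10.0);
        assert!(near > far, "resolvent near spectrum {near} vs far {far}");
    }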

    // ── SobolevSpace ──────────────────────────────────────────────────────────

    #[test]
    fn test_sobolev_space_h0_equals_l2() {
        let n = 8;
        let dx = 0.125;
        let hs = HilbertSpace::new(n, dx);
        let ss = SobolevSpace::new(n, dx, 0);
        let f = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
        assert!(
            (ss.norm(&f) - hs.norm(&f)).abs() < 1e-10,
            "H^0 = L^2: {} vs {}",
            ss.norm(&f),
            hs.norm(&f)
        );
    }

    #[test]
    fn test_sobolev_space_norm_positive() {
        let ss = SobolevSpace::new(8, 0.125, 1);
        let f = vec![1.0; 8];
        assert!(ss.norm(&f) > 0.0);
    }

    #[test]
    fn test_sobolev_weak_derivative_constant() {
        // Derivative of constant = 0 in interior, small at boundary
        let f = vec![3.0; 8];
        let d = SobolevSpace::weak_derivative(&f, 0.125);
        for &di in &d[1..d.len() - 1] {
            assert!(di.abs() < 1e-12, "interior deriv of constant: {di}");
        }
    }
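
    // A small companion sketch, assuming `weak_derivative` is a consistent
    // finite-difference derivative on a uniform grid as the constant-function test
    // above implies: for a linear function the interior derivative equals the slope.
    #[test]
    fn test_sobolev_weak_derivative_linear() {
        let dx = 0.125;
        let slope = 2.0;
        let f: Vec<f64> = (0..8).map(|i| slope * i as f64 * dx).collect();
        let d = SobolevSpace::weak_derivative(&f, dx);
        for &di in &d[1..d.len() - 1] {
            assert!((di - slope).abs() < 1e-10, "interior deriv of linear: {di}");
        }
    }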

    #[test]
    fn test_sobolev_trace_operator() {
        let ss = SobolevSpace::new(8, 0.125, 1);
        let f: Vec<f64> = (0..8).map(|i| i as f64).collect();
        let tr = ss.trace(&f);
        assert_eq!(tr[0], 0.0);
        assert_eq!(tr[1], 7.0);
    }

    #[test]
    fn test_sobolev_seminorm_nonneg() {
        let ss = SobolevSpace::new(8, 0.125, 1);
        let f: Vec<f64> = (0..8).map(|i| (i as f64 * 0.5).sin()).collect();
        assert!(ss.seminorm(&f) >= 0.0);
    }

    #[test]
    fn test_sobolev_inner_product_positive_definite() {
        let ss = SobolevSpace::new(8, 0.125, 1);
        let f = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
        let ip = ss.inner_product(&f, &f);
        assert!(ip > 0.0, "H^k inner product <f,f> > 0");
    }

    #[test]
    fn test_sobolev_is_in_space() {
        let ss = SobolevSpace::new(8, 0.125, 2);
        let f: Vec<f64> = (0..8).map(|i| (i as f64).sin()).collect();
        assert!(ss.is_in_space(&f));
    }
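
    // A sketch of the standard embedding inequality, assuming the H^k norm adds
    // derivative terms on top of the L² part (consistent with the H^0 = L² test
    // above): ‖f‖_{H^1} ≥ ‖f‖_{L^2}.
    #[test]
    fn test_sobolev_h1_dominates_l2() {
        let n = 8;
        let dx = 0.125;
        let hs = HilbertSpace::new(n, dx);
        let ss = SobolevSpace::new(n, dx, 1);
        let f: Vec<f64> = (0..n).map(|i| (i as f64 * 0.7).cos()).collect();
        assert!(
            ss.norm(&f) + 1e-12 >= hs.norm(&f),
            "H^1 norm {} should dominate L^2 norm {}",
            ss.norm(&f),
            hs.norm(&f)
        );
    }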

    // ── FunctionalDerivative ──────────────────────────────────────────────────

    #[test]
    fn test_gateaux_derivative_linear() {
        // J(f) = ∑ f[i] (a discrete integral with dx = 1): d/dε J(f+εh)|_{ε=0} = ∑ h[i]
        let j = |f: &[f64]| f.iter().sum::<f64>();
        let f = vec![1.0, 2.0, 3.0];
        let h = vec![1.0, 0.0, 0.0];
        let dj = FunctionalDerivative::gateaux(j, &f, &h, 1e-6);
        assert!((dj - 1.0).abs() < 1e-6, "Gateaux derivative = {dj}");
    }
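
    // A second sketch using only `FunctionalDerivative::gateaux`: for the quadratic
    // J(f) = ∑ f[i]², the Gâteaux derivative in direction h is 2·∑ f[i]·h[i];
    // with f = [1, 2, 3] and h = [0, 1, 0] that is 2·2 = 4.
    #[test]
    fn test_gateaux_derivative_quadratic() {
        let j = |f: &[f64]| f.iter().map(|x| x * x).sum::<f64>();
        let f = vec![1.0, 2.0, 3.0];
        let h = vec![0.0, 1.0, 0.0];
        let dj = FunctionalDerivative::gateaux(j, &f, &h, 1e-6);
        assert!((dj - 4.0).abs() < 1e-4, "Gateaux derivative = {dj}");
    }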

    #[test]
    fn test_frechet_gradient_quadratic() {
        // J(f) = ∑ f[i]²  → gradient is 2*f
        let j = |f: &[f64]| f.iter().map(|x| x * x).sum::<f64>();
        let f = vec![1.0, 2.0, 3.0];
        let grad = FunctionalDerivative::frechet_gradient(j, &f, 1e-5);
        for (i, (&gi, &fi)) in grad.iter().zip(f.iter()).enumerate() {
            assert!(
                (gi - 2.0 * fi).abs() < 1e-4,
                "grad[{i}] = {gi}, expected {}",
                2.0 * fi
            );
        }
    }

    #[test]
    fn test_gradient_descent_minimizes() {
        // J(f) = (f[0]-1)² + (f[1]-2)²; minimum at [1, 2]
        let j = |f: &[f64]| (f[0] - 1.0).powi(2) + (f[1] - 2.0).powi(2);
        let f0 = vec![0.0, 0.0];
        let (fmin, val) = FunctionalDerivative::gradient_descent(j, &f0, 0.1, 200, 1e-5);
        assert!(val < 0.1, "not minimized: val = {val}");
        assert!((fmin[0] - 1.0).abs() < 0.1, "f[0] = {}", fmin[0]);
        assert!((fmin[1] - 2.0).abs() < 0.1, "f[1] = {}", fmin[1]);
    }

    #[test]
    fn test_newton_step_moves_toward_minimum() {
        // J(f) = (f[0]-3)²; minimum at f[0]=3
        let j = |f: &[f64]| (f[0] - 3.0).powi(2);
        let f = vec![1.0];
        let f_new = FunctionalDerivative::newton_step(j, &f, 1e-5);
        // Newton step on quadratic should reach minimum in one step
        assert!(
            (f_new[0] - 3.0).abs() < 0.1,
            "Newton step: f[0] = {}",
            f_new[0]
        );
    }

    #[test]
    fn test_second_gateaux_symmetric() {
        // For a quadratic J = ∑ f²: second Gâteaux should be symmetric
        let j = |f: &[f64]| f.iter().map(|x| x * x).sum::<f64>();
        let f = vec![1.0, 1.0];
        let h = vec![1.0, 0.0];
        let k_dir = vec![0.0, 1.0];
        let d2_hk = FunctionalDerivative::second_gateaux(j, &f, &h, &k_dir, 1e-4);
        let d2_kh = FunctionalDerivative::second_gateaux(j, &f, &k_dir, &h, 1e-4);
        assert!(
            (d2_hk - d2_kh).abs() < 1e-4,
            "second Gateaux not symmetric: {d2_hk} vs {d2_kh}"
        );
    }
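
    // A sketch complementing the symmetry check above, assuming `second_gateaux`
    // approximates the mixed derivative d²/dεdδ J(f + εh + δk): for J(f) = ∑ f[i]²
    // it should be 2·∑ h[i]·k[i], so with h = k = [1, 0] the value is ≈ 2.
    #[test]
    fn test_second_gateaux_quadratic_value() {
        let j = |f: &[f64]| f.iter().map(|x| x * x).sum::<f64>();
        let f = vec![1.0, 1.0];
        let h = vec![1.0, 0.0];
        let d2 = FunctionalDerivative::second_gateaux(j, &f, &h, &h, 1e-4);
        assert!((d2 - 2.0).abs() < 1e-2, "second Gateaux value = {d2}");
    }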

    // ── VariationalProblem ────────────────────────────────────────────────────

    #[test]
    fn test_euler_lagrange_residual_length() {
        // L(x, u, u') = u'²
        let u: Vec<f64> = (0..10).map(|i| i as f64 * 0.1).collect();
        let res = VariationalProblem::euler_lagrange_residual(|_x, _u, up| up * up, &u, 0.1);
        assert_eq!(res.len(), u.len());
    }

    #[test]
    fn test_euler_lagrange_linear_minimizer_residual() {
        // For L = u'² the E-L equation is -u'' = 0, so a linear u is an exact
        // minimizer. Only finiteness of the interior residuals is asserted here,
        // to stay robust against finite-difference noise.
        let n = 10;
        let dx = 1.0 / n as f64;
        let u: Vec<f64> = (0..n).map(|i| i as f64 * dx).collect();
        let res = VariationalProblem::euler_lagrange_residual(|_x, _u, up| up * up, &u, dx);
        for &r in &res[1..n - 1] {
            assert!(r.is_finite(), "residual finite: {r}");
        }
    }
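
    // A rough counterpart sketch to the linear-minimizer case: for the same
    // L = u'², a quadratic u(x) = x² has -u'' = -2 ≠ 0, so (assuming the residual
    // approximates the Euler-Lagrange expression, up to sign and discretization
    // error) the interior residual should be bounded away from zero.
    #[test]
    fn test_euler_lagrange_quadratic_nonzero_residual() {
        let n = 10;
        let dx = 1.0 / n as f64;
        let u: Vec<f64> = (0..n).map(|i| (i as f64 * dx).powi(2)).collect();
        let res = VariationalProblem::euler_lagrange_residual(|_x, _u, up| up * up, &u, dx);
        for &r in &res[1..n - 1] {
            assert!(r.abs() > 0.1, "quadratic u should not satisfy -u'' = 0: {r}");
        }
    }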

    #[test]
    fn test_steepest_descent_minimizes() {
        // J(f) = ∑ (f[i] - c)²; minimum at f = [c, c, ...]
        let c = 2.0_f64;
        let j = move |f: &[f64]| f.iter().map(|x| (x - c).powi(2)).sum::<f64>();
        let u0 = vec![0.0; 4];
        let (u, hist) = VariationalProblem::steepest_descent(j, &u0, 0.1, 1e-6, 300);
        assert!(!hist.is_empty());
        for &ui in &u {
            assert!((ui - c).abs() < 0.1, "steepest descent: ui = {ui}");
        }
    }

    #[test]
    fn test_first_variation_linear() {
        // δJ = ∫ f'(u) v dx
        let u = vec![1.0; 4];
        let v = vec![1.0; 4];
        let fp = vec![2.0; 4]; // derivative of u²: 2u = 2
        let dx = 0.25;
        let dj = VariationalProblem::first_variation(&u, &v, &fp, dx);
        // = (2*1 + 2*1 + 2*1 + 2*1) * 0.25 = 2.0
        assert!((dj - 2.0).abs() < 1e-12, "first variation = {dj}");
    }
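
    // A small follow-up sketch: the first variation δJ = ∫ f'(u) v dx is linear in
    // the test function v, so doubling v should double the value computed in the
    // test above (from 2.0 to 4.0), with the same u, fp, and dx.
    #[test]
    fn test_first_variation_linear_in_v() {
        let u = vec![1.0; 4];
        let v = vec![2.0; 4]; // doubled test function
        let fp = vec![2.0; 4];
        let dx = 0.25;
        let dj = VariationalProblem::first_variation(&u, &v, &fp, dx);
        assert!((dj - 4.0).abs() < 1e-12, "first variation = {dj}");
    }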

    #[test]
    fn test_augmented_lagrangian_reduces_constraint() {
        // Minimize ∑ f² subject to ∑ f = 1
        let j = |f: &[f64]| f.iter().map(|x| x * x).sum::<f64>();
        let g = |f: &[f64]| f.iter().sum::<f64>() - 1.0;
        let u0 = vec![0.5, 0.5];
        let (u, _lambda) = VariationalProblem::augmented_lagrangian(j, g, &u0, 1.0, 0.05, 100);
        // Smoke test: it should run without panicking and return finite values.
        assert!(u.iter().all(|ui| ui.is_finite()), "non-finite minimizer entry");
    }
}
1772}