Skip to main content

oxilean_std/numerical_analysis/
functions.rs

1//! Auto-generated module
2//!
//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5use oxilean_kernel::{BinderInfo, Declaration, Environment, Expr, Level, Name};
6
7use super::types::{
8    BisectionSolver, CrankNicolsonSolver, GradientDescentOptimizer, Interval, MonteCarloIntegrator,
9    NewtonRaphsonSolver, PowerIterationSolver, RungeKutta4Solver, SparseMatrix, TikhonovSolver,
10};
11
/// Build the application node `f a`.
pub fn app(f: Expr, a: Expr) -> Expr {
    Expr::App(Box::new(f), Box::new(a))
}
/// Build the left-nested application `f a b`.
pub fn app2(f: Expr, a: Expr, b: Expr) -> Expr {
    app(app(f, a), b)
}
/// Build the left-nested application `f a b c`.
pub fn app3(f: Expr, a: Expr, b: Expr, c: Expr) -> Expr {
    app(app2(f, a, b), c)
}
/// Reference the constant named `s`, with an empty universe-argument list.
pub fn cst(s: &str) -> Expr {
    Expr::Const(Name::str(s), vec![])
}
/// `Prop`, encoded as `Sort 0`.
pub fn prop() -> Expr {
    Expr::Sort(Level::zero())
}
/// `Type`, encoded as `Sort 1`.
pub fn type0() -> Expr {
    Expr::Sort(Level::succ(Level::zero()))
}
/// Build the dependent function type `Π (name : dom), body`.
pub fn pi(bi: BinderInfo, name: &str, dom: Expr, body: Expr) -> Expr {
    Expr::Pi(bi, Name::str(name), Box::new(dom), Box::new(body))
}
/// Non-dependent arrow `a → b`: a Pi whose binder `_` is never referenced.
pub fn arrow(a: Expr, b: Expr) -> Expr {
    pi(BinderInfo::Default, "_", a, b)
}
/// Bound variable with de Bruijn index `n` (0 = innermost binder).
pub fn bvar(n: u32) -> Expr {
    Expr::BVar(n)
}
/// The `Nat` constant.
pub fn nat_ty() -> Expr {
    cst("Nat")
}
/// The `Real` constant.
pub fn real_ty() -> Expr {
    cst("Real")
}
/// The `Bool` constant.
pub fn bool_ty() -> Expr {
    cst("Bool")
}
/// Function type `dom → cod` (alias for [`arrow`], kept for readability).
pub fn fn_ty(dom: Expr, cod: Expr) -> Expr {
    arrow(dom, cod)
}
/// `ConvergentSequence : (Nat → Real) → Real → Prop`
/// f converges to L.
pub fn convergent_sequence_ty() -> Expr {
    arrow(fn_ty(nat_ty(), real_ty()), arrow(real_ty(), prop()))
}
/// `CauchySequence : (Nat → Real) → Prop`
/// Cauchy completeness: every Cauchy sequence converges.
pub fn cauchy_sequence_ty() -> Expr {
    arrow(fn_ty(nat_ty(), real_ty()), prop())
}
/// `LipschitzContinuous : (Real → Real) → Real → Prop`
/// f is Lipschitz continuous with constant K.
pub fn lipschitz_ty() -> Expr {
    arrow(fn_ty(real_ty(), real_ty()), arrow(real_ty(), prop()))
}
/// `ContractionMapping : (Real → Real) → Real → Prop`
/// f is a contraction mapping with factor k < 1.
pub fn contraction_ty() -> Expr {
    arrow(fn_ty(real_ty(), real_ty()), arrow(real_ty(), prop()))
}
/// `FixedPoint : (Real → Real) → Real → Prop`
/// x* is a fixed point: f(x*) = x*.
pub fn fixed_point_ty() -> Expr {
    arrow(fn_ty(real_ty(), real_ty()), arrow(real_ty(), prop()))
}
/// `NewtonConvergence : (Real → Real) → (Real → Real) → Real → Prop`
/// Newton's method exhibits quadratic convergence near a simple root.
pub fn newton_convergence_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(fn_ty(real_ty(), real_ty()), arrow(real_ty(), prop())),
    )
}
/// `RungeKuttaError : (Real → Real → Real) → Real → Prop`
/// RK4 global truncation error is O(h^4).
pub fn runge_kutta_error_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), fn_ty(real_ty(), real_ty())),
        arrow(real_ty(), prop()),
    )
}
/// `BisectionConvergence : (Real → Real) → Real → Real → Prop`
/// Bisection halves the interval at each step.
pub fn bisection_convergence_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(real_ty(), arrow(real_ty(), prop())),
    )
}
/// `IEEE754Rounding : (Real → Real) → Prop`
/// A function models IEEE 754 rounding to nearest.
pub fn ieee754_rounding_ty() -> Expr {
    arrow(fn_ty(real_ty(), real_ty()), prop())
}
/// `FloatCancellation : (Real → Real → Real) → Real → Real → Prop`
/// Catastrophic cancellation: subtracting two nearly-equal floats.
pub fn float_cancellation_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), fn_ty(real_ty(), real_ty())),
        arrow(real_ty(), arrow(real_ty(), prop())),
    )
}
/// `ConditionNumber : (Real → Real) → Real → Real → Prop`
/// The condition number of f at x is κ.
pub fn condition_number_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(real_ty(), arrow(real_ty(), prop())),
    )
}
/// `NumericalStability : (Real → Real) → Prop`
/// An algorithm is numerically stable.
pub fn numerical_stability_ty() -> Expr {
    arrow(fn_ty(real_ty(), real_ty()), prop())
}
/// `BackwardStability : (Real → Real) → Real → Prop`
/// An algorithm has backward stability constant ε.
pub fn backward_stability_ty() -> Expr {
    arrow(fn_ty(real_ty(), real_ty()), arrow(real_ty(), prop()))
}
/// `BisectionRate : (Real → Real) → Prop`
/// The bisection method converges linearly with rate 1/2.
pub fn bisection_rate_ty() -> Expr {
    arrow(fn_ty(real_ty(), real_ty()), prop())
}
/// `SecantConvergence : (Real → Real) → Real → Prop`
/// The secant method converges with superlinear order ~1.618.
pub fn secant_convergence_ty() -> Expr {
    arrow(fn_ty(real_ty(), real_ty()), arrow(real_ty(), prop()))
}
/// `LagrangeInterpolantError : (Real → Real) → Nat → Real → Prop`
/// Error bound for Lagrange interpolation with n+1 nodes at x.
/// (Binder names "f"/"n" are cosmetic: the bodies never reference them.)
pub fn lagrange_interpolant_error_ty() -> Expr {
    pi(
        BinderInfo::Default,
        "f",
        fn_ty(real_ty(), real_ty()),
        pi(BinderInfo::Default, "n", nat_ty(), arrow(real_ty(), prop())),
    )
}
/// `NewtonDividedDifference : (Real → Real) → Nat → Real → Real → Prop`
/// Newton divided-difference interpolation formula.
pub fn newton_divided_difference_ty() -> Expr {
    pi(
        BinderInfo::Default,
        "f",
        fn_ty(real_ty(), real_ty()),
        pi(
            BinderInfo::Default,
            "n",
            nat_ty(),
            arrow(real_ty(), arrow(real_ty(), prop())),
        ),
    )
}
/// `CubicSplineError : (Real → Real) → Real → Prop`
/// Cubic spline interpolation error bound O(h^4).
pub fn cubic_spline_error_ty() -> Expr {
    arrow(fn_ty(real_ty(), real_ty()), arrow(real_ty(), prop()))
}
/// `GaussianQuadratureExact : Nat → Nat → Prop`
/// Gaussian quadrature with n points is exact for polynomials of degree ≤ 2n-1.
pub fn gaussian_quadrature_exact_ty() -> Expr {
    arrow(nat_ty(), arrow(nat_ty(), prop()))
}
/// `AdaptiveQuadratureConvergence : (Real → Real) → Real → Prop`
/// Adaptive quadrature converges to given tolerance.
pub fn adaptive_quadrature_convergence_ty() -> Expr {
    arrow(fn_ty(real_ty(), real_ty()), arrow(real_ty(), prop()))
}
/// `AdamsBashforthError : Nat → Real → Prop`
/// Adams-Bashforth p-step method has global error O(h^p).
pub fn adams_bashforth_error_ty() -> Expr {
    arrow(nat_ty(), arrow(real_ty(), prop()))
}
/// `StiffODE : (Real → Real → Real) → Real → Prop`
/// An ODE is stiff with stiffness ratio r.
pub fn stiff_ode_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), fn_ty(real_ty(), real_ty())),
        arrow(real_ty(), prop()),
    )
}
/// `ImplicitMethodStability : (Real → Real → Real) → Prop`
/// An implicit method is A-stable for the given ODE.
pub fn implicit_method_stability_ty() -> Expr {
    arrow(fn_ty(real_ty(), fn_ty(real_ty(), real_ty())), prop())
}
/// `FiniteDifferenceConsistency : (Real → Real → Real) → Real → Prop`
/// A finite difference scheme is consistent with truncation error O(h^p).
pub fn finite_difference_consistency_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), fn_ty(real_ty(), real_ty())),
        arrow(real_ty(), prop()),
    )
}
/// `GalerkinOrthogonality : (Real → Real) → Prop`
/// Galerkin method satisfies the orthogonality condition.
pub fn galerkin_orthogonality_ty() -> Expr {
    arrow(fn_ty(real_ty(), real_ty()), prop())
}
/// `FEMConvergence : (Real → Real) → Real → Real → Prop`
/// Finite element method converges with rate O(h^k) in H^1 norm.
pub fn fem_convergence_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(real_ty(), arrow(real_ty(), prop())),
    )
}
/// `ConjugateGradientConvergence : Nat → Real → Prop`
/// CG method converges in at most n steps; rate depends on condition number.
pub fn conjugate_gradient_convergence_ty() -> Expr {
    arrow(nat_ty(), arrow(real_ty(), prop()))
}
/// `KrylovDimension : Nat → Nat → Prop`
/// Krylov subspace of dimension k for an n×n system.
pub fn krylov_dimension_ty() -> Expr {
    arrow(nat_ty(), arrow(nat_ty(), prop()))
}
/// `PreconditionedSystem : (Real → Real) → Real → Prop`
/// A preconditioned system has condition number κ.
pub fn preconditioned_system_ty() -> Expr {
    arrow(fn_ty(real_ty(), real_ty()), arrow(real_ty(), prop()))
}
/// `SVDDecomposition : Nat → Nat → Prop`
/// An m×n matrix has a singular value decomposition.
pub fn svd_decomposition_ty() -> Expr {
    arrow(nat_ty(), arrow(nat_ty(), prop()))
}
/// `QRAlgorithmConvergence : Nat → Real → Prop`
/// The QR algorithm converges to eigenvalues for an n×n matrix.
pub fn qr_algorithm_convergence_ty() -> Expr {
    arrow(nat_ty(), arrow(real_ty(), prop()))
}
/// `PowerIterationConvergence : Nat → Real → Prop`
/// Power iteration converges with ratio |λ₂/λ₁|.
pub fn power_iteration_convergence_ty() -> Expr {
    arrow(nat_ty(), arrow(real_ty(), prop()))
}
/// `RichardsonExtrapolation : (Real → Real) → Real → Nat → Prop`
/// Richardson extrapolation improves convergence order by p.
pub fn richardson_extrapolation_ty() -> Expr {
    pi(
        BinderInfo::Default,
        "f",
        fn_ty(real_ty(), real_ty()),
        arrow(real_ty(), arrow(nat_ty(), prop())),
    )
}
/// `MultigridConvergence : Nat → Real → Prop`
/// Multigrid method converges with mesh-independent rate.
pub fn multigrid_convergence_ty() -> Expr {
    arrow(nat_ty(), arrow(real_ty(), prop()))
}
/// `FastMultipoleComplexity : Nat → Real → Prop`
/// FMM computes N-body interactions in O(N) with precision ε.
pub fn fast_multipole_complexity_ty() -> Expr {
    arrow(nat_ty(), arrow(real_ty(), prop()))
}
/// `SparseMatrixDensity : Nat → Real → Prop`
/// An n×n matrix has sparsity density ρ (fraction of nonzeros).
pub fn sparse_matrix_density_ty() -> Expr {
    arrow(nat_ty(), arrow(real_ty(), prop()))
}
/// `LaxEquivalence : (Real → Real → Real) → Prop`
/// Lax equivalence: consistency + stability ↔ convergence.
pub fn lax_equivalence_ty() -> Expr {
    arrow(fn_ty(real_ty(), fn_ty(real_ty(), real_ty())), prop())
}
/// `RungeKuttaOrder : Nat → (Real → Real → Real) → Real → Prop`
/// An s-stage RK method has global error order p for the given ODE and step h.
pub fn runge_kutta_order_ty() -> Expr {
    arrow(
        nat_ty(),
        arrow(
            fn_ty(real_ty(), fn_ty(real_ty(), real_ty())),
            arrow(real_ty(), prop()),
        ),
    )
}
/// `AdamsBashforthStability : Nat → Real → Prop`
/// The p-step Adams-Bashforth method has stability region containing disk of radius r.
pub fn adams_bashforth_stability_ty() -> Expr {
    arrow(nat_ty(), arrow(real_ty(), prop()))
}
/// `AdamsMoultonError : Nat → Real → Prop`
/// Adams-Moulton p-step implicit method has global error O(h^(p+1)).
pub fn adams_moulton_error_ty() -> Expr {
    arrow(nat_ty(), arrow(real_ty(), prop()))
}
/// `StiffnessRatio : (Real → Real → Real) → Real → Real → Prop`
/// The ODE has stiffness ratio σ = |λ_max|/|λ_min| at (t, y).
pub fn stiffness_ratio_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), fn_ty(real_ty(), real_ty())),
        arrow(real_ty(), arrow(real_ty(), prop())),
    )
}
/// `CrankNicolsonStability : (Real → Real → Real) → Prop`
/// The Crank-Nicolson scheme is unconditionally stable (A-stable).
pub fn crank_nicolson_stability_ty() -> Expr {
    arrow(fn_ty(real_ty(), fn_ty(real_ty(), real_ty())), prop())
}
/// `FDMStabilityCondition : (Real → Real → Real) → Real → Real → Prop`
/// Von Neumann stability: amplification factor satisfies |g| ≤ 1 for given Δt, Δx.
pub fn fdm_stability_condition_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), fn_ty(real_ty(), real_ty())),
        arrow(real_ty(), arrow(real_ty(), prop())),
    )
}
/// `FDMTruncationError : (Real → Real → Real) → Real → Nat → Prop`
/// The FD scheme has truncation error O(h^p) for the given PDE.
pub fn fdm_truncation_error_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), fn_ty(real_ty(), real_ty())),
        arrow(real_ty(), arrow(nat_ty(), prop())),
    )
}
/// `LaxFriedrichsScheme : (Real → Real → Real) → Real → Prop`
/// The Lax-Friedrichs scheme satisfies the CFL condition with ratio r.
pub fn lax_friedrichs_scheme_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), fn_ty(real_ty(), real_ty())),
        arrow(real_ty(), prop()),
    )
}
/// `ChebyshevSpectralAccuracy : (Real → Real) → Nat → Prop`
/// Chebyshev spectral approximation of f with N modes achieves spectral accuracy.
pub fn chebyshev_spectral_accuracy_ty() -> Expr {
    arrow(fn_ty(real_ty(), real_ty()), arrow(nat_ty(), prop()))
}
/// `FourierSpectralConvergence : (Real → Real) → Nat → Real → Prop`
/// Fourier spectral method with N modes has error bounded by ε for smooth f.
pub fn fourier_spectral_convergence_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(nat_ty(), arrow(real_ty(), prop())),
    )
}
/// `SpectralElementOrder : Nat → Nat → Real → Prop`
/// A spectral element method of polynomial order p with K elements achieves error ε.
pub fn spectral_element_order_ty() -> Expr {
    arrow(nat_ty(), arrow(nat_ty(), arrow(real_ty(), prop())))
}
/// `FredholmIntegralEquation : (Real → Real → Real) → (Real → Real) → Prop`
/// The Fredholm integral equation of the second kind with kernel K and rhs f is well-posed.
pub fn fredholm_integral_equation_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), fn_ty(real_ty(), real_ty())),
        arrow(fn_ty(real_ty(), real_ty()), prop()),
    )
}
/// `VolterraIntegralEquation : (Real → Real → Real) → (Real → Real) → Prop`
/// The Volterra integral equation with kernel K and rhs f has a unique solution.
pub fn volterra_integral_equation_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), fn_ty(real_ty(), real_ty())),
        arrow(fn_ty(real_ty(), real_ty()), prop()),
    )
}
/// `NyströmMethodConvergence : (Real → Real → Real) → Nat → Real → Prop`
/// Nyström discretization of a Fredholm equation with n nodes achieves error ε.
pub fn nystrom_method_convergence_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), fn_ty(real_ty(), real_ty())),
        arrow(nat_ty(), arrow(real_ty(), prop())),
    )
}
/// `GradientDescentConvergence : (Real → Real) → Real → Nat → Prop`
/// Gradient descent with step size α converges in at most k iterations for L-smooth f.
pub fn gradient_descent_convergence_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(real_ty(), arrow(nat_ty(), prop())),
    )
}
/// `NewtonMethodLocalConvergence : (Real → Real) → (Real → Real) → Real → Prop`
/// Newton's method converges quadratically from initial point x0 with radius r.
pub fn newton_method_local_convergence_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(fn_ty(real_ty(), real_ty()), arrow(real_ty(), prop())),
    )
}
/// `SteepestDescentRate : (Real → Real) → Real → Real → Prop`
/// Steepest descent converges at rate (κ-1)/(κ+1) for function with condition number κ.
pub fn steepest_descent_rate_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(real_ty(), arrow(real_ty(), prop())),
    )
}
/// `MonteCarloConvergence : (Real → Real) → Nat → Real → Prop`
/// Monte Carlo integration converges as O(1/√N) for N samples.
pub fn monte_carlo_convergence_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(nat_ty(), arrow(real_ty(), prop())),
    )
}
/// `VarianceReduction : (Real → Real) → (Real → Real) → Real → Prop`
/// Control variate g reduces variance of f by factor r compared to crude MC.
pub fn variance_reduction_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(fn_ty(real_ty(), real_ty()), arrow(real_ty(), prop())),
    )
}
/// `MCMCErgodicity : (Real → Real → Real) → Prop`
/// A Markov chain with transition kernel K is ergodic (mixes to stationary distribution).
pub fn mcmc_ergodicity_ty() -> Expr {
    arrow(fn_ty(real_ty(), fn_ty(real_ty(), real_ty())), prop())
}
/// `MCMCConvergenceRate : (Real → Real → Real) → Real → Prop`
/// The MCMC chain has spectral gap δ and converges exponentially at rate (1-δ)^n.
pub fn mcmc_convergence_rate_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), fn_ty(real_ty(), real_ty())),
        arrow(real_ty(), prop()),
    )
}
/// `NumericalContinuation : (Real → Real → Real) → Real → Real → Prop`
/// Pseudo-arclength continuation follows a solution branch from λ0 to λ1.
pub fn numerical_continuation_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), fn_ty(real_ty(), real_ty())),
        arrow(real_ty(), arrow(real_ty(), prop())),
    )
}
/// `BifurcationPoint : (Real → Real → Real) → Real → Prop`
/// The parameter value λ* is a bifurcation point of the parameterized system F(u,λ)=0.
pub fn bifurcation_point_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), fn_ty(real_ty(), real_ty())),
        arrow(real_ty(), prop()),
    )
}
/// `IntervalArithmetic : (Real → Real) → Real → Real → Real → Real → Prop`
/// Interval arithmetic evaluates f([a,b]) ⊆ [c,d] rigorously.
pub fn interval_arithmetic_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(
            real_ty(),
            arrow(real_ty(), arrow(real_ty(), arrow(real_ty(), prop()))),
        ),
    )
}
/// `VerifiedRoot : (Real → Real) → Real → Real → Prop`
/// There exists a unique root of f in interval [a,b] (interval Newton method certificate).
pub fn verified_root_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(real_ty(), arrow(real_ty(), prop())),
    )
}
/// `FloatingPointRoundingError : (Real → Real) → Real → Real → Prop`
/// The floating-point evaluation of f(x) has absolute error bounded by ε.
pub fn floating_point_rounding_error_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(real_ty(), arrow(real_ty(), prop())),
    )
}
/// `CancellationError : Real → Real → Real → Prop`
/// Subtracting nearly equal x ≈ y produces relative error bounded by ε.
pub fn cancellation_error_ty() -> Expr {
    arrow(real_ty(), arrow(real_ty(), arrow(real_ty(), prop())))
}
/// `TikhonovRegularization : (Real → Real) → Real → Real → Prop`
/// Tikhonov regularization with parameter λ produces solution with error ε.
pub fn tikhonov_regularization_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(real_ty(), arrow(real_ty(), prop())),
    )
}
/// `TruncatedSVDApproximation : Nat → Nat → Nat → Real → Prop`
/// Truncated SVD of an m×n matrix with rank k approximation has error bounded by σ_{k+1}.
pub fn truncated_svd_approximation_ty() -> Expr {
    arrow(
        nat_ty(),
        arrow(nat_ty(), arrow(nat_ty(), arrow(real_ty(), prop()))),
    )
}
/// `LASSORegularization : (Real → Real) → Real → Real → Prop`
/// LASSO with penalty λ produces a sparse solution with reconstruction error ε.
pub fn lasso_regularization_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(real_ty(), arrow(real_ty(), prop())),
    )
}
/// `MultigridOptimalComplexity : Nat → Nat → Prop`
/// Multigrid solves an n-dof system to tolerance ε in O(n) operations.
pub fn multigrid_optimal_complexity_ty() -> Expr {
    arrow(nat_ty(), arrow(nat_ty(), prop()))
}
/// `MultigridSmoothing : (Real → Real) → Nat → Real → Prop`
/// The smoother reduces high-frequency error by factor ρ after ν sweeps on mesh n.
pub fn multigrid_smoothing_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(nat_ty(), arrow(real_ty(), prop())),
    )
}
/// `DomainDecompositionConvergence : Nat → Real → Prop`
/// Schwarz domain decomposition with K subdomains converges with spectral radius ρ.
pub fn domain_decomposition_convergence_ty() -> Expr {
    arrow(nat_ty(), arrow(real_ty(), prop()))
}
/// `SchurComplementCondition : Nat → Real → Prop`
/// The Schur complement of a K-subdomain decomposition has condition number κ.
pub fn schur_complement_condition_ty() -> Expr {
    arrow(nat_ty(), arrow(real_ty(), prop()))
}
/// `HpFEMExponentialConvergence : Nat → Nat → Real → Prop`
/// The hp-FEM method with p degrees and h mesh size achieves exponential convergence rate β.
pub fn hp_fem_exponential_convergence_ty() -> Expr {
    arrow(nat_ty(), arrow(nat_ty(), arrow(real_ty(), prop())))
}
/// `TuckerDecomposition : Nat → Nat → Real → Prop`
/// A tensor of order d and mode sizes n has Tucker rank-r approximation with error ε.
pub fn tucker_decomposition_ty() -> Expr {
    arrow(nat_ty(), arrow(nat_ty(), arrow(real_ty(), prop())))
}
/// `TensorTrainApproximation : Nat → Nat → Real → Prop`
/// A tensor of order d admits a tensor-train decomposition of rank r with error ε.
pub fn tensor_train_approximation_ty() -> Expr {
    arrow(nat_ty(), arrow(nat_ty(), arrow(real_ty(), prop())))
}
/// `CertifiedNumericalComputation : (Real → Real) → Real → Real → Prop`
/// A computation certifies that f(x) lies in interval [lo, hi] with machine precision.
pub fn certified_numerical_computation_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(real_ty(), arrow(real_ty(), prop())),
    )
}
/// `ValidatedNumericsEnclosure : (Real → Real) → Real → Real → Real → Prop`
/// The enclosure algorithm provides verified bounds [a,b] on f(x) with radius ε.
pub fn validated_numerics_enclosure_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), real_ty()),
        arrow(real_ty(), arrow(real_ty(), arrow(real_ty(), prop()))),
    )
}
/// `BanachFixedPoint : ∀ (f : Real → Real) (k : Real), ContractionMapping f k → ∃ x, FixedPoint f x`
///
/// NOTE(review): the generated encoding below ends in a bare `Prop` rather than
/// the `∃ x, FixedPoint f x` stated above — confirm against the generator.
pub fn banach_fixed_point_ty() -> Expr {
    pi(
        BinderInfo::Default,
        "f",
        fn_ty(real_ty(), real_ty()),
        pi(
            BinderInfo::Default,
            "k",
            real_ty(),
            // De Bruijn indices: bvar(1) = f (outer binder), bvar(0) = k (inner binder).
            arrow(app2(cst("ContractionMapping"), bvar(1), bvar(0)), prop()),
        ),
    )
}
/// `IntermediateValue : ∀ (f : Real → Real) (a b c : Real), f(a) ≤ c ≤ f(b) → ∃ x, f(x) = c`
///
/// NOTE(review): the encoding binds only `f`, `a`, `b` and uses `Prop → Prop`
/// as a placeholder for the hypothesis/conclusion — weaker than the header states.
pub fn intermediate_value_ty() -> Expr {
    pi(
        BinderInfo::Default,
        "f",
        fn_ty(real_ty(), real_ty()),
        pi(
            BinderInfo::Default,
            "a",
            real_ty(),
            pi(BinderInfo::Default, "b", real_ty(), arrow(prop(), prop())),
        ),
    )
}
/// `TaylorErrorBound : ∀ (f : Real → Real) (n : Nat) (x a : Real), Prop`
pub fn taylor_error_bound_ty() -> Expr {
    pi(
        BinderInfo::Default,
        "f",
        fn_ty(real_ty(), real_ty()),
        pi(
            BinderInfo::Default,
            "n",
            nat_ty(),
            arrow(real_ty(), arrow(real_ty(), prop())),
        ),
    )
}
/// `EulerMethodError : ∀ (f : Real → Real → Real) (h : Real), Prop`
/// Euler method global truncation error is O(h).
pub fn euler_method_error_ty() -> Expr {
    arrow(
        fn_ty(real_ty(), fn_ty(real_ty(), real_ty())),
        arrow(real_ty(), prop()),
    )
}
/// Build the numerical analysis environment: register all axioms as opaque constants.
///
/// Each entry pairs a constant name with its type expression; every declaration is
/// added with an empty universe-parameter list. `Environment::add` errors are
/// discarded via `.ok()` — presumably so that re-registering an already-known name
/// is a silent no-op; confirm against `Environment::add` semantics.
pub fn build_numerical_analysis_env(env: &mut Environment) {
    let axioms: &[(&str, Expr)] = &[
        ("ConvergentSequence", convergent_sequence_ty()),
        ("CauchySequence", cauchy_sequence_ty()),
        ("LipschitzContinuous", lipschitz_ty()),
        ("ContractionMapping", contraction_ty()),
        ("FixedPoint", fixed_point_ty()),
        ("NewtonConvergence", newton_convergence_ty()),
        ("RungeKuttaError", runge_kutta_error_ty()),
        ("BisectionConvergence", bisection_convergence_ty()),
        ("IEEE754Rounding", ieee754_rounding_ty()),
        ("FloatCancellation", float_cancellation_ty()),
        ("ConditionNumber", condition_number_ty()),
        ("NumericalStability", numerical_stability_ty()),
        ("BackwardStability", backward_stability_ty()),
        ("BisectionRate", bisection_rate_ty()),
        ("SecantConvergence", secant_convergence_ty()),
        ("LagrangeInterpolantError", lagrange_interpolant_error_ty()),
        ("NewtonDividedDifference", newton_divided_difference_ty()),
        ("CubicSplineError", cubic_spline_error_ty()),
        ("GaussianQuadratureExact", gaussian_quadrature_exact_ty()),
        (
            "AdaptiveQuadratureConvergence",
            adaptive_quadrature_convergence_ty(),
        ),
        ("AdamsBashforthError", adams_bashforth_error_ty()),
        ("StiffODE", stiff_ode_ty()),
        ("ImplicitMethodStability", implicit_method_stability_ty()),
        (
            "FiniteDifferenceConsistency",
            finite_difference_consistency_ty(),
        ),
        ("GalerkinOrthogonality", galerkin_orthogonality_ty()),
        ("FEMConvergence", fem_convergence_ty()),
        (
            "ConjugateGradientConvergence",
            conjugate_gradient_convergence_ty(),
        ),
        ("KrylovDimension", krylov_dimension_ty()),
        ("PreconditionedSystem", preconditioned_system_ty()),
        ("SVDDecomposition", svd_decomposition_ty()),
        ("QRAlgorithmConvergence", qr_algorithm_convergence_ty()),
        (
            "PowerIterationConvergence",
            power_iteration_convergence_ty(),
        ),
        ("RichardsonExtrapolation", richardson_extrapolation_ty()),
        ("MultigridConvergence", multigrid_convergence_ty()),
        ("FastMultipoleComplexity", fast_multipole_complexity_ty()),
        ("SparseMatrixDensity", sparse_matrix_density_ty()),
        ("LaxEquivalence", lax_equivalence_ty()),
        // NOTE(review): theorem-style axioms below use snake_case names while the
        // predicates above use UpperCamelCase — presumably intentional generator output.
        ("banach_fixed_point", banach_fixed_point_ty()),
        ("intermediate_value", intermediate_value_ty()),
        ("taylor_error_bound", taylor_error_bound_ty()),
        ("euler_method_error", euler_method_error_ty()),
        (
            "NumericalSolution",
            arrow(fn_ty(real_ty(), real_ty()), real_ty()),
        ),
        (
            "ODESolution",
            arrow(
                fn_ty(real_ty(), fn_ty(real_ty(), real_ty())),
                fn_ty(real_ty(), real_ty()),
            ),
        ),
        // These two are registered with type `Prop` itself (nullary propositions).
        ("QuadraticConvergence", prop()),
        ("SecondOrderConvergence", prop()),
        ("RungeKuttaOrder", runge_kutta_order_ty()),
        ("AdamsBashforthStability", adams_bashforth_stability_ty()),
        ("AdamsMoultonError", adams_moulton_error_ty()),
        ("StiffnessRatio", stiffness_ratio_ty()),
        ("CrankNicolsonStability", crank_nicolson_stability_ty()),
        ("FDMStabilityCondition", fdm_stability_condition_ty()),
        ("FDMTruncationError", fdm_truncation_error_ty()),
        ("LaxFriedrichsScheme", lax_friedrichs_scheme_ty()),
        (
            "ChebyshevSpectralAccuracy",
            chebyshev_spectral_accuracy_ty(),
        ),
        (
            "FourierSpectralConvergence",
            fourier_spectral_convergence_ty(),
        ),
        ("SpectralElementOrder", spectral_element_order_ty()),
        ("FredholmIntegralEquation", fredholm_integral_equation_ty()),
        ("VolterraIntegralEquation", volterra_integral_equation_ty()),
        ("NystromMethodConvergence", nystrom_method_convergence_ty()),
        (
            "GradientDescentConvergence",
            gradient_descent_convergence_ty(),
        ),
        (
            "NewtonMethodLocalConvergence",
            newton_method_local_convergence_ty(),
        ),
        ("SteepestDescentRate", steepest_descent_rate_ty()),
        ("MonteCarloConvergence", monte_carlo_convergence_ty()),
        ("VarianceReduction", variance_reduction_ty()),
        ("MCMCErgodicity", mcmc_ergodicity_ty()),
        ("MCMCConvergenceRate", mcmc_convergence_rate_ty()),
        ("NumericalContinuation", numerical_continuation_ty()),
        ("BifurcationPoint", bifurcation_point_ty()),
        ("IntervalArithmetic", interval_arithmetic_ty()),
        ("VerifiedRoot", verified_root_ty()),
        (
            "FloatingPointRoundingError",
            floating_point_rounding_error_ty(),
        ),
        ("CancellationError", cancellation_error_ty()),
        ("TikhonovRegularization", tikhonov_regularization_ty()),
        (
            "TruncatedSVDApproximation",
            truncated_svd_approximation_ty(),
        ),
        ("LASSORegularization", lasso_regularization_ty()),
        (
            "MultigridOptimalComplexity",
            multigrid_optimal_complexity_ty(),
        ),
        ("MultigridSmoothing", multigrid_smoothing_ty()),
        (
            "DomainDecompositionConvergence",
            domain_decomposition_convergence_ty(),
        ),
        ("SchurComplementCondition", schur_complement_condition_ty()),
        (
            "HpFEMExponentialConvergence",
            hp_fem_exponential_convergence_ty(),
        ),
        ("TuckerDecomposition", tucker_decomposition_ty()),
        ("TensorTrainApproximation", tensor_train_approximation_ty()),
        (
            "CertifiedNumericalComputation",
            certified_numerical_computation_ty(),
        ),
        (
            "ValidatedNumericsEnclosure",
            validated_numerics_enclosure_ty(),
        ),
    ];
    for (name, ty) in axioms {
        env.add(Declaration::Axiom {
            name: Name::str(*name),
            univ_params: vec![],
            ty: ty.clone(),
        })
        .ok();
    }
}
/// Find a root of f in [a, b] using the bisection method.
///
/// Requires f(a) and f(b) to have opposite signs (the check uses
/// `f(a) * f(b) > 0.0`, so a zero at either endpoint is accepted).
/// Returns `None` if the sign condition is not met; if `max_iter` is
/// exhausted the current midpoint is returned as the best estimate.
pub fn bisection(
    f: &dyn Fn(f64) -> f64,
    mut a: f64,
    mut b: f64,
    tol: f64,
    max_iter: u32,
) -> Option<f64> {
    // Cache f(a) so each iteration costs one function evaluation instead of
    // two; `f` may be arbitrarily expensive. Results are bit-identical to
    // recomputing, since `a` only changes when we also learn f(mid).
    let mut fa = f(a);
    if fa * f(b) > 0.0 {
        return None;
    }
    for _ in 0..max_iter {
        let mid = (a + b) / 2.0;
        let fm = f(mid);
        // Stop on a small residual or a sufficiently narrow bracket.
        if fm.abs() < tol || (b - a) / 2.0 < tol {
            return Some(mid);
        }
        if fa * fm < 0.0 {
            // Root lies in [a, mid]; f(a) is unchanged.
            b = mid;
        } else {
            // Root lies in [mid, b]; the midpoint becomes the new left end.
            a = mid;
            fa = fm;
        }
    }
    Some((a + b) / 2.0)
}
/// Find a root of f via Newton-Raphson iteration x &lt;- x - f(x)/f'(x).
///
/// `df` must be the derivative of f. Returns `None` when the derivative
/// becomes numerically zero, or when the final iterate still fails the
/// tolerance check after `max_iter` updates.
pub fn newton_raphson(
    f: &dyn Fn(f64) -> f64,
    df: &dyn Fn(f64) -> f64,
    mut x: f64,
    tol: f64,
    max_iter: u32,
) -> Option<f64> {
    let mut remaining = max_iter;
    while remaining > 0 {
        let value = f(x);
        if value.abs() < tol {
            return Some(x);
        }
        let slope = df(x);
        // A numerically flat tangent gives an unusable update step.
        if slope.abs() < 1e-15 {
            return None;
        }
        x -= value / slope;
        remaining -= 1;
    }
    // Out of iterations: accept x only if it already meets the tolerance.
    if f(x).abs() < tol {
        Some(x)
    } else {
        None
    }
}
/// Find a root of f using the secant method (no derivative required).
///
/// Returns `None` if the secant denominator vanishes (nearly horizontal
/// secant line) or the tolerance is not met within `max_iter` iterations.
pub fn secant_method(
    f: &dyn Fn(f64) -> f64,
    mut x0: f64,
    mut x1: f64,
    tol: f64,
    max_iter: u32,
) -> Option<f64> {
    // Carry f(x0) and f(x1) across iterations: after the shift
    // x0 <- x1, the old f(x1) is exactly the new f(x0), so re-evaluating
    // both each pass doubled the number of function calls.
    let mut f0 = f(x0);
    let mut f1 = f(x1);
    for _ in 0..max_iter {
        if f1.abs() < tol {
            return Some(x1);
        }
        let denom = f1 - f0;
        // Degenerate secant: the two samples are numerically equal.
        if denom.abs() < 1e-15 {
            return None;
        }
        let x2 = x1 - f1 * (x1 - x0) / denom;
        x0 = x1;
        f0 = f1;
        x1 = x2;
        f1 = f(x1);
    }
    // Iteration budget exhausted: accept x1 only if it meets the tolerance.
    if f1.abs() < tol {
        Some(x1)
    } else {
        None
    }
}
/// One explicit Euler update: y_{n+1} = y_n + h * f(t_n, y_n).
pub fn euler_step(f: &dyn Fn(f64, f64) -> f64, t: f64, y: f64, h: f64) -> f64 {
    let slope = f(t, y);
    y + h * slope
}
/// Solve dy/dt = f(t, y) from t0 to t_end with the explicit Euler method
/// using `steps` uniform steps.
///
/// Returns the full trajectory as (t, y) pairs, including the initial
/// point, so the result has `steps + 1` entries.
pub fn euler_method(
    f: &dyn Fn(f64, f64) -> f64,
    t0: f64,
    y0: f64,
    t_end: f64,
    steps: u32,
) -> Vec<(f64, f64)> {
    let h = (t_end - t0) / f64::from(steps);
    let mut trajectory = Vec::with_capacity(steps as usize + 1);
    let (mut t, mut y) = (t0, y0);
    trajectory.push((t, y));
    for _ in 0..steps {
        // Explicit Euler update: y <- y + h * f(t, y).
        y += h * f(t, y);
        t += h;
        trajectory.push((t, y));
    }
    trajectory
}
/// Solve dy/dt = f(t, y) from t0 to t_end with the classic fourth-order
/// Runge-Kutta method using `steps` uniform steps.
///
/// Returns the full trajectory as (t, y) pairs, initial point included.
pub fn runge_kutta_4(
    f: &dyn Fn(f64, f64) -> f64,
    t0: f64,
    y0: f64,
    t_end: f64,
    steps: u32,
) -> Vec<(f64, f64)> {
    let h = (t_end - t0) / f64::from(steps);
    let mut path = Vec::with_capacity(steps as usize + 1);
    let (mut t, mut y) = (t0, y0);
    path.push((t, y));
    for _ in 0..steps {
        let half = h / 2.0;
        // Four slope samples: start, two midpoint estimates, endpoint.
        let k1 = f(t, y);
        let k2 = f(t + half, y + half * k1);
        let k3 = f(t + half, y + half * k2);
        let k4 = f(t + h, y + h * k3);
        // Simpson-like weighted average of the slopes.
        y += h / 6.0 * (k1 + 2.0 * k2 + 2.0 * k3 + k4);
        t += h;
        path.push((t, y));
    }
    path
}
/// Approximate ∫_a^b f(x) dx using the trapezoidal rule with n subintervals.
///
/// `n == 0` is treated as a single interval; previously it divided by zero
/// and produced a non-finite result.
pub fn trapezoidal_rule(f: &dyn Fn(f64) -> f64, a: f64, b: f64, n: u32) -> f64 {
    // Guard the degenerate call: n = 0 would make h infinite.
    let n = n.max(1);
    let h = (b - a) / n as f64;
    // Endpoints carry weight 1/2, interior nodes weight 1.
    let mut sum = (f(a) + f(b)) / 2.0;
    for i in 1..n {
        sum += f(a + i as f64 * h);
    }
    h * sum
}
/// Approximate ∫_a^b f(x) dx using composite Simpson's rule with n
/// subintervals (n must be even).
///
/// An odd n is rounded up to the next even value.
pub fn simpson_rule(f: &dyn Fn(f64) -> f64, a: f64, b: f64, n: u32) -> f64 {
    // Simpson's rule needs an even subinterval count; bump odd n by one.
    let n = n + n % 2;
    let h = (b - a) / n as f64;
    let mut acc = f(a) + f(b);
    for i in 1..n {
        // Interior nodes alternate weights 4 (odd index) and 2 (even index).
        let weight = if i % 2 == 1 { 4.0 } else { 2.0 };
        acc += weight * f(a + i as f64 * h);
    }
    h / 3.0 * acc
}
/// Solve the linear system Ax = b by Gaussian elimination with partial
/// pivoting.
///
/// Consumes the matrix and right-hand side. Returns `None` when a pivot
/// (after the row exchange) has magnitude below 1e-12, i.e. the system is
/// numerically singular.
pub fn gaussian_elimination(mut a: Vec<Vec<f64>>, mut b: Vec<f64>) -> Option<Vec<f64>> {
    let n = b.len();
    // Forward elimination with partial pivoting.
    for col in 0..n {
        // Pick the row with the largest magnitude in this column to limit
        // round-off growth.
        let mut max_row = col;
        for row in (col + 1)..n {
            if a[row][col].abs() > a[max_row][col].abs() {
                max_row = row;
            }
        }
        if a[max_row][col].abs() < 1e-12 {
            return None;
        }
        a.swap(col, max_row);
        b.swap(col, max_row);
        let pivot = a[col][col];
        // Zero out the entries below the pivot.
        for row in (col + 1)..n {
            let factor = a[row][col] / pivot;
            b[row] -= factor * b[col];
            for k in col..n {
                a[row][k] -= factor * a[col][k];
            }
        }
    }
    // Back substitution on the now upper-triangular system.
    let mut x = vec![0.0f64; n];
    for i in (0..n).rev() {
        let mut rhs = b[i];
        for j in (i + 1)..n {
            rhs -= a[i][j] * x[j];
        }
        if a[i][i].abs() < 1e-12 {
            return None;
        }
        x[i] = rhs / a[i][i];
    }
    Some(x)
}
/// Solve Ax = b iteratively using the Jacobi method, starting from x = 0.
///
/// Convergence is declared when the max-norm of the update between sweeps
/// falls below `tol`. Returns `None` on a (near-)zero diagonal entry or
/// when `max_iter` sweeps do not converge.
///
/// Parameters take slices (`&[Vec<f64>]`, `&[f64]`) instead of `&Vec<_>`;
/// existing callers passing `&vec` still compile via deref coercion.
pub fn jacobi_iteration(
    a: &[Vec<f64>],
    b: &[f64],
    max_iter: u32,
    tol: f64,
) -> Option<Vec<f64>> {
    let n = b.len();
    let mut x = vec![0.0f64; n];
    for _ in 0..max_iter {
        let mut x_new = vec![0.0f64; n];
        for i in 0..n {
            // x_new[i] = (b[i] - sum_{j != i} a[i][j] * x[j]) / a[i][i]
            let mut s = b[i];
            for (j, xj) in x.iter().enumerate() {
                if j != i {
                    s -= a[i][j] * *xj;
                }
            }
            // Jacobi requires a non-zero diagonal.
            if a[i][i].abs() < 1e-15 {
                return None;
            }
            x_new[i] = s / a[i][i];
        }
        // Max-norm of the step decides convergence.
        let diff: f64 = x_new
            .iter()
            .zip(x.iter())
            .map(|(new, old)| (new - old).abs())
            .fold(0.0_f64, f64::max);
        x = x_new;
        if diff < tol {
            return Some(x);
        }
    }
    None
}
/// Central-difference estimate of f'(x): (f(x + h) - f(x - h)) / (2h).
pub fn numerical_derivative(f: &dyn Fn(f64) -> f64, x: f64, h: f64) -> f64 {
    let forward = f(x + h);
    let backward = f(x - h);
    (forward - backward) / (2.0 * h)
}
/// Evaluate the Lagrange interpolating polynomial at `x` for nodes `xs`
/// with values `ys`.
///
/// Panics if `xs` and `ys` differ in length or are empty.
pub fn lagrange_interpolation(xs: &[f64], ys: &[f64], x: f64) -> f64 {
    assert_eq!(xs.len(), ys.len(), "xs and ys must have equal length");
    assert!(!xs.is_empty(), "xs must not be empty");
    let mut acc = 0.0;
    for (i, (&xi, &yi)) in xs.iter().zip(ys.iter()).enumerate() {
        // Basis polynomial l_i(x) = prod_{j != i} (x - x_j) / (x_i - x_j).
        let mut basis = 1.0;
        for (j, &xj) in xs.iter().enumerate() {
            if j != i {
                basis *= (x - xj) / (xi - xj);
            }
        }
        acc += yi * basis;
    }
    acc
}
/// Compute the Newton divided-difference coefficients for nodes `xs` and
/// values `ys`.
///
/// Returns the diagonal of the divided-difference table, i.e. the
/// coefficients of the Newton-form interpolating polynomial.
pub fn newton_divided_differences(xs: &[f64], ys: &[f64]) -> Vec<f64> {
    assert_eq!(xs.len(), ys.len());
    let n = xs.len();
    let mut table = ys.to_vec();
    // Update in place; sweeping each column bottom-up keeps the entries
    // of the previous order available until they are consumed.
    for order in 1..n {
        for i in (order..n).rev() {
            table[i] = (table[i] - table[i - 1]) / (xs[i] - xs[i - order]);
        }
    }
    table
}
/// Evaluate the Newton-form interpolating polynomial at `x` using Horner's
/// scheme, given the divided-difference `coeffs` computed from nodes `xs`.
///
/// Panics with a clear message if `xs` and `coeffs` differ in length or are
/// empty (matching the validation style of `lagrange_interpolation`).
pub fn newton_interpolation_eval(xs: &[f64], coeffs: &[f64], x: f64) -> f64 {
    assert_eq!(
        xs.len(),
        coeffs.len(),
        "xs and coeffs must have equal length"
    );
    assert!(!xs.is_empty(), "xs must not be empty");
    let n = xs.len();
    // Horner-style nested evaluation of the Newton form.
    let mut result = coeffs[n - 1];
    for i in (0..n - 1).rev() {
        result = result * (x - xs[i]) + coeffs[i];
    }
    result
}
1047/// Fixed-order Gaussian quadrature using Gauss-Legendre nodes on `[a, b]`.
1048///
1049/// Supports orders 2, 3, 4, and 5; falls back to Simpson's rule for other orders.
1050pub fn gaussian_quadrature(f: &dyn Fn(f64) -> f64, a: f64, b: f64, order: usize) -> f64 {
1051    let (nodes, weights): (&[f64], &[f64]) = match order {
1052        2 => (
1053            &[-0.577_350_269_189_626, 0.577_350_269_189_626],
1054            &[1.0, 1.0],
1055        ),
1056        3 => (
1057            &[-0.774_596_669_241_483, 0.0, 0.774_596_669_241_483],
1058            &[
1059                0.555_555_555_555_556,
1060                0.888_888_888_888_889,
1061                0.555_555_555_555_556,
1062            ],
1063        ),
1064        4 => (
1065            &[
1066                -0.861_136_311_594_953,
1067                -0.339_981_043_584_856,
1068                0.339_981_043_584_856,
1069                0.861_136_311_594_953,
1070            ],
1071            &[
1072                0.347_854_845_137_454,
1073                0.652_145_154_862_546,
1074                0.652_145_154_862_546,
1075                0.347_854_845_137_454,
1076            ],
1077        ),
1078        5 => (
1079            &[
1080                -0.906_179_845_938_664,
1081                -0.538_469_310_105_683,
1082                0.0,
1083                0.538_469_310_105_683,
1084                0.906_179_845_938_664,
1085            ],
1086            &[
1087                0.236_926_885_056_189,
1088                0.478_628_670_499_366,
1089                0.568_888_888_888_889,
1090                0.478_628_670_499_366,
1091                0.236_926_885_056_189,
1092            ],
1093        ),
1094        _ => return simpson_rule(f, a, b, 100),
1095    };
1096    let mid = (a + b) / 2.0;
1097    let half = (b - a) / 2.0;
1098    nodes
1099        .iter()
1100        .zip(weights.iter())
1101        .map(|(&t, &w)| w * f(mid + half * t))
1102        .sum::<f64>()
1103        * half
1104}
/// One step of the explicit two-step Adams-Bashforth method:
/// y_{n+1} = y_n + h/2 * (3 f_n - f_{n-1}).
///
/// `f_prev` = f(t_{n-1}, y_{n-1}) and `f_curr` = f(t_n, y_n).
pub fn adams_bashforth_2_step(y: f64, h: f64, f_prev: f64, f_curr: f64) -> f64 {
    let blended_slope = 3.0 * f_curr - f_prev;
    y + h / 2.0 * blended_slope
}
1111/// Solve the symmetric positive definite system `A x = b` using the conjugate
1112/// gradient method.
1113///
1114/// Returns `(solution, num_iters)` or `None` if not converged.
1115pub fn conjugate_gradient(
1116    a: &SparseMatrix,
1117    b: &[f64],
1118    tol: f64,
1119    max_iter: u32,
1120) -> Option<(Vec<f64>, u32)> {
1121    let n = b.len();
1122    let mut x = vec![0.0f64; n];
1123    let mut r: Vec<f64> = b.to_vec();
1124    let mut p = r.clone();
1125    let mut rs_old: f64 = r.iter().map(|v| v * v).sum();
1126    for iter in 0..max_iter {
1127        if rs_old.sqrt() < tol {
1128            return Some((x, iter));
1129        }
1130        let ap = a.matvec(&p);
1131        let pap: f64 = p.iter().zip(ap.iter()).map(|(pi, api)| pi * api).sum();
1132        if pap.abs() < 1e-15 {
1133            return None;
1134        }
1135        let alpha = rs_old / pap;
1136        for i in 0..n {
1137            x[i] += alpha * p[i];
1138            r[i] -= alpha * ap[i];
1139        }
1140        let rs_new: f64 = r.iter().map(|v| v * v).sum();
1141        if rs_new.sqrt() < tol {
1142            return Some((x, iter + 1));
1143        }
1144        let beta = rs_new / rs_old;
1145        for i in 0..n {
1146            p[i] = r[i] + beta * p[i];
1147        }
1148        rs_old = rs_new;
1149    }
1150    None
1151}
/// Richardson extrapolation: combine approximations `f_h` (step h) and
/// `f_h2` (step h/2) of an order-`p` method into the higher-order estimate
/// (2^p * f_h2 - f_h) / (2^p - 1).
pub fn richardson_extrapolation(f_h: f64, f_h2: f64, p: u32) -> f64 {
    let scale = f64::powi(2.0, p as i32);
    (scale * f_h2 - f_h) / (scale - 1.0)
}
// Unit tests covering the free functions above plus the solver structs,
// interval arithmetic, and environment builders from sibling modules.
#[cfg(test)]
mod tests {
    use super::*;
    // --- Root finding (free functions) ---
    #[test]
    fn test_bisection_sqrt2() {
        let f = |x: f64| x * x - 2.0;
        let root = bisection(&f, 1.0, 2.0, 1e-9, 100).expect("operation should succeed");
        let expected = 2.0_f64.sqrt();
        assert!(
            (root - expected).abs() < 1e-6,
            "bisection sqrt2: got {root}, expected {expected}"
        );
    }
    #[test]
    fn test_newton_raphson_sqrt2() {
        let f = |x: f64| x * x - 2.0;
        let df = |x: f64| 2.0 * x;
        let root = newton_raphson(&f, &df, 1.5, 1e-10, 50).expect("operation should succeed");
        let expected = 2.0_f64.sqrt();
        assert!(
            (root - expected).abs() < 1e-9,
            "Newton sqrt2: got {root}, expected {expected}"
        );
    }
    // --- ODE integrators ---
    #[test]
    fn test_euler_method() {
        let f = |_t: f64, y: f64| y;
        let result = euler_method(&f, 0.0, 1.0, 1.0, 1000);
        let (_, y_final) = result[result.len() - 1];
        let expected = 1.0_f64.exp();
        assert!(
            (y_final - expected).abs() / expected < 0.01,
            "Euler method: got {y_final}, expected {expected}"
        );
    }
    #[test]
    fn test_runge_kutta_4() {
        let f = |_t: f64, y: f64| y;
        let result = runge_kutta_4(&f, 0.0, 1.0, 1.0, 100);
        let (_, y_final) = result[result.len() - 1];
        let expected = 1.0_f64.exp();
        assert!(
            (y_final - expected).abs() < 1e-8,
            "RK4: got {y_final}, expected {expected}"
        );
    }
    // --- Quadrature rules ---
    #[test]
    fn test_trapezoidal_rule() {
        let f = |x: f64| x;
        let result = trapezoidal_rule(&f, 0.0, 1.0, 1000);
        assert!(
            (result - 0.5).abs() < 1e-6,
            "Trapezoidal: got {result}, expected 0.5"
        );
    }
    #[test]
    fn test_simpson_rule() {
        let f = |x: f64| x * x;
        let result = simpson_rule(&f, 0.0, 1.0, 100);
        let expected = 1.0 / 3.0;
        assert!(
            (result - expected).abs() < 1e-10,
            "Simpson: got {result}, expected {expected}"
        );
    }
    // --- Dense linear algebra and differentiation ---
    #[test]
    fn test_gaussian_elimination() {
        let a = vec![vec![2.0, 1.0], vec![1.0, 3.0]];
        let b = vec![5.0, 8.0];
        let x = gaussian_elimination(a, b).expect("operation should succeed");
        assert!((x[0] - 1.4).abs() < 1e-10, "x = {} (expected 1.4)", x[0]);
        assert!((x[1] - 2.2).abs() < 1e-10, "y = {} (expected 2.2)", x[1]);
    }
    #[test]
    fn test_numerical_derivative() {
        let f = |x: f64| x * x;
        let deriv = numerical_derivative(&f, 3.0, 1e-5);
        assert!(
            (deriv - 6.0).abs() < 1e-8,
            "Derivative: got {deriv}, expected 6.0"
        );
    }
    // --- Solver structs from super::types ---
    #[test]
    fn test_bisection_solver_struct() {
        let solver = BisectionSolver::new(1e-9, 100);
        let f = |x: f64| x * x - 3.0;
        let root = solver.solve(&f, 1.0, 2.0).expect("solve should succeed");
        let expected = 3.0_f64.sqrt();
        assert!((root - expected).abs() < 1e-6, "BisectionSolver: {root}");
    }
    #[test]
    fn test_newton_raphson_solver_struct() {
        let solver = NewtonRaphsonSolver::new(1e-10, 50);
        let f = |x: f64| x * x - 5.0;
        let df = |x: f64| 2.0 * x;
        let (root, converged) = solver.solve(&f, &df, 2.0);
        assert!(converged, "Newton-Raphson should converge");
        let expected = 5.0_f64.sqrt();
        assert!((root - expected).abs() < 1e-9, "NR root: {root}");
    }
    // --- Polynomial interpolation ---
    #[test]
    fn test_lagrange_interpolation_linear() {
        let xs = [0.0, 1.0];
        let ys = [1.0, 3.0];
        let val = lagrange_interpolation(&xs, &ys, 0.5);
        assert!((val - 2.0).abs() < 1e-12, "Lagrange linear: {val}");
    }
    #[test]
    fn test_lagrange_interpolation_quadratic() {
        let xs = [0.0, 1.0, 2.0];
        let ys = [0.0, 1.0, 4.0];
        let val = lagrange_interpolation(&xs, &ys, 1.5);
        assert!((val - 2.25).abs() < 1e-12, "Lagrange quadratic: {val}");
    }
    #[test]
    fn test_newton_divided_differences() {
        let xs = [0.0, 1.0, 2.0];
        let ys = [0.0, 1.0, 4.0];
        let coeffs = newton_divided_differences(&xs, &ys);
        let val = newton_interpolation_eval(&xs, &coeffs, 1.5);
        assert!((val - 2.25).abs() < 1e-12, "Newton DD: {val}");
    }
    // --- Gauss-Legendre quadrature ---
    #[test]
    fn test_gaussian_quadrature_order3() {
        let f = |x: f64| x * x;
        let result = gaussian_quadrature(&f, 0.0, 1.0, 3);
        let expected = 1.0 / 3.0;
        assert!(
            (result - expected).abs() < 1e-13,
            "GL order 3: got {result}, expected {expected}"
        );
    }
    #[test]
    fn test_gaussian_quadrature_order5() {
        let f = |x: f64| x * x * x * x;
        let result = gaussian_quadrature(&f, -1.0, 1.0, 5);
        let expected = 2.0 / 5.0;
        assert!(
            (result - expected).abs() < 1e-12,
            "GL order 5: got {result}, expected {expected}"
        );
    }
    #[test]
    fn test_rk4_solver_struct() {
        let solver = RungeKutta4Solver::new(0.01);
        let f = |_t: f64, y: f64| -y;
        let result = solver.integrate(&f, 0.0, 1.0, 1.0);
        let (_, y_final) = result[result.len() - 1];
        let expected = (-1.0_f64).exp();
        assert!(
            (y_final - expected).abs() < 1e-8,
            "RK4 struct: got {y_final}, expected {expected}"
        );
    }
    #[test]
    fn test_adams_bashforth_2() {
        let f = |_t: f64, y: f64| y;
        let h = 0.01;
        let y0 = 1.0;
        // Bootstrap the two-step method with one Euler step.
        let y1 = euler_step(&f, 0.0, y0, h);
        let f0 = f(0.0, y0);
        let f1 = f(h, y1);
        let y2 = adams_bashforth_2_step(y1, h, f0, f1);
        let expected = (2.0 * h).exp();
        assert!((y2 - expected).abs() < 1e-4, "AB2: {y2} vs {expected}");
    }
    // --- Eigenvalues and sparse linear algebra ---
    #[test]
    fn test_power_iteration_2x2() {
        let a = vec![vec![2.0, 1.0], vec![1.0, 2.0]];
        let solver = PowerIterationSolver::new(1e-9, 200);
        let (eig, _vec) = solver.solve(&a).expect("solve should succeed");
        assert!(
            (eig - 3.0).abs() < 1e-7,
            "Power iteration eigenvalue: {eig}"
        );
    }
    #[test]
    fn test_sparse_matrix_matvec() {
        let triplets = vec![(0, 0, 1.0), (0, 1, 2.0), (1, 0, 3.0), (1, 1, 4.0)];
        let mat = SparseMatrix::from_triplets(2, 2, &triplets);
        let x = vec![1.0, 1.0];
        let y = mat.matvec(&x);
        assert_eq!(y.len(), 2);
        assert!((y[0] - 3.0).abs() < 1e-12, "y[0] = {}", y[0]);
        assert!((y[1] - 7.0).abs() < 1e-12, "y[1] = {}", y[1]);
    }
    #[test]
    fn test_sparse_matrix_nnz() {
        let triplets = vec![(0, 0, 1.0), (1, 1, 2.0)];
        let mat = SparseMatrix::from_triplets(3, 3, &triplets);
        assert_eq!(mat.nnz(), 2);
    }
    #[test]
    fn test_conjugate_gradient_simple() {
        let triplets = vec![(0, 0, 4.0), (0, 1, 1.0), (1, 0, 1.0), (1, 1, 3.0)];
        let a = SparseMatrix::from_triplets(2, 2, &triplets);
        let b = vec![1.0, 2.0];
        let (x, _iters) = conjugate_gradient(&a, &b, 1e-12, 100).expect("operation should succeed");
        let expected_x0 = 1.0 / 11.0;
        let expected_x1 = 7.0 / 11.0;
        assert!(
            (x[0] - expected_x0).abs() < 1e-9,
            "CG x[0] = {} expected {}",
            x[0],
            expected_x0
        );
        assert!(
            (x[1] - expected_x1).abs() < 1e-9,
            "CG x[1] = {} expected {}",
            x[1],
            expected_x1
        );
    }
    #[test]
    fn test_richardson_extrapolation() {
        let f = |x: f64| x * x;
        let h = 1.0;
        let f_h = trapezoidal_rule(&f, 0.0, 1.0, 4);
        let f_h2 = trapezoidal_rule(&f, 0.0, 1.0, 8);
        let extrap = richardson_extrapolation(f_h, f_h2, 2);
        let expected = 1.0 / 3.0;
        let _ = h;
        assert!(
            (extrap - expected).abs() < 1e-10,
            "Richardson extrapolation: {extrap}"
        );
    }
    // --- Axiom environment registration ---
    #[test]
    fn test_build_numerical_analysis_env() {
        use oxilean_kernel::Environment;
        let mut env = Environment::new();
        build_numerical_analysis_env(&mut env);
        assert!(env
            .get(&oxilean_kernel::Name::str("ConvergentSequence"))
            .is_some());
        assert!(env
            .get(&oxilean_kernel::Name::str("IEEE754Rounding"))
            .is_some());
        assert!(env
            .get(&oxilean_kernel::Name::str("SVDDecomposition"))
            .is_some());
        assert!(env
            .get(&oxilean_kernel::Name::str("LaxEquivalence"))
            .is_some());
        assert!(env
            .get(&oxilean_kernel::Name::str("SparseMatrixDensity"))
            .is_some());
    }
    #[test]
    fn test_new_axiom_builders_registered() {
        use oxilean_kernel::Environment;
        let mut env = Environment::new();
        build_numerical_analysis_env(&mut env);
        // Must stay in sync with the axiom list in build_numerical_analysis_env.
        let new_axioms = [
            "RungeKuttaOrder",
            "AdamsBashforthStability",
            "AdamsMoultonError",
            "StiffnessRatio",
            "CrankNicolsonStability",
            "FDMStabilityCondition",
            "FDMTruncationError",
            "LaxFriedrichsScheme",
            "ChebyshevSpectralAccuracy",
            "FourierSpectralConvergence",
            "SpectralElementOrder",
            "FredholmIntegralEquation",
            "VolterraIntegralEquation",
            "NystromMethodConvergence",
            "GradientDescentConvergence",
            "NewtonMethodLocalConvergence",
            "SteepestDescentRate",
            "MonteCarloConvergence",
            "VarianceReduction",
            "MCMCErgodicity",
            "MCMCConvergenceRate",
            "NumericalContinuation",
            "BifurcationPoint",
            "IntervalArithmetic",
            "VerifiedRoot",
            "FloatingPointRoundingError",
            "CancellationError",
            "TikhonovRegularization",
            "TruncatedSVDApproximation",
            "LASSORegularization",
            "MultigridOptimalComplexity",
            "MultigridSmoothing",
            "DomainDecompositionConvergence",
            "SchurComplementCondition",
            "HpFEMExponentialConvergence",
            "TuckerDecomposition",
            "TensorTrainApproximation",
            "CertifiedNumericalComputation",
            "ValidatedNumericsEnclosure",
        ];
        for name in &new_axioms {
            assert!(
                env.get(&oxilean_kernel::Name::str(*name)).is_some(),
                "Axiom not registered: {name}"
            );
        }
    }
    // --- Interval arithmetic ---
    #[test]
    fn test_interval_add() {
        let a = Interval::new(1.0, 2.0);
        let b = Interval::new(3.0, 4.0);
        let c = a.add(b);
        assert!((c.lo - 4.0).abs() < 1e-14);
        assert!((c.hi - 6.0).abs() < 1e-14);
    }
    #[test]
    fn test_interval_sub() {
        let a = Interval::new(3.0, 5.0);
        let b = Interval::new(1.0, 2.0);
        let c = a.sub(b);
        assert!((c.lo - 1.0).abs() < 1e-14, "lo = {}", c.lo);
        assert!((c.hi - 4.0).abs() < 1e-14, "hi = {}", c.hi);
    }
    #[test]
    fn test_interval_mul_positive() {
        let a = Interval::new(2.0, 3.0);
        let b = Interval::new(4.0, 5.0);
        let c = a.mul(b);
        assert!((c.lo - 8.0).abs() < 1e-14, "lo = {}", c.lo);
        assert!((c.hi - 15.0).abs() < 1e-14, "hi = {}", c.hi);
    }
    #[test]
    fn test_interval_contains() {
        let iv = Interval::new(1.0, 3.0);
        assert!(iv.contains(2.0));
        assert!(!iv.contains(4.0));
        // Endpoints are inclusive.
        assert!(iv.contains(1.0));
        assert!(iv.contains(3.0));
    }
    #[test]
    fn test_interval_sqrt() {
        let iv = Interval::new(4.0, 9.0);
        let s = iv.sqrt();
        assert!((s.lo - 2.0).abs() < 1e-14);
        assert!((s.hi - 3.0).abs() < 1e-14);
    }
    #[test]
    fn test_interval_mid_and_width() {
        let iv = Interval::new(1.0, 3.0);
        assert!((iv.mid() - 2.0).abs() < 1e-14);
        assert!((iv.width() - 2.0).abs() < 1e-14);
    }
    // --- Regularized least squares ---
    #[test]
    fn test_tikhonov_solver_identity() {
        let a = vec![vec![1.0, 0.0], vec![0.0, 1.0]];
        let b = vec![1.0, 2.0];
        let solver = TikhonovSolver::new(0.0);
        let x = solver.solve(&a, &b).expect("solve should succeed");
        assert!((x[0] - 1.0).abs() < 1e-9, "x[0] = {}", x[0]);
        assert!((x[1] - 2.0).abs() < 1e-9, "x[1] = {}", x[1]);
    }
    #[test]
    fn test_tikhonov_solver_regularized() {
        let a = vec![vec![1.0], vec![1.0]];
        let b = vec![1.0, 1.0];
        let solver = TikhonovSolver::new(1.0);
        let x = solver.solve(&a, &b).expect("solve should succeed");
        let expected = 2.0 / 3.0;
        assert!(
            (x[0] - expected).abs() < 1e-9,
            "x[0] = {} expected {expected}",
            x[0]
        );
    }
    // --- Optimization ---
    #[test]
    fn test_gradient_descent_quadratic() {
        let grad = |v: &[f64]| vec![2.0 * v[0], 2.0 * v[1]];
        let optimizer = GradientDescentOptimizer::new(0.1, 1e-8, 500);
        let (x, _iters, converged) = optimizer.minimize(&grad, &[1.0, 1.0]);
        assert!(converged, "Gradient descent should converge");
        assert!(x[0].abs() < 1e-5, "x[0] = {}", x[0]);
        assert!(x[1].abs() < 1e-5, "x[1] = {}", x[1]);
    }
    // --- PDE time stepping ---
    #[test]
    fn test_crank_nicolson_steady_state() {
        let solver = CrankNicolsonSolver::new(1.0, 1.0, 4, 0.01);
        let u0 = vec![1.0; 4];
        let history = solver.integrate(&u0, 5.0);
        let u_final = &history[history.len() - 1];
        for &v in u_final {
            assert!(
                v.abs() < 0.1,
                "heat equation: value {v} should decay toward 0"
            );
        }
    }
    #[test]
    fn test_crank_nicolson_step_preserves_size() {
        let solver = CrankNicolsonSolver::new(0.5, 2.0, 5, 0.1);
        let u0 = vec![0.1, 0.2, 0.3, 0.2, 0.1];
        let u1 = solver.step(&u0).expect("step should succeed");
        assert_eq!(u1.len(), 5);
    }
    // --- Monte Carlo integration ---
    #[test]
    fn test_monte_carlo_integrate_constant() {
        let integrator = MonteCarloIntegrator::new(100_000, 42);
        let result = integrator.integrate(&|_| 1.0, 0.0, 1.0);
        assert!((result - 1.0).abs() < 0.01, "MC constant: {result}");
    }
    #[test]
    fn test_monte_carlo_integrate_linear() {
        let integrator = MonteCarloIntegrator::new(500_000, 123);
        let result = integrator.integrate(&|x| x, 0.0, 1.0);
        assert!((result - 0.5).abs() < 0.01, "MC linear: {result}");
    }
    #[test]
    fn test_monte_carlo_control_variate() {
        let integrator = MonteCarloIntegrator::new(200_000, 7);
        let result = integrator.integrate_with_control_variate(&|x| x * x, &|x| x, 0.5, 0.0, 1.0);
        let expected = 1.0 / 3.0;
        assert!(
            (result - expected).abs() < 0.01,
            "MC CV: {result}, expected {expected}"
        );
    }
}