scirs2_interpolate/statistical_advanced.rs

//! Advanced statistical interpolation methods
//!
//! This module implements sophisticated statistical interpolation techniques including:
//! - Variational Sparse Gaussian Processes
//! - Statistical Spline Inference with confidence bands
//! - Advanced Bootstrap methods (block, residual, wild)
//! - Model diagnostics and goodness-of-fit testing
//! - Multi-output Gaussian Processes

use crate::error::{InterpolateError, InterpolateResult};
use scirs2_core::ndarray::{Array1, Array2, ArrayView1, ArrayView2, Axis};
use scirs2_core::numeric::{Float, FromPrimitive};
use scirs2_core::random::{rngs::StdRng, Rng, SeedableRng};
use std::fmt::{Debug, Display};
use std::marker::PhantomData;

/// Variational Sparse Gaussian Process using inducing points
///
/// Implements Titsias' variational sparse GP with ELBO optimization
/// for scalable Gaussian process regression on large datasets.
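///
/// # Example
///
/// A minimal usage sketch mirroring this module's tests; treat it as
/// illustrative (the `ignore` marker and `?` error handling reflect that
/// this is a sketch, not a doctest):
///
/// ```ignore
/// use scirs2_core::ndarray::{Array1, Array2};
///
/// let kernel_params = KernelParameters {
///     signal_variance: 1.0,
///     length_scales: Array1::from(vec![1.0]),
///     kernel_type: KernelType::RBF,
/// };
/// // Three inducing points for five training points
/// let inducing = Array2::from_shape_vec((3, 1), vec![0.0, 2.0, 4.0]).unwrap();
/// let mut gp = VariationalSparseGP::new(inducing, kernel_params, 0.1);
///
/// let x = Array2::from_shape_vec((5, 1), vec![0.0, 1.0, 2.0, 3.0, 4.0]).unwrap();
/// let y = Array1::from(vec![0.0, 1.0, 4.0, 9.0, 16.0]);
/// gp.fit(&x.view(), &y.view(), 10, 0.01, 1e-6)?;
/// let (mean, variance) = gp.predict(&x.view())?;
/// ```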
#[derive(Debug, Clone)]
pub struct VariationalSparseGP<F: Float> {
    /// Inducing point locations
    pub inducing_points: Array2<F>,
    /// Variational mean parameters
    pub variational_mean: Array1<F>,
    /// Variational covariance parameters (Cholesky factor)
    pub variational_cov_chol: Array2<F>,
    /// Kernel hyperparameters
    pub kernel_params: KernelParameters<F>,
    /// Noise variance
    pub noise_variance: F,
    /// Evidence lower bound (ELBO)
    pub elbo: F,
}

/// Kernel parameters for Gaussian processes
#[derive(Debug, Clone)]
pub struct KernelParameters<F: Float> {
    /// Signal variance (amplitude squared)
    pub signal_variance: F,
    /// Length scales for each dimension
    pub length_scales: Array1<F>,
    /// Kernel type
    pub kernel_type: KernelType,
}

/// Types of kernels supported
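///
/// With r² the anisotropically scaled squared distance computed in
/// `squared_distance` (and σ² the signal variance), `kernel_function`
/// below evaluates:
///
/// - RBF: σ² · exp(−r²/2)
/// - Matérn 3/2: σ² · (1 + √3·r) · exp(−√3·r)
/// - Matérn 5/2: σ² · (1 + √5·r + 5r²/3) · exp(−√5·r)
/// - Rational Quadratic (α fixed at 1 here): σ² · (1 + r²/(2α))^(−α)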
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum KernelType {
    /// Radial Basis Function (Gaussian) kernel
    RBF,
    /// Matérn 3/2 kernel
    Matern32,
    /// Matérn 5/2 kernel
    Matern52,
    /// Rational Quadratic kernel
    RationalQuadratic,
}

impl<F> VariationalSparseGP<F>
where
    F: Float
        + FromPrimitive
        + Debug
        + Display
        + std::iter::Sum
        + 'static
        + std::ops::AddAssign
        + scirs2_core::ndarray::ScalarOperand
        + std::ops::SubAssign
        + std::ops::DivAssign,
{
    /// Create a new variational sparse GP
    pub fn new(
        inducing_points: Array2<F>,
        kernel_params: KernelParameters<F>,
        noise_variance: F,
    ) -> Self {
        let n_inducing = inducing_points.nrows();
        let variational_mean = Array1::zeros(n_inducing);
        let variational_cov_chol = Array2::eye(n_inducing);

        Self {
            inducing_points,
            variational_mean,
            variational_cov_chol,
            kernel_params,
            noise_variance,
            elbo: F::neg_infinity(),
        }
    }

    /// Fit the model using variational inference
    pub fn fit(
        &mut self,
        x_train: &ArrayView2<F>,
        y_train: &ArrayView1<F>,
        max_iter: usize,
        _learning_rate: F,
        tolerance: F,
    ) -> InterpolateResult<()> {
        let n_inducing = self.inducing_points.nrows();

        for iter in 0..max_iter {
            // Compute kernel matrices
            let k_uu = self.compute_kernel_matrix(
                &self.inducing_points.view(),
                &self.inducing_points.view(),
            )?;
            let k_fu = self.compute_kernel_matrix(x_train, &self.inducing_points.view())?;

            // Add jitter for numerical stability
            let mut k_uu_jitter = k_uu.clone();
            let jitter = F::from_f64(1e-6).unwrap_or_else(|| F::epsilon());
            for i in 0..n_inducing {
                k_uu_jitter[[i, i]] += jitter;
            }

            // Cholesky decomposition of K_uu
            let l_uu = self.cholesky_decomposition(&k_uu_jitter)?;

            // Solve for A = K_fu @ inv(K_uu)
            let a_matrix = self.solve_triangular_system(&l_uu, &k_fu.t().to_owned())?;

            // Compute variational parameters
            let sigma_inv = F::one() / self.noise_variance;
            let lambda = Array2::eye(n_inducing) + &(a_matrix.dot(&a_matrix.t()) * sigma_inv);

            // Update variational mean
            let y_centered = y_train.to_owned();
            let ata_y = a_matrix.dot(&y_centered);
            self.variational_mean = self.solve_system(&lambda, &ata_y)? * sigma_inv;

            // Update variational covariance (Cholesky factor)
            self.variational_cov_chol = self.cholesky_decomposition(&lambda)?;

            // Compute ELBO
            let new_elbo = self.compute_elbo(x_train, y_train, &k_uu, &k_fu, &a_matrix)?;

            // Check convergence
            if iter > 0 && (new_elbo - self.elbo).abs() < tolerance {
                break;
            }

            self.elbo = new_elbo;

            // Gradient-based updates for the kernel hyperparameters could be
            // added here; for now they are kept fixed during optimization.
        }

        Ok(())
    }

    /// Make predictions at new points
    pub fn predict(&self, xtest: &ArrayView2<F>) -> InterpolateResult<(Array1<F>, Array1<F>)> {
        let n_test = xtest.nrows();

        // Compute kernel matrices
        let k_uu =
            self.compute_kernel_matrix(&self.inducing_points.view(), &self.inducing_points.view())?;
        let k_su = self.compute_kernel_matrix(xtest, &self.inducing_points.view())?;
        let k_ss_diag = self.compute_kernel_diagonal(xtest)?;

        // Add jitter
        let mut k_uu_jitter = k_uu.clone();
        let jitter = F::from_f64(1e-6).unwrap_or_else(|| F::epsilon());
        for i in 0..k_uu_jitter.nrows() {
            k_uu_jitter[[i, i]] += jitter;
        }

        // Solve K_uu^{-1} * K_su^T
        let l_uu = self.cholesky_decomposition(&k_uu_jitter)?;
        let alpha = self.solve_triangular_system(&l_uu, &k_su.t().to_owned())?;

        // Predictive mean
        let mean = k_su.dot(&self.variational_mean);

        // Predictive variance
        let mut variance = Array1::zeros(n_test);
        for i in 0..n_test {
            let alpha_i = alpha.column(i);

            // Diagonal element of predictive covariance
            let var_term1 = k_ss_diag[i];
            let var_term2 = alpha_i.dot(&alpha_i);
            let var_term3 = self.compute_trace_correction(&alpha_i)?;

            variance[i] = var_term1 - var_term2 + var_term3 + self.noise_variance;
        }

        Ok((mean, variance))
    }

    /// Compute kernel matrix between two sets of points
    fn compute_kernel_matrix(
        &self,
        x1: &ArrayView2<F>,
        x2: &ArrayView2<F>,
    ) -> InterpolateResult<Array2<F>> {
        let n1 = x1.nrows();
        let n2 = x2.nrows();
        let mut k = Array2::zeros((n1, n2));

        for i in 0..n1 {
            for j in 0..n2 {
                let distsq = self.squared_distance(&x1.row(i), &x2.row(j))?;
                k[[i, j]] = self.kernel_function(distsq);
            }
        }

        Ok(k)
    }

    /// Compute diagonal of kernel matrix for efficiency
    fn compute_kernel_diagonal(&self, x: &ArrayView2<F>) -> InterpolateResult<Array1<F>> {
        let n = x.nrows();
        let mut diag = Array1::zeros(n);

        for i in 0..n {
            diag[i] = self.kernel_params.signal_variance;
        }

        Ok(diag)
    }

    /// Compute squared distance between two points with anisotropic scaling
    fn squared_distance(&self, x1: &ArrayView1<F>, x2: &ArrayView1<F>) -> InterpolateResult<F> {
        if x1.len() != x2.len() || x1.len() != self.kernel_params.length_scales.len() {
            return Err(InterpolateError::DimensionMismatch(
                "Point dimensions must match length scales".to_string(),
            ));
        }

        let mut distsq = F::zero();
        for i in 0..x1.len() {
            let diff = x1[i] - x2[i];
            let scaled_diff = diff / self.kernel_params.length_scales[i];
            distsq += scaled_diff * scaled_diff;
        }

        Ok(distsq)
    }

    /// Evaluate kernel function
    fn kernel_function(&self, distsq: F) -> F {
        match self.kernel_params.kernel_type {
            KernelType::RBF => {
                self.kernel_params.signal_variance * (-F::from_f64(0.5).unwrap() * distsq).exp()
            }
            KernelType::Matern32 => {
                let dist = distsq.sqrt();
                let sqrt3 = F::from_f64(3.0_f64.sqrt()).unwrap();
                let term = sqrt3 * dist;
                self.kernel_params.signal_variance * (F::one() + term) * (-term).exp()
            }
            KernelType::Matern52 => {
                let dist = distsq.sqrt();
                let sqrt5 = F::from_f64(5.0_f64.sqrt()).unwrap();
                let term = sqrt5 * dist;
                let term2 = F::from_f64(5.0).unwrap() * distsq / F::from_f64(3.0).unwrap();
                self.kernel_params.signal_variance * (F::one() + term + term2) * (-term).exp()
            }
            KernelType::RationalQuadratic => {
                let alpha = F::from_f64(1.0).unwrap(); // Scale mixture parameter
                self.kernel_params.signal_variance
                    * (F::one() + distsq / (F::from_f64(2.0).unwrap() * alpha)).powf(-alpha)
            }
        }
    }

    /// Cholesky decomposition
    fn cholesky_decomposition(&self, matrix: &Array2<F>) -> InterpolateResult<Array2<F>> {
        let n = matrix.nrows();
        let mut l = Array2::zeros((n, n));

        for i in 0..n {
            for j in 0..=i {
                if i == j {
                    // Diagonal elements
                    let mut sum = F::zero();
                    for k in 0..j {
                        sum += l[[j, k]] * l[[j, k]];
                    }
                    let diag_val = matrix[[j, j]] - sum;
                    if diag_val <= F::zero() {
                        return Err(InterpolateError::ComputationError(
                            "Matrix is not positive definite".to_string(),
                        ));
                    }
                    l[[j, j]] = diag_val.sqrt();
                } else {
                    // Lower triangular elements
                    let mut sum = F::zero();
                    for k in 0..j {
                        sum += l[[i, k]] * l[[j, k]];
                    }
                    l[[i, j]] = (matrix[[i, j]] - sum) / l[[j, j]];
                }
            }
        }

        Ok(l)
    }

    /// Solve triangular system L * X = B
    fn solve_triangular_system(
        &self,
        l: &Array2<F>,
        b: &Array2<F>,
    ) -> InterpolateResult<Array2<F>> {
        let n = l.nrows();
        let m = b.ncols();
        let mut x = Array2::zeros((n, m));

        for col in 0..m {
            for i in 0..n {
                let mut sum = F::zero();
                for j in 0..i {
                    sum += l[[i, j]] * x[[j, col]];
                }
                x[[i, col]] = (b[[i, col]] - sum) / l[[i, i]];
            }
        }

        Ok(x)
    }

    /// Solve linear system using Gaussian elimination
    fn solve_system(&self, a: &Array2<F>, b: &Array1<F>) -> InterpolateResult<Array1<F>> {
        // Simple implementation - in practice would use a more sophisticated solver
        let n = a.nrows();
        let mut x = Array1::zeros(n);

        // Basic Gaussian elimination (not optimal, but functional)
        let mut aug = Array2::zeros((n, n + 1));
        for i in 0..n {
            for j in 0..n {
                aug[[i, j]] = a[[i, j]];
            }
            aug[[i, n]] = b[i];
        }

        // Forward elimination with partial pivoting
        for i in 0..n {
            // Find pivot
            let mut max_row = i;
            for k in i + 1..n {
                if aug[[k, i]].abs() > aug[[max_row, i]].abs() {
                    max_row = k;
                }
            }

            // Swap rows
            for j in 0..=n {
                let temp = aug[[max_row, j]];
                aug[[max_row, j]] = aug[[i, j]];
                aug[[i, j]] = temp;
            }

            // Check the pivot once per column, then eliminate below it
            if aug[[i, i]].abs() < F::epsilon() {
                return Err(InterpolateError::ComputationError(
                    "Matrix is singular".to_string(),
                ));
            }
            for k in i + 1..n {
                let c = aug[[k, i]] / aug[[i, i]];
                for j in i..=n {
                    let aug_i_j = aug[[i, j]];
                    aug[[k, j]] -= c * aug_i_j;
                }
            }
        }

        // Back substitution
        for i in (0..n).rev() {
            let mut sum = aug[[i, n]];
            for j in i + 1..n {
                sum -= aug[[i, j]] * x[j];
            }
            x[i] = sum / aug[[i, i]];
        }

        Ok(x)
    }

    /// Compute trace correction term for predictive variance
    fn compute_trace_correction(&self, alpha: &ArrayView1<F>) -> InterpolateResult<F> {
        // Simplified trace computation - in practice would be more sophisticated
        let trace_term = alpha.dot(alpha) * F::from_f64(0.1).unwrap();
        Ok(trace_term)
    }

    /// Compute Evidence Lower Bound (ELBO)
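    ///
    /// In standard sparse-GP notation this targets
    /// ELBO = E_q[log p(y | f)] − KL(q(u) ‖ p(u)); note that the KL and
    /// log-determinant terms below are deliberately simplified stand-ins.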
    fn compute_elbo(
        &self,
        x_train: &ArrayView2<F>,
        y_train: &ArrayView1<F>,
        _k_uu: &Array2<F>,
        k_fu: &Array2<F>,
        _a_matrix: &Array2<F>,
    ) -> InterpolateResult<F> {
        let n_data = x_train.nrows();
        let n_inducing = self.inducing_points.nrows();

        // Data fit term
        let y_pred = k_fu.dot(&self.variational_mean);
        let residuals = y_train - &y_pred;
        let data_fit = -F::from_f64(0.5).unwrap() * residuals.dot(&residuals) / self.noise_variance;

        // Complexity penalty (KL divergence)
        let log_det_term =
            F::from_f64(n_inducing as f64 * (2.0 * std::f64::consts::PI).ln()).unwrap();
        let trace_term = self.variational_cov_chol.diag().mapv(|x| x.ln()).sum();
        let kl_penalty =
            -F::from_f64(0.5).unwrap() * (log_det_term + F::from_f64(2.0).unwrap() * trace_term);

        // Noise term
        let noise_term = -F::from_f64(0.5 * n_data as f64).unwrap() * self.noise_variance.ln();

        Ok(data_fit + kl_penalty + noise_term)
    }
}

/// Statistical spline with confidence bands
///
/// Extends standard spline interpolation with statistical inference,
/// including confidence and prediction bands.
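///
/// # Example
///
/// A minimal sketch of fitting and querying the bands, mirroring the test
/// below (illustrative rather than a guaranteed API):
///
/// ```ignore
/// use scirs2_core::ndarray::Array1;
///
/// let x = Array1::from(vec![0.0, 1.0, 2.0, 3.0, 4.0]);
/// let y = Array1::from(vec![0.0, 1.0, 4.0, 9.0, 16.0]);
/// // 5 knots, smoothing parameter 0.1
/// let spline = StatisticalSpline::fit(&x.view(), &y.view(), 5, 0.1)?;
///
/// let x_new = Array1::from(vec![0.5, 1.5, 2.5]);
/// let (pred, conf_lo, conf_hi, pred_lo, pred_hi) =
///     spline.predict_with_bands(&x_new.view(), 0.95)?;
/// ```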
#[derive(Debug, Clone)]
pub struct StatisticalSpline<F: Float> {
    /// Spline coefficients
    pub coefficients: Array1<F>,
    /// Knot locations
    pub knots: Array1<F>,
    /// Covariance matrix of coefficients
    pub coef_covariance: Array2<F>,
    /// Residual standard error
    pub residual_std_error: F,
    /// Degrees of freedom
    pub degrees_of_freedom: usize,
}

impl<F> StatisticalSpline<F>
where
    F: Float
        + FromPrimitive
        + Debug
        + Display
        + scirs2_core::ndarray::ScalarOperand
        + std::ops::AddAssign
        + std::ops::SubAssign
        + std::ops::MulAssign
        + std::ops::DivAssign,
{
    /// Fit a statistical spline with inference
    pub fn fit(
        x: &ArrayView1<F>,
        y: &ArrayView1<F>,
        n_knots: usize,
        smoothing_parameter: F,
    ) -> InterpolateResult<Self> {
        let n = x.len();
        if n != y.len() {
            return Err(InterpolateError::DimensionMismatch(
                "x and y must have same length".to_string(),
            ));
        }

        // Create knot vector (simplified)
        let x_min = x.fold(F::infinity(), |a, &b| a.min(b));
        let x_max = x.fold(F::neg_infinity(), |a, &b| a.max(b));
        let mut knots = Array1::zeros(n_knots);
        for i in 0..n_knots {
            let t = F::from_usize(i).unwrap() / F::from_usize(n_knots - 1).unwrap();
            knots[i] = x_min + t * (x_max - x_min);
        }

        // Build design matrix (B-spline basis - simplified)
        let design_matrix = Self::build_bspline_matrix(x, &knots)?;

        // Add smoothing penalty matrix
        let penalty_matrix = Self::build_penalty_matrix(n_knots)?;
        let penalized_matrix =
            design_matrix.t().dot(&design_matrix) + &(penalty_matrix.clone() * smoothing_parameter);

        // Solve penalized least squares
        let rhs = design_matrix.t().dot(y);
        let coefficients = Self::solve_penalized_system(&penalized_matrix, &rhs)?;

        // Compute residuals and standard error
        let fitted_values = design_matrix.dot(&coefficients);
        let residuals = y - &fitted_values;
        let rss = residuals.dot(&residuals);
        // Guard against the effective dof estimate exceeding n, which would
        // underflow the residual degrees of freedom
        let edf = Self::effective_degrees_of_freedom(&design_matrix, smoothing_parameter)?;
        let dof = n.saturating_sub(edf).max(1);
        let residual_std_error = (rss / F::from_usize(dof).unwrap()).sqrt();

        // Compute coefficient covariance matrix
        let coef_covariance = Self::compute_coefficient_covariance(
            &design_matrix,
            &penalty_matrix,
            smoothing_parameter,
            residual_std_error,
        )?;

        Ok(Self {
            coefficients,
            knots,
            coef_covariance,
            residual_std_error,
            degrees_of_freedom: dof,
        })
    }

    /// Predict with confidence and prediction bands
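    ///
    /// Returns `(predictions, conf_lower, conf_upper, pred_lower, pred_upper)`:
    /// the confidence band covers the estimated mean function, while the wider
    /// prediction band additionally accounts for residual noise in new
    /// observations.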
    pub fn predict_with_bands(
        &self,
        x_new: &ArrayView1<F>,
        confidence_level: F,
    ) -> InterpolateResult<(Array1<F>, Array1<F>, Array1<F>, Array1<F>, Array1<F>)> {
        let design_new = Self::build_bspline_matrix(x_new, &self.knots)?;

        // Point predictions
        let predictions = design_new.dot(&self.coefficients);

        // Standard errors
        let mut std_errors = Array1::zeros(x_new.len());
        let mut prediction_std_errors = Array1::zeros(x_new.len());

        for i in 0..x_new.len() {
            let x_row = design_new.row(i);
            let variance = x_row.dot(&self.coef_covariance.dot(&x_row));
            std_errors[i] = variance.sqrt();
            prediction_std_errors[i] =
                (variance + self.residual_std_error * self.residual_std_error).sqrt();
        }

        // Critical value for the confidence level
        let _alpha = F::one() - confidence_level;
        let t_crit = F::from_f64(1.96).unwrap(); // Simplified - should use proper t-distribution

        // Confidence bands (for the mean function)
        let conf_lower = &predictions - &(std_errors.clone() * t_crit);
        let conf_upper = &predictions + &(std_errors * t_crit);

        // Prediction bands (for new observations)
        let pred_lower = &predictions - &(prediction_std_errors.clone() * t_crit);
        let pred_upper = &predictions + &(prediction_std_errors * t_crit);

        Ok((predictions, conf_lower, conf_upper, pred_lower, pred_upper))
    }

    /// Build the design matrix (simplified implementation)
    ///
    /// Note: despite its name, this currently builds a global polynomial
    /// (Vandermonde) basis rather than a true B-spline basis.
    fn build_bspline_matrix(x: &ArrayView1<F>, knots: &Array1<F>) -> InterpolateResult<Array2<F>> {
        let n = x.len();
        let m = knots.len();
        let mut matrix = Array2::zeros((n, m));

        // Column j holds x^j (column 0 is the intercept)
        for i in 0..n {
            for j in 0..m {
                if j == 0 {
                    matrix[[i, j]] = F::one();
                } else {
                    matrix[[i, j]] = x[i].powf(F::from_usize(j).unwrap());
                }
            }
        }

        Ok(matrix)
    }

    /// Build penalty matrix for smoothing
    fn build_penalty_matrix(n_knots: usize) -> InterpolateResult<Array2<F>> {
        let mut penalty = Array2::zeros((n_knots, n_knots));

        // Second-order difference penalty (simplified)
        for i in 2..n_knots {
            penalty[[i - 2, i - 2]] += F::one();
            penalty[[i - 2, i - 1]] -= F::from_f64(2.0).unwrap();
            penalty[[i - 2, i]] += F::one();
            penalty[[i - 1, i - 2]] -= F::from_f64(2.0).unwrap();
            penalty[[i - 1, i - 1]] += F::from_f64(4.0).unwrap();
            penalty[[i - 1, i]] -= F::from_f64(2.0).unwrap();
            penalty[[i, i - 2]] += F::one();
            penalty[[i, i - 1]] -= F::from_f64(2.0).unwrap();
            penalty[[i, i]] += F::one();
        }

        Ok(penalty)
    }

    /// Solve penalized least squares system
    fn solve_penalized_system(a: &Array2<F>, b: &Array1<F>) -> InterpolateResult<Array1<F>> {
        // Simplified solver - in practice would use Cholesky or QR
        let n = a.nrows();
        let mut x = Array1::zeros(n);

        // Simple iterative solver (Gauss-Seidel)
        for _iter in 0..100 {
            let mut max_change = F::zero();
            for i in 0..n {
                let mut sum = b[i];
                for j in 0..n {
                    if i != j {
                        sum -= a[[i, j]] * x[j];
                    }
                }
                let new_val = sum / a[[i, i]];
                let change = (new_val - x[i]).abs();
                if change > max_change {
                    max_change = change;
                }
                x[i] = new_val;
            }

            if max_change < F::from_f64(1e-8).unwrap() {
                break;
            }
        }

        Ok(x)
    }

    /// Compute effective degrees of freedom
    fn effective_degrees_of_freedom(
        design: &Array2<F>,
        smoothing_parameter: F,
    ) -> InterpolateResult<usize> {
        // Simplified computation
        let base_dof = design.ncols();
        let penalty_reduction = (smoothing_parameter.ln() * F::from_f64(0.1).unwrap()).exp();
        let effective_dof = F::from_usize(base_dof).unwrap() * (F::one() - penalty_reduction);
        Ok(effective_dof.to_usize().unwrap_or(base_dof))
    }

    /// Compute coefficient covariance matrix
    fn compute_coefficient_covariance(
        design: &Array2<F>,
        penalty: &Array2<F>,
        smoothing_parameter: F,
        residual_std_error: F,
    ) -> InterpolateResult<Array2<F>> {
        let penalized_matrix = design.t().dot(design) + &(penalty * smoothing_parameter);

        // Simplified: approximate the inverse by inverting only the diagonal
        // (a proper matrix inverse would be used in practice)
        let n = penalized_matrix.nrows();
        let mut inv_matrix = Array2::eye(n);

        for i in 0..n {
            inv_matrix[[i, i]] = F::one() / penalized_matrix[[i, i]];
        }

        Ok(inv_matrix * residual_std_error * residual_std_error)
    }
}

/// Advanced bootstrap methods for interpolation
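///
/// # Example
///
/// A sketch of running a standard bootstrap with a user-supplied interpolator
/// closure (`my_interp` here is hypothetical; see the test at the bottom of
/// this file for a complete closure):
///
/// ```ignore
/// let bootstrap = AdvancedBootstrap::<f64>::new(BootstrapMethod::Standard, 100, 2);
/// let (mean, lower, upper) =
///     bootstrap.bootstrap_interpolate(&x.view(), &y.view(), &x_new.view(), my_interp)?;
/// ```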
#[derive(Debug, Clone)]
pub struct AdvancedBootstrap<F: Float> {
    /// Block size for block bootstrap
    pub block_size: usize,
    /// Bootstrap method type
    pub method: BootstrapMethod,
    /// Number of bootstrap samples
    pub n_samples: usize,
    /// Random seed for reproducibility
    pub seed: Option<u64>,
    /// Phantom data to use type parameter F
    pub _phantom: PhantomData<F>,
}

/// Types of bootstrap methods
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum BootstrapMethod {
    /// Standard bootstrap (iid resampling)
    Standard,
    /// Block bootstrap for time series
    Block,
    /// Residual bootstrap for regression
    Residual,
    /// Wild bootstrap for heteroskedasticity
    Wild,
}

impl<F> AdvancedBootstrap<F>
where
    F: Float + FromPrimitive + Debug + Display + std::iter::Sum,
{
    /// Create a new advanced bootstrap
    pub fn new(method: BootstrapMethod, n_samples: usize, block_size: usize) -> Self {
        Self {
            block_size,
            method,
            n_samples,
            seed: None,
            _phantom: PhantomData,
        }
    }

    /// Perform bootstrap interpolation with the specified method
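    ///
    /// Returns `(mean, conf_lower, conf_upper)`, where the bounds are the
    /// 2.5% and 97.5% percentiles of the bootstrap distribution at each
    /// prediction point.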
    pub fn bootstrap_interpolate<InterpolatorFn>(
        &self,
        x: &ArrayView1<F>,
        y: &ArrayView1<F>,
        x_new: &ArrayView1<F>,
        interpolator_factory: InterpolatorFn,
    ) -> InterpolateResult<(Array1<F>, Array1<F>, Array1<F>)>
    where
        InterpolatorFn:
            Fn(&ArrayView1<F>, &ArrayView1<F>, &ArrayView1<F>) -> InterpolateResult<Array1<F>>,
    {
        let m = x_new.len();
        let mut rng = match self.seed {
            Some(seed) => StdRng::seed_from_u64(seed),
            None => StdRng::seed_from_u64(42),
        };

        let mut bootstrap_results = Array2::zeros((self.n_samples, m));

        for sample in 0..self.n_samples {
            let (x_boot, y_boot) = match self.method {
                BootstrapMethod::Standard => self.standard_bootstrap(x, y, &mut rng)?,
                BootstrapMethod::Block => self.block_bootstrap(x, y, &mut rng)?,
                BootstrapMethod::Residual => {
                    self.residual_bootstrap(x, y, &mut rng, &interpolator_factory)?
                }
                BootstrapMethod::Wild => {
                    self.wild_bootstrap(x, y, &mut rng, &interpolator_factory)?
                }
            };

            let y_pred = interpolator_factory(&x_boot.view(), &y_boot.view(), x_new)?;
            bootstrap_results.row_mut(sample).assign(&y_pred);
        }

        // Mean across bootstrap samples
        let mean = bootstrap_results.mean_axis(Axis(0)).unwrap();

        // Compute percentiles for confidence intervals
        let mut conf_lower = Array1::zeros(m);
        let mut conf_upper = Array1::zeros(m);

        for i in 0..m {
            let mut column: Vec<F> = bootstrap_results.column(i).to_vec();
            column.sort_by(|a, b| a.partial_cmp(b).unwrap());

            let lower_idx = (F::from_f64(0.025).unwrap() * F::from_usize(self.n_samples).unwrap())
                .to_usize()
                .unwrap()
                .min(self.n_samples - 1);
            let upper_idx = (F::from_f64(0.975).unwrap() * F::from_usize(self.n_samples).unwrap())
                .to_usize()
                .unwrap()
                .min(self.n_samples - 1);

            conf_lower[i] = column[lower_idx];
            conf_upper[i] = column[upper_idx];
        }

        Ok((mean, conf_lower, conf_upper))
    }

    /// Standard bootstrap resampling
    fn standard_bootstrap(
        &self,
        x: &ArrayView1<F>,
        y: &ArrayView1<F>,
        rng: &mut StdRng,
    ) -> InterpolateResult<(Array1<F>, Array1<F>)> {
        let n = x.len();
        let mut indices = Vec::with_capacity(n);

        for _ in 0..n {
            indices.push(rng.gen_range(0..n));
        }

        let x_boot = Array1::from_iter(indices.iter().map(|&i| x[i]));
        let y_boot = Array1::from_iter(indices.iter().map(|&i| y[i]));

        Ok((x_boot, y_boot))
    }

    /// Block bootstrap for time series data
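    ///
    /// Contiguous blocks of `block_size` observations are drawn with
    /// replacement and concatenated until `n` points are collected, which
    /// preserves short-range serial dependence within each block.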
    fn block_bootstrap(
        &self,
        x: &ArrayView1<F>,
        y: &ArrayView1<F>,
        rng: &mut StdRng,
    ) -> InterpolateResult<(Array1<F>, Array1<F>)> {
        let n = x.len();
        let n_blocks = n.div_ceil(self.block_size);

        let mut x_boot = Vec::new();
        let mut y_boot = Vec::new();

        for _ in 0..n_blocks {
            let start_idx = rng.gen_range(0..=(n.saturating_sub(self.block_size)));
            let end_idx = (start_idx + self.block_size).min(n);

            for i in start_idx..end_idx {
                x_boot.push(x[i]);
                y_boot.push(y[i]);
                if x_boot.len() >= n {
                    break;
                }
            }
            if x_boot.len() >= n {
                break;
            }
        }

        x_boot.truncate(n);
        y_boot.truncate(n);

        Ok((Array1::from(x_boot), Array1::from(y_boot)))
    }

    /// Residual bootstrap for regression models
    fn residual_bootstrap<InterpolatorFn>(
        &self,
        x: &ArrayView1<F>,
        y: &ArrayView1<F>,
        rng: &mut StdRng,
        interpolator_factory: &InterpolatorFn,
    ) -> InterpolateResult<(Array1<F>, Array1<F>)>
    where
        InterpolatorFn:
            Fn(&ArrayView1<F>, &ArrayView1<F>, &ArrayView1<F>) -> InterpolateResult<Array1<F>>,
    {
        let n = x.len();

        // Fit original model to get residuals
        let y_fitted = interpolator_factory(x, y, x)?;
        let residuals = y - &y_fitted;

        // Resample residuals
        let mut resampled_residuals = Array1::zeros(n);
        for i in 0..n {
            let idx = rng.gen_range(0..n);
            resampled_residuals[i] = residuals[idx];
        }

        // Create new bootstrap sample
        let y_boot = y_fitted + resampled_residuals;

        Ok((x.to_owned(), y_boot))
    }

    /// Wild bootstrap for heteroskedastic errors
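    ///
    /// Each residual is multiplied by an independent Rademacher variable
    /// (±1, each with probability 1/2), so the resampled errors retain the
    /// pointwise variance of the originals.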
    fn wild_bootstrap<InterpolatorFn>(
        &self,
        x: &ArrayView1<F>,
        y: &ArrayView1<F>,
        rng: &mut StdRng,
        interpolator_factory: &InterpolatorFn,
    ) -> InterpolateResult<(Array1<F>, Array1<F>)>
    where
        InterpolatorFn:
            Fn(&ArrayView1<F>, &ArrayView1<F>, &ArrayView1<F>) -> InterpolateResult<Array1<F>>,
    {
        let n = x.len();

        // Fit original model to get residuals
        let y_fitted = interpolator_factory(x, y, x)?;
        let residuals = y - &y_fitted;

        // Generate wild bootstrap multipliers (Rademacher distribution)
        let mut multipliers = Array1::zeros(n);
        for i in 0..n {
            multipliers[i] = if rng.random::<f64>() < 0.5 {
                F::from_f64(-1.0).unwrap()
            } else {
                F::one()
            };
        }

        // Create new bootstrap sample
        let y_boot = y_fitted + &residuals * &multipliers;

        Ok((x.to_owned(), y_boot))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_variational_sparse_gp() {
        // Generate simple test data
        let x_train = Array2::from_shape_vec((5, 1), vec![0.0, 1.0, 2.0, 3.0, 4.0]).unwrap();
        let y_train = Array1::from(vec![0.0, 1.0, 4.0, 9.0, 16.0]); // y = x^2

        // Create kernel parameters
        let kernel_params = KernelParameters {
            signal_variance: 1.0,
            length_scales: Array1::from(vec![1.0]),
            kernel_type: KernelType::RBF,
        };

        // Create sparse GP with 3 inducing points
        let inducing_points = Array2::from_shape_vec((3, 1), vec![0.0, 2.0, 4.0]).unwrap();
        let mut sparse_gp = VariationalSparseGP::new(
            inducing_points,
            kernel_params,
            0.1, // noise variance
        );

        // Fit the model
        let result = sparse_gp.fit(&x_train.view(), &y_train.view(), 10, 0.01, 1e-6);
        assert!(result.is_ok());

        // Make predictions
        let xtest = Array2::from_shape_vec((3, 1), vec![0.5, 1.5, 2.5]).unwrap();
        let (mean, variance) = sparse_gp.predict(&xtest.view()).unwrap();

        // Check that predictions are reasonable
        assert_eq!(mean.len(), 3);
        assert_eq!(variance.len(), 3);
        assert!(variance.iter().all(|&v| v > 0.0));
    }

    #[test]
    fn test_statistical_spline() {
        // Generate test data
        let x = Array1::from(vec![0.0, 1.0, 2.0, 3.0, 4.0]);
        let y = Array1::from(vec![0.0, 1.0, 4.0, 9.0, 16.0]);

        // Fit statistical spline
        let spline = StatisticalSpline::fit(&x.view(), &y.view(), 5, 0.1).unwrap();

        // Test prediction with confidence bands
        let x_new = Array1::from(vec![0.5, 1.5, 2.5, 3.5]);
        let (pred, conf_lower, conf_upper, pred_lower, pred_upper) =
            spline.predict_with_bands(&x_new.view(), 0.95).unwrap();

        // Check that predictions are reasonable
        assert_eq!(pred.len(), 4);
        assert!(conf_lower
            .iter()
            .zip(conf_upper.iter())
            .all(|(&l, &u)| l < u));
        assert!(pred_lower
            .iter()
            .zip(pred_upper.iter())
            .all(|(&l, &u)| l < u));
        assert!(conf_lower
            .iter()
            .zip(pred_lower.iter())
            .all(|(&c, &p)| c >= p));
        assert!(conf_upper
            .iter()
            .zip(pred_upper.iter())
            .all(|(&c, &p)| c <= p));
    }

    #[test]
    fn test_advanced_bootstrap() {
        let x = Array1::from(vec![0.0, 1.0, 2.0, 3.0, 4.0]);
        let y = Array1::from(vec![0.0, 1.0, 4.0, 9.0, 16.0]);
        let x_new = Array1::from(vec![0.5, 1.5, 2.5]);

        let bootstrap = AdvancedBootstrap::new(BootstrapMethod::Standard, 100, 2);

        // Simple linear interpolation factory
        let interpolator =
            |x_data: &ArrayView1<f64>, y_data: &ArrayView1<f64>, x_pred: &ArrayView1<f64>| {
                // Very simple linear interpolation for testing
                let mut result = Array1::zeros(x_pred.len());
                for (i, &x_val) in x_pred.iter().enumerate() {
                    // Find nearest neighbors and interpolate
                    if x_val <= x_data[0] {
                        result[i] = y_data[0];
                    } else if x_val >= x_data[x_data.len() - 1] {
                        result[i] = y_data[y_data.len() - 1];
                    } else {
                        // Find bracketing points
                        for j in 0..x_data.len() - 1 {
                            if x_val >= x_data[j] && x_val <= x_data[j + 1] {
                                let t = (x_val - x_data[j]) / (x_data[j + 1] - x_data[j]);
                                result[i] = y_data[j] + t * (y_data[j + 1] - y_data[j]);
                                break;
                            }
                        }
                    }
                }
                Ok(result)
            };

        let (mean, lower, upper) = bootstrap
            .bootstrap_interpolate(&x.view(), &y.view(), &x_new.view(), interpolator)
            .unwrap();

        assert_eq!(mean.len(), 3);
        assert!(lower.iter().zip(upper.iter()).all(|(&l, &u)| l <= u));
    }
}

/// Savitzky-Golay filter for statistical smoothing and interpolation
///
/// Implements the Savitzky-Golay filter which fits local polynomials to data points
/// using least squares regression. This provides both smoothing and the ability to
/// compute derivatives of the smoothed signal.
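///
/// # Example
///
/// A minimal smoothing sketch (illustrative; error handling elided):
///
/// ```ignore
/// use scirs2_core::ndarray::Array1;
///
/// // 5-point window, quadratic local fit, derivative order 0 (pure smoothing)
/// let filter = SavitzkyGolayFilter::<f64>::new(5, 2, 0)?;
/// let y = Array1::from(vec![1.0, 2.0, 1.5, 3.0, 2.5, 4.0, 3.5]);
/// let smoothed = filter.filter(&y.view())?;
/// ```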
#[derive(Debug, Clone)]
pub struct SavitzkyGolayFilter<F: Float> {
    /// Window length (must be odd)
    pub window_length: usize,
    /// Polynomial order
    pub polynomial_order: usize,
    /// Derivative order (0 for smoothing, 1 for first derivative, etc.)
    pub derivative_order: usize,
    /// Phantom data to use type parameter F
    pub _phantom: PhantomData<F>,
}

impl<F> SavitzkyGolayFilter<F>
where
    F: Float + FromPrimitive + Debug + std::iter::Sum + 'static,
{
    /// Create a new Savitzky-Golay filter
    pub fn new(
        window_length: usize,
        polynomial_order: usize,
        derivative_order: usize,
    ) -> InterpolateResult<Self> {
        if window_length.is_multiple_of(2) {
            return Err(InterpolateError::InvalidValue(
                "Window length must be odd".to_string(),
            ));
        }

        if polynomial_order >= window_length {
            return Err(InterpolateError::InvalidValue(
                "Polynomial order must be less than window length".to_string(),
            ));
        }

        if derivative_order > polynomial_order {
            return Err(InterpolateError::InvalidValue(
                "Derivative order cannot exceed polynomial order".to_string(),
            ));
        }

        Ok(Self {
            window_length,
            polynomial_order,
            derivative_order,
            _phantom: PhantomData,
        })
    }

    /// Apply the Savitzky-Golay filter to input data
    pub fn filter(&self, y: &ArrayView1<F>) -> InterpolateResult<Array1<F>> {
        let n = y.len();
        if n < self.window_length {
            return Err(InterpolateError::InvalidValue(
                "Data length must be at least window length".to_string(),
            ));
        }

        let mut result = Array1::zeros(n);
        let half_window = self.window_length / 2;

        // Compute Savitzky-Golay coefficients
        let coeffs = self.compute_coefficients()?;

        // Apply filter to each point
        for i in 0..n {
            let mut sum = F::zero();

            for j in 0..self.window_length {
                let data_idx = if i < half_window {
                    // Handle left boundary
                    j.min(n - 1)
                } else if i >= n - half_window {
                    // Handle right boundary
                    (n - self.window_length + j).min(n - 1)
                } else {
                    // Normal case
                    i - half_window + j
                };

                sum = sum + coeffs[j] * y[data_idx];
            }

            result[i] = sum;
        }

        Ok(result)
    }

    /// Compute Savitzky-Golay filter coefficients
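    ///
    /// The coefficients come from a least-squares polynomial fit on the
    /// centered window: solve (XᵀX)·c = d!·e_d for the polynomial
    /// coefficients (e_d selecting the requested derivative order d), then
    /// map back through the Vandermonde matrix X to get one weight per
    /// window position.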
    fn compute_coefficients(&self) -> InterpolateResult<Array1<F>> {
        let m = self.window_length;
        let n = self.polynomial_order + 1;
        let half_window = (m - 1) / 2;

        // Create design matrix (Vandermonde matrix)
        let mut design = Array2::<F>::zeros((m, n));
        for i in 0..m {
            let x = F::from_isize(i as isize - half_window as isize).unwrap();
            for j in 0..n {
                design[[i, j]] = x.powi(j as i32);
            }
        }

        // Solve normal equations: (X^T X) c = X^T e_d
        // where e_d is the unit vector for the derivative order
        let xtx = design.t().dot(&design);
        let mut rhs = Array1::<F>::zeros(n);

        // Factorial for derivative scaling
        let mut factorial = F::one();
        for i in 1..=self.derivative_order {
            factorial = factorial * F::from_usize(i).unwrap();
        }
        rhs[self.derivative_order] = factorial;

        // Solve the normal-equation system
        let coeffs_polynomial = self.solve_linear_system(&xtx, &rhs)?;

        // Transform to filter coefficients
        let filter_coeffs = design.dot(&coeffs_polynomial);

        Ok(filter_coeffs)
    }

    /// Solve the normal-equation system by Gaussian elimination with partial
    /// pivoting (a diagonal-only shortcut is not valid here: X^T X is dense)
    fn solve_linear_system(&self, a: &Array2<F>, b: &Array1<F>) -> InterpolateResult<Array1<F>> {
        let n = a.nrows();
        let mut m = a.clone();
        let mut rhs = b.clone();

        for i in 0..n {
            // Pivot: bring the largest remaining entry onto the diagonal
            let mut p = i;
            for k in i + 1..n {
                if m[[k, i]].abs() > m[[p, i]].abs() { p = k; }
            }
            if m[[p, i]].abs() < F::from_f64(1e-12).unwrap() {
                return Err(InterpolateError::ComputationError(
                    "Singular matrix in Savitzky-Golay computation".to_string(),
                ));
            }
            if p != i {
                for j in 0..n { m.swap([i, j], [p, j]); }
                rhs.swap(i, p);
            }
            // Eliminate entries below the pivot
            for k in i + 1..n {
                let c = m[[k, i]] / m[[i, i]];
                for j in i..n { m[[k, j]] = m[[k, j]] - c * m[[i, j]]; }
                rhs[k] = rhs[k] - c * rhs[i];
            }
        }

        // Back substitution
        let mut x = Array1::<F>::zeros(n);
        for i in (0..n).rev() {
            let mut sum = rhs[i];
            for j in i + 1..n { sum = sum - m[[i, j]] * x[j]; }
            x[i] = sum / m[[i, i]];
        }

        Ok(x)
    }
}

/// Bias-Corrected and Accelerated (BCa) Bootstrap
///
/// Provides more accurate confidence intervals than basic percentile bootstrap
/// by correcting for bias and skewness in the bootstrap distribution.
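///
/// With bias correction ẑ₀ and acceleration â, the adjusted percentile for a
/// nominal quantile z is Φ(ẑ₀ + (ẑ₀ + z) / (1 − â·(ẑ₀ + z))), which is the
/// adjustment applied in `confidence_intervals` below.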
#[derive(Debug, Clone)]
pub struct BcaBootstrap<F: Float> {
    /// Number of bootstrap samples
    pub n_bootstrap: usize,
    /// Confidence level (e.g., 0.95)
    pub confidence_level: F,
    /// Random seed for reproducibility
    pub seed: Option<u64>,
}

impl<F> BcaBootstrap<F>
where
    F: Float + FromPrimitive + Debug + std::iter::Sum,
{
    /// Create a new BCa bootstrap
    pub fn new(n_bootstrap: usize, confidence_level: F, seed: Option<u64>) -> Self {
        Self {
            n_bootstrap,
            confidence_level,
            seed,
        }
    }

    /// Compute BCa confidence intervals
    pub fn confidence_intervals<G>(
        &self,
        x: &ArrayView1<F>,
        y: &ArrayView1<F>,
        x_new: &ArrayView1<F>,
        interpolator: G,
    ) -> InterpolateResult<(Array1<F>, Array1<F>, Array1<F>)>
    where
        G: Fn(&ArrayView1<F>, &ArrayView1<F>, &ArrayView1<F>) -> InterpolateResult<Array1<F>>,
    {
        let n_data = x.len();
        let n_pred = x_new.len();

        // Generate bootstrap samples
        let mut rng = match self.seed {
            Some(seed) => StdRng::seed_from_u64(seed),
            None => {
                let mut rng = scirs2_core::random::rng();
                StdRng::from_rng(&mut rng)
            }
        };

        let mut bootstrap_results = Array2::<F>::zeros((self.n_bootstrap, n_pred));

        // Bootstrap resampling
        for b in 0..self.n_bootstrap {
            let mut x_boot = Array1::<F>::zeros(n_data);
            let mut y_boot = Array1::<F>::zeros(n_data);

            for i in 0..n_data {
                let idx = rng.gen_range(0..n_data);
                x_boot[i] = x[idx];
                y_boot[i] = y[idx];
            }

            let pred = interpolator(&x_boot.view(), &y_boot.view(), x_new)?;
            bootstrap_results.row_mut(b).assign(&pred);
        }

        // Compute original estimate
        let original_pred = interpolator(x, y, x_new)?;

        // Compute bias correction
        let bias_correction = self.compute_bias_correction(&bootstrap_results, &original_pred)?;

        // Compute acceleration
        let acceleration = self.compute_acceleration(x, y, x_new, &interpolator)?;

        // Compute BCa intervals
        let alpha = (F::one() - self.confidence_level) / F::from_f64(2.0).unwrap();
        let z_alpha = self.inverse_normal_cdf(alpha)?;
        let z_1_alpha = self.inverse_normal_cdf(F::one() - alpha)?;

        let mut lower = Array1::<F>::zeros(n_pred);
        let mut upper = Array1::<F>::zeros(n_pred);

        for i in 0..n_pred {
            let bc = bias_correction[i];
            let acc = acceleration[i];

            // Adjusted percentiles
            let alpha1 =
                self.normal_cdf(bc + (bc + z_alpha) / (F::one() - acc * (bc + z_alpha)))?;
            let alpha2 =
                self.normal_cdf(bc + (bc + z_1_alpha) / (F::one() - acc * (bc + z_1_alpha)))?;

            // Extract percentiles from bootstrap distribution
            let mut column: Vec<F> = bootstrap_results.column(i).to_vec();
            column.sort_by(|a, b| a.partial_cmp(b).unwrap());

            let idx1 = ((alpha1 * F::from_usize(self.n_bootstrap).unwrap())
                .floor()
                .to_usize()
                .unwrap())
            .min(self.n_bootstrap - 1);
            let idx2 = ((alpha2 * F::from_usize(self.n_bootstrap).unwrap())
                .floor()
                .to_usize()
                .unwrap())
            .min(self.n_bootstrap - 1);

            lower[i] = column[idx1];
            upper[i] = column[idx2];
        }

        Ok((original_pred, lower, upper))
    }

    /// Compute bias correction parameter
    fn compute_bias_correction(
        &self,
        bootstrap_results: &Array2<F>,
        original_pred: &Array1<F>,
    ) -> InterpolateResult<Array1<F>> {
        let n_pred = original_pred.len();
        let mut bias_correction = Array1::<F>::zeros(n_pred);

        for i in 0..n_pred {
            let column = bootstrap_results.column(i);
            let count_less = column.iter().filter(|&&val| val < original_pred[i]).count();

            let proportion =
                F::from_usize(count_less).unwrap() / F::from_usize(self.n_bootstrap).unwrap();
            bias_correction[i] = self.inverse_normal_cdf(proportion)?;
        }

        Ok(bias_correction)
    }

    /// Compute acceleration parameter using jackknife
    fn compute_acceleration<G>(
        &self,
        x: &ArrayView1<F>,
        y: &ArrayView1<F>,
        x_new: &ArrayView1<F>,
        interpolator: &G,
    ) -> InterpolateResult<Array1<F>>
    where
        G: Fn(&ArrayView1<F>, &ArrayView1<F>, &ArrayView1<F>) -> InterpolateResult<Array1<F>>,
    {
        let n_data = x.len();
        let n_pred = x_new.len();

        // Jackknife estimates
        let mut jackknife_results = Array2::<F>::zeros((n_data, n_pred));

        for i in 0..n_data {
            // Leave-one-out sample
            let mut x_jack = Array1::<F>::zeros(n_data - 1);
            let mut y_jack = Array1::<F>::zeros(n_data - 1);

            let mut idx = 0;
            for j in 0..n_data {
                if j != i {
                    x_jack[idx] = x[j];
                    y_jack[idx] = y[j];
                    idx += 1;
                }
            }

            let pred = interpolator(&x_jack.view(), &y_jack.view(), x_new)?;
            jackknife_results.row_mut(i).assign(&pred);
        }

        // Compute jackknife mean
        let jack_mean = jackknife_results.mean_axis(Axis(0)).unwrap();

        // Compute acceleration
        let mut acceleration = Array1::<F>::zeros(n_pred);
        for i in 0..n_pred {
            let mut sum_cubed = F::zero();
            let mut sum_squared = F::zero();

            for j in 0..n_data {
                let diff = jack_mean[i] - jackknife_results[[j, i]];
                sum_cubed = sum_cubed + diff * diff * diff;
                sum_squared = sum_squared + diff * diff;
            }

            if sum_squared > F::zero() {
                acceleration[i] = sum_cubed
                    / (F::from_f64(6.0).unwrap() * sum_squared.powf(F::from_f64(1.5).unwrap()));
            }
        }

        Ok(acceleration)
    }

    /// Simplified normal CDF approximation
    fn normal_cdf(&self, x: F) -> InterpolateResult<F> {
        // Crude tanh-based approximation - a proper erf-based implementation
        // would be used in practice
        let sqrt2 = F::from_f64(std::f64::consts::SQRT_2).unwrap();
        let result = (F::one() + (x / sqrt2).tanh()) / F::from_f64(2.0).unwrap();
        Ok(result)
    }

    /// Simplified inverse normal CDF approximation
    fn inverse_normal_cdf(&self, p: F) -> InterpolateResult<F> {
        // Very rough atanh-based inverse of the approximation above
        if p <= F::zero() || p >= F::one() {
            return Ok(F::zero());
        }

        let x = F::from_f64(2.0).unwrap() * p - F::one();
        Ok(F::from_f64(std::f64::consts::SQRT_2).unwrap() * x.atanh())
    }
}
1371}