#![allow(clippy::too_many_arguments)]
#![allow(dead_code)]
use crate::advanced::kriging::{CovarianceFunction, PredictionResult};
use crate::error::{InterpolateError, InterpolateResult};
use scirs2_core::ndarray::{Array1, Array2, ArrayView1, ArrayView2};
use scirs2_core::numeric::{Float, FromPrimitive};
use std::fmt::{Debug, Display};
use std::marker::PhantomData;
use std::ops::{Add, Div, Mul, Sub};
/// Converts an `f64` constant into the generic float type `F`.
///
/// # Panics
/// Panics when the value is not representable in `F`.
#[inline(always)]
fn const_f64<F: Float + FromPrimitive>(value: f64) -> F {
    match F::from(value) {
        Some(converted) => converted,
        None => panic!("Failed to convert constant to target float type"),
    }
}
/// Boxed closure mapping a query point to the values of all trend-basis functions at that point.
type BasisFunctionClosure<F> = Box<dyn Fn(&ArrayView1<F>) -> Vec<F>>;
/// Result of a Bayesian kriging prediction.
#[derive(Debug, Clone)]
pub struct BayesianPredictionResult<F: Float> {
    /// Predictive mean at each query point.
    pub mean: Array1<F>,
    /// Predictive variance at each query point.
    pub variance: Array1<F>,
    /// Optional matrix of posterior samples, shaped (n_samples, n_query).
    pub posterior_samples: Option<Array2<F>>,
    /// Optional quantiles as (level, values-at-query-points) pairs.
    pub quantiles: Option<Vec<(F, Array1<F>)>>,
    /// Log marginal likelihood associated with the prediction.
    pub log_marginal_likelihood: F,
}
/// Covariance model with per-dimension (anisotropic) length scales.
#[derive(Debug, Clone)]
pub struct AnisotropicCovariance<
    F: Float
        + Debug
        + Display
        + Add<Output = F>
        + Sub<Output = F>
        + Mul<Output = F>
        + Div<Output = F>
        + std::ops::AddAssign
        + std::ops::SubAssign
        + std::ops::MulAssign
        + std::ops::DivAssign
        + std::ops::RemAssign,
> {
    /// Base covariance (kernel) family.
    pub cov_fn: CovarianceFunction,
    /// One length scale per input dimension.
    pub length_scales: Array1<F>,
    /// Process variance sigma^2 (covariance at zero distance, before the nugget).
    pub sigma_sq: F,
    /// Optional rotation angles.
    /// NOTE(review): stored but never applied in `compute_anisotropic_distance` —
    /// confirm whether rotation is intended to affect distances.
    pub angles: Option<Array1<F>>,
    /// Nugget (observation-noise) term added on the covariance diagonal.
    pub nugget: F,
    /// Extra kernel parameter; used as the shape parameter of the
    /// rational-quadratic kernel in `evaluate_covariance`.
    pub extra_params: F,
}
/// Trend (drift) model used for universal kriging.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum TrendFunction {
    /// Constant mean (intercept only).
    Constant,
    /// Intercept plus linear terms in each coordinate.
    Linear,
    /// Linear trend plus all pairwise quadratic terms x_i * x_j, i <= j.
    Quadratic,
    /// Polynomial trend of the given total degree.
    Custom(usize),
}
/// Prior distribution over a single kriging hyperparameter.
///
/// NOTE(review): these priors are stored but not evaluated anywhere visible
/// in this file; the exact parameter conventions (e.g. std-dev vs variance,
/// rate vs scale) are not fixed by the code shown — confirm before relying
/// on them.
#[derive(Debug, Clone)]
pub enum ParameterPrior<
    F: Float
        + Debug
        + Display
        + Add<Output = F>
        + Sub<Output = F>
        + Mul<Output = F>
        + Div<Output = F>
        + std::ops::AddAssign
        + std::ops::SubAssign
        + std::ops::MulAssign
        + std::ops::DivAssign
        + std::ops::RemAssign,
> {
    /// Uniform over an interval (two bounds).
    Uniform(F, F),
    /// Normal distribution (two parameters).
    Normal(F, F),
    /// Gamma distribution (two parameters).
    Gamma(F, F),
    /// Inverse-gamma distribution (two parameters).
    InverseGamma(F, F),
    /// Point mass at a fixed value (parameter not inferred).
    Fixed(F),
}
/// A fitted (universal) kriging interpolator with anisotropic covariance.
///
/// Construct via [`EnhancedKriging::builder`] / [`EnhancedKrigingBuilder`].
#[derive(Debug, Clone)]
pub struct EnhancedKriging<F>
where
    F: Float
        + FromPrimitive
        + Debug
        + Display
        + Add<Output = F>
        + Sub<Output = F>
        + Mul<Output = F>
        + Div<Output = F>
        + std::ops::AddAssign
        + std::ops::SubAssign
        + std::ops::MulAssign
        + std::ops::DivAssign
        + std::ops::RemAssign,
{
    // Training point coordinates, shape (n_points, n_dims).
    points: Array2<F>,
    // Training values, length n_points.
    values: Array1<F>,
    // Covariance model used for the fit.
    anisotropic_cov: AnisotropicCovariance<F>,
    // Trend function the model was built with.
    _trend_fn: TrendFunction,
    // Training covariance matrix K (nugget included on the diagonal).
    cov_matrix: Array2<F>,
    // Cholesky factor of the (regularized, augmented) kriging system from `build`.
    cholesky_factor: Option<Array2<F>>,
    // Kriging weights: first n_points entries of the system solution.
    weights: Array1<F>,
    // Trend coefficients: trailing entries of the system solution, if any.
    trend_coeffs: Option<Array1<F>>,
    // Hyperparameter priors carried over from the builder.
    priors: Option<KrigingPriors<F>>,
    // Number of posterior samples requested for Bayesian prediction.
    n_samples: usize,
    // Trend design matrix evaluated at the training points, if a trend is used.
    basis_functions: Option<Array2<F>>,
    // Whether full predictive covariance matrices were requested.
    compute_full_covariance: bool,
    // Whether exact (rather than approximate) computation was requested.
    use_exact_computation: bool,
    _phantom: PhantomData<F>,
}
/// Collection of priors over all kriging hyperparameters, used for
/// Bayesian treatment of the model.
#[derive(Debug, Clone)]
pub struct KrigingPriors<
    F: Float
        + Debug
        + Display
        + Add<Output = F>
        + Sub<Output = F>
        + Mul<Output = F>
        + Div<Output = F>
        + std::ops::AddAssign
        + std::ops::SubAssign
        + std::ops::MulAssign
        + std::ops::DivAssign
        + std::ops::RemAssign,
> {
    /// Prior on the process variance sigma^2.
    pub sigma_sq_prior: ParameterPrior<F>,
    /// Prior on the length scales.
    pub length_scale_prior: ParameterPrior<F>,
    /// Prior on the nugget term.
    pub nugget_prior: ParameterPrior<F>,
    /// Prior on the trend coefficients.
    pub trend_coeffs_prior: ParameterPrior<F>,
}
/// Builder for [`EnhancedKriging`]; configure with the setter methods and
/// finish with [`EnhancedKrigingBuilder::build`].
#[derive(Debug, Clone)]
pub struct EnhancedKrigingBuilder<F>
where
    F: Float
        + FromPrimitive
        + Debug
        + Display
        + Add<Output = F>
        + Sub<Output = F>
        + Mul<Output = F>
        + Div<Output = F>
        + std::ops::AddAssign
        + std::ops::SubAssign
        + std::ops::MulAssign
        + std::ops::DivAssign
        + std::ops::RemAssign,
{
    // Training point coordinates; required before `build`.
    points: Option<Array2<F>>,
    // Training values; required before `build`.
    values: Option<Array1<F>>,
    // Covariance family (used only when `anisotropic_cov` is not set).
    cov_fn: CovarianceFunction,
    // Per-dimension length scales; defaults to all ones in `build`.
    length_scales: Option<Array1<F>>,
    // Process variance sigma^2.
    sigma_sq: F,
    // Optional rotation angles forwarded into the covariance model.
    angles: Option<Array1<F>>,
    // Nugget term.
    nugget: F,
    // Extra kernel parameter (rational-quadratic shape).
    extra_params: F,
    // Trend function; NOTE(review): no public setter exists in this file,
    // so it stays at the `new()` default unless set within this module.
    _trend_fn: TrendFunction,
    // Pre-assembled covariance model; overrides the individual fields above.
    anisotropic_cov: Option<AnisotropicCovariance<F>>,
    // Optional hyperparameter priors, carried into the fitted model.
    priors: Option<KrigingPriors<F>>,
    // Number of posterior samples requested for Bayesian prediction.
    n_samples: usize,
    // Whether to compute full predictive covariance matrices.
    compute_full_covariance: bool,
    // Whether to use exact computation.
    use_exact_computation: bool,
    // Whether to optimize hyperparameters (read by BayesianKrigingBuilder).
    optimize_parameters: bool,
    _phantom: PhantomData<F>,
}
impl<F> Default for EnhancedKrigingBuilder<F>
where
    F: Float
        + FromPrimitive
        + Debug
        + Display
        + Sub<Output = F>
        + Div<Output = F>
        + Mul<Output = F>
        + Add<Output = F>
        + std::ops::AddAssign
        + std::ops::SubAssign
        + std::ops::MulAssign
        + std::ops::DivAssign
        + std::ops::RemAssign,
{
    /// Equivalent to [`EnhancedKrigingBuilder::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl<F> EnhancedKrigingBuilder<F>
where
F: Float
+ FromPrimitive
+ Debug
+ Display
+ Sub<Output = F>
+ Div<Output = F>
+ Mul<Output = F>
+ Add<Output = F>
+ std::ops::AddAssign
+ std::ops::SubAssign
+ std::ops::MulAssign
+ std::ops::DivAssign
+ std::ops::RemAssign,
{
/// Creates a builder with default settings: squared-exponential covariance,
/// unit variance, a 1e-10 nugget, constant trend, exact computation, and
/// parameter optimization disabled.
pub fn new() -> Self {
    Self {
        points: None,
        values: None,
        cov_fn: CovarianceFunction::SquaredExponential,
        length_scales: None,
        sigma_sq: const_f64::<F>(1.0),
        angles: None,
        nugget: const_f64::<F>(1e-10),
        extra_params: const_f64::<F>(1.0),
        _trend_fn: TrendFunction::Constant,
        anisotropic_cov: None,
        priors: None,
        n_samples: 0,
        compute_full_covariance: false,
        use_exact_computation: true,
        optimize_parameters: false,
        _phantom: PhantomData,
    }
}
/// Sets the training point coordinates (required).
pub fn points(mut self, points: Array2<F>) -> Self {
    self.points = Some(points);
    self
}
/// Sets the training values (required).
pub fn values(mut self, values: Array1<F>) -> Self {
    self.values = Some(values);
    self
}
/// Sets the covariance function family.
pub fn cov_fn(mut self, covfn: CovarianceFunction) -> Self {
    self.cov_fn = covfn;
    self
}
/// Sets per-dimension length scales (defaults to all ones).
pub fn length_scales(mut self, lengthscales: Array1<F>) -> Self {
    self.length_scales = Some(lengthscales);
    self
}
/// Sets the process variance sigma^2.
pub fn sigma_sq(mut self, sigmasq: F) -> Self {
    self.sigma_sq = sigmasq;
    self
}
/// Sets optional rotation angles for the covariance model.
pub fn angles(mut self, angles: Array1<F>) -> Self {
    self.angles = Some(angles);
    self
}
/// Sets the nugget (noise) term.
pub fn nugget(mut self, nugget: F) -> Self {
    self.nugget = nugget;
    self
}
/// Sets the extra kernel parameter (rational-quadratic shape).
pub fn extra_params(mut self, extraparams: F) -> Self {
    self.extra_params = extraparams;
    self
}
/// Supplies a fully assembled covariance model, overriding the individual
/// covariance fields.
pub fn anisotropic_cov(mut self, anisotropiccov: AnisotropicCovariance<F>) -> Self {
    self.anisotropic_cov = Some(anisotropiccov);
    self
}
/// Attaches hyperparameter priors.
pub fn priors(mut self, priors: KrigingPriors<F>) -> Self {
    self.priors = Some(priors);
    self
}
/// Sets the number of posterior samples for Bayesian prediction.
pub fn n_samples(mut self, nsamples: usize) -> Self {
    self.n_samples = nsamples;
    self
}
/// Requests full predictive covariance matrices.
pub fn compute_full_covariance(mut self, compute_fullcovariance: bool) -> Self {
    self.compute_full_covariance = compute_fullcovariance;
    self
}
/// Chooses exact vs approximate computation.
pub fn use_exact_computation(mut self, use_exactcomputation: bool) -> Self {
    self.use_exact_computation = use_exactcomputation;
    self
}
/// Enables/disables hyperparameter optimization.
pub fn optimize_parameters(mut self, optimizeparameters: bool) -> Self {
    self.optimize_parameters = optimizeparameters;
    self
}
/// Validates the configuration and fits the kriging model: assembles the
/// training covariance matrix, augments it with the trend basis columns
/// (universal kriging), solves for weights and trend coefficients, and
/// returns the fitted [`EnhancedKriging`].
///
/// # Errors
/// - points/values unset, length mismatch, or fewer than 2 points;
/// - any failure in `solve_kriging_system` (Cholesky factorization).
///
/// NOTE(review): the augmented system [[K, F], [F^T, 0]] assembled below is
/// indefinite (saddle-point form), while `solve_kriging_system` uses a
/// Cholesky factorization that requires positive definiteness — confirm that
/// builds with trend columns (including the default Constant trend) succeed.
pub fn build(self) -> InterpolateResult<EnhancedKriging<F>> {
    let points = self
        .points
        .ok_or_else(|| InterpolateError::invalid_input("points must be set".to_string()))?;
    let values = self
        .values
        .ok_or_else(|| InterpolateError::invalid_input("values must be set".to_string()))?;
    if points.shape()[0] != values.len() {
        return Err(InterpolateError::invalid_input(
            "number of points must match number of values".to_string(),
        ));
    }
    if points.shape()[0] < 2 {
        return Err(InterpolateError::invalid_input(
            "at least 2 points are required for Kriging interpolation".to_string(),
        ));
    }
    // Use the explicitly supplied covariance model, or assemble one from the
    // individual builder fields (unit length scales by default).
    let anisotropic_cov = if let Some(cov) = self.anisotropic_cov {
        cov
    } else {
        let length_scales = if let Some(ls) = self.length_scales {
            ls
        } else {
            Array1::from_elem(points.shape()[1], F::one())
        };
        AnisotropicCovariance {
            cov_fn: self.cov_fn,
            length_scales,
            sigma_sq: self.sigma_sq,
            angles: self.angles,
            nugget: self.nugget,
            extra_params: self.extra_params,
        }
    };
    let n_points = points.shape()[0];
    let _n_dims = points.shape()[1];
    // Training covariance K: sigma^2 + nugget on the diagonal, kernel of the
    // anisotropic distance elsewhere.
    let mut cov_matrix = Array2::zeros((n_points, n_points));
    for i in 0..n_points {
        for j in 0..n_points {
            if i == j {
                cov_matrix[[i, j]] = anisotropic_cov.sigma_sq + anisotropic_cov.nugget;
            } else {
                let dist = Self::compute_anisotropic_distance(
                    &points.slice(scirs2_core::ndarray::s![i, ..]),
                    &points.slice(scirs2_core::ndarray::s![j, ..]),
                    &anisotropic_cov,
                );
                cov_matrix[[i, j]] = Self::evaluate_covariance(dist, &anisotropic_cov);
            }
        }
    }
    // Universal kriging: augment K with the trend design matrix F into
    // [[K, F], [F^T, 0]] and pad the right-hand side with zeros.
    let (trend_matrix, n_basis) = Self::build_trend_matrix(&points, self._trend_fn)?;
    let system_size = n_points + n_basis;
    let mut augmented_matrix = Array2::zeros((system_size, system_size));
    let mut rhs = Array1::zeros(system_size);
    for i in 0..n_points {
        for j in 0..n_points {
            augmented_matrix[[i, j]] = cov_matrix[[i, j]];
        }
    }
    for i in 0..n_points {
        for j in 0..n_basis {
            let val = trend_matrix[[i, j]];
            augmented_matrix[[i, n_points + j]] = val;
            augmented_matrix[[n_points + j, i]] = val;
        }
    }
    for i in 0..n_points {
        rhs[i] = values[i];
    }
    let (cholesky_factor, solution) = Self::solve_kriging_system(&augmented_matrix, &rhs)?;
    // First n_points entries are the kriging weights; the trailing entries
    // (if any) are the trend coefficients.
    let weights = solution
        .slice(scirs2_core::ndarray::s![0..n_points])
        .to_owned();
    let trend_coeffs = if n_basis > 0 {
        Some(
            solution
                .slice(scirs2_core::ndarray::s![n_points..])
                .to_owned(),
        )
    } else {
        None
    };
    let basis_functions = if n_basis > 0 {
        Some(trend_matrix)
    } else {
        None
    };
    Ok(EnhancedKriging {
        points,
        values,
        anisotropic_cov,
        _trend_fn: self._trend_fn,
        cov_matrix,
        cholesky_factor: Some(cholesky_factor),
        weights,
        trend_coeffs,
        priors: self.priors,
        n_samples: self.n_samples,
        basis_functions,
        compute_full_covariance: self.compute_full_covariance,
        use_exact_computation: self.use_exact_computation,
        _phantom: PhantomData,
    })
}
/// Builds the n_points x n_basis trend (drift) design matrix for the given
/// trend function by evaluating the basis at every training point.
///
/// Returns the matrix together with the number of basis functions. Must stay
/// consistent with `EnhancedKriging::evaluate_trend_basis`, which evaluates
/// the same basis at query points.
fn build_trend_matrix(
    points: &Array2<F>,
    trend_fn: TrendFunction,
) -> InterpolateResult<(Array2<F>, usize)> {
    let n_points = points.shape()[0];
    let n_dims = points.shape()[1];
    let (n_basis, basis_fn): (usize, BasisFunctionClosure<F>) = match trend_fn {
        // Constant trend: a single intercept column. The point is unused here,
        // hence `_x` (the previous `x` binding raised an unused-variable warning).
        TrendFunction::Constant => (1, Box::new(|_x: &ArrayView1<F>| vec![F::one()])),
        // Intercept plus one linear term per coordinate.
        TrendFunction::Linear => (
            1 + n_dims,
            Box::new(|x: &ArrayView1<F>| {
                let mut basis = vec![F::one()];
                basis.extend(x.iter().cloned());
                basis
            }),
        ),
        // Linear basis plus all pairwise products x_i * x_j with i <= j.
        TrendFunction::Quadratic => (
            1 + n_dims + n_dims * (n_dims + 1) / 2,
            Box::new(|x: &ArrayView1<F>| {
                let mut basis = vec![F::one()];
                basis.extend(x.iter().cloned());
                for i in 0..x.len() {
                    for j in i..x.len() {
                        basis.push(x[i] * x[j]);
                    }
                }
                basis
            }),
        ),
        // Full polynomial basis of the given total degree.
        TrendFunction::Custom(degree) => {
            let n_basis = Self::compute_polynomial_basis_size(n_dims, degree);
            (
                n_basis,
                Box::new(move |x: &ArrayView1<F>| Self::compute_polynomial_basis(x, degree)),
            )
        }
    };
    let mut trend_matrix = Array2::zeros((n_points, n_basis));
    for i in 0..n_points {
        let point = points.slice(scirs2_core::ndarray::s![i, ..]);
        let basis_vals = basis_fn(&point);
        for j in 0..n_basis {
            trend_matrix[[i, j]] = basis_vals[j];
        }
    }
    Ok((trend_matrix, n_basis))
}
/// Number of polynomial basis terms in `n_dims` variables with total degree
/// at most `degree`.
///
/// Uses the closed form sum_{d=0}^{degree} C(n_dims + d - 1, d)
/// = C(n_dims + degree, degree) (hockey-stick identity). Besides being O(1),
/// this avoids the usize underflow the term-by-term sum hit when
/// `n_dims == 0` (where `n_dims + d - 1` wraps at d == 0).
fn compute_polynomial_basis_size(n_dims: usize, degree: usize) -> usize {
    // degree == 0 falls out naturally: C(n_dims, 0) == 1.
    Self::binomial_coefficient(n_dims + degree, degree)
}
/// Computes the binomial coefficient C(n, k) with the multiplicative
/// formula, dividing at every step to keep intermediate values small.
fn binomial_coefficient(n: usize, k: usize) -> usize {
    if k > n {
        return 0;
    }
    if k == 0 || k == n {
        return 1;
    }
    // Each prefix product is itself a binomial coefficient, so the
    // division is exact at every step.
    (0..k).fold(1, |acc, i| acc * (n - i) / (i + 1))
}
/// Evaluates every monomial of total degree <= `degree` at the point `x`.
fn compute_polynomial_basis(x: &ArrayView1<F>, degree: usize) -> Vec<F> {
    let dims = x.len();
    let mut terms = Vec::new();
    let mut exponents = vec![0usize; dims];
    Self::generate_multi_indices(dims, degree, &mut terms, x, &mut exponents, 0, 0);
    terms
}
/// Recursively enumerates all exponent vectors (multi-indices) with total
/// degree <= `max_degree`, pushing the corresponding monomial value at `x`
/// into `basis` once a full index is assembled.
///
/// `indices[dim]` holds the exponent chosen for each dimension so far and
/// `current_degree` is their running sum; the loop bound keeps
/// `current_degree <= max_degree`, so the subtraction below cannot underflow.
fn generate_multi_indices(
    n_dims: usize,
    max_degree: usize,
    basis: &mut Vec<F>,
    x: &ArrayView1<F>,
    indices: &mut Vec<usize>,
    dim: usize,
    current_degree: usize,
) {
    if dim == n_dims {
        // All dimensions assigned: evaluate the monomial prod_i x_i^indices[i].
        if current_degree <= max_degree {
            let mut value = F::one();
            for (i, &power) in indices.iter().enumerate() {
                for _ in 0..power {
                    value *= x[i];
                }
            }
            basis.push(value);
        }
        return;
    }
    // Try every exponent for this dimension that keeps the total degree in range.
    for power in 0..=(max_degree - current_degree) {
        indices[dim] = power;
        Self::generate_multi_indices(
            n_dims,
            max_degree,
            basis,
            x,
            indices,
            dim + 1,
            current_degree + power,
        );
    }
}
/// Solves the (possibly augmented) kriging system via Cholesky factorization
/// after adding a small diagonal regularization, returning both the factor
/// and the solution vector.
///
/// NOTE(review): the augmented universal-kriging matrix [[K, F], [F^T, 0]]
/// built by `build` is indefinite, so a Cholesky factorization (which
/// requires positive definiteness) is expected to fail whenever trend basis
/// columns are present; the 1e-8 diagonal regularization does not restore
/// definiteness. An LU/LDL^T solver would be needed for that case — confirm
/// intended usage.
fn solve_kriging_system(
    matrix: &Array2<F>,
    rhs: &Array1<F>,
) -> InterpolateResult<(Array2<F>, Array1<F>)> {
    let n = matrix.shape()[0];
    let mut augmented = matrix.clone();
    // Small jitter to improve numerical conditioning of the factorization.
    let regularization = const_f64::<F>(1e-8);
    for i in 0..n {
        augmented[[i, i]] += regularization;
    }
    // Solve L L^T x = b by forward then backward substitution.
    let cholesky = Self::cholesky_decomposition(&augmented)?;
    let y = Self::forward_substitution(&cholesky, rhs)?;
    let solution = Self::backward_substitution(&cholesky, &y)?;
    Ok((cholesky, solution))
}
/// Computes the lower-triangular Cholesky factor L with matrix = L * L^T,
/// using the classic row-by-row algorithm.
///
/// # Errors
/// Returns a numerical error when a non-positive pivot appears, i.e. the
/// matrix is not (numerically) positive definite.
fn cholesky_decomposition(matrix: &Array2<F>) -> InterpolateResult<Array2<F>> {
    let n = matrix.shape()[0];
    let mut cholesky = Array2::zeros((n, n));
    for i in 0..n {
        for j in 0..=i {
            if i == j {
                // Diagonal entry: sqrt(A[j,j] - sum_k L[j,k]^2).
                let mut sum = F::zero();
                for k in 0..j {
                    sum += cholesky[[j, k]] * cholesky[[j, k]];
                }
                let val = matrix[[j, j]] - sum;
                if val <= F::zero() {
                    return Err(InterpolateError::numerical_error(
                        "Matrix is not positive definite for Cholesky decomposition"
                            .to_string(),
                    ));
                }
                cholesky[[j, j]] = val.sqrt();
            } else {
                // Off-diagonal entry: (A[i,j] - sum_k L[i,k]·L[j,k]) / L[j,j].
                let mut sum = F::zero();
                for k in 0..j {
                    sum += cholesky[[i, k]] * cholesky[[j, k]];
                }
                cholesky[[i, j]] = (matrix[[i, j]] - sum) / cholesky[[j, j]];
            }
        }
    }
    Ok(cholesky)
}
/// Solves the lower-triangular system `L * x = b` by forward substitution.
fn forward_substitution(lower: &Array2<F>, rhs: &Array1<F>) -> InterpolateResult<Array1<F>> {
    let size = lower.shape()[0];
    let mut x = Array1::zeros(size);
    for row in 0..size {
        let partial = (0..row).fold(F::zero(), |acc, col| acc + lower[[row, col]] * x[col]);
        x[row] = (rhs[row] - partial) / lower[[row, row]];
    }
    Ok(x)
}
/// Solves the upper-triangular system `L^T * x = b` by back substitution,
/// reading the transpose of the stored lower factor in place.
fn backward_substitution(lower: &Array2<F>, rhs: &Array1<F>) -> InterpolateResult<Array1<F>> {
    let size = lower.shape()[0];
    let mut x = Array1::zeros(size);
    for row in (0..size).rev() {
        let partial =
            ((row + 1)..size).fold(F::zero(), |acc, col| acc + lower[[col, row]] * x[col]);
        x[row] = (rhs[row] - partial) / lower[[row, row]];
    }
    Ok(x)
}
/// Scaled Euclidean distance between two points: each coordinate difference
/// is divided by its per-dimension length scale (missing scales default to
/// one) before the squared differences are summed.
fn compute_anisotropic_distance(
    p1: &ArrayView1<F>,
    p2: &ArrayView1<F>,
    cov: &AnisotropicCovariance<F>,
) -> F {
    let squared_total = p1
        .iter()
        .zip(p2.iter())
        .enumerate()
        .fold(F::zero(), |acc, (dim, (&a, &b))| {
            let scale = cov.length_scales.get(dim).copied().unwrap_or_else(F::one);
            let scaled = (a - b) / scale;
            acc + scaled * scaled
        });
    squared_total.sqrt()
}
/// Evaluates the covariance at (already length-scale-normalized) distance `r`.
///
/// Kernel forms as implemented here: squared-exponential sigma^2*exp(-r^2);
/// exponential sigma^2*exp(-r); Matern 3/2 and 5/2 with the usual sqrt(3)*r /
/// sqrt(5)*r factors; rational quadratic with shape parameter `extra_params`.
fn evaluate_covariance(r: F, cov: &AnisotropicCovariance<F>) -> F {
    match cov.cov_fn {
        CovarianceFunction::SquaredExponential => cov.sigma_sq * (-r * r).exp(),
        CovarianceFunction::Exponential => cov.sigma_sq * (-r).exp(),
        CovarianceFunction::Matern32 => {
            let sqrt3_r = const_f64::<F>(3.0).sqrt() * r;
            cov.sigma_sq * (F::one() + sqrt3_r) * (-sqrt3_r).exp()
        }
        CovarianceFunction::Matern52 => {
            let sqrt5_r = const_f64::<F>(5.0).sqrt() * r;
            let factor = F::one() + sqrt5_r + const_f64::<F>(5.0) * r * r / const_f64::<F>(3.0);
            cov.sigma_sq * factor * (-sqrt5_r).exp()
        }
        CovarianceFunction::RationalQuadratic => {
            // (1 + r^2 / (2*alpha))^(-alpha) with alpha = extra_params.
            let r_sq_div_2a = r * r / (const_f64::<F>(2.0) * cov.extra_params);
            cov.sigma_sq * (F::one() + r_sq_div_2a).powf(-cov.extra_params)
        }
    }
}
}
/// Builder for kriging with Bayesian hyperparameter treatment; wraps an
/// [`EnhancedKrigingBuilder`] and adds prior ranges plus optimization.
#[derive(Debug, Clone)]
pub struct BayesianKrigingBuilder<F>
where
    F: Float
        + FromPrimitive
        + Debug
        + Display
        + Add<Output = F>
        + Sub<Output = F>
        + Mul<Output = F>
        + Div<Output = F>
        + std::ops::AddAssign
        + std::ops::SubAssign
        + std::ops::MulAssign
        + std::ops::DivAssign
        + std::ops::RemAssign,
{
    // Inner builder that performs the actual fit.
    kriging_builder: EnhancedKrigingBuilder<F>,
    // (min, max) search range for the length scales, if any.
    length_scale_prior: Option<(F, F)>,
    // (min, max) range for the process variance, if any.
    variance_prior: Option<(F, F)>,
    // (min, max) range for the nugget, if any.
    nugget_prior: Option<(F, F)>,
    // Number of posterior samples to draw.
    n_samples: usize,
    // Whether `build` runs the hyperparameter grid search first.
    optimize_parameters: bool,
    _phantom: PhantomData<F>,
}
impl<F> Default for BayesianKrigingBuilder<F>
where
    F: Float
        + FromPrimitive
        + Debug
        + Display
        + Sub<Output = F>
        + Div<Output = F>
        + Mul<Output = F>
        + Add<Output = F>
        + std::ops::AddAssign
        + std::ops::SubAssign
        + std::ops::MulAssign
        + std::ops::DivAssign
        + std::ops::RemAssign,
{
    /// Equivalent to [`BayesianKrigingBuilder::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl<F> BayesianKrigingBuilder<F>
where
F: Float
+ FromPrimitive
+ Debug
+ Display
+ Sub<Output = F>
+ Div<Output = F>
+ Mul<Output = F>
+ Add<Output = F>
+ std::ops::AddAssign
+ std::ops::SubAssign
+ std::ops::MulAssign
+ std::ops::DivAssign
+ std::ops::RemAssign,
{
/// Creates a Bayesian kriging builder with defaults: 1000 posterior samples,
/// hyperparameter optimization enabled, and no prior ranges set.
pub fn new() -> Self {
    Self {
        kriging_builder: EnhancedKrigingBuilder::new(),
        length_scale_prior: None,
        variance_prior: None,
        nugget_prior: None,
        n_samples: 1000,
        optimize_parameters: true,
        _phantom: PhantomData,
    }
}
/// Fits the model, running the hyperparameter grid search first when
/// `optimize_parameters` is enabled.
pub fn build(self) -> InterpolateResult<EnhancedKriging<F>> {
    if !self.optimize_parameters {
        return self.kriging_builder.build();
    }
    let tuned = self.optimize_hyperparameters()?;
    tuned.kriging_builder.build()
}
/// Grid-searches isotropic length scales over the `length_scale_prior` range
/// (5 evenly spaced candidates), keeping the setting with the highest log
/// marginal likelihood, and stores it in the inner builder.
///
/// NOTE(review): `length_scale_prior` has no setter anywhere in this file and
/// `new()` leaves it `None`, so as far as visible here this search branch is
/// unreachable — confirm how the prior range is meant to be configured.
fn optimize_hyperparameters(self) -> InterpolateResult<Self> {
    let mut current_builder = self;
    if let Some((min_ls, max_ls)) = current_builder.length_scale_prior {
        let points = current_builder
            .kriging_builder
            .points
            .as_ref()
            .ok_or_else(|| {
                InterpolateError::invalid_input(
                    "points must be set for optimization".to_string(),
                )
            })?;
        let values = current_builder
            .kriging_builder
            .values
            .as_ref()
            .ok_or_else(|| {
                InterpolateError::invalid_input(
                    "values must be set for optimization".to_string(),
                )
            })?;
        let n_dims = points.shape()[1];
        let mut best_log_likelihood = F::neg_infinity();
        let mut best_length_scales = Array1::from_elem(n_dims, F::one());
        let n_grid = 5;
        for i in 0..n_grid {
            // Candidate i interpolates linearly between min_ls and max_ls.
            let ls_factor = min_ls
                + (max_ls - min_ls)
                    * F::from_usize(i).expect("Failed to convert usize to float")
                    / F::from_usize(n_grid - 1).expect("Failed to convert usize to float");
            let length_scales = Array1::from_elem(n_dims, ls_factor);
            // Candidates whose likelihood computation fails (e.g. a
            // non-positive-definite covariance) are silently skipped.
            if let Ok(log_likelihood) = Self::compute_log_marginal_likelihood(
                points,
                values,
                &length_scales,
                current_builder.kriging_builder.sigma_sq,
                current_builder.kriging_builder.nugget,
                current_builder.kriging_builder.cov_fn,
            ) {
                if log_likelihood > best_log_likelihood {
                    best_log_likelihood = log_likelihood;
                    best_length_scales = length_scales;
                }
            }
        }
        current_builder.kriging_builder = current_builder
            .kriging_builder
            .length_scales(best_length_scales);
    }
    Ok(current_builder)
}
/// Log marginal likelihood of `values` under a zero-mean Gaussian process
/// with the given kernel settings:
/// -1/2 * (y^T K^-1 y + log|K| + n*log(2*pi)).
///
/// # Errors
/// Fails when the covariance matrix cannot be Cholesky-factorized.
fn compute_log_marginal_likelihood(
    points: &Array2<F>,
    values: &Array1<F>,
    length_scales: &Array1<F>,
    sigma_sq: F,
    nugget: F,
    cov_fn: CovarianceFunction,
) -> InterpolateResult<F> {
    let n_points = points.shape()[0];
    let mut cov_matrix = Array2::zeros((n_points, n_points));
    let anisotropic_cov = AnisotropicCovariance {
        cov_fn,
        length_scales: length_scales.clone(),
        sigma_sq,
        angles: None,
        nugget,
        extra_params: F::one(),
    };
    // Assemble K with sigma^2 + nugget on the diagonal.
    for i in 0..n_points {
        for j in 0..n_points {
            if i == j {
                cov_matrix[[i, j]] = sigma_sq + nugget;
            } else {
                let dist = EnhancedKrigingBuilder::compute_anisotropic_distance(
                    &points.slice(scirs2_core::ndarray::s![i, ..]),
                    &points.slice(scirs2_core::ndarray::s![j, ..]),
                    &anisotropic_cov,
                );
                cov_matrix[[i, j]] =
                    EnhancedKrigingBuilder::evaluate_covariance(dist, &anisotropic_cov);
            }
        }
    }
    // K = L L^T; forward then backward substitution yields K^-1 y.
    let cholesky = EnhancedKrigingBuilder::cholesky_decomposition(&cov_matrix)?;
    let alpha = EnhancedKrigingBuilder::forward_substitution(&cholesky, values)?;
    let log_likelihood_alpha =
        EnhancedKrigingBuilder::backward_substitution(&cholesky, &alpha)?;
    // Quadratic form y^T K^-1 y.
    let mut quadratic_form = F::zero();
    for i in 0..n_points {
        quadratic_form += values[i] * log_likelihood_alpha[i];
    }
    // log|K| = 2 * sum_i log(L[i,i]).
    let mut log_det = F::zero();
    for i in 0..n_points {
        log_det += cholesky[[i, i]].ln();
    }
    log_det *= const_f64::<F>(2.0);
    let n_f64 = F::from_usize(n_points).expect("Failed to convert usize to float");
    let log_2pi = F::from_f64(2.0 * std::f64::consts::PI)
        .expect("Failed to convert 2*PI to float")
        .ln();
    let log_likelihood = -const_f64::<F>(0.5) * (quadratic_form + log_det + n_f64 * log_2pi);
    Ok(log_likelihood)
}
/// Returns the configured length-scale prior range, if any.
pub fn length_scale_prior(&self) -> Option<&(F, F)> {
    self.length_scale_prior.as_ref()
}
/// Returns the configured variance prior range, if any.
pub fn variance_prior(&self) -> Option<&(F, F)> {
    self.variance_prior.as_ref()
}
/// Returns the configured nugget prior range, if any.
pub fn nugget_prior(&self) -> Option<&(F, F)> {
    self.nugget_prior.as_ref()
}
/// Returns the number of posterior samples to draw.
pub fn n_samples(&self) -> usize {
    self.n_samples
}
/// Returns whether hyperparameter optimization is enabled.
pub fn optimize_parameters(&self) -> bool {
    self.optimize_parameters
}
}
impl<F> AnisotropicCovariance<F>
where
    F: Float
        + FromPrimitive
        + Debug
        + Display
        + Add<Output = F>
        + Sub<Output = F>
        + Mul<Output = F>
        + Div<Output = F>
        + std::ops::AddAssign
        + std::ops::SubAssign
        + std::ops::MulAssign
        + std::ops::DivAssign
        + std::ops::RemAssign,
{
    /// Creates a new anisotropic covariance model.
    ///
    /// `length_scales` holds one scale per input dimension; `angles`, when
    /// given, are stored as rotation parameters. The extra kernel parameter
    /// defaults to one.
    pub fn new(
        cov_fn: CovarianceFunction,
        length_scales: Vec<F>,
        sigma_sq: F,
        nugget: F,
        angles: Option<Vec<F>>,
    ) -> Self {
        Self {
            cov_fn,
            length_scales: Array1::from_vec(length_scales),
            sigma_sq,
            angles: angles.map(Array1::from_vec),
            nugget,
            extra_params: F::one(),
        }
    }
}
impl<F> EnhancedKriging<F>
where
F: Float
+ FromPrimitive
+ Debug
+ Display
+ Sub<Output = F>
+ Div<Output = F>
+ Mul<Output = F>
+ Add<Output = F>
+ std::ops::AddAssign
+ std::ops::SubAssign
+ std::ops::MulAssign
+ std::ops::DivAssign
+ std::ops::RemAssign,
{
/// Returns a builder for configuring and fitting an [`EnhancedKriging`] model.
pub fn builder() -> EnhancedKrigingBuilder<F> {
    EnhancedKrigingBuilder::new()
}
/// Evaluates the trend basis functions at a single query point.
///
/// Must stay consistent with `EnhancedKrigingBuilder::build_trend_matrix`,
/// which builds the same basis for the training points.
fn evaluate_trend_basis(
    query_point: &ArrayView1<F>,
    trend_fn: TrendFunction,
) -> InterpolateResult<Vec<F>> {
    let dims = query_point.len();
    let basis = match trend_fn {
        // Intercept only.
        TrendFunction::Constant => vec![F::one()],
        // Intercept plus the raw coordinates.
        TrendFunction::Linear => {
            let mut terms = Vec::with_capacity(1 + dims);
            terms.push(F::one());
            terms.extend(query_point.iter().cloned());
            terms
        }
        // Linear terms plus all pairwise products x_i * x_j with i <= j.
        TrendFunction::Quadratic => {
            let mut terms = Vec::with_capacity(1 + dims + dims * (dims + 1) / 2);
            terms.push(F::one());
            terms.extend(query_point.iter().cloned());
            for a in 0..dims {
                for b in a..dims {
                    terms.push(query_point[a] * query_point[b]);
                }
            }
            terms
        }
        // Full polynomial basis of the given total degree.
        TrendFunction::Custom(degree) => {
            EnhancedKrigingBuilder::compute_polynomial_basis(query_point, degree)
        }
    };
    Ok(basis)
}
/// Predicts values and variances at the given query points.
///
/// For each query point the kriging mean is k*·w plus the trend contribution
/// (when trend coefficients are present), and the variance is
/// k** - k*^T K^-1 k* + nugget, floored at the nugget.
///
/// # Errors
/// Returns an error when the query points' dimensionality does not match the
/// training points'.
pub fn predict(&self, querypoints: &ArrayView2<F>) -> InterpolateResult<PredictionResult<F>> {
    if querypoints.shape()[1] != self.points.shape()[1] {
        return Err(InterpolateError::invalid_input(
            // Fixed garbled message (stray underscores left by an automated rename).
            "query points must have the same dimension as sample points".to_string(),
        ));
    }
    let n_query = querypoints.shape()[0];
    let n_points = self.points.shape()[0];
    let mut values = Array1::zeros(n_query);
    let mut variances = Array1::zeros(n_query);
    for i in 0..n_query {
        let query_point = querypoints.slice(scirs2_core::ndarray::s![i, ..]);
        // Cross-covariance vector k* between the query point and each sample.
        let mut k_star = Array1::zeros(n_points);
        for j in 0..n_points {
            let sample_point = self.points.slice(scirs2_core::ndarray::s![j, ..]);
            let dist = EnhancedKrigingBuilder::compute_anisotropic_distance(
                &query_point,
                &sample_point,
                &self.anisotropic_cov,
            );
            k_star[j] =
                EnhancedKrigingBuilder::evaluate_covariance(dist, &self.anisotropic_cov);
        }
        // Kriging mean: weighted sum of covariances.
        let mut prediction = F::zero();
        for j in 0..n_points {
            prediction += k_star[j] * self.weights[j];
        }
        // Trend (drift) contribution for universal kriging.
        if let (Some(trend_coeffs), Some(_basis_functions)) =
            (&self.trend_coeffs, &self.basis_functions)
        {
            let trend_basis = Self::evaluate_trend_basis(&query_point, self._trend_fn)?;
            for (k, &basis_val) in trend_basis.iter().enumerate() {
                if k < trend_coeffs.len() {
                    prediction += basis_val * trend_coeffs[k];
                }
            }
        }
        values[i] = prediction;
        // Prior variance at the query point.
        let k_star_star = self.anisotropic_cov.sigma_sq;
        let mut variance_reduction = F::zero();
        // Use the Cholesky path only when the stored factor matches the
        // covariance block size: the factor from `build` covers the augmented
        // (n_points + n_basis) system, and forward substitution with it would
        // index the length-n_points k* vector out of bounds and panic.
        if let Some(cholesky) = self
            .cholesky_factor
            .as_ref()
            .filter(|c| c.shape()[0] == n_points)
        {
            // With K = L·L^T, solving L·z = k* gives k*^T K^-1 k* = z^T z.
            if let Ok(z) = EnhancedKrigingBuilder::forward_substitution(cholesky, &k_star) {
                for &z_val in z.iter() {
                    variance_reduction += z_val * z_val;
                }
            }
        } else {
            // Approximate fallback when no usable factor is available.
            for j in 0..n_points {
                variance_reduction += k_star[j] * self.weights[j];
            }
        }
        let variance = k_star_star - variance_reduction + self.anisotropic_cov.nugget;
        // Floor at the nugget so numerical cancellation never yields a
        // negative variance.
        variances[i] = variance.max(self.anisotropic_cov.nugget);
    }
    Ok(PredictionResult {
        value: values,
        variance: variances,
    })
}
pub fn predict_bayesian(
&self,
querypoints: &ArrayView2<F>,
quantile_levels: &[F],
n_samples: usize,
) -> InterpolateResult<BayesianPredictionResult<F>> {
let basic_result = self.predict(querypoints)?;
let n_query = querypoints.shape()[0];
let mut posterior_samples = Array2::zeros((n_samples, n_query));
for i in 0..n_query {
let mean = basic_result.value[i];
let std_dev = basic_result.variance[i].sqrt();
for s in 0..n_samples {
let offset =
F::from_f64((s as f64 / n_samples as f64 - 0.5) * 4.0).expect("Example failed");
posterior_samples[[s, i]] = mean + std_dev * offset;
}
}
let mut quantiles = Vec::new();
for &level in quantile_levels {
let mut quantile_values = Array1::zeros(n_query);
for i in 0..n_query {
let sample_idx = ((level
* F::from_usize(n_samples).expect("Failed to convert usize to float"))
.to_usize()
.unwrap_or(0))
.min(n_samples - 1);
quantile_values[i] = posterior_samples[[sample_idx, i]];
}
quantiles.push((level, quantile_values));
}
let log_marginal_likelihood = const_f64::<F>(-0.5)
* F::from_usize(self.points.shape()[0])
.expect("Failed to convert points count to float")
* F::from_f64(2.0 * std::f64::consts::PI)
.expect("Failed to convert 2*PI to float")
.ln();
Ok(BayesianPredictionResult {
mean: basic_result.value,
variance: basic_result.variance,
posterior_samples: Some(posterior_samples),
quantiles: Some(quantiles),
log_marginal_likelihood,
})
}
/// Returns the training point coordinates.
pub fn points(&self) -> &Array2<F> {
    &self.points
}
/// Returns the training values.
pub fn values(&self) -> &Array1<F> {
    &self.values
}
/// Returns the fitted covariance model.
pub fn anisotropic_cov(&self) -> &AnisotropicCovariance<F> {
    &self.anisotropic_cov
}
/// Returns the training covariance matrix.
pub fn cov_matrix(&self) -> &Array2<F> {
    &self.cov_matrix
}
/// Returns the Cholesky factor of the kriging system, if stored.
pub fn cholesky_factor(&self) -> Option<&Array2<F>> {
    self.cholesky_factor.as_ref()
}
/// Returns the kriging weights.
pub fn weights(&self) -> &Array1<F> {
    &self.weights
}
/// Returns the trend coefficients, if a trend was fitted.
pub fn trend_coeffs(&self) -> Option<&Array1<F>> {
    self.trend_coeffs.as_ref()
}
/// Returns the hyperparameter priors, if any.
pub fn priors(&self) -> Option<&KrigingPriors<F>> {
    self.priors.as_ref()
}
/// Returns the number of posterior samples requested.
pub fn n_samples(&self) -> usize {
    self.n_samples
}
/// Returns the trend design matrix over the training points, if any.
pub fn basis_functions(&self) -> Option<&Array2<F>> {
    self.basis_functions.as_ref()
}
/// Returns whether full predictive covariance matrices were requested.
pub fn compute_full_covariance(&self) -> bool {
    self.compute_full_covariance
}
/// Returns whether exact computation was requested.
pub fn use_exact_computation(&self) -> bool {
    self.use_exact_computation
}
}
#[allow(dead_code)]
pub fn make_enhanced_kriging<F>(
points: &ArrayView2<F>,
values: &ArrayView1<F>,
_cov_fn: CovarianceFunction,
_scale: F,
sq: F,
) -> InterpolateResult<EnhancedKriging<F>>
where
F: Float
+ FromPrimitive
+ Debug
+ Display
+ Sub<Output = F>
+ Div<Output = F>
+ Mul<Output = F>
+ Add<Output = F>
+ std::ops::AddAssign
+ std::ops::SubAssign
+ std::ops::MulAssign
+ std::ops::DivAssign
+ std::ops::RemAssign,
{
EnhancedKriging::builder()
.points(points.to_owned())
.values(values.to_owned())
.build()
}
#[allow(dead_code)]
pub fn make_universal_kriging<F>(
points: &ArrayView2<F>,
values: &ArrayView1<F>,
_cov_fn: CovarianceFunction,
_scale: F,
sq: F,
_fn: TrendFunction,
) -> InterpolateResult<EnhancedKriging<F>>
where
F: Float
+ FromPrimitive
+ Debug
+ Display
+ Sub<Output = F>
+ Div<Output = F>
+ Mul<Output = F>
+ Add<Output = F>
+ std::ops::AddAssign
+ std::ops::SubAssign
+ std::ops::MulAssign
+ std::ops::DivAssign
+ std::ops::RemAssign,
{
EnhancedKriging::builder()
.points(points.to_owned())
.values(values.to_owned())
.build()
}
#[allow(dead_code)]
pub fn make_bayesian_kriging<F>(
_points: &ArrayView2<F>,
_values: &ArrayView1<F>,
_cov_fn: CovarianceFunction,
_priors: KrigingPriors<F>,
_n_samples: usize,
) -> InterpolateResult<EnhancedKriging<F>>
where
F: Float
+ FromPrimitive
+ Debug
+ Display
+ Sub<Output = F>
+ Div<Output = F>
+ Mul<Output = F>
+ Add<Output = F>
+ std::ops::AddAssign
+ std::ops::SubAssign
+ std::ops::MulAssign
+ std::ops::DivAssign
+ std::ops::RemAssign,
{
BayesianKrigingBuilder::new().build()
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Smoke test: both builders construct with their documented defaults
    /// (constant trend; 1000 Bayesian posterior samples).
    #[test]
    fn test_creation() {
        let builder = EnhancedKrigingBuilder::<f64>::new();
        assert_eq!(builder._trend_fn, TrendFunction::Constant);
        let bayes_builder = BayesianKrigingBuilder::<f64>::new();
        assert_eq!(bayes_builder.n_samples, 1000);
    }
}
}