mod extendable_matrix;
pub use extendable_matrix::{EMatrix, EVector};
use crate::parameters::kernel::Kernel;
use nalgebra::{storage::Storage, Cholesky, DMatrix, DVector, Dyn, Matrix, ViewStorage, U1};
/// Dynamically-sized `f64` matrix, generic over its storage `S` (owned buffer or borrowed view).
pub type SMatrix<S> = Matrix<f64, Dyn, Dyn, S>;
/// Dynamically-sized `f64` row vector, generic over its storage `S`.
pub type SRowVector<S> = Matrix<f64, U1, Dyn, S>;
/// Dynamically-sized `f64` column vector, generic over its storage `S`.
pub type SVector<S> = Matrix<f64, Dyn, U1, S>;
/// Borrowed view into an `f64` matrix (does not own its data).
pub type MatrixSlice<'a> = Matrix<f64, Dyn, Dyn, ViewStorage<'a, f64, Dyn, Dyn, U1, Dyn>>;
/// Borrowed view into an `f64` column vector (does not own its data).
pub type VectorSlice<'a> = Matrix<f64, Dyn, U1, ViewStorage<'a, f64, Dyn, U1, U1, Dyn>>;
/// Computes the covariance matrix between the rows of `m1` and the rows of `m2`.
///
/// Entry `(r, c)` of the result is `kernel.kernel(m1.row(r), m2.row(c))`,
/// so the output has `m1.nrows()` rows and `m2.nrows()` columns.
pub fn make_covariance_matrix<S1: Storage<f64, Dyn, Dyn>, S2: Storage<f64, Dyn, Dyn>, K: Kernel>(
    m1: &SMatrix<S1>,
    m2: &SMatrix<S2>,
    kernel: &K)
    -> DMatrix<f64>
{
    let mut covariance = DMatrix::<f64>::zeros(m1.nrows(), m2.nrows());
    // fill column by column (same order `DMatrix::from_fn` would use)
    for (c, y) in m2.row_iter().enumerate()
    {
        for (r, x) in m1.row_iter().enumerate()
        {
            covariance[(r, c)] = kernel.kernel(&x, &y);
        }
    }
    covariance
}
/// Builds the Cholesky decomposition of the covariance matrix of `inputs`,
/// with `diagonal_noise`² added to the diagonal.
///
/// Only the lower triangle is computed (the upper triangle stays NaN);
/// nalgebra's Cholesky decomposition reads just the lower-triangular part.
///
/// # Panics
///
/// Panics if the decomposition fails. When `cholesky_epsilon` is `Some`,
/// problematic pivots are substituted with that value first, which makes
/// failure far less likely.
pub fn make_cholesky_cov_matrix<S: Storage<f64, Dyn, Dyn>, K: Kernel>(inputs: &SMatrix<S>,
                                                                      kernel: &K,
                                                                      diagonal_noise: f64,
                                                                      cholesky_epsilon: Option<f64>)
                                                                      -> Cholesky<f64, Dyn>
{
    let nb_rows = inputs.nrows();
    let mut covariance = DMatrix::<f64>::from_element(nb_rows, nb_rows, f64::NAN);
    for (j, x) in inputs.row_iter().enumerate()
    {
        // fill column j from the diagonal downward (lower triangle only)
        for (i, y) in inputs.row_iter().enumerate().skip(j)
        {
            covariance[(i, j)] = kernel.kernel(&x, &y);
        }
        // regularize the diagonal with the observation-noise variance
        covariance[(j, j)] += diagonal_noise * diagonal_noise;
    }
    match cholesky_epsilon
    {
        Some(cholesky_epsilon) =>
        {
            Cholesky::new_with_substitute(covariance, cholesky_epsilon).unwrap_or_else(|| {
                panic!("Cholesky decomposition failed even though we used `cholesky_epsilon` value of {cholesky_epsilon}")
            })
        }
        None => covariance.cholesky().expect(
            "Cholesky decomposition failed, consider setting `cholesky_epsilon` via `GaussianProcessBuilder`",
        ),
    }
}
/// Grows an existing Cholesky decomposition to cover the last `nb_new_inputs`
/// rows of `all_inputs`, one column insertion at a time.
///
/// NOTE(review): assumes `covmat_cholesky` currently decomposes the covariance
/// of the first `all_inputs.nrows() - nb_new_inputs` rows — confirm at call sites.
pub fn add_rows_cholesky_cov_matrix<S: Storage<f64, Dyn, Dyn>, K: Kernel>(covmat_cholesky: &mut Cholesky<f64, Dyn>,
                                                                          all_inputs: &SMatrix<S>,
                                                                          nb_new_inputs: usize,
                                                                          kernel: &K,
                                                                          diagonal_noise: f64)
{
    let noise_variance = diagonal_noise * diagonal_noise;
    let nb_old_inputs = all_inputs.nrows() - nb_new_inputs;
    let new_inputs = all_inputs.rows(nb_old_inputs, nb_new_inputs);
    for (offset, new_row) in new_inputs.row_iter().enumerate()
    {
        let insert_index = nb_old_inputs + offset;
        // covariance of the new point with every point up to and including itself
        let mut column = DVector::<f64>::from_fn(insert_index + 1, |i, _| {
            kernel.kernel(&all_inputs.row(i), &new_row)
        });
        column[insert_index] += noise_variance;
        // rank-one style update: rebuild the decomposition with the new column inserted
        *covmat_cholesky = covmat_cholesky.insert_column(insert_index, column);
    }
}
/// Computes, for each kernel parameter, the matrix of partial derivatives of
/// the covariance matrix of `inputs` with respect to that parameter.
///
/// Returns one symmetric matrix per kernel parameter; since the covariance is
/// symmetric, each gradient entry is computed once and mirrored.
pub fn make_gradient_covariance_matrices<S: Storage<f64, Dyn, Dyn>, K: Kernel>(inputs: &SMatrix<S>,
                                                                               kernel: &K)
                                                                               -> Vec<DMatrix<f64>>
{
    let nb_rows = inputs.nrows();
    let nan_matrix = DMatrix::<f64>::from_element(nb_rows, nb_rows, f64::NAN);
    let mut gradient_matrices = vec![nan_matrix; kernel.nb_parameters()];
    for (j, x) in inputs.row_iter().enumerate()
    {
        // lower triangle (including the diagonal), mirrored into the upper one
        for (i, y) in inputs.row_iter().enumerate().skip(j)
        {
            for (matrix, &gradient) in gradient_matrices.iter_mut().zip(kernel.gradient(&x, &y).iter())
            {
                matrix[(i, j)] = gradient;
                matrix[(j, i)] = gradient;
            }
        }
    }
    gradient_matrices
}