// rv/process/gaussian/kernel/seard.rs

1use super::{CovGrad, CovGradError, Kernel, KernelError};
2use nalgebra::base::constraint::{SameNumberOfColumns, ShapeConstraint};
3use nalgebra::base::storage::Storage;
4use nalgebra::{DMatrix, DVector, Dim, Matrix};
5use std::f64;
6
7#[cfg(feature = "serde1")]
8use serde::{Deserialize, Serialize};
9
/// Squared Exponential function (`SEard`) kernel
/// The distance metric here is L2 (Euclidean).
///
/// ```math
///     k(a, b) = exp(-0.5 * (a - b)' * M * (a - b))
/// ```
///
/// where `M = diag(1 / l_0^2, 1 / l_1^2, ...)` — each input dimension `k`
/// is scaled by its own length scale `l_k` (Automatic Relevance
/// Determination).
///
/// # Parameters
/// * `M` - Length scale for each dimension.
///
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde1", serde(rename_all = "snake_case"))]
pub struct SEardKernel {
    // Invariant (enforced by `new`): every element is strictly positive.
    length_scale: DVector<f64>,
}
26
27impl SEardKernel {
28    /// Create a new seard kernel with the given length scale
29    pub fn new(length_scale: DVector<f64>) -> Result<Self, KernelError> {
30        if length_scale.iter().all(|x| x > &0.0) {
31            Ok(Self { length_scale })
32        } else {
33            Err(KernelError::ParameterOutOfBounds {
34                name: "length_scale".to_string(),
35                given: *length_scale
36                    .iter()
37                    .min_by(|a, b| a.partial_cmp(b).unwrap())
38                    .unwrap(),
39                bounds: (0.0, f64::INFINITY),
40            })
41        }
42    }
43
44    /// Create a new `SEardKernel` without checking parameters
45    #[must_use]
46    pub fn new_unchecked(length_scale: DVector<f64>) -> Self {
47        Self { length_scale }
48    }
49}
50
51#[allow(clippy::many_single_char_names)]
52impl Kernel for SEardKernel {
53    fn covariance<R1, R2, C1, C2, S1, S2>(
54        &self,
55        x1: &Matrix<f64, R1, C1, S1>,
56        x2: &Matrix<f64, R2, C2, S2>,
57    ) -> DMatrix<f64>
58    where
59        R1: Dim,
60        R2: Dim,
61        C1: Dim,
62        C2: Dim,
63        S1: Storage<f64, R1, C1>,
64        S2: Storage<f64, R2, C2>,
65        ShapeConstraint: SameNumberOfColumns<C1, C2>,
66    {
67        // k(a, b) = exp(-0.5 * (a - b)' * M * (a - b))
68
69        let m = x1.nrows();
70        let n = x2.nrows();
71        let c = x1.ncols();
72
73        let mut dm: DMatrix<f64> = DMatrix::zeros(m, n);
74
75        for i in 0..m {
76            for j in 0..n {
77                let mut s = 0.0;
78                let a = x1.row(i);
79                let b = x2.row(j);
80                for k in 0..c {
81                    let term = (a[k] - b[k]) / self.length_scale[k];
82                    s += term * term;
83                }
84                dm[(i, j)] = s;
85            }
86        }
87        dm.map(|e| (-0.5 * e).exp())
88    }
89
90    fn is_stationary(&self) -> bool {
91        true
92    }
93
94    fn diag<R, C, S>(&self, x: &Matrix<f64, R, C, S>) -> DVector<f64>
95    where
96        R: Dim,
97        C: Dim,
98        S: Storage<f64, R, C>,
99    {
100        DVector::repeat(x.nrows(), 1.0)
101    }
102
103    fn parameters(&self) -> DVector<f64> {
104        DVector::from_iterator(
105            self.n_parameters(),
106            self.length_scale.iter().map(|x| x.ln()),
107        )
108    }
109
110    fn reparameterize(&self, params: &[f64]) -> Result<Self, KernelError> {
111        use std::cmp::Ordering;
112        match params.len().cmp(&self.length_scale.nrows()) {
113            Ordering::Equal => {
114                let exped: Vec<f64> = params.iter().map(|x| x.exp()).collect();
115                Ok(Self::new(DVector::from_row_slice(&exped)).unwrap())
116            }
117            Ordering::Greater => Err(KernelError::ExtraneousParameters(
118                params.len() - self.length_scale.nrows(),
119            )),
120            Ordering::Less => Err(KernelError::MissingParameters(
121                self.length_scale.nrows() - params.len(),
122            )),
123        }
124    }
125
126    fn covariance_with_gradient<R, C, S>(
127        &self,
128        x: &Matrix<f64, R, C, S>,
129    ) -> Result<(DMatrix<f64>, CovGrad), CovGradError>
130    where
131        R: Dim,
132        C: Dim,
133        S: Storage<f64, R, C>,
134    {
135        let n = x.nrows();
136
137        let cov = DMatrix::identity(n, n);
138        let mut grad = CovGrad::zeros(n, self.length_scale.nrows());
139
140        for i in 0..n {
141            for j in 0..i {
142                // Save covariance
143                let mut d2: f64 = 0.0;
144                for k in 0..x.ncols() {
145                    let a = x.row(i);
146                    let b = x.row(j);
147                    d2 += a.zip_fold(&b, 0.0_f64, |acc, c, d| {
148                        let diff = (c - d) / self.length_scale[k];
149                        diff.mul_add(diff, acc)
150                    });
151                }
152                let cov_ij = (-d2 / 2.0_f64).exp();
153
154                for k in 0..x.ncols() {
155                    // Compute effect on cov for l_k
156                    let a = x.row(i);
157                    let b = x.row(j);
158                    // M = diag(l_0^-2, l_1^-2, l_2^-2)
159                    // cov = exp((a-b)' * M * (a-b))
160                    // d/dl_k = ( -2*(a_k - b_k)^2 / l_k^3 ) * cov
161                    grad[(i, j, k)] = -2.0 * (a[k] - b[k]).powi(2) * cov_ij
162                        / self.length_scale[k].powi(3);
163                    grad[(j, i, k)] = grad[(i, j, k)];
164                }
165            }
166        }
167        Ok((cov, grad))
168    }
169
170    fn n_parameters(&self) -> usize {
171        self.length_scale.nrows()
172    }
173}