use super::Kernel;
use crate::{KernelAdd, KernelError, KernelMul};
use rayon::prelude::*;
use std::{error::Error, ops::Add, ops::Mul};

/// Weighted squared distance Σᵢ rᵢ·(xᵢ − x'ᵢ)², where `params` holds the
/// per-dimension relevance weights rᵢ.
///
/// All three slices are assumed equally long; `zip` silently truncates to the
/// shortest, and callers (`ARD::value`) validate the lengths beforehand.
fn weighted_norm_pow(params: &[f64], x: &[f64], xprime: &[f64]) -> f64 {
    // Sequential iteration on purpose: one subtraction and one multiply per
    // element is far below rayon's scheduling overhead, so `par_iter` here
    // only added cost.
    params
        .iter()
        .zip(x)
        .zip(xprime)
        .map(|((relevance, xi), xprimei)| relevance * (xi - xprimei).powi(2))
        .sum()
}
/// Automatic Relevance Determination (ARD) kernel over `Vec<f64>` inputs:
/// k(x, x') = exp(−Σᵢ rᵢ·(xᵢ − x'ᵢ)²), with one relevance weight rᵢ per
/// input dimension. The wrapped `usize` is the expected dimensionality of
/// `x`/`x'` and hence also the number of hyperparameters.
#[derive(Clone, Debug)]
pub struct ARD(pub usize);

impl Kernel<Vec<f64>> for ARD {
    /// Number of hyperparameters: one relevance weight per input dimension.
    fn params_len(&self) -> usize {
        self.0
    }

    /// Evaluates k(x, x') = exp(−Σᵢ rᵢ·(xᵢ − x'ᵢ)²), plus a per-parameter
    /// gradient vector when `with_grad` is set (empty `Vec` otherwise).
    ///
    /// # Errors
    /// * `KernelError::ParametersLengthMismatch` — `params.len() != self.0`.
    /// * `KernelError::InvalidArgument` — `x` or `xprime` is not `self.0`-dimensional.
    fn value(
        &self,
        params: &[f64],
        x: &Vec<f64>,
        xprime: &Vec<f64>,
        with_grad: bool,
    ) -> Result<(f64, Vec<f64>), Box<dyn Error>> {
        if params.len() != self.0 {
            return Err(KernelError::ParametersLengthMismatch.into());
        }
        if x.len() != self.0 || xprime.len() != self.0 {
            return Err(KernelError::InvalidArgument.into());
        }

        // `params` is already a slice; the original `&params` added a
        // redundant level of reference that only deref-coercion papered over.
        let fx = (-weighted_norm_pow(params, x, xprime)).exp();

        let grad = if !with_grad {
            vec![]
        } else {
            // Build the gradient directly instead of allocating a zeroed Vec
            // and overwriting it in parallel — the per-element work is a
            // single subtract/square, far below rayon's scheduling overhead.
            //
            // NOTE(review): this is ∂/∂rᵢ of the *log* kernel, −(xᵢ − x'ᵢ)².
            // The gradient of k itself would carry an extra factor of `fx`
            // (i.e. −(xᵢ − x'ᵢ)²·fx). Confirm which convention the `Kernel`
            // trait expects before changing it.
            x.iter()
                .zip(xprime)
                .map(|(xi, xprimei)| -(xi - xprimei).powi(2))
                .collect()
        };

        Ok((fx, grad))
    }
}

impl<R> Add<R> for ARD
where
    R: Kernel<Vec<f64>>,
{
    type Output = KernelAdd<Self, R, Vec<f64>>;

    fn add(self, rhs: R) -> Self::Output {
        Self::Output::new(self, rhs)
    }
}

impl<R> Mul<R> for ARD
where
    R: Kernel<Vec<f64>>,
{
    type Output = KernelMul<Self, R, Vec<f64>>;

    fn mul(self, rhs: R) -> Self::Output {
        Self::Output::new(self, rhs)
    }
}