use ndarray::prelude::*;
use super::kernels::*;
/// Nadaraya-Watson kernel regression estimator.
///
/// Holds a smoothing kernel and, once fitted, the training sample
/// `(x_i, y_i)` used to form the kernel-weighted average in `predict`.
pub struct NadWatEstimator<T: RegKernel> {
// Smoothing kernel weighting each training point by its distance to the query.
kernel: T,
// Training inputs; `None` until `fit` has been called.
x_i: Option<Array1<f64>>,
// Training targets; `None` until `fit` has been called.
y_i: Option<Array1<f64>>
}
impl<T: RegKernel> NadWatEstimator<T> {
    /// Creates an unfitted estimator wrapping the given kernel.
    pub fn new(kernel: T) -> Self {
        Self { kernel, x_i: None, y_i: None }
    }

    /// Stores the training sample `(x_i, y_i)` and returns the fitted estimator.
    ///
    /// Refitting replaces any previously stored sample. (The previous
    /// implementation used `Option::get_or_insert`, which silently kept the
    /// old data on a second `fit` call — a refit was a no-op.)
    pub fn fit(mut self, x_i: &Array1<f64>, y_i: &Array1<f64>) -> Self {
        self.x_i = Some(x_i.clone());
        self.y_i = Some(y_i.clone());
        self
    }

    /// Nadaraya-Watson estimate at `x0`:
    /// `sum_i K(x0, x_i) * y_i / sum_i K(x0, x_i)`.
    ///
    /// # Panics
    /// Panics if the estimator has not been fitted.
    ///
    /// NOTE(review): if the sample is empty or every kernel weight is zero,
    /// the denominator is 0 and the result is NaN — callers should be aware.
    pub fn predict(&self, x0: f64) -> f64 {
        let x_arr = self.x_i.as_ref().expect("Regressor was not fitted.");
        let y_arr = self.y_i.as_ref().expect("Regressor was not fitted.");
        let kernel = &self.kernel;
        // Accumulate numerator and denominator in a single pass so each
        // kernel weight is evaluated only once.
        let (numerator, denom) = x_arr
            .iter()
            .zip(y_arr.iter())
            .fold((0., 0.), |(num, den), (&x, &y)| {
                let w = kernel.eval(x0, x);
                (num + w * y, den + w)
            });
        numerator / denom
    }
}
/// Smoothing-kernel estimator of a point-process intensity function,
/// averaged over multiple observed event-time sequences.
pub struct SmoothingKernelIntensity<K: RegKernel> {
// One array of event times per observed sequence.
event_times: Vec<Array1<f64>>,
// Smoothing kernel; the impl requires `RegKernelMass` so `predict`
// can compute the edge-correction mass on the observation window.
kernel: K
}
impl<K> SmoothingKernelIntensity<K>
where
    K: RegKernelMass,
{
    /// Appends the given event-time sequences to the stored data and
    /// returns the estimator (builder style).
    pub fn fit<T>(mut self, evts: Vec<T>) -> Self
    where
        T: Into<Array1<f64>>,
    {
        self.event_times.extend(evts.into_iter().map(Into::into));
        self
    }

    /// Edge-corrected kernel intensity estimate at `x0` over the
    /// observation window `[0, tmax]`, averaged across all stored sequences.
    ///
    /// NOTE(review): with no fitted sequences the average divides by zero
    /// and yields NaN.
    pub fn predict(&self, x0: f64, tmax: f64) -> f64 {
        let kernel = &self.kernel;
        // Renormalize by the kernel mass that falls inside [0, tmax] to
        // correct for truncation at the window edges.
        let edge_correct = 1. / kernel.eval_mass(x0, 0., tmax);
        // Total kernel weight of every event time across all sequences.
        let total: f64 = self
            .event_times
            .iter()
            .flat_map(|seq| seq.iter())
            .map(|&xi| kernel.eval(x0, xi))
            .sum();
        edge_correct * total / self.event_times.len() as f64
    }
}
/// Intensity estimator specialized to a nearest-neighbor (uniform) kernel.
pub type UniformKernelIntensity = SmoothingKernelIntensity<NearestNeighborKernel>;

impl UniformKernelIntensity {
    /// Creates an empty estimator whose uniform kernel has the given bandwidth.
    pub fn new(bandwidth: f64) -> Self {
        Self {
            event_times: Vec::new(),
            kernel: NearestNeighborKernel::new(bandwidth),
        }
    }
}