pointprocesses/estimators/
nadarayawatson.rs

//! Implement the Nadaraya-Watson non-parametric regression estimator.
//! Useful for estimating the intensity of a non-homogeneous Poisson process.
use ndarray::prelude::*;

use super::kernels::*;


/// Nadaraya-Watson nonparametric estimator for functions using
/// a weighted kernel average.
/// The predictor at a point $x_0$ is given by:
/// $$
/// \hat y_0 = 
/// \frac{\sum_{i=1}^p K_h(x_i, x_0) y_i}
/// {\sum_{i=1}^p K_h(x_i, x_0)}
/// $$
pub struct NadWatEstimator<T: RegKernel> {
    // Smoothing kernel providing the weights K_h.
    kernel: T,
    // Sample abscissae; `None` until `fit` has been called.
    x_i: Option<Array1<f64>>,
    // Sample ordinates; `None` until `fit` has been called.
    y_i: Option<Array1<f64>>
}


23impl<T: RegKernel> NadWatEstimator<T> {
24    /// Return a new Nadaraya-Watson estimator.
25    pub fn new(kernel: T) -> Self {
26        Self { kernel, x_i: None, y_i: None }
27    }
28
29    pub fn fit(mut self, x_i: &Array1<f64>, y_i: &Array1<f64>) -> Self {
30        self.x_i.get_or_insert(x_i.clone());
31        self.y_i.get_or_insert(y_i.clone());
32        self
33    }
34
35    /// Perform prediction at `x0`.
36    pub fn predict(&self, x0: f64) -> f64
37    {
38        let x_arr: &Array1<f64> = self.x_i.as_ref().expect("Regressor was not fitted.");
39        let y_arr: &Array1<f64> = self.y_i.as_ref().expect("Regressor was not fitted.");
40
41        let kernel = &self.kernel;
42
43        let zipped_i = x_arr.iter().zip(y_arr.iter());
44        let numerator = zipped_i.fold(
45            0., |acc, (x, y)| {
46                acc + kernel.eval(x0, *x) * y
47            }
48        );
49        let denom = x_arr.iter().fold(
50            0., |acc, x| {
51                acc + kernel.eval(x0, *x)
52            }
53        );
54
55        numerator / denom
56    }
57}
58
/// Estimate the intensity function of an event sequence under a
/// variable Poisson model using a kernel smoother 
/// (see _A kernel method for smoothing point process data_ by P. Diggle).
/// The regressor is given by
/// $$
///     \hat\lambda(t) = e_h(t)^{-1}
///     \sum_i K_h(t - t_i)
/// $$
/// where $e_h(t) = \int_0^T K_h(t - u)\\, du$ is an edge-correction term.
pub struct SmoothingKernelIntensity<K: RegKernel> {
    // One array of event times per observed realization of the process;
    // the predicted intensity is averaged over these sequences.
    event_times: Vec<Array1<f64>>,
    // Smoothing kernel providing the weights K_h.
    kernel: K
}


74impl<K> SmoothingKernelIntensity<K> 
75where K: RegKernelMass {
76    pub fn fit<T>(mut self, evts: Vec<T>) -> Self
77    where T: Into<Array1<f64>> {
78        self.event_times.reserve(evts.len());
79        for e in evts {
80            self.event_times.push(e.into())
81        }
82
83        self
84    }
85
86    pub fn predict(&self, x0: f64, tmax: f64) -> f64 {
87        let kernel = &self.kernel;
88
89        let edge_correct = 1. /  kernel.eval_mass(x0, 0., tmax);
90        let num_seq = self.event_times.len();
91
92        let sum: f64 = self.event_times.iter()
93            .map(|seq| {
94                seq.into_iter()
95                    .fold(0., |acc, xi| {
96                        acc + kernel.eval(x0, *xi)
97                    })
98            }).sum();
99
100
101        edge_correct * sum / num_seq as f64
102    }
103
104}
106/// Intensity kernel estimator using a uniform kernel.
107pub type UniformKernelIntensity = SmoothingKernelIntensity<NearestNeighborKernel>;
108
109impl UniformKernelIntensity {
110    pub fn new(bandwidth: f64) -> Self {
111        let kernel = NearestNeighborKernel::new(bandwidth);
112        let event_times = Vec::new();
113        Self {
114            event_times,
115            kernel
116        }
117    }
118}