1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
//! Prior
//!
//! When asked to predict a value for an input that is too dissimilar to known inputs, the model will return the prior.
//! Furthermore, the process will be fitted on the residual of the prior, meaning that a good prior can significantly improve the precision of the model.
//!
//! This can be a constant but also a polynomial or any model of the data.
//! User-defined priors should implement the Prior trait.

use nalgebra::DVector;
use nalgebra::{storage::Storage, U1, Dynamic};
use crate::algebra::{SVector, SMatrix};

//---------------------------------------------------------------------------------------
// TRAIT

/// The Prior trait
///
/// A prior is the fallback value of the model for inputs far from the training data;
/// the process is fitted on the residual `output - prior(input)` (see module docs).
/// User-defined priors should implement this trait.
pub trait Prior
{
   /// Returns a default prior suitable for inputs of the given dimension.
   fn default(input_dimension: usize) -> Self;

   /// Takes an input matrix (one sample per row) and returns a vector
   /// containing one prior value per input row.
   fn prior<S: Storage<f64, Dynamic, Dynamic>>(&self, input: &SMatrix<S>) -> DVector<f64>;

   /// Optional: fits the prior on training data.
   /// The default implementation is a no-op that leaves the prior unchanged.
   fn fit<SM: Storage<f64, Dynamic, Dynamic> + Clone, SV: Storage<f64, Dynamic, U1>>(&mut self,
                                                                                     _training_inputs: &SMatrix<SM>,
                                                                                     _training_outputs: &SVector<SV>)
   {
   }
}

//---------------------------------------------------------------------------------------
// CLASSICAL PRIOR

/// The Zero prior
///
/// This prior always returns zero, regardless of the input.
#[derive(Clone, Copy, Debug)]
pub struct ZeroPrior {}

impl Prior for ZeroPrior
{
   fn default(_input_dimension: usize) -> Self
   {
      Self {}
   }

   fn prior<S: Storage<f64, Dynamic, Dynamic>>(&self, input: &SMatrix<S>) -> DVector<f64>
   {
      DVector::zeros(input.nrows())
   }
}

//-----------------------------------------------

/// The Constant prior
///
/// This prior returns a constant.
/// It can be fit to return the mean of the training data.
#[derive(Clone, Copy, Debug)] // Copy added for consistency with ZeroPrior; the struct is a single f64
pub struct ConstantPrior
{
   // constant value returned for every input row
   c: f64
}

impl ConstantPrior
{
   /// Constructs a new constant prior that returns `c` for every input.
   pub fn new(c: f64) -> Self
   {
      // field-init shorthand (clippy: redundant_field_names)
      Self { c }
   }
}

impl Prior for ConstantPrior
{
   /// Default constant prior: always returns zero.
   fn default(_input_dimension: usize) -> Self
   {
      Self::new(0.)
   }

   /// Returns a vector where every entry is the stored constant.
   fn prior<S: Storage<f64, Dynamic, Dynamic>>(&self, input: &SMatrix<S>) -> DVector<f64>
   {
      let nb_outputs = input.nrows();
      DVector::from_element(nb_outputs, self.c)
   }

   /// Fits the prior: the constant becomes the mean of the training outputs.
   fn fit<SM: Storage<f64, Dynamic, Dynamic>, SV: Storage<f64, Dynamic, U1>>(&mut self,
                                                                             _training_inputs: &SMatrix<SM>,
                                                                             training_outputs: &SVector<SV>)
   {
      self.c = training_outputs.mean();
   }
}

//-----------------------------------------------

/// The Linear prior
///
/// This prior is a linear function which can be fit on the training data.
#[derive(Clone, Debug)]
pub struct LinearPrior
{
   // per-dimension weights applied to the input columns
   weights: DVector<f64>,
   // constant offset added to every prediction
   intercept: f64
}

impl LinearPrior
{
   /// Constructs a new linear prior such that `prior = input * weights + intercept`.
   pub fn new(weights: DVector<f64>, intercept: f64) -> Self
   {
      LinearPrior { weights, intercept }
   }
}

impl Prior for LinearPrior
{
   fn default(input_dimension: usize) -> Self
   {
      Self { weights: DVector::zeros(input_dimension), intercept: 0f64 }
   }

   fn prior<S: Storage<f64, Dynamic, Dynamic>>(&self, input: &SMatrix<S>) -> DVector<f64>
   {
      let mut result = input * &self.weights;
      result.add_scalar_mut(self.intercept);
      result
   }

   /// performs a linear fit to set the value of the prior
   fn fit<SM: Storage<f64, Dynamic, Dynamic> + Clone, SV: Storage<f64, Dynamic, U1>>(&mut self,
                                                                                     training_inputs: &SMatrix<SM>,
                                                                                     training_outputs: &SVector<SV>)
   {
      // solve linear system using an SVD decomposition
      let weights = training_inputs.clone()
                                   .insert_column(0, 1.) // add constant term for non-zero intercept
                                   .svd(true, true)
                                   .solve(training_outputs, 0.)
                                   .expect("Linear prior fit : solve failed.");

      // TODO solve cannot be used with qr and full_piv_lu due to issue 667
      // https://github.com/rustsim/nalgebra/issues/667
      // TODO solve_mut fails on non-square systems with both qr and full_piv_lu due to issue 672
      // https://github.com/rustsim/nalgebra/issues/672

      // extracts weights and intercept
      self.intercept = weights[0];
      self.weights = weights.remove_row(0);
   }
}