[−][src]Trait vikos::Teacher
Algorithms used to adapt Model coefficients
Associated Types
type Training
Contains state which changes during the training, but is not part of the expertise
Examples are the velocity of the coefficients (in stochastic gradient descent) or the number of events already learned. This may also be empty
Required methods
fn new_training(&self, model: &M) -> Self::Training
Creates an instance holding all mutable state of the algorithm
fn teach_event<Y, C>(
&self,
training: &mut Self::Training,
model: &mut M,
cost: &C,
features: &M::Features,
truth: Y
) where
C: Cost<Y, M::Target>,
Y: Copy,
Changes `model`'s coefficients so they minimize the cost function (hopefully)
Implementors
impl<M> Teacher<M> for Adagard where
M: Model,
M::Target: Vector,
[src]
type Training = Vec<f64>
fn new_training(&self, model: &M) -> Vec<f64> | [src] |
fn teach_event<Y, C>( | [src] |
impl<M> Teacher<M> for GradientDescent where
M: Model,
M::Target: Vector,
[src]
type Training = ()
fn new_training(&self, _: &M) | [src] |
fn teach_event<Y, C>( | [src] |
impl<M> Teacher<M> for GradientDescentAl where
M: Model,
M::Target: Vector,
[src]
type Training = usize
fn new_training(&self, _: &M) -> usize | [src] |
fn teach_event<Y, C>( | [src] |
impl<M> Teacher<M> for Momentum where
M: Model,
M::Target: Vector,
[src]
type Training = (usize, Vec<f64>)
fn new_training(&self, model: &M) -> (usize, Vec<f64>) | [src] |
fn teach_event<Y, C>( | [src] |
impl<M> Teacher<M> for Nesterov where
M: Model,
M::Target: Vector,