use crate::algebra::{SMatrix, SRowVector, SVector};
use nalgebra::{storage::Storage, Dyn, U1};
use std::ops::{Add, Mul};
/// Trait implemented by every kernel (covariance function) usable by the library.
///
/// A kernel maps two sample rows to a scalar similarity and exposes its tunable
/// parameters as a flat `Vec<f64>` so they can be optimized generically.
pub trait Kernel: Default
{
    /// Number of tunable parameters; must match the lengths of the vectors
    /// returned by `get_parameters` and `gradient`.
    fn nb_parameters(&self) -> usize;

    /// Whether the kernel's output can absorb a multiplicative rescaling.
    /// Implementors returning `true` must also override `rescale`.
    fn is_scalable(&self) -> bool
    {
        false
    }

    /// Multiplies the kernel's amplitude by `scale`.
    /// The default implementation always panics: either the implementor claimed
    /// scalability without overriding this method, or the caller tried to
    /// rescale a non-scalable kernel.
    fn rescale(&mut self, _scale: f64)
    {
        if self.is_scalable()
        {
            unimplemented!("Please implement the `rescale` function if you set `is_scalable` to true.")
        }
        else
        {
            panic!("You tried to rescale a Kernel that is not Scalable!")
        }
    }

    /// Evaluates the kernel on a pair of sample rows.
    fn kernel<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                    x1: &SRowVector<S1>,
                                                                    x2: &SRowVector<S2>)
                                                                    -> f64;

    /// Gradient of the kernel value with respect to each parameter,
    /// in the same order as `get_parameters`.
    fn gradient<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                      x1: &SRowVector<S1>,
                                                                      x2: &SRowVector<S2>)
                                                                      -> Vec<f64>;

    /// Returns the current parameter values (length `nb_parameters()`).
    fn get_parameters(&self) -> Vec<f64>;

    /// Overwrites the parameters; `parameters` must have length `nb_parameters()`.
    fn set_parameters(&mut self, parameters: &[f64]);

    /// Sets the parameters to heuristic values derived from the training data.
    /// The default implementation is a deliberate no-op.
    fn heuristic_fit<SM: Storage<f64, Dyn, Dyn>, SV: Storage<f64, Dyn, U1>>(&mut self,
                                                                            _training_inputs: &SMatrix<SM>,
                                                                            _training_outputs: &SVector<SV>)
    {
    }
}
/// Heuristic for the bandwidth/lengthscale parameter:
/// the mean Euclidean distance over all unordered pairs of training samples.
///
/// Returns `1.0` (the conventional default lengthscale used by the kernels'
/// `Default` impls) when there are fewer than two samples: no pairwise distance
/// exists, and the previous code divided `0. / 0.` and returned `NaN`,
/// poisoning any subsequent fit.
fn fit_bandwidth_mean<S: Storage<f64, Dyn, Dyn>>(training_inputs: &SMatrix<S>) -> f64
{
    let nb_samples = training_inputs.nrows();
    if nb_samples < 2
    {
        return 1.0;
    }
    let mut sum_distances = 0.;
    for (sample_index, sample) in training_inputs.row_iter().enumerate()
    {
        // Strict upper triangle: each unordered pair is counted exactly once.
        for sample2 in training_inputs.row_iter().skip(sample_index + 1)
        {
            sum_distances += (sample - sample2).norm();
        }
    }
    // Number of unordered pairs: n * (n - 1) / 2.
    let nb_distances = ((nb_samples * nb_samples - nb_samples) / 2) as f64;
    sum_distances / nb_distances
}
/// Heuristic for the amplitude parameter:
/// the empirical variance of the training outputs.
fn fit_amplitude_var<S: Storage<f64, Dyn, U1>>(training_outputs: &SVector<S>) -> f64
{
    training_outputs.variance()
}
/// Kernel obtained by adding the outputs of two kernels: `k(x1, x2) = k1(x1, x2) + k2(x1, x2)`.
/// Built with the `+` operator on `KernelArith` wrappers.
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "friedrich_serde", derive(serde::Deserialize, serde::Serialize))]
pub struct KernelSum<T, U>
    where T: Kernel,
          U: Kernel
{
    /// left-hand-side kernel
    k1: T,
    /// right-hand-side kernel
    k2: U
}
impl<T, U> Kernel for KernelSum<T, U>
    where T: Kernel,
          U: Kernel
{
    /// The parameters of the sum are the concatenation of both operands' parameters.
    fn nb_parameters(&self) -> usize
    {
        self.k1.nb_parameters() + self.k2.nb_parameters()
    }

    /// A sum can only be rescaled by rescaling both terms,
    /// hence both operands must be scalable.
    fn is_scalable(&self) -> bool
    {
        self.k1.is_scalable() && self.k2.is_scalable()
    }

    /// k(x1, x2) = k1(x1, x2) + k2(x1, x2)
    fn kernel<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                    x1: &SRowVector<S1>,
                                                                    x2: &SRowVector<S2>)
                                                                    -> f64
    {
        self.k1.kernel(x1, x2) + self.k2.kernel(x1, x2)
    }

    /// Concatenation `[∇k1, ∇k2]`, matching the order of `get_parameters`.
    fn gradient<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                      x1: &SRowVector<S1>,
                                                                      x2: &SRowVector<S2>)
                                                                      -> Vec<f64>
    {
        let mut gradient = self.k1.gradient(x1, x2);
        gradient.append(&mut self.k2.gradient(x1, x2));
        gradient
    }

    /// Rescales both terms (valid because `is_scalable` requires both to be scalable).
    fn rescale(&mut self, scale: f64)
    {
        self.k1.rescale(scale);
        self.k2.rescale(scale);
    }

    /// Concatenation `[params(k1), params(k2)]`.
    fn get_parameters(&self) -> Vec<f64>
    {
        let mut parameters = self.k1.get_parameters();
        parameters.append(&mut self.k2.get_parameters());
        parameters
    }

    /// Splits `parameters` at `k1.nb_parameters()` and forwards each half.
    ///
    /// BUG FIX: the original contained the mojibake `¶meters` (an encoding
    /// corruption of `&parameters`), which does not compile. The split point is
    /// also computed once, before `k1` is mutated.
    fn set_parameters(&mut self, parameters: &[f64])
    {
        let nb_parameters_k1 = self.k1.nb_parameters();
        self.k1.set_parameters(&parameters[..nb_parameters_k1]);
        self.k2.set_parameters(&parameters[nb_parameters_k1..]);
    }

    /// Forwards the heuristic fit to both operands.
    fn heuristic_fit<SM: Storage<f64, Dyn, Dyn>, SV: Storage<f64, Dyn, U1>>(&mut self,
                                                                            training_inputs: &SMatrix<SM>,
                                                                            training_outputs: &SVector<SV>)
    {
        self.k1.heuristic_fit(training_inputs, training_outputs);
        self.k2.heuristic_fit(training_inputs, training_outputs);
    }
}
impl<T: Kernel, U: Kernel> Default for KernelSum<T, U>
{
    /// Sum of both operands' default configurations.
    fn default() -> Self
    {
        KernelSum { k1: T::default(), k2: U::default() }
    }
}
/// Kernel obtained by multiplying the outputs of two kernels: `k(x1, x2) = k1(x1, x2) * k2(x1, x2)`.
/// Built with the `*` operator on `KernelArith` wrappers.
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "friedrich_serde", derive(serde::Deserialize, serde::Serialize))]
pub struct KernelProd<T, U>
    where T: Kernel,
          U: Kernel
{
    /// left-hand-side kernel
    k1: T,
    /// right-hand-side kernel
    k2: U
}
impl<T, U> Kernel for KernelProd<T, U>
    where T: Kernel,
          U: Kernel
{
    /// The parameters of the product are the concatenation of both operands' parameters.
    fn nb_parameters(&self) -> usize
    {
        self.k1.nb_parameters() + self.k2.nb_parameters()
    }

    /// A product is rescaled by rescaling a single factor,
    /// so one scalable operand suffices.
    fn is_scalable(&self) -> bool
    {
        self.k1.is_scalable() || self.k2.is_scalable()
    }

    /// k(x1, x2) = k1(x1, x2) * k2(x1, x2)
    fn kernel<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                    x1: &SRowVector<S1>,
                                                                    x2: &SRowVector<S2>)
                                                                    -> f64
    {
        self.k1.kernel(x1, x2) * self.k2.kernel(x1, x2)
    }

    /// Product rule: `[∇k1 * k2, ∇k2 * k1]`, matching the order of `get_parameters`.
    fn gradient<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                      x1: &SRowVector<S1>,
                                                                      x2: &SRowVector<S2>)
                                                                      -> Vec<f64>
    {
        let value1 = self.k1.kernel(x1, x2);
        let value2 = self.k2.kernel(x1, x2);
        let gradient1 = self.k1.gradient(x1, x2);
        let gradient2 = self.k2.gradient(x1, x2);
        gradient1.into_iter()
                 .map(|d1| d1 * value2)
                 .chain(gradient2.into_iter().map(|d2| d2 * value1))
                 .collect()
    }

    /// Rescales a single factor, preferring `k1` when it is scalable.
    /// If neither factor is scalable, `k2.rescale` falls through to the trait
    /// default, which panics — consistent with the trait's contract.
    fn rescale(&mut self, scale: f64)
    {
        if self.k1.is_scalable()
        {
            self.k1.rescale(scale);
        }
        else
        {
            self.k2.rescale(scale);
        }
    }

    /// Concatenation `[params(k1), params(k2)]`.
    fn get_parameters(&self) -> Vec<f64>
    {
        let mut parameters = self.k1.get_parameters();
        parameters.append(&mut self.k2.get_parameters());
        parameters
    }

    /// Splits `parameters` at `k1.nb_parameters()` and forwards each half.
    ///
    /// BUG FIX: the original contained the mojibake `¶meters` (an encoding
    /// corruption of `&parameters`), which does not compile. The split point is
    /// also computed once, before `k1` is mutated.
    fn set_parameters(&mut self, parameters: &[f64])
    {
        let nb_parameters_k1 = self.k1.nb_parameters();
        self.k1.set_parameters(&parameters[..nb_parameters_k1]);
        self.k2.set_parameters(&parameters[nb_parameters_k1..]);
    }

    /// Forwards the heuristic fit to both operands.
    fn heuristic_fit<SM: Storage<f64, Dyn, Dyn>, SV: Storage<f64, Dyn, U1>>(&mut self,
                                                                            training_inputs: &SMatrix<SM>,
                                                                            training_outputs: &SVector<SV>)
    {
        self.k1.heuristic_fit(training_inputs, training_outputs);
        self.k2.heuristic_fit(training_inputs, training_outputs);
    }
}
impl<T: Kernel, U: Kernel> Default for KernelProd<T, U>
{
    /// Product of both operands' default configurations.
    fn default() -> Self
    {
        KernelProd { k1: T::default(), k2: U::default() }
    }
}
/// Newtype wrapper enabling kernel composition with the `+` and `*` operators
/// (see the `Add` and `Mul` implementations), producing `KernelSum` / `KernelProd`.
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "friedrich_serde", derive(serde::Deserialize, serde::Serialize))]
pub struct KernelArith<K: Kernel>(pub K);
impl<T: Kernel, U: Kernel> Add<KernelArith<T>> for KernelArith<U>
{
type Output = KernelSum<U, T>;
fn add(self, ker: KernelArith<T>) -> KernelSum<U, T>
{
KernelSum { k1: self.0, k2: ker.0 }
}
}
impl<T: Kernel, U: Kernel> Mul<KernelArith<T>> for KernelArith<U>
{
type Output = KernelProd<U, T>;
fn mul(self, ker: KernelArith<T>) -> KernelProd<U, T>
{
KernelProd { k1: self.0, k2: ker.0 }
}
}
/// Linear kernel: the dot product of the inputs plus a constant offset `c`.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "friedrich_serde", derive(serde::Deserialize, serde::Serialize))]
pub struct Linear
{
    /// constant offset added to the dot product
    pub c: f64
}

impl Linear
{
    /// Builds a linear kernel with offset `c`.
    pub fn new(c: f64) -> Linear
    {
        Linear { c }
    }
}

impl Default for Linear
{
    /// Zero offset: the kernel reduces to a plain dot product.
    fn default() -> Linear
    {
        Linear::new(0f64)
    }
}
impl Kernel for Linear
{
    /// Single parameter: the offset `c`.
    fn nb_parameters(&self) -> usize
    {
        1
    }

    /// k(x1, x2) = <x1, x2> + c
    fn kernel<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                    x1: &SRowVector<S1>,
                                                                    x2: &SRowVector<S2>)
                                                                    -> f64
    {
        x1.dot(x2) + self.c
    }

    /// The kernel is linear in `c`, so the gradient is the constant `[1]`.
    fn gradient<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                      _x1: &SRowVector<S1>,
                                                                      _x2: &SRowVector<S2>)
                                                                      -> Vec<f64>
    {
        vec![1.]
    }

    /// Parameter order: `[c]`.
    fn get_parameters(&self) -> Vec<f64>
    {
        vec![self.c]
    }

    fn set_parameters(&mut self, parameters: &[f64])
    {
        self.c = parameters[0];
    }
}
/// Polynomial kernel: `(alpha * <x1, x2> + c)^d`.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "friedrich_serde", derive(serde::Deserialize, serde::Serialize))]
pub struct Polynomial
{
    /// slope applied to the dot product
    pub alpha: f64,
    /// constant offset
    pub c: f64,
    /// exponent
    pub d: f64
}

impl Polynomial
{
    /// Builds a polynomial kernel from a slope, an offset and an exponent.
    pub fn new(alpha: f64, c: f64, d: f64) -> Polynomial
    {
        Polynomial { alpha, c, d }
    }
}

impl Default for Polynomial
{
    /// Unit slope, zero offset and unit exponent: reduces to a plain dot product.
    fn default() -> Polynomial
    {
        Polynomial::new(1f64, 0f64, 1f64)
    }
}
impl Kernel for Polynomial
{
    /// Three parameters: `alpha`, `c` and `d`.
    fn nb_parameters(&self) -> usize
    {
        3
    }

    /// k(x1, x2) = (alpha * <x1, x2> + c)^d
    fn kernel<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                    x1: &SRowVector<S1>,
                                                                    x2: &SRowVector<S2>)
                                                                    -> f64
    {
        (self.alpha * x1.dot(x2) + self.c).powf(self.d)
    }

    /// Gradient with respect to `[alpha, c, d]`, with `base = alpha * <x1, x2> + c`:
    /// dk/dc = d * base^(d-1), dk/dalpha = <x1, x2> * dk/dc, dk/dd = ln(base) * base^d.
    fn gradient<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                      x1: &SRowVector<S1>,
                                                                      x2: &SRowVector<S2>)
                                                                      -> Vec<f64>
    {
        let dot_product = x1.dot(x2);
        let base = self.alpha * dot_product + self.c;
        let grad_c = self.d * base.powf(self.d - 1.);
        let grad_alpha = dot_product * grad_c;
        let grad_d = base.ln() * base.powf(self.d);
        vec![grad_alpha, grad_c, grad_d]
    }

    /// Parameter order: `[alpha, c, d]`.
    fn get_parameters(&self) -> Vec<f64>
    {
        vec![self.alpha, self.c, self.d]
    }

    fn set_parameters(&mut self, parameters: &[f64])
    {
        self.alpha = parameters[0];
        self.c = parameters[1];
        self.d = parameters[2];
    }
}
/// `Gaussian` is an alias for the squared-exponential kernel.
pub type Gaussian = SquaredExp;

/// Squared-exponential (RBF / Gaussian) kernel:
/// `|ampl| * exp(-‖x1 - x2‖² / (2 ls²))`.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "friedrich_serde", derive(serde::Deserialize, serde::Serialize))]
pub struct SquaredExp
{
    /// lengthscale
    pub ls: f64,
    /// amplitude
    pub ampl: f64
}

impl SquaredExp
{
    /// Builds a squared-exponential kernel from a lengthscale and an amplitude.
    pub fn new(ls: f64, ampl: f64) -> SquaredExp
    {
        SquaredExp { ls, ampl }
    }
}

impl Default for SquaredExp
{
    /// Unit lengthscale and unit amplitude.
    fn default() -> SquaredExp
    {
        SquaredExp::new(1f64, 1f64)
    }
}
impl Kernel for SquaredExp
{
fn nb_parameters(&self) -> usize
{
2
}
fn is_scalable(&self) -> bool
{
true
}
fn kernel<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
x1: &SRowVector<S1>,
x2: &SRowVector<S2>)
-> f64
{
let ampl = self.ampl.abs();
let distance_squared = (x1 - x2).norm_squared();
let x = -distance_squared / (2f64 * self.ls * self.ls);
ampl * x.exp()
}
fn gradient<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
x1: &SRowVector<S1>,
x2: &SRowVector<S2>)
-> Vec<f64>
{
let ampl = self.ampl.abs();
let distance_squared = (x1 - x2).norm_squared();
let exponential = (-distance_squared / (2f64 * self.ls * self.ls)).exp();
let grad_ls = (distance_squared * ampl * exponential) / self.ls.powi(3);
let grad_ampl = self.ampl.signum() * exponential;
vec![grad_ls, grad_ampl]
}
fn rescale(&mut self, scale: f64)
{
self.ampl *= scale;
}
fn get_parameters(&self) -> Vec<f64>
{
vec![self.ls, self.ampl]
}
fn set_parameters(&mut self, parameters: &[f64])
{
self.ls = parameters[0];
self.ampl = parameters[1];
}
fn heuristic_fit<SM: Storage<f64, Dyn, Dyn>, SV: Storage<f64, Dyn, U1>>(&mut self,
training_inputs: &SMatrix<SM>,
training_outputs: &SVector<SV>)
{
self.ls = fit_bandwidth_mean(training_inputs);
self.ampl = fit_amplitude_var(training_outputs);
}
}
/// Exponential kernel: `|ampl| * exp(-‖x1 - x2‖ / (2 ls²))`.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "friedrich_serde", derive(serde::Deserialize, serde::Serialize))]
pub struct Exponential
{
    /// lengthscale
    pub ls: f64,
    /// amplitude
    pub ampl: f64
}

impl Exponential
{
    /// Builds an exponential kernel from a lengthscale and an amplitude.
    pub fn new(ls: f64, ampl: f64) -> Exponential
    {
        Exponential { ls, ampl }
    }
}

impl Default for Exponential
{
    /// Unit lengthscale and unit amplitude.
    fn default() -> Exponential
    {
        Exponential::new(1f64, 1f64)
    }
}
impl Kernel for Exponential
{
fn nb_parameters(&self) -> usize
{
2
}
fn is_scalable(&self) -> bool
{
true
}
fn kernel<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
x1: &SRowVector<S1>,
x2: &SRowVector<S2>)
-> f64
{
let ampl = self.ampl.abs();
let distance = (x1 - x2).norm();
let x = -distance / (2f64 * self.ls * self.ls);
ampl * x.exp()
}
fn gradient<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
x1: &SRowVector<S1>,
x2: &SRowVector<S2>)
-> Vec<f64>
{
let ampl = self.ampl.abs();
let distance = (x1 - x2).norm();
let exponential = (-distance / (2f64 * self.ls * self.ls)).exp();
let grad_ls = (distance * ampl * exponential) / self.ls.powi(3);
let grad_ampl = self.ampl.signum() * exponential;
vec![grad_ls, grad_ampl]
}
fn rescale(&mut self, scale: f64)
{
self.ampl *= scale;
}
fn get_parameters(&self) -> Vec<f64>
{
vec![self.ls, self.ampl]
}
fn set_parameters(&mut self, parameters: &[f64])
{
self.ls = parameters[0];
self.ampl = parameters[1];
}
fn heuristic_fit<SM: Storage<f64, Dyn, Dyn>, SV: Storage<f64, Dyn, U1>>(&mut self,
training_inputs: &SMatrix<SM>,
training_outputs: &SVector<SV>)
{
self.ls = fit_bandwidth_mean(training_inputs);
self.ampl = fit_amplitude_var(training_outputs);
}
}
/// Matérn kernel with ν = 3/2 (the `(1 + √3 d / l) exp(-√3 d / l)` form).
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "friedrich_serde", derive(serde::Deserialize, serde::Serialize))]
pub struct Matern1
{
    /// lengthscale
    pub ls: f64,
    /// amplitude
    pub ampl: f64
}

impl Matern1
{
    /// Builds a Matérn ν = 3/2 kernel from a lengthscale and an amplitude.
    pub fn new(ls: f64, ampl: f64) -> Matern1
    {
        Matern1 { ls, ampl }
    }
}

impl Default for Matern1
{
    /// Unit lengthscale and unit amplitude.
    fn default() -> Matern1
    {
        Matern1::new(1f64, 1f64)
    }
}
impl Kernel for Matern1
{
    /// Two parameters: the lengthscale `ls` and the amplitude `ampl`.
    fn nb_parameters(&self) -> usize
    {
        2
    }

    /// The amplitude absorbs a multiplicative rescaling (see `rescale`).
    fn is_scalable(&self) -> bool
    {
        true
    }

    /// Matérn ν = 3/2:
    /// k(x1, x2) = |ampl| * (1 + x) * exp(-x), with x = √3 ‖x1 - x2‖ / |ls|.
    fn kernel<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                    x1: &SRowVector<S1>,
                                                                    x2: &SRowVector<S2>)
                                                                    -> f64
    {
        let length = self.ls.abs();
        let distance = (x1 - x2).norm();
        let scaled = (3f64).sqrt() * distance / length;
        self.ampl.abs() * (1f64 + scaled) * (-scaled).exp()
    }

    /// Gradient with respect to `[ls, ampl]`:
    /// dk/dls = 3 |ampl| d² exp(-x) / ls³, dk/dampl = sign(ampl) (1 + x) exp(-x).
    fn gradient<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                      x1: &SRowVector<S1>,
                                                                      x2: &SRowVector<S2>)
                                                                      -> Vec<f64>
    {
        let length = self.ls.abs();
        let distance = (x1 - x2).norm();
        let scaled = 3f64.sqrt() * distance / length;
        let grad_ls = (3. * self.ampl.abs() * distance.powi(2) * (-scaled).exp()) / (self.ls.powi(3));
        let grad_ampl = self.ampl.signum() * (1. + scaled) * (-scaled).exp();
        vec![grad_ls, grad_ampl]
    }

    /// Scaling multiplies the amplitude.
    fn rescale(&mut self, scale: f64)
    {
        self.ampl *= scale;
    }

    /// Parameter order: `[ls, ampl]`.
    fn get_parameters(&self) -> Vec<f64>
    {
        vec![self.ls, self.ampl]
    }

    fn set_parameters(&mut self, parameters: &[f64])
    {
        self.ls = parameters[0];
        self.ampl = parameters[1];
    }

    /// Lengthscale from the mean pairwise distance, amplitude from the output variance.
    fn heuristic_fit<SM: Storage<f64, Dyn, Dyn>, SV: Storage<f64, Dyn, U1>>(&mut self,
                                                                            training_inputs: &SMatrix<SM>,
                                                                            training_outputs: &SVector<SV>)
    {
        self.ls = fit_bandwidth_mean(training_inputs);
        self.ampl = fit_amplitude_var(training_outputs);
    }
}
/// Matérn kernel with ν = 5/2 (the `(1 + √5 d / l + 5 d² / (3 l²)) exp(-√5 d / l)` form).
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "friedrich_serde", derive(serde::Deserialize, serde::Serialize))]
pub struct Matern2
{
    /// lengthscale
    pub ls: f64,
    /// amplitude
    pub ampl: f64
}

impl Matern2
{
    /// Builds a Matérn ν = 5/2 kernel from a lengthscale and an amplitude.
    pub fn new(ls: f64, ampl: f64) -> Matern2
    {
        Matern2 { ls, ampl }
    }
}

impl Default for Matern2
{
    /// Unit lengthscale and unit amplitude.
    fn default() -> Matern2
    {
        Matern2::new(1f64, 1f64)
    }
}
impl Kernel for Matern2
{
    /// Two parameters: the lengthscale `ls` and the amplitude `ampl`.
    fn nb_parameters(&self) -> usize
    {
        2
    }

    /// The amplitude absorbs a multiplicative rescaling (see `rescale`).
    fn is_scalable(&self) -> bool
    {
        true
    }

    /// Matérn ν = 5/2:
    /// k(x1, x2) = |ampl| * (1 + x + 5 d² / (3 l²)) * exp(-x)
    /// with l = |ls|, d = ‖x1 - x2‖ and x = √5 d / l.
    fn kernel<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                    x1: &SRowVector<S1>,
                                                                    x2: &SRowVector<S2>)
                                                                    -> f64
    {
        let ampl = self.ampl.abs();
        let l = self.ls.abs();
        let distance = (x1 - x2).norm();
        let x = (5f64).sqrt() * distance / l;
        ampl * (1f64 + x + (5f64 * distance * distance) / (3f64 * l * l)) * (-x).exp()
    }

    /// Gradient with respect to `[ls, ampl]`.
    fn gradient<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                      x1: &SRowVector<S1>,
                                                                      x2: &SRowVector<S2>)
                                                                      -> Vec<f64>
    {
        let ampl = self.ampl.abs();
        let l = self.ls.abs();
        let distance = (x1 - x2).norm();
        // NOTE(review): `x` divides by `self.ls` here whereas `kernel` divides by
        // `l = |ls|`; for negative `ls` the two disagree — confirm this is intended.
        let x = (5f64).sqrt() * distance / self.ls;
        // NOTE(review): this closed form adds terms of different dimensions
        // (`2. * l / 3. + 1.` alongside distance-scaled terms) and does not
        // obviously match the analytic derivative of `kernel` with respect to
        // `ls` — verify against the Matérn 5/2 derivative before relying on it.
        let grad_ls = self.ls.signum()
                      * ampl
                      * ((2. * l / 3. + 1.)
                         + distance * 5f64.sqrt() * ((l.powi(2) / 3. + l + 1.) / l.powi(2)))
                      * (-x).exp();
        // dk/dampl: sign(ampl) times the kernel evaluated with |ampl| = 1.
        let grad_ampl =
            self.ampl.signum() * (1f64 + x + (5f64 * distance * distance) / (3f64 * l * l)) * (-x).exp();
        vec![grad_ls, grad_ampl]
    }

    /// Scaling multiplies the amplitude.
    fn rescale(&mut self, scale: f64)
    {
        self.ampl *= scale;
    }

    /// Parameter order: `[ls, ampl]` (matches `gradient` and `set_parameters`).
    fn get_parameters(&self) -> Vec<f64>
    {
        vec![self.ls, self.ampl]
    }

    fn set_parameters(&mut self, parameters: &[f64])
    {
        self.ls = parameters[0];
        self.ampl = parameters[1];
    }

    /// Lengthscale from the mean pairwise distance, amplitude from the output variance.
    fn heuristic_fit<SM: Storage<f64, Dyn, Dyn>, SV: Storage<f64, Dyn, U1>>(&mut self,
                                                                            training_inputs: &SMatrix<SM>,
                                                                            training_outputs: &SVector<SV>)
    {
        self.ls = fit_bandwidth_mean(training_inputs);
        self.ampl = fit_amplitude_var(training_outputs);
    }
}
/// Hyperbolic-tangent (sigmoid) kernel: `tanh(alpha * <x1, x2> + c)`.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "friedrich_serde", derive(serde::Deserialize, serde::Serialize))]
pub struct HyperTan
{
    /// slope applied to the dot product
    pub alpha: f64,
    /// constant offset
    pub c: f64
}

impl HyperTan
{
    /// Builds a hyperbolic-tangent kernel from a slope and an offset.
    pub fn new(alpha: f64, c: f64) -> HyperTan
    {
        HyperTan { alpha, c }
    }
}

impl Default for HyperTan
{
    /// Unit slope and zero offset.
    fn default() -> HyperTan
    {
        HyperTan::new(1f64, 0f64)
    }
}
impl Kernel for HyperTan
{
    /// Two parameters: `alpha` and `c`.
    fn nb_parameters(&self) -> usize
    {
        2
    }

    /// k(x1, x2) = tanh(alpha * <x1, x2> + c)
    fn kernel<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                    x1: &SRowVector<S1>,
                                                                    x2: &SRowVector<S2>)
                                                                    -> f64
    {
        (self.alpha * x1.dot(x2) + self.c).tanh()
    }

    /// Gradient with respect to `[alpha, c]`:
    /// dk/dc = sech²(alpha * <x1, x2> + c) = 1 / cosh²(·), dk/dalpha = <x1, x2> * dk/dc.
    fn gradient<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                      x1: &SRowVector<S1>,
                                                                      x2: &SRowVector<S2>)
                                                                      -> Vec<f64>
    {
        let dot_product = x1.dot(x2);
        let grad_c = 1. / (self.alpha * dot_product + self.c).cosh().powi(2);
        let grad_alpha = dot_product * grad_c;
        vec![grad_alpha, grad_c]
    }

    /// Parameter order: `[alpha, c]`.
    fn get_parameters(&self) -> Vec<f64>
    {
        vec![self.alpha, self.c]
    }

    fn set_parameters(&mut self, parameters: &[f64])
    {
        self.alpha = parameters[0];
        self.c = parameters[1];
    }
}
/// Multiquadric kernel: `sqrt(‖x1 - x2‖² + c²)`.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "friedrich_serde", derive(serde::Deserialize, serde::Serialize))]
pub struct Multiquadric
{
    /// constant offset under the square root
    pub c: f64
}

impl Multiquadric
{
    /// Builds a multiquadric kernel with offset `c`.
    pub fn new(c: f64) -> Multiquadric
    {
        Multiquadric { c }
    }
}

impl Default for Multiquadric
{
    /// Zero offset: the kernel reduces to the Euclidean distance.
    fn default() -> Multiquadric
    {
        Multiquadric::new(0f64)
    }
}
impl Kernel for Multiquadric
{
    /// Single parameter: the offset `c`.
    ///
    /// BUG FIX: this previously returned 2, disagreeing with `get_parameters`
    /// and `gradient` (both length 1); inside a `KernelSum`/`KernelProd` that
    /// mismatch makes the parameter slicing read out of bounds.
    fn nb_parameters(&self) -> usize
    {
        1
    }

    /// k(x1, x2) = sqrt(‖x1 - x2‖² + c²), computed robustly with `hypot`.
    ///
    /// BUG FIX: previously used `norm_squared().hypot(c)`, i.e.
    /// sqrt(‖x1 - x2‖⁴ + c²), which is not the multiquadric kernel and was
    /// inconsistent with the distance used by `gradient` below.
    fn kernel<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                    x1: &SRowVector<S1>,
                                                                    x2: &SRowVector<S2>)
                                                                    -> f64
    {
        (x1 - x2).norm().hypot(self.c)
    }

    /// dk/dc = c / sqrt(‖x1 - x2‖² + c²)
    fn gradient<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                      x1: &SRowVector<S1>,
                                                                      x2: &SRowVector<S2>)
                                                                      -> Vec<f64>
    {
        let grad_c = self.c / (x1 - x2).norm().hypot(self.c);
        vec![grad_c]
    }

    /// Parameter order: `[c]`.
    fn get_parameters(&self) -> Vec<f64>
    {
        vec![self.c]
    }

    /// BUG FIX: previously read `parameters[1]`, which is out of bounds for a
    /// one-parameter kernel; the single parameter lives at index 0.
    fn set_parameters(&mut self, parameters: &[f64])
    {
        self.c = parameters[0];
    }
}
/// Rational-quadratic kernel: `(1 + ‖x1 - x2‖² / (2 alpha ls²))^(-alpha)`.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "friedrich_serde", derive(serde::Deserialize, serde::Serialize))]
pub struct RationalQuadratic
{
    /// scale-mixture exponent
    pub alpha: f64,
    /// lengthscale
    pub ls: f64
}

impl RationalQuadratic
{
    /// Builds a rational-quadratic kernel from an exponent and a lengthscale.
    pub fn new(alpha: f64, ls: f64) -> RationalQuadratic
    {
        RationalQuadratic { alpha, ls }
    }
}

impl Default for RationalQuadratic
{
    /// Unit exponent and unit lengthscale.
    fn default() -> RationalQuadratic
    {
        RationalQuadratic::new(1f64, 1f64)
    }
}
impl Kernel for RationalQuadratic
{
    /// Two parameters: `alpha` and `ls`.
    fn nb_parameters(&self) -> usize
    {
        2
    }

    /// k(x1, x2) = (1 + d² / (2 alpha ls²))^(-alpha), with d = ‖x1 - x2‖.
    fn kernel<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                    x1: &SRowVector<S1>,
                                                                    x2: &SRowVector<S2>)
                                                                    -> f64
    {
        let distance_squared = (x1 - x2).norm_squared();
        (1f64 + distance_squared / (2f64 * self.alpha * self.ls * self.ls)).powf(-self.alpha)
    }

    /// Gradient with respect to `[alpha, ls]`.
    fn gradient<S1: Storage<f64, U1, Dyn>, S2: Storage<f64, U1, Dyn>>(&self,
                                                                      x1: &SRowVector<S1>,
                                                                      x2: &SRowVector<S2>)
                                                                      -> Vec<f64>
    {
        let l = self.ls.abs();
        let distance_squared = (x1 - x2).norm_squared();
        // NOTE(review): this closed form rewrites the kernel's base as
        // (d² + 2 l² α) / (l² α) and compensates with powers of 2^α; it is
        // presumably the analytic derivative of `kernel` with respect to
        // `alpha`, but the rewrite is intricate — verify symbolically before
        // relying on it. Note it also uses `l = |ls|` while `kernel` uses
        // `ls` directly (harmless: `ls` only appears squared).
        let grad_alpha =
            ((distance_squared + 2. * l.powi(2) * self.alpha) / (l.powi(2) * self.alpha)).powf(-self.alpha)
            * (2f64.powf(self.alpha)
               * (1.
                  - ((distance_squared + 2. * l.powi(2) * self.alpha) / (2. * l.powi(2) * self.alpha)).ln())
               - (l.powi(2) * 2f64.powf(self.alpha + 1.) * self.alpha)
                 / (distance_squared + 2. * l.powi(2) * self.alpha));
        // dk/dls = d² * (1 + d² / (2 α l²))^(-α - 1) / ls³
        // (the exact derivative of `kernel` with respect to `ls`).
        let grad_ls = distance_squared
                      * (distance_squared / (2. * self.alpha * l * l) + 1.).powf(-self.alpha - 1.)
                      / self.ls.powi(3);
        vec![grad_alpha, grad_ls]
    }

    /// Parameter order: `[alpha, ls]` (matches `gradient` and `set_parameters`).
    fn get_parameters(&self) -> Vec<f64>
    {
        vec![self.alpha, self.ls]
    }

    fn set_parameters(&mut self, parameters: &[f64])
    {
        self.alpha = parameters[0];
        self.ls = parameters[1];
    }
}