smartcore_proba/svm/svr.rs

//! # Epsilon-Support Vector Regression.
//!
//! Support Vector Regression (SVR) is a popular algorithm for regression that uses the same principle as SVM.
//!
//! Just like [SVC](../svc/index.html), SVR finds an optimal decision boundary, \\(f(x)\\), that separates all training instances with the largest margin.
//! Unlike SVC, in \\(\epsilon\\)-SVR regression the goal is to find a function \\(f(x)\\) that has at most \\(\epsilon\\) deviation from the
//! known targets \\(y_i\\) for all the training data. To find this function, we need to solve this optimization problem:
//!
//! \\[\underset{w, \zeta}{minimize} \space \space \frac{1}{2} \lVert \vec{w} \rVert^2 + C\sum_{i=1}^m \zeta_i \\]
//!
//! subject to:
//!
//! \\[y_i - \langle\vec{w}, \vec{x}_i \rangle - b \leq \epsilon + \zeta_i \\]
//! \\[\langle\vec{w}, \vec{x}_i \rangle + b - y_i \leq \epsilon + \zeta_i \\]
//! \\[\zeta_i \geq 0 \space for \space any \space i = 1, ... , m\\]
//!
//! where \\( m \\) is the number of training samples, \\( y_i \\) is a target value and \\(\langle\vec{w}, \vec{x}_i \rangle + b\\) is the decision boundary.
//!
//! The parameter `C` > 0 determines the trade-off between the flatness of \\(f(x)\\) and the amount up to which deviations larger than \\(\epsilon\\) are tolerated.
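//!
//! Equivalently, each deviation is penalized through the \\(\epsilon\\)-insensitive loss
//! \\(\max(0, \lvert y_i - f(x_i) \rvert - \epsilon)\\), which is zero for points inside the \\(\epsilon\\)-tube.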
//!
//! Example:
//!
//! ```
//! use smartcore::linalg::basic::matrix::DenseMatrix;
//! use smartcore::svm::Kernels;
//! use smartcore::svm::svr::{SVR, SVRParameters};
//!
//! // Longley dataset (https://www.statsmodels.org/stable/datasets/generated/longley.html)
//! let x = DenseMatrix::from_2d_array(&[
//!               &[234.289, 235.6, 159.0, 107.608, 1947., 60.323],
//!               &[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
//!               &[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
//!               &[284.599, 335.1, 165.0, 110.929, 1950., 61.187],
//!               &[328.975, 209.9, 309.9, 112.075, 1951., 63.221],
//!               &[346.999, 193.2, 359.4, 113.270, 1952., 63.639],
//!               &[365.385, 187.0, 354.7, 115.094, 1953., 64.989],
//!               &[363.112, 357.8, 335.0, 116.219, 1954., 63.761],
//!               &[397.469, 290.4, 304.8, 117.388, 1955., 66.019],
//!               &[419.180, 282.2, 285.7, 118.734, 1956., 67.857],
//!               &[442.769, 293.6, 279.8, 120.445, 1957., 68.169],
//!               &[444.546, 468.1, 263.7, 121.950, 1958., 66.513],
//!               &[482.704, 381.3, 255.2, 123.366, 1959., 68.655],
//!               &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
//!               &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
//!               &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
//!          ]).unwrap();
//!
//! let y: Vec<f64> = vec![83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0,
//!           100.0, 101.2, 104.6, 108.4, 110.8, 112.6, 114.2, 115.7, 116.9];
//!
//! let knl = Kernels::linear();
//! let params = &SVRParameters::default().with_eps(2.0).with_c(10.0).with_kernel(knl);
//! let svr = SVR::fit(&x, &y, params).unwrap();
//!
//! let y_hat = svr.predict(&x).unwrap();
//! ```
//!
//! ## References:
//!
//! * ["Support Vector Machines", Kowalczyk A., 2017](https://www.svm-tutorial.com/2017/10/support-vector-machines-succinctly-released/)
//! * ["A Fast Algorithm for Training Support Vector Machines", Platt J.C., 1998](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf)
//! * ["Working Set Selection Using Second Order Information for Training Support Vector Machines", Rong-En Fan et al., 2005](https://www.jmlr.org/papers/volume6/fan05a/fan05a.pdf)
//! * ["A tutorial on support vector regression", Smola A.J., Scholkopf B., 2003](https://alex.smola.org/papers/2004/SmoSch04.pdf)
//!
//! <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
//! <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>

use std::cell::{Ref, RefCell};
use std::fmt::Debug;
use std::marker::PhantomData;

use num::Bounded;
use num_traits::float::Float;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};

use crate::api::{PredictorBorrow, SupervisedEstimatorBorrow};
use crate::error::{Failed, FailedError};
use crate::linalg::basic::arrays::{Array1, Array2, MutArray};
use crate::numbers::basenum::Number;
use crate::numbers::floatnum::FloatNumber;
use crate::svm::Kernel;

#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug)]
/// SVR Parameters
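///
/// A minimal builder sketch (the kernel and values below are illustrative, not defaults):
/// ```
/// use smartcore::svm::Kernels;
/// use smartcore::svm::svr::SVRParameters;
///
/// let params = SVRParameters::<f64>::default()
///     .with_eps(0.5)
///     .with_c(1.0)
///     .with_tol(1e-3)
///     .with_kernel(Kernels::linear());
/// ```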
pub struct SVRParameters<T: Number + FloatNumber + PartialOrd> {
    /// Epsilon in the epsilon-SVR model.
    pub eps: T,
    /// Regularization parameter.
    pub c: T,
    /// Tolerance for stopping criterion.
    pub tol: T,
    /// The kernel function.
    #[cfg_attr(
        all(feature = "serde", target_arch = "wasm32"),
        serde(skip_serializing, skip_deserializing)
    )]
    pub kernel: Option<Box<dyn Kernel>>,
}

#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug)]
/// Epsilon-Support Vector Regression
pub struct SVR<'a, T: Number + FloatNumber + PartialOrd, X: Array2<T>, Y: Array1<T>> {
    instances: Option<Vec<Vec<f64>>>,
    #[cfg_attr(feature = "serde", serde(skip_deserializing))]
    parameters: Option<&'a SVRParameters<T>>,
    w: Option<Vec<T>>,
    b: T,
    phantom: PhantomData<(X, Y)>,
}

#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug)]
struct SupportVector<T> {
    index: usize,
    x: Vec<f64>,
    alpha: [T; 2],
    grad: [T; 2],
    k: f64,
}

/// Sequential Minimal Optimization algorithm
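///
/// Each training instance carries two dual variables (`alpha[0]`, `alpha[1]`). On every
/// iteration a maximal-violating pair is selected (seeded by `find_min_max_gradient` and
/// refined by a second-order gain criterion) and updated analytically, until
/// `gmax - gmin` drops below `tol` or `max_iterations` is reached.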
struct Optimizer<'a, T: Number + FloatNumber + PartialOrd> {
    tol: T,
    c: T,
    parameters: Option<&'a SVRParameters<T>>,
    svmin: usize,
    svmax: usize,
    gmin: T,
    gmax: T,
    gminindex: usize,
    gmaxindex: usize,
    tau: T,
    sv: Vec<SupportVector<T>>,
    /// avoid infinite loop if SMO does not converge
    max_iterations: usize,
}

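/// Lazily computed rows of the kernel matrix: entry `i` holds `K(x_i, x_j)` for all `j`
/// and is filled on first access (see `Cache::get`).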
struct Cache<T: Clone> {
    data: Vec<RefCell<Option<Vec<T>>>>,
}

impl<T: Number + FloatNumber + PartialOrd> SVRParameters<T> {
    /// Epsilon in the epsilon-SVR model.
    pub fn with_eps(mut self, eps: T) -> Self {
        self.eps = eps;
        self
    }
    /// Regularization parameter.
    pub fn with_c(mut self, c: T) -> Self {
        self.c = c;
        self
    }
    /// Tolerance for stopping criterion.
    pub fn with_tol(mut self, tol: T) -> Self {
        self.tol = tol;
        self
    }
    /// The kernel function.
    pub fn with_kernel<K: Kernel + 'static>(mut self, kernel: K) -> Self {
        self.kernel = Some(Box::new(kernel));
        self
    }
}

impl<T: Number + FloatNumber + PartialOrd> Default for SVRParameters<T> {
    fn default() -> Self {
        SVRParameters {
            eps: T::from_f64(0.1).unwrap(),
            c: T::one(),
            tol: T::from_f64(1e-3).unwrap(),
            kernel: Option::None,
        }
    }
}

impl<'a, T: Number + FloatNumber + PartialOrd, X: Array2<T>, Y: Array1<T>>
    SupervisedEstimatorBorrow<'a, X, Y, SVRParameters<T>> for SVR<'a, T, X, Y>
{
    fn new() -> Self {
        Self {
            instances: Option::None,
            parameters: Option::None,
            w: Option::None,
            b: T::zero(),
            phantom: PhantomData,
        }
    }
    fn fit(x: &'a X, y: &'a Y, parameters: &'a SVRParameters<T>) -> Result<Self, Failed> {
        SVR::fit(x, y, parameters)
    }
}

impl<'a, T: Number + FloatNumber + PartialOrd, X: Array2<T>, Y: Array1<T>> PredictorBorrow<'a, X, T>
    for SVR<'a, T, X, Y>
{
    fn predict(&self, x: &'a X) -> Result<Vec<T>, Failed> {
        self.predict(x)
    }
}

impl<'a, T: Number + FloatNumber + PartialOrd, X: Array2<T>, Y: Array1<T>> SVR<'a, T, X, Y> {
    /// Fits SVR to your data.
    /// * `x` - _NxM_ matrix with _N_ observations and _M_ features in each observation.
    /// * `y` - target values
    /// * `parameters` - parameters of the algorithm; use `Default::default()` for default values.
    ///   A kernel must be set with `with_kernel()`, otherwise `fit` returns a `ParametersError`.
    pub fn fit(
        x: &'a X,
        y: &'a Y,
        parameters: &'a SVRParameters<T>,
    ) -> Result<SVR<'a, T, X, Y>, Failed> {
        let (n, _) = x.shape();

        if n != y.shape() {
            return Err(Failed::fit(
                "Number of rows of X doesn't match number of rows of Y",
            ));
        }

        if parameters.kernel.is_none() {
            return Err(Failed::because(
                FailedError::ParametersError,
                "kernel should be defined at this point, please use `with_kernel()`",
            ));
        }

        let optimizer: Optimizer<'a, T> = Optimizer::new(x, y, parameters);

        let (support_vectors, weight, b) = optimizer.smo();

        Ok(SVR {
            instances: Some(support_vectors),
            parameters: Some(parameters),
            w: Some(weight),
            b,
            phantom: PhantomData,
        })
    }

    /// Predict target values from `x`
    /// * `x` - _KxM_ data where _K_ is number of observations and _M_ is number of features.
    pub fn predict(&self, x: &'a X) -> Result<Vec<T>, Failed> {
        let (n, m) = x.shape();

        let mut y_hat: Vec<T> = Vec::<T>::zeros(n);

        // reuse a single row buffer across all rows
        let mut x_i = Vec::with_capacity(m);
        for i in 0..n {
            x_i.clear();
            x_i.extend(x.get_row(i).iterator(0).copied());
            y_hat.set(i, self.predict_for_row(&x_i));
        }

        Ok(y_hat)
    }

    /// Evaluates the decision function f(x) = b + sum_i(w_i * K(x, sv_i)) for one row.
    pub(crate) fn predict_for_row(&self, x: &[T]) -> T {
        let mut f = self.b;

        let xi: Vec<_> = x.iter().map(|e| e.to_f64().unwrap()).collect();
        for i in 0..self.instances.as_ref().unwrap().len() {
            f += self.w.as_ref().unwrap()[i]
                * T::from(
                    self.parameters
                        .as_ref()
                        .unwrap()
                        .kernel
                        .as_ref()
                        .unwrap()
                        .apply(&xi, &self.instances.as_ref().unwrap()[i])
                        .unwrap(),
                )
                .unwrap()
        }

        T::from(f).unwrap()
    }
}

impl<'a, T: Number + FloatNumber + PartialOrd, X: Array2<T>, Y: Array1<T>> PartialEq
    for SVR<'a, T, X, Y>
{
    fn eq(&self, other: &Self) -> bool {
        if (self.b - other.b).abs() > T::epsilon() * T::two()
            || self.w.as_ref().unwrap().len() != other.w.as_ref().unwrap().len()
            || self.instances.as_ref().unwrap().len() != other.instances.as_ref().unwrap().len()
        {
            false
        } else {
            for i in 0..self.w.as_ref().unwrap().len() {
                if (self.w.as_ref().unwrap()[i] - other.w.as_ref().unwrap()[i]).abs() > T::epsilon()
                {
                    return false;
                }
            }
            for i in 0..self.instances.as_ref().unwrap().len() {
                if !self.instances.as_ref().unwrap()[i]
                    .approximate_eq(&other.instances.as_ref().unwrap()[i], f64::epsilon())
                {
                    return false;
                }
            }
            true
        }
    }
}

impl<T: Number + FloatNumber + PartialOrd> SupportVector<T> {
    fn new(i: usize, x: Vec<f64>, y: T, eps: T, k: f64) -> SupportVector<T> {
        SupportVector {
            index: i,
            x,
            grad: [eps + y, eps - y],
            k,
            alpha: [T::zero(), T::zero()],
        }
    }
}

impl<'a, T: Number + FloatNumber + PartialOrd> Optimizer<'a, T> {
    fn new<X: Array2<T>, Y: Array1<T>>(
        x: &'a X,
        y: &'a Y,
        parameters: &'a SVRParameters<T>,
    ) -> Optimizer<'a, T> {
        let (n, m) = x.shape();

        let mut support_vectors: Vec<SupportVector<T>> = Vec::with_capacity(n);

        // initialize each support vector with its self-kernel value k = K(x_i, x_i)
        for i in 0..n {
            let x_i = Vec::from_iterator(x.get_row(i).iterator(0).map(|e| e.to_f64().unwrap()), m);
            let k = parameters.kernel.as_ref().unwrap().apply(&x_i, &x_i).unwrap();
            support_vectors.push(SupportVector::<T>::new(
                i,
                x_i,
                T::from(*y.get(i)).unwrap(),
                parameters.eps,
                k,
            ));
        }

        Optimizer {
            tol: parameters.tol,
            c: parameters.c,
            parameters: Some(parameters),
            svmin: 0,
            svmax: 0,
            gmin: <T as Bounded>::max_value(),
            gmax: <T as Bounded>::min_value(),
            gminindex: 0,
            gmaxindex: 0,
            tau: T::from_f64(1e-12).unwrap(),
            sv: support_vectors,
            max_iterations: 49999,
        }
    }

    /// Finds the maximal violating pair: the smallest and largest feasible gradients over
    /// all dual variables, used both as the working set seed and as the stopping criterion.
    fn find_min_max_gradient(&mut self) {
        self.gmin = <T as Bounded>::max_value();
        self.gmax = <T as Bounded>::min_value();

        for i in 0..self.sv.len() {
            let v = &self.sv[i];
            let g = -v.grad[0];
            let a = v.alpha[0];
            if g < self.gmin && a > T::zero() {
                self.gmin = g;
                self.gminindex = 0;
                self.svmin = i;
            }
            if g > self.gmax && a < self.c {
                self.gmax = g;
                self.gmaxindex = 0;
                self.svmax = i;
            }

            let g = v.grad[1];
            let a = v.alpha[1];
            if g < self.gmin && a < self.c {
                self.gmin = g;
                self.gminindex = 1;
                self.svmin = i;
            }
            if g > self.gmax && a > T::zero() {
                self.gmax = g;
                self.gmaxindex = 1;
                self.svmax = i;
            }
        }
    }

    /// Solves the quadratic programming (QP) problem that arises during training of the
    /// support vector machine (SVM).
    /// Returns:
    /// * support vectors (computed with f64)
    /// * hyperplane parameters: w and b (computed with T)
    fn smo(mut self) -> (Vec<Vec<f64>>, Vec<T>, T) {
        let cache: Cache<f64> = Cache::new(self.sv.len());
        let mut n_iteration = 0usize;
        self.find_min_max_gradient();

        while self.gmax - self.gmin > self.tol {
            if n_iteration > self.max_iterations {
                break;
            }
            // first variable of the working set: the maximal-gradient entry
            let v1 = self.svmax;
            let i = self.gmaxindex;
            let old_alpha_i = self.sv[v1].alpha[i];

            let k1 = cache.get(self.sv[v1].index, || {
                self.sv
                    .iter()
                    .map(|vi| {
                        self.parameters
                            .unwrap()
                            .kernel
                            .as_ref()
                            .unwrap()
                            .apply(&self.sv[v1].x, &vi.x)
                            .unwrap()
                    })
                    .collect()
            });

            // second variable: start from the minimal-gradient entry, then refine it with
            // second-order (maximal gain) working set selection
            let mut v2 = self.svmin;
            let mut j = self.gminindex;
            let mut old_alpha_j = self.sv[v2].alpha[j];

            let mut best = T::zero();
            let gi = if i == 0 {
                -self.sv[v1].grad[0]
            } else {
                self.sv[v1].grad[1]
            };
            for jj in 0..self.sv.len() {
                let v = &self.sv[jj];
                let mut curv = self.sv[v1].k + v.k - 2f64 * k1[v.index];
                if curv <= 0f64 {
                    curv = self.tau.to_f64().unwrap();
                }

                let mut gj = -v.grad[0];
                if v.alpha[0] > T::zero() && gj < gi {
                    let gain = -((gi - gj) * (gi - gj)) / T::from(curv).unwrap();
                    if gain < best {
                        best = gain;
                        v2 = jj;
                        j = 0;
                        old_alpha_j = self.sv[v2].alpha[0];
                    }
                }

                gj = v.grad[1];
                if v.alpha[1] < self.c && gj < gi {
                    let gain = -((gi - gj) * (gi - gj)) / T::from(curv).unwrap();
                    if gain < best {
                        best = gain;
                        v2 = jj;
                        j = 1;
                        old_alpha_j = self.sv[v2].alpha[1];
                    }
                }
            }

            let k2 = cache.get(self.sv[v2].index, || {
                self.sv
                    .iter()
                    .map(|vi| {
                        self.parameters
                            .unwrap()
                            .kernel
                            .as_ref()
                            .unwrap()
                            .apply(&self.sv[v2].x, &vi.x)
                            .unwrap()
                    })
                    .collect()
            });

            let mut curv = self.sv[v1].k + self.sv[v2].k - 2f64 * k1[self.sv[v2].index];
            if curv <= 0f64 {
                curv = self.tau.to_f64().unwrap();
            }

            // analytically update the selected pair, clipping each alpha to the box [0, C]
            if i != j {
                let delta = (-self.sv[v1].grad[i] - self.sv[v2].grad[j]) / T::from(curv).unwrap();
                let diff = self.sv[v1].alpha[i] - self.sv[v2].alpha[j];
                self.sv[v1].alpha[i] += delta;
                self.sv[v2].alpha[j] += delta;

                if diff > T::zero() {
                    if self.sv[v2].alpha[j] < T::zero() {
                        self.sv[v2].alpha[j] = T::zero();
                        self.sv[v1].alpha[i] = diff;
                    }
                } else if self.sv[v1].alpha[i] < T::zero() {
                    self.sv[v1].alpha[i] = T::zero();
                    self.sv[v2].alpha[j] = -diff;
                }

                if diff > T::zero() {
                    if self.sv[v1].alpha[i] > self.c {
                        self.sv[v1].alpha[i] = self.c;
                        self.sv[v2].alpha[j] = self.c - diff;
                    }
                } else if self.sv[v2].alpha[j] > self.c {
                    self.sv[v2].alpha[j] = self.c;
                    self.sv[v1].alpha[i] = self.c + diff;
                }
            } else {
                let delta = (self.sv[v1].grad[i] - self.sv[v2].grad[j]) / T::from(curv).unwrap();
                let sum = self.sv[v1].alpha[i] + self.sv[v2].alpha[j];
                self.sv[v1].alpha[i] -= delta;
                self.sv[v2].alpha[j] += delta;

                if sum > self.c {
                    if self.sv[v1].alpha[i] > self.c {
                        self.sv[v1].alpha[i] = self.c;
                        self.sv[v2].alpha[j] = sum - self.c;
                    }
                } else if self.sv[v2].alpha[j] < T::zero() {
                    self.sv[v2].alpha[j] = T::zero();
                    self.sv[v1].alpha[i] = sum;
                }

                if sum > self.c {
                    if self.sv[v2].alpha[j] > self.c {
                        self.sv[v2].alpha[j] = self.c;
                        self.sv[v1].alpha[i] = sum - self.c;
                    }
                } else if self.sv[v1].alpha[i] < T::zero() {
                    self.sv[v1].alpha[i] = T::zero();
                    self.sv[v2].alpha[j] = sum;
                }
            }

            let delta_alpha_i = self.sv[v1].alpha[i] - old_alpha_i;
            let delta_alpha_j = self.sv[v2].alpha[j] - old_alpha_j;

            // si, sj are -1 for index 0 and +1 for index 1
            let si = T::two() * T::from_usize(i).unwrap() - T::one();
            let sj = T::two() * T::from_usize(j).unwrap() - T::one();
            for v in self.sv.iter_mut() {
                v.grad[0] -= si * T::from(k1[v.index]).unwrap() * delta_alpha_i
                    + sj * T::from(k2[v.index]).unwrap() * delta_alpha_j;
                v.grad[1] += si * T::from(k1[v.index]).unwrap() * delta_alpha_i
                    + sj * T::from(k2[v.index]).unwrap() * delta_alpha_j;
            }

            self.find_min_max_gradient();
            n_iteration += 1;
        }

        let b = -(self.gmax + self.gmin) / T::two();

        let mut support_vectors: Vec<Vec<f64>> = Vec::new();
        let mut w: Vec<T> = Vec::new();

        // keep only instances with nonzero net weight alpha[1] - alpha[0]
        for v in self.sv {
            if v.alpha[0] != v.alpha[1] {
                support_vectors.push(v.x);
                w.push(v.alpha[1] - v.alpha[0]);
            }
        }

        (support_vectors, w, b)
    }
}

impl<T: Clone> Cache<T> {
    fn new(n: usize) -> Cache<T> {
        Cache {
            data: vec![RefCell::new(None); n],
        }
    }

    fn get<F: Fn() -> Vec<T>>(&self, i: usize, or: F) -> Ref<'_, Vec<T>> {
        // compute and memoize the row on first access
        if self.data[i].borrow().is_none() {
            self.data[i].replace(Some(or()));
        }
        Ref::map(self.data[i].borrow(), |v| v.as_ref().unwrap())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::linalg::basic::matrix::DenseMatrix;
    use crate::metrics::mean_squared_error;
    use crate::svm::Kernels;

    // #[test]
    // fn search_parameters() {
    //     let parameters: SVRSearchParameters<f64, DenseMatrix<f64>, LinearKernel> =
    //         SVRSearchParameters {
    //             eps: vec![0., 1.],
    //             kernel: vec![LinearKernel {}],
    //             ..Default::default()
    //         };
    //     let mut iter = parameters.into_iter();
    //     let next = iter.next().unwrap();
    //     assert_eq!(next.eps, 0.);
    //     assert_eq!(next.kernel, LinearKernel {});
    //     let next = iter.next().unwrap();
    //     assert_eq!(next.eps, 1.);
    //     assert_eq!(next.kernel, LinearKernel {});
    //     assert!(iter.next().is_none());
    // }

    #[cfg_attr(
        all(target_arch = "wasm32", not(target_os = "wasi")),
        wasm_bindgen_test::wasm_bindgen_test
    )]
    #[test]
    fn svr_fit_predict() {
        let x = DenseMatrix::from_2d_array(&[
            &[234.289, 235.6, 159.0, 107.608, 1947., 60.323],
            &[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
            &[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
            &[284.599, 335.1, 165.0, 110.929, 1950., 61.187],
            &[328.975, 209.9, 309.9, 112.075, 1951., 63.221],
            &[346.999, 193.2, 359.4, 113.270, 1952., 63.639],
            &[365.385, 187.0, 354.7, 115.094, 1953., 64.989],
            &[363.112, 357.8, 335.0, 116.219, 1954., 63.761],
            &[397.469, 290.4, 304.8, 117.388, 1955., 66.019],
            &[419.180, 282.2, 285.7, 118.734, 1956., 67.857],
            &[442.769, 293.6, 279.8, 120.445, 1957., 68.169],
            &[444.546, 468.1, 263.7, 121.950, 1958., 66.513],
            &[482.704, 381.3, 255.2, 123.366, 1959., 68.655],
            &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
            &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
            &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
        ])
        .unwrap();

        let y: Vec<f64> = vec![
            83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
            114.2, 115.7, 116.9,
        ];

        let knl = Kernels::linear();
        let y_hat = SVR::fit(
            &x,
            &y,
            &SVRParameters::default()
                .with_eps(2.0)
                .with_c(10.0)
                .with_kernel(knl),
        )
        .and_then(|lr| lr.predict(&x))
        .unwrap();

        let t = mean_squared_error(&y_hat, &y);
        println!("{t:?}");
        assert!(t < 2.5);
    }

    #[cfg_attr(
        all(target_arch = "wasm32", not(target_os = "wasi")),
        wasm_bindgen_test::wasm_bindgen_test
    )]
    #[test]
    #[cfg(all(feature = "serde", not(target_arch = "wasm32")))]
    fn svr_serde() {
        let x = DenseMatrix::from_2d_array(&[
            &[234.289, 235.6, 159.0, 107.608, 1947., 60.323],
            &[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
            &[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
            &[284.599, 335.1, 165.0, 110.929, 1950., 61.187],
            &[328.975, 209.9, 309.9, 112.075, 1951., 63.221],
            &[346.999, 193.2, 359.4, 113.270, 1952., 63.639],
            &[365.385, 187.0, 354.7, 115.094, 1953., 64.989],
            &[363.112, 357.8, 335.0, 116.219, 1954., 63.761],
            &[397.469, 290.4, 304.8, 117.388, 1955., 66.019],
            &[419.180, 282.2, 285.7, 118.734, 1956., 67.857],
            &[442.769, 293.6, 279.8, 120.445, 1957., 68.169],
            &[444.546, 468.1, 263.7, 121.950, 1958., 66.513],
            &[482.704, 381.3, 255.2, 123.366, 1959., 68.655],
            &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
            &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
            &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
        ])
        .unwrap();

        let y: Vec<f64> = vec![
            83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
            114.2, 115.7, 116.9,
        ];

        let knl = Kernels::rbf().with_gamma(0.7);
        let params = SVRParameters::default().with_kernel(knl);

        let svr = SVR::fit(&x, &y, &params).unwrap();

        let deserialized_svr: SVR<f64, DenseMatrix<f64>, _> =
            serde_json::from_str(&serde_json::to_string(&svr).unwrap()).unwrap();

        assert_eq!(svr, deserialized_svr);
    }
}