//! # Overview
//!
//! J.a.i.m.e., pronounced /hɑːɪmɛ/, is an all-purpose, ergonomic gradient descent engine. It can configure **ANY** \* and **ALL**\*\* models to find the best fit for your dataset. It will magically take care of the gradient computations with little impact on your coding style.
//!
//! \* not only neural networks
//!
//! \*\* derivability conditions apply
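//!
//! Under the hood, the gradients are obtained through forward mode automatic differentiation:
//! the model is evaluated on dual numbers, values that carry their derivative along with them,
//! so running the model once also yields the gradient. The sketch below is illustrative only;
//! it uses a hypothetical `Dual` type rather than this crate's internal one:
//!
//! ```
//! // minimal dual number: a value paired with its derivative w.r.t. one parameter
//! #[derive(Clone, Copy, Debug, PartialEq)]
//! struct Dual {
//!     val: f32,
//!     der: f32,
//! }
//!
//! impl std::ops::Mul for Dual {
//!     type Output = Dual;
//!     fn mul(self, rhs: Dual) -> Dual {
//!         Dual {
//!             val: self.val * rhs.val,
//!             // product rule: (u * v)' = u' * v + u * v'
//!             der: self.der * rhs.val + self.val * rhs.der,
//!         }
//!     }
//! }
//!
//! let p = Dual { val: 3.0, der: 1.0 }; // a parameter, seeded with derivative 1
//! let x = Dual { val: 2.0, der: 0.0 }; // an input, treated as a constant
//! let y = p * x; // evaluating the model also evaluates dy/dp
//! assert_eq!((y.val, y.der), (6.0, 2.0)); // dy/dp = x = 2
//! ```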
//!
//! # Basic example
//!
//! ```
//! #![feature(generic_arg_infer)] // this will save a lot of time and make your code much more readable
//!
//! use std::ops::Mul; // this will allow us to specify the properties of our "float-oid"
//!
//! use jaime::trainer::{
//!     asymptotic_gradient_descent_trainer::AsymptoticGradientDescentTrainer,
//!     default_param_translator, DataPoint, Trainer,
//! };
//!
//! // this is the model, Y = X*P, where P is the parameter, X the input and Y the output
//! fn direct<N: Clone + Mul<N, Output = N> + From<f32>>(
//!     parameters: &[N; 1],
//!     input: &[f32; 1],
//!     _: &(),
//! ) -> [N; 1] {
//!     [parameters[0].clone() * N::from(input[0])]
//! }
//!
//! fn main() {
//!     // define the desired behaviour as a dataset
//!     let dataset = vec![
//!         DataPoint {
//!             input: [1.],
//!             output: [2.],
//!         },
//!         DataPoint {
//!             input: [2.],
//!             output: [4.],
//!         },
//!         DataPoint {
//!             input: [4.],
//!             output: [8.],
//!         },
//!     ];
//!     // initialize the trainer; this struct will store the parameters and nudge them down the gradient
//!     let mut trainer =
//!         AsymptoticGradientDescentTrainer::new_dense(direct, direct, default_param_translator, ());
//!     // each call to train_step steps towards the local minimum; once it is reached,
//!     // found_local_minima() returns true and the loop exits
//!     while !trainer.found_local_minima() {
//!         trainer.train_step::<false, false, _, _>(
//!             &dataset,
//!             &dataset,
//!             dataset.len(),
//!             dataset.len(),
//!             1.,
//!         );
//!
//!         println!("{:?}", trainer.get_model_params());
//!     }
//!     // at this point the parameter should be equal to 2, as that best fits our dataset
//!     println!("{:?}", trainer.get_model_params());
//! }
//! ```
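//!
//! Why does the parameter converge to 2? For this dataset the squared error loss is
//! L(p) = Σᵢ (p·xᵢ − yᵢ)², which is minimized where dL/dp = 2 Σᵢ xᵢ(p·xᵢ − yᵢ) = 0,
//! i.e. at p = Σᵢ xᵢyᵢ / Σᵢ xᵢ². A quick standalone check of that closed form
//! (plain Rust, independent of this crate's API):
//!
//! ```
//! let (xs, ys) = ([1.0f32, 2.0, 4.0], [2.0f32, 4.0, 8.0]);
//! // closed-form least-squares solution for the model y = p * x
//! let p = xs.iter().zip(ys.iter()).map(|(x, y)| x * y).sum::<f32>()
//!     / xs.iter().map(|x| x * x).sum::<f32>();
//! assert_eq!(p, 2.0); // (2 + 8 + 32) / (1 + 4 + 16) = 42 / 21 = 2
//! ```
//!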
//! Download and compile this example from [the GitHub repo](https://github.com/jaimegonzalezfabregas/jaime_hello_world).
//!
//! # Further reading
//! A deeper explanation of how to use this crate can be found in the Trainer struct [documentation](https://docs.rs/jaime/latest/jaime/trainer/struct.Trainer.html).
extern crate indicatif;
extern crate rand;
extern crate rand_chacha;
extern crate rayon;
/// INTERNAL forward mode automatic differentiation engine.
/// INTERNAL implementation of arrays of floats that can apply the same operation to all elements at once.
/// User facing gradient descent implementation
/// for medium to big dense