jaime/
lib.rs

#![feature(generic_arg_infer)]
#![feature(test)]

//! # Overview
//!
//! J.a.i.m.e., pronounced /hɑːɪmɛ/, is an all-purpose, ergonomic gradient descent engine. It can configure **ANY** \* and **ALL** \*\* models to find the best fit for your dataset, and it will magically take care of the gradient computations with little impact on your coding style.
//!
//! \* not only neural networks
//!
//! \*\* differentiability conditions apply
//!
//! # Basic example
//!
//! ```
//! #![feature(generic_arg_infer)] // this will save a lot of typing and make your code much more readable
//!
//! use std::ops::Mul; // this lets us specify the properties of our "float-oid"
//!
//! use jaime::trainer::{
//!     asymptotic_gradient_descent_trainer::AsymptoticGradientDescentTrainer,
//!     default_param_translator, DataPoint, Trainer,
//! };
//!
//! // this is the model: Y = X*P, where P is the parameter, X the input and Y the output
//! fn direct<N: Clone + Mul<N, Output = N> + From<f32>>(
//!     parameters: &[N; 1],
//!     input: &[f32; 1],
//!     _: &(),
//! ) -> [N; 1] {
//!     [parameters[0].clone() * N::from(input[0])]
//! }
//!
//! fn main() {
//!     // define the desired behaviour as a dataset
//!     let dataset = vec![
//!         DataPoint {
//!             input: [1.],
//!             output: [2.],
//!         },
//!         DataPoint {
//!             input: [2.],
//!             output: [4.],
//!         },
//!         DataPoint {
//!             input: [4.],
//!             output: [8.],
//!         },
//!     ];
//!     // initialize the trainer; this struct stores the parameters and nudges them down the gradient
//!     let mut trainer =
//!         AsymptoticGradientDescentTrainer::new_dense(direct, direct, default_param_translator, ());
//!     // each call to train_step steps towards the local minimum; the loop exits once found_local_minima() returns true
//!     while !trainer.found_local_minima() {
//!         trainer.train_step::<false, false, _, _>(
//!             &dataset,
//!             &dataset,
//!             dataset.len(),
//!             dataset.len(),
//!             1.,
//!         );
//!
//!         println!("{:?}", trainer.get_model_params());
//!     }
//!     // at this point the parameter should be close to 2, as that best fits our dataset
//!     println!("{:?}", trainer.get_model_params());
//! }
//!
//! ```
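//!
//! For this dataset the optimum can be checked by hand: the least-squares fit of `y = p * x` minimizes `L(p) = Σ (p * xᵢ - yᵢ)²`, which gives `p* = Σ xᵢyᵢ / Σ xᵢ² = 42 / 21 = 2`. A quick sanity check in plain Rust (not using this crate's API):
//!
//! ```
//! let xs = [1.0f32, 2.0, 4.0];
//! let ys = [2.0f32, 4.0, 8.0];
//! let p: f32 = xs.iter().zip(&ys).map(|(x, y)| x * y).sum::<f32>()
//!     / xs.iter().map(|x| x * x).sum::<f32>();
//! assert_eq!(p, 2.0);
//! ```
//!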
//! Download and compile this example from [the GitHub repo](https://github.com/jaimegonzalezfabregas/jaime_hello_world).
//!
//! # Further reading
//! A deeper explanation of how to use this crate can be found in the Trainer struct [documentation](https://docs.rs/jaime/latest/jaime/trainer/struct.Trainer.html).

extern crate indicatif;
extern crate rand;
extern crate rand_chacha;
extern crate rayon;
mod tests;

/// INTERNAL forward-mode automatic differentiation engine.
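///
/// As a rough intuition for forward-mode AD (a self-contained sketch, **not** this crate's actual types): a dual number carries a value together with its derivative, and every arithmetic operation propagates both at once.
///
/// ```
/// #[derive(Clone, Copy, Debug)]
/// struct Dual { val: f32, der: f32 }
///
/// impl std::ops::Mul for Dual {
///     type Output = Dual;
///     fn mul(self, rhs: Dual) -> Dual {
///         // product rule: (u * v)' = u' * v + u * v'
///         Dual {
///             val: self.val * rhs.val,
///             der: self.der * rhs.val + self.val * rhs.der,
///         }
///     }
/// }
///
/// let x = Dual { val: 3.0, der: 1.0 }; // seed: dx/dx = 1
/// let y = x * x;                       // y = x^2
/// assert_eq!(y.der, 6.0);              // dy/dx = 2x = 6 at x = 3
/// ```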
pub mod dual;
/// INTERNAL implementation of arrays of floats that can apply the same operation to all elements at the same time (SIMD-style).
pub mod simd_arr;
/// User-facing gradient descent implementation.
pub mod trainer;
/// Function minimization utilities.
pub mod minimizer;

/// Configures rayon's global thread pool with a 1 GiB stack per worker thread. Call it once at startup before training medium to big dense models, which can otherwise overflow the default stack.
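///
/// A minimal usage sketch:
///
/// ```no_run
/// jaime::set_rayon_stack();
/// // ... build the trainer and train as usual ...
/// ```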
pub fn set_rayon_stack() {
    rayon::ThreadPoolBuilder::new()
        // 1 GiB stack per worker thread
        .stack_size(1 * 1024 * 1024 * 1024)
        .build_global()
        .unwrap();
}