Struct NeuralNetwork

pub struct NeuralNetwork<const I: usize, const O: usize> { /* private fields */ }

A simple Neural Network
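A minimal construction-and-inference sketch (crate imports are elided here, as they are in the repository examples below; the layer sizes are purely illustrative):

let mut net: NeuralNetwork<2, 1> = NeuralNetwork::new()
    .add_layer(4, ActivationFunction::ReLU)
    .add_layer(1, ActivationFunction::Linear);
// the input length is fixed by the const parameter I, the output length by O
let out: [f32; 1] = net.run(&[0.5, -0.25]);
println!("{}", out[0]);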

Implementations

impl<const I: usize, const O: usize> NeuralNetwork<I, O>

pub fn new() -> Self

Examples found in repository
examples/bench.rs (line 5)
3fn main() {
4    // let's check how fast this thing gets
5    let mut net: NeuralNetwork<1, 1> = NeuralNetwork::new()
6        .add_layer(5, ActivationFunction::ReLU)
7        .add_layer(5, ActivationFunction::ReLU)
8        .add_layer(5, ActivationFunction::ReLU)
9        .add_layer(5, ActivationFunction::ReLU)
10        .add_layer(5, ActivationFunction::ReLU)
11        .add_layer(5, ActivationFunction::ReLU)
12        .add_layer(1, ActivationFunction::Linear);
13
14    let mut sum = 0.0;
15    for i in 0..10_000_000 {
16        sum += net.run(&[i as f32])[0];
17    }
18
19    println!("{sum}");
20}
More examples
examples/training.rs (line 5)
3fn main() {
4    // create a neural network
5    let mut net = NeuralNetwork::new().add_layer(1, ActivationFunction::Linear);
6
7    // create training data
8    let mut inputs = vec![];
9    let mut outputs = vec![];
10    for i in -50..50 {
11        inputs.push([i as f32]);
12        outputs.push([i as f32 * 3.0]);
13    }
14    let data = DataSet { inputs, outputs };
15
16    let trainer = BasicTrainer::new(data);
17
18    // train the model
19    for _ in 0..10 {
20        trainer.train(&mut net, 10);
21        // lower is better
22        println!("{}", trainer.get_total_error(&net))
23    }
24
25    // show that this actually works!
26    println!("########");
27    for i in -5..5 {
28        println!("{}", &net.run(&[i as f32 + 0.5])[0]);
29    }
30}
examples/circle.rs (line 5)
3fn main() {
4    // this network is completely overkill, but it does the job
5    let mut net: NeuralNetwork<2, 1> = NeuralNetwork::new()
6        .add_layer(3, ActivationFunction::ReLU)
7        .add_layer(3, ActivationFunction::ReLU)
8        // this layer reduces everything to one output!
9        .add_layer(1, ActivationFunction::Linear);
10
11    let mut inputs = vec![];
12    let mut output = vec![];
13    for x in 0..=100 {
14        for y in 0..=100 {
15            inputs.push([x as f32, y as f32]);
16            // we want this to be a classifier, so we ask it for a result greater than zero or less than zero
17            output.push(if (x as f32).abs() + (y as f32).abs() < 30.0 {
18                [1.0]
19            } else {
20                [-1.0]
21            })
22        }
23    }
24
25    let data = DataSet {
26        inputs,
27        outputs: output,
28    };
29
30    let trainer = BasicTrainer::new(data);
31    for _ in 0..50 {
32        trainer.train(&mut net, 10);
33        println!("{}", trainer.get_total_error(&net))
34    }
35}

pub fn add_layer(self, n: usize, func: ActivationFunction) -> Self

Adds a layer with n neurons and the specified activation function.

Examples found in repository
examples/bench.rs (line 6)
3fn main() {
4    // let's check how fast this thing gets
5    let mut net: NeuralNetwork<1, 1> = NeuralNetwork::new()
6        .add_layer(5, ActivationFunction::ReLU)
7        .add_layer(5, ActivationFunction::ReLU)
8        .add_layer(5, ActivationFunction::ReLU)
9        .add_layer(5, ActivationFunction::ReLU)
10        .add_layer(5, ActivationFunction::ReLU)
11        .add_layer(5, ActivationFunction::ReLU)
12        .add_layer(1, ActivationFunction::Linear);
13
14    let mut sum = 0.0;
15    for i in 0..10_000_000 {
16        sum += net.run(&[i as f32])[0];
17    }
18
19    println!("{sum}");
20}
More examples
examples/training.rs (line 5)
3fn main() {
4    // create a neural network
5    let mut net = NeuralNetwork::new().add_layer(1, ActivationFunction::Linear);
6
7    // create training data
8    let mut inputs = vec![];
9    let mut outputs = vec![];
10    for i in -50..50 {
11        inputs.push([i as f32]);
12        outputs.push([i as f32 * 3.0]);
13    }
14    let data = DataSet { inputs, outputs };
15
16    let trainer = BasicTrainer::new(data);
17
18    // train the model
19    for _ in 0..10 {
20        trainer.train(&mut net, 10);
21        // lower is better
22        println!("{}", trainer.get_total_error(&net))
23    }
24
25    // show that this actually works!
26    println!("########");
27    for i in -5..5 {
28        println!("{}", &net.run(&[i as f32 + 0.5])[0]);
29    }
30}
examples/circle.rs (line 6)
3fn main() {
4    // this network is completely overkill, but it does the job
5    let mut net: NeuralNetwork<2, 1> = NeuralNetwork::new()
6        .add_layer(3, ActivationFunction::ReLU)
7        .add_layer(3, ActivationFunction::ReLU)
8        // this layer reduces everything to one output!
9        .add_layer(1, ActivationFunction::Linear);
10
11    let mut inputs = vec![];
12    let mut output = vec![];
13    for x in 0..=100 {
14        for y in 0..=100 {
15            inputs.push([x as f32, y as f32]);
16            // we want this to be a classifier, so we ask it for a result greater than zero or less than zero
17            output.push(if (x as f32).abs() + (y as f32).abs() < 30.0 {
18                [1.0]
19            } else {
20                [-1.0]
21            })
22        }
23    }
24
25    let data = DataSet {
26        inputs,
27        outputs: output,
28    };
29
30    let trainer = BasicTrainer::new(data);
31    for _ in 0..50 {
32        trainer.train(&mut net, 10);
33        println!("{}", trainer.get_total_error(&net))
34    }
35}

pub fn random_layer(self, n: usize, func: ActivationFunction) -> Self

Adds a layer with n neurons and randomized weights and biases to the model.
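A hedged sketch of building a randomly initialized network with random_layer; that the final layer should match the output size O is inferred from the other examples on this page, not stated by this method:

let mut net: NeuralNetwork<1, 1> = NeuralNetwork::new()
    .random_layer(8, ActivationFunction::ReLU)
    .random_layer(1, ActivationFunction::Linear);
// weights and biases start out randomized, so the output is arbitrary until trained
println!("{}", net.run(&[0.3])[0]);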

pub fn random_edit(&mut self)

Randomly edits one of the network's neurons.

pub fn reverse_edit(&mut self)

Reverses the last random edit.
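Taken together, random_edit and reverse_edit allow a simple accept/reject search: mutate, re-evaluate, and undo the edit if it made things worse. The loop below is only an illustration of that idea, not the crate's BasicTrainer; the toy target y = 3x and the network shape are invented for the sketch:

let mut net: NeuralNetwork<1, 1> = NeuralNetwork::new()
    .random_layer(4, ActivationFunction::ReLU)
    .random_layer(1, ActivationFunction::Linear);
// squared error against the toy target y = 3x
let error = |net: &mut NeuralNetwork<1, 1>| -> f32 {
    (-5..5)
        .map(|i| (net.run(&[i as f32])[0] - 3.0 * i as f32).powi(2))
        .sum()
};
let mut best = error(&mut net);
for _ in 0..1_000 {
    net.random_edit();
    let e = error(&mut net);
    if e < best {
        best = e; // keep the improvement
    } else {
        net.reverse_edit(); // undo the edit that made things worse
    }
}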

pub fn with_weights(self, weights: Vec<Vec<f32>>) -> Self

Adds custom weights to the last layer of the model.

pub fn with_bias(self, biases: Vec<f32>) -> Self

Adds custom biases to the last layer of the model.
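A sketch combining with_weights and with_bias to pin down a single linear layer. The weight layout (one inner Vec per neuron, one weight per input to that layer) is an assumption; this page does not specify it:

let mut net: NeuralNetwork<2, 1> = NeuralNetwork::new()
    .add_layer(1, ActivationFunction::Linear)
    // assumed layout: weights[neuron][input]
    .with_weights(vec![vec![1.0, -1.0]])
    .with_bias(vec![0.5]);
// under that assumption this computes x - y + 0.5
println!("{}", net.run(&[2.0, 1.0])[0]);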

pub fn run(&mut self, input: &[f32; I]) -> [f32; O]

Runs the model on the given input.

Examples found in repository
examples/bench.rs (line 16)
3fn main() {
4    // let's check how fast this thing gets
5    let mut net: NeuralNetwork<1, 1> = NeuralNetwork::new()
6        .add_layer(5, ActivationFunction::ReLU)
7        .add_layer(5, ActivationFunction::ReLU)
8        .add_layer(5, ActivationFunction::ReLU)
9        .add_layer(5, ActivationFunction::ReLU)
10        .add_layer(5, ActivationFunction::ReLU)
11        .add_layer(5, ActivationFunction::ReLU)
12        .add_layer(1, ActivationFunction::Linear);
13
14    let mut sum = 0.0;
15    for i in 0..10_000_000 {
16        sum += net.run(&[i as f32])[0];
17    }
18
19    println!("{sum}");
20}
More examples
examples/training.rs (line 28)
3fn main() {
4    // create a neural network
5    let mut net = NeuralNetwork::new().add_layer(1, ActivationFunction::Linear);
6
7    // create training data
8    let mut inputs = vec![];
9    let mut outputs = vec![];
10    for i in -50..50 {
11        inputs.push([i as f32]);
12        outputs.push([i as f32 * 3.0]);
13    }
14    let data = DataSet { inputs, outputs };
15
16    let trainer = BasicTrainer::new(data);
17
18    // train the model
19    for _ in 0..10 {
20        trainer.train(&mut net, 10);
21        // lower is better
22        println!("{}", trainer.get_total_error(&net))
23    }
24
25    // show that this actually works!
26    println!("########");
27    for i in -5..5 {
28        println!("{}", &net.run(&[i as f32 + 0.5])[0]);
29    }
30}

pub fn unbuffered_run(&self, input: &[f32; I]) -> [f32; O]

Runs the model on the given input. Unlike run, this variant does not reuse internal buffers and is therefore slower, since it does extra allocation work.
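Because unbuffered_run takes &self rather than &mut self, it can be called through a shared reference where run cannot; a small sketch:

let net: NeuralNetwork<1, 1> = NeuralNetwork::new().add_layer(1, ActivationFunction::Linear);
// no `mut` binding needed: the network is borrowed immutably
let y = net.unbuffered_run(&[2.0]);
println!("{}", y[0]);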

pub fn par_run(&self, inputs: &Vec<[f32; I]>) -> Vec<[f32; O]>

Runs the model on a large set of inputs. Uses rayon to parallelize the computation.
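A sketch of batch inference with par_run; the batch size and network shape are arbitrary:

let net: NeuralNetwork<1, 1> = NeuralNetwork::new()
    .add_layer(4, ActivationFunction::ReLU)
    .add_layer(1, ActivationFunction::Linear);
let inputs: Vec<[f32; 1]> = (0..1_000).map(|i| [i as f32]).collect();
let outputs: Vec<[f32; 1]> = net.par_run(&inputs);
assert_eq!(outputs.len(), inputs.len());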

Trait Implementations

impl<const I: usize, const O: usize> Default for NeuralNetwork<I, O>

fn default() -> Self

Returns the “default value” for a type. Read more

Auto Trait Implementations

impl<const I: usize, const O: usize> Freeze for NeuralNetwork<I, O>

impl<const I: usize, const O: usize> RefUnwindSafe for NeuralNetwork<I, O>

impl<const I: usize, const O: usize> Send for NeuralNetwork<I, O>

impl<const I: usize, const O: usize> Sync for NeuralNetwork<I, O>

impl<const I: usize, const O: usize> Unpin for NeuralNetwork<I, O>

impl<const I: usize, const O: usize> UnwindSafe for NeuralNetwork<I, O>

Blanket Implementations

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more

impl<T> From<T> for T

fn from(t: T) -> T

Returns the argument unchanged.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> Pointable for T

const ALIGN: usize

The alignment of pointer.

type Init = T

The type for initializers.

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a with the given initializer. Read more

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more

impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

fn vzip(self) -> V