#![allow(clippy::expect_used)]
mod activations;
pub mod losses;
use std::{fs::File, io::prelude::*};
use rand::prelude::*;
use rand_distr::Normal;
use serde::{Deserialize, Serialize};
/// Scalar type used throughout the crate: `f32` by default, `f64` when
/// the `floats-f64` feature is enabled.
///
/// The `cfg(not(...))` guard on the `f32` alias is required: without it,
/// enabling `floats-f64` would define `Float` twice (compile error E0428).
#[cfg(not(feature = "floats-f64"))]
pub type Float = f32;
#[cfg(feature = "floats-f64")]
pub type Float = f64;
/// One processing stage of a `Sequential` network: an element-wise
/// activation, a softmax over the whole vector, a dropout mask, or a
/// dense (fully connected) layer. Dispatched in `Sequential::run`.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
pub enum Layer {
/// Identity-style activation (applies `activations::linear`).
Linear,
/// Rectified linear unit (`activations::relu`).
ReLU,
/// Leaky ReLU with a fixed slope factor (`activations::lrelu`).
LReLU(Float),
/// Leaky ReLU whose slope is a trainable parameter — exposed through
/// `get_params` / `set_params` (same `activations::lrelu` at run time).
PReLU(Float),
/// Exponential linear unit (`activations::elu`).
ELU,
/// Parameterized ELU with two trainable parameters — exposed through
/// `get_params` / `set_params` (`activations::pelu`).
PELU(Float, Float),
/// Scaled exponential linear unit (`activations::selu`).
SELU,
/// Logistic sigmoid (`activations::sigmoid`).
Sigmoid,
/// Hyperbolic tangent (`activations::tanh`).
Tanh,
/// Absolute value (`activations::abs`).
Abs,
/// Quadratic activation (`activations::quadratic`).
Quadratic,
/// Cubic activation (`activations::cubic`).
Cubic,
/// Clipped linear activation (`activations::clip_linear`).
ClipLinear,
/// Gaussian activation (`activations::gaussian`).
Gaussian,
/// SoftPlus activation (`activations::softplus`).
SoftPlus,
/// Normalizes the whole vector via `activations::softmax` (acts on the
/// full layer, not element-wise).
SoftMax,
/// Zeroes roughly the given fraction of values and rescales the rest
/// (see `apply_dropout`). NOTE(review): applied on every `run`, i.e.
/// at inference time as well as training.
Dropout(Float),
/// Fully connected layer: one inner `Vec` per neuron, with the bias
/// stored as the first element (see `modified_matrix_dotprod`).
Dense(Vec<Vec<Float>>),
}
/// Weight-initialization schemes used by `Sequential::add_layer_dense`.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum Initializer {
/// Glorot/Xavier: normal draws with std = sqrt(2 / (n_in + n_out))
/// (see `gen_glorot`).
Glorot,
/// He: normal draws with std = sqrt(2 / n_in) (see `gen_he`).
He,
/// Every weight (bias included) set to the given constant.
Const(Float),
}
/// A feed-forward network built as an ordered stack of [`Layer`]s,
/// serializable to/from JSON via serde.
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
pub struct Sequential {
/// Expected length of every input vector passed to `run`.
num_inputs: usize,
/// Layers applied in order by `run`.
layers: Vec<Layer>,
/// Current output width: equals `num_inputs` until a `Dense` layer is
/// added, then the neuron count of the last `Dense` layer.
num_outputs: usize,
}
impl Sequential {
    /// Creates an empty network expecting `num_inputs` values per sample.
    /// Until a `Dense` layer is added, the output width equals `num_inputs`.
    #[must_use]
    pub fn new(num_inputs: usize) -> Sequential {
        Sequential { num_inputs, layers: Vec::new(), num_outputs: num_inputs }
    }

    /// Returns the number of inputs the network expects per sample.
    #[must_use]
    pub fn get_num_inputs(&self) -> usize {
        self.num_inputs
    }

    /// Returns a shared reference to the layer stack.
    #[must_use]
    pub fn get_layers(&self) -> &Vec<Layer> {
        &self.layers
    }

    /// Returns a mutable reference to the layer stack.
    pub fn get_layers_mut(&mut self) -> &mut Vec<Layer> {
        &mut self.layers
    }

    /// Collects every trainable parameter (PReLU factors, PELU pairs and
    /// Dense weights) into a flat vector, in layer order.
    #[must_use]
    pub fn get_params(&self) -> Vec<Float> {
        let mut params = Vec::new();
        for layer in self.layers.iter() {
            match layer {
                Layer::PReLU(factor) => params.push(*factor),
                Layer::PELU(a, b) => {
                    params.push(*a);
                    params.push(*b);
                }
                Layer::Dense(weights) => {
                    // Row-major: per neuron, bias first then input weights.
                    params.extend(weights.iter().flatten().copied());
                }
                _ => (),
            }
        }
        params
    }

    /// Writes a flat parameter vector (as produced by
    /// [`Self::get_params`]) back into the trainable layers, in the same
    /// order.
    ///
    /// # Panics
    /// Panics when `params` holds fewer values than the network needs.
    pub fn set_params(&mut self, params: &[Float]) -> &mut Self {
        let mut iter = params.iter();
        for layer in self.layers.iter_mut() {
            match layer {
                Layer::PReLU(factor) => {
                    *factor = *iter.next().expect("Vector params is not big enough!");
                }
                Layer::PELU(a, b) => {
                    *a = *iter.next().expect("Vector params is not big enough!");
                    *b = *iter.next().expect("Vector params is not big enough!");
                }
                Layer::Dense(weights) => {
                    for vec in weights.iter_mut() {
                        for val in vec.iter_mut() {
                            *val = *iter.next().expect("Vector params is not big enough!");
                        }
                    }
                }
                _ => (),
            }
        }
        self
    }

    /// Appends a prebuilt layer. A `Dense` layer updates the tracked
    /// output width to its neuron (row) count.
    pub fn add_layer(&mut self, layer: Layer) -> &mut Self {
        if let Layer::Dense(weights) = &layer {
            self.num_outputs = weights.len();
        }
        self.layers.push(layer);
        self
    }

    /// Appends a Leaky-ReLU layer with a fixed slope `factor`.
    pub fn add_layer_lrelu(&mut self, factor: Float) -> &mut Self {
        self.layers.push(Layer::LReLU(factor));
        self
    }

    /// Appends a PReLU layer whose slope `factor` is trainable.
    pub fn add_layer_prelu(&mut self, factor: Float) -> &mut Self {
        self.layers.push(Layer::PReLU(factor));
        self
    }

    /// Appends a PELU layer with trainable parameters `a` and `b`.
    pub fn add_layer_pelu(&mut self, a: Float, b: Float) -> &mut Self {
        self.layers.push(Layer::PELU(a, b));
        self
    }

    /// Appends a dropout layer with drop fraction `d`.
    ///
    /// # Panics
    /// Panics unless `0.0 <= d < 1.0` — a fraction of 1.0 would zero the
    /// whole layer and divide by zero when rescaling.
    pub fn add_layer_dropout(&mut self, d: Float) -> &mut Self {
        assert!((0.0..1.0).contains(&d), "Inappropriate dropout parameter!");
        self.layers.push(Layer::Dropout(d));
        self
    }

    /// Appends a fully connected layer of `neurons` neurons whose weights
    /// (bias included) are filled according to `init`, and updates the
    /// tracked output width.
    pub fn add_layer_dense(&mut self, neurons: usize, init: Initializer) -> &mut Self {
        let weights = match init {
            Initializer::Glorot => gen_glorot(self.num_outputs, neurons),
            Initializer::He => gen_he(self.num_outputs, neurons),
            // +1 column for the per-neuron bias.
            Initializer::Const(val) => vec![vec![val; self.num_outputs + 1]; neurons],
        };
        self.num_outputs = neurons;
        self.layers.push(Layer::Dense(weights));
        self
    }

    /// Feeds `input` through every layer in order and returns the output.
    ///
    /// NOTE: `Dropout` layers are applied here too, so outputs are
    /// stochastic while dropout layers are present in the stack.
    ///
    /// # Panics
    /// Panics when `input.len()` differs from the configured input size.
    #[must_use]
    pub fn run(&self, input: &[Float]) -> Vec<Float> {
        assert!(input.len() == self.num_inputs, "Incorrect input size!");
        let mut result = input.to_vec();
        for layer in self.layers.iter() {
            match layer {
                Layer::Linear => result.iter_mut().for_each(|x| {
                    *x = activations::linear(*x);
                }),
                Layer::ReLU => result.iter_mut().for_each(|x| {
                    *x = activations::relu(*x);
                }),
                Layer::LReLU(factor) => result.iter_mut().for_each(|x| {
                    *x = activations::lrelu(*x, *factor);
                }),
                // PReLU evaluates exactly like LReLU; it differs only in
                // being trainable through get_params/set_params.
                Layer::PReLU(factor) => result.iter_mut().for_each(|x| {
                    *x = activations::lrelu(*x, *factor);
                }),
                Layer::ELU => result.iter_mut().for_each(|x| {
                    *x = activations::elu(*x);
                }),
                Layer::PELU(a, b) => result.iter_mut().for_each(|x| {
                    *x = activations::pelu(*x, *a, *b);
                }),
                Layer::SELU => result.iter_mut().for_each(|x| {
                    *x = activations::selu(*x);
                }),
                Layer::Sigmoid => result.iter_mut().for_each(|x| {
                    *x = activations::sigmoid(*x);
                }),
                Layer::Tanh => result.iter_mut().for_each(|x| {
                    *x = activations::tanh(*x);
                }),
                Layer::Abs => result.iter_mut().for_each(|x| {
                    *x = activations::abs(*x);
                }),
                Layer::Quadratic => result.iter_mut().for_each(|x| {
                    *x = activations::quadratic(*x);
                }),
                Layer::Cubic => result.iter_mut().for_each(|x| {
                    *x = activations::cubic(*x);
                }),
                Layer::ClipLinear => result.iter_mut().for_each(|x| {
                    *x = activations::clip_linear(*x);
                }),
                Layer::Gaussian => result.iter_mut().for_each(|x| {
                    *x = activations::gaussian(*x);
                }),
                Layer::SoftPlus => result.iter_mut().for_each(|x| {
                    *x = activations::softplus(*x);
                }),
                Layer::SoftMax => activations::softmax(&mut result),
                Layer::Dropout(d) => apply_dropout(&mut result, *d),
                Layer::Dense(weights) => result = modified_matrix_dotprod(weights, &result),
            }
        }
        result
    }

    /// Runs the network on each input row, returning one output row per
    /// input (see [`Self::run`] for panics).
    #[must_use]
    pub fn predict(&self, inputs: &[Vec<Float>]) -> Vec<Vec<Float>> {
        inputs.iter().map(|input| self.run(input)).collect()
    }

    /// Serializes the network to a JSON string.
    ///
    /// # Panics
    /// Panics if serde encoding fails.
    #[must_use]
    pub fn to_json(&self) -> String {
        serde_json::to_string(self).expect("Encoding JSON failed!")
    }

    /// Deserializes a network from a JSON string.
    ///
    /// # Panics
    /// Panics if `encoded` is not valid JSON for a `Sequential`.
    #[must_use]
    pub fn from_json(encoded: &str) -> Sequential {
        serde_json::from_str(encoded).expect("Decoding JSON failed!")
    }

    /// Deserializes a network from any JSON-producing reader.
    ///
    /// # Panics
    /// Panics if the stream is not valid JSON for a `Sequential`.
    #[must_use]
    pub fn from_reader<R: Read>(encoded: R) -> Sequential {
        serde_json::from_reader(encoded).expect("Decoding JSON failed!")
    }

    /// Writes the network as JSON to the given file path.
    ///
    /// # Errors
    /// Returns any I/O error from creating or writing the file.
    pub fn save(&self, file: &str) -> Result<(), std::io::Error> {
        let mut file = File::create(file)?;
        file.write_all(self.to_json().as_bytes())?;
        Ok(())
    }

    /// Loads a network previously written by [`Self::save`].
    ///
    /// # Errors
    /// Returns any I/O error from opening the file.
    ///
    /// # Panics
    /// Panics if the file content is not valid JSON for a `Sequential`.
    pub fn load(file: &str) -> Result<Sequential, std::io::Error> {
        let file = File::open(file)?;
        Ok(Sequential::from_reader(file))
    }

    /// Runs the network on every `(input, target)` pair and averages
    /// `per_sample(prediction, target)` over the dataset. Shared scaffold
    /// for all `calc_*` metrics; an empty `target` yields NaN (0.0/0.0).
    fn average_over<F>(&self, target: &[(Vec<Float>, Vec<Float>)], per_sample: F) -> Float
    where
        F: Fn(&[Float], &[Float]) -> Float,
    {
        let mut total = 0.0;
        for (x, y) in target.iter() {
            total += per_sample(&self.run(x), y);
        }
        total / target.len() as Float
    }

    /// Mean squared error, averaged per sample and then over the dataset.
    #[must_use]
    pub fn calc_mse(&self, target: &[(Vec<Float>, Vec<Float>)]) -> Float {
        self.average_over(target, |pred, y| {
            let mut metric = 0.0;
            for (yp, yt) in pred.iter().zip(y.iter()) {
                let error = *yt - *yp;
                metric += error * error;
            }
            metric / y.len() as Float
        })
    }

    /// Root mean squared error: per-sample RMSE averaged over the dataset.
    #[must_use]
    pub fn calc_rmse(&self, target: &[(Vec<Float>, Vec<Float>)]) -> Float {
        self.average_over(target, |pred, y| {
            let mut metric = 0.0;
            for (yp, yt) in pred.iter().zip(y.iter()) {
                let error = *yt - *yp;
                metric += error * error;
            }
            (metric / y.len() as Float).sqrt()
        })
    }

    /// Mean absolute error, averaged per sample and then over the dataset.
    #[must_use]
    pub fn calc_mae(&self, target: &[(Vec<Float>, Vec<Float>)]) -> Float {
        self.average_over(target, |pred, y| {
            let mut metric = 0.0;
            for (yp, yt) in pred.iter().zip(y.iter()) {
                metric += (*yt - *yp).abs();
            }
            metric / y.len() as Float
        })
    }

    /// Mean absolute percentage error. NOTE: a target value of exactly
    /// 0.0 makes the per-element term infinite (division by the target).
    #[must_use]
    pub fn calc_mape(&self, target: &[(Vec<Float>, Vec<Float>)]) -> Float {
        self.average_over(target, |pred, y| {
            let mut metric = 0.0;
            for (yp, yt) in pred.iter().zip(y.iter()) {
                metric += ((*yt - *yp) / *yt).abs();
            }
            metric * (100.0 / y.len() as Float)
        })
    }

    /// Log-cosh loss: mean of ln(cosh(error)) per sample, averaged over
    /// the dataset.
    #[must_use]
    pub fn calc_logcosh(&self, target: &[(Vec<Float>, Vec<Float>)]) -> Float {
        self.average_over(target, |pred, y| {
            let mut metric = 0.0;
            for (yp, yt) in pred.iter().zip(y.iter()) {
                metric += (*yt - *yp).cosh().ln();
            }
            metric / y.len() as Float
        })
    }

    /// Binary cross-entropy. Predictions must lie strictly in (0, 1) or
    /// the logarithms produce infinities/NaNs.
    #[must_use]
    pub fn calc_binary_crossentropy(&self, target: &[(Vec<Float>, Vec<Float>)]) -> Float {
        self.average_over(target, |pred, y| {
            let mut metric = 0.0;
            for (yp, yt) in pred.iter().zip(y.iter()) {
                let error = (*yt).mul_add(yp.ln(), (1.0 - *yt) * (1.0 - *yp).ln());
                metric += -error;
            }
            metric / y.len() as Float
        })
    }

    /// Categorical cross-entropy: per-sample SUM (not mean) of
    /// -yt * ln(yp), averaged over the dataset.
    #[must_use]
    pub fn calc_categorical_crossentropy(&self, target: &[(Vec<Float>, Vec<Float>)]) -> Float {
        self.average_over(target, |pred, y| {
            let mut metric = 0.0;
            for (yp, yt) in pred.iter().zip(y.iter()) {
                metric += -(*yt * (*yp).ln());
            }
            metric
        })
    }

    /// Hinge loss: mean of max(0, 1 - yt * yp) per sample, averaged over
    /// the dataset (targets are conventionally ±1 — not enforced here).
    #[must_use]
    pub fn calc_hingeloss(&self, target: &[(Vec<Float>, Vec<Float>)]) -> Float {
        self.average_over(target, |pred, y| {
            let mut metric = 0.0;
            for (yp, yt) in pred.iter().zip(y.iter()) {
                metric += (1.0 - *yt * *yp).max(0.0);
            }
            metric / y.len() as Float
        })
    }
}
#[must_use]
pub fn gen_rnd_vec(n: usize, std: Float) -> Vec<Float> {
let mut rng = thread_rng();
let normal = Normal::new(0.0, f64::from(std)).expect("Wrong normal distribution parameters!");
normal.sample_iter(&mut rng).take(n).map(|x| x as Float).collect()
}
#[must_use]
fn gen_glorot(n_in: usize, n_out: usize) -> Vec<Vec<Float>> {
let std = (2.0 / (n_in + n_out) as Float).sqrt();
let mut weights = Vec::new();
for _ in 0..n_out {
weights.push(gen_rnd_vec(n_in + 1, std));
}
weights
}
#[must_use]
fn gen_he(n_in: usize, n_out: usize) -> Vec<Vec<Float>> {
let std = (2.0 / n_in as Float).sqrt();
let mut weights = Vec::new();
for _ in 0..n_out {
weights.push(gen_rnd_vec(n_in + 1, std));
}
weights
}
/// Inverted dropout: zeroes roughly `d * layer.len()` randomly chosen
/// entries (random indices may repeat, so fewer slots can actually be
/// hit), then rescales every entry by 1 / (1 - d) so the expected
/// magnitude stays constant. A fraction of 0.0 is a no-op.
fn apply_dropout(layer: &mut [Float], d: Float) {
    if d == 0.0 {
        return;
    }
    let drops = (d * layer.len() as Float) as usize;
    let mut rng = thread_rng();
    for _ in 0..drops {
        // Modulo mapping of a full-range usize; tiny bias is irrelevant here.
        let idx = rng.gen::<usize>() % layer.len();
        layer[idx] = 0.0;
    }
    let keep = 1.0 - d;
    for value in layer.iter_mut() {
        *value /= keep;
    }
}
#[must_use]
fn modified_matrix_dotprod(weights: &[Vec<Float>], values: &[Float]) -> Vec<Float> {
let mut result = Vec::new();
for node in weights.iter() {
let mut iter = node.iter();
let mut sum = *iter.next().expect("Empty weights! (Bias)");
for (weight, value) in iter.zip(values.iter())
{
sum += weight * value;
}
result.push(sum);
}
result
}