extern crate rand;
extern crate serde;
extern crate serde_json;
extern crate bincode;
extern crate csv;
#[macro_use]
extern crate serde_derive;

pub mod activators;
pub mod estimators;
pub mod data;
pub mod io;

use std::fmt;
use std::default::Default;
use data::Extractable;
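
/// Kinds of errors that can arise when persisting or loading a network
/// (file I/O, binary encoding via bincode, or JSON via serde_json).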
#[derive(Debug)]
pub enum ErrorKind {
    IO(std::io::Error),
    Encoding(bincode::Error),
    Json(serde_json::Error),
    StdError(Box<dyn std::error::Error>),
}
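
/// Names the per-neuron buffers of a `Layer`. Currently unused
/// (hence `dead_code`), presumably reserved for field-wise access.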
#[allow(dead_code)]
enum Field {
    Induced,
    Y,
    Deltas,
    Weights,
}
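
/// Hooks that run around (de)serialization. `before` is called prior to
/// serializing and `after` once deserialization finishes, giving a type the
/// chance to rebuild state that serde skips (see `FeedForward::act`).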
pub trait Transform: serde::Serialize + for<'de> serde::Deserialize<'de> {
    fn before(&mut self) {}
    fn after(&mut self) {}
}
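
/// A single fully connected layer: induced local fields `v`, activated
/// outputs `y`, backpropagated `delta`s (plus the previous pass's deltas
/// for the momentum term), and one weight row per neuron.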
#[derive(Serialize, Deserialize)]
struct Layer {
    v: Vec<f64>,
    y: Vec<f64>,
    delta: Vec<f64>,
    prev_delta: Vec<f64>,
    w: Vec<Vec<f64>>,
}
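
/// Activation function and its derivative as plain `fn` pointers. Function
/// pointers cannot be (de)serialized, which is why `FeedForward` skips this
/// field with serde and restores it in `Transform::after`.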
struct ActivationContainer {
    func: fn(f64) -> f64,
    der: fn(f64) -> f64,
}
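
/// A feed forward neural network trained by backpropagation with momentum.
/// Hidden layers use the configured activation; the output layer is linear.
///
/// A minimal usage sketch (the `&[2, 2, 1]` architecture here is just an
/// illustration: two inputs, one hidden layer of two neurons, one output):
///
/// ```ignore
/// let mut nn = FeedForward::new(&[2, 2, 1]);
/// nn.activation(activators::Type::Tanh)
///     .learning_rate(0.05)
///     .momentum(0.05);
/// nn.fit(&[0.0, 1.0], &[1.0]);
/// let prediction = nn.calc(&[0.0, 1.0]);
/// ```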
#[derive(Serialize, Deserialize)]
pub struct FeedForward {
    layers: Vec<Layer>,
    learn_rate: f64,
    momentum: f64,
    error: f64,
    act_type: activators::Type,
    // Function pointers are not serializable; this field is skipped and
    // restored from `act_type` in `Transform::after`.
    #[serde(skip_deserializing, skip_serializing)]
    act: ActivationContainer,
}
impl Layer {
    /// Builds a layer of `amount` neurons, each with `input + 1` weights
    /// (one per input plus a bias) initialized uniformly in (-1, 1).
    fn new(amount: i32, input: i32) -> Layer {
        let mut nl = Layer { v: vec![], y: vec![], delta: vec![], prev_delta: vec![], w: Vec::new() };
        for _ in 0..amount {
            nl.y.push(0.0);
            nl.delta.push(0.0);
            nl.v.push(0.0);
            let mut v = Vec::new();
            for _ in 0..input + 1 {
                v.push(2f64 * rand::random::<f64>() - 1f64);
            }
            nl.w.push(v);
        }
        nl
    }
    /// Inserts a neuron at `index`, with a weight row sized from the neuron
    /// currently at that position. `prev_delta` is left alone: it is rebuilt
    /// from `delta` on the next backward pass.
    fn bind(&mut self, index: usize) {
        self.v.insert(index, 0.0);
        self.y.insert(index, 0.0);
        self.delta.insert(index, 0.0);
        let mut v: Vec<f64> = Vec::new();
        let len = self.w[index].len();
        for _ in 0..len {
            v.push(2f64 * rand::random::<f64>() - 1f64);
        }
        self.w.insert(index, v);
    }
    /// Removes the neuron at `index`.
    fn unbind(&mut self, index: usize) {
        self.v.remove(index);
        self.y.remove(index);
        self.delta.remove(index);
        self.w.remove(index);
    }
}
impl FeedForward {
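    /// Creates a network from an architecture slice: the first element is the
    /// number of inputs, each following element the size of the next layer.
    ///
    /// For example (an illustrative architecture, not a prescription):
    ///
    /// ```ignore
    /// // 3 inputs, a hidden layer of 5 neurons, 2 outputs.
    /// let nn = FeedForward::new(&[3, 5, 2]);
    /// ```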
    pub fn new(architecture: &[i32]) -> FeedForward {
        let mut nn = FeedForward {
            learn_rate: 0.1,
            momentum: 0.1,
            error: 0.0,
            layers: Vec::new(),
            act: ActivationContainer { func: activators::tanh, der: activators::der_tanh },
            act_type: activators::Type::Tanh,
        };
        // architecture[0] is the input dimension; every later entry creates a layer.
        for i in 1..architecture.len() {
            nn.layers.push(Layer::new(architecture[i], architecture[i - 1]));
        }
        nn
    }
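
    /// Forward pass. `x` is the bias-augmented input (1.0 prepended by
    /// `fit`/`calc`). Hidden layers apply the activation function; the last
    /// layer stays linear.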
    fn forward(&mut self, x: &Vec<f64>) {
        let mut sum: f64;
        for j in 0..self.layers.len() {
            if j == 0 {
                // First layer: the bias input is already part of `x`.
                for i in 0..self.layers[j].v.len() {
                    sum = 0.0;
                    for k in 0..x.len() {
                        sum += self.layers[j].w[i][k] * x[k];
                    }
                    self.layers[j].v[i] = sum;
                    self.layers[j].y[i] = (self.act.func)(sum);
                }
            } else if j == self.layers.len() - 1 {
                // Output layer: bias weight at index 0, linear output.
                for i in 0..self.layers[j].v.len() {
                    sum = self.layers[j].w[i][0];
                    for k in 0..self.layers[j - 1].y.len() {
                        sum += self.layers[j].w[i][k + 1] * self.layers[j - 1].y[k];
                    }
                    self.layers[j].v[i] = sum;
                    self.layers[j].y[i] = sum;
                }
            } else {
                // Hidden layers: bias weight at index 0, activated output.
                for i in 0..self.layers[j].v.len() {
                    sum = self.layers[j].w[i][0];
                    for k in 0..self.layers[j - 1].y.len() {
                        sum += self.layers[j].w[i][k + 1] * self.layers[j - 1].y[k];
                    }
                    self.layers[j].v[i] = sum;
                    self.layers[j].y[i] = (self.act.func)(sum);
                }
            }
        }
    }
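
    /// Backward pass: computes the output-layer deltas from the target `d`
    /// (accumulating the squared error as 0.5 * Σ(dᵢ - yᵢ)²), then propagates
    /// deltas to earlier layers, skipping each neuron's bias weight.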
    fn backward(&mut self, d: &Vec<f64>) {
        let mut sum: f64;
        for j in (0..self.layers.len()).rev() {
            // Remember the last deltas for the momentum term in `update`.
            self.layers[j].prev_delta = self.layers[j].delta.clone();
            if j == self.layers.len() - 1 {
                self.error = 0.0;
                for i in 0..self.layers[j].y.len() {
                    self.layers[j].delta[i] = (d[i] - self.layers[j].y[i]) * (self.act.der)(self.layers[j].v[i]);
                    self.error += 0.5 * (d[i] - self.layers[j].y[i]).powi(2);
                }
            } else {
                for i in 0..self.layers[j].delta.len() {
                    sum = 0.0;
                    for k in 0..self.layers[j + 1].delta.len() {
                        // `i + 1` skips the next layer's bias weight at index 0.
                        sum += self.layers[j + 1].delta[k] * self.layers[j + 1].w[k][i + 1];
                    }
                    self.layers[j].delta[i] = (self.act.der)(self.layers[j].v[i]) * sum;
                }
            }
        }
    }
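
    /// Weight update: w += η · δ · input, with the bias treated as a constant
    /// input of 1, plus a momentum term driven by the previous pass's deltas.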
    fn update(&mut self, x: &Vec<f64>) {
        for j in 0..self.layers.len() {
            for i in 0..self.layers[j].w.len() {
                for k in 0..self.layers[j].w[i].len() {
                    if j == 0 {
                        // First layer: `x` already carries the bias at index 0.
                        self.layers[j].w[i][k] += self.learn_rate * self.layers[j].delta[i] * x[k];
                    } else if k == 0 {
                        // Bias weight: constant input of 1.
                        self.layers[j].w[i][k] += self.learn_rate * self.layers[j].delta[i];
                    } else {
                        self.layers[j].w[i][k] += self.learn_rate * self.layers[j].delta[i] * self.layers[j - 1].y[k - 1];
                    }
                    // Momentum term based on the neuron's delta from the previous pass.
                    self.layers[j].w[i][k] += self.momentum * self.layers[j].prev_delta[i];
                }
            }
        }
    }
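
    /// Adds a neuron at position `neuron` of the given layer. Layers are
    /// numbered from 1 (the first hidden layer), matching `unbind`.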
    pub fn bind(&mut self, layer: usize, neuron: usize) {
        self.layers[layer - 1].bind(neuron);
    }
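
    /// Removes the neuron at position `neuron` of the given layer
    /// (1-based layer numbering, as in `bind`).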
    pub fn unbind(&mut self, layer: usize, neuron: usize) {
        self.layers[layer - 1].unbind(neuron);
    }
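
    /// Trains on `iterations` samples drawn at random from `data` via
    /// `Extractable::rand`, calling `fit` on each (input, target) pair.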
    pub fn train<T>(&mut self, data: &T, iterations: i64) where T: Extractable {
        for _ in 0..iterations {
            let (x, y) = data.rand();
            self.fit(&x, &y);
        }
    }
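
    /// One training step on a single sample: prepends the bias input, then
    /// runs the forward, backward, and weight-update passes.
    ///
    /// ```ignore
    /// // Illustrative sample for a 2-input, 1-output network.
    /// nn.fit(&[0.0, 1.0], &[1.0]);
    /// ```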
    #[allow(non_snake_case)]
    pub fn fit(&mut self, X: &[f64], d: &[f64]) {
        let mut x = X.to_vec();
        let res = d.to_vec();
        // Prepend the constant bias input.
        x.insert(0, 1f64);
        self.forward(&x);
        self.backward(&res);
        self.update(&x);
    }
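
    /// Computes the network's output for input `X` and returns a slice into
    /// the last layer's activations (valid until the next mutable call).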
    #[allow(non_snake_case)]
    pub fn calc(&mut self, X: &[f64]) -> &[f64] {
        let mut x = X.to_vec();
        // Prepend the constant bias input.
        x.insert(0, 1f64);
        self.forward(&x);
        &self.layers[self.layers.len() - 1].y
    }
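
    /// Selects one of the built-in activation functions. `Custom` cannot be
    /// chosen here (there is no function to attach), so it falls back to
    /// tanh; use `custom_activation` instead.
    ///
    /// ```ignore
    /// nn.activation(activators::Type::Relu);
    /// ```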
    pub fn activation(&mut self, func: activators::Type) -> &mut FeedForward {
        match func {
            activators::Type::Sigmoid => {
                self.act_type = activators::Type::Sigmoid;
                self.act.func = activators::sigm;
                self.act.der = activators::der_sigm;
            }
            // `Custom` carries no function pointers here, so it degrades to tanh.
            activators::Type::Tanh | activators::Type::Custom => {
                self.act_type = activators::Type::Tanh;
                self.act.func = activators::tanh;
                self.act.der = activators::der_tanh;
            }
            activators::Type::Relu => {
                self.act_type = activators::Type::Relu;
                self.act.func = activators::relu;
                self.act.der = activators::der_relu;
            }
        }
        self
    }
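
    /// Installs a user-supplied activation function together with its
    /// derivative. Custom functions cannot be serialized; after
    /// deserialization the network falls back to tanh (see `Transform::after`).
    ///
    /// A sketch with an illustrative identity activation:
    ///
    /// ```ignore
    /// fn id(x: f64) -> f64 { x }
    /// fn der_id(_x: f64) -> f64 { 1.0 }
    /// nn.custom_activation(id, der_id);
    /// ```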
    pub fn custom_activation(&mut self, func: fn(f64) -> f64, der: fn(f64) -> f64) -> &mut FeedForward {
        self.act_type = activators::Type::Custom;
        self.act.func = func;
        self.act.der = der;
        self
    }
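
    /// Sets the learning rate η used in the weight updates.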
    pub fn learning_rate(&mut self, learning_rate: f64) -> &mut FeedForward {
        self.learn_rate = learning_rate;
        self
    }
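
    /// Sets the momentum coefficient applied to the previous pass's deltas.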
    pub fn momentum(&mut self, momentum: f64) -> &mut FeedForward {
        self.momentum = momentum;
        self
    }
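
    /// Returns the squared error accumulated during the last backward pass.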
    pub fn get_error(&self) -> f64 {
        self.error
    }
}
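
// Restores the activation function pointers after deserialization, since the
// `act` field is skipped by serde. A `Custom` activation cannot be recovered
// and falls back to tanh.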
impl Transform for FeedForward {
    fn after(&mut self) {
        match self.act_type {
            activators::Type::Sigmoid => {
                self.act.func = activators::sigm;
                self.act.der = activators::der_sigm;
            }
            activators::Type::Tanh | activators::Type::Custom => {
                // Custom fn pointers are lost on serialization; degrade to tanh.
                self.act_type = activators::Type::Tanh;
                self.act.func = activators::tanh;
                self.act.der = activators::der_tanh;
            }
            activators::Type::Relu => {
                self.act.func = activators::relu;
                self.act.der = activators::der_relu;
            }
        }
    }
}
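
// The default activation is tanh; serde uses this to fill the skipped `act`
// field during deserialization, after which `Transform::after` fixes it up.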
impl Default for ActivationContainer {
    fn default() -> ActivationContainer {
        ActivationContainer { func: activators::tanh, der: activators::der_tanh }
    }
}
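
// Human-readable dump of the network state: induced fields, activations,
// deltas, and weight matrices, one layer per line.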
impl fmt::Display for FeedForward {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut buf = String::from("**Induced field**\n");
        for layer in self.layers.iter() {
            for val in layer.v.iter() {
                buf += &format!("{:.3} ", val);
            }
            buf += "\n";
        }
        buf += "\n";
        buf += "**Activated field**\n";
        for layer in self.layers.iter() {
            for val in layer.y.iter() {
                buf += &format!("{:.3} ", val);
            }
            buf += "\n";
        }
        buf += "\n";
        buf += "**Deltas**\n";
        for layer in self.layers.iter() {
            for val in layer.delta.iter() {
                buf += &format!("{:.3} ", val);
            }
            buf += "\n";
        }
        buf += "\n";
        buf += "**Weights**\n";
        for layer in self.layers.iter() {
            for row in layer.w.iter() {
                buf += "[";
                for cell in row.iter() {
                    buf += &format!("{:.3} ", cell);
                }
                buf += "]";
            }
            buf += "\n";
        }
        write!(f, "{}", buf)
    }
}