#[macro_use] extern crate hal;
extern crate arrayfire as af;
use hal::Model;
use hal::optimizer::{Optimizer, get_optimizer_with_defaults};
use hal::data::{DataSource, SinSource};
use hal::error::HALError;
use hal::model::{Sequential};
use hal::plot::{plot_vec, plot_array};
use hal::device::{DeviceManagerFactory, Device};
use af::{Backend, DType};
fn main() {
    // Network topology: 128-unit input -> 64-unit tanh hidden -> 128-unit linear output.
    let input_dims = 128;
    let hidden_dims = 64;
    let output_dims = 128;

    // Training hyper-parameters.
    let num_train_samples = 65536;
    let batch_size = 128;
    let optimizer_type = "Adam";
    let epochs = 20;

    // The model parameters live on the default backend (GPU when available);
    // the CPU device is used later for data generation.
    let manager = DeviceManagerFactory::new();
    let gpu_device = Device { backend: Backend::DEFAULT, id: 0 };
    let cpu_device = Device { backend: Backend::CPU, id: 0 };

    // Assemble a sequential model with MSE loss and the chosen optimizer,
    // then stack two dense layers and print a summary.
    let optimizer = get_optimizer_with_defaults(optimizer_type).unwrap();
    let mut model = Box::new(Sequential::new(manager.clone(), optimizer, "mse", gpu_device));
    model.add::<f32>("dense",
                     hashmap!["activation" => "tanh".to_string(),
                              "input_size" => input_dims.to_string(),
                              "output_size" => hidden_dims.to_string(),
                              "w_init" => "glorot_uniform".to_string(),
                              "b_init" => "zeros".to_string()]);
    model.add::<f32>("dense",
                     hashmap!["activation" => "linear".to_string(),
                              "input_size" => hidden_dims.to_string(),
                              "output_size" => output_dims.to_string(),
                              "w_init" => "glorot_uniform".to_string(),
                              "b_init" => "zeros".to_string()]);
    model.info();

    // Switch to the CPU backend so the sine-wave data is generated there.
    manager.swap_device(cpu_device);
    let data_source = SinSource::new(input_dims, batch_size, DType::F32,
                                     num_train_samples, false, false);

    // Train the model and visualise the resulting loss curve.
    let loss_history = model.fit::<SinSource, f32>(&data_source, cpu_device, epochs,
                                                   batch_size, None, true);
    plot_vec(loss_history, "Loss vs. Iterations", 512, 512);

    // Pull one held-out sample, run inference, and plot the flattened output.
    let test_sample = data_source.get_test_iter(1).input.into_inner();
    println!("test sample shape: {:?}", test_sample.dims());
    let prediction = model.forward::<f32>(&test_sample, cpu_device, cpu_device);
    println!("\nprediction shape: {:?} | output backend = {:?}",
             prediction[0].dims(), prediction[0].get_backend());
    plot_array(&af::flat(&prediction[0]), "Model Inference", 512, 512);
}