//! native_neural_network 0.3.1
//!
//! Sample-model generator built on the `no_std` Rust library for native
//! neural network models (`.rnn`). See the crate documentation for details.
use native_neural_network::activations::ActivationKind;
use native_neural_network::layers::{LayerDesc, LayerSpec};
use native_neural_network::rnn_api::{
    build_f32_with_input_into, build_f64_with_input_into, ActivationConfig, PackBuffers,
    RnnApiError,
};

use std::fs;
use std::path::Path;
use std::process;

/// Builds a deterministic pseudo-random `f32` series of length `len`.
///
/// Each element mixes two sinusoids of different frequencies (weighted
/// 0.73 / 0.27) and is scaled by `scale`; `phase` offsets both waves so
/// distinct call sites produce distinct but reproducible data.
fn gen_series_f32(len: usize, scale: f32, phase: f32) -> Vec<f32> {
    let mut series = Vec::with_capacity(len);
    for idx in 0..len {
        let t = idx as f32;
        let slow = (t * 0.017 + phase).sin();
        let fast = (t * 0.031 + phase * 0.5).cos();
        series.push((slow * 0.73 + fast * 0.27) * scale);
    }
    series
}

/// Builds a deterministic pseudo-random `f64` series of length `len`.
///
/// `f64` twin of `gen_series_f32`: the same two weighted sinusoids
/// (0.73 / 0.27 mix), scaled by `scale` and phase-shifted by `phase`,
/// so both precisions are generated from identical formulas.
fn gen_series_f64(len: usize, scale: f64, phase: f64) -> Vec<f64> {
    let mut series = Vec::with_capacity(len);
    for idx in 0..len {
        let t = idx as f64;
        let slow = (t * 0.017 + phase).sin();
        let fast = (t * 0.031 + phase * 0.5).cos();
        series.push((slow * 0.73 + fast * 0.27) * scale);
    }
    series
}

/// Generates two sample `.rnn` model files — one f32, one f64 — for the same
/// fixed topology, and writes them to `sample_model/f32/sample.rnn` and
/// `sample_model/f64/sample.rnn`. Exits with status 1 (message on stderr)
/// if any build or filesystem step fails.
fn main() {
    // All fallible work lives in this closure so `?` can be used freely;
    // the process exit code is decided once at the bottom of `main`.
    let run = || -> Result<(), String> {
        // Layer widths: 32 inputs -> 64 -> 32 -> 8 outputs.
        let topology = [32usize, 64, 32, 8];
        // Total parameter counts summed over each consecutive (in, out)
        // layer pair. Saturating arithmetic avoids overflow panics for
        // pathological topologies.
        let mut weights_len = 0usize;
        let mut biases_len = 0usize;
        for pair in topology.windows(2) {
            weights_len = weights_len.saturating_add(pair[0].saturating_mul(pair[1]));
            biases_len = biases_len.saturating_add(pair[1]);
        }

        // Deterministic pseudo-random parameters and a runtime input vector,
        // in both precisions, so the generated files are reproducible.
        let weights_f32 = gen_series_f32(weights_len, 0.85, 0.11);
        let biases_f32 = gen_series_f32(biases_len, 0.35, 1.37);
        let weights_f64 = gen_series_f64(weights_len, 0.85, 0.11);
        let biases_f64 = gen_series_f64(biases_len, 0.35, 1.37);
        let runtime_input_f32 = gen_series_f32(topology[0], 1.5, 2.23);
        let runtime_input_f64 = gen_series_f64(topology[0], 1.5, 2.23);

        // Scratch space for the builders; the field values here are
        // placeholders — presumably the builders overwrite every entry they
        // use (TODO confirm against the rnn_api contract). Sized to at least
        // 2 entries even for degenerate topologies.
        let mut layer_specs_scratch = vec![
            LayerSpec::Dense(LayerDesc {
                input_size: 10,
                output_size: 50,
                weight_offset: 100,
                bias_offset: 1000,
                activation: ActivationKind::Identity,
            });
            topology.len().max(2)
        ];

        // Capacity estimates for the rmd1 scratch buffer.
        // NOTE(review): these assume ~20 bytes of header overhead plus ~20
        // bytes per layer, plus 4 (f32) or 8 (f64) bytes per parameter —
        // verify against the rmd1 serialization format.
        let rmd1_capacity_f32 = 20usize
            .saturating_add(topology.len().saturating_sub(1).saturating_mul(20))
            .saturating_add(weights_len.saturating_mul(4))
            .saturating_add(biases_len.saturating_mul(4));
        let rmd1_capacity_f64 = 20usize
            .saturating_add(topology.len().saturating_sub(1).saturating_mul(20))
            .saturating_add(weights_len.saturating_mul(8))
            .saturating_add(biases_len.saturating_mul(8));

        // One set of reusable buffers for both builds; sized to the larger
        // (f64) requirement, with floor minimums as extra safety margin.
        let mut rmd1_scratch = vec![0u8; rmd1_capacity_f64.max(rmd1_capacity_f32).max(1024)];
        let mut metadata_scratch = vec![0u8; 64 * 1024];
        let mut out = vec![0u8; 256 * 1024];

        // Build the f32 model; `used_f32` is the number of valid bytes the
        // builder wrote into `out`.
        let used_f32 = build_f32_with_input_into(
            &topology,
            ActivationConfig {
                hidden: ActivationKind::Relu,
                output: ActivationKind::Identity,
            },
            &weights_f32,
            &biases_f32,
            &runtime_input_f32,
            PackBuffers {
                layer_specs_scratch: &mut layer_specs_scratch,
                rmd1_scratch: &mut rmd1_scratch,
                metadata_scratch: &mut metadata_scratch,
                out_bytes: &mut out,
            },
        )
        .map_err(|e| format!("build_f32_into failed: {e:?}"))?;
        // Copy out the f32 payload before `out` is reused for the f64 build.
        let out_f32 = out[..used_f32].to_vec();

        // Same build with f64 parameters, reusing all scratch buffers.
        let used_f64 = build_f64_with_input_into(
            &topology,
            ActivationConfig {
                hidden: ActivationKind::Relu,
                output: ActivationKind::Identity,
            },
            &weights_f64,
            &biases_f64,
            &runtime_input_f64,
            PackBuffers {
                layer_specs_scratch: &mut layer_specs_scratch,
                rmd1_scratch: &mut rmd1_scratch,
                metadata_scratch: &mut metadata_scratch,
                out_bytes: &mut out,
            },
        )
        .map_err(|e| format!("build_f64_into failed: {e:?}"))?;
        let out_f64 = out[..used_f64].to_vec();

        // Sanity check: guard against a builder returning Ok(0). Build
        // errors themselves were already propagated by `?` above.
        if out_f32.is_empty() || out_f64.is_empty() {
            return Err(format!("build failed: {:?}", RnnApiError::CapacityTooSmall));
        }

        // Write each payload under its own precision-named subdirectory.
        let out_dir_f32 = Path::new("sample_model").join("f32");
        let out_dir_f64 = Path::new("sample_model").join("f64");
        fs::create_dir_all(&out_dir_f32)
            .map_err(|e| format!("mkdir {} failed: {e}", out_dir_f32.display()))?;
        fs::create_dir_all(&out_dir_f64)
            .map_err(|e| format!("mkdir {} failed: {e}", out_dir_f64.display()))?;
        fs::write(out_dir_f32.join("sample.rnn"), &out_f32)
            .map_err(|e| format!("write f32 failed: {e}"))?;
        fs::write(out_dir_f64.join("sample.rnn"), &out_f64)
            .map_err(|e| format!("write f64 failed: {e}"))?;

        Ok(())
    };

    // Any error becomes a non-zero exit with a single stderr line.
    if let Err(err) = run() {
        eprintln!("generate_sample_model failed: {err}");
        process::exit(1);
    }
}