#![cfg(feature = "neural_network")]
use approx::assert_abs_diff_eq;
use ndarray::Array;
use rustyml::neural_network::Tensor;
use rustyml::neural_network::layer::TrainingParameters;
use rustyml::neural_network::layer::activation_layer::linear::Linear;
use rustyml::neural_network::layer::activation_layer::relu::ReLU;
use rustyml::neural_network::layer::activation_layer::sigmoid::Sigmoid;
use rustyml::neural_network::layer::activation_layer::tanh::Tanh;
use rustyml::neural_network::layer::dense::Dense;
use rustyml::neural_network::loss_function::mean_squared_error::MeanSquaredError;
use rustyml::neural_network::neural_network_trait::Layer;
use rustyml::neural_network::optimizer::sgd::SGD;
use rustyml::neural_network::sequential::Sequential;
#[test]
fn test_dense_forward_pass_dimensions() {
    // A Dense(4 -> 3) layer must map a (batch, 4) input to a (batch, 3) output.
    let mut layer = Dense::new(4, 3, ReLU::new()).unwrap();
    let batch_input = Array::ones((2, 4)).into_dyn();

    let result = layer.forward(&batch_input).unwrap();

    assert_eq!(result.shape(), &[2, 3]);
    println!(
        "Forward pass dimension test passed: {:?} -> {:?}",
        batch_input.shape(),
        result.shape()
    );
}
#[test]
fn test_dense_activation_functions() {
    // Verify that each activation keeps the Dense layer's output inside the
    // activation's mathematical range. Weights are randomly initialized, so
    // only range invariants are checked, never exact values.
    //
    // Fix over the previous version: the ReLU output was printed but never
    // asserted — its non-negativity invariant is now checked like the others.
    let input = Array::from_shape_vec((1, 2), vec![1.0, -1.0])
        .unwrap()
        .into_dyn();

    let mut dense_relu = Dense::new(2, 2, ReLU::new()).unwrap();
    let output_relu = dense_relu.forward(&input).unwrap();
    let mut dense_sigmoid = Dense::new(2, 2, Sigmoid::new()).unwrap();
    let output_sigmoid = dense_sigmoid.forward(&input).unwrap();
    let mut dense_tanh = Dense::new(2, 2, Tanh::new()).unwrap();
    let output_tanh = dense_tanh.forward(&input).unwrap();

    println!("ReLU output: {:?}", output_relu);
    println!("Sigmoid output: {:?}", output_sigmoid);
    println!("Tanh output: {:?}", output_tanh);

    // ReLU = max(0, x): output can never be negative.
    for value in output_relu.iter() {
        assert!(
            *value >= 0.0,
            "ReLU output should be non-negative: {}",
            value
        );
    }

    // Sigmoid maps every real input strictly into (0, 1).
    for value in output_sigmoid.iter() {
        assert!(
            *value > 0.0 && *value < 1.0,
            "Sigmoid output should be in (0,1) range: {}",
            value
        );
    }

    // Tanh maps every real input strictly into (-1, 1).
    for value in output_tanh.iter() {
        assert!(
            *value > -1.0 && *value < 1.0,
            "Tanh output should be in (-1,1) range: {}",
            value
        );
    }
}
#[test]
fn test_dense_learning_capability() {
    // The training data follows the linear rule y = 2*x1 + 3*x2 + 1, which a
    // single Dense layer with a linear activation can represent exactly, so
    // SGD should drive the loss close to zero.
    let features = Array::from_shape_vec((4, 2), vec![1.0, 2.0, 2.0, 1.0, 3.0, 3.0, 0.0, 1.0])
        .unwrap()
        .into_dyn();
    let targets = Array::from_shape_vec((4, 1), vec![9.0, 8.0, 16.0, 4.0])
        .unwrap()
        .into_dyn();

    let mut model = Sequential::new();
    model.add(Dense::new(2, 1, Linear::new()).unwrap());
    model.compile(SGD::new(0.01).unwrap(), MeanSquaredError::new());

    let predictions_before = model.predict(&features).unwrap();
    let loss_before = calculate_mse(&predictions_before, &targets);
    println!("Initial loss: {:.4}", loss_before);

    model.fit(&features, &targets, 1000).unwrap();

    let predictions_after = model.predict(&features).unwrap();
    let loss_after = calculate_mse(&predictions_after, &targets);
    println!("Final loss: {:.4}", loss_after);
    println!("Predictions: {:?}", predictions_after);
    println!("Ground truth: {:?}", targets);

    // Training must cut the loss by at least 90%.
    assert!(
        loss_after < loss_before * 0.1,
        "Model should learn patterns, loss reduced from {:.4} to {:.4}",
        loss_before,
        loss_after
    );

    // Each individual prediction should also land within 1.0 of its target.
    let predicted = predictions_after
        .as_standard_layout()
        .into_dimensionality::<ndarray::Ix2>()
        .unwrap();
    let expected = targets
        .as_standard_layout()
        .into_dimensionality::<ndarray::Ix2>()
        .unwrap();
    for row in 0..predicted.nrows() {
        assert_abs_diff_eq!(predicted[[row, 0]], expected[[row, 0]], epsilon = 1.0);
    }
}
#[test]
fn test_dense_batch_processing() {
    // A Dense layer must preserve the leading batch dimension for any batch
    // size while always producing the configured number of output features.
    for &batch_size in &[1_usize, 5, 10, 32] {
        // Fresh layer per batch size, matching the original per-iteration setup.
        let mut layer = Dense::new(4, 3, ReLU::new()).unwrap();
        let input = Array::ones((batch_size, 4)).into_dyn();

        let output = layer.forward(&input).unwrap();

        assert_eq!(
            output.shape()[0],
            batch_size,
            "Batch size should remain unchanged"
        );
        assert_eq!(
            output.shape()[1],
            3,
            "Output feature dimension should be correct"
        );
        println!(
            "Batch size {} test passed: {:?} -> {:?}",
            batch_size,
            input.shape(),
            output.shape()
        );
    }
}
#[test]
fn test_dense_parameter_count() {
    // A Dense layer holds in*out weights plus one bias per output unit, and
    // all of them must be reported as trainable.
    let dense = Dense::new(10, 5, ReLU::new()).unwrap();
    let expected_params = 10 * 5 + 5;
    assert_eq!(
        dense.param_count(),
        TrainingParameters::Trainable(expected_params)
    );
    println!(
        "Parameter count test passed: {} parameters",
        expected_params
    );

    // Spot-check the same formula across a spread of layer sizes.
    for &(input_dim, output_dim) in &[(1, 1), (100, 50), (784, 128), (512, 10)] {
        let layer = Dense::new(input_dim, output_dim, Linear::new()).unwrap();
        let expected = input_dim * output_dim + output_dim;
        assert_eq!(
            layer.param_count(),
            TrainingParameters::Trainable(expected),
            "({}, {}) layer parameter count error",
            input_dim,
            output_dim
        );
    }
}
/// Computes the mean squared error between two tensors.
///
/// Both tensors are viewed as standard-layout 2-D arrays; panics (via
/// `unwrap`) if either cannot be reshaped to 2-D or is empty — acceptable in
/// this test-only helper, where shapes are always `(rows, 1)` and non-empty.
fn calculate_mse(pred: &Tensor, target: &Tensor) -> f32 {
    let predicted = pred
        .as_standard_layout()
        .into_dimensionality::<ndarray::Ix2>()
        .unwrap();
    let expected = target
        .as_standard_layout()
        .into_dimensionality::<ndarray::Ix2>()
        .unwrap();
    (&predicted - &expected).mapv(|d| d * d).mean().unwrap()
}