#![cfg(feature = "neural_network")]
use approx::assert_abs_diff_eq;
use ndarray::{Array5, ArrayD};
use rustyml::neural_network::layer::TrainingParameters;
use rustyml::neural_network::layer::pooling_layer::average_pooling_3d::AveragePooling3D;
use rustyml::neural_network::loss_function::mean_squared_error::MeanSquaredError;
use rustyml::neural_network::neural_network_trait::Layer;
use rustyml::neural_network::optimizer::rms_prop::RMSprop;
use rustyml::neural_network::sequential::Sequential;
#[test]
fn test_average_pooling_3d_basic() {
    // A 2x2x2 pool over a (1, 2, 4, 4, 4) input halves each spatial dimension.
    let shape = vec![1, 2, 4, 4, 4];
    let pooling = AveragePooling3D::new((2, 2, 2), shape, None).unwrap();

    assert_eq!(pooling.layer_type(), "AveragePooling3D");
    // Pooling layers carry no trainable parameters.
    assert_eq!(pooling.param_count(), TrainingParameters::NoTrainable);
    assert_eq!(pooling.output_shape(), "(1, 2, 2, 2, 2)");
}
#[test]
fn test_average_pooling_3d_forward() {
    // Pooling a constant input must yield that same constant everywhere.
    let mut pooling = AveragePooling3D::new((2, 2, 2), vec![1, 1, 4, 4, 4], None).unwrap();
    let constant_input = Array5::from_elem((1, 1, 4, 4, 4), 8.0).into_dyn();

    let pooled = pooling.forward(&constant_input).unwrap();

    // Each spatial dimension is halved by the 2x2x2 window.
    assert_eq!(pooled.shape(), &[1, 1, 2, 2, 2]);
    for &v in pooled.iter() {
        assert_abs_diff_eq!(v, 8.0, epsilon = 1e-6);
    }
}
#[test]
fn test_average_pooling_3d_with_strides() {
    // Pool 2x2x2 with stride 3 over a 6x6x6 volume: full windows start at
    // indices 0 and 3 along each axis, so the spatial output is 2x2x2.
    let input_shape = vec![1, 1, 6, 6, 6];
    let mut layer = AveragePooling3D::new((2, 2, 2), input_shape, Some((3, 3, 3))).unwrap();
    let input =
        Array5::from_shape_fn((1, 1, 6, 6, 6), |(_, _, d, h, w)| (d + h + w) as f32).into_dyn();
    let output = layer.forward(&input).unwrap();
    assert_eq!(output.shape(), &[1, 1, 2, 2, 2]);
    // Input value at (d, h, w) is d + h + w, so the mean over a 2x2x2 window
    // starting at (d0, h0, w0) is d0 + h0 + w0 + 1.5 (each axis averages to
    // its origin plus 0.5). Window origins are 3*od along each pooled axis.
    for od in 0..2usize {
        for oh in 0..2usize {
            for ow in 0..2usize {
                let expected = (3 * (od + oh + ow)) as f32 + 1.5;
                assert_abs_diff_eq!(output[[0, 0, od, oh, ow]], expected, epsilon = 1e-6);
            }
        }
    }
}
#[test]
fn test_average_pooling_3d_sequential_model() {
    // A single 2x2x2 pooling layer with stride 2 halves every spatial
    // dimension of the (1, 2, 8, 8, 8) input.
    let pooling =
        AveragePooling3D::new((2, 2, 2), vec![1, 2, 8, 8, 8], Some((2, 2, 2))).unwrap();

    let mut model = Sequential::new();
    model.add(pooling);
    model.compile(
        RMSprop::new(0.001, 0.9, 1e-8).unwrap(),
        MeanSquaredError::new(),
    );

    // Values vary with position and channel so the pooled output is non-trivial.
    let input_data = Array5::from_shape_fn((1, 2, 8, 8, 8), |(_, c, d, h, w)| {
        ((d + h + w) as f32 * 0.1) + (c as f32 * 0.01)
    })
    .into_dyn();
    let target_data = Array5::ones((1, 2, 4, 4, 4)).into_dyn();

    model.summary();
    // Fitting exercises the forward/backward path even though the layer
    // itself has nothing to train.
    model.fit(&input_data, &target_data, 2).unwrap();

    let predictions = model.predict(&input_data).unwrap();
    assert_eq!(predictions.shape(), &[1, 2, 4, 4, 4]);
    println!("Shape of output after pooling: {:?}", predictions.shape());
}
#[test]
fn test_average_pooling_3d_multiple_channels() {
    // Batches and channels must be pooled independently of one another.
    let mut layer = AveragePooling3D::new((2, 2, 2), vec![2, 3, 4, 4, 4], None).unwrap();

    // Encode the batch and channel indices into the values so that pooled
    // results from different (batch, channel) slices cannot coincide.
    let input = Array5::from_shape_fn((2, 3, 4, 4, 4), |(b, c, d, h, w)| {
        (b * 100 + c * 10 + d + h + w) as f32
    })
    .into_dyn();

    let output = layer.forward(&input).unwrap();
    assert_eq!(output.shape(), &[2, 3, 2, 2, 2]);

    // Same spatial position, different batch or channel => different averages.
    let first = output[[0, 0, 0, 0, 0]];
    assert_ne!(first, output[[1, 0, 0, 0, 0]]);
    assert_ne!(first, output[[0, 1, 0, 0, 0]]);
}
#[test]
fn test_average_pooling_3d_edge_cases() {
    // With a 3x3x3 input, pool 2 and stride 2, only one full window fits per
    // axis; the trailing slice/row/column that cannot fill a window is dropped.
    let mut layer =
        AveragePooling3D::new((2, 2, 2), vec![1, 1, 3, 3, 3], Some((2, 2, 2))).unwrap();
    let ones = Array5::ones((1, 1, 3, 3, 3)).into_dyn();

    let output = layer.forward(&ones).unwrap();

    assert_eq!(output.shape(), &[1, 1, 1, 1, 1]);
    // Averaging a window of ones gives exactly one.
    assert_abs_diff_eq!(output[[0, 0, 0, 0, 0]], 1.0, epsilon = 1e-6);
}
#[test]
fn test_average_pooling_3d_gradient_flow() {
    // A unit upstream gradient at one output cell must spread evenly (1/8
    // each) over the eight input cells of its 2x2x2 pooling window, leaving
    // every input cell outside that window with zero gradient.
    let mut layer = AveragePooling3D::new((2, 2, 2), vec![1, 1, 4, 4, 4], None).unwrap();
    let input = Array5::from_shape_fn((1, 1, 4, 4, 4), |(_, _, d, h, w)| {
        (d * 16 + h * 4 + w) as f32
    })
    .into_dyn();
    let output = layer.forward(&input).unwrap();

    // Upstream gradient: 1.0 at the first output cell, 0.0 everywhere else.
    let mut grad_output = ArrayD::zeros(output.raw_dim());
    grad_output[[0, 0, 0, 0, 0]] = 1.0;
    let grad_input = layer.backward(&grad_output).unwrap();

    // Every cell of the first 2x2x2 window receives an equal 1/8 share.
    let share = 1.0 / 8.0;
    let window = (0..2).flat_map(|d| (0..2).flat_map(move |h| (0..2).map(move |w| (d, h, w))));
    for (d, h, w) in window {
        assert_abs_diff_eq!(grad_input[[0, 0, d, h, w]], share, epsilon = 1e-6);
    }
    // A cell outside the first window receives no gradient.
    assert_abs_diff_eq!(grad_input[[0, 0, 2, 2, 2]], 0.0, epsilon = 1e-6);
}