use xn::{Backend, Result, Tensor, TensorView};
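// Each test body is written once as a generic `*_impl` function over `Backend`;
// this macro then generates a `_cpu` test and, behind the `cuda` feature, a
// `_cuda` test, using `paste` to concatenate the test names.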
macro_rules! test_both_backends {
($test_name:ident, $test_fn:ident) => {
paste::paste! {
#[test]
fn [<$test_name _cpu>]() -> Result<()> {
$test_fn(&xn::CPU)
}
#[cfg(feature = "cuda")]
#[test]
fn [<$test_name _cuda>]() -> Result<()> {
let device = xn::cuda_backend::Device::new(0)?;
$test_fn(&device)
}
}
};
}
fn test_cat_dim0_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (2, 3), dev)?;
let b: Tensor<f32, B> = Tensor::from_vec(vec![7., 8., 9., 10., 11., 12.], (2, 3), dev)?;
let c = Tensor::cat(&[&a, &b], 0)?;
assert_eq!(c.dims(), &[4, 3]);
assert_eq!(c.to_vec()?, vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]);
Ok(())
}
test_both_backends!(test_cat_dim0, test_cat_dim0_impl);
fn test_cat_dim1_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (2, 3), dev)?;
let b: Tensor<f32, B> = Tensor::from_vec(vec![7., 8., 9., 10., 11., 12.], (2, 3), dev)?;
let c = Tensor::cat(&[&a, &b], 1)?;
assert_eq!(c.dims(), &[2, 6]);
assert_eq!(c.to_vec()?, vec![1., 2., 3., 7., 8., 9., 4., 5., 6., 10., 11., 12.]);
Ok(())
}
test_both_backends!(test_cat_dim1, test_cat_dim1_impl);
fn test_cat_3d_dim1_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec((1..=12).map(|x| x as f32).collect(), (2, 2, 3), dev)?;
let b: Tensor<f32, B> =
Tensor::from_vec((13..=24).map(|x| x as f32).collect(), (2, 2, 3), dev)?;
let c = Tensor::cat(&[&a, &b], 1)?;
assert_eq!(c.dims(), &[2, 4, 3]);
assert_eq!(
c.to_vec()?,
vec![
1., 2., 3., 4., 5., 6., 13., 14., 15., 16., 17., 18.,
7., 8., 9., 10., 11., 12., 19., 20., 21., 22., 23., 24.,
]
);
Ok(())
}
test_both_backends!(test_cat_3d_dim1, test_cat_3d_dim1_impl);
fn test_reshape_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (2, 3), dev)?;
let b = a.reshape((3, 2))?;
assert_eq!(b.dims(), &[3, 2]);
assert_eq!(b.to_vec()?, vec![1., 2., 3., 4., 5., 6.]);
let c = a.reshape((6,))?;
assert_eq!(c.dims(), &[6]);
let d = a.reshape((1, 6))?;
assert_eq!(d.dims(), &[1, 6]);
let e = a.reshape((1, 2, 3))?;
assert_eq!(e.dims(), &[1, 2, 3]);
Ok(())
}
test_both_backends!(test_reshape, test_reshape_impl);
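// `()` in a reshape target acts as a hole: the one unspecified dimension is
// inferred from the element count, like `-1` in NumPy/PyTorch reshape.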
fn test_reshape_with_hole_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (2, 3), dev)?;
let b = a.reshape((3, ()))?;
assert_eq!(b.dims(), &[3, 2]);
let c = a.reshape(((), 2))?;
assert_eq!(c.dims(), &[3, 2]);
let d = a.reshape((1, (), 3))?;
assert_eq!(d.dims(), &[1, 2, 3]);
Ok(())
}
test_both_backends!(test_reshape_with_hole, test_reshape_with_hole_impl);
fn test_index_select_dim0_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> =
Tensor::from_vec(vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.], (4, 3), dev)?;
let idx = Tensor::from_vec(vec![0i64, 2, 3], 3, dev)?;
let b = a.index_select(&idx, 0)?;
assert_eq!(b.dims(), &[3, 3]);
assert_eq!(b.to_vec()?, vec![1., 2., 3., 7., 8., 9., 10., 11., 12.]);
let idx = Tensor::from_vec(vec![1i64, 1, 0], 3, dev)?;
let c = a.index_select(&idx, 0)?;
assert_eq!(c.dims(), &[3, 3]);
assert_eq!(c.to_vec()?, vec![4., 5., 6., 4., 5., 6., 1., 2., 3.]);
Ok(())
}
test_both_backends!(test_index_select_dim0, test_index_select_dim0_impl);
fn test_index_select_dim1_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6., 7., 8.], (2, 4), dev)?;
let idx = Tensor::from_vec(vec![0i64, 2], 2, dev)?;
let b = a.index_select(&idx, 1)?;
assert_eq!(b.dims(), &[2, 2]);
assert_eq!(b.to_vec()?, vec![1., 3., 5., 7.]);
Ok(())
}
test_both_backends!(test_index_select_dim1, test_index_select_dim1_impl);
fn test_index_select_3d_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec((1..=12).map(|x| x as f32).collect(), (2, 3, 2), dev)?;
let idx = Tensor::from_vec(vec![0i64, 2], 2, dev)?;
let b = a.index_select(&idx, 1)?;
assert_eq!(b.dims(), &[2, 2, 2]);
assert_eq!(b.to_vec()?, vec![1., 2., 5., 6., 7., 8., 11., 12.]);
Ok(())
}
test_both_backends!(test_index_select_3d, test_index_select_3d_impl);
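// Indices produced by narrowing a larger index tensor must still work with
// index_select; `contiguous()` re-packs the narrowed view before use.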
fn test_index_select_narrowed_indices_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![10., 20., 30., 40., 50.], (5, 1), dev)?;
let all_ids = Tensor::from_vec(vec![0i64, 1, 2, 3, 4], 5, dev)?;
let narrowed_ids = all_ids.narrow(0, 0..2)?.contiguous()?;
assert_eq!(narrowed_ids.dims(), &[2]);
let b = a.index_select(&narrowed_ids, 0)?;
assert_eq!(b.dims(), &[2, 1]);
assert_eq!(b.to_vec()?, vec![10., 20.]);
Ok(())
}
test_both_backends!(test_index_select_narrowed_indices, test_index_select_narrowed_indices_impl);
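// The min/max/argmin/argmax tests below all share the same 3x4 input:
// [[1, 5, 3, 4], [8, 2, 7, 6], [9, 0, 1, 2]].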
fn test_max_dim0_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> =
Tensor::from_vec(vec![1., 5., 3., 4., 8., 2., 7., 6., 9., 0., 1., 2.], (3, 4), dev)?;
let b = a.max(0)?;
assert_eq!(b.dims(), &[4]);
assert_eq!(b.to_vec()?, vec![9., 5., 7., 6.]);
Ok(())
}
test_both_backends!(test_max_dim0, test_max_dim0_impl);
fn test_max_dim1_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> =
Tensor::from_vec(vec![1., 5., 3., 4., 8., 2., 7., 6., 9., 0., 1., 2.], (3, 4), dev)?;
let b = a.max(1)?;
assert_eq!(b.dims(), &[3]);
assert_eq!(b.to_vec()?, vec![5., 8., 9.]);
Ok(())
}
test_both_backends!(test_max_dim1, test_max_dim1_impl);
fn test_min_dim0_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> =
Tensor::from_vec(vec![1., 5., 3., 4., 8., 2., 7., 6., 9., 0., 1., 2.], (3, 4), dev)?;
let b = a.min(0)?;
assert_eq!(b.dims(), &[4]);
assert_eq!(b.to_vec()?, vec![1., 0., 1., 2.]);
Ok(())
}
test_both_backends!(test_min_dim0, test_min_dim0_impl);
fn test_min_dim1_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> =
Tensor::from_vec(vec![1., 5., 3., 4., 8., 2., 7., 6., 9., 0., 1., 2.], (3, 4), dev)?;
let b = a.min(1)?;
assert_eq!(b.dims(), &[3]);
assert_eq!(b.to_vec()?, vec![1., 2., 0.]);
Ok(())
}
test_both_backends!(test_min_dim1, test_min_dim1_impl);
fn test_argmin_dim0_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> =
Tensor::from_vec(vec![1., 5., 3., 4., 8., 2., 7., 6., 9., 0., 1., 2.], (3, 4), dev)?;
let b = a.argmin(0)?;
assert_eq!(b.dims(), &[4]);
assert_eq!(b.to_vec()?, vec![0i64, 2, 2, 2]);
Ok(())
}
test_both_backends!(test_argmin_dim0, test_argmin_dim0_impl);
fn test_argmin_dim1_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> =
Tensor::from_vec(vec![1., 5., 3., 4., 8., 2., 7., 6., 9., 0., 1., 2.], (3, 4), dev)?;
let b = a.argmin(1)?;
assert_eq!(b.dims(), &[3]);
assert_eq!(b.to_vec()?, vec![0i64, 1, 1]);
Ok(())
}
test_both_backends!(test_argmin_dim1, test_argmin_dim1_impl);
fn test_argmax_dim0_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> =
Tensor::from_vec(vec![1., 5., 3., 4., 8., 2., 7., 6., 9., 0., 1., 2.], (3, 4), dev)?;
let b = a.argmax(0)?;
assert_eq!(b.dims(), &[4]);
assert_eq!(b.to_vec()?, vec![2i64, 0, 1, 1]);
Ok(())
}
test_both_backends!(test_argmax_dim0, test_argmax_dim0_impl);
fn test_argmax_dim1_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> =
Tensor::from_vec(vec![1., 5., 3., 4., 8., 2., 7., 6., 9., 0., 1., 2.], (3, 4), dev)?;
let b = a.argmax(1)?;
assert_eq!(b.dims(), &[3]);
assert_eq!(b.to_vec()?, vec![1i64, 0, 0]);
Ok(())
}
test_both_backends!(test_argmax_dim1, test_argmax_dim1_impl);
fn test_max_3d_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec((1..=12).map(|x| x as f32).collect(), (2, 3, 2), dev)?;
let b = a.max(1)?;
assert_eq!(b.dims(), &[2, 2]);
assert_eq!(b.to_vec()?, vec![5., 6., 11., 12.]);
Ok(())
}
test_both_backends!(test_max_3d, test_max_3d_impl);
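// broadcast_* binary ops follow NumPy-style broadcasting: shapes align from
// the trailing dimension, and size-1 dimensions stretch to match.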
fn test_broadcast_add_same_shape_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4.], (2, 2), dev)?;
let b: Tensor<f32, B> = Tensor::from_vec(vec![10., 20., 30., 40.], (2, 2), dev)?;
let c = a.broadcast_add(&b)?;
assert_eq!(c.dims(), &[2, 2]);
assert_eq!(c.to_vec()?, vec![11., 22., 33., 44.]);
Ok(())
}
test_both_backends!(test_broadcast_add_same_shape, test_broadcast_add_same_shape_impl);
fn test_broadcast_add_1d_to_2d_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (2, 3), dev)?;
let b: Tensor<f32, B> = Tensor::from_vec(vec![10., 20., 30.], (3,), dev)?;
let c = a.broadcast_add(&b)?;
assert_eq!(c.dims(), &[2, 3]);
assert_eq!(c.to_vec()?, vec![11., 22., 33., 14., 25., 36.]);
Ok(())
}
test_both_backends!(test_broadcast_add_1d_to_2d, test_broadcast_add_1d_to_2d_impl);
fn test_broadcast_mul_column_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (2, 3), dev)?;
let b: Tensor<f32, B> = Tensor::from_vec(vec![2., 3.], (2, 1), dev)?;
let c = a.broadcast_mul(&b)?;
assert_eq!(c.dims(), &[2, 3]);
assert_eq!(c.to_vec()?, vec![2., 4., 6., 12., 15., 18.]);
Ok(())
}
test_both_backends!(test_broadcast_mul_column, test_broadcast_mul_column_impl);
fn test_broadcast_sub_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![10., 20., 30., 40., 50., 60.], (2, 3), dev)?;
let b: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3.], (3,), dev)?;
let c = a.broadcast_sub(&b)?;
assert_eq!(c.dims(), &[2, 3]);
assert_eq!(c.to_vec()?, vec![9., 18., 27., 39., 48., 57.]);
Ok(())
}
test_both_backends!(test_broadcast_sub, test_broadcast_sub_impl);
fn test_broadcast_div_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![2., 4., 6., 9., 12., 15.], (2, 3), dev)?;
let b: Tensor<f32, B> = Tensor::from_vec(vec![2., 3.], (2, 1), dev)?;
let c = a.broadcast_div(&b)?;
assert_eq!(c.dims(), &[2, 3]);
assert_eq!(c.to_vec()?, vec![1., 2., 3., 3., 4., 5.]);
Ok(())
}
test_both_backends!(test_broadcast_div, test_broadcast_div_impl);
fn test_broadcast_3d_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec((1..=24).map(|x| x as f32).collect(), (2, 3, 4), dev)?;
let b: Tensor<f32, B> = Tensor::from_vec(vec![100., 200., 300., 400.], (4,), dev)?;
let c = a.broadcast_add(&b)?;
assert_eq!(c.dims(), &[2, 3, 4]);
let c_vec = c.to_vec()?;
assert_eq!(c_vec[0], 101.);
assert_eq!(c_vec[1], 202.);
assert_eq!(c_vec[4], 105.);
Ok(())
}
test_both_backends!(test_broadcast_3d, test_broadcast_3d_impl);
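// unsqueeze inserts a size-1 axis; the flat data order is unchanged, which is
// what the `to_vec` comparisons below rely on.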
fn test_unsqueeze_dim0_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec((1..=12).map(|x| x as f32).collect(), (3, 4), dev)?;
let b = a.unsqueeze(0)?;
assert_eq!(b.dims(), &[1, 3, 4]);
assert_eq!(b.to_vec()?, a.to_vec()?);
Ok(())
}
test_both_backends!(test_unsqueeze_dim0, test_unsqueeze_dim0_impl);
fn test_unsqueeze_dim1_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec((1..=12).map(|x| x as f32).collect(), (3, 4), dev)?;
let b = a.unsqueeze(1)?;
assert_eq!(b.dims(), &[3, 1, 4]);
assert_eq!(b.to_vec()?, a.to_vec()?);
Ok(())
}
test_both_backends!(test_unsqueeze_dim1, test_unsqueeze_dim1_impl);
fn test_unsqueeze_dim_last_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec((1..=12).map(|x| x as f32).collect(), (3, 4), dev)?;
let b = a.unsqueeze(2)?;
assert_eq!(b.dims(), &[3, 4, 1]);
assert_eq!(b.to_vec()?, a.to_vec()?);
Ok(())
}
test_both_backends!(test_unsqueeze_dim_last, test_unsqueeze_dim_last_impl);
fn test_unsqueeze_1d_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4.], (4,), dev)?;
let b = a.unsqueeze(0)?;
assert_eq!(b.dims(), &[1, 4]);
let c = a.unsqueeze(1)?;
assert_eq!(c.dims(), &[4, 1]);
Ok(())
}
test_both_backends!(test_unsqueeze_1d, test_unsqueeze_1d_impl);
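// pad_with_zeros(dim, left, right) zero-pads a single dimension.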
fn test_pad_with_zeros_1d_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4.], (4,), dev)?;
let b = a.pad_with_zeros(0, 2, 3)?;
assert_eq!(b.dims(), &[9]);
assert_eq!(b.to_vec()?, vec![0., 0., 1., 2., 3., 4., 0., 0., 0.]);
Ok(())
}
test_both_backends!(test_pad_with_zeros_1d, test_pad_with_zeros_1d_impl);
fn test_pad_with_zeros_2d_dim0_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (2, 3), dev)?;
let b = a.pad_with_zeros(0, 1, 1)?;
assert_eq!(b.dims(), &[4, 3]);
assert_eq!(b.to_vec()?, vec![0., 0., 0., 1., 2., 3., 4., 5., 6., 0., 0., 0.]);
Ok(())
}
test_both_backends!(test_pad_with_zeros_2d_dim0, test_pad_with_zeros_2d_dim0_impl);
fn test_pad_with_zeros_2d_dim1_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (2, 3), dev)?;
let b = a.pad_with_zeros(1, 1, 2)?;
assert_eq!(b.dims(), &[2, 6]);
assert_eq!(b.to_vec()?, vec![0., 1., 2., 3., 0., 0., 0., 4., 5., 6., 0., 0.]);
Ok(())
}
test_both_backends!(test_pad_with_zeros_2d_dim1, test_pad_with_zeros_2d_dim1_impl);
fn test_pad_with_zeros_3d_impl<B: Backend>(dev: &B) -> Result<()> {
let data: Vec<f32> = (1..=12).map(|x| x as f32).collect();
let a: Tensor<f32, B> = Tensor::from_vec(data, (2, 2, 3), dev)?;
let b = a.pad_with_zeros(1, 1, 0)?;
assert_eq!(b.dims(), &[2, 3, 3]);
assert_eq!(
b.to_vec()?,
vec![0., 0., 0., 1., 2., 3., 4., 5., 6., 0., 0., 0., 7., 8., 9., 10., 11., 12.]
);
Ok(())
}
test_both_backends!(test_pad_with_zeros_3d, test_pad_with_zeros_3d_impl);
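// Based on the stride/padding tests below, the conv1d argument order is
// (kernel, bias, stride, padding, dilation, groups), with the kernel laid out
// as (out_channels, in_channels, kernel_size).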
fn test_conv1d_simple_impl<B: Backend>(dev: &B) -> Result<()> {
let input: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5.], (1, 1, 5), dev)?;
let kernel: Tensor<f32, B> = Tensor::from_vec(vec![1., 0., -1.], (1, 1, 3), dev)?;
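// Valid (no padding) correlation of [1,2,3,4,5] with [1,0,-1]:
// 1-3 = -2, 2-4 = -2, 3-5 = -2.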
let output = input.conv1d(&kernel, None, 1, 0, 1, 1)?;
assert_eq!(output.dims(), &[1, 1, 3]);
assert_eq!(output.to_vec()?, vec![-2., -2., -2.]);
Ok(())
}
test_both_backends!(test_conv1d_simple, test_conv1d_simple_impl);
fn test_conv1d_with_padding_impl<B: Backend>(dev: &B) -> Result<()> {
let input: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4.], (1, 1, 4), dev)?;
let kernel: Tensor<f32, B> = Tensor::from_vec(vec![1., 1., 1.], (1, 1, 3), dev)?;
let output = input.conv1d(&kernel, None, 1, 1, 1, 1)?;
assert_eq!(output.dims(), &[1, 1, 4]);
assert_eq!(output.to_vec()?, vec![3., 6., 9., 7.]);
Ok(())
}
test_both_backends!(test_conv1d_with_padding, test_conv1d_with_padding_impl);
fn test_conv1d_with_stride_impl<B: Backend>(dev: &B) -> Result<()> {
let input: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (1, 1, 6), dev)?;
let kernel: Tensor<f32, B> = Tensor::from_vec(vec![1., 1.], (1, 1, 2), dev)?;
let output = input.conv1d(&kernel, None, 2, 0, 1, 1)?;
assert_eq!(output.dims(), &[1, 1, 3]);
assert_eq!(output.to_vec()?, vec![3., 7., 11.]);
Ok(())
}
test_both_backends!(test_conv1d_with_stride, test_conv1d_with_stride_impl);
fn test_conv1d_with_bias_impl<B: Backend>(dev: &B) -> Result<()> {
let input: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5.], (1, 1, 5), dev)?;
let kernel: Tensor<f32, B> = Tensor::from_vec(vec![1., 1., 1.], (1, 1, 3), dev)?;
let bias: Tensor<f32, B> = Tensor::from_vec(vec![10.], (1,), dev)?;
let output = input.conv1d(&kernel, Some(&bias), 1, 0, 1, 1)?;
assert_eq!(output.dims(), &[1, 1, 3]);
assert_eq!(output.to_vec()?, vec![16., 19., 22.]);
Ok(())
}
test_both_backends!(test_conv1d_with_bias, test_conv1d_with_bias_impl);
fn test_conv1d_multi_channel_impl<B: Backend>(dev: &B) -> Result<()> {
let input: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (1, 2, 3), dev)?;
let kernel: Tensor<f32, B> =
Tensor::from_vec(vec![1., 1., 0., 0., 0., 0., 1., 1.], (2, 2, 2), dev)?;
let output = input.conv1d(&kernel, None, 1, 0, 1, 1)?;
assert_eq!(output.dims(), &[1, 2, 2]);
assert_eq!(output.to_vec()?, vec![3., 5., 9., 11.]);
Ok(())
}
test_both_backends!(test_conv1d_multi_channel, test_conv1d_multi_channel_impl);
fn test_conv1d_batch_simple_impl<B: Backend>(dev: &B) -> Result<()> {
let input: Tensor<f32, B> =
Tensor::from_vec(vec![1., 2., 3., 4., 5., 6., 7., 8.], (2, 1, 4), dev)?;
let kernel: Tensor<f32, B> = Tensor::from_vec(vec![1., 1.], (1, 1, 2), dev)?;
let output = input.conv1d(&kernel, None, 1, 0, 1, 1)?;
assert_eq!(output.dims(), &[2, 1, 3]);
assert_eq!(output.to_vec()?, vec![3., 5., 7., 11., 13., 15.]);
Ok(())
}
test_both_backends!(test_conv1d_batch_simple, test_conv1d_batch_simple_impl);
fn test_conv1d_batch_multi_channel_impl<B: Backend>(dev: &B) -> Result<()> {
#[rustfmt::skip]
let input: Tensor<f32, B> = Tensor::from_vec(vec![
1., 2., 3., 4., 5., 6.,
7., 8., 9., 10., 11., 12.,
], (2, 2, 3), dev)?;
#[rustfmt::skip]
let kernel: Tensor<f32, B> = Tensor::from_vec(vec![
1., 0., 0., 1.,
0., 1., 1., 0.,
], (2, 2, 2), dev)?;
let output = input.conv1d(&kernel, None, 1, 0, 1, 1)?;
assert_eq!(output.dims(), &[2, 2, 2]);
assert_eq!(output.to_vec()?, vec![6., 8., 6., 8., 18., 20., 18., 20.]);
Ok(())
}
test_both_backends!(test_conv1d_batch_multi_channel, test_conv1d_batch_multi_channel_impl);
fn test_conv1d_batch_with_padding_impl<B: Backend>(dev: &B) -> Result<()> {
#[rustfmt::skip]
let input: Tensor<f32, B> = Tensor::from_vec(vec![
1., 2., 3.,
4., 5., 6.,
7., 8., 9.,
], (3, 1, 3), dev)?;
let kernel: Tensor<f32, B> = Tensor::from_vec(vec![1., 1., 1.], (1, 1, 3), dev)?;
let output = input.conv1d(&kernel, None, 1, 1, 1, 1)?;
assert_eq!(output.dims(), &[3, 1, 3]);
assert_eq!(output.to_vec()?, vec![3., 6., 5., 9., 15., 11., 15., 24., 17.]);
Ok(())
}
test_both_backends!(test_conv1d_batch_with_padding, test_conv1d_batch_with_padding_impl);
fn test_conv1d_batch_with_stride_impl<B: Backend>(dev: &B) -> Result<()> {
#[rustfmt::skip]
let input: Tensor<f32, B> = Tensor::from_vec(vec![
1., 2., 3., 4., 5., 6.,
7., 8., 9., 10., 11., 12.,
], (2, 1, 6), dev)?;
let kernel: Tensor<f32, B> = Tensor::from_vec(vec![1., -1.], (1, 1, 2), dev)?;
let output = input.conv1d(&kernel, None, 2, 0, 1, 1)?;
assert_eq!(output.dims(), &[2, 1, 3]);
assert_eq!(output.to_vec()?, vec![-1., -1., -1., -1., -1., -1.]);
Ok(())
}
test_both_backends!(test_conv1d_batch_with_stride, test_conv1d_batch_with_stride_impl);
fn test_conv1d_batch_with_bias_impl<B: Backend>(dev: &B) -> Result<()> {
#[rustfmt::skip]
let input: Tensor<f32, B> = Tensor::from_vec(vec![
1., 2., 3.,
4., 5., 6.,
], (2, 1, 3), dev)?;
let kernel: Tensor<f32, B> = Tensor::from_vec(vec![1., 1., 1., -1.], (2, 1, 2), dev)?;
let bias: Tensor<f32, B> = Tensor::from_vec(vec![10., 100.], (2,), dev)?;
let output = input.conv1d(&kernel, Some(&bias), 1, 0, 1, 1)?;
assert_eq!(output.dims(), &[2, 2, 2]);
assert_eq!(output.to_vec()?, vec![13., 15., 99., 99., 19., 21., 99., 99.]);
Ok(())
}
test_both_backends!(test_conv1d_batch_with_bias, test_conv1d_batch_with_bias_impl);
fn test_conv1d_batch_large_impl<B: Backend>(dev: &B) -> Result<()> {
let batch = 4;
let in_ch = 3;
let out_ch = 2;
let length = 8;
let k_size = 3;
let input_data: Vec<f32> = (0..batch * in_ch * length).map(|i| i as f32).collect();
let input: Tensor<f32, B> = Tensor::from_vec(input_data.clone(), (batch, in_ch, length), dev)?;
let mut kernel_data = vec![0.0f32; out_ch * in_ch * k_size];
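// Output channel 0 taps position 0 of every input channel; output channel 1
// taps position 2, so each expected value is a plain sum over input channels.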
for c in 0..in_ch {
kernel_data[c * k_size] = 1.0;
kernel_data[in_ch * k_size + c * k_size + 2] = 1.0;
}
let kernel: Tensor<f32, B> = Tensor::from_vec(kernel_data, (out_ch, in_ch, k_size), dev)?;
let output = input.conv1d(&kernel, None, 1, 0, 1, 1)?;
let out_length = length - k_size + 1;
assert_eq!(output.dims(), &[batch, out_ch, out_length]);
let result = output.to_vec()?;
for b in 0..batch {
for pos in 0..out_length {
let mut expected_ch0 = 0.0f32;
let mut expected_ch1 = 0.0f32;
for c in 0..in_ch {
let base = b * in_ch * length + c * length;
expected_ch0 += input_data[base + pos];
expected_ch1 += input_data[base + pos + 2];
}
let idx_ch0 = b * out_ch * out_length + pos;
let idx_ch1 = b * out_ch * out_length + out_length + pos;
assert!(
(result[idx_ch0] - expected_ch0).abs() < 1e-4,
"batch {b} out_ch 0 pos {pos}: expected {expected_ch0}, got {}",
result[idx_ch0]
);
assert!(
(result[idx_ch1] - expected_ch1).abs() < 1e-4,
"batch {b} out_ch 1 pos {pos}: expected {expected_ch1}, got {}",
result[idx_ch1]
);
}
}
Ok(())
}
test_both_backends!(test_conv1d_batch_large, test_conv1d_batch_large_impl);
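// For conv_transpose1d the trailing arguments read as
// (stride, padding, output_padding, groups), judging by the stride test below.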
fn test_conv_transpose1d_simple_impl<B: Backend>(dev: &B) -> Result<()> {
let input: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3.], (1, 1, 3), dev)?;
let kernel: Tensor<f32, B> = Tensor::from_vec(vec![1., 1., 1.], (1, 1, 3), dev)?;
let output = input.conv_transpose1d(&kernel, None, 1, 0, 0, 1)?;
assert_eq!(output.dims(), &[1, 1, 5]);
assert_eq!(output.to_vec()?, vec![1., 3., 6., 5., 3.]);
Ok(())
}
test_both_backends!(test_conv_transpose1d_simple, test_conv_transpose1d_simple_impl);
fn test_conv_transpose1d_with_stride_impl<B: Backend>(dev: &B) -> Result<()> {
let input: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3.], (1, 1, 3), dev)?;
let kernel: Tensor<f32, B> = Tensor::from_vec(vec![1., 1.], (1, 1, 2), dev)?;
let output = input.conv_transpose1d(&kernel, None, 2, 0, 0, 1)?;
assert_eq!(output.dims(), &[1, 1, 6]);
assert_eq!(output.to_vec()?, vec![1., 1., 2., 2., 3., 3.]);
Ok(())
}
test_both_backends!(test_conv_transpose1d_with_stride, test_conv_transpose1d_with_stride_impl);
fn test_conv_transpose1d_with_bias_impl<B: Backend>(dev: &B) -> Result<()> {
let input: Tensor<f32, B> = Tensor::from_vec(vec![1., 2.], (1, 1, 2), dev)?;
let kernel: Tensor<f32, B> = Tensor::from_vec(vec![1., 1.], (1, 1, 2), dev)?;
let bias: Tensor<f32, B> = Tensor::from_vec(vec![5.], (1,), dev)?;
let output = input.conv_transpose1d(&kernel, Some(&bias), 1, 0, 0, 1)?;
assert_eq!(output.dims(), &[1, 1, 3]);
assert_eq!(output.to_vec()?, vec![6., 8., 7.]);
Ok(())
}
test_both_backends!(test_conv_transpose1d_with_bias, test_conv_transpose1d_with_bias_impl);
fn test_conv_transpose1d_batch_simple_impl<B: Backend>(dev: &B) -> Result<()> {
#[rustfmt::skip]
let input: Tensor<f32, B> = Tensor::from_vec(vec![
1., 2., 3.,
4., 5., 6.,
], (2, 1, 3), dev)?;
let kernel: Tensor<f32, B> = Tensor::from_vec(vec![1., 1., 1.], (1, 1, 3), dev)?;
let output = input.conv_transpose1d(&kernel, None, 1, 0, 0, 1)?;
assert_eq!(output.dims(), &[2, 1, 5]);
assert_eq!(output.to_vec()?, vec![1., 3., 6., 5., 3., 4., 9., 15., 11., 6.]);
Ok(())
}
test_both_backends!(test_conv_transpose1d_batch_simple, test_conv_transpose1d_batch_simple_impl);
fn test_conv_transpose1d_batch_multi_channel_impl<B: Backend>(dev: &B) -> Result<()> {
#[rustfmt::skip]
let input: Tensor<f32, B> = Tensor::from_vec(vec![
1., 2., 3., 4.,
5., 6., 7., 8.,
], (2, 2, 2), dev)?;
let kernel: Tensor<f32, B> = Tensor::from_vec(vec![1., 1., 1., 1.], (2, 1, 2), dev)?;
let output = input.conv_transpose1d(&kernel, None, 1, 0, 0, 1)?;
assert_eq!(output.dims(), &[2, 1, 3]);
assert_eq!(output.to_vec()?, vec![4., 10., 6., 12., 26., 14.]);
Ok(())
}
test_both_backends!(
test_conv_transpose1d_batch_multi_channel,
test_conv_transpose1d_batch_multi_channel_impl
);
fn test_conv_transpose1d_batch_with_stride_impl<B: Backend>(dev: &B) -> Result<()> {
#[rustfmt::skip]
let input: Tensor<f32, B> = Tensor::from_vec(vec![
1., 2.,
3., 4.,
5., 6.,
], (3, 1, 2), dev)?;
let kernel: Tensor<f32, B> = Tensor::from_vec(vec![1., 1.], (1, 1, 2), dev)?;
let output = input.conv_transpose1d(&kernel, None, 2, 0, 0, 1)?;
assert_eq!(output.dims(), &[3, 1, 4]);
assert_eq!(output.to_vec()?, vec![1., 1., 2., 2., 3., 3., 4., 4., 5., 5., 6., 6.]);
Ok(())
}
test_both_backends!(
test_conv_transpose1d_batch_with_stride,
test_conv_transpose1d_batch_with_stride_impl
);
fn test_conv_transpose1d_batch_with_bias_impl<B: Backend>(dev: &B) -> Result<()> {
#[rustfmt::skip]
let input: Tensor<f32, B> = Tensor::from_vec(vec![
1., 2.,
3., 4.,
], (2, 1, 2), dev)?;
let kernel: Tensor<f32, B> = Tensor::from_vec(vec![1., 1., 2., 2.], (1, 2, 2), dev)?;
let bias: Tensor<f32, B> = Tensor::from_vec(vec![10., 100.], (2,), dev)?;
let output = input.conv_transpose1d(&kernel, Some(&bias), 1, 0, 0, 1)?;
assert_eq!(output.dims(), &[2, 2, 3]);
assert_eq!(
output.to_vec()?,
vec![11., 13., 12., 102., 106., 104., 13., 17., 14., 106., 114., 108.]
);
Ok(())
}
test_both_backends!(
test_conv_transpose1d_batch_with_bias,
test_conv_transpose1d_batch_with_bias_impl
);
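// pad_with_same replicates the edge values along the padded dimension
// (PyTorch-style "replicate" padding), unlike pad_with_zeros above.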
fn test_pad_with_same_1d_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4.], (4,), dev)?;
let b = a.pad_with_same(0, 2, 3)?;
assert_eq!(b.dims(), &[9]);
assert_eq!(b.to_vec()?, vec![1., 1., 1., 2., 3., 4., 4., 4., 4.]);
Ok(())
}
test_both_backends!(test_pad_with_same_1d, test_pad_with_same_1d_impl);
fn test_pad_with_same_2d_dim0_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (2, 3), dev)?;
let b = a.pad_with_same(0, 1, 1)?;
assert_eq!(b.dims(), &[4, 3]);
assert_eq!(b.to_vec()?, vec![1., 2., 3., 1., 2., 3., 4., 5., 6., 4., 5., 6.]);
Ok(())
}
test_both_backends!(test_pad_with_same_2d_dim0, test_pad_with_same_2d_dim0_impl);
fn test_pad_with_same_2d_dim1_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (2, 3), dev)?;
let b = a.pad_with_same(1, 1, 2)?;
assert_eq!(b.dims(), &[2, 6]);
assert_eq!(b.to_vec()?, vec![1., 1., 2., 3., 3., 3., 4., 4., 5., 6., 6., 6.]);
Ok(())
}
test_both_backends!(test_pad_with_same_2d_dim1, test_pad_with_same_2d_dim1_impl);
fn test_pad_with_same_3d_impl<B: Backend>(dev: &B) -> Result<()> {
let data: Vec<f32> = (1..=8).map(|x| x as f32).collect();
let a: Tensor<f32, B> = Tensor::from_vec(data, (2, 2, 2), dev)?;
let b = a.pad_with_same(1, 1, 1)?;
assert_eq!(b.dims(), &[2, 4, 2]);
assert_eq!(b.to_vec()?, vec![1., 2., 1., 2., 3., 4., 3., 4., 5., 6., 5., 6., 7., 8., 7., 8.]);
Ok(())
}
test_both_backends!(test_pad_with_same_3d, test_pad_with_same_3d_impl);
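// sum_keepdim reduces over the listed dims but keeps them as size-1 axes.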
fn test_sum_keepdim_1d_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5.], (5,), dev)?;
let b = a.sum_keepdim(vec![0])?;
assert_eq!(b.dims(), &[1]);
assert_eq!(b.to_vec()?, vec![15.]);
Ok(())
}
test_both_backends!(test_sum_keepdim_1d, test_sum_keepdim_1d_impl);
fn test_sum_keepdim_2d_dim0_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> =
Tensor::from_vec(vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.], (3, 4), dev)?;
let b = a.sum_keepdim(vec![0])?;
assert_eq!(b.dims(), &[1, 4]);
assert_eq!(b.to_vec()?, vec![15., 18., 21., 24.]);
Ok(())
}
test_both_backends!(test_sum_keepdim_2d_dim0, test_sum_keepdim_2d_dim0_impl);
fn test_sum_keepdim_2d_dim1_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> =
Tensor::from_vec(vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.], (3, 4), dev)?;
let b = a.sum_keepdim(vec![1])?;
assert_eq!(b.dims(), &[3, 1]);
assert_eq!(b.to_vec()?, vec![10., 26., 42.]);
Ok(())
}
test_both_backends!(test_sum_keepdim_2d_dim1, test_sum_keepdim_2d_dim1_impl);
fn test_sum_keepdim_3d_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec((1..=12).map(|x| x as f32).collect(), (2, 3, 2), dev)?;
let b = a.sum_keepdim(vec![1])?;
assert_eq!(b.dims(), &[2, 1, 2]);
assert_eq!(b.to_vec()?, vec![9., 12., 27., 30.]);
Ok(())
}
test_both_backends!(test_sum_keepdim_3d, test_sum_keepdim_3d_impl);
fn test_sum_keepdim_multiple_dims_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec((1..=24).map(|x| x as f32).collect(), (2, 3, 4), dev)?;
let b = a.sum_keepdim(vec![1, 2])?;
assert_eq!(b.dims(), &[2, 1, 1]);
assert_eq!(b.to_vec()?, vec![78., 222.]);
Ok(())
}
test_both_backends!(test_sum_keepdim_multiple_dims, test_sum_keepdim_multiple_dims_impl);
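// slice_set writes src into dst in place, starting at the given offset along
// one dimension; the asserts below read back from dst itself.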
fn test_slice_set_dim0_impl<B: Backend>(dev: &B) -> Result<()> {
let dst: Tensor<f32, B> = Tensor::zeros((4, 3), dev)?;
let src: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (2, 3), dev)?;
dst.slice_set(&src, 0, 1)?;
assert_eq!(dst.to_vec()?, vec![0., 0., 0., 1., 2., 3., 4., 5., 6., 0., 0., 0.]);
Ok(())
}
test_both_backends!(test_slice_set_dim0, test_slice_set_dim0_impl);
fn test_slice_set_dim1_impl<B: Backend>(dev: &B) -> Result<()> {
let dst: Tensor<f32, B> = Tensor::zeros((2, 6), dev)?;
let src: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (2, 3), dev)?;
dst.slice_set(&src, 1, 2)?;
assert_eq!(dst.to_vec()?, vec![0., 0., 1., 2., 3., 0., 0., 0., 4., 5., 6., 0.]);
Ok(())
}
test_both_backends!(test_slice_set_dim1, test_slice_set_dim1_impl);
fn test_slice_set_3d_impl<B: Backend>(dev: &B) -> Result<()> {
let dst: Tensor<f32, B> = Tensor::zeros((2, 4, 3), dev)?;
let src: Tensor<f32, B> =
Tensor::from_vec((1..=12).map(|x| x as f32).collect(), (2, 2, 3), dev)?;
dst.slice_set(&src, 1, 1)?;
assert_eq!(
dst.to_vec()?,
vec![
0., 0., 0., 1., 2., 3., 4., 5., 6., 0., 0., 0.,
0., 0., 0., 7., 8., 9., 10., 11., 12., 0., 0., 0.,
]
);
Ok(())
}
test_both_backends!(test_slice_set_3d, test_slice_set_3d_impl);
fn test_slice_set_at_start_impl<B: Backend>(dev: &B) -> Result<()> {
let dst: Tensor<f32, B> = Tensor::full(9., (4, 2), dev)?;
let src: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4.], (2, 2), dev)?;
dst.slice_set(&src, 0, 0)?;
assert_eq!(dst.to_vec()?, vec![1., 2., 3., 4., 9., 9., 9., 9.]);
Ok(())
}
test_both_backends!(test_slice_set_at_start, test_slice_set_at_start_impl);
fn test_slice_set_at_end_impl<B: Backend>(dev: &B) -> Result<()> {
let dst: Tensor<f32, B> = Tensor::full(9., (4, 2), dev)?;
let src: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4.], (2, 2), dev)?;
dst.slice_set(&src, 0, 2)?;
assert_eq!(dst.to_vec()?, vec![9., 9., 9., 9., 1., 2., 3., 4.]);
Ok(())
}
test_both_backends!(test_slice_set_at_end, test_slice_set_at_end_impl);
fn test_slice_set_1d_impl<B: Backend>(dev: &B) -> Result<()> {
let dst: Tensor<f32, B> = Tensor::zeros((8,), dev)?;
let src: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3.], (3,), dev)?;
dst.slice_set(&src, 0, 2)?;
assert_eq!(dst.to_vec()?, vec![0., 0., 1., 2., 3., 0., 0., 0.]);
Ok(())
}
test_both_backends!(test_slice_set_1d, test_slice_set_1d_impl);
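// PyTorch-style scatter: for dim 0, result[ids[i][j]][j] = src[i][j];
// positions not named by ids keep their existing values.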
fn test_scatter_dim0_impl<B: Backend>(dev: &B) -> Result<()> {
let dst: Tensor<f32, B> = Tensor::zeros((3, 3), dev)?;
let src: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (2, 3), dev)?;
let ids: Tensor<i64, B> = Tensor::from_vec(vec![0i64, 2, 1, 2, 0, 0], (2, 3), dev)?;
let result = dst.scatter(&ids, &src, 0)?;
assert_eq!(result.dims(), &[3, 3]);
assert_eq!(result.to_vec()?, vec![1., 5., 6., 0., 0., 3., 4., 2., 0.]);
Ok(())
}
test_both_backends!(test_scatter_dim0, test_scatter_dim0_impl);
fn test_scatter_dim1_impl<B: Backend>(dev: &B) -> Result<()> {
let dst: Tensor<f32, B> = Tensor::zeros((2, 4), dev)?;
let src: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (2, 3), dev)?;
let ids: Tensor<i64, B> = Tensor::from_vec(vec![3i64, 0, 1, 2, 3, 0], (2, 3), dev)?;
let result = dst.scatter(&ids, &src, 1)?;
assert_eq!(result.dims(), &[2, 4]);
assert_eq!(result.to_vec()?, vec![2., 3., 0., 1., 6., 0., 4., 5.]);
Ok(())
}
test_both_backends!(test_scatter_dim1, test_scatter_dim1_impl);
fn test_scatter_set_dim0_impl<B: Backend>(dev: &B) -> Result<()> {
let dst: Tensor<f32, B> =
Tensor::from_vec(vec![10., 20., 30., 40., 50., 60., 70., 80., 90.], (3, 3), dev)?;
let src: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3.], (1, 3), dev)?;
let ids: Tensor<i64, B> = Tensor::from_vec(vec![2i64, 0, 1], (1, 3), dev)?;
dst.scatter_set(&ids, &src, 0)?;
assert_eq!(dst.to_vec()?, vec![10., 2., 30., 40., 50., 3., 1., 80., 90.]);
Ok(())
}
test_both_backends!(test_scatter_set_dim0, test_scatter_set_dim0_impl);
fn test_scatter_3d_impl<B: Backend>(dev: &B) -> Result<()> {
let dst: Tensor<f32, B> = Tensor::zeros((2, 3, 2), dev)?;
let src: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4.], (2, 1, 2), dev)?;
let ids: Tensor<i64, B> = Tensor::from_vec(vec![2i64, 0, 1, 2], (2, 1, 2), dev)?;
let result = dst.scatter(&ids, &src, 1)?;
assert_eq!(result.dims(), &[2, 3, 2]);
assert_eq!(result.to_vec()?, vec![0., 2., 0., 0., 1., 0., 0., 0., 3., 0., 0., 4.]);
Ok(())
}
test_both_backends!(test_scatter_3d, test_scatter_3d_impl);
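// broadcast_as returns a lazy, strided view; `contiguous()` materializes it
// before the flat `to_vec` comparison.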
fn test_broadcast_as_add_dim_impl<B: Backend>(dev: &B) -> Result<()> {
let t: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3.], (3,), dev)?;
let view = t.broadcast_as((2, 3))?;
assert_eq!(view.dims(), &[2, 3]);
let result = view.contiguous()?;
assert_eq!(result.to_vec()?, vec![1., 2., 3., 1., 2., 3.]);
Ok(())
}
test_both_backends!(test_broadcast_as_add_dim, test_broadcast_as_add_dim_impl);
fn test_broadcast_as_expand_dim_impl<B: Backend>(dev: &B) -> Result<()> {
let t: Tensor<f32, B> = Tensor::from_vec(vec![10., 20.], (2, 1), dev)?;
let view = t.broadcast_as((2, 3))?;
assert_eq!(view.dims(), &[2, 3]);
let result = view.contiguous()?;
assert_eq!(result.to_vec()?, vec![10., 10., 10., 20., 20., 20.]);
Ok(())
}
test_both_backends!(test_broadcast_as_expand_dim, test_broadcast_as_expand_dim_impl);
fn test_broadcast_as_3d_impl<B: Backend>(dev: &B) -> Result<()> {
let t: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3.], (1, 3), dev)?;
let view = t.broadcast_as((2, 4, 3))?;
assert_eq!(view.dims(), &[2, 4, 3]);
let result = view.contiguous()?;
let expected: Vec<f32> = [1., 2., 3.].repeat(8);
assert_eq!(result.to_vec()?, expected);
Ok(())
}
test_both_backends!(test_broadcast_as_3d, test_broadcast_as_3d_impl);
fn test_broadcast_as_noop_impl<B: Backend>(dev: &B) -> Result<()> {
let t: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (2, 3), dev)?;
let view = t.broadcast_as((2, 3))?;
assert_eq!(view.dims(), &[2, 3]);
let result = view.contiguous()?;
assert_eq!(result.to_vec()?, vec![1., 2., 3., 4., 5., 6.]);
Ok(())
}
test_both_backends!(test_broadcast_as_noop, test_broadcast_as_noop_impl);
fn test_broadcast_as_from_view_impl<B: Backend>(dev: &B) -> Result<()> {
let t: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3., 4., 5., 6.], (2, 3), dev)?;
let view: TensorView<f32, B> = TensorView::from(&t);
let narrowed = view.narrow(0, ..1)?;
let broadcast = narrowed.broadcast_as((3, 3))?;
let result = broadcast.contiguous()?;
assert_eq!(result.to_vec()?, vec![1., 2., 3., 1., 2., 3., 1., 2., 3.]);
Ok(())
}
test_both_backends!(test_broadcast_as_from_view, test_broadcast_as_from_view_impl);
fn test_broadcast_as_error_impl<B: Backend>(dev: &B) -> Result<()> {
let t: Tensor<f32, B> = Tensor::from_vec(vec![1., 2., 3.], (3,), dev)?;
assert!(t.broadcast_as((2, 4)).is_err());
Ok(())
}
test_both_backends!(test_broadcast_as_error, test_broadcast_as_error_impl);
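// q.matmul(k.transpose(2, 3)) on a strided view should agree elementwise with
// the fused q.matmul_t(k) path.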
fn test_matmul_transposed_view_impl<B: Backend>(dev: &B) -> Result<()> {
let q_data: Vec<f32> = (0..24).map(|i| i as f32).collect();
let k_data: Vec<f32> = (0..24).map(|i| (i as f32) * 0.1).collect();
let q: Tensor<f32, B> = Tensor::from_vec(q_data, (1, 2, 3, 4), dev)?;
let k: Tensor<f32, B> = Tensor::from_vec(k_data.clone(), (1, 2, 3, 4), dev)?;
let k_t_view = k.transpose(2, 3)?;
let result_view = q.matmul(&k_t_view)?;
let k2: Tensor<f32, B> = Tensor::from_vec(k_data, (1, 2, 3, 4), dev)?;
let result_matmul_t = q.matmul_t(&k2)?;
let v1 = result_view.to_vec()?;
let v2 = result_matmul_t.to_vec()?;
assert_eq!(v1.len(), v2.len());
for (a, b) in v1.iter().zip(v2.iter()) {
assert!((a - b).abs() < 1e-4, "mismatch: {a} vs {b}");
}
Ok(())
}
test_both_backends!(test_matmul_transposed_view, test_matmul_transposed_view_impl);
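// dtype conversions: `to()` casts element types while preserving shape.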
fn test_to_f32_to_f16_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1.0, 2.5, -3.0, 0.0], (2, 2), dev)?;
let b: Tensor<half::f16, B> = a.to()?;
assert_eq!(b.dims(), &[2, 2]);
let result: Vec<f32> = b.to_vec()?.iter().map(|v| v.to_f32()).collect();
assert_eq!(result, vec![1.0, 2.5, -3.0, 0.0]);
Ok(())
}
test_both_backends!(test_to_f32_to_f16, test_to_f32_to_f16_impl);
fn test_to_f16_to_f32_impl<B: Backend>(dev: &B) -> Result<()> {
let data: Vec<half::f16> =
vec![1.0, 2.5, -3.0, 0.0].into_iter().map(half::f16::from_f32).collect();
let a: Tensor<half::f16, B> = Tensor::from_vec(data, (4,), dev)?;
let b: Tensor<f32, B> = a.to()?;
assert_eq!(b.dims(), &[4]);
assert_eq!(b.to_vec()?, vec![1.0, 2.5, -3.0, 0.0]);
Ok(())
}
test_both_backends!(test_to_f16_to_f32, test_to_f16_to_f32_impl);
fn test_to_f32_to_bf16_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1.0, -0.5, 100.0], (3,), dev)?;
let b: Tensor<half::bf16, B> = a.to()?;
let result: Vec<f32> = b.to_vec()?.iter().map(|v| v.to_f32()).collect();
assert_eq!(result, vec![1.0, -0.5, 100.0]);
Ok(())
}
test_both_backends!(test_to_f32_to_bf16, test_to_f32_to_bf16_impl);
fn test_to_bf16_to_f32_impl<B: Backend>(dev: &B) -> Result<()> {
let data: Vec<half::bf16> =
vec![1.0, -0.5, 100.0].into_iter().map(half::bf16::from_f32).collect();
let a: Tensor<half::bf16, B> = Tensor::from_vec(data, (3,), dev)?;
let b: Tensor<f32, B> = a.to()?;
assert_eq!(b.to_vec()?, vec![1.0, -0.5, 100.0]);
Ok(())
}
test_both_backends!(test_to_bf16_to_f32, test_to_bf16_to_f32_impl);
fn test_to_f16_to_bf16_impl<B: Backend>(dev: &B) -> Result<()> {
let data: Vec<half::f16> = vec![1.0, 2.0, -3.0].into_iter().map(half::f16::from_f32).collect();
let a: Tensor<half::f16, B> = Tensor::from_vec(data, (3,), dev)?;
let b: Tensor<half::bf16, B> = a.to()?;
let result: Vec<f32> = b.to_vec()?.iter().map(|v| v.to_f32()).collect();
assert_eq!(result, vec![1.0, 2.0, -3.0]);
Ok(())
}
test_both_backends!(test_to_f16_to_bf16, test_to_f16_to_bf16_impl);
fn test_to_f32_to_i64_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1.9, -2.1, 0.0, 42.0], (4,), dev)?;
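// Float-to-int conversion truncates toward zero here: 1.9 -> 1, -2.1 -> -2.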
let b: Tensor<i64, B> = a.to()?;
assert_eq!(b.to_vec()?, vec![1i64, -2, 0, 42]);
Ok(())
}
test_both_backends!(test_to_f32_to_i64, test_to_f32_to_i64_impl);
fn test_to_i64_to_f32_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<i64, B> = Tensor::from_vec(vec![1, -2, 0, 42], (4,), dev)?;
let b: Tensor<f32, B> = a.to()?;
assert_eq!(b.to_vec()?, vec![1.0, -2.0, 0.0, 42.0]);
Ok(())
}
test_both_backends!(test_to_i64_to_f32, test_to_i64_to_f32_impl);
fn test_to_f32_to_u8_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![0.0, 1.0, 127.0, 255.0], (4,), dev)?;
let b: Tensor<u8, B> = a.to()?;
assert_eq!(b.to_vec()?, vec![0u8, 1, 127, 255]);
Ok(())
}
test_both_backends!(test_to_f32_to_u8, test_to_f32_to_u8_impl);
fn test_to_u8_to_f32_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<u8, B> = Tensor::from_vec(vec![0, 1, 127, 255], (4,), dev)?;
let b: Tensor<f32, B> = a.to()?;
assert_eq!(b.to_vec()?, vec![0.0, 1.0, 127.0, 255.0]);
Ok(())
}
test_both_backends!(test_to_u8_to_f32, test_to_u8_to_f32_impl);
fn test_to_i64_to_u8_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<i64, B> = Tensor::from_vec(vec![0, 1, 127, 255], (4,), dev)?;
let b: Tensor<u8, B> = a.to()?;
assert_eq!(b.to_vec()?, vec![0u8, 1, 127, 255]);
Ok(())
}
test_both_backends!(test_to_i64_to_u8, test_to_i64_to_u8_impl);
fn test_to_same_dtype_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec(vec![1.0, 2.0, 3.0], (3,), dev)?;
let b: Tensor<f32, B> = a.to()?;
assert_eq!(b.to_vec()?, vec![1.0, 2.0, 3.0]);
Ok(())
}
test_both_backends!(test_to_same_dtype, test_to_same_dtype_impl);
fn test_to_preserves_shape_impl<B: Backend>(dev: &B) -> Result<()> {
let a: Tensor<f32, B> = Tensor::from_vec((1..=24).map(|v| v as f32).collect(), (2, 3, 4), dev)?;
let b: Tensor<half::f16, B> = a.to()?;
assert_eq!(b.dims(), &[2, 3, 4]);
assert_eq!(b.elem_count(), 24);
let result: Vec<f32> = b.to_vec()?.iter().map(|v| v.to_f32()).collect();
assert_eq!(result[0], 1.0);
assert_eq!(result[23], 24.0);
Ok(())
}
test_both_backends!(test_to_preserves_shape, test_to_preserves_shape_impl);
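// RNG tests assert only distribution-free properties: values in range (or
// finite), and at least two adjacent values differing (non-constant output).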
fn test_rand_uniform_impl<B: Backend>(dev: &B) -> Result<()> {
let t: Tensor<f32, B> = Tensor::from_vec(vec![0.0; 1000], 1000, dev)?;
let r = t.rand_uniform_like(0.0, 1.0)?;
assert_eq!(r.dims(), &[1000]);
let vals = r.to_vec()?;
for &v in &vals {
assert!((0.0..=1.0).contains(&v), "rand_uniform value {v} out of [0, 1]");
}
assert!(vals.windows(2).any(|w| w[0] != w[1]));
Ok(())
}
test_both_backends!(test_rand_uniform, test_rand_uniform_impl);
fn test_rand_uniform_shape_impl<B: Backend>(dev: &B) -> Result<()> {
let t: Tensor<f32, B> = Tensor::from_vec(vec![0.0], 1, dev)?;
let r = t.rand_uniform((4, 8), 0.0, 1.0)?;
assert_eq!(r.dims(), &[4, 8]);
let vals = r.to_vec()?;
assert_eq!(vals.len(), 32);
for &v in &vals {
assert!((0.0..=1.0).contains(&v), "rand_uniform value {v} out of [0, 1]");
}
Ok(())
}
test_both_backends!(test_rand_uniform_shape, test_rand_uniform_shape_impl);
fn test_rand_uniform_bounds_impl<B: Backend>(dev: &B) -> Result<()> {
let t: Tensor<f32, B> = Tensor::from_vec(vec![0.0; 1000], 1000, dev)?;
let r = t.rand_uniform_like(-5.0, 3.0)?;
let vals = r.to_vec()?;
for &v in &vals {
assert!((-5.0..=3.0).contains(&v), "rand_uniform value {v} out of [-5, 3]");
}
assert!(vals.windows(2).any(|w| w[0] != w[1]));
Ok(())
}
test_both_backends!(test_rand_uniform_bounds, test_rand_uniform_bounds_impl);
fn test_rand_uniform_invalid_bounds_impl<B: Backend>(dev: &B) -> Result<()> {
let t: Tensor<f32, B> = Tensor::from_vec(vec![0.0; 10], 10, dev)?;
let r = t.rand_uniform_like(5.0, 2.0);
assert!(r.is_err());
let err_msg = r.unwrap_err().to_string();
assert!(err_msg.contains("upper bound"), "error should mention upper bound: {err_msg}");
Ok(())
}
test_both_backends!(test_rand_uniform_invalid_bounds, test_rand_uniform_invalid_bounds_impl);
fn test_randn_impl<B: Backend>(dev: &B) -> Result<()> {
let t: Tensor<f32, B> = Tensor::from_vec(vec![0.0; 1000], 1000, dev)?;
let r = t.randn_like(0.0, 1.0)?;
assert_eq!(r.dims(), &[1000]);
let vals = r.to_vec()?;
for &v in &vals {
assert!(v.is_finite(), "randn value is not finite: {v}");
}
assert!(vals.windows(2).any(|w| w[0] != w[1]));
Ok(())
}
test_both_backends!(test_randn, test_randn_impl);
fn test_randn_shape_impl<B: Backend>(dev: &B) -> Result<()> {
let t: Tensor<f32, B> = Tensor::from_vec(vec![0.0], 1, dev)?;
let r = t.randn((4, 8), 5.0, 0.1)?;
assert_eq!(r.dims(), &[4, 8]);
let vals = r.to_vec()?;
assert_eq!(vals.len(), 32);
for &v in &vals {
assert!(v.is_finite(), "randn value is not finite: {v}");
}
Ok(())
}
test_both_backends!(test_randn_shape, test_randn_shape_impl);
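// Shared helper: elementwise |x - y| < tol, with the failing index in the message.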
fn assert_approx_eq(a: &[f32], b: &[f32], tol: f32) {
assert_eq!(a.len(), b.len(), "length mismatch: {} vs {}", a.len(), b.len());
for (i, (x, y)) in a.iter().zip(b).enumerate() {
assert!((x - y).abs() < tol, "mismatch at index {i}: {x} vs {y}");
}
}
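// In-place unary ops follow a dst.op_(&src) convention: read src, write dst.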
fn test_exp_impl<B: Backend>(dev: &B) -> Result<()> {
let data = vec![0.0f32, 1.0, -1.0, 2.0];
let expected: Vec<f32> = data.iter().map(|x| x.exp()).collect();
let src: Tensor<f32, B> = Tensor::from_vec(data, 4, dev)?;
let dst: Tensor<f32, B> = Tensor::zeros(4, dev)?;
dst.exp_(&src)?;
assert_approx_eq(&dst.to_vec()?, &expected, 1e-6);
Ok(())
}
test_both_backends!(test_exp, test_exp_impl);
fn test_log_impl<B: Backend>(dev: &B) -> Result<()> {
let data = vec![1.0f32, 2.0, 0.5, 10.0];
let expected: Vec<f32> = data.iter().map(|x| x.ln()).collect();
let src: Tensor<f32, B> = Tensor::from_vec(data, 4, dev)?;
let dst: Tensor<f32, B> = Tensor::zeros(4, dev)?;
dst.log_(&src)?;
assert_approx_eq(&dst.to_vec()?, &expected, 1e-6);
Ok(())
}
test_both_backends!(test_log, test_log_impl);
fn test_neg_impl<B: Backend>(dev: &B) -> Result<()> {
let data = vec![1.0f32, -2.0, 0.0, 3.5];
let expected: Vec<f32> = data.iter().map(|x| -x).collect();
let src: Tensor<f32, B> = Tensor::from_vec(data, 4, dev)?;
let dst: Tensor<f32, B> = Tensor::zeros(4, dev)?;
dst.neg_(&src)?;
assert_approx_eq(&dst.to_vec()?, &expected, 1e-6);
Ok(())
}
test_both_backends!(test_neg, test_neg_impl);
fn test_exp_log_compose_impl<B: Backend>(dev: &B) -> Result<()> {
let data = vec![0.5f32, 1.0, 2.0, 3.0];
let expected: Vec<f32> = data.iter().map(|x| x.exp().ln()).collect();
let src: Tensor<f32, B> = Tensor::from_vec(data, 4, dev)?;
let tmp: Tensor<f32, B> = Tensor::zeros(4, dev)?;
let dst: Tensor<f32, B> = Tensor::zeros(4, dev)?;
tmp.exp_(&src)?;
dst.log_(&tmp)?;
assert_approx_eq(&dst.to_vec()?, &expected, 1e-5);
Ok(())
}
test_both_backends!(test_exp_log_compose, test_exp_log_compose_impl);
fn test_log_neg_roundtrip_impl<B: Backend>(dev: &B) -> Result<()> {
let data = vec![1.0f32, 2.0, 3.0, 4.0];
let src: Tensor<f32, B> = Tensor::from_vec(data.clone(), 4, dev)?;
let tmp: Tensor<f32, B> = Tensor::zeros(4, dev)?;
let dst: Tensor<f32, B> = Tensor::zeros(4, dev)?;
tmp.log_(&src)?;
dst.exp_(&tmp)?;
assert_approx_eq(&dst.to_vec()?, &data, 1e-5);
tmp.neg_(&src)?;
dst.neg_(&tmp)?;
assert_approx_eq(&dst.to_vec()?, &data, 1e-6);
Ok(())
}
test_both_backends!(test_log_neg_roundtrip, test_log_neg_roundtrip_impl);