use crate::{Tensor, TensorElement};
use torsh_core::error::{Result, TorshError};
impl<T: TensorElement + Copy + Default> Tensor<T> {
/// Stacks `tensors` along a new dimension `dim` (PyTorch-style `stack`).
///
/// All inputs must share the same shape. `dim` may be negative and indexes
/// into the *output* shape, which has `ndim + 1` axes, so valid values are
/// `-(ndim + 1)..=ndim`.
///
/// # Errors
/// * `InvalidArgument` — `tensors` is empty or `dim` is out of range.
/// * `ShapeMismatch` — any input disagrees with the first tensor's shape.
pub fn stack(tensors: &[Self], dim: isize) -> Result<Self> {
    if tensors.is_empty() {
        return Err(TorshError::InvalidArgument(
            "stack requires at least one tensor".to_string(),
        ));
    }
    let first_shape = tensors[0].shape().to_vec();
    for tensor in tensors.iter().skip(1) {
        if tensor.shape().dims() != first_shape.as_slice() {
            return Err(TorshError::ShapeMismatch {
                expected: first_shape.clone(),
                got: tensor.shape().to_vec(),
            });
        }
    }
    let ndim = first_shape.len();
    // The output gains one axis, so normalize against ndim + 1.
    let dim = if dim < 0 {
        ((ndim + 1) as isize + dim) as usize
    } else {
        dim as usize
    };
    if dim > ndim {
        return Err(TorshError::InvalidArgument(format!(
            "Dimension {} out of range for stacking {}-D tensors",
            dim, ndim
        )));
    }
    let mut output_shape = first_shape.clone();
    output_shape.insert(dim, tensors.len());
    let elem_count: usize = first_shape.iter().product();
    // Materialize each tensor's data exactly once. Previously `to_vec()`
    // was called inside the `outer` loop, copying every tensor's whole
    // buffer `outer_size` times.
    let mut all_data = Vec::with_capacity(tensors.len());
    for tensor in tensors {
        all_data.push(tensor.to_vec()?);
    }
    let outer_size: usize = first_shape[..dim].iter().product();
    let inner_size: usize = first_shape[dim..].iter().product();
    let mut result_data = Vec::with_capacity(elem_count * tensors.len());
    // Interleave: for each outer slice, append that slice from every input.
    for outer in 0..outer_size {
        let base = outer * inner_size;
        for data in &all_data {
            result_data.extend_from_slice(&data[base..base + inner_size]);
        }
    }
    let device = tensors[0].device.clone();
    Self::from_data(result_data, output_shape, device)
}
/// Splits the tensor into up to `chunks` pieces along `dim`.
///
/// Every piece spans `ceil(dim_size / chunks)` entries along `dim` except
/// possibly the last, which absorbs the remainder. Fewer than `chunks`
/// pieces are returned when the dimension is too small. Negative `dim`
/// counts from the last axis.
pub fn chunk(&self, chunks: usize, dim: isize) -> Result<Vec<Self>> {
    if chunks == 0 {
        return Err(TorshError::InvalidArgument(
            "chunks must be greater than 0".to_string(),
        ));
    }
    let rank = self.ndim();
    let axis = if dim < 0 {
        (rank as isize + dim) as usize
    } else {
        dim as usize
    };
    if axis >= rank {
        return Err(TorshError::InvalidArgument(format!(
            "Dimension {} out of range for {}-D tensor",
            axis, rank
        )));
    }
    let extent = self.shape().dims()[axis];
    // Ceiling division: nominal length of each chunk.
    let step = (extent + chunks - 1) / chunks;
    let mut pieces = Vec::new();
    let mut begin = 0;
    while begin < extent {
        let len = step.min(extent - begin);
        pieces.push(self.narrow(axis as i32, begin as i64, len)?);
        begin += len;
    }
    Ok(pieces)
}
/// Splits the tensor into consecutive pieces of `split_size` along `dim`.
///
/// The final piece is smaller when the dimension's extent is not evenly
/// divisible by `split_size`. Negative `dim` counts from the last axis.
pub fn split(&self, split_size: usize, dim: isize) -> Result<Vec<Self>> {
    if split_size == 0 {
        return Err(TorshError::InvalidArgument(
            "split_size must be greater than 0".to_string(),
        ));
    }
    let rank = self.ndim();
    let axis = if dim < 0 {
        (rank as isize + dim) as usize
    } else {
        dim as usize
    };
    if axis >= rank {
        return Err(TorshError::InvalidArgument(format!(
            "Dimension {} out of range for {}-D tensor",
            axis, rank
        )));
    }
    let extent = self.shape().dims()[axis];
    let mut pieces = Vec::new();
    let mut begin = 0;
    while begin < extent {
        // Last piece may be shorter than `split_size`.
        let len = split_size.min(extent - begin);
        pieces.push(self.narrow(axis as i32, begin as i64, len)?);
        begin += len;
    }
    Ok(pieces)
}
/// Reverses the order of elements along each dimension listed in `dims`.
///
/// Negative dimensions count from the last axis. An empty `dims` returns a
/// clone unchanged. A dimension repeated in `dims` is flipped twice (i.e.
/// left unchanged) — callers should pass each axis at most once.
///
/// # Errors
/// `InvalidArgument` when any dimension is out of range.
pub fn flip(&self, dims: &[isize]) -> Result<Self> {
    if dims.is_empty() {
        return Ok(self.clone());
    }
    let ndim = self.ndim();
    let mut norm_dims = Vec::with_capacity(dims.len());
    for &dim in dims {
        let d = if dim < 0 {
            (ndim as isize + dim) as usize
        } else {
            dim as usize
        };
        if d >= ndim {
            return Err(TorshError::InvalidArgument(format!(
                "Dimension {} out of range for {}-D tensor",
                dim, ndim
            )));
        }
        norm_dims.push(d);
    }
    let data = self.to_vec()?;
    let shape = self.shape().to_vec();
    let mut result_data = vec![T::default(); data.len()];
    // Row-major (C-contiguous) strides.
    let mut strides = vec![1; ndim];
    for i in (0..ndim - 1).rev() {
        strides[i] = strides[i + 1] * shape[i + 1];
    }
    // Reusable coordinate buffer — previously re-allocated for every element.
    let mut indices = vec![0; ndim];
    for i in 0..data.len() {
        // Decompose the flat index into per-dimension coordinates.
        let mut remainder = i;
        for d in 0..ndim {
            indices[d] = remainder / strides[d];
            remainder %= strides[d];
        }
        // Mirror the coordinate on every flipped axis.
        for &flip_dim in &norm_dims {
            indices[flip_dim] = shape[flip_dim] - 1 - indices[flip_dim];
        }
        // Re-linearize and scatter the source element to its mirrored slot.
        let mut flipped_idx = 0;
        for d in 0..ndim {
            flipped_idx += indices[d] * strides[d];
        }
        result_data[flipped_idx] = data[i];
    }
    // `shape` is moved directly; the former `shape.to_vec()` was a
    // redundant extra copy.
    Self::from_data(result_data, shape, self.device)
}
/// Flips the tensor left/right, i.e. reverses entries along the last
/// dimension. Requires at least a 2-D tensor, mirroring NumPy's `fliplr`.
pub fn fliplr(&self) -> Result<Self> {
    match self.ndim() {
        0 | 1 => Err(TorshError::InvalidArgument(
            "fliplr requires at least 2 dimensions".to_string(),
        )),
        _ => self.flip(&[-1]),
    }
}
/// Flips the tensor up/down, i.e. reverses entries along the first
/// dimension. Requires at least a 1-D tensor, mirroring NumPy's `flipud`.
pub fn flipud(&self) -> Result<Self> {
    if self.ndim() >= 1 {
        self.flip(&[0])
    } else {
        Err(TorshError::InvalidArgument(
            "flipud requires at least 1 dimension".to_string(),
        ))
    }
}
/// Rolls tensor elements along the given dimensions.
///
/// `shifts[i]` rotates dimension `dims[i]`; elements shifted past the end
/// wrap around, and negative shifts roll the other way. When `dims` is
/// empty the tensor is rolled as a flattened 1-D buffer by `shifts[0]`
/// (or left unchanged when `shifts` is also empty), matching `torch.roll`
/// without a `dims` argument.
///
/// # Errors
/// `InvalidArgument` when `shifts`/`dims` lengths are incompatible or a
/// dimension is out of range.
pub fn roll(&self, shifts: &[isize], dims: &[isize]) -> Result<Self> {
    if dims.is_empty() {
        // Flattened roll. NOTE: the previous up-front length check forced
        // `shifts` to be empty here too, so the flattened path could never
        // actually shift; a single shift is now accepted.
        if shifts.len() > 1 {
            return Err(TorshError::InvalidArgument(
                "shifts and dims must have the same length".to_string(),
            ));
        }
        let data = self.to_vec()?;
        let n = data.len();
        if n == 0 {
            // Nothing to roll; also avoids the `% 0` panic below.
            return Self::from_data(data, self.shape().dims().to_vec(), self.device);
        }
        let shift = shifts.first().copied().unwrap_or(0);
        // Euclidean modulo so negative shifts wrap correctly.
        let shift = ((shift % n as isize) + n as isize) as usize % n;
        let mut result_data = vec![T::default(); n];
        for (i, &val) in data.iter().enumerate() {
            result_data[(i + shift) % n] = val;
        }
        return Self::from_data(result_data, self.shape().dims().to_vec(), self.device);
    }
    if shifts.len() != dims.len() {
        return Err(TorshError::InvalidArgument(
            "shifts and dims must have the same length".to_string(),
        ));
    }
    let ndim = self.ndim();
    let mut norm_dims = Vec::with_capacity(dims.len());
    for &dim in dims {
        let d = if dim < 0 {
            (ndim as isize + dim) as usize
        } else {
            dim as usize
        };
        if d >= ndim {
            return Err(TorshError::InvalidArgument(format!(
                "Dimension {} out of range for {}-D tensor",
                dim, ndim
            )));
        }
        norm_dims.push(d);
    }
    let data = self.to_vec()?;
    let shape = self.shape().to_vec();
    let mut result_data = vec![T::default(); data.len()];
    // Row-major strides.
    let mut strides = vec![1; ndim];
    for i in (0..ndim - 1).rev() {
        strides[i] = strides[i + 1] * shape[i + 1];
    }
    // Reusable coordinate buffer — previously re-allocated per element.
    let mut indices = vec![0; ndim];
    for i in 0..data.len() {
        let mut remainder = i;
        for d in 0..ndim {
            indices[d] = remainder / strides[d];
            remainder %= strides[d];
        }
        for (dim_idx, &roll_dim) in norm_dims.iter().enumerate() {
            let shift = shifts[dim_idx];
            let dim_size = shape[roll_dim] as isize;
            // Euclidean modulo so negative shifts wrap correctly.
            let rolled =
                ((indices[roll_dim] as isize + shift) % dim_size + dim_size) % dim_size;
            indices[roll_dim] = rolled as usize;
        }
        let mut rolled_idx = 0;
        for d in 0..ndim {
            rolled_idx += indices[d] * strides[d];
        }
        result_data[rolled_idx] = data[i];
    }
    Self::from_data(result_data, shape, self.device)
}
/// Rotates the tensor by 90 degrees `k` times in the plane spanned by
/// `dims` (exactly two distinct axes; negative values count from the end).
///
/// Follows the NumPy/PyTorch convention: positive `k` rotates from the
/// first axis towards the second (counter-clockwise for `dims = [0, 1]`);
/// negative `k` rotates the other way.
///
/// # Errors
/// `InvalidArgument` when `dims` is not two distinct in-range axes or the
/// tensor has fewer than 2 dimensions.
pub fn rot90(&self, k: isize, dims: &[isize]) -> Result<Self> {
    if dims.len() != 2 {
        return Err(TorshError::InvalidArgument(
            "dims must contain exactly 2 dimensions".to_string(),
        ));
    }
    let ndim = self.ndim();
    if ndim < 2 {
        return Err(TorshError::InvalidArgument(
            "rot90 requires at least 2 dimensions".to_string(),
        ));
    }
    let dim0 = if dims[0] < 0 {
        (ndim as isize + dims[0]) as usize
    } else {
        dims[0] as usize
    };
    let dim1 = if dims[1] < 0 {
        (ndim as isize + dims[1]) as usize
    } else {
        dims[1] as usize
    };
    if dim0 >= ndim || dim1 >= ndim {
        return Err(TorshError::InvalidArgument("dims out of range".to_string()));
    }
    if dim0 == dim1 {
        return Err(TorshError::InvalidArgument(
            "dims must be different".to_string(),
        ));
    }
    // Euclidean modulo: normalize k into 0..=3 quarter-turns.
    let k = ((k % 4) + 4) % 4;
    let mut result = self.clone();
    for _ in 0..k {
        // One counter-clockwise quarter-turn is transpose followed by a
        // flip of the FIRST rotation axis (NumPy: rot90(m) ==
        // flip(transpose(m), axes[0])). The previous code flipped `dim1`,
        // which yields a clockwise rotation — only detectable for odd k,
        // since 180-degree rotations are direction-invariant.
        result = result.transpose_view(dim0, dim1)?;
        result = result.flip(&[dim0 as isize])?;
    }
    Ok(result)
}
/// Tiles the tensor by repeating it `repeats[i]` times along each
/// dimension, following `torch.tile` semantics.
///
/// When `repeats` is shorter than the tensor's rank it is left-padded with
/// 1s, so `tile(&[2])` on a 2-D tensor repeats only the last dimension.
/// When it is longer, the extra leading entries introduce new dimensions
/// (delegated to `repeat`). An empty `repeats` returns a clone.
pub fn tile(&self, repeats: &[usize]) -> Result<Self> {
    if repeats.is_empty() {
        return Ok(self.clone());
    }
    let ndim = self.ndim();
    if repeats.len() >= ndim {
        // `repeat` covers the case where repeats spans (or exceeds) every
        // dimension — same as the previous behavior.
        return self.repeat(repeats);
    }
    // torch.tile left-pads short `repeats` with 1s. Previously the unpadded
    // slice was forwarded to `repeat` (and two locally computed shapes were
    // discarded as dead code). NOTE(review): assumes `repeat` expects one
    // entry per dimension — confirm against its contract.
    let mut padded = vec![1; ndim - repeats.len()];
    padded.extend_from_slice(repeats);
    self.repeat(&padded)
}
/// Repeats each element of the tensor `repeats` times.
///
/// With `dim = None` the tensor is flattened and every element appears
/// `repeats` times consecutively in a 1-D result. With `Some(d)` the
/// repetition happens along dimension `d` (negative values count from the
/// end) and every other dimension is preserved.
///
/// # Errors
/// `InvalidArgument` when `repeats` is zero or the dimension is out of range.
pub fn repeat_interleave(&self, repeats: usize, dim: Option<isize>) -> Result<Self> {
    if repeats == 0 {
        return Err(TorshError::InvalidArgument(
            "repeats must be positive".to_string(),
        ));
    }
    match dim {
        None => {
            // Flattened form: [a, b, ...] -> [a, a, ..., b, b, ...].
            let values = self.to_vec()?;
            let mut expanded = Vec::with_capacity(values.len() * repeats);
            for &v in &values {
                expanded.extend(std::iter::repeat(v).take(repeats));
            }
            Self::from_data(expanded, vec![values.len() * repeats], self.device)
        }
        Some(d) => {
            let rank = self.ndim();
            let axis = if d < 0 {
                (rank as isize + d) as usize
            } else {
                d as usize
            };
            if axis >= rank {
                return Err(TorshError::InvalidArgument(format!(
                    "Dimension {} out of range for {}-D tensor",
                    d, rank
                )));
            }
            let dims = self.shape().to_vec();
            let values = self.to_vec()?;
            let mut out_dims = dims.clone();
            out_dims[axis] *= repeats;
            let axis_len = dims[axis];
            let outer: usize = dims[..axis].iter().product();
            let inner: usize = dims[axis + 1..].iter().product();
            let mut expanded = Vec::with_capacity(values.len() * repeats);
            for o in 0..outer {
                for a in 0..axis_len {
                    // Emit each contiguous length-`inner` row along the
                    // axis `repeats` times in a row.
                    let row = (o * axis_len + a) * inner;
                    for _ in 0..repeats {
                        expanded.extend_from_slice(&values[row..row + inner]);
                    }
                }
            }
            Self::from_data(expanded, out_dims, self.device)
        }
    }
}
/// Expands dimension `dim` into the given `sizes`, whose product must equal
/// that dimension's current extent (the inverse of flattening). Negative
/// `dim` counts from the last axis. Data order is unchanged; only the shape
/// is rewritten.
///
/// # Errors
/// `InvalidArgument` when `sizes` is empty, the dimension is out of range,
/// or the product of `sizes` does not match the dimension's size.
pub fn unflatten(&self, dim: isize, sizes: &[usize]) -> Result<Self> {
    if sizes.is_empty() {
        return Err(TorshError::InvalidArgument(
            "sizes cannot be empty".to_string(),
        ));
    }
    let dims = self.shape().to_vec();
    let rank = dims.len();
    let axis = if dim < 0 {
        (rank as isize + dim) as usize
    } else {
        dim as usize
    };
    if axis >= rank {
        return Err(TorshError::InvalidArgument(format!(
            "Dimension {} out of range for {}-D tensor",
            axis, rank
        )));
    }
    let product: usize = sizes.iter().product();
    if product != dims[axis] {
        return Err(TorshError::InvalidArgument(format!(
            "sizes product {} does not match dimension size {}",
            product, dims[axis]
        )));
    }
    // Splice `sizes` in place of the unflattened dimension.
    let reshaped: Vec<usize> = dims[..axis]
        .iter()
        .chain(sizes.iter())
        .chain(dims[axis + 1..].iter())
        .copied()
        .collect();
    Self::from_data(self.to_vec()?, reshaped, self.device)
}
/// Gathers values from `self` at the positions given by `indices`,
/// mirroring `torch.take_along_dim`.
///
/// * `dim = None` — `self` is treated as flattened; each entry of `indices`
///   is a flat offset into the data, and the result takes `indices`' shape.
/// * `dim = Some(d)` — `indices` must have the same rank as `self` and match
///   its shape on every axis except `d`; along `d` each index selects which
///   source element to take. The result has `indices`' shape.
///
/// # Errors
/// `InvalidArgument` for an out-of-range `d` or any out-of-bounds index
/// value; `ShapeMismatch` when the two shapes are incompatible.
pub fn take_along_dim(&self, indices: &Tensor<i64>, dim: Option<isize>) -> Result<Self> {
    match dim {
        None => {
            let data = self.to_vec()?;
            let idx_data = indices.to_vec()?;
            let mut result = Vec::with_capacity(idx_data.len());
            for &idx in idx_data.iter() {
                // Validate each flat index (i64 may be negative) before use.
                if idx < 0 || idx as usize >= data.len() {
                    return Err(TorshError::InvalidArgument(format!(
                        "Index {} out of range for tensor with {} elements",
                        idx,
                        data.len()
                    )));
                }
                result.push(data[idx as usize]);
            }
            Self::from_data(result, indices.shape().to_vec(), self.device)
        }
        Some(d) => {
            let ndim = self.ndim();
            // Normalize a possibly-negative dimension.
            let dim = if d < 0 {
                (ndim as isize + d) as usize
            } else {
                d as usize
            };
            if dim >= ndim {
                return Err(TorshError::InvalidArgument(format!(
                    "Dimension {} out of range for {}-D tensor",
                    d, ndim
                )));
            }
            let self_shape = self.shape().to_vec();
            let indices_shape = indices.shape().to_vec();
            // Ranks must match exactly...
            if self_shape.len() != indices_shape.len() {
                return Err(TorshError::ShapeMismatch {
                    expected: self_shape.clone(),
                    got: indices_shape.clone(),
                });
            }
            // ...and every axis except the gather axis must agree in size.
            for (i, (&s, &idx_s)) in self_shape.iter().zip(indices_shape.iter()).enumerate() {
                if i != dim && s != idx_s {
                    return Err(TorshError::ShapeMismatch {
                        expected: self_shape.clone(),
                        got: indices_shape.clone(),
                    });
                }
            }
            let data = self.to_vec()?;
            let idx_data = indices.to_vec()?;
            let dim_size = self_shape[dim];
            // Row-major decomposition around the gather axis:
            // flat = outer * dim_size * inner + d * inner + inner_offset.
            let outer_size: usize = self_shape[..dim].iter().product();
            let inner_size: usize = self_shape[dim + 1..].iter().product();
            let indices_dim_size = indices_shape[dim];
            let mut result = Vec::with_capacity(idx_data.len());
            for outer in 0..outer_size {
                for d in 0..indices_dim_size {
                    for inner in 0..inner_size {
                        // Position of the index value within `indices`
                        // (its shape along the axis may differ from self's).
                        let idx_flat =
                            outer * indices_dim_size * inner_size + d * inner_size + inner;
                        let gather_idx = idx_data[idx_flat];
                        if gather_idx < 0 || gather_idx as usize >= dim_size {
                            return Err(TorshError::InvalidArgument(format!(
                                "Index {} out of range for dimension size {}",
                                gather_idx, dim_size
                            )));
                        }
                        // Corresponding source position in `self`, replacing
                        // the axis coordinate with the gathered index.
                        let src_idx = outer * dim_size * inner_size
                            + (gather_idx as usize) * inner_size
                            + inner;
                        result.push(data[src_idx]);
                    }
                }
            }
            Self::from_data(result, indices_shape, self.device)
        }
    }
}
}
#[cfg(test)]
mod tests {
    //! Unit tests for the tensor manipulation helpers above. All fixtures
    //! are tiny CPU tensors, so every test runs in well under a second.
    use super::*;
    use torsh_core::device::DeviceType;

    // stack along dim 0: inputs become consecutive rows of the output.
    #[test]
    fn test_stack_1d() {
        let a = Tensor::from_data(vec![1.0f32, 2.0], vec![2], DeviceType::Cpu)
            .expect("failed to create tensor a");
        let b = Tensor::from_data(vec![3.0f32, 4.0], vec![2], DeviceType::Cpu)
            .expect("failed to create tensor b");
        let result = Tensor::stack(&[a, b], 0).expect("stack should succeed for 1d tensors");
        assert_eq!(result.shape().dims(), &[2, 2]);
        let data = result.data().expect("failed to get stacked tensor data");
        assert_eq!(data, vec![1.0, 2.0, 3.0, 4.0]);
    }

    // dim = -1 on 1-D inputs stacks along a new trailing axis.
    #[test]
    fn test_stack_negative_dim() {
        let a = Tensor::from_data(vec![1.0f32, 2.0], vec![2], DeviceType::Cpu)
            .expect("failed to create tensor a");
        let b = Tensor::from_data(vec![3.0f32, 4.0], vec![2], DeviceType::Cpu)
            .expect("failed to create tensor b");
        let result = Tensor::stack(&[a, b], -1).expect("stack should succeed with negative dim");
        assert_eq!(result.shape().dims(), &[2, 2]);
    }

    // 6 elements into 3 chunks -> three equal pieces of 2.
    #[test]
    fn test_chunk_even() {
        let tensor = Tensor::from_data(
            vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0],
            vec![6],
            DeviceType::Cpu,
        )
        .expect("failed to create tensor for chunk_even");
        let chunks = tensor.chunk(3, 0).expect("chunk into 3 should succeed");
        assert_eq!(chunks.len(), 3);
        assert_eq!(chunks[0].shape().dims(), &[2]);
        assert_eq!(
            chunks[0].data().expect("failed to get chunk 0 data"),
            vec![1.0, 2.0]
        );
        assert_eq!(
            chunks[1].data().expect("failed to get chunk 1 data"),
            vec![3.0, 4.0]
        );
        assert_eq!(
            chunks[2].data().expect("failed to get chunk 2 data"),
            vec![5.0, 6.0]
        );
    }

    // 5 elements into 2 chunks -> ceil(5/2)=3 then the remaining 2.
    #[test]
    fn test_chunk_uneven() {
        let tensor = Tensor::from_data(vec![1.0f32, 2.0, 3.0, 4.0, 5.0], vec![5], DeviceType::Cpu)
            .expect("failed to create tensor for chunk_uneven");
        let chunks = tensor.chunk(2, 0).expect("uneven chunk should succeed");
        assert_eq!(chunks.len(), 2);
        assert_eq!(chunks[0].shape().dims(), &[3]);
        assert_eq!(chunks[1].shape().dims(), &[2]);
    }

    // split_size divides evenly -> three pieces of 2.
    #[test]
    fn test_split_even() {
        let tensor = Tensor::from_data(
            vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0],
            vec![6],
            DeviceType::Cpu,
        )
        .expect("failed to create tensor for split_even");
        let splits = tensor.split(2, 0).expect("split by 2 should succeed");
        assert_eq!(splits.len(), 3);
        assert_eq!(
            splits[0].data().expect("failed to get split 0 data"),
            vec![1.0, 2.0]
        );
        assert_eq!(
            splits[1].data().expect("failed to get split 1 data"),
            vec![3.0, 4.0]
        );
    }

    // 5 elements split by 2 -> sizes [2, 2, 1]; the last piece is smaller.
    #[test]
    fn test_split_uneven() {
        let tensor = Tensor::from_data(vec![1.0f32, 2.0, 3.0, 4.0, 5.0], vec![5], DeviceType::Cpu)
            .expect("failed to create tensor for split_uneven");
        let splits = tensor.split(2, 0).expect("uneven split should succeed");
        assert_eq!(splits.len(), 3);
        assert_eq!(splits[0].shape().dims(), &[2]);
        assert_eq!(splits[1].shape().dims(), &[2]);
        assert_eq!(splits[2].shape().dims(), &[1]);
    }

    // 1-D flip reverses the whole vector.
    #[test]
    fn test_flip_1d() {
        let tensor = Tensor::from_data(vec![1.0f32, 2.0, 3.0, 4.0], vec![4], DeviceType::Cpu)
            .expect("failed to create 1d tensor for flip");
        let result = tensor.flip(&[0]).expect("flip dim 0 should succeed");
        assert_eq!(
            result.data().expect("failed to get flipped data"),
            vec![4.0, 3.0, 2.0, 1.0]
        );
    }

    // 2x2 flip along dim 0 swaps the rows.
    #[test]
    fn test_flip_2d() {
        let tensor = Tensor::from_data(vec![1.0f32, 2.0, 3.0, 4.0], vec![2, 2], DeviceType::Cpu)
            .expect("failed to create 2d tensor for flip");
        let result = tensor.flip(&[0]).expect("flip 2d dim 0 should succeed");
        assert_eq!(
            result.data().expect("failed to get 2d flipped data"),
            vec![3.0, 4.0, 1.0, 2.0]
        );
    }

    // fliplr reverses columns (last dim) of each row.
    #[test]
    fn test_fliplr() {
        let tensor = Tensor::from_data(vec![1.0f32, 2.0, 3.0, 4.0], vec![2, 2], DeviceType::Cpu)
            .expect("failed to create tensor for fliplr");
        let result = tensor.fliplr().expect("fliplr should succeed");
        assert_eq!(
            result.data().expect("failed to get fliplr data"),
            vec![2.0, 1.0, 4.0, 3.0]
        );
    }

    // flipud reverses rows (first dim).
    #[test]
    fn test_flipud() {
        let tensor = Tensor::from_data(vec![1.0f32, 2.0, 3.0, 4.0], vec![2, 2], DeviceType::Cpu)
            .expect("failed to create tensor for flipud");
        let result = tensor.flipud().expect("flipud should succeed");
        assert_eq!(
            result.data().expect("failed to get flipud data"),
            vec![3.0, 4.0, 1.0, 2.0]
        );
    }

    // Positive shift rotates elements towards higher indices, wrapping.
    #[test]
    fn test_roll_1d() {
        let tensor = Tensor::from_data(vec![1.0f32, 2.0, 3.0, 4.0], vec![4], DeviceType::Cpu)
            .expect("failed to create tensor for roll");
        let result = tensor.roll(&[1], &[0]).expect("roll by 1 should succeed");
        assert_eq!(
            result.data().expect("failed to get rolled data"),
            vec![4.0, 1.0, 2.0, 3.0]
        );
    }

    // Negative shift rotates towards lower indices.
    #[test]
    fn test_roll_negative() {
        let tensor = Tensor::from_data(vec![1.0f32, 2.0, 3.0, 4.0], vec![4], DeviceType::Cpu)
            .expect("failed to create tensor for negative roll");
        let result = tensor
            .roll(&[-1], &[0])
            .expect("negative roll should succeed");
        assert_eq!(
            result.data().expect("failed to get negatively rolled data"),
            vec![2.0, 3.0, 4.0, 1.0]
        );
    }

    // Single quarter-turn: only the (unchanged) shape is asserted here,
    // so this does not pin the rotation direction.
    #[test]
    fn test_rot90_once() {
        let tensor = Tensor::from_data(vec![1.0f32, 2.0, 3.0, 4.0], vec![2, 2], DeviceType::Cpu)
            .expect("failed to create tensor for rot90");
        let result = tensor.rot90(1, &[0, 1]).expect("rot90 once should succeed");
        assert_eq!(result.shape().dims(), &[2, 2]);
    }

    // 180-degree rotation fully reverses a 2x2 — direction-invariant check.
    #[test]
    fn test_rot90_twice() {
        let tensor = Tensor::from_data(vec![1.0f32, 2.0, 3.0, 4.0], vec![2, 2], DeviceType::Cpu)
            .expect("failed to create tensor for rot90 twice");
        let result = tensor
            .rot90(2, &[0, 1])
            .expect("rot90 twice should succeed");
        assert_eq!(result.shape().dims(), &[2, 2]);
        assert_eq!(
            result.data().expect("failed to get rot90 twice data"),
            vec![4.0, 3.0, 2.0, 1.0]
        );
    }

    // Tiling a 1-D tensor twice doubles its length.
    #[test]
    fn test_tile_1d() {
        let tensor = Tensor::from_data(vec![1.0f32, 2.0], vec![2], DeviceType::Cpu)
            .expect("failed to create tensor for tile 1d");
        let result = tensor.tile(&[2]).expect("tile 1d should succeed");
        assert_eq!(result.shape().dims(), &[4]);
        assert_eq!(
            result.data().expect("failed to get tiled 1d data"),
            vec![1.0, 2.0, 1.0, 2.0]
        );
    }

    // Tiling a (1, 2) tensor by [2, 1] duplicates the row.
    #[test]
    fn test_tile_2d() {
        let tensor = Tensor::from_data(vec![1.0f32, 2.0], vec![1, 2], DeviceType::Cpu)
            .expect("failed to create tensor for tile 2d");
        let result = tensor.tile(&[2, 1]).expect("tile 2d should succeed");
        assert_eq!(result.shape().dims(), &[2, 2]);
    }

    // dim=None: flattened repeat_interleave duplicates each element in place.
    #[test]
    fn test_repeat_interleave_flatten() {
        let tensor = Tensor::from_data(vec![1.0f32, 2.0, 3.0], vec![3], DeviceType::Cpu)
            .expect("failed to create tensor for repeat_interleave");
        let result = tensor
            .repeat_interleave(2, None)
            .expect("repeat_interleave flatten should succeed");
        assert_eq!(result.shape().dims(), &[6]);
        assert_eq!(
            result.data().expect("failed to get repeat_interleave data"),
            vec![1.0, 1.0, 2.0, 2.0, 3.0, 3.0]
        );
    }

    // Repeating along dim 0 doubles that dimension's extent.
    #[test]
    fn test_repeat_interleave_dim() {
        let tensor = Tensor::from_data(vec![1.0f32, 2.0, 3.0, 4.0], vec![2, 2], DeviceType::Cpu)
            .expect("failed to create tensor for repeat_interleave dim");
        let result = tensor
            .repeat_interleave(2, Some(0))
            .expect("repeat_interleave along dim 0 should succeed");
        assert_eq!(result.shape().dims(), &[4, 2]);
    }

    // unflatten reshapes without reordering data.
    #[test]
    fn test_unflatten_basic() {
        let tensor = Tensor::from_data(
            vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0],
            vec![6],
            DeviceType::Cpu,
        )
        .expect("failed to create tensor for unflatten");
        let result = tensor
            .unflatten(0, &[2, 3])
            .expect("unflatten to [2,3] should succeed");
        assert_eq!(result.shape().dims(), &[2, 3]);
        assert_eq!(
            result.data().expect("failed to get unflattened data"),
            vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
        );
    }

    // A single dim may expand into more than two factors.
    #[test]
    fn test_unflatten_multiple_dims() {
        let tensor = Tensor::from_data(vec![1.0f32; 24], vec![24], DeviceType::Cpu)
            .expect("failed to create tensor for unflatten multiple dims");
        let result = tensor
            .unflatten(0, &[2, 3, 4])
            .expect("unflatten to [2,3,4] should succeed");
        assert_eq!(result.shape().dims(), &[2, 3, 4]);
    }

    // dim=None: indices are flat offsets; result takes the indices' shape.
    #[test]
    fn test_take_along_dim_flatten() {
        let tensor = Tensor::from_data(vec![1.0f32, 2.0, 3.0, 4.0], vec![4], DeviceType::Cpu)
            .expect("failed to create tensor for take_along_dim");
        let indices = Tensor::from_data(vec![0i64, 2, 1], vec![3], DeviceType::Cpu)
            .expect("failed to create indices tensor");
        let result = tensor
            .take_along_dim(&indices, None)
            .expect("take_along_dim flatten should succeed");
        assert_eq!(result.shape().dims(), &[3]);
        assert_eq!(
            result
                .data()
                .expect("failed to get take_along_dim flatten data"),
            vec![1.0, 3.0, 2.0]
        );
    }

    // dim=Some(1): each row gathers its own columns per the index row.
    #[test]
    fn test_take_along_dim_2d() {
        let tensor = Tensor::from_data(
            vec![1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0],
            vec![2, 3],
            DeviceType::Cpu,
        )
        .expect("failed to create 2d tensor for take_along_dim");
        let indices = Tensor::from_data(vec![0i64, 2, 1, 1, 0, 2], vec![2, 3], DeviceType::Cpu)
            .expect("failed to create 2d indices tensor");
        let result = tensor
            .take_along_dim(&indices, Some(1))
            .expect("take_along_dim 2d should succeed");
        assert_eq!(result.shape().dims(), &[2, 3]);
        assert_eq!(
            result.data().expect("failed to get take_along_dim 2d data"),
            vec![1.0, 3.0, 2.0, 5.0, 4.0, 6.0]
        );
    }
}