use rand::{Rng};
/// A dense n-dimensional array stored as a flat row-major `Vec`.
///
/// Invariant (maintained by the constructors and setters): the product of
/// `shape`'s entries equals `data.len()`.
// Debug/PartialEq are derived eagerly per Rust convention for public types;
// the derived impls only require `T: Debug` / `T: PartialEq` at use sites,
// so existing callers are unaffected.
#[derive(Clone, Debug, PartialEq)]
pub struct Tensor<T> {
    // Flat element storage, row-major order.
    data: Vec<T>,
    // Size of each dimension.
    shape: Vec<u32>,
}
impl<T: Default + Clone> Tensor<T>{
pub fn new(_shape: &[u32]) -> Tensor<T>{
let mut total_size: u32 = 1;
for i in 0.._shape.len(){
total_size *= _shape[i];
}
Self{
data: vec![T::default(); total_size as usize],
shape: _shape.to_vec(),
}
}
pub fn from_data(_data: &[T], _shape: &[u32]) -> Option<Self>{
if _shape.iter().product::<u32>() as usize != _data.len(){
return None;
}
Some(Self{
data: _data.to_vec(),
shape: _shape.to_vec(),
})
}
pub fn fill(fill_data: T, _shape: &[u32]) -> Self{
let full_size: u32 = _shape.iter().product();
Self{
data: vec![fill_data; full_size as usize],
shape: _shape.to_vec(),
}
}
pub fn get_data(&self) -> &Vec<T>{
return &self.data;
}
pub fn get_shape(&self) -> &Vec<u32>{
return &self.shape;
}
pub fn append(&self, tens2: &Tensor<T>) -> Option<Self>{
if (self.shape.len() != 1 || tens2.shape.len() != 1) && self.get_shape()[1..].to_vec() != tens2.get_shape()[1..].to_vec(){
return None;
}
let mut return_data: Vec<T> = self.get_data().clone();
let mut append_data: Vec<T> = tens2.get_data().clone();
return_data.append(&mut append_data);
let mut return_shape = self.get_shape().clone();
return_shape[0] += tens2.get_shape()[0];
Some(Self{
data: return_data,
shape: return_shape,
})
}
pub fn count_data(&self) -> usize{
self.get_data().len()
}
pub fn set_shape(&mut self, new_shape: &[u32]){
let shape_prod: u32 = new_shape.iter().product();
if(shape_prod as usize != self.data.len()){
return;
}
self.shape = new_shape.to_vec();
}
pub fn set_data(&mut self, new_data: &[T]){
if new_data.len() != self.data.len(){
return;
}
self.data = new_data.to_vec();
}
}
impl<T> Tensor<T> {
    /// Computes the flat row-major index for `pos`.
    ///
    /// Returns `None` when `pos` has the wrong number of dimensions or any
    /// coordinate is out of bounds for its dimension.
    fn flat_index(&self, pos: &[u32]) -> Option<usize> {
        // Compare lengths directly. The original computed
        // `self.shape.len() - pos.len()` on usize, which underflows
        // (panics in debug builds) whenever `pos` has MORE dimensions
        // than the tensor.
        if pos.len() != self.shape.len() {
            return None;
        }
        // Every coordinate must be strictly below its dimension's size.
        if pos.iter().zip(&self.shape).any(|(&p, &dim)| p >= dim) {
            return None;
        }
        // Row-major strides: walk dimensions innermost-first, growing the
        // stride by each dimension's size.
        let mut index: u32 = 0;
        let mut stride: u32 = 1;
        for (&p, &dim) in pos.iter().zip(&self.shape).rev() {
            index += p * stride;
            stride *= dim;
        }
        Some(index as usize)
    }

    /// Returns a reference to the element at `pos`, or `None` for an
    /// invalid position.
    pub fn value(&self, pos: &[u32]) -> Option<&T> {
        self.data.get(self.flat_index(pos)?)
    }

    /// Overwrites the element at `pos`; invalid positions are silently
    /// ignored (matches the original best-effort contract).
    pub fn set(&mut self, value: T, pos: &[u32]) {
        if let Some(idx) = self.flat_index(pos) {
            self.data[idx] = value;
        }
    }

    /// Converts a flat index into coordinates for this tensor's shape.
    pub fn idx_to_global(&self, idx: u32) -> Vec<u32> {
        idx_to_global(idx, &self.shape)
    }
}
impl Tensor<f32> {
    /// Creates a tensor of the given shape with every element drawn
    /// uniformly from `-rand_range..rand_range`.
    ///
    /// NOTE(review): assumes `rand_range > 0.0` — `random_range` panics on
    /// an empty range (e.g. `rand_range == 0.0`); confirm callers uphold
    /// this or add a guard.
    pub fn rand(rand_range: f32, _shape: &[u32]) -> Self {
        let full_size: u32 = _shape.iter().product();
        let mut rng = rand::rng();
        // Iterator form removes the unused loop variable `i` (compiler
        // warning in the original) and sizes the Vec via `collect`.
        let data: Vec<f32> = (0..full_size)
            .map(|_| rng.random_range(-rand_range..rand_range))
            .collect();
        Self {
            data,
            shape: _shape.to_vec(),
        }
    }
}
/// Converts a flat row-major index into per-dimension coordinates for
/// `shape`.
///
/// Returns an empty `Vec` when `idx` is out of range, i.e. `idx >=` the
/// total element count implied by `shape`.
pub fn idx_to_global(idx: u32, shape: &[u32]) -> Vec<u32> {
    // Valid flat indices are 0..product. The original used `>`, wrongly
    // accepting `idx == product` and emitting an out-of-range coordinate.
    // The `>=` check also rejects everything when any dimension is 0,
    // which avoids the original's divide-by-zero on `shape[i] == 0`.
    if idx >= shape.iter().product::<u32>() {
        return Vec::new();
    }
    let mut remaining = idx;
    let mut stride: u32 = shape.iter().product();
    let mut coords = Vec::with_capacity(shape.len());
    for &dim in shape {
        // Stride of dimension i is the product of all later dimensions.
        stride /= dim;
        coords.push(remaining / stride);
        remaining %= stride;
    }
    coords
}