// rds_tensors/lib.rs

1use std::iter::repeat;
2use std::borrow::{Borrow,BorrowMut};
3use std::fmt;
4use std::ops::{Index, IndexMut, Range};
5
6pub mod types;
7pub mod tindex;
8
9use types::{RDSType, RDSTyped, CastTo};
10use tindex::TIndex;
11
12/***************************************************************************************************
13                                               TRAITS
14***************************************************************************************************/
15
16pub trait Tensor<T: RDSTyped> : Sized {
17
18    fn from_scalar<R: AsRef<[usize]>>(shape: R, s: T) -> Self;
19
20    fn zeros<R: AsRef<[usize]>>(shape: R) -> Self {
21        Self::from_scalar::<R>(shape, T::cast_from(0u8))
22    }
23
24    fn ones<R: AsRef<[usize]>>(shape: R) -> Self {
25        Self::from_scalar::<R>(shape, T::cast_from(1u8))
26    }
27
28    fn from_tensor<W: Tensor<S>, S: RDSTyped + CastTo<T>>(tensor: &W) -> Self;
29
30    fn from_slice<R: AsRef<[usize]>, S: AsRef<[T]>>(shape: R, slice: S) -> Self;
31
32    fn from_boxed_slice<R: AsRef<[usize]>>(shape: R, data: Box<[T]>) -> Self;
33
34    fn into_boxed_slice(self) -> Box<[T]>;
35
36    fn dim(&self) -> usize;
37
38    fn effective_dim(&self) -> usize {
39        self.shape().iter().fold(self.dim(), |acc, &i| acc - (i == 1) as usize)
40    }
41
42    fn shape(&self) -> &[usize];
43
44    fn strides(&self) -> &[usize]; 
45
46    fn size(&self) -> usize;
47
48    fn rds_type(&self) -> RDSType;
49
50    fn get_raw_array(&self) -> &[T];
51
52    fn get_raw_array_mut(&mut self) -> &mut [T];
53
54    fn reshape_into_vector<R: AsRef<[usize]>>(self, shape: R) -> Vector<T> {
55        return Vector::from_boxed_slice(shape, self.into_boxed_slice());
56    }
57
58    fn reshape_into_matrix<R: AsRef<[usize]>>(self, shape: R) -> Matrix<T> {
59        return Matrix::from_boxed_slice(shape, self.into_boxed_slice());
60    }
61
62    fn reshape_into_tensor<R: AsRef<[usize]>>(self, shape: R) -> TensorN<T> {
63        return TensorN::from_boxed_slice(shape, self.into_boxed_slice());
64    }
65
66    fn insert<W: Tensor<T>>(&mut self, dim: usize, pos: usize, tensor: &W);
67
68    fn extract<R: AsRef<[Range<usize>]>>(&self, bounds: R) -> Self;
69
70    fn remove<R: AsRef<[Range<usize>]>>(&mut self, bounds: R);
71
72    fn assign<R: AsRef<[Range<usize>]>, W: Tensor<T>>(&mut self, bounds: R, tensor: &W);
73
74    fn right_split(&mut self, dim: usize, pos: usize) -> Self {
75        debug_assert!(dim < self.dim(), "TensorN::right_split(): splitting dimension is invalid");
76        debug_assert!(pos <= self.shape()[dim], "TensorN::right_split(): splitting position is out of bound");
77        let mut bounds: Vec<Range<usize>> = self.shape().iter().map(|&x| 0..x).collect();
78        bounds[dim].start = pos;
79        let right = self.extract(&bounds);
80        self.remove(&bounds);
81        return right;
82    }
83
84    fn left_split(&mut self, dim: usize, pos: usize) -> Self {
85        debug_assert!(dim < self.dim(), "TensorN::left_split(): splitting dimension is invalid");
86        debug_assert!(pos <= self.shape()[dim], "TensorN::left_split(): splitting position is out of bound");
87        let mut bounds: Vec<Range<usize>> = self.shape().iter().map(|&x| 0..x).collect();
88        bounds[dim].end = pos;
89        let left = self.extract(&bounds);
90        self.remove(&bounds);
91        return left;
92    }
93
94
95    fn transpose(&mut self);
96}
97
/// Computes the row-major strides for `shape`: the stride of a dimension is
/// the product of the extents of every dimension after it (last stride is 1).
/// An empty shape yields an empty stride vector.
fn shape_to_strides(shape: &[usize]) -> Vec<usize> {
    let mut strides = vec![0usize; shape.len()];
    let mut size = 1;
    // Walk the dimensions from last to first, accumulating the element count.
    for (stride, &extent) in strides.iter_mut().zip(shape.iter()).rev() {
        *stride = size;
        size *= extent;
    }
    strides
}
107
108/***************************************************************************************************
109                                               VECTOR
110***************************************************************************************************/
111
/// One-dimensional tensor backed by a contiguous `Vec`.
pub struct Vector<T: RDSTyped> {
    // Element storage, length equal to shape[0].
    data: Vec<T>,
    // Single-entry shape: [len].
    shape: Vec<usize>,
    // Always [1] for a vector.
    strides: Vec<usize>
}
117
118impl<T: RDSTyped> Tensor<T> for Vector<T> {
119    fn from_scalar<R: AsRef<[usize]>>(shape: R, s: T) -> Self {
120        let shape = shape.as_ref();
121        debug_assert!(shape.len() == 1, "Vector::from_scalar(): provided shape has more than one dimension");
122        let data: Vec<T> = repeat(s).take(shape[0]).collect();
123        Vector {
124            data: data,
125            shape: vec![shape[0]],
126            strides: vec![1]
127        }
128    }
129
130    fn from_slice<R: AsRef<[usize]>, S: AsRef<[T]>>(shape: R, slice: S) -> Self {
131        return Self::from_boxed_slice(shape, slice.as_ref().to_vec().into_boxed_slice());
132    }
133
134    fn from_tensor<W: Tensor<S>, S: RDSTyped + CastTo<T>>(tensor: &W) -> Self {
135        debug_assert!(tensor.effective_dim() <= 1,  "Vector::from_tensor(): provided tensor has more than one non-unit dimension");
136        let data: Vec<T> = tensor.get_raw_array().iter().map(|&i| i.cast_to()).collect();
137        Vector {
138            shape: vec![data.len()],
139            data: data,
140            strides: vec![1]
141        }
142    }
143
144    fn from_boxed_slice<R: AsRef<[usize]>>(shape: R, data: Box<[T]>) -> Self {
145        let shape = shape.as_ref();
146        debug_assert!(shape.len() == 1, "Vector::from_boxed_slice(): provided shape has more than one dimension");
147        debug_assert!(shape[0] == data.len(), "Vector::from_boxed_slice(): provided shape and slice do not have the same number of elements");
148        Vector {
149            shape: vec![data.len()],
150            data: data.into_vec(),
151            strides: vec![1]
152        }
153    }
154
155    fn into_boxed_slice(self) -> Box<[T]> {
156        return self.data.into_boxed_slice();
157    }
158
159    fn dim(&self) -> usize { 
160        1 
161    }
162
163    fn shape(&self) -> &[usize] { 
164        self.shape.as_slice()
165    }
166
167    fn strides(&self) -> &[usize] { 
168        self.strides.as_slice()
169    }
170
171    fn size(&self) -> usize { 
172        self.data.len() 
173    }
174
175    fn rds_type(&self) -> RDSType {
176        T::rds_type()
177    }
178
179    fn get_raw_array(&self) -> &[T] {
180        self.data.borrow()
181    }
182
183    fn get_raw_array_mut(&mut self) -> &mut [T] {
184        self.data.borrow_mut()
185    }
186
187    fn insert<W: Tensor<T>>(&mut self, dim: usize, pos: usize, tensor: &W) {
188        debug_assert!(dim == 0, "Vector::insert(): insertion dimension should be 0");
189        debug_assert!(pos <= self.shape[0], "Vector::insert(): insertion position is out of bound");
190        debug_assert!(tensor.dim() == 1 , "Vector::insert(): tensor to insert is not uni-dimensional");
191        // Make self the right size
192        self.data.reserve(tensor.size());
193        // Write the extended part coming from tensor
194        self.data.extend_from_slice(&tensor.get_raw_array()[(self.shape[0]-pos)..(tensor.size())]);
195        // Write the extended part coming from self
196        for i in pos..self.shape[0] {
197            let t = self.data[i];
198            self.data.push(t);
199        }
200        // Write the replaced part
201        let tensor_data = tensor.get_raw_array();
202        for i in pos..self.shape[0] {
203            self.data[i] = tensor_data[i-pos];
204        }
205        self.shape[0] += tensor.size();
206    }
207
208    fn extract<R: AsRef<[Range<usize>]>>(&self, bounds: R) -> Self {
209        let bounds = bounds.as_ref();
210        debug_assert!(bounds.len() == 1, "Vector::extract(): bounds should be uni-dimensional");
211        debug_assert!(bounds[0].start <= self.shape[0] && bounds[0].end <= self.shape[0], "Vector::extract(): bounds are out of range");
212        debug_assert!(bounds[0].start <= bounds[0].end, "Vector::extract(): range start is after range end");
213        return Vector::<T>::from_slice([bounds[0].end - bounds[0].start], &self.data[bounds[0].clone()]);
214    }
215
216    fn remove<R: AsRef<[Range<usize>]>>(&mut self, bounds: R) {
217        let bounds = bounds.as_ref();
218        debug_assert!(bounds.len() == 1, "Vector::remove(): bounds should be uni-dimensional");
219        debug_assert!(bounds[0].start <= self.shape[0] && bounds[0].end <= self.shape[0], "Vector::remove(): bounds are out of range");
220        debug_assert!(bounds[0].start <= bounds[0].end, "Vector::remove(): range start is after range end");
221        let delta = bounds[0].end - bounds[0].start;
222        for i in bounds[0].end..self.shape[0] {
223            self.data[i - delta] = self.data[i]
224        }
225        self.shape[0] -= delta;
226        self.data.truncate(self.shape[0]);
227    }
228
229    fn assign<R: AsRef<[Range<usize>]>, W: Tensor<T>>(&mut self, bounds: R, tensor: &W) {
230        let bounds = bounds.as_ref();
231        debug_assert!(bounds.len() == 1, "Vector::assign(): bounds should be uni-dimensional");
232        debug_assert!(tensor.dim() == 1, "Vector::assign(): tensor should be uni-dimensional");
233        debug_assert!(bounds[0].end - bounds[0].start <= tensor.shape()[0], "Vector::assign(): bounds are out of range");
234        debug_assert!(bounds[0].start <= self.shape[0] && bounds[0].end <= self.shape[0], "Vector::assign(): bounds are out of range");
235        debug_assert!(bounds[0].start <= bounds[0].end, "Vector::assign(): range start is after range end");
236        for i in 0..(bounds[0].end - bounds[0].start) {
237            self.data[bounds[0].start + i] = tensor.get_raw_array()[i];
238        }
239    }
240
241    fn transpose(&mut self) {
242        debug_assert!(false, "Vector::transpose(): transposition of a vector has no effect");
243    }
244}
245
246impl<I,T> Index<I> for Vector<T> where I: AsRef<[usize]>, T: RDSTyped {
247    type Output = T;
248
249    fn index(&self, i: I) -> &T {
250        let i = i.as_ref();
251        debug_assert!(i.len() == 1, "Vector::index(): provided index has more than one dimension");
252        return &self.data[i[0]];
253    }
254}
255
256impl<I,T> IndexMut<I> for Vector<T> where I: AsRef<[usize]>, T: RDSTyped {
257    fn index_mut(&mut self, i: I) -> &mut T {
258        let i = i.as_ref();
259        debug_assert!(i.len() == 1, "Vector::index(): provided index_mut has more than one dimension");
260        return &mut self.data[i[0]];
261    }
262}
263
264impl<T: RDSTyped> fmt::Display for Vector<T> {
265    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
266        write!(f, "[")?;
267        for i in 0..self.shape[0] {
268            write!(f, "{}", self.data[i])?;
269            if i != self.shape[0]-1 {
270                write!(f, ", ")?;
271            }
272        }
273        write!(f, "]")
274    }
275}
276
277/***************************************************************************************************
278                                               MATRIX
279***************************************************************************************************/
280
/// Two-dimensional tensor stored in row-major order in a contiguous `Vec`.
pub struct Matrix<T: RDSTyped> {
    // Element storage, length equal to shape[0] * shape[1].
    data: Vec<T>,
    // Two-entry shape: [rows, cols].
    shape: Vec<usize>,
    // Row-major strides: [cols, 1].
    strides: Vec<usize>
}
286
impl<T: RDSTyped> Tensor<T> for Matrix<T> {
    /// Builds a `shape[0] x shape[1]` matrix filled with `s`.
    fn from_scalar<R: AsRef<[usize]>>(shape: R, s: T) -> Self {
        let shape = shape.as_ref();
        debug_assert!(shape.len() == 2, "Matrix::from_scalar(): provided shape is not two dimensional");
        let data: Vec<T> = repeat(s).take(shape[0]*shape[1]).collect();
        Matrix {
            data: data,
            shape: shape.to_vec(),
            strides: vec![shape[1], 1]
        }
    }

    /// Builds a matrix by element-wise casting `tensor`, which must have at
    /// most two non-unit dimensions. Unit dimensions are squeezed out and the
    /// shape is then padded back to exactly two entries with trailing 1s.
    fn from_tensor<W: Tensor<S>, S: RDSTyped + CastTo<T>>(tensor: &W) -> Self {
        debug_assert!(tensor.effective_dim() <= 2, "Matrix::from_tensor(): provided tensor has more than two non-unit dimensions");
        let mut shape = Vec::<usize>::with_capacity(2);
        // Keep only the non-unit dimensions of the source shape.
        for l in tensor.shape() {
            if *l > 1 {
                shape.push(*l);
            }
        }
        // Pad with 1s so the resulting shape is exactly two dimensional.
        while shape.len() < 2 {
            shape.push(1);
        }
        let data: Vec<T> = tensor.get_raw_array().iter().map(|&i| i.cast_to()).collect();
        Matrix {
            strides: vec![shape[1], 1],
            shape: shape,
            data: data
        }
    }

    /// Builds a matrix of the given shape by copying `slice`.
    fn from_slice<R: AsRef<[usize]>, S: AsRef<[T]>>(shape: R, slice: S) -> Self {
        return Self::from_boxed_slice(shape, slice.as_ref().to_vec().into_boxed_slice());
    }

    /// Builds a matrix of the given shape, taking ownership of `data`
    /// (interpreted in row-major order).
    fn from_boxed_slice<R: AsRef<[usize]>>(shape: R, data: Box<[T]>) -> Self {
        let shape = shape.as_ref();
        debug_assert!(shape.len() == 2, "Matrix::from_boxed_slice(): provided shape is not two dimensional");
        debug_assert!(shape[0] * shape[1] == data.len(), "Matrix::from_boxed_slice(): provided data and shape does not have the same number of elements");
        Matrix {
            data: data.into_vec(),
            shape: shape.to_vec(),
            strides: vec![shape[1], 1]
        }
    }

    /// Consumes the matrix and returns its row-major backing storage.
    fn into_boxed_slice(self) -> Box<[T]> {
        return self.data.into_boxed_slice();
    }

    /// A matrix is always two dimensional.
    fn dim(&self) -> usize {
        2
    }

    fn shape(&self) -> &[usize] {
        self.shape.as_slice()
    }

    fn strides(&self) -> &[usize] {
        self.strides.as_slice()
    }

    fn size(&self) -> usize {
        self.data.len()
    }

    fn rds_type(&self) -> RDSType {
        T::rds_type()
    }

    fn get_raw_array(&self) -> &[T] {
        self.data.borrow()
    }

    fn get_raw_array_mut(&mut self) -> &mut [T] {
        self.data.borrow_mut()
    }

    /// Inserts `tensor` at row/column `pos` along dimension `dim`
    /// (0 = rows, 1 = columns). The non-insertion dimension of `tensor` must
    /// match `self`. The merge is done in place by growing the storage and
    /// interleaving chunks of `self` and `tensor` back to front.
    fn insert<W: Tensor<T>>(&mut self, dim: usize, pos: usize, tensor: &W) {
        debug_assert!(dim <= 1, "Matrix::insert(): insertion dimension should be 0 or 1");
        debug_assert!(pos <= self.shape[dim], "Matrix::insert(): insertion position is out of bound");
        debug_assert!(tensor.dim() == 2 , "Matrix::insert(): tensor to insert is not two dimensional");
        // dim^1 will produce the dimension orthogonal to the insertion dimension (dim)
        debug_assert!(self.shape[dim^1] == tensor.shape()[dim^1], "Matrix::insert(): tensor to insert has an incompatible shape");
        let mut idx_src_self = self.data.len();
        // Make self the right size
        self.data.reserve(tensor.size());
        for _ in 0..tensor.size() {
            self.data.push(T::cast_from(0u8));
        }
        // Back to front zipping parameter init:
        //  - length:  elements per contiguous chunk copied from `tensor` (one row)
        //  - count:   number of such chunks
        //  - spacing: elements of `self` between two consecutive tensor chunks
        //             (0 when inserting rows: the tensor block is contiguous)
        //  - offset:  trailing elements of `self` located after the insertion point
        let length = tensor.shape()[1];
        let count = tensor.shape()[0];
        let spacing = match dim {
            0 => 0,
            1 => self.shape[1],
            _ => unreachable!()
        };
        let offset = match dim {
            0 => (self.shape[0] - pos) * self.shape[1],
            1 => self.shape[1] - pos,
            _ => unreachable!()
        };
        // Back to front zipping: walk all three cursors backwards so that no
        // source element of `self` is overwritten before it has been moved.
        let mut idx_dst = self.data.len();
        let mut idx_src_tensor = tensor.size();
        let tensor_data = tensor.get_raw_array();
        // Trailing part of `self` located after the last inserted chunk.
        for _ in 0..offset {
            idx_dst -= 1;
            idx_src_self -= 1;
            self.data[idx_dst] = self.data[idx_src_self];
        }
        // Last chunk of `tensor`.
        for _ in 0..length {
            idx_dst -= 1;
            idx_src_tensor -= 1;
            self.data[idx_dst] = tensor_data[idx_src_tensor];
        }
        // Remaining chunks, alternating `self` spacing and `tensor` data.
        for _ in 1..count {
            for _ in 0..spacing {
                idx_dst -= 1;
                idx_src_self -= 1;
                self.data[idx_dst] = self.data[idx_src_self];
            }
            for _ in 0..length {
                idx_dst -= 1;
                idx_src_tensor -= 1;
                self.data[idx_dst] = tensor_data[idx_src_tensor];
            }
        }
        self.shape[dim] += tensor.shape()[dim];
        self.strides = shape_to_strides(self.shape());
    }

    /// Returns a copy of the sub-matrix delimited by `bounds`
    /// (`[row range, column range]`).
    fn extract<R: AsRef<[Range<usize>]>>(&self, bounds: R) -> Self {
        let bounds = bounds.as_ref();
        debug_assert!(bounds.len() == 2, "Matrix::extract(): bounds should be two dimensional");
        debug_assert!(bounds[0].start <= self.shape[0] && bounds[0].end <= self.shape[0], "Matrix::extract(): bounds are out of range");
        debug_assert!(bounds[1].start <= self.shape[1] && bounds[1].end <= self.shape[1], "Matrix::extract(): bounds are out of range");
        debug_assert!(bounds[0].start <= bounds[0].end, "Matrix::extract(): range start is after range end");
        debug_assert!(bounds[1].start <= bounds[1].end, "Matrix::extract(): range start is after range end");
        // New exact allocation
        let mut new_data = Vec::<T>::with_capacity((bounds[0].end - bounds[0].start) * (bounds[1].end - bounds[1].start));
        // Striding parameters: start of the first selected row segment and
        // the number of columns to copy per row.
        let mut idx_src = bounds[0].start * self.strides[0] + bounds[1].start;
        let length = bounds[1].end - bounds[1].start;
        // Extract row by row
        for _ in bounds[0].clone() {
            new_data.extend_from_slice(&self.data[idx_src..idx_src+length]);
            idx_src += self.strides[0];
        }
        return Matrix::<T>::from_boxed_slice([bounds[0].end - bounds[0].start, bounds[1].end - bounds[1].start], new_data.into_boxed_slice());
    }

    /// Removes the sub-matrix delimited by `bounds`. The bounds must span the
    /// full extent of exactly one dimension, so whole rows or whole columns
    /// are removed. Works in place by compacting the storage front to back.
    fn remove<R: AsRef<[Range<usize>]>>(&mut self, bounds: R) {
        let bounds = bounds.as_ref();
        let mut removed_dim: Option<usize> = None;
        debug_assert!(bounds.len() == 2, "Matrix::remove(): bounds should be two dimensional");
        for i in 0..2 {
            debug_assert!(bounds[i].start <= self.shape[i] && bounds[i].end <= self.shape[i], "Matrix::remove(): bounds are out of range");
            debug_assert!(bounds[i].start <= bounds[i].end, "Matrix::remove(): range start is after range end");
            // The dimension whose bounds do not cover the full extent is the
            // one being shrunk; there must be exactly one such dimension.
            if (bounds[i].end - bounds[i].start) != self.shape[i] {
                debug_assert!(removed_dim.is_none(), "Matrix::remove(): bounds should match all dimensions but one");
                removed_dim = Some(i);
            }
        }
        debug_assert!(removed_dim.is_some(), "Matrix::remove(): cannot remove the entire matrix");
        let removed_dim = removed_dim.unwrap();
        // Front to back unzipping parameter init:
        //  - length:  contiguous elements dropped per removed segment
        //  - count:   number of removed segments
        //  - spacing: kept elements between two removed segments
        //  - offset:  position of the first removed element
        let length = bounds[1].end - bounds[1].start;
        let count = bounds[0].end - bounds[0].start;
        let spacing = self.strides[0] - length;
        let offset = bounds[0].start * self.strides[0] + bounds[1].start * self.strides[1];
        // Front to back in-place unzipping: dst lags behind src, skipping the
        // removed segments as src passes over them.
        let mut idx_dst = offset;
        let mut idx_src = offset + length;
        for _ in 1..count {
            for _ in 0..spacing {
                self.data[idx_dst] = self.data[idx_src];
                idx_dst += 1;
                idx_src += 1;
            }
            for _ in 0..length {
                idx_src += 1;
            }
        }
        // Copy whatever remains after the last removed segment.
        while idx_src < self.data.len() {
            self.data[idx_dst] = self.data[idx_src];
            idx_dst += 1;
            idx_src += 1;
        }
        self.shape[removed_dim] -= bounds[removed_dim].end - bounds[removed_dim].start;
        self.strides = shape_to_strides(self.shape());
        self.data.truncate(self.shape[0] * self.shape[1]);
    }

    /// Overwrites the sub-matrix delimited by `bounds` with the top-left
    /// corner of `tensor`.
    fn assign<R: AsRef<[Range<usize>]>, W: Tensor<T>>(&mut self, bounds: R, tensor: &W) {
        let bounds = bounds.as_ref();
        debug_assert!(bounds.len() == 2, "Matrix::assign(): bounds should be two dimensional");
        debug_assert!(tensor.dim() == 2, "Matrix::assign(): tensor should be two dimensional");
        debug_assert!(bounds[0].start <= bounds[0].end, "Matrix::assign(): range start is after range end");
        debug_assert!(bounds[1].start <= bounds[1].end, "Matrix::assign(): range start is after range end");
        debug_assert!(bounds[0].end - bounds[0].start <= tensor.shape()[0], "Matrix::assign(): bounds are out of range");
        debug_assert!(bounds[1].end - bounds[1].start <= tensor.shape()[1], "Matrix::assign(): bounds are out of range");
        debug_assert!(bounds[0].start <= self.shape[0] && bounds[0].end <= self.shape[0], "Matrix::assign(): bounds are out of range");
        debug_assert!(bounds[1].start <= self.shape[1] && bounds[1].end <= self.shape[1], "Matrix::assign(): bounds are out of range");
        for i in 0..(bounds[0].end - bounds[0].start) {
            let self_idx = (bounds[0].start + i) * self.strides[0];
            let tensor_idx = i * tensor.strides()[0];
            for j in 0..(bounds[1].end - bounds[1].start) {
                self.data[self_idx + bounds[1].start + j] = tensor.get_raw_array()[tensor_idx + j];
            }
        }
    }

    /// In-place transposition using permutation-cycle following: the mapping
    /// `idx -> (idx % cols) * new_stride + idx / cols` sends the row-major
    /// position of an element in the old layout to its position in the
    /// transposed layout, and the elements are rotated one cycle at a time
    /// with a single carried value, so no auxiliary buffer is needed.
    fn transpose(&mut self) {
        let mut init_idx = 0;
        let mut new_shape = self.shape.clone();
        new_shape.reverse();
        let new_strides = shape_to_strides(&new_shape);
        'outer: while init_idx < self.data.len() {
            let start_idx = init_idx;
            let mut idx = start_idx;
            // Check walk: traverse the cycle without writing to decide whether
            // start_idx is the smallest index of its cycle (cycles are only
            // executed from their smallest member, so each runs exactly once).
            loop {
                // Compute next transposition transformation
                idx = (idx % self.strides[0]) * new_strides[0] + (idx / self.strides[0]);
                // Cycled ended
                if idx == start_idx {
                    break;
                }
                // Optimization to reduce the large loop iteration numbers
                else if idx == init_idx + 1 {
                    init_idx += 1;
                }
                // This cycle goes through an idx < start_idx -> we have already gone through it
                else if idx < start_idx {
                    init_idx += 1;
                    continue 'outer;
                }
            }
            // Execute walk: rotate the cycle, carrying one displaced value.
            idx = start_idx;
            let mut cycle_carry = self.data[idx];
            loop {
                // Compute next transposition transformation
                idx = (idx % self.strides[0]) * new_strides[0] + (idx / self.strides[0]);
                // Swap cycle_carry <-> self.data[idx]
                let t = self.data[idx];
                self.data[idx] = cycle_carry;
                cycle_carry = t;
                // Cycled ended
                if idx == start_idx {
                    break;
                }
                // Optimization to reduce the large loop iteration numbers
                else if idx == init_idx + 1 {
                    init_idx += 1;
                }
            }
            init_idx += 1;
        }
        self.shape = new_shape;
        self.strides = new_strides;
    }
}
552
553impl<I,T> Index<I> for Matrix<T> where I: AsRef<[usize]>, T: RDSTyped {
554    type Output = T;
555
556    fn index(&self, i: I) -> &T {
557        let i = i.as_ref();
558        debug_assert!(i.len() == 2, "Matrix::index(): provided index is not two dimensional");
559        let pos = i.to_pos(self.shape(), self.strides());
560        return &self.data[pos];
561    }
562}
563
564impl<I,T> IndexMut<I> for Matrix<T> where I: AsRef<[usize]>, T: RDSTyped {
565    fn index_mut(&mut self, i: I) -> &mut T {
566        let i = i.as_ref();
567        debug_assert!(i.len() == 2, "Matrix::index_mut(): provided index is not two dimensionsal");
568        let pos = i.to_pos(self.shape(), self.strides());
569        return &mut self.data[pos];
570    }
571}
572
573impl<T: RDSTyped> fmt::Display for Matrix<T> {
574    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
575        write!(f, "[")?;
576        for i in 0..self.shape[0] {
577            write!(f, "\t[")?;
578            for j in 0..self.shape[1] {
579                write!(f, "{}", self[[i,j]])?;
580                if j != self.shape[1]-1 {
581                    write!(f, ", ")?;
582                }
583            }
584            if i != self.shape[0]-1 {
585                writeln!(f, "],")?;
586            }
587            else {
588                writeln!(f, "]")?;
589            }
590        }
591        write!(f, "]")
592    }
593}
594
595/***************************************************************************************************
596                                               TENSORN
597***************************************************************************************************/
598
/// N-dimensional tensor stored in row-major order in a contiguous `Vec`.
pub struct TensorN<T: RDSTyped> {
    // Element storage, length equal to the product of the shape extents.
    data: Vec<T>,
    // One extent per dimension.
    shape: Vec<usize>,
    // Row-major strides matching `shape`.
    strides: Vec<usize>
}
604
605impl<T: RDSTyped> Tensor<T> for TensorN<T> {
606    fn from_scalar<R: AsRef<[usize]>>(shape: R, s: T) -> Self {
607        let shape = shape.as_ref();
608        debug_assert!(shape.len() > 0, "TensorN::from_scalar(): shape should have a least one dimension");
609        let strides = shape_to_strides(shape);
610        let size = strides[0] * shape[0];
611        let data: Vec<T> = repeat(s).take(size).collect();
612        TensorN {
613            data: data,
614            shape: shape.to_vec(),
615            strides: strides
616        }
617    }
618
619    fn from_tensor<W: Tensor<S>, S: RDSTyped + CastTo<T>>(tensor: &W) -> Self {
620        let data: Vec<T> = tensor.get_raw_array().iter().map(|&i| i.cast_to()).collect();
621        TensorN {
622            shape: tensor.shape().to_vec(),
623            data: data,
624            strides: tensor.strides().to_vec()
625        }
626    }
627
628    fn from_slice<R: AsRef<[usize]>, S: AsRef<[T]>>(shape: R, slice: S) -> Self {
629        return Self::from_boxed_slice(shape, slice.as_ref().to_vec().into_boxed_slice());
630    }
631
632    fn from_boxed_slice<R: AsRef<[usize]>>(shape: R, data: Box<[T]>) -> Self {
633        let shape = shape.as_ref();
634        debug_assert!(shape.len() > 0, "TensorN::from_boxed_slice(): shape should have a least one dimension");
635        let strides = shape_to_strides(shape);
636        debug_assert!(strides[0] * shape[0] == data.len(), "TensorN::from_boxed_slice(): provided data and shape does not have the same number of elements");
637        TensorN {
638            data: data.into_vec(),
639            shape: shape.to_vec(),
640            strides: strides
641        }
642    }
643
644    fn into_boxed_slice(self) -> Box<[T]> {
645        return self.data.into_boxed_slice();
646    }
647
648    fn dim(&self) -> usize { 
649        self.shape.len()
650    }
651
652    fn shape(&self) -> &[usize] { 
653        self.shape.as_slice()
654    }
655
656    fn strides(&self) -> &[usize] { 
657        self.strides.as_slice()
658    }
659
660    fn size(&self) -> usize { 
661        self.data.len() 
662    }
663
664    fn rds_type(&self) -> RDSType {
665        T::rds_type()
666    }
667
668    fn get_raw_array(&self) -> &[T] {
669        self.data.borrow()
670    }
671
672    fn get_raw_array_mut(&mut self) -> &mut [T] {
673        self.data.borrow_mut()
674    }
675
676    fn insert<W: Tensor<T>>(&mut self, dim: usize, pos: usize, tensor: &W) {
677        debug_assert!(dim < self.dim(), "TensorN::insert(): insertion dimension should be 0 or 1");
678        debug_assert!(pos <= self.shape[dim], "TensorN::insert(): insertion position is out of bound");
679        debug_assert!(tensor.dim() == self.dim() , "TensorN::insert(): tensor has a different dimensionality than self");
680        for i in 0..self.shape.len() {
681            debug_assert!((dim == i) || (self.shape[i] == tensor.shape()[i]), "TensorN::insert(): tensor shape doesn't match with self shape in the non-insertion dimensions");
682        }
683
684        let mut idx_src_self = self.data.len();
685        // Make self the right size
686        self.data.reserve(tensor.size());
687        for _ in 0..tensor.size() {
688            self.data.push(T::cast_from(0u8));
689        }
690        // Back to front zipping parameter init
691        let length = tensor.shape()[dim..].iter().fold(1usize, |acc, x| acc*x);
692        let count = tensor.shape()[..dim].iter().fold(1usize, |acc, x| acc*x);
693        let spacing = self.shape[dim..].iter().fold(1usize, |acc, x| acc*x);
694        let offset = (self.shape[dim] - pos) * self.shape[dim+1..].iter().fold(1usize, |acc, x| acc*x);
695        // Back to front zipping
696        let mut idx_dst = self.data.len();
697        let mut idx_src_tensor = tensor.size();
698        let tensor_data = tensor.get_raw_array();
699        for _ in 0..offset {
700            idx_dst -= 1;
701            idx_src_self -= 1;
702            self.data[idx_dst] = self.data[idx_src_self];
703        }
704        for _ in 0..length {
705            idx_dst -= 1;
706            idx_src_tensor -= 1;
707            self.data[idx_dst] = tensor_data[idx_src_tensor];
708        }
709        for _ in 1..count {
710            for _ in 0..spacing {
711                idx_dst -= 1;
712                idx_src_self -= 1;
713                self.data[idx_dst] = self.data[idx_src_self];
714            }
715            for _ in 0..length {
716                idx_dst -= 1;
717                idx_src_tensor -= 1;
718                self.data[idx_dst] = tensor_data[idx_src_tensor];
719            }
720        }
721        self.shape[dim] += tensor.shape()[dim];
722        self.strides = shape_to_strides(self.shape());
723    }
724
    /// Returns a new tensor holding a copy of the sub-region of `self` delimited by `bounds`.
    ///
    /// `bounds` must contain exactly one `Range` per dimension of `self`; every range must lie
    /// within the corresponding dimension and have `start <= end` (checked with `debug_assert!`
    /// only, so release builds do not validate). If any range is empty the result is an empty
    /// tensor whose shape still reflects the per-dimension range lengths.
    fn extract<R: AsRef<[Range<usize>]>>(&self, bounds: R) -> Self {
        let bounds = bounds.as_ref();
        debug_assert!(bounds.len() == self.dim(), "TensorN::extract(): bounds and self have different dimensionality");
        for i in 0..self.dim() {
            debug_assert!(bounds[i].start <= self.shape[i] && bounds[i].end <= self.shape[i], "TensorN::extract(): bounds are out of range");
            debug_assert!(bounds[i].start <= bounds[i].end, "TensorN::extract(): range start is after range end");
        }
        // Total element count of the extracted region (0 if any range is empty)
        let size = bounds.iter().fold(1usize, |acc, b| acc * (b.end - b.start));
        let mut new_data = Vec::<T>::with_capacity(size);
        if size > 0 {
            // Multi-dimensional cursor, starting at the lower corner of the region
            let mut idx: Vec<usize> = bounds.iter().map(|b| b.start).collect();
            // Elements along the last dimension are contiguous, so each "row" of the region
            // can be copied as a single slice of this length
            let length = bounds[self.dim()-1].end - bounds[self.dim()-1].start;
            // Extract row by row
            loop {
                let start_pos = idx.to_pos(&self.shape[..], &self.strides[..]);
                new_data.extend_from_slice(&self.data[start_pos..start_pos+length]);
                // Row order increment but within bounds
                // Start from the before last dimension (because we do row by row copy)
                let mut i = self.dim()-1;
                while i > 0 {
                    idx[i-1] += 1;
                    if idx[i-1] >= bounds[i-1].end {
                        // This dimension wrapped around: reset it and carry into the next
                        idx[i-1] = bounds[i-1].start;
                        i -= 1;
                    }
                    else {
                        break;
                    }
                }
                // Complete overflow of the idx, we inserted everything
                if i == 0 {
                    break;
                }
            }
        }
        let new_shape : Vec<usize> = bounds.iter().map(|b| b.end - b.start).collect();
        return TensorN::<T>::from_boxed_slice(new_shape, new_data.into_boxed_slice());
    }
764
    /// Removes from `self` the hyper-slab delimited by `bounds`, shrinking the tensor in place
    /// along exactly one dimension.
    ///
    /// `bounds` must span the full extent of every dimension except one (the dimension being
    /// shrunk), and removing the entire tensor is rejected. All preconditions are checked with
    /// `debug_assert!` only. The surviving elements are compacted front-to-back in the existing
    /// buffer, then shape/strides are recomputed and the buffer truncated.
    fn remove<R: AsRef<[Range<usize>]>>(&mut self, bounds: R) {
        let bounds = bounds.as_ref();
        // Index of the single dimension whose bounds do not cover its full extent
        let mut removed_dim: Option<usize> = None;
        debug_assert!(bounds.len() == self.dim(), "TensorN::remove(): bounds and self should have the same dimensionality");
        for i in 0..self.dim() {
            debug_assert!(bounds[i].start <= self.shape[i] && bounds[i].end <= self.shape[i], "TensorN::remove(): bounds are out of range");
            debug_assert!(bounds[i].start <= bounds[i].end, "TensorN::remove(): range start is after range end");
            if (bounds[i].end - bounds[i].start) != self.shape[i] {
                debug_assert!(removed_dim.is_none(), "TensorN::remove(): bounds should match all dimensions but one");
                removed_dim = Some(i);
            }
        }
        debug_assert!(removed_dim.is_some(), "TensorN::remove(): cannot remove the entire tensor");
        let removed_dim = removed_dim.unwrap();
        // Front to back unzipping parameter init
        // length: number of contiguous elements removed per occurrence of the slab
        let length = bounds[removed_dim..].iter().fold(1usize, |acc, x| acc*(x.end - x.start));
        // count: number of occurrences of the removed slab in the linear buffer
        let count = bounds[..removed_dim].iter().fold(1usize, |acc, x| acc*(x.end - x.start));
        // spacing: number of kept elements between two consecutive removed runs
        let spacing = self.shape[removed_dim..].iter().fold(1usize, |acc, x| acc * x) - length;
        // offset: linear position of the first removed element
        let offset = bounds.iter().zip(self.strides.iter()).fold(0usize, |acc, (b, s)| acc + s * b.start);
        // Front to back unzipping: copy each kept stretch left over the removed runs
        let mut idx_dst = offset;
        let mut idx_src = offset + length;
        for _ in 1..count {
            for _ in 0..spacing {
                self.data[idx_dst] = self.data[idx_src];
                idx_dst += 1;
                idx_src += 1;
            }
            // Skip over the next removed run
            for _ in 0..length {
                idx_src += 1;
            }
        }
        // Copy whatever kept elements remain after the last removed run
        while idx_src < self.data.len() {
            self.data[idx_dst] = self.data[idx_src];
            idx_dst += 1;
            idx_src += 1;
        }
        self.shape[removed_dim] -= bounds[removed_dim].end - bounds[removed_dim].start;
        self.strides = shape_to_strides(self.shape());
        self.data.truncate(self.shape.iter().fold(1usize, |acc, x| acc * x));
    }
806
807    fn assign<R: AsRef<[Range<usize>]>, W: Tensor<T>>(&mut self, bounds: R, tensor: &W) {
808        let bounds = bounds.as_ref();
809        debug_assert!(bounds.len() == self.dim(), "TensorN::assign(): bounds should be two dimensional");
810        debug_assert!(tensor.dim() == self.dim(), "TensorN::assign(): tensor should be two dimensional");
811        for i in 0..self.dim() {
812            debug_assert!(bounds[i].start <= bounds[i].end, "TensorN::assign(): range start is after range end");
813            debug_assert!(bounds[i].end - bounds[i].start <= tensor.shape()[i], "TensorN::assign(): bounds are out of range");
814            debug_assert!(bounds[i].start <= self.shape[i] && bounds[i].end <= self.shape[i], "TensorN::assign(): bounds are out of range");
815        }
816        let bounds_shape: Vec<usize> = bounds.iter().map(|x| x.end - x.start).collect();
817        let mut tensor_idx: Vec<usize> = repeat(0).take(self.dim()).collect();
818        let mut self_idx = tensor_idx.clone();
819
820        loop {
821            for i in 0..self.dim() {
822                self_idx[i] = tensor_idx[i] + bounds[i].start;
823            }
824            self[&self_idx] = tensor.get_raw_array()[tensor_idx.to_pos(tensor.shape(), tensor.strides())];
825            tensor_idx.inc_ro(&bounds_shape);
826            if tensor_idx.is_zero() {
827                break;
828            }
829        }
830    }
831
    /// Transposes `self` in place by reversing the order of its dimensions: the element at
    /// index (i0, ..., iN) ends up at index (iN, ..., i0). Shape and strides are reversed
    /// accordingly; no auxiliary buffer is allocated.
    ///
    /// The permutation is decomposed into cycles. For each candidate start position we first
    /// run a read-only "check walk" of its cycle and only perform the actual element rotation
    /// ("execute walk") when the start is the smallest linear index of its cycle, so each
    /// cycle is rotated exactly once.
    fn transpose(&mut self) {
        let mut init_idx = 0;
        let mut new_shape = self.shape.clone();
        new_shape.reverse();
        let new_strides = shape_to_strides(&new_shape);
        'outer: while init_idx < self.data.len() {
            let start_idx = init_idx;
            let mut idx = start_idx;
            // Check walk
            loop {
                // Compute next transposition transformation: decompose idx on the old
                // strides and recompose the coordinates on the reversed strides
                let mut next_idx = 0;
                for i in 0..self.strides.len() {
                    next_idx += (idx / self.strides[i]) * new_strides[new_strides.len() - i - 1];
                    idx = idx % self.strides[i];
                }
                idx = next_idx;
                // Cycled ended
                if idx == start_idx {
                    break;
                }
                // Optimization to reduce the large loop iteration numbers
                else if idx == init_idx + 1 {
                    init_idx += 1;
                }
                // This cycle goes through an idx < start_idx -> we have already gone through it
                else if idx < start_idx {
                    init_idx += 1;
                    continue 'outer;
                }
            }
            // Execute walk: rotate the elements of the cycle, carrying one value ahead
            idx = start_idx;
            let mut cycle_carry = self.data[idx];
            loop {
                // Compute next transposition transformation (same mapping as the check walk)
                let mut next_idx = 0;
                for i in 0..self.strides.len() {
                    next_idx += (idx / self.strides[i]) * new_strides[new_strides.len() - i - 1];
                    idx = idx % self.strides[i];
                }
                idx = next_idx;
                // Swap cycle_carry <-> self.data[idx]
                let t = self.data[idx];
                self.data[idx] = cycle_carry;
                cycle_carry = t;
                // Cycled ended
                if idx == start_idx {
                    break;
                }
                // Optimization to reduce the large loop iteration numbers
                else if idx == init_idx + 1 {
                    init_idx += 1;
                }
            }
            init_idx += 1;
        }
        self.shape = new_shape;
        self.strides = new_strides;
    }
892}
893
894impl<I,T> Index<I> for TensorN<T> where I: AsRef<[usize]>, T: RDSTyped {
895    type Output = T;
896
897    fn index(&self, i: I) -> &T {
898        let i = i.as_ref();
899        debug_assert!(i.len() == self.shape.len(), "TensorN::index(): provided index and this tensor have a different number of dimensions");
900        let pos = i.to_pos(self.shape(), self.strides());
901        return &self.data[pos];
902    }
903}
904
905impl<I,T> IndexMut<I> for TensorN<T> where I: AsRef<[usize]>, T: RDSTyped {
906    fn index_mut(&mut self, i: I) -> &mut T {
907        let i = i.as_ref();
908        debug_assert!(i.len() == self.shape.len(), "TensorN::index(): provided index and this tensor have a different number of dimensions");
909        let pos = i.to_pos(self.shape(), self.strides());
910        return &mut self.data[pos];
911    }
912}
913
/// Pretty-prints the tensor as nested, tab-indented bracketed lists, one bracket level
/// per dimension, walking the elements in row order.
impl<T: RDSTyped> fmt::Display for TensorN<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Current multi-dimensional position, starts at the origin
        let mut index: Vec<usize> = repeat(0).take(self.dim()).collect();
        // Down the braces: open one bracket per dimension, each level indented one more tab;
        // only the innermost bracket stays on the same line as the elements
        for i in 0..self.dim() {
            for _j in 0..i {
                write!(f, "\t")?;
            }
            if i != self.dim() - 1 {
                writeln!(f, "[")?;
            }
            else {
                write!(f, "[")?;
            }
        }
        loop {
            // Print one element
            write!(f, "{}", self.data[index.to_pos(&self.shape, &self.strides)])?;
            // Index increment: row-order odometer; after the loop, `i` is the number of
            // leading dimensions that did NOT wrap around
            let mut i = self.dim();
            while i > 0 {
                index[i-1] += 1;
                if index[i-1] >= self.shape[i-1] {
                    index[i-1] = 0;
                    i -= 1;
                }
                else {
                    break;
                }
            }
            // End of the tensor, break out of the loop
            if i == 0 {
                break;
            }
            // If we overflow, print some braces
            if i < self.dim() {
                // Up the braces: close one bracket per wrapped dimension, outermost last
                for j in 0..(self.dim() - i) {
                    // No tab for the first row
                    if j > 0 {
                        for _k in 0..(self.dim() - j - 1) {
                            write!(f, "\t")?;
                        }
                    }
                    if j == self.dim() - i - 1 {
                        writeln!(f, "],")?;
                    }
                    else {
                        writeln!(f, "]")?;
                    }
                }
                // Down the braces: reopen the brackets for the next sub-tensor
                for j in i..self.dim() {
                    for _k in 0..j {
                        write!(f, "\t")?;
                    }
                    if j != self.dim() - 1 {
                        writeln!(f, "[")?;
                    }
                    else {
                        write!(f, "[")?;
                    }
                }
            }
            else {
                // No dimension wrapped: just separate elements within the innermost row
                write!(f, ", ")?;
            }
        }
        // Close all remaining brackets, innermost first, de-indenting as we go
        for i in 0..self.dim() {
            if i > 0 {
                for _j in 0..(self.dim() - i - 1) {
                    write!(f, "\t")?;
                }
            }
            writeln!(f, "]")?;
        }
        return Ok(());
    }
}
993
994
995#[cfg(test)]
996mod tests;