1#![allow(clippy::comparison_chain)]
2use std::fmt;
3use std::cmp;
4
5pub mod compare_tensor;
6pub mod convolution;
7pub mod elemwise;
8pub mod index_slicing;
9pub mod linalg;
10pub mod reduction;
11pub mod rand;
12
13#[cfg(feature = "use-serde")]
14use serde::{Serialize, Deserialize};
15
16#[cfg_attr(feature = "use-serde", derive(Serialize, Deserialize))]
/// Dense n-dimensional tensor: a flat row-major buffer plus a shape vector.
/// The last dimension varies fastest (see `stride()`); `d.len()` is expected
/// to equal the product of `dim`.
pub struct GenTensor<T> {
    d: Vec<T>,        // flat element storage
    dim: Vec<usize>,  // extent of each dimension
}
22impl<T> GenTensor<T> where T: num_traits::Float {
23
24 pub fn new() -> GenTensor<T> {
27 GenTensor { d: Vec::<T>::new(), dim: Vec::new() }
28 }
29
30 pub fn new_raw(data: &[T], shape: &[usize]) -> GenTensor<T> {
32 let new_data = data.to_vec();
33 let new_dim = shape.to_vec();
34 GenTensor {
35 d: new_data,
36 dim: new_dim,
37 }
38 }
39
    /// Build a tensor taking ownership of `data` and `shape` (no copy).
    pub fn new_move(data: Vec::<T>, shape: Vec::<usize>) -> GenTensor<T>{
        GenTensor {
            d: data,
            dim: shape,
        }
    }
46
47 pub fn data_copy(&mut self, other: &GenTensor<T>) {
48 self.d = other.d.clone();
49 self.dim = other.dim.clone();
50 }
51
52 pub fn index2dimpos(&self, index: usize) -> Vec::<usize> {
54 if index >= self.d.len() {
55 panic!("index out of range, {:?}, {:?}", index, self.d.len());
56 }
57 let mut ret = Vec::new();
58 let mut reminder = index;
59 for i in &self.stride() {
60 ret.push(reminder/i);
62 reminder %= i;
63 }
64 ret
65 }
66
67 pub fn dimpos2index(&self, dimpos: &[usize]) -> usize {
69 if dimpos.len() != self.dim.len() {
70 panic!("get expects the same dim self.dim: {:?}, o: {:?}", self.dim, dimpos);
71 }
72 for (i, j) in self.dim.iter().zip(dimpos.iter()) {
73 if j >= i {
74 panic!("get expects the dim within range self.dim: {:?}, o: {:?}", self.dim, dimpos);
75 }
76 }
77 let mut ret = 0;
78 for (st, i) in self.stride().iter().zip(dimpos.iter()) {
79 ret += st*i;
81 }
82 ret
83 }
84
85 pub fn zeros(size: &[usize]) -> GenTensor<T> {
91 let cap = size.iter().product();
92 GenTensor {
93 d: vec![T::zero(); cap],
94 dim: size.to_vec(),
95 }
96 }
97 pub fn zeros_like(&self) -> GenTensor<T> {
99 let new_data = vec![T::zero(); self.d.len()];
100 let new_dim = self.dim.to_vec();
101 GenTensor {
102 d: new_data,
103 dim: new_dim,
104 }
105 }
106
107 pub fn ones(size: &[usize]) -> GenTensor<T> {
109 let cap = size.iter().product();
110 GenTensor {
111 d: vec![T::one(); cap],
112 dim: size.to_vec(),
113 }
114 }
115 pub fn ones_like(&self) -> GenTensor<T> {
117 let new_data = vec![T::one(); self.d.len()];
118 let new_dim = self.dim.to_vec();
119 GenTensor {
120 d: new_data,
121 dim: new_dim,
122 }
123 }
124 pub fn arange(end: usize) -> GenTensor<T> {
126 let mut ret = GenTensor::<T>::zeros(&[end]);
127 for i in 0..end {
128 ret.d[i] = T::from(i).expect("");
129 }
130 ret
131 }
132 pub fn eye(n: usize, m: usize) -> GenTensor<T> {
136 let cap = n*m;
137 let d = vec![T::zero(); cap];
138 let mut ret = GenTensor {
139 d,
140 dim: [n, m].to_vec(),
141 };
142 for i in 0..cmp::min(n, m) {
143 ret.set(&[i, i], T::one());
144 }
145 ret
146 }
147 pub fn fill(d: T, shape: &[usize]) -> GenTensor<T> {
186 let mut dsize = 1;
187 for i in shape {
188 dsize *= *i;
189 }
190 GenTensor {
191 d: vec![d; dsize],
192 dim: shape.to_vec(),
193 }
194 }
195 pub fn from_record_f32(&mut self, row: usize, record: &[f32]) -> Result<(), &'static str> {
197 for (i, index) in record.iter().zip(0..self.dim[self.dim.len()-1]) {
198 self.d[row*self.dim[self.dim.len()-1] + index] = T::from(*i).expect("");
199 }
200 Ok(())
201 }
202 pub fn from_record_f64(&mut self, row: usize, record: &[f64]) -> Result<(), &'static str> {
203 for (i, index) in record.iter().zip(0..self.dim[self.dim.len()-1]) {
204 self.d[row*self.dim[self.dim.len()-1] + index] = T::from(*i).expect("");
205 }
206 Ok(())
207 }
208
209 pub fn stride(&self) -> Vec<usize> {
220 let mut ret = vec![0; self.dim.len()];
221 let dsize = ret.len();
222 for i in 0..dsize {
223 if i == 0 {
224 ret[dsize-1] = 1;
225 } else {
226 ret[dsize-i-1] = ret[dsize-i]*self.dim[dsize-i];
227 }
228 }
229 ret
230 }
231
232 pub fn get(&self, o: &[usize]) -> T {
240 if o.len() != self.dim.len() {
241 panic!("get expects the same dim self.dim: {:?}, o: {:?}", self.dim, o);
242 }
243 for (i, j) in self.dim.iter().zip(o.iter()) {
244 if j >= i {
245 panic!("get expects the dim within range self.dim: {:?}, o: {:?}", self.dim, o);
246 }
247 }
248 let stride = self.stride();
249 let dsize = o.len();
250 let mut index = 0;
251 for i in 0..dsize {
253 index += stride[i]*o[i];
254 }
255 self.d[index]
257 }
258 pub fn set(&mut self, o: &[usize], v: T) {
259 if o.len() != self.dim.len() {
260 panic!("get expects the same dim self.dim: {:?}, o: {:?}", self.dim, o);
261 }
262 for (i, j) in self.dim.iter().zip(o.iter()) {
263 if j >= i {
264 panic!("get expects the dim within range self.dim: {:?}, o: {:?}", self.dim, o);
265 }
266 }
267 let stride = self.stride();
268 let dsize = o.len();
269 let mut index = 0;
270 for i in 0..dsize {
271 index += stride[i]*o[i];
272 }
273 self.d[index] = v;
274 }
275 pub fn set_1d(&mut self, o: usize, v: T) {
276 if o < self.d.len() {
277 self.d[o] = v;
278 } else {
279 panic!("o {} is beyond limit {}", o, self.d.len());
280 }
281 }
282 pub fn get_mut(&mut self, o: &[usize]) -> &mut T {
283 if o.len() != self.dim.len() {
284 panic!("get expects the same dim self.dim: {:?}, o: {:?}", self.dim, o);
285 }
286 for (i, j) in self.dim.iter().zip(o.iter()) {
287 if j >= i {
288 panic!("get expects the dim within range self.dim: {:?}, o: {:?}", self.dim, o);
289 }
290 }
291 let stride = self.stride();
292 let dsize = o.len();
293 let mut index = 0;
294 for i in 0..dsize {
295 index += stride[i]*o[i];
296 }
297 &mut self.d[index]
298 }
299
300 pub fn get_raw(&self) -> Vec<T> {
302 self.d.to_vec()
303 }
304 pub fn get_u8(&self) -> Option<Vec<u8>> {
305 let mut ret = Vec::<u8>::with_capacity(self.d.len());
306 for i in &self.d {
307 let val = i.to_u8()?;
308 ret.push(val);
309 }
310 Some(ret)
311 }
312
313 pub fn get_scale(&self) -> T {
316 if self.d.len() == 1 {
317 self.d[0]
318 } else {
319 panic!("Only one element tensor can get_scale()");
320 }
321 }
322
323 pub fn get_n(&self) -> GenTensor<T> {
326 GenTensor {
327 d: vec![T::from(self.dim[0]).expect("N")],
328 dim: vec![1],
329 }
330 }
331 pub fn get_c(&self) -> GenTensor<T> {
333 GenTensor {
334 d: vec![T::from(self.dim[1]).expect("N")],
335 dim: vec![1],
336 }
337 }
338 pub fn get_d(&self) -> GenTensor<T> {
340 if self.dim.len() == 5 {
341 GenTensor {
342 d: vec![T::from(self.dim[2]).expect("N")],
343 dim: vec![1],
344 }
345 } else {
346 panic!("Bad shape for get_D");
347 }
348
349 }
350 pub fn get_h(&self) -> GenTensor<T> {
352 if self.dim.len() == 5 {
353 GenTensor {
354 d: vec![T::from(self.dim[3]).expect("N")],
355 dim: vec![1],
356 }
357 } else if self.dim.len() == 4 {
358 GenTensor {
359 d: vec![T::from(self.dim[2]).expect("N")],
360 dim: vec![1],
361 }
362 } else {
363 panic!("Bad shape for get_D");
364 }
365 }
366 pub fn get_w(&self) -> GenTensor<T> {
368 if self.dim.len() == 5 {
369 GenTensor {
370 d: vec![T::from(self.dim[4]).expect("N")],
371 dim: vec![1],
372 }
373 } else if self.dim.len() == 4 {
374 GenTensor {
375 d: vec![T::from(self.dim[3]).expect("N")],
376 dim: vec![1],
377 }
378 } else {
379 panic!("Bad shape for get_D");
380 }
381 }
382
    /// Shape of the tensor: one extent per dimension.
    pub fn size(&self) -> &Vec<usize> {
        &self.dim
    }
    /// Alias of `size()`.
    pub fn get_size(&self) -> &Vec<usize> {
        self.size()
    }
    /// Borrow the flat (row-major) data buffer.
    pub fn get_data(&self) -> &Vec<T> {
        &self.d
    }
    /// Mutably borrow the flat data buffer.
    /// Callers must not change its length, or it will no longer match `dim`.
    pub fn get_data_mut(&mut self) -> &mut Vec<T> {
        &mut self.d
    }
396
    /// Total number of elements.
    pub fn numel(&self) -> usize {
        self.d.len()
    }
401
402 pub fn numel_tensor(&self) -> GenTensor<T> {
404 GenTensor {
405 d: vec![T::from(self.d.len()).expect(""),],
406 dim: vec![1],
407 }
408 }
409
    /// Extract a sub-tensor: along each dimension k, take indices
    /// `range[k].0 .. range[k].1` advancing by `step[k]` (default step 1).
    /// Panics when `range` does not cover every dimension.
    pub fn get_patch(&self, range: &[(usize, usize)], step: Option<&[usize]>) -> GenTensor<T> {
        if range.len() != self.dim.len() {
            panic!("Expect range covers all dimension range: {:?}, dim: {:?}", range, self.dim);
        }
        // Per-dimension step; defaults to 1 everywhere.
        let mut step_dim = vec![1; self.dim.len()];
        if let Some(step_val) = step {
            step_dim = step_val.to_vec();
        }

        // index[k] lists the source coordinates selected along dimension k;
        // ret_dim collects the resulting patch shape.
        let mut index = Vec::<Vec::<usize>>::new();
        let mut ret_dim = Vec::new();
        for (i, dim_index) in range.iter().zip(0..self.dim.len()) {
            let mut pos = i.0;
            let mut all_index = Vec::new();
            while pos < i.1 {
                all_index.push(pos);
                pos += step_dim[dim_index];
            }
            ret_dim.push(all_index.len());
            index.push(all_index);
        }
        let mut ret = Self::zeros(&ret_dim);

        // Odometer walk over every output coordinate, copying one element
        // per iteration from the mapped source coordinate.
        let d = self.dim.len();
        let mut pos_index = vec![0; d];
        let mut self_index = vec![0; d];
        loop {
            for i in 0..d {
                self_index[i] = index[i][pos_index[i]];
            }
            let value = self.get(&self_index);
            ret.set(&pos_index, value);

            // Increment last dimension first; carry on overflow.
            for dim_index in 0..d {
                pos_index[d-1-dim_index] += 1;
                if pos_index[d-1-dim_index] >= ret.dim[d-1-dim_index] {
                    pos_index[d-1-dim_index] = 0;
                } else {
                    break;
                }
            }

            // Wrapping back to all zeros means every coordinate was visited.
            if pos_index == vec![0; d] {
                break;
            }
        }

        ret
    }
465
    /// Write the tensor `val` into the sub-region of `self` selected by
    /// `range`/`step` (same selection semantics as `get_patch`).
    /// Panics when `range` does not cover every dimension.
    pub fn set_patch(&mut self, val: &GenTensor<T>, range: &[(usize, usize)], step: Option<&[usize]>) {
        if range.len() != self.dim.len() {
            panic!("Expect range covers all dimension range: {:?}, dim: {:?}", range, self.dim);
        }

        // Per-dimension step; defaults to 1 everywhere.
        let mut step_dim = vec![1; self.dim.len()];
        if let Some(step_val) = step {
            step_dim = step_val.to_vec();
        }

        // index[k] lists the destination coordinates along dimension k.
        let mut index = Vec::<Vec::<usize>>::new();
        for (i, dim_index) in range.iter().zip(0..self.dim.len()) {
            let mut pos = i.0;
            let mut all_index = Vec::new();
            while pos < i.1 {
                all_index.push(pos);
                pos += step_dim[dim_index];
            }
            index.push(all_index);
        }

        // Odometer walk over val's coordinates (bounded by val.size()),
        // writing one element per iteration.
        let d = self.dim.len();
        let mut pos_index = vec![0; d];
        let mut self_index = vec![0; d];
        loop {
            for i in 0..d {
                self_index[i] = index[i][pos_index[i]];
            }
            self.set(&self_index, val.get(&pos_index));

            // Increment last dimension first; carry on overflow.
            for dim_index in 0..d {
                pos_index[d-1-dim_index] += 1;
                if pos_index[d-1-dim_index] >= val.size()[d-1-dim_index] {
                    pos_index[d-1-dim_index] = 0;
                } else {
                    break;
                }
            }

            if pos_index == vec![0; d] {
                break;
            }
        }
    }
515
    /// Reduce over the dimensions in `dim` by applying `closure` to each
    /// patch of elements; `keep_dim` keeps reduced dimensions as size 1.
    /// `dim == None` reduces the whole tensor to a single value.
    pub fn _iter_patch<F>(&self, dim: Option<&[usize]>, keep_dim: bool, closure: F) -> GenTensor<T>
    where F: Fn(&[T]) -> T {
        // Whole-tensor reduction: one closure call over all data.
        if dim.is_none() {
            let ret_dim = if keep_dim {
                vec![1; self.size().len()]
            } else {
                vec![1]
            };
            return GenTensor::new_raw(&[closure(self.get_data())], &ret_dim)
        }
        let dim = dim.unwrap();

        // Output shape: reduced dims dropped (or 1 when keep_dim).
        let mut ret_dim = Vec::new();
        for i in 0..self.size().len() {
            if dim.contains(&i) {
                if keep_dim {
                    ret_dim.push(1);
                }
            } else {
                ret_dim.push(self.size()[i]);
            }
        }
        let mut ret = Self::zeros(&ret_dim);


        // Dimensions that survive the reduction; `index` iterates over them.
        let kept_dim: Vec<usize> = (0..self.size().len()).filter(|x| !dim.contains(x)).collect();
        let mut index = vec![0; kept_dim.len()];
        loop {
            // Build the patch range (full extent on reduced dims, a single
            // coordinate on kept dims) and the matching output coordinate.
            let mut patch_index: Vec::<(usize, usize)> = Vec::new();
            let mut output_index: Vec<usize> = Vec::new();
            let mut kept_dim_step = 0;
            for i in 0..self.size().len() {
                if dim.contains(&i) {
                    patch_index.push((0, self.size()[i]));
                    if keep_dim {
                        output_index.push(0);
                    }
                } else {
                    patch_index.push((index[kept_dim_step], index[kept_dim_step]+1));
                    output_index.push(index[kept_dim_step]);
                    kept_dim_step += 1;
                }
            }
            let value = closure(self.get_patch(&patch_index, None).get_data());
            ret.set(&output_index, value);

            // Odometer increment over the kept dimensions.
            for i in 0..index.len() {
                index[kept_dim.len() -i -1] += 1;
                if index[kept_dim.len() -i -1] >= self.size()[kept_dim[kept_dim.len() -i -1]] {
                    index[kept_dim.len() -i -1] = 0;
                } else {
                    break
                }
            }

            if index == vec![0; kept_dim.len()] {
                break
            }
        }

        ret
    }
582
    /// Compute a per-slice statistic along dimension `dim`.
    /// `closure(over, k, j, inner_size, step)` receives the reduced extent
    /// (`over`), the outer/inner loop positions, the inner block size and
    /// the stride along `dim`, and returns one output value.
    /// Panics when `dim` is out of range.
    pub fn _dim_statistic<F>(&self, dim: usize, keepdim: bool, closure: F) -> GenTensor<T>
    where F: Fn(usize, usize, usize, usize, usize) -> T {
        if self.dim.len() <= dim {
            panic!("Tensor has dimension {:?}, mean() get dim of {}", self.dim, dim);
        }

        // Output shape: `dim` becomes 1 (keepdim) or is dropped.
        let mut ret_dim;
        if keepdim {
            ret_dim = self.dim.to_vec();
            ret_dim[dim] = 1;
        } else {
            ret_dim = Vec::new();
            for (i, index) in self.dim.iter().zip(0..self.dim.len()) {
                if index != dim {
                    ret_dim.push(*i);
                }
            }
        }

        let mut cap = 1;
        for i in &ret_dim {
            cap *= i;
        }

        // outer_size: product of dims before `dim`; inner_size: after it.
        let mut outer_size = 1;
        let mut inner_size = 1;
        for i in 0..self.dim.len() {
            if i < dim {
                outer_size *= self.dim[i];
            }
            if i > dim {
                inner_size *= self.dim[i];
            }
        }

        let mut data = Vec::with_capacity(cap);
        let over = self.dim[dim];
        let stride = self.stride();
        let step = stride[dim];

        // One closure call per output element, in row-major output order.
        for k in 0..outer_size {
            for j in 0..inner_size {
                let val = closure(over, k, j, inner_size, step);
                data.push(val);
            }
        }

        GenTensor {
            d: data,
            dim: ret_dim,
        }
    }
635
636 pub fn get_diag(&self) -> GenTensor<T> {
637 let n = *self.size().last().unwrap();
639 let mut ret = GenTensor::<T>::zeros(&[n]);
640 for i in 0..n {
641 ret.set(&[i], self.get(&[i, i]))
642 }
643 ret
644 }
645 pub fn set_diag(&mut self, o: &GenTensor<T>) {
646 let n = *self.size().last().unwrap();
648 for i in 0..n {
649 self.set(&[i,i], o.get(&[i]));
650 }
651 }
652
653 pub fn get_column(&self, i: usize) -> GenTensor<T> {
655 let nr = self.size()[self.size().len()-2];
656 let mut ret = GenTensor::zeros(&[nr, 1]);
657 for r in 0..nr {
658 ret.set(&[r, 0], self.get(&[r, i]));
659 }
660 ret
661 }
662 pub fn set_column(&mut self, o: &GenTensor<T>, i: usize) {
664 let nr = self.size()[self.size().len()-2];
665 for r in 0..nr {
666 self.set(&[r, i], o.get(&[r, 0]));
667 }
668 }
669 pub fn get_row(&self, i: usize) -> GenTensor<T> {
671 let nc = self.size()[self.size().len()-1];
672 let mut ret = GenTensor::zeros(&[nc]);
673 for c in 0..nc {
674 ret.set(&[c], self.get(&[i, c]));
675 }
676 ret
677 }
678 pub fn set_row(&mut self, o: &GenTensor<T>, i: usize) {
680 let nc = self.size()[self.size().len()-1];
681 for c in 0..nc {
682 self.set(&[i, c], o.get(&[c]));
683 }
684 }
685
686 pub fn _pointwise<F>(&self, closure: F) -> GenTensor<T>
691 where F: Fn(&T) -> T {
692 let mut ret = GenTensor {
693 d: Vec::with_capacity(self.d.len()),
694 dim: self.dim.clone(),
695 };
696
697 for i in &self.d {
698 ret.d.push(closure(i));
699 }
700 ret
701 }
702
    /// Element-wise binary op with right-hand broadcasting.
    /// Three accepted cases: equal element counts (pairwise), a scalar `o`
    /// (shape `[1]`), or `o`'s shape matching a trailing suffix of `self`'s
    /// (its data is then cycled). The output always has `self`'s shape.
    pub fn _right_broadcast<F>(&self, o: &GenTensor<T>, closure: F) -> GenTensor<T>
    where F: Fn(&T, &T) -> T {
        let mut ret = GenTensor {
            d: Vec::with_capacity(self.d.len()),
            dim: self.dim.clone(),
        };
        if self.d.len() == o.d.len() {
            // Same element count: pairwise over the flat buffers.
            for (v1, v2) in self.d.iter().zip(o.d.iter()) {
                ret.d.push(closure(v1, v2));
            }
        } else if o.dim.len() == 1 && o.dim[0] == 1{
            // Scalar right operand.
            for i in 0..self.d.len() {
                ret.d.push(closure(&self.d[i], &o.d[0]));
            }
        } else {
            // Trailing-suffix broadcast; validate before cycling o's data.
            if self.d.len() < o.d.len() {
                panic!("right-hand broadcast only.");
            }
            if self.dim.len() <= o.dim.len() {
                panic!("unmatched dimension. {}, {}", self.dim.len(), o.dim.len());
            }
            for i in 0..o.dim.len() {
                if o.dim[o.dim.len()-i-1] != self.dim[self.dim.len()-i-1] {
                    panic!("unmatched size.");
                }
            }

            // Repeat o's buffer cyclically across self's buffer.
            let mut index = 0;
            for i in 0..self.d.len() {
                ret.d.push(closure(&self.d[i], &o.d[index]));
                index += 1;
                if index >= o.d.len() {
                    index = 0;
                }
            }
        }
        ret
    }
744
745 pub fn log10_like(&self) -> GenTensor<T> {
746 let new_data = vec![T::from(std::f64::consts::LN_10).unwrap(); self.d.len()];
747 let new_dim = self.dim.to_vec();
748 GenTensor {
749 d: new_data,
750 dim: new_dim,
751 }
752 }
753 pub fn log2_like(&self) -> GenTensor<T> {
754 let new_data = vec![T::from(std::f64::consts::LN_2).unwrap(); self.d.len()];
755 let new_dim = self.dim.to_vec();
756 GenTensor {
757 d: new_data,
758 dim: new_dim,
759 }
760 }
761
    /// Element-wise addition with right-hand broadcasting (see `_right_broadcast`).
    pub fn add(&self, o: &GenTensor<T>) -> GenTensor<T> {
        self._right_broadcast(o, |x, y| *x + *y)
    }
    /// Element-wise subtraction with right-hand broadcasting.
    pub fn sub(&self, o: &GenTensor<T>) -> GenTensor<T> {
        self._right_broadcast(o, |x, y| *x - *y)
    }
    /// Element-wise multiplication with right-hand broadcasting.
    pub fn mul(&self, o: &GenTensor<T>) -> GenTensor<T> {
        self._right_broadcast(o, |x, y| *x * *y)
    }
    /// Element-wise division with right-hand broadcasting.
    pub fn div(&self, o: &GenTensor<T>) -> GenTensor<T> {
        self._right_broadcast(o, |x, y| *x / *y)
    }
784
785 pub fn mm(&self, o: &GenTensor<T>) -> GenTensor<T>{
795 if self.dim.len() != 2 || o.dim.len() != 2 {
796 panic!("Not a matrix input.");
797 }
798 let ls = self.dim[0];
799 let rs = o.dim[1];
800 let mut ret = GenTensor {
801 d: Vec::with_capacity(ls*rs),
802 dim: vec![ls, rs],
803 };
804 let lstride = self.stride();
805 let rstride = o.stride();
806 for i in 0..ls {
807 for j in 0..rs {
808 let mut tsum = T::zero();
809 for k in 0..self.dim[1] {
810 tsum = tsum
811 + self.d[i*lstride[0] + k] * o.d[k*rstride[0] + j];
812 }
813 ret.d.push(tsum);
814 }
815 }
816 ret
817 }
818
819 pub fn dot(&self, b: &GenTensor<T>) -> T {
820 let mut sum = T::zero();
821 for (l, m) in self.d.iter().zip(b.d.iter()) {
822 sum = (*l)*(*m) + sum;
823 }
824 sum
825 }
826
827 pub fn proj(&self, b: &GenTensor<T>) -> GenTensor<T> {
830 let mut sum = T::zero();
831 for (l, m) in self.d.iter().zip(b.d.iter()) {
832 sum = (*l)*(*m) + sum;
833 }
834 let mut ret = b.clone();
835 for i in ret.d.iter_mut() {
836 *i = (*i) * sum;
837 }
838 ret
839 }
840
    /// Generalized matrix product contracting the last dimension of `self`
    /// with the first dimension of `o`; output shape is self's leading dims
    /// followed by o's trailing dims.
    /// NOTE(review): element addressing uses `i*lstride[0] + k` /
    /// `k*rstride[0] + j`, which walks memory correctly for 2-D operands but
    /// looks suspect for rank > 2 — confirm intended before relying on it.
    pub fn matmul(&self, o: &GenTensor<T>) -> GenTensor<T> {
        if self.size()[self.size().len()-1] != o.size()[0] {
            panic!("matmul expect matched size {:?}, {:?}", self.dim, o.dim);
        }
        if self.size().len() == 1 && o.size().len() == 1 {
            panic!("Two vector have not matched size for matmul! {:?}, {:?}", self.numel(), o.numel());
        }
        let inner = o.dim[0];
        let mut cap = 1;
        let mut odim = Vec::new();
        let mut lloop = 1;
        let mut rloop = 1;
        // Leading dims of self form the "row" loop count...
        for i in 0..self.dim.len()-1 {
            cap *= self.dim[i];
            odim.push(self.dim[i]);
            lloop *= self.dim[i];
        }
        // ...trailing dims of o form the "column" loop count.
        for i in 1..o.dim.len() {
            cap *= o.dim[i];
            odim.push(o.dim[i]);
            rloop *= o.dim[i];
        }

        let mut ret = GenTensor {
            d: Vec::with_capacity(cap),
            dim: odim,
        };

        let lstride = self.stride();
        let rstride = o.stride();
        for i in 0..lloop {
            for j in 0..rloop {
                let mut tsum = T::zero();
                for k in 0..inner {
                    tsum = tsum
                        + self.d[i*lstride[0] + k] * o.d[k*rstride[0] + j];
                }
                ret.d.push(tsum);
            }
        }
        ret
    }
885
    /// Batched outer product of the last dimensions of `self` and `o`
    /// (all leading dimensions must match). With `avg = Some(true)` the
    /// per-batch outer products are averaged into a single
    /// (left_dim × right_dim) matrix; otherwise the result keeps the batch
    /// dimensions followed by (left_dim, right_dim).
    pub fn outer(&self, o: &GenTensor<T>, avg: Option<bool>) -> GenTensor<T> {
        let mut dim = Vec::new();
        let mut data;
        let mut cap = 1;
        let mut outer_size = 1;
        let left_dim;
        let right_dim;
        // Validate: equal rank and identical leading (batch) dimensions.
        if self.dim.len() == o.dim.len()
            && self.dim[0..self.dim.len()-1] == o.dim[0..self.dim.len()-1] {
                left_dim = self.dim[self.dim.len()-1];
                right_dim = o.dim[self.dim.len()-1];
                for i in 0..self.dim.len()-1 {
                    dim.push(self.dim[i]);
                    cap *= self.dim[i];
                    outer_size *= self.dim[i];
                }
                dim.push(left_dim);
                cap *= left_dim;
                dim.push(right_dim);
                cap *= right_dim;
                if avg.is_some() && avg.unwrap() {
                    // Averaged output: a single zeroed accumulator matrix.
                    data = vec![T::zero(); left_dim*right_dim];
                    dim = vec![left_dim, right_dim];
                } else {
                    data = Vec::with_capacity(cap);
                }
            } else {
                panic!("bad size for outer: {:?}, {:?}", self.dim, o.dim);
            }


        if avg.is_some() && avg.unwrap() {
            // Accumulate each batch's outer product, then divide by count.
            for k in 0..outer_size {
                let mut new_data = Vec::with_capacity(left_dim*right_dim);
                for i in 0..left_dim {
                    for j in 0..right_dim {
                        new_data.push(self.d[i + k*left_dim] * o.d[j + k*right_dim]);
                    }
                }
                for i in 0..new_data.len() {
                    data[i] = data[i] + new_data[i];
                }
            }
            for i in &mut data {
                *i = *i / T::from(outer_size).expect("");
            }
            GenTensor {
                d: data,
                dim,
            }
        } else {
            // Concatenate each batch's outer product in order.
            for k in 0..outer_size {
                for i in 0..left_dim {
                    for j in 0..right_dim {
                        data.push(self.d[i + k*left_dim] * o.d[j + k*right_dim]);
                    }
                }
            }
            GenTensor {
                d: data,
                dim,
            }
        }
    }
951
952 #[cfg(not(feature = "use-blas-lapack"))]
953 pub fn squared_error(t1: &Self, t2: &Self) -> GenTensor<T> {
954 let mut ret = GenTensor {
955 d: Vec::with_capacity(t1.d.len()),
956 dim: t1.dim.to_vec(),
957 };
958 for (v1, v2) in t1.d.iter().zip(t2.d.iter()) {
959 ret.d.push((*v1 - *v2)*(*v1 - *v2));
960 }
961 ret
962 }
963
964
    /// Element-wise approximate equality mask; alias of `eq_t`.
    pub fn all_close(&self, o: &GenTensor<T>) -> GenTensor<T> {
        self.eq_t(o)
    }
971
    /// Per-slice argsort along dimension `dim`: each 1-D lane is replaced by
    /// the (float-encoded) source indices in sorted order.
    /// NOTE(review): with `descending == true` the natural (ascending)
    /// partial order is used and it is reversed otherwise — the flag reads
    /// inverted relative to its name, but the unit test below pins this
    /// behavior; confirm before "fixing".
    pub fn arg_sort(&self, dim: usize, descending: bool) -> GenTensor<T> {
        let mut d = self.d.to_vec();

        // outer_size: product of dims before `dim`; inner_size: after it.
        let mut outer_size = 1;
        let mut inner_size = 1;

        for (i, index) in self.dim.iter().zip(0..self.dim.len()) {
            if index < dim {
                outer_size *= i;
            } else if index > dim {
                inner_size *= i;
            }
        }

        let stride = self.stride()[dim];
        let size = self.dim[dim];

        for i in 0..outer_size {
            for j in 0..inner_size {
                // Gather (value, original index) pairs for this lane.
                let mut collected = Vec::<(T, usize)>::with_capacity(size);
                for k in 0..size {
                    collected.push((self.d[k*stride + j + i*inner_size*size], k));
                }
                collected.sort_unstable_by(|a, b| {
                    let porder = a.0.partial_cmp(&b.0).unwrap();
                    if descending {
                        porder
                    } else {
                        porder.reverse()
                    }
                });
                // Keep only the indices, written back as float values.
                let (_left, right): (Vec<_>, Vec<_>) = collected.iter().cloned().unzip();
                for k in 0..size {
                    d[k*stride + j + i*inner_size*size] = T::from(right[k]).expect("");
                }
            }
        }

        GenTensor {
            d,
            dim: self.dim.to_vec()
        }
    }
1015
    /// Element-wise approximate equality mask: 1 where the pair differs by
    /// less than sqrt(min_positive_value), 0 otherwise.
    /// NOTE(review): unlike `ge`/`gt`/`le`/`lt`/`ne`, there is no size check
    /// here — `zip` silently truncates at the shorter buffer while the output
    /// keeps `self`'s shape. Confirm whether a mismatch should panic instead.
    pub fn eq_t(&self, o: &GenTensor<T>) -> GenTensor<T> {
        let mut cmp = Vec::<T>::with_capacity(self.d.len());
        for (v1, v2) in self.d.iter().zip(o.d.iter()) {
            if (*v1-*v2).abs() < T::min_positive_value().sqrt() {
                cmp.push(T::one(),);
            } else {
                cmp.push(T::zero());
            }
        }
        GenTensor {
            d: cmp,
            dim: self.dim.to_vec(),
        }
    }
1040
1041 pub fn equal(&self, o: &GenTensor<T>) -> bool {
1050 if self.dim.len() != o.dim.len() || self.dim != o.dim {
1051 return false;
1052 }
1053
1054 if self.d.len() != o.d.len() {
1055 return false;
1056 }
1057
1058 let mut same = true;
1059 for (v1, v2) in self.d.iter().zip(o.d.iter()) {
1060 if (*v1-*v2).abs() > T::min_positive_value().sqrt() {
1061 same = false;
1062 break;
1063 }
1064 }
1065 same
1066 }
1067
1068 pub fn ge(&self, o: &GenTensor<T>) -> GenTensor<T> {
1069 if self.size() != o.size() {
1070 panic!("max needs two tensor have the same size, {:?}, {:?}", self.dim, o.dim);
1071 }
1072 let mut ret = GenTensor::zeros(&self.dim);
1073
1074 for ((a, b), c) in self.d.iter().zip(o.d.iter()).zip(ret.d.iter_mut()) {
1075 if a >= b {
1076 *c = T::one();
1077 } else {
1078 *c = T::zero();
1079 }
1080 }
1081 ret
1082 }
1083
1084 pub fn gt(&self, o: &GenTensor<T>) -> GenTensor<T> {
1085 if self.size() != o.size() {
1086 panic!("max needs two tensor have the same size, {:?}, {:?}", self.dim, o.dim);
1087 }
1088 let mut ret = GenTensor::zeros(&self.dim);
1089
1090 for ((a, b), c) in self.d.iter().zip(o.d.iter()).zip(ret.d.iter_mut()) {
1091 if a > b {
1092 *c = T::one();
1093 } else {
1094 *c = T::zero();
1095 }
1096 }
1097 ret
1098 }
1099
1100 pub fn le(&self, o: &GenTensor<T>) -> GenTensor<T> {
1117 if self.size() != o.size() {
1118 panic!("max needs two tensor have the same size, {:?}, {:?}", self.dim, o.dim);
1119 }
1120 let mut ret = GenTensor::zeros(&self.dim);
1121
1122 for ((a, b), c) in self.d.iter().zip(o.d.iter()).zip(ret.d.iter_mut()) {
1123 if a <= b {
1124 *c = T::one();
1125 } else {
1126 *c = T::zero();
1127 }
1128 }
1129 ret
1130 }
1131 pub fn lt(&self, o: &GenTensor<T>) -> GenTensor<T> {
1133 if self.size() != o.size() {
1134 panic!("max needs two tensor have the same size, {:?}, {:?}", self.dim, o.dim);
1135 }
1136 let mut ret = GenTensor::zeros(&self.dim);
1137
1138 for ((a, b), c) in self.d.iter().zip(o.d.iter()).zip(ret.d.iter_mut()) {
1139 if a < b {
1140 *c = T::one();
1141 } else {
1142 *c = T::zero();
1143 }
1144 }
1145 ret
1146 }
1147 pub fn ne(&self, o: &GenTensor<T>) -> GenTensor<T> {
1158 if self.size() != o.size() {
1159 panic!("max needs two tensor have the same size, {:?}, {:?}", self.dim, o.dim);
1160 }
1161
1162
1163 let data = self.d.iter().zip(
1164 o.d.iter())
1165 .map(|(x, y)|
1166 if *x != *y {
1167 T::one()
1168 } else {
1169 T::zero()
1170 }
1171 ).collect();
1172 GenTensor {
1173 d: data,
1174 dim: self.dim.to_vec(),
1175 }
1176 }
1177 }
1181
1182impl<T> Default for GenTensor<T> where T: num_traits::Float {
1183 fn default() -> GenTensor<T> {
1184 GenTensor { d: Vec::<T>::new(), dim: Vec::new() }
1185 }
1186}
1187
1188
impl<T> PartialEq for GenTensor<T> where T: num_traits::Float {
    /// Tensors compare equal when shapes match and every element pair is
    /// within the `equal()` tolerance.
    fn eq(&self, other: &Self) -> bool {
        self.equal(other)
    }
}
// Marker impl promoting `equal` to a full equivalence relation.
// NOTE(review): `equal` only rejects differences *larger* than a threshold,
// so NaN-containing tensors still compare equal here — confirm intended.
impl<T> Eq for GenTensor<T> where T: num_traits::Float {}
1201
impl fmt::Display for GenTensor<f32> {
    /// 2-D tensors print as bracketed rows; other ranks print the shape
    /// followed by the raw data.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.dim.len() == 2 {
            write!(f, "[")?;
            for i in 0..self.dim[0] {
                write!(f, "[")?;
                for j in 0..self.dim[1] {
                    write!(f, "{}, ", self.get(&[i, j]))?;
                }
                writeln!(f, "]")?;
            }
            writeln!(f, "]")
        } else {
            writeln!(f, "{:?}", self.dim)?;
            write!(f, "{:?}", self.d)
        }
    }
}
impl fmt::Display for GenTensor<f64> {
    /// Prints shape then data.
    /// NOTE(review): unlike the f32 impl there is no pretty 2-D layout here,
    /// and the newline placement differs — confirm the asymmetry is intended.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", self.dim)?;
        writeln!(f, "{:?}", self.d)
    }
}
1226
impl fmt::Debug for GenTensor<f32> {
    /// Debug output: large tensors are truncated to the first 30 elements;
    /// small 2-D tensors print as bracketed rows; everything else prints
    /// shape then data.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.numel() > 30 {
            writeln!(f, "size: {:?}", self.dim)?;
            write!(f, "data: {:?}", &self.d[..30])
        } else if self.dim.len() == 2 {
            write!(f, "[")?;
            for i in 0..self.dim[0] {
                write!(f, "[")?;
                for j in 0..self.dim[1] {
                    write!(f, "{}, ", self.get(&[i, j]))?;
                }
                writeln!(f, "]")?;
            }
            writeln!(f, "]")
        } else {
            writeln!(f, "size: {:?}", self.dim)?;
            write!(f, "data: {:?}", self.d)
        }
    }
}
impl fmt::Debug for GenTensor<f64> {
    /// Debug output: large tensors are truncated to the first 30 elements;
    /// small 2-D tensors print as bracketed rows; everything else prints
    /// shape then data (without the "size:"/"data:" labels the f32 impl uses).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.numel() > 30 {
            writeln!(f, "size: {:?}", self.dim)?;
            write!(f, "data: {:?}", &self.d[..30])
        } else if self.dim.len() == 2 {
            write!(f, "[")?;
            for i in 0..self.dim[0] {
                write!(f, "[")?;
                for j in 0..self.dim[1] {
                    write!(f, "{}, ", self.get(&[i, j]))?;
                }
                writeln!(f, "]")?;
            }
            writeln!(f, "]")
        } else {
            writeln!(f, "{:?}", self.dim)?;
            write!(f, "{:?}", self.d)
        }
    }
}
1269
1270impl<T> Clone for GenTensor<T> where T: num_traits::Float {
1271 fn clone(&self) -> Self {
1272 GenTensor {
1273 d: self.d.to_vec(),
1274 dim: self.dim.to_vec(),
1275 }
1276 }
1277}
1278
1279#[cfg(feature = "use-blas-lapack")]
1280use crate::tensor_impl::lapack_tensor::blas_api::BlasAPI;
1281
1282
#[cfg(feature = "use-blas-lapack")]
impl GenTensor<f32> {

    /// Element-wise squared difference computed via BLAS:
    /// axpy makes `v2 = t2 - t1`, then each entry is squared in place.
    /// Output takes `t1`'s shape.
    pub fn squared_error(t1: &Self, t2: &Self) -> GenTensor<f32> {
        let mut v2 = t2.d.to_vec();
        // axpy: v2 <- (-1.0) * t1.d + v2
        BlasAPI::<f32>::axpy(t1.d.len(), -1., &t1.d, 1, &mut v2, 1);

        let mut ret = GenTensor {
            d: v2,
            dim: t1.dim.to_vec(),
        };
        for i in &mut ret.d {
            *i = (*i)*(*i);
        }
        ret
    }
}
#[cfg(feature = "use-blas-lapack")]
impl GenTensor<f64> {

    /// Element-wise squared difference computed via BLAS:
    /// axpy makes `v2 = t2 - t1`, then each entry is squared in place.
    /// Output takes `t1`'s shape.
    pub fn squared_error(t1: &Self, t2: &Self) -> GenTensor<f64> {
        let mut v2 = t2.d.to_vec();
        // axpy: v2 <- (-1.0) * t1.d + v2
        BlasAPI::<f64>::axpy(t1.d.len(), -1., &t1.d, 1, &mut v2, 1);

        let mut ret = GenTensor {
            d: v2,
            dim: t1.dim.to_vec(),
        };
        for i in &mut ret.d {
            *i = (*i)*(*i);
        }
        ret
    }
}
1317
1318
#[cfg(test)]
mod tests {
    use super::*;


    // Flat-index -> coordinate conversion on a 4-D shape.
    #[test]
    fn test_index2dimpos() {
        let a = GenTensor::<f32>::zeros(&vec![10, 5, 3, 4]);

        let b = a.index2dimpos(10);
        assert_eq!(b, vec![0, 0, 2, 2]);

    }

    // get_mut yields a writable reference into the buffer.
    #[test]
    fn test_gentensor() {
        {
            let mut m2 = GenTensor::<f64>::new_raw(&vec![1., 2., 3., 4.,], &vec![2, 2]);
            *m2.get_mut(&vec![0,0]) = 5.;
            assert_eq!(m2.get_raw(), vec![5., 2., 3., 4.,])
        }
    }

    // NCHW dimension accessors; get_d must panic for 4-D input.
    #[test]
    fn test_gen_tensor_get() {
        {
            let m1 = GenTensor::<f64>::fill(1., &vec![10, 3, 28, 30]);
            assert_eq!(m1.get_n().get_raw(), vec![10.]);
            assert_eq!(m1.get_c().get_raw(), vec![3.]);
            assert_eq!(m1.get_h().get_raw(), vec![28.]);
            assert_eq!(m1.get_w().get_raw(), vec![30.]);

            let result = std::panic::catch_unwind(
                ||
                m1.get_d().get_raw()
            );
            assert!(result.is_err());
        }
    }

    // Batched outer product shapes, plus the averaged variant.
    #[test]
    fn outer() {
        let a = GenTensor::<f32>::fill(1., &vec![10, 2]);
        let b = GenTensor::<f32>::fill(1., &vec![10, 3]);
        let c = a.outer(&b, None);
        assert_eq!(*c.size(), vec![10, 2, 3]);
        let d = b.outer(&a, None);
        assert_eq!(*d.size(), vec![10, 3, 2]);

        let e = a.outer(&b, Some(true));
        assert_eq!(e, GenTensor::ones(&[2, 3]));
    }

    // proj with an all-ones left vector scales b by sum(b) = 1 here.
    #[test]
    fn proj() {
        let a = GenTensor::<f32>::fill(1., &vec![2]);
        let b = GenTensor::<f32>::new_raw(&[3., -2.], &[2, 1]);
        assert_eq!(a.proj(&b), b);
    }

    // Sub-tensor extraction from a 2x3x5 arange tensor.
    #[test]
    fn get_patch() {
        let a = GenTensor::new_raw(&GenTensor::<f32>::arange(30).get_data(), &[2, 3, 5]);
        let b = a.get_patch(&vec![(0, 2), (0, 2), (2, 3)][..], Option::None);
        assert_eq!(b, GenTensor::<f32>::new_raw(&vec![2.0, 7.0, 17.0, 22.0][..], &vec![2, 2, 1][..]));
    }

    // Writing a ones-block into the second batch slice.
    #[test]
    fn set_patch() {
        let mut a = GenTensor::new_raw(&GenTensor::<f32>::arange(30).get_data(), &[2, 3, 5]);
        let b = GenTensor::<f32>::ones(&[1, 3, 5]);
        a.set_patch(&b, &[(1,2), (0,3), (0,5)], None);
        println!("{:?}", a);
        assert_eq!(a, GenTensor::new_raw(&[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], &[2, 3, 5]));
    }

    use crate::tensor_trait::elemwise::ElemwiseTensorOp;

    #[test]
    fn ceil() {
        let a = GenTensor::<f32>::new_raw(&vec![0.9213, 1.0887, -0.8858, -1.7683],
                                          &vec![4]);

        let ret = a.ceil();

        let expect = GenTensor::<f32>::new_raw(&vec![1., 2., 0., -1.],
                                               &vec![4]);
        assert_eq!(ret, expect);
    }

    #[test]
    fn log1pexp() {
        let a = GenTensor::<f32>::new_raw(&vec![0.9213, 1.0887, -0.8858, -1.7683],
                                          &vec![4]);

        let ret = a.log1pexp();

        let expect = GenTensor::<f32>::new_raw(&vec![1.2563436, 1.3788694, 0.34527916, 0.15753591],
                                               &vec![4]);
        assert_eq!(ret, expect);
    }

    #[test]
    fn sigmoid() {
        let a = GenTensor::<f32>::new_raw(&vec![0.9213, 1.0887, -0.8858, -1.7683],
                                          &vec![4]);

        let ret = a.sigmoid();

        let expect = GenTensor::<f32>::new_raw(&vec![0.71530694, 0.7481369, 0.29197732, 0.14575386],
                                               &vec![4]);
        assert_eq!(ret, expect);
    }

    #[test]
    fn sign() {
        let a = GenTensor::<f32>::new_raw(&vec![0.9213, 0.0, -0.0, -1.7683],
                                          &vec![4]);

        let ret = a.sign();

        let expect = GenTensor::<f32>::new_raw(&vec![1.0, 0.0, 0.0, -1.0],
                                               &vec![4]);
        assert_eq!(ret, expect);
    }


    // Pins arg_sort's flag semantics: descending=true yields ascending
    // index order per row (see NOTE on arg_sort).
    #[test]
    fn arg_sort() {
        let a = GenTensor::<f32>::new_raw(&vec![0.0785, 1.5267, -0.8521, 0.4065,
                                                0.1598, 0.0788, -0.0745, -1.2700,
                                                1.2208, 1.0722, -0.7064, 1.2564,
                                                0.0669, -0.2318, -0.8229, -0.9280,],
                                          &vec![4, 4]);

        let index = a.arg_sort(1, true);

        let expect = GenTensor::<f32>::new_raw(&vec![2., 0., 3., 1.,
                                                     3., 2., 1., 0.,
                                                     2., 1., 0., 3.,
                                                     3., 2., 1., 0.],
                                               &vec![4, 4]);
        assert_eq!(index, expect);
    }

    #[test]
    fn ne() {
        let a = GenTensor::<f32>::new_raw(&vec![1., 3., 10., 11.], &vec![2,2]);
        let b = GenTensor::<f32>::new_raw(&vec![2., 3., 10., 6.], &vec![2,2]);
        let c = a.ne(&b);
        assert_eq!(c, GenTensor::<f32>::new_raw(&vec![1., 0., 0., 1.], &vec![2,2]));
    }
}