pub struct GraphTensor<S: Shape, T: DType, D: Dev> { /* private fields */ }
Expand description
A tensor representing an intermediary result of a graph. Performing operations on this tensor will not cause any computations.
Implementations§
Source§impl<const B: usize, const M: usize, const K: usize, T: DType, D: Dev> GraphTensor<R3<B, M, K>, T, D>
impl<const B: usize, const M: usize, const K: usize, T: DType, D: Dev> GraphTensor<R3<B, M, K>, T, D>
pub fn matmul<const N: usize>( self, rhs: GraphTensor<R3<B, K, N>, T, D>, ) -> GraphTensor<R3<B, M, N>, T, D>
Source§pub fn matmul_axpby<const N: usize>(
self,
rhs: GraphTensor<R3<B, K, N>, T, D>,
out: GraphTensor<R3<B, M, N>, T, D>,
alpha: T,
beta: T,
) -> GraphTensor<R3<B, M, N>, T, D>
pub fn matmul_axpby<const N: usize>( self, rhs: GraphTensor<R3<B, K, N>, T, D>, out: GraphTensor<R3<B, M, N>, T, D>, alpha: T, beta: T, ) -> GraphTensor<R3<B, M, N>, T, D>
Computes `out = out * alpha + beta * (lhs * rhs)`, where `lhs` is `self`.
Examples found in repository?
examples/matmul/main.rs (line 19)
4fn bench<T: DType, const B: usize, const M: usize, const K: usize, const N: usize>(
5 type_name: &str,
6 alpha: T,
7 beta: T,
8) {
9 // Number of times to run the matmul for averaging
10 let iterations = 1;
11 let mut total = std::time::Duration::new(0, 0);
12
13 let mut graph = Graph::empty();
14 let a = GraphTensor::<R3<B, M, K>, T, BestDevice<0>>::fill(&mut graph, T::from_f64(1.));
15    // Strided matmuls work on all devices.
16 let b = GraphTensor::<R3<B, N, K>, T, BestDevice<0>>::fill(&mut graph, T::from_f64(2.)).t();
17 // let b = GraphTensor::<R3<B, K, N>, T, BestDevice<0>>::fill(&mut graph, T::from_f64(2.));
18 let o = GraphTensor::<R3<B, M, N>, T, BestDevice<0>>::fill(&mut graph, T::from_f64(3.));
19 let _c = a.matmul_axpby(b, o, alpha, beta);
20
21 graph.optimize();
22 let compiled: CompiledGraph<R3<B, M, N>, T, BestDevice<0>> = graph.compile().unwrap();
23
24 for _ in 0..iterations {
25 let start = Instant::now();
26
27 let tensor = std::hint::black_box(compiled.run().unwrap());
28 dbg!(tensor.data().unwrap());
29
30 total += start.elapsed();
31 }
32
33 let avg = total / (iterations as u32);
34 println!("Average execution time for {type_name} over {iterations} iterations: {avg:?}");
35}
Source§impl<S: Shape, T: DType, D: Dev> GraphTensor<S, T, D>
impl<S: Shape, T: DType, D: Dev> GraphTensor<S, T, D>
Source§pub fn fill(graph: &mut Graph<T>, v: T) -> Self
pub fn fill(graph: &mut Graph<T>, v: T) -> Self
Create a tensor filled with some value.
Examples found in repository?
examples/hello_world/main.rs (line 6)
3fn main() {
4 let mut graph: Graph<f32> = Graph::empty();
5 let _arange = GraphTensor::<R1<10>, f32, Cpu>::arange(&mut graph, 0., 1.);
6 let a = GraphTensor::<R2<3, 4>, f32, Cpu>::fill(&mut graph, 1.0);
7 let b = GraphTensor::<R2<3, 4>, f32, Cpu>::fill(&mut graph, 2.0);
8 let c = GraphTensor::<R2<3, 4>, f32, Cpu>::fill(&mut graph, 3.0);
9 let d = GraphTensor::<R2<3, 4>, f32, Cpu>::fill(&mut graph, 4.0);
10 let res = a * b + c;
11 let _out = res + d;
12
13 graph.optimize();
14
15 graph.visualize("graph.png").unwrap();
16
17 let compiled: constensor_core::CompiledGraph<R2<3, 4>, f32, Cpu> = graph.compile().unwrap();
18 let res = compiled.run().unwrap();
19
20 let tensor: Tensor<R2<3, 4>, f32, Cpu> = res;
21
22 assert_eq!(tensor.data().unwrap().to_vec(), vec![vec![9.0; 4]; 3],);
23}
More examples
examples/matmul/main.rs (line 14)
4fn bench<T: DType, const B: usize, const M: usize, const K: usize, const N: usize>(
5 type_name: &str,
6 alpha: T,
7 beta: T,
8) {
9 // Number of times to run the matmul for averaging
10 let iterations = 1;
11 let mut total = std::time::Duration::new(0, 0);
12
13 let mut graph = Graph::empty();
14 let a = GraphTensor::<R3<B, M, K>, T, BestDevice<0>>::fill(&mut graph, T::from_f64(1.));
15    // Strided matmuls work on all devices.
16 let b = GraphTensor::<R3<B, N, K>, T, BestDevice<0>>::fill(&mut graph, T::from_f64(2.)).t();
17 // let b = GraphTensor::<R3<B, K, N>, T, BestDevice<0>>::fill(&mut graph, T::from_f64(2.));
18 let o = GraphTensor::<R3<B, M, N>, T, BestDevice<0>>::fill(&mut graph, T::from_f64(3.));
19 let _c = a.matmul_axpby(b, o, alpha, beta);
20
21 graph.optimize();
22 let compiled: CompiledGraph<R3<B, M, N>, T, BestDevice<0>> = graph.compile().unwrap();
23
24 for _ in 0..iterations {
25 let start = Instant::now();
26
27 let tensor = std::hint::black_box(compiled.run().unwrap());
28 dbg!(tensor.data().unwrap());
29
30 total += start.elapsed();
31 }
32
33 let avg = total / (iterations as u32);
34 println!("Average execution time for {type_name} over {iterations} iterations: {avg:?}");
35}
Source§pub fn sqrt(self) -> GraphTensor<S, T, D>
pub fn sqrt(self) -> GraphTensor<S, T, D>
Elementwise unary square root.
Source§impl<S: Shape, T: DType, D: Dev> GraphTensor<S, T, D>
impl<S: Shape, T: DType, D: Dev> GraphTensor<S, T, D>
Source§impl<const A: usize, T: DType, D: Dev> GraphTensor<R1<A>, T, D>
impl<const A: usize, T: DType, D: Dev> GraphTensor<R1<A>, T, D>
Source§pub fn arange(graph: &mut Graph<T>, start: T, stop: T) -> Self
pub fn arange(graph: &mut Graph<T>, start: T, stop: T) -> Self
A GraphTensor representing a vector ranging from `start` to `stop`, with the step computed using `A`.
Examples found in repository?
examples/hello_world/main.rs (line 5)
3fn main() {
4 let mut graph: Graph<f32> = Graph::empty();
5 let _arange = GraphTensor::<R1<10>, f32, Cpu>::arange(&mut graph, 0., 1.);
6 let a = GraphTensor::<R2<3, 4>, f32, Cpu>::fill(&mut graph, 1.0);
7 let b = GraphTensor::<R2<3, 4>, f32, Cpu>::fill(&mut graph, 2.0);
8 let c = GraphTensor::<R2<3, 4>, f32, Cpu>::fill(&mut graph, 3.0);
9 let d = GraphTensor::<R2<3, 4>, f32, Cpu>::fill(&mut graph, 4.0);
10 let res = a * b + c;
11 let _out = res + d;
12
13 graph.optimize();
14
15 graph.visualize("graph.png").unwrap();
16
17 let compiled: constensor_core::CompiledGraph<R2<3, 4>, f32, Cpu> = graph.compile().unwrap();
18 let res = compiled.run().unwrap();
19
20 let tensor: Tensor<R2<3, 4>, f32, Cpu> = res;
21
22 assert_eq!(tensor.data().unwrap().to_vec(), vec![vec![9.0; 4]; 3],);
23}
Source§impl<T: DType, const A: usize, const B: usize, D: Dev> GraphTensor<R2<A, B>, T, D>
impl<T: DType, const A: usize, const B: usize, D: Dev> GraphTensor<R2<A, B>, T, D>
Source§pub fn t(&self) -> GraphTensor<R2<B, A>, T, D>
pub fn t(&self) -> GraphTensor<R2<B, A>, T, D>
Return a view of this matrix with dimensions transposed (A x B -> B x A).
Source§impl<T: DType, const A: usize, const B: usize, const C: usize, D: Dev> GraphTensor<R3<A, B, C>, T, D>
impl<T: DType, const A: usize, const B: usize, const C: usize, D: Dev> GraphTensor<R3<A, B, C>, T, D>
Source§pub fn t(&self) -> GraphTensor<R3<A, C, B>, T, D>
pub fn t(&self) -> GraphTensor<R3<A, C, B>, T, D>
Return a view of this tensor with last two reversed axes (A x B x C -> A x C x B).
Examples found in repository?
examples/matmul/main.rs (line 16)
4fn bench<T: DType, const B: usize, const M: usize, const K: usize, const N: usize>(
5 type_name: &str,
6 alpha: T,
7 beta: T,
8) {
9 // Number of times to run the matmul for averaging
10 let iterations = 1;
11 let mut total = std::time::Duration::new(0, 0);
12
13 let mut graph = Graph::empty();
14 let a = GraphTensor::<R3<B, M, K>, T, BestDevice<0>>::fill(&mut graph, T::from_f64(1.));
15    // Strided matmuls work on all devices.
16 let b = GraphTensor::<R3<B, N, K>, T, BestDevice<0>>::fill(&mut graph, T::from_f64(2.)).t();
17 // let b = GraphTensor::<R3<B, K, N>, T, BestDevice<0>>::fill(&mut graph, T::from_f64(2.));
18 let o = GraphTensor::<R3<B, M, N>, T, BestDevice<0>>::fill(&mut graph, T::from_f64(3.));
19 let _c = a.matmul_axpby(b, o, alpha, beta);
20
21 graph.optimize();
22 let compiled: CompiledGraph<R3<B, M, N>, T, BestDevice<0>> = graph.compile().unwrap();
23
24 for _ in 0..iterations {
25 let start = Instant::now();
26
27 let tensor = std::hint::black_box(compiled.run().unwrap());
28 dbg!(tensor.data().unwrap());
29
30 total += start.elapsed();
31 }
32
33 let avg = total / (iterations as u32);
34 println!("Average execution time for {type_name} over {iterations} iterations: {avg:?}");
35}
Trait Implementations§
Source§impl<S: Clone + Shape, T: Clone + DType, D: Clone + Dev> Clone for GraphTensor<S, T, D>
impl<S: Clone + Shape, T: Clone + DType, D: Clone + Dev> Clone for GraphTensor<S, T, D>
Source§fn clone(&self) -> GraphTensor<S, T, D>
fn clone(&self) -> GraphTensor<S, T, D>
Returns a copy of the value. Read more
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from `source`. Read more
Auto Trait Implementations§
impl<S, T, D> Freeze for GraphTensor<S, T, D>
impl<S, T, D> !RefUnwindSafe for GraphTensor<S, T, D>
impl<S, T, D> !Send for GraphTensor<S, T, D>
impl<S, T, D> !Sync for GraphTensor<S, T, D>
impl<S, T, D> Unpin for GraphTensor<S, T, D>
impl<S, T, D> !UnwindSafe for GraphTensor<S, T, D>
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
Source§impl<T> CloneToUninit for T
where
    T: Clone,
impl<T> CloneToUninit for T
where
    T: Clone,
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts `self` into a `Left` variant of `Either<Self, Self>` if `into_left` is `true`.
Converts `self` into a `Right` variant of `Either<Self, Self>` otherwise. Read more
Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts `self` into a `Left` variant of `Either<Self, Self>` if `into_left(&self)` returns `true`.
Converts `self` into a `Right` variant of `Either<Self, Self>` otherwise. Read more