1use core::marker::PhantomData;
2use cubecl_core::ir::StorageType;
3use cubecl_core::tensor_line_size_parallel;
4use cubecl_core::{Runtime, server};
5use cubecl_core::{calculate_cube_count_elemwise, server::Allocation};
6use cubecl_core::{prelude::*, server::CopyDescriptor};
7use cubecl_runtime::server::Handle;
8
/// Handle to a tensor allocated on a [`Runtime`]'s compute server, pairing the
/// raw device allocation with the layout metadata (shape, strides, element
/// type) needed to interpret it.
pub struct TensorHandle<R>
where
    R: Runtime,
{
    /// Backing allocation on the compute server.
    pub handle: server::Handle,
    /// Size of each dimension.
    pub shape: Vec<usize>,
    /// Stride of each dimension; presumably counted in elements rather than
    /// bytes, since the element size is passed separately wherever this is
    /// consumed (e.g. `as_ref`) — TODO confirm.
    pub strides: Vec<usize>,
    /// Storage type of a single element.
    pub dtype: StorageType,
    // Zero-sized marker tying this handle to a specific runtime `R` without
    // storing one.
    runtime: PhantomData<R>,
}
22
23impl<R> core::fmt::Debug for TensorHandle<R>
24where
25 R: Runtime,
26{
27 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
28 f.write_fmt(format_args!(
29 "Tensor {{ shape: {:?}, strides: {:?}, dtype: {}}}",
30 self.shape, self.strides, self.dtype,
31 ))
32 }
33}
34
35impl<R> Clone for TensorHandle<R>
36where
37 R: Runtime,
38{
39 fn clone(&self) -> Self {
40 Self {
41 handle: self.handle.clone(),
42 shape: self.shape.clone(),
43 strides: self.strides.clone(),
44 dtype: self.dtype,
45 runtime: PhantomData,
46 }
47 }
48}
49
50impl<R> TensorHandle<R>
51where
52 R: Runtime,
53{
54 pub fn new(
56 handle: server::Handle,
57 shape: Vec<usize>,
58 strides: Vec<usize>,
59 storage: StorageType,
60 ) -> Self {
61 Self {
62 handle,
63 shape,
64 strides,
65 dtype: storage,
66 runtime: PhantomData,
67 }
68 }
69
70 pub fn empty(client: &ComputeClient<R>, shape: Vec<usize>, storage: StorageType) -> Self {
71 let elem_size = storage.size();
72 let Allocation { handle, strides } = client.empty_tensor(&shape, elem_size);
73
74 Self::new(handle, shape, strides, storage)
75 }
76
77 pub fn from_ref(handle: &TensorHandleRef<'_, R>, storage: StorageType) -> Self {
79 Self {
80 handle: handle.handle.clone(),
81 shape: handle.shape.to_vec(),
82 strides: handle.strides.to_vec(),
83 dtype: storage,
84 runtime: PhantomData,
85 }
86 }
87
88 pub fn new_contiguous(shape: Vec<usize>, handle: Handle, storage: StorageType) -> Self {
90 let strides = Self::contiguous_strides(&shape);
91
92 Self {
93 handle,
94 shape,
95 strides,
96 dtype: storage,
97 runtime: PhantomData,
98 }
99 }
100
101 pub fn can_mut(&self) -> bool {
103 self.handle.can_mut()
104 }
105
106 pub fn as_ref(&self) -> TensorHandleRef<'_, R> {
107 unsafe {
108 TensorHandleRef::from_raw_parts(
109 &self.handle,
110 &self.strides,
111 &self.shape,
112 self.dtype.size(),
113 )
114 }
115 }
116
117 pub fn as_arg<'a>(&'a self, vectorisation: u8) -> TensorArg<'a, R> {
119 let handle: TensorHandleRef<'a, R> = self.as_ref();
120
121 unsafe {
122 TensorArg::from_raw_parts_and_size(
123 handle.handle,
124 handle.strides,
125 handle.shape,
126 vectorisation,
127 handle.elem_size,
128 )
129 }
130 }
131
132 pub fn as_copy_descriptor<'a>(&'a self) -> CopyDescriptor<'a> {
133 CopyDescriptor {
134 binding: self.handle.clone().binding(),
135 shape: &self.shape,
136 strides: &self.strides,
137 elem_size: self.dtype.size(),
138 }
139 }
140
141 fn contiguous_strides(shape: &[usize]) -> Vec<usize> {
142 let mut strides = Vec::with_capacity(shape.len());
143
144 let mut current = 1;
145 shape.iter().enumerate().rev().for_each(|(_, val)| {
146 strides.push(current);
147 current *= val;
148 });
149 strides.reverse();
150 strides
151 }
152}
impl<R> TensorHandle<R>
where
    R: Runtime,
{
    /// Allocates a tensor of `shape` on the client and fills it with zeros by
    /// launching the `init::zeros_array` kernel.
    pub fn zeros(client: &ComputeClient<R>, shape: Vec<usize>, dtype: StorageType) -> Self {
        let num_elements: usize = shape.iter().product();
        let rank = shape.len();
        let output = Self::empty(client, shape, dtype);

        // Pick a supported line (vector) width for the innermost axis.
        // NOTE(review): `rank - 1` underflows (debug panic) for a rank-0
        // shape — confirm callers never pass an empty shape, or guard with
        // `rank.saturating_sub(1)`.
        let line_size = tensor_line_size_parallel(
            R::supported_line_sizes().iter().cloned(),
            &output.shape,
            &output.strides,
            rank - 1,
        );

        let cube_dim = CubeDim::default();
        let cube_count = calculate_cube_count_elemwise(num_elements / line_size as usize, cube_dim);
        // Element count derived from the allocation's byte size rather than
        // from `shape`, so any server-side padding is also zeroed.
        let array_len = output.handle.size() as usize / dtype.size();

        // Unchecked launch is acceptable here: the kernel bounds-checks with
        // `ABSOLUTE_POS < output.len()` before writing.
        unsafe {
            init::zeros_array::launch_unchecked(
                client,
                cube_count,
                cube_dim,
                ArrayArg::from_raw_parts_and_size(
                    &output.handle,
                    array_len,
                    line_size,
                    dtype.size(),
                ),
                dtype,
            )
            .expect("Should be able to launch the kernel all the time")
        };

        output
    }
}
192
/// Kernels for initializing freshly allocated tensors.
pub(crate) mod init {
    use cubecl::prelude::*;
    use cubecl_core::{self as cubecl, ir::StorageType};

    /// Writes a zero `Line` to every in-bounds position of `output`.
    ///
    /// `_elem` only selects the concrete numeric type `C` at launch time via
    /// `#[define(C)]`; it is not read inside the kernel body.
    #[cube(launch_unchecked)]
    pub fn zeros_array<C: Numeric>(output: &mut Array<Line<C>>, #[define(C)] _elem: StorageType) {
        // Guard: the array may be launched with more units than elements.
        if ABSOLUTE_POS < output.len() {
            output[ABSOLUTE_POS] = Line::cast_from(C::from_int(0));
        }
    }
}