//! The basic tensor primitive for the wgpu backend.
use crate::{
compute::{WgpuComputeClient, WgpuHandle},
unary, WgpuDevice,
};
use crate::{element::WgpuElement, kernel::unary_default};
use burn_tensor::Shape;
use std::marker::PhantomData;
/// The basic tensor primitive struct.
///
/// Holds a handle to GPU memory together with the layout metadata (shape and
/// strides) needed to interpret that buffer as a `D`-dimensional tensor of
/// element type `E`.
#[derive(Debug, Clone)]
pub struct WgpuTensor<E: WgpuElement, const D: usize> {
    /// Compute client for wgpu.
    pub client: WgpuComputeClient,
    /// The buffer where the data are stored.
    pub handle: WgpuHandle,
    /// The shape of the current tensor.
    pub shape: Shape<D>,
    /// The device of the current tensor.
    pub device: WgpuDevice,
    /// The strides of the current tensor.
    pub strides: [usize; D],
    /// Ties the tensor to its element type `E` without storing a value of it.
    pub(crate) elem: PhantomData<E>,
}
impl<E: WgpuElement, const D: usize> WgpuTensor<E, D> {
    /// Create a new tensor.
    ///
    /// Strides are initialized for a contiguous row-major layout: the last
    /// dimension has stride 1 and each preceding dimension's stride is the
    /// product of all later dimension sizes.
    pub fn new(
        client: WgpuComputeClient,
        device: WgpuDevice,
        shape: Shape<D>,
        handle: WgpuHandle,
    ) -> Self {
        let mut strides = [0; D];
        let mut current = 1;
        shape
            .dims
            .iter()
            .enumerate()
            .rev()
            .for_each(|(index, val)| {
                strides[index] = current;
                current *= val;
            });

        Self {
            client,
            handle,
            shape,
            strides,
            device,
            elem: PhantomData,
        }
    }

    /// Change the context of the current tensor and return the newly transferred tensor.
    ///
    /// The data is read back synchronously from the current client and uploaded to the
    /// new one, so this is a blocking operation.
    ///
    /// # Panics
    ///
    /// Panics if the data cannot be read synchronously from the current client.
    pub fn to_client(&self, client: WgpuComputeClient, device: WgpuDevice) -> Self {
        let bytes = self
            .client
            .read(&self.handle)
            .read_sync()
            .expect("Can only change client synchronously");
        let handle = client.create(&bytes);

        Self {
            client,
            handle,
            shape: self.shape.clone(),
            strides: self.strides,
            device,
            elem: PhantomData,
        }
    }

    /// Check whether this tensor's buffer can be reused in place as the output of a
    /// broadcasted binary operation with `tensor_other`.
    ///
    /// Requires the buffer to be mutable and every dimension of `self` to be at least
    /// as large as the corresponding dimension of `tensor_other`; otherwise the
    /// broadcasted output shape would not match this buffer.
    pub(crate) fn can_mut_broadcast(&self, tensor_other: &WgpuTensor<E, D>) -> bool {
        if !self.handle.can_mut() {
            return false;
        }
        for i in 0..D {
            // Output tensor will be different from the mutable tensor.
            if self.shape.dims[i] < tensor_other.shape.dims[i] {
                return false;
            }
        }
        true
    }

    /// Copy the current tensor.
    pub fn copy(&self) -> Self {
        // Seems like using the copy buffer from the `wgpu` API leads to race condition when they
        // are used inplace afterward.
        //
        // To avoid them we need to execute the whole pipeline, which leads to significant
        // slowdowns.
        //
        // The solution is just to use a simple unary compute shader.
        unary!(CopyBuffer, body "output[id] = input[id];");
        unary_default::<CopyBuffer, E, D>(self.clone())
    }

    /// Check if the tensor is safe to mutate.
    pub fn can_mut(&self) -> bool {
        self.handle.can_mut()
    }

    /// Assert that both tensors are on the same device.
    ///
    /// # Panics
    ///
    /// Panics when the two devices differ.
    pub fn assert_is_on_same_device(&self, other: &Self) {
        if self.device != other.device {
            panic!(
                "Both tensors should be on the same device {:?} != {:?}",
                self.device, other.device
            );
        }
    }

    /// Check if the current tensor is contiguous (row-major with no gaps).
    ///
    /// The strides must match exactly the ones a freshly created tensor of the same
    /// shape would have (see [`WgpuTensor::new`]). Merely checking that strides are
    /// non-decreasing from the last dimension to the first is not enough: a sliced
    /// tensor (e.g. shape `[2, 2]` viewed inside a `[2, 4]` buffer with strides
    /// `[4, 1]`) has monotonic strides but gaps in memory, and treating it as
    /// contiguous would let kernels index the wrong elements.
    pub fn is_contiguous(&self) -> bool {
        let mut expected = 1;
        for d in (0..D).rev() {
            // The stride of a size-1 dimension never affects addressing.
            if self.shape.dims[d] == 1 {
                continue;
            }
            if self.strides[d] != expected {
                return false;
            }
            expected *= self.shape.dims[d];
        }
        true
    }

    /// Whether a batch dimension has a smaller stride than one of the two innermost
    /// (row/col) dimensions, i.e. a transpose swapped a matrix dimension with a
    /// batch dimension.
    pub(crate) fn batch_swapped_with_row_col(&self) -> bool {
        // With fewer than two dimensions there are no batch dimensions at all
        // (and `D - 2` would underflow `usize`).
        if D < 2 {
            return false;
        }
        for d in 0..D - 2 {
            let stride = self.strides[d];
            if stride < self.strides[D - 2] || stride < self.strides[D - 1] {
                return true;
            }
        }
        false
    }
}