Trait burn_tensor::ops::QTensorOps
pub trait QTensorOps<B: Backend> {
    // Required methods
    fn q_from_data<const D: usize>(
        data: TensorData,
        device: &Device<B>,
    ) -> QuantizedTensor<B, D>;
    fn quantize<const D: usize>(
        tensor: FloatTensor<B, D>,
        scheme: &QuantizationScheme,
        qparams: QuantizationParametersPrimitive<B>,
    ) -> QuantizedTensor<B, D>;
    fn dequantize<const D: usize>(
        tensor: QuantizedTensor<B, D>,
    ) -> FloatTensor<B, D>;
    fn q_shape<const D: usize>(tensor: &QuantizedTensor<B, D>) -> Shape<D>;
    fn q_device<const D: usize>(tensor: &QuantizedTensor<B, D>) -> Device<B>;
    fn q_reshape<const D1: usize, const D2: usize>(
        tensor: QuantizedTensor<B, D1>,
        shape: Shape<D2>,
    ) -> QuantizedTensor<B, D2>;
    fn q_into_data<const D: usize>(
        tensor: QuantizedTensor<B, D>,
    ) -> impl Future<Output = TensorData> + Send;

    // Provided methods
    fn q_set_require_grad<const D: usize>(
        tensor: QuantizedTensor<B, D>,
        _require_grad: bool,
    ) -> QuantizedTensor<B, D> { ... }
    fn q_is_require_grad<const D: usize>(
        _tensor: &QuantizedTensor<B, D>,
    ) -> bool { ... }
}
Quantized Tensor API for basic operations. See the tensor module documentation for details on each function.
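Backends expose these operations as associated functions, so generic code calls them through the backend type itself. Below is a minimal sketch assuming, as with the other *Ops traits, that each backend implements QTensorOps for itself (hence the explicit B: Backend + QTensorOps<B> bound); item paths here and in the sketches that follow mirror this page's burn_tensor layout and may differ between versions.

use burn_tensor::backend::Backend;
use burn_tensor::ops::{QTensorOps, QuantizedTensor};
use burn_tensor::Shape;

// Inspect a quantized tensor without dequantizing it, using only q_shape and
// q_device from this trait.
fn describe<B: Backend + QTensorOps<B>, const D: usize>(
    tensor: &QuantizedTensor<B, D>,
) -> (Shape<D>, B::Device) {
    (B::q_shape(tensor), B::q_device(tensor))
}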
Required Methods
fn q_from_data<const D: usize>(
    data: TensorData,
    device: &Device<B>,
) -> QuantizedTensor<B, D>

Creates a quantized tensor from the given TensorData on the given device.
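q_from_data is typically paired with q_into_data, for example to rebuild a quantized tensor on another device from its raw data. A hedged sketch (the helper name is illustrative, and it assumes the TensorData produced by q_into_data preserves the quantized representation):

use burn_tensor::backend::Backend;
use burn_tensor::ops::{QTensorOps, QuantizedTensor};

// Hypothetical helper: move a quantized tensor to another device by reading
// its data back and recreating it there.
async fn q_to_device<B: Backend + QTensorOps<B>, const D: usize>(
    tensor: QuantizedTensor<B, D>,
    device: &B::Device,
) -> QuantizedTensor<B, D> {
    let data = B::q_into_data(tensor).await;
    B::q_from_data(data, device)
}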
fn quantize<const D: usize>(
    tensor: FloatTensor<B, D>,
    scheme: &QuantizationScheme,
    qparams: QuantizationParametersPrimitive<B>,
) -> QuantizedTensor<B, D>
Convert the tensor to a lower precision data type based on the quantization scheme and parameters.
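How a QuantizationScheme and QuantizationParametersPrimitive are constructed is backend- and version-specific, so the sketch below simply forwards caller-supplied values; only the quantize call itself comes from this page.

use burn_tensor::backend::Backend;
use burn_tensor::ops::{FloatTensor, QTensorOps, QuantizedTensor};
use burn_tensor::quantization::{QuantizationParametersPrimitive, QuantizationScheme};

// Quantize a float tensor with a caller-provided scheme and parameters.
fn quantize_with<B: Backend + QTensorOps<B>, const D: usize>(
    tensor: FloatTensor<B, D>,
    scheme: &QuantizationScheme,
    qparams: QuantizationParametersPrimitive<B>,
) -> QuantizedTensor<B, D> {
    B::quantize(tensor, scheme, qparams)
}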
fn dequantize<const D: usize>(
    tensor: QuantizedTensor<B, D>,
) -> FloatTensor<B, D>
Convert the tensor back to a higher precision data type.
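A quantize followed by dequantize round trip is lossy but shape-preserving, which is the basis of fake quantization. A sketch under the same assumptions as above (helper name illustrative):

use burn_tensor::backend::Backend;
use burn_tensor::ops::{FloatTensor, QTensorOps};
use burn_tensor::quantization::{QuantizationParametersPrimitive, QuantizationScheme};

// Quantize and immediately dequantize, returning a float tensor that carries
// the quantization error of the chosen scheme.
fn fake_quantize<B: Backend + QTensorOps<B>, const D: usize>(
    tensor: FloatTensor<B, D>,
    scheme: &QuantizationScheme,
    qparams: QuantizationParametersPrimitive<B>,
) -> FloatTensor<B, D> {
    let q = B::quantize(tensor, scheme, qparams);
    B::dequantize(q)
}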
fn q_shape<const D: usize>(tensor: &QuantizedTensor<B, D>) -> Shape<D>

Returns the shape of the quantized tensor.
fn q_device<const D: usize>(tensor: &QuantizedTensor<B, D>) -> Device<B>

Returns the device on which the quantized tensor resides.
fn q_reshape<const D1: usize, const D2: usize>(
    tensor: QuantizedTensor<B, D1>,
    shape: Shape<D2>,
) -> QuantizedTensor<B, D2>

Reshapes the quantized tensor to the given shape, changing its dimensionality from D1 to D2.
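Because the rank is a const generic, q_reshape can change dimensionality at the type level without dequantizing. A sketch that flattens to 1-D, assuming Shape exposes num_elements() and Shape::new as elsewhere in burn_tensor:

use burn_tensor::backend::Backend;
use burn_tensor::ops::{QTensorOps, QuantizedTensor};
use burn_tensor::Shape;

// Flatten a quantized tensor to a single dimension while staying in the
// quantized domain.
fn q_flatten<B: Backend + QTensorOps<B>, const D: usize>(
    tensor: QuantizedTensor<B, D>,
) -> QuantizedTensor<B, 1> {
    let num_elements = B::q_shape(&tensor).num_elements();
    B::q_reshape(tensor, Shape::new([num_elements]))
}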
fn q_into_data<const D: usize>(
    tensor: QuantizedTensor<B, D>,
) -> impl Future<Output = TensorData> + Send

Asynchronously reads the quantized tensor back into a TensorData.
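The read-back is asynchronous, so synchronous code has to drive the returned future with some executor. The sketch below uses the pollster crate purely as an assumed minimal executor; any async runtime works.

use burn_tensor::backend::Backend;
use burn_tensor::ops::{QTensorOps, QuantizedTensor};
use burn_tensor::TensorData;

// Block on the future returned by q_into_data to read the tensor's data
// synchronously. `pollster::block_on` is one possible executor choice.
fn q_read_blocking<B: Backend + QTensorOps<B>, const D: usize>(
    tensor: QuantizedTensor<B, D>,
) -> TensorData {
    pollster::block_on(B::q_into_data(tensor))
}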
Provided Methods
fn q_set_require_grad<const D: usize>(
    tensor: QuantizedTensor<B, D>,
    _require_grad: bool,
) -> QuantizedTensor<B, D>

Sets the require_grad flag of a tensor.
fn q_is_require_grad<const D: usize>(_tensor: &QuantizedTensor<B, D>) -> bool

Returns the require_grad flag of a tensor.
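The underscore-prefixed parameters indicate that the provided implementations ignore the flag, which matches quantized tensors not being tracked for autodiff by default; autodiff-aware backends can override both methods. A usage sketch, with that default behaviour stated as an assumption:

use burn_tensor::backend::Backend;
use burn_tensor::ops::{QTensorOps, QuantizedTensor};

// With the provided (default) methods this is expected to return the tensor
// unchanged and report `false`; a backend overriding q_set_require_grad and
// q_is_require_grad may behave differently.
fn mark_trainable<B: Backend + QTensorOps<B>, const D: usize>(
    tensor: QuantizedTensor<B, D>,
) -> (QuantizedTensor<B, D>, bool) {
    let tensor = B::q_set_require_grad(tensor, true);
    let tracked = B::q_is_require_grad(&tensor);
    (tensor, tracked)
}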
Object Safety
This trait is not object safe: its methods have const generic parameters and q_into_data returns impl Future, so it cannot be used behind dyn QTensorOps<B>.