pub struct ValueRef<'v, Type: ValueTypeMarker + ?Sized = DynValueTypeMarker> { /* private fields */ }
Expand description
A temporary version of a Value with a lifetime specifier.
Implementations§
Source§impl<'a, T: PrimitiveTensorElementType + Debug> ValueRef<'a, TensorValueType<T>>
impl<'a, T: PrimitiveTensorElementType + Debug> ValueRef<'a, TensorValueType<T>>
Sourcepub fn from_array_view(
input: impl TensorArrayData<T> + 'a,
) -> Result<TensorRef<'a, T>>
pub fn from_array_view( input: impl TensorArrayData<T> + 'a, ) -> Result<TensorRef<'a, T>>
Construct a tensor from borrowed data.
Borrowed tensors can be created from:
- (with feature `ndarray`) a shared reference to an `ndarray::CowArray` (`&CowArray<'_, T, D>`) or an `ndarray::Array` (`&Array<T, D>`);
- (with feature `ndarray`) an `ndarray::ArcArray` or `ndarray::ArrayView`;
- a tuple of `(shape, data)` where: `shape` is one of `Vec<I>`, `[I; N]`, or `&[I]`, where `I` is `i64` or `usize`, and `data` is one of `&[T]`, `Arc<[T]>`, or `Arc<Box<[T]>>`.
// Create a tensor from a raw data vector
let data = vec![1.0_f32, 2.0, 3.0, 4.0, 5.0, 6.0];
let tensor = TensorRef::from_array_view(([1usize, 2, 3], &*data))?;
// Create a tensor from an `ndarray::Array`
let array = ndarray::Array4::<f32>::zeros((1, 16, 16, 3));
let tensor = TensorRef::from_array_view(array.view())?;
When passing an ndarray type, the data must have a contiguous memory layout, or else an error will be
returned. See ndarray::ArrayRef::as_standard_layout to convert an array to a contiguous layout.
Source§impl<'v, Type: ValueTypeMarker + ?Sized> ValueRef<'v, Type>
impl<'v, Type: ValueTypeMarker + ?Sized> ValueRef<'v, Type>
Sourcepub fn downcast<OtherType: ValueTypeMarker + DowncastableTarget + ?Sized>(
self,
) -> Result<ValueRef<'v, OtherType>>
pub fn downcast<OtherType: ValueTypeMarker + DowncastableTarget + ?Sized>( self, ) -> Result<ValueRef<'v, OtherType>>
Attempts to downcast a temporary dynamic value (like DynValue or DynTensor) to a more strongly typed
variant, like TensorRef<T>.
Sourcepub fn try_upgrade(self) -> Result<Value<Type>, Self>
pub fn try_upgrade(self) -> Result<Value<Type>, Self>
Attempts to upgrade this ValueRef to an owned Value holding the same data.
pub fn into_dyn(self) -> ValueRef<'v, DynValueTypeMarker>
Methods from Deref<Target = Value<Type>>§
pub fn try_extract_key_values<K: IntoTensorElementType + Clone + Hash + Eq, V: PrimitiveTensorElementType + Clone>( &self, ) -> Result<Vec<(K, V)>>
pub fn try_extract_map<K: IntoTensorElementType + Clone + Hash + Eq, V: PrimitiveTensorElementType + Clone>( &self, ) -> Result<HashMap<K, V>>
std only.pub fn try_extract_sequence<'s, OtherType: ValueTypeMarker + DowncastableTarget + Debug + Sized>( &'s self, ) -> Result<Vec<ValueRef<'s, OtherType>>>
Sourcepub fn to(
&self,
device: AllocationDevice,
device_id: i32,
) -> Result<Value<Type>>
Available on non-WebAssembly only.
pub fn to( &self, device: AllocationDevice, device_id: i32, ) -> Result<Value<Type>>
Copies the contents of this tensor to another device, returning the newly created tensor value.
let cuda_allocator = Allocator::new(
&session,
MemoryInfo::new(AllocationDevice::CUDA, 0, AllocatorType::Device, MemoryType::Default)?
)?;
let cuda_tensor = Tensor::<f32>::new(&cuda_allocator, [1_usize, 3, 224, 224])?;
let cpu_tensor = cuda_tensor.to(AllocationDevice::CPU, 0)?;
assert_eq!(cpu_tensor.memory_info().allocation_device(), AllocationDevice::CPU);
assert_eq!(**cpu_tensor.shape(), [1, 3, 224, 224]);
Sourcepub fn to_async(
&self,
device: AllocationDevice,
device_id: i32,
) -> Result<Value<Type>>
Available on non-WebAssembly only.
pub fn to_async( &self, device: AllocationDevice, device_id: i32, ) -> Result<Value<Type>>
Asynchronously copies the contents of this tensor to another device.
Unlike Tensor::to, the device’s stream will not be synchronized (like via
cudaStreamSynchronize); thus this function is most useful for host-to-device transfers.
let cuda_tensor = tensor.to_async(AllocationDevice::CUDA, 0)?;
// pass to other CUDA code, or to session input
Sourcepub fn copy_into(&self, target: &mut Value<Type>) -> Result<()>
Available on non-WebAssembly only.
pub fn copy_into(&self, target: &mut Value<Type>) -> Result<()>
Copies the contents of this tensor to another tensor potentially residing on a separate device.
let cuda_allocator = Allocator::new(
&session,
MemoryInfo::new(AllocationDevice::CUDA, 0, AllocatorType::Device, MemoryType::Default)?
)?;
let cuda_tensor = Tensor::<f32>::new(&cuda_allocator, [1_usize, 3, 224, 224])?;
let mut cpu_tensor = Tensor::<f32>::new(&Allocator::default(), [1_usize, 3, 224, 224])?;
cuda_tensor.copy_into(&mut cpu_tensor)?;
Sourcepub fn copy_into_async(&self, target: &mut Value<Type>) -> Result<()>
Available on non-WebAssembly only.
pub fn copy_into_async(&self, target: &mut Value<Type>) -> Result<()>
Asynchronously copies the contents of this tensor to another tensor.
Unlike Tensor::copy_into, the device’s stream will not be synchronized
(like via cudaStreamSynchronize); thus this function is most useful for host-to-device transfers.
let cpu_tensor = Tensor::<f32>::new(&Allocator::default(), [1_usize, 3, 224, 224])?;
let cuda_allocator = Allocator::new(
&session,
MemoryInfo::new(AllocationDevice::CUDA, 0, AllocatorType::Device, MemoryType::Default)?
)?;
let mut cuda_tensor = Tensor::<f32>::new(&cuda_allocator, [1_usize, 3, 224, 224])?;
cpu_tensor.copy_into_async(&mut cuda_tensor)?;
Sourcepub fn try_extract_array<T: PrimitiveTensorElementType>(
&self,
) -> Result<ArrayViewD<'_, T>>
Available on crate feature ndarray only.
pub fn try_extract_array<T: PrimitiveTensorElementType>( &self, ) -> Result<ArrayViewD<'_, T>>
Available on crate feature `ndarray` only. Attempt to extract the underlying data of type `T` into a read-only `ndarray::ArrayView`.
See also:
- the mutable counterpart of this function, `Tensor::try_extract_array_mut`;
- the infallible counterpart, `Tensor::extract_array`, for typed `Tensor<T>`s;
- the alternative function for strings, `Tensor::try_extract_string_array`.
let array = ndarray::Array4::<f32>::ones((1, 16, 16, 3));
let value = TensorRef::from_array_view(array.view())?.into_dyn();
let extracted = value.try_extract_array::<f32>()?;
assert_eq!(array.view().into_dyn(), extracted);
§Errors
May return an error if:
- This is a `DynValue`, and the value is not actually a tensor. (For typed `Tensor`s, use the infallible `Tensor::extract_array` instead.)
- The provided type `T` does not match the tensor’s element type.
- The tensor’s data is not allocated in CPU memory.
Sourcepub fn try_extract_scalar<T: PrimitiveTensorElementType + Copy>(
&self,
) -> Result<T>
pub fn try_extract_scalar<T: PrimitiveTensorElementType + Copy>( &self, ) -> Result<T>
Attempt to extract the scalar from a tensor of type T.
let value = Tensor::from_array(((), vec![3.14_f32]))?.into_dyn();
let extracted = value.try_extract_scalar::<f32>()?;
assert_eq!(extracted, 3.14);
§Errors
May return an error if:
- The tensor is not 0-dimensional.
- The provided type `T` does not match the tensor’s element type.
- This is a `DynValue`, and the value is not actually a tensor.
- The tensor’s data is not allocated in CPU memory.
Sourcepub fn try_extract_tensor<T: PrimitiveTensorElementType>(
&self,
) -> Result<(&Shape, &[T])>
pub fn try_extract_tensor<T: PrimitiveTensorElementType>( &self, ) -> Result<(&Shape, &[T])>
Attempt to extract the underlying data into a view tuple, consisting of the tensor’s Shape and an
immutable view into its data.
See also:
- the mutable counterpart of this function, `Tensor::try_extract_tensor_mut`;
- the infallible counterpart, `Tensor::extract_tensor`, for typed `Tensor<T>`s;
- the alternative function for strings, `Tensor::try_extract_strings`.
let array = vec![1_i64, 2, 3, 4, 5];
let value = Tensor::from_array(([array.len()], array.clone().into_boxed_slice()))?.into_dyn();
let (extracted_shape, extracted_data) = value.try_extract_tensor::<i64>()?;
assert_eq!(extracted_data, &array);
assert_eq!(**extracted_shape, [5]);
§Errors
May return an error if:
- This is a `DynValue`, and the value is not actually a tensor. (For typed `Tensor`s, use the infallible `Tensor::extract_tensor` instead.)
- The provided type `T` does not match the tensor’s element type.
Sourcepub fn try_extract_string_array(&self) -> Result<ArrayD<String>>
Available on crate feature ndarray only.
pub fn try_extract_string_array(&self) -> Result<ArrayD<String>>
Available on crate feature `ndarray` only. Attempt to extract the underlying data into a Rust ndarray.
let array = ndarray::Array1::from_vec(vec!["hello", "world"]);
let tensor = Tensor::from_string_array(&array)?.into_dyn();
let extracted = tensor.try_extract_string_array()?;
assert_eq!(array.into_dyn(), extracted);
Sourcepub fn try_extract_strings(&self) -> Result<(&Shape, Vec<String>)>
pub fn try_extract_strings(&self) -> Result<(&Shape, Vec<String>)>
Attempt to extract the underlying string data into a tuple, consisting of the tensor’s shape and
an owned Vec of its data.
let array = vec!["hello", "world"];
let tensor = Tensor::from_string_array(([array.len()], &*array))?.into_dyn();
let (extracted_shape, extracted_data) = tensor.try_extract_strings()?;
assert_eq!(extracted_data, array);
assert_eq!(**extracted_shape, [2]);
Sourcepub fn shape(&self) -> &Shape
pub fn shape(&self) -> &Shape
Returns the shape of the tensor.
let tensor = Tensor::<f32>::new(&allocator, [1_usize, 128, 128, 3])?;
assert_eq!(**tensor.shape(), [1, 128, 128, 3]);
pub fn data_type(&self) -> &TensorElementType
Sourcepub fn data_ptr(&self) -> *const c_void
pub fn data_ptr(&self) -> *const c_void
Returns an immutable pointer to the tensor’s underlying data. The pointer may be null in the case of zero-sized tensors.
It’s important to note that the resulting pointer may not point to CPU-accessible memory. In the case of a
tensor created on a different EP device, e.g. via Tensor::new, the pointer returned by this function may be
a CUDA pointer, which would require a separate crate (like cudarc) to access.
Use Tensor::memory_info & MemoryInfo::allocation_device to check which device the data resides on before
accessing it.
let tensor = Tensor::<i64>::from_array((vec![5], vec![0, 1, 2, 3, 4]))?;
let ptr = tensor.data_ptr().cast::<i64>();
assert_eq!(unsafe { *ptr.add(3) }, 3);
Sourcepub fn memory_info(&self) -> &MemoryInfo
pub fn memory_info(&self) -> &MemoryInfo
Returns information about the device this tensor is allocated on.
let tensor = Tensor::<f32>::new(&Allocator::default(), [1_usize, 3, 224, 224])?;
// Tensors are allocated on CPU by default.
assert_eq!(tensor.memory_info().allocation_device(), AllocationDevice::CPU);
let cuda_allocator = Allocator::new(
&session,
MemoryInfo::new(AllocationDevice::CUDA, 0, AllocatorType::Device, MemoryType::Default)?
)?;
let tensor = Tensor::<f32>::new(&cuda_allocator, [1_usize, 3, 224, 224])?;
assert_eq!(tensor.memory_info().allocation_device(), AllocationDevice::CUDA);