pub struct Value<Type: ValueTypeMarker + ?Sized = DynValueTypeMarker> { /* private fields */ }Expand description
A Value contains data for inputs/outputs in ONNX Runtime graphs. Values can be a Tensor, Sequence
(aka array/vector), or Map.
§Creation
Values can be created via methods like Tensor::from_array, or as the output from running a Session.
// Create a Tensor value from a raw data vector
let value = Tensor::from_array(([1usize, 1, 1, 3], vec![1.0_f32, 2.0, 3.0].into_boxed_slice()))?;
// Create a Tensor value from an `ndarray::Array`
#[cfg(feature = "ndarray")]
let value = Tensor::from_array(ndarray::Array4::<f32>::zeros((1, 16, 16, 3)))?;
// Get a DynValue from a session's output
let value = &upsample.run(ort::inputs![value])?[0];
See Tensor::from_array for more details on what tensor values are accepted.
§Usage
You can access the data contained in a Value by using the relevant extract methods.
You can also use DynValue::downcast to attempt to convert from a DynValue to a more strongly typed value.
For dynamic values, where the type is not known at compile time, see the try_extract_* methods:
Tensor::try_extract_tensor, Tensor::try_extract_array, Sequence::try_extract_sequence, Map::try_extract_map
If the type was created from Rust (via a method like Tensor::from_array or via downcasting), you can directly
extract the data using the infallible extract methods:
Implementations§
Source§impl<Type: MapValueTypeMarker + ?Sized> Value<Type>
impl<Type: MapValueTypeMarker + ?Sized> Value<Type>
pub fn try_extract_key_values<K: IntoTensorElementType + Clone + Hash + Eq, V: PrimitiveTensorElementType + Clone>( &self, ) -> Result<Vec<(K, V)>>
pub fn try_extract_map<K: IntoTensorElementType + Clone + Hash + Eq, V: PrimitiveTensorElementType + Clone>( &self, ) -> Result<HashMap<K, V>>
std only.Source§impl<K: PrimitiveTensorElementType + Debug + Clone + Hash + Eq + 'static, V: PrimitiveTensorElementType + Debug + Clone + 'static> Value<MapValueType<K, V>>
impl<K: PrimitiveTensorElementType + Debug + Clone + Hash + Eq + 'static, V: PrimitiveTensorElementType + Debug + Clone + 'static> Value<MapValueType<K, V>>
Sourcepub fn new(data: impl IntoIterator<Item = (K, V)>) -> Result<Self>
pub fn new(data: impl IntoIterator<Item = (K, V)>) -> Result<Self>
Creates a Map from an iterable emitting K and V.
let mut map = HashMap::<i64, f32>::new();
map.insert(0, 1.0);
map.insert(1, 2.0);
map.insert(2, 3.0);
let value = Map::<i64, f32>::new(map)?;
assert_eq!(*value.extract_map().get(&0).unwrap(), 1.0);Source§impl<V: PrimitiveTensorElementType + Debug + Clone + 'static> Value<MapValueType<String, V>>
impl<V: PrimitiveTensorElementType + Debug + Clone + 'static> Value<MapValueType<String, V>>
Sourcepub fn new(data: impl IntoIterator<Item = (String, V)>) -> Result<Self>
pub fn new(data: impl IntoIterator<Item = (String, V)>) -> Result<Self>
Creates a Map from an iterable emitting K and V.
let mut map = HashMap::<String, f32>::new();
map.insert("one".to_string(), 1.0);
map.insert("two".to_string(), 2.0);
map.insert("three".to_string(), 3.0);
let value = Map::<String, f32>::new(map)?;
assert_eq!(*value.extract_map().get("one").unwrap(), 1.0);Source§impl<K: IntoTensorElementType + Debug + Clone + Hash + Eq + 'static, V: IntoTensorElementType + Debug + Clone + 'static> Value<MapValueType<K, V>>
impl<K: IntoTensorElementType + Debug + Clone + Hash + Eq + 'static, V: IntoTensorElementType + Debug + Clone + 'static> Value<MapValueType<K, V>>
Sourcepub fn new_kv(keys: Tensor<K>, values: Tensor<V>) -> Result<Self>
pub fn new_kv(keys: Tensor<K>, values: Tensor<V>) -> Result<Self>
Creates a Map from two tensors of keys & values respectively.
let keys = Tensor::<i64>::from_array(([4], vec![0, 1, 2, 3]))?;
let values = Tensor::<f32>::from_array(([4], vec![1., 2., 3., 4.]))?;
let value = Map::new_kv(keys, values)?;
assert_eq!(*value.extract_map().get(&0).unwrap(), 1.0);Source§impl<K: IntoTensorElementType + Debug + Clone + Hash + Eq, V: PrimitiveTensorElementType + Debug + Clone> Value<MapValueType<K, V>>
impl<K: IntoTensorElementType + Debug + Clone + Hash + Eq, V: PrimitiveTensorElementType + Debug + Clone> Value<MapValueType<K, V>>
pub fn extract_key_values(&self) -> Vec<(K, V)>
pub fn extract_map(&self) -> HashMap<K, V>
std only.Source§impl<K: IntoTensorElementType + Debug + Clone + Hash + Eq, V: IntoTensorElementType + Debug + Clone> Value<MapValueType<K, V>>
impl<K: IntoTensorElementType + Debug + Clone + Hash + Eq, V: IntoTensorElementType + Debug + Clone> Value<MapValueType<K, V>>
Sourcepub fn upcast_ref(&self) -> DynMapRef<'_>
pub fn upcast_ref(&self) -> DynMapRef<'_>
Sourcepub fn upcast_mut(&mut self) -> DynMapRefMut<'_>
pub fn upcast_mut(&mut self) -> DynMapRefMut<'_>
Source§impl<Type: SequenceValueTypeMarker + Sized> Value<Type>
impl<Type: SequenceValueTypeMarker + Sized> Value<Type>
pub fn try_extract_sequence<'s, OtherType: ValueTypeMarker + DowncastableTarget + Debug + Sized>( &'s self, ) -> Result<Vec<ValueRef<'s, OtherType>>>
Source§impl<T: ValueTypeMarker + DowncastableTarget + Debug + Sized + 'static> Value<SequenceValueType<T>>
impl<T: ValueTypeMarker + DowncastableTarget + Debug + Sized + 'static> Value<SequenceValueType<T>>
Sourcepub fn new(values: impl IntoIterator<Item = Value<T>>) -> Result<Self>
pub fn new(values: impl IntoIterator<Item = Value<T>>) -> Result<Self>
Creates a Sequence from an array of Value<T>.
This Value<T> must be either a Tensor or Map.
let tensor1 = Tensor::<f32>::new(&allocator, [1_usize, 128, 128, 3])?;
let tensor2 = Tensor::<f32>::new(&allocator, [1_usize, 224, 224, 3])?;
let value = Sequence::new([tensor1, tensor2])?;
for tensor in value.extract_sequence() {
println!("{:?}", tensor.shape());
}Source§impl<T: ValueTypeMarker + DowncastableTarget + Debug + Sized> Value<SequenceValueType<T>>
impl<T: ValueTypeMarker + DowncastableTarget + Debug + Sized> Value<SequenceValueType<T>>
pub fn extract_sequence<'s>(&'s self) -> Vec<ValueRef<'s, T>>
pub fn len(&self) -> usize
pub fn is_empty(&self) -> bool
pub fn get(&self, index: usize) -> Option<ValueRef<'_, T>>
Sourcepub fn upcast(self) -> DynSequence
pub fn upcast(self) -> DynSequence
Converts from a strongly-typed Sequence<T> to a type-erased DynSequence.
Sourcepub fn upcast_ref(&self) -> DynSequenceRef<'_>
pub fn upcast_ref(&self) -> DynSequenceRef<'_>
Converts from a strongly-typed Sequence<T> to a reference to a type-erased DynSequence.
Sourcepub fn upcast_mut(&mut self) -> DynSequenceRefMut<'_>
pub fn upcast_mut(&mut self) -> DynSequenceRefMut<'_>
Converts from a strongly-typed Sequence<T> to a mutable reference to a type-erased DynSequence.
Source§impl<Type: DefiniteTensorValueTypeMarker + ?Sized> Value<Type>
impl<Type: DefiniteTensorValueTypeMarker + ?Sized> Value<Type>
Sourcepub fn to(
&self,
device: AllocationDevice,
device_id: i32,
) -> Result<Value<Type>>
Available on non-WebAssembly only.
pub fn to( &self, device: AllocationDevice, device_id: i32, ) -> Result<Value<Type>>
Copies the contents of this tensor to another device, returning the newly created tensor value.
let cuda_allocator = Allocator::new(
&session,
MemoryInfo::new(AllocationDevice::CUDA, 0, AllocatorType::Device, MemoryType::Default)?
)?;
let cuda_tensor = Tensor::<f32>::new(&cuda_allocator, [1_usize, 3, 224, 224])?;
let cpu_tensor = cuda_tensor.to(AllocationDevice::CPU, 0)?;
assert_eq!(cpu_tensor.memory_info().allocation_device(), AllocationDevice::CPU);
assert_eq!(**cpu_tensor.shape(), [1, 3, 224, 224]);Sourcepub fn to_async(
&self,
device: AllocationDevice,
device_id: i32,
) -> Result<Value<Type>>
Available on non-WebAssembly only.
pub fn to_async( &self, device: AllocationDevice, device_id: i32, ) -> Result<Value<Type>>
Asynchronously copies the contents of this tensor to another device.
Unlike Tensor::to, the device’s stream will not be synchronized (like via
cudaStreamSynchronize); thus this function is most useful for host-to-device transfers.
let cuda_tensor = tensor.to_async(AllocationDevice::CUDA, 0)?;
// pass to other CUDA code, or to session inputSourcepub fn copy_into(&self, target: &mut Value<Type>) -> Result<()>
Available on non-WebAssembly only.
pub fn copy_into(&self, target: &mut Value<Type>) -> Result<()>
Copies the contents of this tensor to another tensor potentially residing on a separate device.
let cuda_allocator = Allocator::new(
&session,
MemoryInfo::new(AllocationDevice::CUDA, 0, AllocatorType::Device, MemoryType::Default)?
)?;
let cuda_tensor = Tensor::<f32>::new(&cuda_allocator, [1_usize, 3, 224, 224])?;
let mut cpu_tensor = Tensor::<f32>::new(&Allocator::default(), [1_usize, 3, 224, 224])?;
cuda_tensor.copy_into(&mut cpu_tensor)?;Sourcepub fn copy_into_async(&self, target: &mut Value<Type>) -> Result<()>
Available on non-WebAssembly only.
pub fn copy_into_async(&self, target: &mut Value<Type>) -> Result<()>
Asynchronously copies the contents of this tensor to another tensor.
Unlike Tensor::copy_into, the device’s stream will not be synchronized
(like via cudaStreamSynchronize); thus this function is most useful for host-to-device transfers.
let cpu_tensor = Tensor::<f32>::new(&Allocator::default(), [1_usize, 3, 224, 224])?;
let cuda_allocator = Allocator::new(
&session,
MemoryInfo::new(AllocationDevice::CUDA, 0, AllocatorType::Device, MemoryType::Default)?
)?;
let mut cuda_tensor = Tensor::<f32>::new(&cuda_allocator, [1_usize, 3, 224, 224])?;
cpu_tensor.copy_into_async(&mut cuda_tensor)?;Source§impl Value<TensorValueType<String>>
impl Value<TensorValueType<String>>
Sourcepub fn from_string_array<T: Utf8Data>(
input: impl TensorArrayData<T>,
) -> Result<Tensor<String>>
pub fn from_string_array<T: Utf8Data>( input: impl TensorArrayData<T>, ) -> Result<Tensor<String>>
Construct a Tensor from an array of strings.
String tensors can be created from:
- (with feature ndarray) a shared reference to an ndarray::CowArray (&CowArray<'_, T, D>) or ndarray::Array (&Array<T, D>);
- (with feature ndarray) an ndarray::ArcArray or ndarray::ArrayView;
- a tuple of (shape, data), where shape is one of Vec<I>, [I; N] or &[I] (where I is i64 or usize), and data is one of &[T], Arc<[T]>, or Arc<Box<[T]>>.
// Create a string tensor from a raw data vector
let data = vec!["hello", "world"];
let value = Tensor::from_string_array(([data.len()], &*data))?;
// Create a string tensor from an `ndarray::Array`
#[cfg(feature = "ndarray")]
let value = Tensor::from_string_array(&ndarray::Array::from_shape_vec((1,), vec!["document".to_owned()]).unwrap())?;Source§impl<T: PrimitiveTensorElementType + Debug> Value<TensorValueType<T>>
impl<T: PrimitiveTensorElementType + Debug> Value<TensorValueType<T>>
Sourcepub fn new(allocator: &Allocator, shape: impl Into<Shape>) -> Result<Tensor<T>>
pub fn new(allocator: &Allocator, shape: impl Into<Shape>) -> Result<Tensor<T>>
Construct a tensor via a given allocator with a given shape and datatype. The data in the tensor will be uninitialized.
This can be used to create a tensor with data on a certain device. For example, to create a tensor with pinned (CPU) memory for use with CUDA:
let allocator = Allocator::new(
&session,
MemoryInfo::new(AllocationDevice::CUDA_PINNED, 0, AllocatorType::Device, MemoryType::CPUInput)?
)?;
let mut img_input = Tensor::<f32>::new(&allocator, [1_usize, 128, 128, 3])?;Sourcepub fn from_array(input: impl OwnedTensorArrayData<T>) -> Result<Tensor<T>>
pub fn from_array(input: impl OwnedTensorArrayData<T>) -> Result<Tensor<T>>
Construct an owned tensor from an array of data.
Owned tensors can be created from:
- (with feature ndarray) an owned ndarray::Array, or
- a tuple of (shape, data), where shape is one of Vec<I>, [I; N] or &[I] (where I is i64 or usize), and data is one of Vec<T> or Box<[T]>.
// Create a tensor from a raw data vector
let tensor = Tensor::from_array(([1usize, 2, 3], vec![1.0_f32, 2.0, 3.0, 4.0, 5.0, 6.0].into_boxed_slice()))?;
// Create a tensor from an `ndarray::Array`
#[cfg(feature = "ndarray")]
let tensor = Tensor::from_array(ndarray::Array4::<f32>::zeros((1, 16, 16, 3)))?;When passing an ndarray::Array, the array may be copied in order to convert it to a contiguous layout if it
is not already. When creating a tensor from a Vec or boxed slice, the data is assumed to already be in
contiguous layout.
Creating string tensors requires a separate method; see Tensor::from_string_array.
Source§impl<Type: TensorValueTypeMarker + ?Sized> Value<Type>
impl<Type: TensorValueTypeMarker + ?Sized> Value<Type>
Sourcepub fn try_extract_array<T: PrimitiveTensorElementType>(
&self,
) -> Result<ArrayViewD<'_, T>>
Available on crate feature ndarray only.
pub fn try_extract_array<T: PrimitiveTensorElementType>( &self, ) -> Result<ArrayViewD<'_, T>>
ndarray only.Attempt to extract the underlying data of type T into a read-only ndarray::ArrayView.
See also:
- the mutable counterpart of this function, Tensor::try_extract_array_mut.
- the infallible counterpart, Tensor::extract_array, for typed Tensor<T>s.
- the alternative function for strings, Tensor::try_extract_string_array.
let array = ndarray::Array4::<f32>::ones((1, 16, 16, 3));
let value = TensorRef::from_array_view(array.view())?.into_dyn();
let extracted = value.try_extract_array::<f32>()?;
assert_eq!(array.view().into_dyn(), extracted);
§Errors
May return an error if:
- This is a DynValue, and the value is not actually a tensor. (For typed Tensors, use the infallible Tensor::extract_array instead.)
- The provided type T does not match the tensor’s element type.
- The tensor’s data is not allocated in CPU memory.
Sourcepub fn try_extract_scalar<T: PrimitiveTensorElementType + Copy>(
&self,
) -> Result<T>
pub fn try_extract_scalar<T: PrimitiveTensorElementType + Copy>( &self, ) -> Result<T>
Attempt to extract the scalar from a tensor of type T.
let value = Tensor::from_array(((), vec![3.14_f32]))?.into_dyn();
let extracted = value.try_extract_scalar::<f32>()?;
assert_eq!(extracted, 3.14);
§Errors
May return an error if:
- The tensor is not 0-dimensional.
- The provided type T does not match the tensor’s element type.
- This is a DynValue, and the value is not actually a tensor.
- The tensor’s data is not allocated in CPU memory.
Sourcepub fn try_extract_array_mut<T: PrimitiveTensorElementType>(
&mut self,
) -> Result<ArrayViewMutD<'_, T>>
Available on crate feature ndarray only.
pub fn try_extract_array_mut<T: PrimitiveTensorElementType>( &mut self, ) -> Result<ArrayViewMutD<'_, T>>
ndarray only. Attempt to extract the underlying data of type T into a mutable ndarray::ArrayViewMut.
See also the infallible counterpart, Tensor::extract_array_mut, for typed Tensor<T>s.
let mut array = ndarray::Array4::<f32>::ones((1, 16, 16, 3));
{
let mut value = TensorRefMut::from_array_view_mut(array.view_mut())?.into_dyn();
let mut extracted = value.try_extract_array_mut::<f32>()?;
extracted[[0, 0, 0, 1]] = 0.0;
}
assert_eq!(array[[0, 0, 0, 1]], 0.0);
§Errors
May return an error if:
- This is a DynValue, and the value is not actually a tensor. (For typed Tensors, use the infallible Tensor::extract_array_mut instead.)
- The provided type T does not match the tensor’s element type.
Sourcepub fn try_extract_tensor<T: PrimitiveTensorElementType>(
&self,
) -> Result<(&Shape, &[T])>
pub fn try_extract_tensor<T: PrimitiveTensorElementType>( &self, ) -> Result<(&Shape, &[T])>
Attempt to extract the underlying data into a view tuple, consisting of the tensor’s Shape and an
immutable view into its data.
See also:
- the mutable counterpart of this function, Tensor::try_extract_tensor_mut.
- the infallible counterpart, Tensor::extract_tensor, for typed Tensor<T>s.
- the alternative function for strings, Tensor::try_extract_strings.
let array = vec![1_i64, 2, 3, 4, 5];
let value = Tensor::from_array(([array.len()], array.clone().into_boxed_slice()))?.into_dyn();
let (extracted_shape, extracted_data) = value.try_extract_tensor::<i64>()?;
assert_eq!(extracted_data, &array);
assert_eq!(**extracted_shape, [5]);
§Errors
May return an error if:
- This is a DynValue, and the value is not actually a tensor. (For typed Tensors, use the infallible Tensor::extract_tensor instead.)
- The provided type T does not match the tensor’s element type.
Sourcepub fn try_extract_tensor_mut<T: PrimitiveTensorElementType>(
&mut self,
) -> Result<(&Shape, &mut [T])>
pub fn try_extract_tensor_mut<T: PrimitiveTensorElementType>( &mut self, ) -> Result<(&Shape, &mut [T])>
Attempt to extract the underlying data into a view tuple, consisting of the tensor’s shape and a mutable view into its data.
See also the infallible counterpart, Tensor::extract_tensor_mut, for typed Tensor<T>s.
let array = vec![1_i64, 2, 3, 4, 5];
let mut value = Tensor::from_array(([array.len()], array.clone().into_boxed_slice()))?.into_dyn();
let (extracted_shape, extracted_data) = value.try_extract_tensor_mut::<i64>()?;
assert_eq!(extracted_data, &array);
assert_eq!(**extracted_shape, [5]);
§Errors
May return an error if:
- This is a DynValue, and the value is not actually a tensor. (For typed Tensors, use the infallible Tensor::extract_tensor_mut instead.)
- The provided type T does not match the tensor’s element type.
Sourcepub fn try_extract_string_array(&self) -> Result<ArrayD<String>>
Available on crate feature ndarray only.
pub fn try_extract_string_array(&self) -> Result<ArrayD<String>>
ndarray only.Attempt to extract the underlying data into a Rust ndarray.
let array = ndarray::Array1::from_vec(vec!["hello", "world"]);
let tensor = Tensor::from_string_array(&array)?.into_dyn();
let extracted = tensor.try_extract_string_array()?;
assert_eq!(array.into_dyn(), extracted);Sourcepub fn try_extract_strings(&self) -> Result<(&Shape, Vec<String>)>
pub fn try_extract_strings(&self) -> Result<(&Shape, Vec<String>)>
Attempt to extract the underlying string data into a tuple, consisting of the tensor’s shape and
an owned Vec of its data.
let array = vec!["hello", "world"];
let tensor = Tensor::from_string_array(([array.len()], &*array))?.into_dyn();
let (extracted_shape, extracted_data) = tensor.try_extract_strings()?;
assert_eq!(extracted_data, array);
assert_eq!(**extracted_shape, [2]);Sourcepub fn shape(&self) -> &Shape
pub fn shape(&self) -> &Shape
Returns the shape of the tensor.
let tensor = Tensor::<f32>::new(&allocator, [1_usize, 128, 128, 3])?;
assert_eq!(**tensor.shape(), [1, 128, 128, 3]);pub fn data_type(&self) -> &TensorElementType
Source§impl<T: PrimitiveTensorElementType + Debug> Value<TensorValueType<T>>
impl<T: PrimitiveTensorElementType + Debug> Value<TensorValueType<T>>
Sourcepub fn extract_array(&self) -> ArrayViewD<'_, T>
Available on crate feature ndarray only.
pub fn extract_array(&self) -> ArrayViewD<'_, T>
ndarray only.Extracts the underlying data into a read-only ndarray::ArrayView.
let array = ndarray::Array4::<f32>::ones((1, 16, 16, 3));
let tensor = TensorRef::from_array_view(&array)?;
let extracted = tensor.extract_array();
assert_eq!(array.view().into_dyn(), extracted);Sourcepub fn extract_array_mut(&mut self) -> ArrayViewMutD<'_, T>
Available on crate feature ndarray only.
pub fn extract_array_mut(&mut self) -> ArrayViewMutD<'_, T>
ndarray only.Extracts the underlying data into a mutable ndarray::ArrayViewMut.
let mut array = ndarray::Array4::<f32>::ones((1, 16, 16, 3));
{
let mut tensor = TensorRefMut::from_array_view_mut(array.view_mut())?;
let mut extracted = tensor.extract_array_mut();
extracted[[0, 0, 0, 1]] = 0.0;
}
assert_eq!(array[[0, 0, 0, 1]], 0.0);Sourcepub fn extract_tensor(&self) -> (&Shape, &[T])
pub fn extract_tensor(&self) -> (&Shape, &[T])
Extracts the underlying data into a view tuple, consisting of the tensor’s Shape and an immutable
view into its data.
let array = vec![1_i64, 2, 3, 4, 5];
let tensor = TensorRef::from_array_view(([array.len()], &*array))?;
let (extracted_shape, extracted_data) = tensor.extract_tensor();
assert_eq!(extracted_data, &array);
assert_eq!(**extracted_shape, [5]);Sourcepub fn extract_tensor_mut(&mut self) -> (&Shape, &mut [T])
pub fn extract_tensor_mut(&mut self) -> (&Shape, &mut [T])
Extracts the underlying data into a view tuple, consisting of the tensor’s shape and a mutable view into its data.
let mut original_array = vec![1_i64, 2, 3, 4, 5];
{
let mut tensor = TensorRefMut::from_array_view_mut(([original_array.len()], &mut *original_array))?;
let (extracted_shape, extracted_data) = tensor.extract_tensor_mut();
extracted_data[2] = 42;
}
assert_eq!(original_array, [1, 2, 42, 4, 5]);Source§impl Value<DynTensorValueType>
impl Value<DynTensorValueType>
Sourcepub fn new(
allocator: &Allocator,
data_type: TensorElementType,
shape: impl Into<Shape>,
) -> Result<DynTensor>
pub fn new( allocator: &Allocator, data_type: TensorElementType, shape: impl Into<Shape>, ) -> Result<DynTensor>
Construct a tensor via a given allocator with a given shape and datatype. The data in the tensor will be uninitialized.
This can be used to create a tensor with data on a certain device. For example, to create a tensor with pinned (CPU) memory for use with CUDA:
let allocator = Allocator::new(
&session,
MemoryInfo::new(AllocationDevice::CUDA_PINNED, 0, AllocatorType::Device, MemoryType::CPUInput)?
)?;
let mut img_input = DynTensor::new(&allocator, TensorElementType::Float32, [1_usize, 128, 128, 3])?;Source§impl<Type: DefiniteTensorValueTypeMarker + ?Sized> Value<Type>
impl<Type: DefiniteTensorValueTypeMarker + ?Sized> Value<Type>
Sourcepub fn data_ptr_mut(&mut self) -> *mut c_void
pub fn data_ptr_mut(&mut self) -> *mut c_void
Returns a mutable pointer to the tensor’s data. The pointer may be null in the case of zero-sized tensors.
It’s important to note that the resulting pointer may not point to CPU-accessible memory. In the case of a
tensor created on a different EP device, e.g. via Tensor::new, the pointer returned by this function may be
a CUDA pointer, which would require a separate crate (like cudarc) to access.
Use Tensor::memory_info & MemoryInfo::allocation_device to check which device the data resides on before
accessing it.
let mut tensor = Tensor::<i64>::from_array((vec![5], vec![0, 1, 2, 3, 4]))?;
let ptr = tensor.data_ptr_mut().cast::<i64>();
unsafe {
*ptr.add(3) = 42;
};
let (_, extracted) = tensor.extract_tensor();
assert_eq!(&extracted, &[0, 1, 2, 42, 4]);Sourcepub fn data_ptr(&self) -> *const c_void
pub fn data_ptr(&self) -> *const c_void
Returns an immutable pointer to the tensor’s underlying data. The pointer may be null in the case of zero-sized tensors.
It’s important to note that the resulting pointer may not point to CPU-accessible memory. In the case of a
tensor created on a different EP device, e.g. via Tensor::new, the pointer returned by this function may be
a CUDA pointer, which would require a separate crate (like cudarc) to access.
Use Tensor::memory_info & MemoryInfo::allocation_device to check which device the data resides on before
accessing it.
let tensor = Tensor::<i64>::from_array((vec![5], vec![0, 1, 2, 3, 4]))?;
let ptr = tensor.data_ptr().cast::<i64>();
assert_eq!(unsafe { *ptr.add(3) }, 3);Sourcepub fn memory_info(&self) -> &MemoryInfo
pub fn memory_info(&self) -> &MemoryInfo
Returns information about the device this tensor is allocated on.
let tensor = Tensor::<f32>::new(&Allocator::default(), [1_usize, 3, 224, 224])?;
// Tensors are allocated on CPU by default.
assert_eq!(tensor.memory_info().allocation_device(), AllocationDevice::CPU);
let cuda_allocator = Allocator::new(
&session,
MemoryInfo::new(AllocationDevice::CUDA, 0, AllocatorType::Device, MemoryType::Default)?
)?;
let tensor = Tensor::<f32>::new(&cuda_allocator, [1_usize, 3, 224, 224])?;
assert_eq!(tensor.memory_info().allocation_device(), AllocationDevice::CUDA);Source§impl<T: IntoTensorElementType + Debug> Value<TensorValueType<T>>
impl<T: IntoTensorElementType + Debug> Value<TensorValueType<T>>
Sourcepub fn upcast_ref(&self) -> DynTensorRef<'_>
pub fn upcast_ref(&self) -> DynTensorRef<'_>
Creates a type-erased DynTensorRef from a strongly-typed Tensor<T>.
let tensor = Tensor::<f32>::new(&Allocator::default(), [1_usize, 3, 224, 224])?;
let tensor_dyn = tensor.upcast_ref();
let (_, original_extract) = tensor.extract_tensor();
let (_, ref_extract) = tensor_dyn.try_extract_tensor::<f32>()?;
assert_eq!(original_extract, ref_extract);Sourcepub fn upcast_mut(&mut self) -> DynTensorRefMut<'_>
pub fn upcast_mut(&mut self) -> DynTensorRefMut<'_>
Converts from a strongly-typed Tensor<T> to a mutable reference to a type-erased DynTensor.
let mut tensor = Tensor::<i64>::from_array((vec![5], vec![1, 2, 3, 4, 5]))?;
let mut tensor_dyn = tensor.upcast_mut();
let (_, mut_view) = tensor_dyn.try_extract_tensor_mut::<i64>()?;
mut_view[3] = 0;
let (_, original_view) = tensor.extract_tensor();
assert_eq!(original_view, &[1, 2, 3, 0, 5]);Source§impl<Type: ValueTypeMarker + ?Sized> Value<Type>
impl<Type: ValueTypeMarker + ?Sized> Value<Type>
Sourcepub unsafe fn from_ptr(
ptr: NonNull<OrtValue>,
session: Option<Arc<SharedSessionInner>>,
) -> Value<Type>
pub unsafe fn from_ptr( ptr: NonNull<OrtValue>, session: Option<Arc<SharedSessionInner>>, ) -> Value<Type>
Construct a Value from a C++ ort_sys::OrtValue pointer.
If the value belongs to a session (i.e. if it is the result of an inference run), you must provide the
SharedSessionInner (acquired from Session::inner). This ensures the
session is not dropped while any values owned by it are still alive.
§Safety
- ptr must be a valid pointer to an ort_sys::OrtValue.
- session must be Some for values returned from a session.
Sourcepub fn view_mut(&mut self) -> ValueRefMut<'_, Type>
pub fn view_mut(&mut self) -> ValueRefMut<'_, Type>
Create a mutable view of this value’s data.
Source§impl Value<DynValueTypeMarker>
impl Value<DynValueTypeMarker>
Sourcepub fn downcast<OtherType: ValueTypeMarker + DowncastableTarget + ?Sized>(
self,
) -> Result<Value<OtherType>>
pub fn downcast<OtherType: ValueTypeMarker + DowncastableTarget + ?Sized>( self, ) -> Result<Value<OtherType>>
Sourcepub fn downcast_ref<OtherType: ValueTypeMarker + DowncastableTarget + ?Sized>(
&self,
) -> Result<ValueRef<'_, OtherType>>
pub fn downcast_ref<OtherType: ValueTypeMarker + DowncastableTarget + ?Sized>( &self, ) -> Result<ValueRef<'_, OtherType>>
Attempts to downcast a dynamic value (like DynValue or DynTensor) to a more strongly typed reference
variant, like TensorRef<T>.
Sourcepub fn downcast_mut<OtherType: ValueTypeMarker + DowncastableTarget + ?Sized>(
&mut self,
) -> Result<ValueRefMut<'_, OtherType>>
pub fn downcast_mut<OtherType: ValueTypeMarker + DowncastableTarget + ?Sized>( &mut self, ) -> Result<ValueRefMut<'_, OtherType>>
Attempts to downcast a dynamic value (like DynValue or DynTensor) to a more strongly typed
mutable-reference variant, like TensorRefMut<T>.
Trait Implementations§
Source§impl<Type: ValueTypeMarker + ?Sized> AsPointer for Value<Type>
impl<Type: ValueTypeMarker + ?Sized> AsPointer for Value<Type>
Source§impl<Type: DefiniteTensorValueTypeMarker + ?Sized> Clone for Value<Type>
Available on non-WebAssembly only.
impl<Type: DefiniteTensorValueTypeMarker + ?Sized> Clone for Value<Type>
Source§fn clone(&self) -> Self
fn clone(&self) -> Self
Creates a copy of this tensor and its data on the same device it resides on.
let array = vec![1_i64, 2, 3, 4, 5];
let tensor = Tensor::from_array(([array.len()], array.into_boxed_slice()))?;
let new_tensor = tensor.clone();
// same data
assert_eq!(tensor.extract_tensor(), new_tensor.extract_tensor());
// different allocations
assert_ne!(tensor.ptr(), new_tensor.ptr());
assert_ne!(tensor.data_ptr(), new_tensor.data_ptr());1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more