Trait opencv::hub_prelude::DeviceInfoTrait
Class providing functionality for querying the specified GPU properties.
Required methods
pub fn as_raw_DeviceInfo(&self) -> *const c_void
[src]
pub fn as_raw_mut_DeviceInfo(&mut self) -> *mut c_void
[src]
Provided methods
pub fn device_id(&self) -> Result<i32>
[src]
Returns system index of the CUDA device starting with 0.
pub fn name(&self) -> Result<String>
[src]
ASCII string identifying device
pub fn total_global_mem(&self) -> Result<size_t>
[src]
global memory available on device in bytes
pub fn shared_mem_per_block(&self) -> Result<size_t>
[src]
shared memory available per block in bytes
pub fn regs_per_block(&self) -> Result<i32>
[src]
32-bit registers available per block
pub fn warp_size(&self) -> Result<i32>
[src]
warp size in threads
pub fn mem_pitch(&self) -> Result<size_t>
[src]
maximum pitch in bytes allowed by memory copies
pub fn max_threads_per_block(&self) -> Result<i32>
[src]
maximum number of threads per block
pub fn max_threads_dim(&self) -> Result<Vec3i>
[src]
maximum size of each dimension of a block
pub fn max_grid_size(&self) -> Result<Vec3i>
[src]
maximum size of each dimension of a grid
pub fn clock_rate(&self) -> Result<i32>
[src]
clock frequency in kilohertz
pub fn total_const_mem(&self) -> Result<size_t>
[src]
constant memory available on device in bytes
pub fn major_version(&self) -> Result<i32>
[src]
major compute capability
pub fn minor_version(&self) -> Result<i32>
[src]
minor compute capability
pub fn texture_alignment(&self) -> Result<size_t>
[src]
alignment requirement for textures
pub fn texture_pitch_alignment(&self) -> Result<size_t>
[src]
pitch alignment requirement for texture references bound to pitched memory
pub fn multi_processor_count(&self) -> Result<i32>
[src]
number of multiprocessors on device
pub fn kernel_exec_timeout_enabled(&self) -> Result<bool>
[src]
specifies whether there is a run time limit on kernels
pub fn integrated(&self) -> Result<bool>
[src]
device is integrated as opposed to discrete
pub fn can_map_host_memory(&self) -> Result<bool>
[src]
device can map host memory with cudaHostAlloc/cudaHostGetDevicePointer
pub fn compute_mode(&self) -> Result<DeviceInfo_ComputeMode>
[src]
compute mode
pub fn max_texture1_d(&self) -> Result<i32>
[src]
maximum 1D texture size
pub fn max_texture1_d_mipmap(&self) -> Result<i32>
[src]
maximum 1D mipmapped texture size
pub fn max_texture1_d_linear(&self) -> Result<i32>
[src]
maximum size for 1D textures bound to linear memory
pub fn max_texture_2d(&self) -> Result<Vec2i>
[src]
maximum 2D texture dimensions
pub fn max_texture2_d_mipmap(&self) -> Result<Vec2i>
[src]
maximum 2D mipmapped texture dimensions
pub fn max_texture2_d_linear(&self) -> Result<Vec3i>
[src]
maximum dimensions (width, height, pitch) for 2D textures bound to pitched memory
pub fn max_texture2_d_gather(&self) -> Result<Vec2i>
[src]
maximum 2D texture dimensions if texture gather operations have to be performed
pub fn max_texture_3d(&self) -> Result<Vec3i>
[src]
maximum 3D texture dimensions
pub fn max_texture_cubemap(&self) -> Result<i32>
[src]
maximum Cubemap texture dimensions
pub fn max_texture1_d_layered(&self) -> Result<Vec2i>
[src]
maximum 1D layered texture dimensions
pub fn max_texture2_d_layered(&self) -> Result<Vec3i>
[src]
maximum 2D layered texture dimensions
pub fn max_texture_cubemap_layered(&self) -> Result<Vec2i>
[src]
maximum Cubemap layered texture dimensions
pub fn max_surface1_d(&self) -> Result<i32>
[src]
maximum 1D surface size
pub fn max_surface_2d(&self) -> Result<Vec2i>
[src]
maximum 2D surface dimensions
pub fn max_surface_3d(&self) -> Result<Vec3i>
[src]
maximum 3D surface dimensions
pub fn max_surface1_d_layered(&self) -> Result<Vec2i>
[src]
maximum 1D layered surface dimensions
pub fn max_surface2_d_layered(&self) -> Result<Vec3i>
[src]
maximum 2D layered surface dimensions
pub fn max_surface_cubemap(&self) -> Result<i32>
[src]
maximum Cubemap surface dimensions
pub fn max_surface_cubemap_layered(&self) -> Result<Vec2i>
[src]
maximum Cubemap layered surface dimensions
pub fn surface_alignment(&self) -> Result<size_t>
[src]
alignment requirements for surfaces
pub fn concurrent_kernels(&self) -> Result<bool>
[src]
device can possibly execute multiple kernels concurrently
pub fn ecc_enabled(&self) -> Result<bool>
[src]
device has ECC support enabled
pub fn pci_bus_id(&self) -> Result<i32>
[src]
PCI bus ID of the device
pub fn pci_device_id(&self) -> Result<i32>
[src]
PCI device ID of the device
pub fn pci_domain_id(&self) -> Result<i32>
[src]
PCI domain ID of the device
pub fn tcc_driver(&self) -> Result<bool>
[src]
true if device is a Tesla device using TCC driver, false otherwise
pub fn async_engine_count(&self) -> Result<i32>
[src]
number of asynchronous engines
pub fn unified_addressing(&self) -> Result<bool>
[src]
device shares a unified address space with the host
pub fn memory_clock_rate(&self) -> Result<i32>
[src]
peak memory clock frequency in kilohertz
pub fn memory_bus_width(&self) -> Result<i32>
[src]
global memory bus width in bits
pub fn l2_cache_size(&self) -> Result<i32>
[src]
size of L2 cache in bytes
pub fn max_threads_per_multi_processor(&self) -> Result<i32>
[src]
maximum resident threads per multiprocessor
pub fn query_memory(
&self,
total_memory: &mut size_t,
free_memory: &mut size_t
) -> Result<()>
[src]
gets free and total device memory
pub fn free_memory(&self) -> Result<size_t>
[src]
pub fn total_memory(&self) -> Result<size_t>
[src]
pub fn supports(&self, feature_set: FeatureSet) -> Result<bool>
[src]
Provides information on CUDA feature support.
Parameters
- feature_set: Features to be checked. See cuda::FeatureSet.
This function returns true if the device has the specified CUDA feature. Otherwise, it returns false.
pub fn is_compatible(&self) -> Result<bool>
[src]
Checks the CUDA module and device compatibility.
This function returns true if the CUDA module can be run on the specified device. Otherwise, it returns false.