//! Asynchronous CUDA device management (`async_cuda_core/device.rs`).

1use crate::ffi;
2use crate::runtime::Future;
3
/// Crate-local result alias: success value `T`, failure carries [`crate::error::Error`].
type Result<T> = std::result::Result<T, crate::error::Error>;
5
6/// Returns the number of compute-capable devices.
7///
8/// [CUDA documentation](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html#group__CUDART__DEVICE_1g18808e54893cfcaafefeab31a73cc55f)
9///
10/// # Return value
11///
12/// Number of CUDA devices or error in case of failure.
13pub async fn num_devices() -> Result<usize> {
14    Future::new(ffi::device::num_devices).await
15}
16
/// CUDA device ID.
///
/// Numeric identifier of a CUDA device, as accepted by [`Device::set`] and
/// returned by [`Device::get`].
pub type DeviceId = usize;
19
/// CUDA device.
///
/// Zero-sized type that namespaces the asynchronous device operations
/// ([`Device::get`], [`Device::set`], [`Device::synchronize`],
/// [`Device::memory_info`]).
// Derive the common traits expected of a public zero-sized type
// (Rust API guidelines C-COMMON-TRAITS); all are free for a unit struct.
#[derive(Debug, Clone, Copy)]
pub struct Device;
22
23impl Device {
24    /// Returns which device is currently being used by [`DeviceId`].
25    ///
26    /// [CUDA documentation](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html#group__CUDART__DEVICE_1g80861db2ce7c29b6e8055af8ae01bc78)
27    pub async fn get() -> Result<DeviceId> {
28        Future::new(ffi::device::Device::get).await
29    }
30
31    /// Set device to be used for GPU executions.
32    ///
33    /// [CUDA documentation](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html#group__CUDART__DEVICE_1g159587909ffa0791bbe4b40187a4c6bb)
34    ///
35    /// # Arguments
36    ///
37    /// * `id` - Device ID to use.
38    pub async fn set(id: DeviceId) -> Result<()> {
39        Future::new(move || ffi::device::Device::set(id)).await
40    }
41
42    /// Synchronize the current CUDA device.
43    ///
44    /// [CUDA documentation](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html#group__CUDART__DEVICE_1g10e20b05a95f638a4071a655503df25d)
45    ///
46    /// # Warning
47    ///
48    /// Note that this operation will block all device operations, even from other processes while
49    /// running. Use this operation sparingly.
50    pub async fn synchronize() -> Result<()> {
51        Future::new(ffi::device::Device::synchronize).await
52    }
53
54    /// Gets free and total device memory.
55    ///
56    /// [CUDA documentation](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__MEMORY.html#group__CUDART__MEMORY_1g376b97f5ab20321ca46f7cfa9511b978)
57    ///
58    /// # Return value
59    ///
60    /// Total amount of memory and free memory in bytes.
61    pub async fn memory_info() -> Result<MemoryInfo> {
62        Future::new(ffi::device::Device::memory_info).await
63    }
64}
65
/// CUDA device memory information.
///
/// Snapshot of device memory as reported by [`Device::memory_info`].
// Both fields are `usize`, so `Eq` and `Hash` are derivable for free;
// deriving only `PartialEq` here trips clippy's `derive_partial_eq_without_eq`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct MemoryInfo {
    /// Amount of free device memory in bytes.
    pub free: usize,
    /// Total amount of device memory in bytes.
    pub total: usize,
}
74
#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_num_devices() {
        let count = num_devices().await.unwrap();
        assert!(count > 0);
    }

    #[tokio::test]
    async fn test_get_device() {
        assert_eq!(Device::get().await.unwrap(), 0);
    }

    #[tokio::test]
    async fn test_set_device() {
        Device::set(0).await.unwrap();
        assert_eq!(Device::get().await.unwrap(), 0);
    }

    #[tokio::test]
    async fn test_synchronize() {
        Device::synchronize().await.unwrap();
    }

    #[tokio::test]
    async fn test_memory_info() {
        let MemoryInfo { free, total } = Device::memory_info().await.unwrap();
        assert!(free > 0);
        assert!(total > 0);
    }
}