//! Devices on which tensor computations are run.
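//!
//! A minimal usage sketch (this assumes the `Device` and `Cuda` types defined
//! below are re-exported at the crate root, e.g. as `tch::Device` and
//! `tch::Cuda`; adjust the paths if the crate layout differs):
//!
//! ```no_run
//! use tch::{Cuda, Device};
//!
//! // Prefer the first CUDA device when one is present, otherwise fall back to the CPU.
//! let device = Device::cuda_if_available();
//! if device.is_cuda() {
//!     // Make CUDA-side random number generation reproducible across runs.
//!     Cuda::manual_seed(42);
//! }
//! println!("running on {:?}", device);
//! ```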

/// A torch device.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum Device {
    /// The main CPU device.
    Cpu,
    /// A CUDA device with the given device index.
    Cuda(usize),
    /// The main MPS device.
    Mps,
}

/// CUDA-related helper functions.
pub enum Cuda {}
impl Cuda {
    /// Returns the number of CUDA devices available.
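    ///
    /// # Examples
    ///
    /// A small sketch of enumerating devices (assuming `Cuda` and `Device` are
    /// re-exported at the crate root, e.g. as `tch::{Cuda, Device}`):
    ///
    /// ```no_run
    /// use tch::{Cuda, Device};
    ///
    /// // Build a `Device::Cuda` handle for every GPU that libtorch reports.
    /// let devices: Vec<Device> =
    ///     (0..Cuda::device_count()).map(|i| Device::Cuda(i as usize)).collect();
    /// println!("{} CUDA device(s): {:?}", devices.len(), devices);
    /// ```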
    pub fn device_count() -> i64 {
        let res = unsafe_torch!(torch_sys::cuda::atc_cuda_device_count());
        i64::from(res)
    }

    /// Returns true if at least one CUDA device is available.
    pub fn is_available() -> bool {
        unsafe_torch!(torch_sys::cuda::atc_cuda_is_available()) != 0
    }

    /// Returns true if both CUDA and cuDNN are available.
    pub fn cudnn_is_available() -> bool {
        unsafe_torch!(torch_sys::cuda::atc_cudnn_is_available()) != 0
    }

    /// Sets the seed for the current GPU.
    ///
    /// # Arguments
    ///
    /// * `seed` - An unsigned 64-bit integer to be used as the seed.
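    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming `Cuda` is re-exported at the crate root,
    /// e.g. as `tch::Cuda`):
    ///
    /// ```no_run
    /// use tch::Cuda;
    ///
    /// // Seed the current GPU so that CUDA-side random number generation is reproducible.
    /// if Cuda::is_available() {
    ///     Cuda::manual_seed(42);
    /// }
    /// ```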
    pub fn manual_seed(seed: u64) {
        unsafe_torch!(torch_sys::cuda::atc_manual_seed(seed));
    }

    /// Sets the seed for all available GPUs.
    ///
    /// # Arguments
    ///
    /// * `seed` - An unsigned 64-bit integer to be used as the seed.
    pub fn manual_seed_all(seed: u64) {
        unsafe_torch!(torch_sys::cuda::atc_manual_seed_all(seed));
    }

    /// Waits for all kernels in all streams on a CUDA device to complete.
    ///
    /// # Arguments
    ///
    /// * `device_index` - A signed 64-bit integer indicating which device to wait for.
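    ///
    /// # Examples
    ///
    /// A sketch of timing GPU work (assuming `Cuda` is re-exported at the
    /// crate root, e.g. as `tch::Cuda`). CUDA kernels are launched
    /// asynchronously, so synchronizing before reading the clock is what makes
    /// the measurement meaningful:
    ///
    /// ```no_run
    /// use std::time::Instant;
    /// use tch::Cuda;
    ///
    /// let start = Instant::now();
    /// // ... launch some kernels on device 0 here ...
    /// Cuda::synchronize(0); // wait for all queued work on device 0 to finish
    /// println!("elapsed: {:?}", start.elapsed());
    /// ```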
    pub fn synchronize(device_index: i64) {
        unsafe_torch!(torch_sys::cuda::atc_synchronize(device_index));
    }

    /// Returns true if cuDNN is enabled by the user.
    ///
    /// This does not indicate whether cuDNN is actually usable.
    pub fn user_enabled_cudnn() -> bool {
        unsafe_torch!(torch_sys::cuda::atc_user_enabled_cudnn()) != 0
    }

    /// Enables or disables cuDNN.
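    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming `Cuda` is re-exported at the crate root,
    /// e.g. as `tch::Cuda`):
    ///
    /// ```no_run
    /// use tch::Cuda;
    ///
    /// // Turn cuDNN off, e.g. to rule it out while debugging numerical differences.
    /// Cuda::set_user_enabled_cudnn(false);
    /// assert!(!Cuda::user_enabled_cudnn());
    /// ```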
    pub fn set_user_enabled_cudnn(b: bool) {
        unsafe_torch!(torch_sys::cuda::atc_set_user_enabled_cudnn(i32::from(b)))
    }

    /// Sets the cuDNN benchmark mode.
    ///
    /// When enabled, cuDNN benchmarks multiple algorithms during the first
    /// runs and then uses the fastest one in the following runs. This can
    /// result in significant performance improvements.
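    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming `Cuda` is re-exported at the crate root,
    /// e.g. as `tch::Cuda`); benchmark mode mainly pays off when input shapes
    /// stay constant between iterations:
    ///
    /// ```no_run
    /// use tch::Cuda;
    ///
    /// if Cuda::cudnn_is_available() {
    ///     // Let cuDNN pick the fastest algorithms for the fixed input shapes.
    ///     Cuda::cudnn_set_benchmark(true);
    /// }
    /// ```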
    pub fn cudnn_set_benchmark(b: bool) {
        unsafe_torch!(torch_sys::cuda::atc_set_benchmark_cudnn(i32::from(b)))
    }
}

impl Device {
    /// Converts the device to the integer encoding used by the C API:
    /// -1 for the CPU, -2 for MPS, and the device index for CUDA devices.
    pub(super) fn c_int(self) -> libc::c_int {
        match self {
            Device::Cpu => -1,
            Device::Cuda(device_index) => device_index as libc::c_int,
            Device::Mps => -2,
        }
    }

    /// Builds a device back from the integer encoding used by the C API.
    pub(super) fn of_c_int(v: libc::c_int) -> Self {
        match v {
            -1 => Device::Cpu,
            -2 => Device::Mps,
            index if index >= 0 => Device::Cuda(index as usize),
            _ => panic!("unexpected device {}", v),
        }
    }

    /// Returns a CUDA device if available, otherwise defaults to the CPU.
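    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming `Device` is re-exported at the crate root,
    /// e.g. as `tch::Device`):
    ///
    /// ```no_run
    /// use tch::Device;
    ///
    /// // Fall back to the CPU on machines without a CUDA-capable GPU.
    /// let device = Device::cuda_if_available();
    /// assert!(device == Device::Cuda(0) || device == Device::Cpu);
    /// ```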
    pub fn cuda_if_available() -> Device {
        if Cuda::is_available() {
            Device::Cuda(0)
        } else {
            Device::Cpu
        }
    }

    /// Returns true if the device is a CUDA device.
    pub fn is_cuda(self) -> bool {
        match self {
            Device::Cuda(_) => true,
            Device::Cpu => false,
            Device::Mps => false,
        }
    }
}