//! Devices on which tensor computations are run.
/// A torch device on which tensors are stored and computations are run.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum Device {
    /// The main CPU device.
    Cpu,
    /// A CUDA GPU device, identified by its zero-based device index.
    Cuda(usize),
}
/// Cuda related helper functions.
pub enum Cuda {}

impl Cuda {
    /// Returns the number of GPUs that can be used.
    pub fn device_count() -> i64 {
        let count = unsafe_torch!(torch_sys::atc_cuda_device_count());
        i64::from(count)
    }

    /// Returns true if cuda support is available.
    pub fn is_available() -> bool {
        let flag = unsafe_torch!(torch_sys::atc_cuda_is_available());
        flag != 0
    }

    /// Returns true if cudnn support is available.
    pub fn cudnn_is_available() -> bool {
        let flag = unsafe_torch!(torch_sys::atc_cudnn_is_available());
        flag != 0
    }

    /// Sets cudnn benchmark mode.
    ///
    /// When enabled, cudnn tries several algorithms during the first runs
    /// of the network and then keeps using the fastest one in the
    /// following runs. This can result in significant performance
    /// improvements.
    pub fn cudnn_set_benchmark(b: bool) {
        let flag = if b { 1 } else { 0 };
        unsafe_torch!(torch_sys::atc_set_benchmark_cudnn(flag))
    }
}
impl Device {
    /// Converts this device to the integer convention used by the C
    /// bindings: -1 stands for the CPU, a non-negative value is the index
    /// of a CUDA device.
    pub(super) fn c_int(self) -> libc::c_int {
        match self {
            Device::Cuda(index) => index as libc::c_int,
            Device::Cpu => -1,
        }
    }

    /// Builds a device back from the C integer convention: -1 maps to the
    /// CPU, a non-negative value to the CUDA device with that index.
    ///
    /// # Panics
    /// Panics when `v` is lower than -1.
    pub(super) fn of_c_int(v: libc::c_int) -> Self {
        if v == -1 {
            Device::Cpu
        } else if v >= 0 {
            Device::Cuda(v as usize)
        } else {
            panic!("unexpected device {}", v)
        }
    }

    /// Returns the first GPU device if cuda is available, and the CPU
    /// device otherwise.
    pub fn cuda_if_available() -> Device {
        if Cuda::is_available() {
            Device::Cuda(0)
        } else {
            Device::Cpu
        }
    }

    /// Returns true if this device is a CUDA device.
    pub fn is_cuda(self) -> bool {
        matches!(self, Device::Cuda(_))
    }
}