use core::marker::PhantomData;

use crate::IntoKind;

use super::TchTensor;
use super::element::TchElement;
use burn_backend::backend::{Backend, DeviceId, DeviceOps, ExecutionError};
use burn_backend::ops::IntTensorOps;

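/// Device on which `LibTorch` tensors are allocated and executed.
///
/// CUDA devices carry the index of the target GPU; all other variants are singletons.
///
/// # Example
///
/// An illustrative sketch; the `burn_tch` crate path is an assumption about how this module is
/// exposed, not something defined in this file:
///
/// ```ignore
/// use burn_tch::LibTorchDevice;
///
/// let gpu0 = LibTorchDevice::Cuda(0); // first CUDA GPU
/// let cpu = LibTorchDevice::Cpu;      // default device
/// let mps = LibTorchDevice::Mps;      // Apple Metal Performance Shaders
/// let native: tch::Device = gpu0.into();
/// ```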
#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum LibTorchDevice {
    /// CPU device.
    #[default]
    Cpu,

    /// CUDA device, identified by its index among the CUDA devices visible to the system.
    Cuda(usize),

    /// Metal Performance Shaders device (Apple GPUs).
    Mps,

    /// Vulkan device.
    Vulkan,
}

impl From<LibTorchDevice> for tch::Device {
    #[allow(
        unreachable_code,
        reason = "CUDA branch always panics if the library is missing"
    )]
    fn from(device: LibTorchDevice) -> Self {
        match device {
            LibTorchDevice::Cpu => tch::Device::Cpu,
            LibTorchDevice::Cuda(_num) => {
                // Generated by the build script; it panics if the linked libtorch has no CUDA
                // support (hence the `unreachable_code` allow above).
                include!(concat!(env!("OUT_DIR"), "/tch_gpu_check.rs"));
                tch::Device::Cuda(_num)
            }
            LibTorchDevice::Mps => tch::Device::Mps,
            LibTorchDevice::Vulkan => tch::Device::Vulkan,
        }
    }
}

impl From<tch::Device> for LibTorchDevice {
    fn from(device: tch::Device) -> Self {
        match device {
            tch::Device::Cpu => LibTorchDevice::Cpu,
            tch::Device::Cuda(num) => LibTorchDevice::Cuda(num),
            tch::Device::Mps => LibTorchDevice::Mps,
            tch::Device::Vulkan => LibTorchDevice::Vulkan,
        }
    }
}

impl burn_backend::Device for LibTorchDevice {
    fn from_id(device_id: DeviceId) -> Self {
        // `type_id` encodes the device kind (0 = CUDA, 1 = MPS, 2 = CPU, 3 = Vulkan) and
        // `index_id` the device index, mirroring `to_id` below.
        match device_id.type_id {
            0 => Self::Cuda(device_id.index_id as usize),
            1 => Self::Mps,
            2 => Self::Cpu,
            3 => Self::Vulkan,
            _ => LibTorchDevice::Cpu,
        }
    }

    fn to_id(&self) -> DeviceId {
        match self {
            LibTorchDevice::Cuda(index) => DeviceId::new(0, *index as u32),
            LibTorchDevice::Mps => DeviceId::new(1, 0),
            LibTorchDevice::Cpu => DeviceId::new(2, 0),
            LibTorchDevice::Vulkan => DeviceId::new(3, 0),
        }
    }

    fn device_count(_type_id: u16) -> usize {
        1
    }
}

impl DeviceOps for LibTorchDevice {}

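/// Tensor backend that executes operations with `LibTorch` through the [tch] crate, using the
/// same native libraries as the Python API of PyTorch.
///
/// The element type `E` selects the precision of float tensors (`f32` by default); integer and
/// bool tensors use `i64` and `bool` respectively.
///
/// # Example
///
/// A hedged sketch; the `burn_tch` crate path is an assumption about how this backend is
/// exposed, not something defined in this file:
///
/// ```ignore
/// use burn_tch::{LibTorch, LibTorchDevice};
///
/// type B = LibTorch<f32>;
/// let device = LibTorchDevice::Cuda(0);
/// ```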
#[derive(Clone, Copy, Default, Debug)]
pub struct LibTorch<E = f32> {
    _e: PhantomData<E>,
}

impl<E: TchElement> Backend for LibTorch<E> {
    type Device = LibTorchDevice;

    type FloatTensorPrimitive = TchTensor;
    type FloatElem = E;

    type IntTensorPrimitive = TchTensor;
    type IntElem = i64;

    type BoolTensorPrimitive = TchTensor;
    type BoolElem = bool;

    type QuantizedTensorPrimitive = TchTensor;

    fn seed(_device: &Self::Device, seed: u64) {
        tch::manual_seed(seed as i64);
    }

    fn ad_enabled() -> bool {
        false
    }

    fn name(device: &Self::Device) -> String {
        match device {
            LibTorchDevice::Cpu => "libtorch<cpu>",
            LibTorchDevice::Cuda(_) => "libtorch<cuda>",
            LibTorchDevice::Mps => "libtorch<metal>",
            LibTorchDevice::Vulkan => "libtorch<vulkan>",
        }
        .to_string()
    }

    fn sync(device: &Self::Device) -> Result<(), ExecutionError> {
        match device {
            LibTorchDevice::Cpu => (),
            LibTorchDevice::Cuda(index) => {
                tch::Cuda::synchronize(*index as i64);
            }
            _ => {
                // MPS and Vulkan expose no explicit synchronization API, so force a sync by
                // allocating a tiny tensor on the device and reading it back.
                burn_backend::read_sync(Self::int_into_data(Self::int_zeros(
                    [1].into(),
                    device,
                    E::dtype().into(),
                )))
                .unwrap();
            }
        };

        Ok(())
    }

    fn supports_dtype(_device: &Self::Device, dtype: burn_backend::DType) -> bool {
        dtype.try_into_kind().is_ok()
    }
}
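
#[cfg(test)]
mod tests {
    use super::*;
    use burn_backend::Device as _;

    // Illustrative addition, not part of the original file: a smoke test checking that the
    // `to_id` / `from_id` mapping defined above round-trips every device variant.
    #[test]
    fn device_id_round_trip() {
        let devices = [
            LibTorchDevice::Cpu,
            LibTorchDevice::Cuda(1),
            LibTorchDevice::Mps,
            LibTorchDevice::Vulkan,
        ];

        for device in devices {
            assert_eq!(LibTorchDevice::from_id(device.to_id()), device);
        }
    }
}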