burn_ndarray/backend.rs

use crate::element::{FloatNdArrayElement, IntNdArrayElement, QuantElement};
use crate::{NdArrayQTensor, NdArrayTensor, NdArrayTensorFloat};
use alloc::string::String;
use burn_common::stub::Mutex;
use burn_ir::{BackendIr, HandleKind, TensorHandle};
use burn_tensor::backend::{Backend, DeviceId, DeviceOps};
use burn_tensor::ops::{BoolTensor, FloatTensor, IntTensor, QuantizedTensor};
use core::marker::PhantomData;
use rand::{SeedableRng, rngs::StdRng};

/// Global RNG state for the ndarray backend, set by `Backend::seed`.
pub(crate) static SEED: Mutex<Option<StdRng>> = Mutex::new(None);

/// The device type for the ndarray backend.
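///
/// # Example
///
/// A minimal sketch; the assertion simply reflects the `Default` impl defined below:
///
/// ```ignore
/// use burn_ndarray::NdArrayDevice;
///
/// let device = NdArrayDevice::default();
/// assert_eq!(device, NdArrayDevice::Cpu);
/// ```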
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum NdArrayDevice {
    /// The CPU device.
    Cpu,
}

impl DeviceOps for NdArrayDevice {
    fn id(&self) -> burn_tensor::backend::DeviceId {
        match self {
            NdArrayDevice::Cpu => DeviceId::new(0, 0),
        }
    }
}

impl Default for NdArrayDevice {
    fn default() -> Self {
        Self::Cpu
    }
}

/// Tensor backend that uses the [ndarray](ndarray) crate for executing tensor operations.
///
/// This backend is compatible with CPUs and can be compiled for almost any platform, including
/// `wasm`, `arm`, and `x86`.
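///
/// The element types default to `f32` floats, `i64` integers, and `i8` quantized values and can
/// be overridden through the generic parameters.
///
/// # Example
///
/// A minimal sketch, assuming the `burn_tensor::Tensor` API and its `from_floats` constructor:
///
/// ```ignore
/// use burn_ndarray::NdArray;
/// use burn_tensor::Tensor;
///
/// // The ndarray backend with the default element types.
/// type Backend = NdArray;
///
/// let device = Default::default();
/// let tensor = Tensor::<Backend, 1>::from_floats([1.0, 2.0, 3.0], &device);
/// ```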
#[derive(Clone, Copy, Default, Debug)]
pub struct NdArray<E = f32, I = i64, Q = i8> {
    _e: PhantomData<E>,
    _i: PhantomData<I>,
    _q: PhantomData<Q>,
}

impl<E: FloatNdArrayElement, I: IntNdArrayElement, Q: QuantElement> Backend for NdArray<E, I, Q> {
    type Device = NdArrayDevice;

    type FloatTensorPrimitive = NdArrayTensorFloat;
    type FloatElem = E;

    type IntTensorPrimitive = NdArrayTensor<I>;
    type IntElem = I;

    type BoolTensorPrimitive = NdArrayTensor<bool>;
    type BoolElem = bool;

    type QuantizedTensorPrimitive = NdArrayQTensor<Q>;
    type QuantizedEncoding = Q;

    fn ad_enabled() -> bool {
        false
    }

    fn name(_device: &Self::Device) -> String {
        String::from("ndarray")
    }

    fn seed(seed: u64) {
        // Replace the global RNG state with one seeded deterministically from `seed`.
        let rng = StdRng::seed_from_u64(seed);
        let mut seed = SEED.lock().unwrap();
        *seed = Some(rng);
    }
}

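// In the intermediate representation, every tensor is stored behind a single `Handle` type.
// `HandleKind` tags the tensor kind, and the conversions below unwrap the expected variant,
// panicking if the handle holds a different kind.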
impl<E: FloatNdArrayElement, I: IntNdArrayElement, Q: QuantElement> BackendIr for NdArray<E, I, Q> {
    type Handle = HandleKind<Self>;

    fn float_tensor(handle: TensorHandle<Self::Handle>) -> FloatTensor<Self> {
        match handle.handle {
            HandleKind::Float(handle) => handle,
            _ => panic!("Expected float handle, got {}", handle.handle.name()),
        }
    }

    fn int_tensor(handle: TensorHandle<Self::Handle>) -> IntTensor<Self> {
        match handle.handle {
            HandleKind::Int(handle) => handle,
            _ => panic!("Expected int handle, got {}", handle.handle.name()),
        }
    }

    fn bool_tensor(handle: TensorHandle<Self::Handle>) -> BoolTensor<Self> {
        match handle.handle {
            HandleKind::Bool(handle) => handle,
            _ => panic!("Expected bool handle, got {}", handle.handle.name()),
        }
    }

    fn quantized_tensor(handle: TensorHandle<Self::Handle>) -> QuantizedTensor<Self> {
        match handle.handle {
            HandleKind::Quantized(handle) => handle,
            _ => panic!("Expected quantized handle, got {}", handle.handle.name()),
        }
    }

    fn float_tensor_handle(tensor: FloatTensor<Self>) -> Self::Handle {
        HandleKind::Float(tensor)
    }

    fn int_tensor_handle(tensor: IntTensor<Self>) -> Self::Handle {
        HandleKind::Int(tensor)
    }

    fn bool_tensor_handle(tensor: BoolTensor<Self>) -> Self::Handle {
        HandleKind::Bool(tensor)
    }

    fn quantized_tensor_handle(tensor: QuantizedTensor<Self>) -> Self::Handle {
        HandleKind::Quantized(tensor)
    }
}