#![cfg_attr(not(feature = "std"), no_std)]
#![warn(missing_docs)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![recursion_limit = "135"]

//! The core crate of Burn.

#[macro_use]
extern crate derive_new;

/// Re-export serde for use by the derive macros.
pub use serde;

/// The configuration module.
pub mod config;

/// Data module.
#[cfg(feature = "std")]
pub mod data;

/// Optimizer module.
pub mod optim;

/// Learning rate scheduler module.
#[cfg(feature = "std")]
pub mod lr_scheduler;

/// Gradient clipping module.
pub mod grad_clipping;

/// Module for the neural network module abstraction (parameters, saving, loading).
pub mod module;

/// Neural network building blocks.
pub mod nn;

/// Module for the recorder (serialization of modules and states).
pub mod record;

/// Module for tensors and tensor operations.
pub mod tensor;

extern crate alloc;

/// Backend used for test cases when no specific test backend feature is enabled.
#[cfg(all(
    test,
    not(feature = "test-tch"),
    not(feature = "test-wgpu"),
    not(feature = "test-cuda")
))]
pub type TestBackend = burn_ndarray::NdArray<f32>;

/// Backend used for test cases with the `test-tch` feature.
#[cfg(all(test, feature = "test-tch"))]
pub type TestBackend = burn_tch::LibTorch<f32>;

/// Backend used for test cases with the `test-wgpu` feature.
#[cfg(all(test, feature = "test-wgpu"))]
pub type TestBackend = burn_wgpu::Wgpu;

/// Backend used for test cases with the `test-cuda` feature.
#[cfg(all(test, feature = "test-cuda"))]
pub type TestBackend = burn_cuda::Cuda;

/// Autodiff backend used for test cases, wrapping [`TestBackend`].
#[cfg(test)]
pub type TestAutodiffBackend = burn_autodiff::Autodiff<TestBackend>;
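
// A minimal sketch (not part of the original source) of how the aliases above
// are typically exercised in unit tests. It assumes the `burn_tensor` Tensor
// API re-exported through `crate::tensor` (`ones`, `dims`, `require_grad`,
// `sum`, `backward`, `grad`) and that each backend's device implements Default.
#[cfg(test)]
mod backend_alias_smoke {
    use super::{TestAutodiffBackend, TestBackend};
    use crate::tensor::Tensor;

    #[test]
    fn test_backend_builds_a_tensor() {
        let device = Default::default();
        // A 2x3 float tensor of ones on whichever backend the `test-*`
        // features selected.
        let tensor = Tensor::<TestBackend, 2>::ones([2, 3], &device);
        assert_eq!(tensor.dims(), [2, 3]);
    }

    #[test]
    fn autodiff_backend_computes_gradients() {
        let device = Default::default();
        let x = Tensor::<TestAutodiffBackend, 1>::ones([3], &device).require_grad();
        // Backward pass on a scalar loss; a gradient for `x` should be
        // recorded once `x` is marked as requiring gradients.
        let grads = x.clone().sum().backward();
        assert!(x.grad(&grads).is_some());
    }
}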

#[cfg(all(test, feature = "test-memory-checks"))]
mod tests {
    burn_fusion::memory_checks!();
}

/// Type alias for the learning rate.
pub type LearningRate = f64;

/// A prelude re-exporting the most commonly used types and traits.
pub mod prelude {
    pub use crate::{
        config::Config,
        module::Module,
        nn,
        tensor::{
            Bool, Device, ElementConversion, Float, Int, Shape, Tensor, TensorData,
            backend::Backend, s,
        },
    };
}
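
// Example (a sketch, not part of the original source): typical downstream use
// of the prelude through the `burn` facade crate, which re-exports this crate's
// items, so user code commonly writes:
//
//     use burn::prelude::*;
//
//     // Build a 2x3 tensor of ones on any backend, using only prelude items
//     // (`Backend`, `Device`, `Shape`, `Tensor`).
//     fn ones<B: Backend>(device: &Device<B>) -> Tensor<B, 2> {
//         Tensor::ones(Shape::new([2, 3]), device)
//     }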