#![cfg_attr(not(feature = "std"), no_std)]
#![warn(missing_docs)]

//! The core crate of Burn: configuration, tensors, modules, neural network
//! building blocks, optimizers, and recording utilities.

#[macro_use]
extern crate derive_new;

/// Re-export serde for proc macros.
pub use serde;

/// The configuration module.
pub mod config;

/// Data module.
#[cfg(feature = "std")]
pub mod data;

/// Optimizer module.
#[cfg(feature = "std")]
pub mod optim;

/// Learning rate scheduler module.
#[cfg(feature = "std")]
pub mod lr_scheduler;

/// Gradient clipping module.
pub mod grad_clipping;

/// The neural network module abstraction.
pub mod module;

/// Neural network module.
pub mod nn;

/// The recorder module for saving and loading.
pub mod record;

/// The tensor module.
pub mod tensor;

/// Backend module.
pub mod backend;

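// Make the `alloc` crate available so heap-allocated types (e.g. `Vec`, `String`)
// can be used in `no_std` builds.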
extern crate alloc;

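// Backend used by this crate's unit tests, selected at compile time: the
// `test-tch` and `test-wgpu` features override the default ndarray backend.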
#[cfg(all(test, not(feature = "test-tch"), not(feature = "test-wgpu"),))]
pub type TestBackend = burn_ndarray::NdArray<f32>;

#[cfg(all(test, feature = "test-tch"))]
pub type TestBackend = burn_tch::LibTorch<f32>;

#[cfg(all(test, feature = "test-wgpu", not(target_os = "macos")))]
pub type TestBackend = burn_wgpu::Wgpu<burn_wgpu::Vulkan, f32, i32>;

#[cfg(all(test, feature = "test-wgpu", target_os = "macos"))]
pub type TestBackend = burn_wgpu::Wgpu<burn_wgpu::Metal, f32, i32>;

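// Autodiff-enabled wrapper around the selected test backend, used for gradient tests.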
#[cfg(feature = "std")]
#[cfg(test)]
pub type TestAutodiffBackend = burn_autodiff::Autodiff<TestBackend>;

/// Type alias for the learning rate.
///
/// `LearningRate` also implements the [learning rate scheduler](crate::lr_scheduler::LrScheduler)
/// trait, so it can be used directly as a constant learning rate.
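///
/// # Examples
///
/// A minimal sketch (the value is illustrative): a plain `f64` can be passed
/// wherever a scheduler is expected to keep the learning rate fixed.
///
/// ```ignore
/// // Sketch only (assumes the crate is available as `burn_core`):
/// let lr: burn_core::LearningRate = 1.0e-2;
/// ```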
pub type LearningRate = f64; // We could potentially change the type.