boostr 0.1.0

ML framework built on `numr` — attention, quantization, and model architectures.

Documentation
//! Generic Mixture-of-Experts (MoE) implementation, split across focused
//! submodules.
//!
//! THE algorithm — identical for all backends.
//!
//! Each submodule is declared with an explicit `#[path]` (the sources live
//! under `moe/`) and its contents are glob re-exported here so callers get a
//! single flat namespace.

/// Token-to-expert dispatch (see `moe/dispatch.rs`).
#[path = "moe/dispatch.rs"]
pub mod dispatch;
pub use dispatch::*;

/// Grouped GEMM used for the batched expert matmuls (see `moe/grouped_gemm.rs`).
#[path = "moe/grouped_gemm.rs"]
pub mod grouped_gemm;
pub use grouped_gemm::*;

/// Expert routing / selection (see `moe/routing.rs`).
#[path = "moe/routing.rs"]
pub mod routing;
pub use routing::*;