wgml/models/llama2/mod.rs

//! Llama2 inference on the GPU or CPU.
//!
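// Re-export the tokenizer and transformer APIs at the module root.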
pub use tokenizer::*;
pub use transformer::*;

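// The CPU path is public; `tokenizer` and `transformer` stay private but are re-exported above.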
pub mod cpu;
mod tokenizer;
mod transformer;