Struct llama_cpp_2::model::params::LlamaModelParams
source · pub struct LlamaModelParams { /* private fields */ }
Expand description
A safe wrapper around llama_model_params
.
Implementations§
source§impl LlamaModelParams
impl LlamaModelParams
source · pub fn n_gpu_layers(&self) -> i32
pub fn n_gpu_layers(&self) -> i32
Get the number of layers to offload to the GPU.
source · pub fn vocab_only(&self) -> bool
pub fn vocab_only(&self) -> bool
Only load the vocabulary, no weights.
source · pub fn with_n_gpu_layers(self, n_gpu_layers: u32) -> Self
pub fn with_n_gpu_layers(self, n_gpu_layers: u32) -> Self
Sets the number of GPU layers to offload to the GPU.
let params = LlamaModelParams::default();
let params = params.with_n_gpu_layers(1);
assert_eq!(params.n_gpu_layers(), 1);
source · pub fn with_vocab_only(self, vocab_only: bool) -> Self
pub fn with_vocab_only(self, vocab_only: bool) -> Self
Sets vocab_only.
Trait Implementations§
source§impl Debug for LlamaModelParams
impl Debug for LlamaModelParams
source§impl Default for LlamaModelParams
impl Default for LlamaModelParams
Default parameters for LlamaModel
. (as defined in llama.cpp by llama_model_default_params
)
let params = LlamaModelParams::default();
assert_eq!(params.n_gpu_layers(), 0, "n_gpu_layers should be 0");
assert_eq!(params.main_gpu(), 0, "main_gpu should be 0");
assert_eq!(params.vocab_only(), false, "vocab_only should be false");
assert_eq!(params.use_mmap(), true, "use_mmap should be true");
assert_eq!(params.use_mlock(), false, "use_mlock should be false");
Auto Trait Implementations§
impl RefUnwindSafe for LlamaModelParams
impl !Send for LlamaModelParams
impl !Sync for LlamaModelParams
impl Unpin for LlamaModelParams
impl UnwindSafe for LlamaModelParams
Blanket Implementations§
source§impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
source · fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more