use std::fmt::Debug;
/// A wrapper around the raw `llama_cpp_sys_2::llama_model_params` struct
/// that is passed to llama.cpp when loading a model.
// The type name deliberately repeats the (presumed) module name so that it
// reads naturally at call sites; silence the corresponding clippy lint.
#[allow(clippy::module_name_repetitions)]
#[derive(Debug)]
pub struct LlamaModelParams {
    // The underlying C parameter struct; crate-internal code hands this
    // directly to the llama.cpp FFI.
    pub(crate) params: llama_cpp_sys_2::llama_model_params,
}
impl LlamaModelParams {
    /// Returns the number of layers to offload to the GPU
    /// (`n_gpu_layers` in the underlying C struct).
    #[must_use]
    pub fn n_gpu_layers(&self) -> i32 {
        self.params.n_gpu_layers
    }

    /// Returns the `main_gpu` index from the underlying C struct.
    #[must_use]
    pub fn main_gpu(&self) -> i32 {
        self.params.main_gpu
    }

    /// Returns whether only the vocabulary (and not the weights) is loaded.
    #[must_use]
    pub fn vocab_only(&self) -> bool {
        self.params.vocab_only
    }

    /// Returns whether `mmap` is used when loading the model.
    #[must_use]
    pub fn use_mmap(&self) -> bool {
        self.params.use_mmap
    }

    /// Returns whether `mlock` is used to keep the model in RAM.
    #[must_use]
    pub fn use_mlock(&self) -> bool {
        self.params.use_mlock
    }

    /// Sets the number of layers to offload to the GPU.
    ///
    /// The C API stores this as an `i32`; values larger than `i32::MAX`
    /// are clamped to `i32::MAX` rather than panicking.
    #[must_use]
    pub fn with_n_gpu_layers(mut self, n_gpu_layers: u32) -> Self {
        let n_gpu_layers = i32::try_from(n_gpu_layers).unwrap_or(i32::MAX);
        self.params.n_gpu_layers = n_gpu_layers;
        self
    }

    /// Sets the `main_gpu` index.
    ///
    /// Added for consistency with [`Self::main_gpu`]; previously this
    /// field was readable but not settable through the builder API.
    #[must_use]
    pub fn with_main_gpu(mut self, main_gpu: i32) -> Self {
        self.params.main_gpu = main_gpu;
        self
    }

    /// Sets whether only the vocabulary should be loaded.
    #[must_use]
    pub fn with_vocab_only(mut self, vocab_only: bool) -> Self {
        self.params.vocab_only = vocab_only;
        self
    }

    /// Sets whether `mmap` should be used when loading the model.
    ///
    /// Added for consistency with [`Self::use_mmap`].
    #[must_use]
    pub fn with_use_mmap(mut self, use_mmap: bool) -> Self {
        self.params.use_mmap = use_mmap;
        self
    }

    /// Sets whether `mlock` should be used to keep the model in RAM.
    ///
    /// Added for consistency with [`Self::use_mlock`].
    #[must_use]
    pub fn with_use_mlock(mut self, use_mlock: bool) -> Self {
        self.params.use_mlock = use_mlock;
        self
    }
}
impl Default for LlamaModelParams {
    /// Returns the library-provided default model parameters.
    fn default() -> Self {
        LlamaModelParams {
            // SAFETY: `llama_model_default_params` takes no arguments and
            // returns the params struct by value; it appears to have no
            // preconditions — NOTE(review): confirm against the llama.cpp
            // header that it performs no initialization requiring prior setup.
            params: unsafe { llama_cpp_sys_2::llama_model_default_params() },
        }
    }
}