Struct llama_cpp_2::context::params::LlamaContextParams
pub struct LlamaContextParams { /* private fields */ }
A safe wrapper around llama_context_params.
Generally this should be created with Default::default() and then modified with the with_* methods.
§Examples
use std::num::NonZeroU32;
use llama_cpp_2::context::params::LlamaContextParams;
let ctx_params = LlamaContextParams::default()
.with_n_ctx(NonZeroU32::new(2048))
.with_seed(1234);
assert_eq!(ctx_params.seed(), 1234);
assert_eq!(ctx_params.n_ctx(), NonZeroU32::new(2048));
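The parameters take effect when a context is created from a loaded model. The sketch below shows that flow, assuming the crate's backend and model APIs (LlamaBackend::init, LlamaModel::load_from_file, LlamaModel::new_context); the model path and parameter values are placeholders, so treat this as illustrative rather than a definitive usage pattern.
use std::num::NonZeroU32;
use llama_cpp_2::context::params::LlamaContextParams;
use llama_cpp_2::llama_backend::LlamaBackend;
use llama_cpp_2::model::params::LlamaModelParams;
use llama_cpp_2::model::LlamaModel;
// Initialise the backend once per process.
let backend = LlamaBackend::init().expect("failed to initialise backend");
// Load a model; "model.gguf" is a placeholder path.
let model = LlamaModel::load_from_file(&backend, "model.gguf", &LlamaModelParams::default())
    .expect("failed to load model");
// Build the context parameters with the builder-style with_* methods.
let ctx_params = LlamaContextParams::default()
    .with_n_ctx(NonZeroU32::new(2048))
    .with_n_threads(8)
    .with_n_threads_batch(8);
// The parameters are consumed when the context is created.
let _ctx = model.new_context(&backend, ctx_params).expect("failed to create context");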
Implementations§
impl LlamaContextParams
pub fn with_seed(self, seed: u32) -> Self
Set the seed of the context
§Examples
use llama_cpp_2::context::params::LlamaContextParams;
let params = LlamaContextParams::default();
let params = params.with_seed(1234);
assert_eq!(params.seed(), 1234);
pub fn seed(&self) -> u32
Get the seed of the context
§Examples
use llama_cpp_2::context::params::LlamaContextParams;
let params = LlamaContextParams::default()
.with_seed(1234);
assert_eq!(params.seed(), 1234);
pub fn with_n_ctx(self, n_ctx: Option<NonZeroU32>) -> Self
Set the size of the context
§Examples
use std::num::NonZeroU32;
use llama_cpp_2::context::params::LlamaContextParams;
let params = LlamaContextParams::default();
let params = params.with_n_ctx(NonZeroU32::new(2048));
assert_eq!(params.n_ctx(), NonZeroU32::new(2048));
pub fn n_ctx(&self) -> Option<NonZeroU32>
Get the size of the context
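§Examples
A minimal sketch of the getter, assuming the default context size of 512 noted in the Default implementation below.
use std::num::NonZeroU32;
use llama_cpp_2::context::params::LlamaContextParams;
let params = LlamaContextParams::default();
assert_eq!(params.n_ctx(), NonZeroU32::new(512));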
pub fn with_n_batch(self, n_batch: u32) -> Self
Set the n_batch
§Examples
use llama_cpp_2::context::params::LlamaContextParams;
let params = LlamaContextParams::default()
.with_n_batch(2048);
assert_eq!(params.n_batch(), 2048);
pub fn n_batch(&self) -> u32
Get the n_batch
§Examples
use llama_cpp_2::context::params::LlamaContextParams;
let params = LlamaContextParams::default();
assert_eq!(params.n_batch(), 2048);
pub fn with_rope_scaling_type(self, rope_scaling_type: RopeScalingType) -> Self
Set the type of rope scaling.
§Examples
use llama_cpp_2::context::params::{LlamaContextParams, RopeScalingType};
let params = LlamaContextParams::default()
.with_rope_scaling_type(RopeScalingType::Linear);
assert_eq!(params.rope_scaling_type(), RopeScalingType::Linear);
pub fn rope_scaling_type(&self) -> RopeScalingType
Get the type of rope scaling.
§Examples
let params = llama_cpp_2::context::params::LlamaContextParams::default();
assert_eq!(params.rope_scaling_type(), llama_cpp_2::context::params::RopeScalingType::Unspecified);
pub fn with_rope_freq_base(self, rope_freq_base: f32) -> Self
Set the rope frequency base.
§Examples
use llama_cpp_2::context::params::LlamaContextParams;
let params = LlamaContextParams::default()
.with_rope_freq_base(0.5);
assert_eq!(params.rope_freq_base(), 0.5);
pub fn rope_freq_base(&self) -> f32
Get the rope frequency base.
§Examples
let params = llama_cpp_2::context::params::LlamaContextParams::default();
assert_eq!(params.rope_freq_base(), 0.0);
pub fn with_rope_freq_scale(self, rope_freq_scale: f32) -> Self
Set the rope frequency scale.
§Examples
use llama_cpp_2::context::params::LlamaContextParams;
let params = LlamaContextParams::default()
.with_rope_freq_scale(0.5);
assert_eq!(params.rope_freq_scale(), 0.5);
pub fn rope_freq_scale(&self) -> f32
Get the rope frequency scale.
§Examples
let params = llama_cpp_2::context::params::LlamaContextParams::default();
assert_eq!(params.rope_freq_scale(), 0.0);
pub fn n_threads(&self) -> u32
Get the number of threads.
§Examples
let params = llama_cpp_2::context::params::LlamaContextParams::default();
assert_eq!(params.n_threads(), 4);
pub fn n_threads_batch(&self) -> u32
Get the number of threads allocated for batches.
§Examples
let params = llama_cpp_2::context::params::LlamaContextParams::default();
assert_eq!(params.n_threads_batch(), 4);
pub fn with_n_threads(self, n_threads: u32) -> Self
Set the number of threads.
§Examples
use llama_cpp_2::context::params::LlamaContextParams;
let params = LlamaContextParams::default()
.with_n_threads(8);
assert_eq!(params.n_threads(), 8);
pub fn with_n_threads_batch(self, n_threads: u32) -> Self
Set the number of threads allocated for batches.
§Examples
use llama_cpp_2::context::params::LlamaContextParams;
let params = LlamaContextParams::default()
.with_n_threads_batch(8);
assert_eq!(params.n_threads_batch(), 8);
pub fn embeddings(&self) -> bool
Check whether embeddings are enabled
§Examples
let params = llama_cpp_2::context::params::LlamaContextParams::default();
assert!(!params.embeddings());
pub fn with_embeddings(self, embedding: bool) -> Self
Enable or disable the use of embeddings
§Examples
use llama_cpp_2::context::params::LlamaContextParams;
let params = LlamaContextParams::default()
.with_embeddings(true);
assert!(params.embeddings());
Trait Implementations§
impl Clone for LlamaContextParams
fn clone(&self) -> LlamaContextParams
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source.
impl Debug for LlamaContextParams
impl Default for LlamaContextParams
Default parameters for LlamaContext (as defined in llama.cpp by llama_context_default_params).
use std::num::NonZeroU32;
use llama_cpp_2::context::params::{LlamaContextParams, RopeScalingType};
let params = LlamaContextParams::default();
assert_eq!(params.n_ctx(), NonZeroU32::new(512), "n_ctx should be 512");
assert_eq!(params.rope_scaling_type(), RopeScalingType::Unspecified);
impl Send for LlamaContextParams
SAFETY: we do not currently allow setting or reading the pointers that would otherwise prevent this type from being automatically Send or Sync.