#[repr(C)]pub struct common_sampler_params {Show 32 fields
pub seed: u32,
pub n_prev: i32,
pub n_probs: i32,
pub min_keep: i32,
pub top_k: i32,
pub top_p: f32,
pub min_p: f32,
pub xtc_probability: f32,
pub xtc_threshold: f32,
pub tfs_z: f32,
pub typ_p: f32,
pub temp: f32,
pub dynatemp_range: f32,
pub dynatemp_exponent: f32,
pub penalty_last_n: i32,
pub penalty_repeat: f32,
pub penalty_freq: f32,
pub penalty_present: f32,
pub dry_multiplier: f32,
pub dry_base: f32,
pub dry_allowed_length: i32,
pub dry_penalty_last_n: i32,
pub mirostat: i32,
pub mirostat_tau: f32,
pub mirostat_eta: f32,
pub penalize_nl: bool,
pub ignore_eos: bool,
pub no_perf: bool,
pub dry_sequence_breakers: Vec<String>,
pub samplers: Vec<common_sampler_type>,
pub grammar: Vec<String>,
pub logit_bias: Vec<(i32, f64)>,
}Expand description
Common sampler parameters.
Fields

- `seed: u32` — the seed used to initialize `llama_sampler`
- `n_prev: i32` — number of previous tokens to remember
- `n_probs: i32` — if greater than 0, output the probabilities of the top `n_probs` tokens
- `min_keep: i32` — 0 = disabled, otherwise samplers should return at least `min_keep` tokens
- `top_k: i32` — <= 0 to use vocab size
- `top_p: f32` — 1.0 = disabled
- `min_p: f32` — 0.0 = disabled
- `xtc_probability: f32` — 0.0 = disabled
- `xtc_threshold: f32` — > 0.5 disables XTC
- `tfs_z: f32` — 1.0 = disabled
- `typ_p: f32` — typical-p, 1.0 = disabled
- `temp: f32` — <= 0.0 to sample greedily, 0.0 to not output probabilities
- `dynatemp_range: f32` — 0.0 = disabled
- `dynatemp_exponent: f32` — controls how entropy maps to temperature in the dynamic temperature sampler
- `penalty_last_n: i32` — last n tokens to penalize (0 = disable penalty, -1 = context size)
- `penalty_repeat: f32` — 1.0 = disabled
- `penalty_freq: f32` — 0.0 = disabled
- `penalty_present: f32` — 0.0 = disabled
- `dry_multiplier: f32` — 0.0 = disabled; DRY repetition penalty for tokens extending repetition
- `dry_base: f32` — 0.0 = disabled; multiplier * base ^ (length of sequence before token - allowed length)
- `dry_allowed_length: i32` — tokens extending repetitions beyond this receive penalty
- `dry_penalty_last_n: i32` — how many tokens to scan for repetitions (0 = disable penalty, -1 = context size)
- `mirostat: i32` — 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
- `mirostat_tau: f32` — target entropy
- `mirostat_eta: f32` — learning rate
- `penalize_nl: bool` — consider newlines as a repeatable token
- `ignore_eos: bool`
- `no_perf: bool` — disable performance metrics
- `dry_sequence_breakers: Vec<String>`
- `samplers: Vec<common_sampler_type>`
- `grammar: Vec<String>`
- `logit_bias: Vec<(i32, f64)>`