//! Low-level FFI type definitions for `llama_cpp_sys_v3` (types.rs).

#![allow(non_camel_case_types)]

use std::ffi::c_void;

/// Position of a token within a sequence (C `llama_pos`).
pub type llama_pos = i32;
/// Token id in the model vocabulary (C `llama_token`).
pub type llama_token = i32;
/// Identifier of a decoding sequence (C `llama_seq_id`).
pub type llama_seq_id = i32;

/// Sentinel token id meaning "no token" (`-1` in the C API).
pub const LLAMA_TOKEN_NULL: llama_token = -1;
// Opaque types
//
// Each of these mirrors a C struct whose layout is hidden behind a pointer.
// The zero-length `_data` keeps the type zero-sized, and the `PhantomData`
// of a raw pointer plus `PhantomPinned` makes it !Send, !Sync and !Unpin,
// so Rust code can only ever handle it behind a raw pointer.

/// Opaque handle to a model vocabulary; only used via `*mut llama_vocab`.
#[repr(C)]
pub struct llama_vocab {
    _data: [u8; 0],
    _marker: core::marker::PhantomData<(*mut u8, core::marker::PhantomPinned)>,
}
/// Opaque handle to a loaded model; only used via `*mut llama_model`.
/// Zero-sized with pointer/`PhantomPinned` markers so it cannot be
/// constructed, sent across threads, or moved from Rust.
#[repr(C)]
pub struct llama_model {
    _data: [u8; 0],
    _marker: core::marker::PhantomData<(*mut u8, core::marker::PhantomPinned)>,
}
/// Opaque handle to an inference context; only used via `*mut llama_context`.
/// Zero-sized with pointer/`PhantomPinned` markers so it cannot be
/// constructed, sent across threads, or moved from Rust.
#[repr(C)]
pub struct llama_context {
    _data: [u8; 0],
    _marker: core::marker::PhantomData<(*mut u8, core::marker::PhantomPinned)>,
}
/// Opaque handle to a sampler; only used via `*mut llama_sampler`.
/// Zero-sized with pointer/`PhantomPinned` markers so it cannot be
/// constructed, sent across threads, or moved from Rust.
#[repr(C)]
pub struct llama_sampler {
    _data: [u8; 0],
    _marker: core::marker::PhantomData<(*mut u8, core::marker::PhantomPinned)>,
}
36#[repr(C)]
37#[derive(Debug, Copy, Clone)]
38pub struct llama_batch {
39    pub n_tokens: i32,
40    pub token: *mut llama_token,
41    pub embd: *mut f32,
42    pub pos: *mut llama_pos,
43    pub n_seq_id: *mut i32,
44    pub seq_id: *mut *mut llama_seq_id,
45    pub logits: *mut i8, // bool in C
46}
47
/// FFI mirror of the C `llama_model_params` struct (model-loading options).
///
/// Pointer fields left as `*{const,mut} c_void` stand in for C types this
/// crate does not model (device lists, buffer-type overrides, KV overrides).
/// NOTE(review): field order must match `llama.h` of the pinned llama.cpp
/// revision exactly — verify when bumping the submodule.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct llama_model_params {
    pub devices: *mut std::ffi::c_void,
    pub tensor_buft_overrides: *const std::ffi::c_void,
    /// Number of layers to offload to the GPU.
    pub n_gpu_layers: i32,
    pub split_mode: i32,
    pub main_gpu: i32,
    pub tensor_split: *const f32,
    /// Called with a progress fraction and `progress_callback_user_data`;
    /// `Option<extern "C" fn>` is the null-able C function pointer.
    pub progress_callback: Option<unsafe extern "C" fn(f32, *mut std::ffi::c_void) -> bool>,
    pub progress_callback_user_data: *mut std::ffi::c_void,
    pub kv_overrides: *const std::ffi::c_void,
    pub vocab_only: bool,
    pub use_mmap: bool,
    pub use_direct_io: bool,
    pub use_mlock: bool,
    pub check_tensors: bool,
    pub use_extra_bufts: bool,
    pub no_host: bool,
    pub no_alloc: bool,
}
/// FFI mirror of the C `llama_context_params` struct (per-context options).
///
/// Enum-typed C fields are carried as plain `i32` discriminants here
/// (`rope_scaling_type`, `pooling_type`, `type_k`, `type_v`, ...).
/// NOTE(review): field order must match `llama.h` of the pinned llama.cpp
/// revision exactly — verify when bumping the submodule.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct llama_context_params {
    /// Text context size (0 presumably means "use the model default" — TODO confirm).
    pub n_ctx: u32,
    pub n_batch: u32,
    pub n_ubatch: u32,
    pub n_seq_max: u32,
    pub n_threads: i32,
    pub n_threads_batch: i32,
    pub rope_scaling_type: i32,
    pub pooling_type: i32,
    pub attention_type: i32,
    pub flash_attn_type: i32,
    pub rope_freq_base: f32,
    pub rope_freq_scale: f32,
    pub yarn_ext_factor: f32,
    pub yarn_attn_factor: f32,
    pub yarn_beta_fast: f32,
    pub yarn_beta_slow: f32,
    pub yarn_orig_ctx: u32,
    pub defrag_thold: f32,
    /// Nullable eval callback; invoked with `cb_eval_user_data`.
    pub cb_eval: Option<unsafe extern "C" fn(*mut std::ffi::c_void, *mut std::ffi::c_void) -> bool>,
    pub cb_eval_user_data: *mut std::ffi::c_void,
    pub type_k: i32,
    pub type_v: i32,
    /// Nullable abort callback; a `true` return presumably aborts — TODO confirm.
    pub abort_callback: Option<unsafe extern "C" fn(*mut std::ffi::c_void) -> bool>,
    pub abort_callback_data: *mut std::ffi::c_void,
    pub embeddings: bool,
    pub offload_kqv: bool,
    pub no_perf: bool,
    pub op_offload: bool,
    pub swa_full: bool,
    pub kv_unified: bool,
    /// Opaque sampler array plus its length (`n_samplers`).
    pub samplers: *mut std::ffi::c_void,
    pub n_samplers: usize,
}
/// FFI mirror of the C `llama_sampler_chain_params` struct.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct llama_sampler_chain_params {
    /// When true, disable performance measurements for the chain.
    pub no_perf: bool,
}