//! `llama_cpp_sys_v3/types.rs` — hand-written FFI type declarations mirroring
//! the llama.cpp C API (`llama.h`).

#![allow(non_camel_case_types)]

// No imports needed: everything below is plain integers, raw pointers, and
// `core`/`std` marker types referenced by full path.

/// Position of a token within a sequence (C typedef `llama_pos`, `int32_t`).
pub type llama_pos = i32;
/// Token id (C typedef `llama_token`, `int32_t`).
pub type llama_token = i32;
/// Sequence id used to tag batch entries (C typedef `llama_seq_id`, `int32_t`).
pub type llama_seq_id = i32;

/// Sentinel meaning "no token" (C: `LLAMA_TOKEN_NULL`).
pub const LLAMA_TOKEN_NULL: llama_token = -1;
10
// ---- Opaque types ---------------------------------------------------------
// Zero-sized, FFI-safe stand-ins for C types this crate only touches through
// raw pointers. The `PhantomData<(*mut u8, PhantomPinned)>` marker makes each
// of them `!Send`, `!Sync` and `!Unpin`, so safe code cannot share or move
// out of them.

/// Opaque vocabulary object (C `struct llama_vocab`).
#[repr(C)]
pub struct llama_vocab {
    _data: [u8; 0],
    _marker: std::marker::PhantomData<(*mut u8, std::marker::PhantomPinned)>,
}
17
/// Opaque model handle (C `struct llama_model`); used only behind raw
/// pointers returned by the loader. Zero-sized; the marker keeps it
/// `!Send`/`!Sync`/`!Unpin`.
#[repr(C)]
pub struct llama_model {
    _data: [u8; 0],
    _marker: std::marker::PhantomData<(*mut u8, std::marker::PhantomPinned)>,
}
23
/// Opaque inference context (C `struct llama_context`); handled only via raw
/// pointers. Zero-sized; the marker keeps it `!Send`/`!Sync`/`!Unpin`.
#[repr(C)]
pub struct llama_context {
    _data: [u8; 0],
    _marker: std::marker::PhantomData<(*mut u8, std::marker::PhantomPinned)>,
}
29
/// Opaque memory/KV-state object (C side type; exact C name not visible here
/// — confirm against llama.h). Zero-sized; marker keeps it
/// `!Send`/`!Sync`/`!Unpin`.
#[repr(C)]
pub struct llama_memory {
    _data: [u8; 0],
    _marker: std::marker::PhantomData<(*mut u8, std::marker::PhantomPinned)>,
}
35
/// Opaque sampler object (C `struct llama_sampler`); handled only via raw
/// pointers. Zero-sized; marker keeps it `!Send`/`!Sync`/`!Unpin`.
#[repr(C)]
pub struct llama_sampler {
    _data: [u8; 0],
    _marker: std::marker::PhantomData<(*mut u8, std::marker::PhantomPinned)>,
}
41
/// One message of a chat template (C `struct llama_chat_message`).
///
/// `role` and `content` are raw C-string pointers; presumably NUL-terminated
/// and owned by the caller (per the C API — confirm against llama.h).
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct llama_chat_message {
    pub role: *const core::ffi::c_char,
    pub content: *const core::ffi::c_char,
}
48
49#[repr(C)]
50#[derive(Debug, Copy, Clone)]
51pub struct llama_batch {
52    pub n_tokens: i32,
53    pub token: *mut llama_token,
54    pub embd: *mut f32,
55    pub pos: *mut llama_pos,
56    pub n_seq_id: *mut i32,
57    pub seq_id: *mut *mut llama_seq_id,
58    pub logits: *mut i8, // bool in C
59}
60
/// Parameters for loading a model (mirror of C `struct llama_model_params`).
///
/// NOTE(review): field set and order must match the exact llama.h revision
/// this crate targets (it includes fields such as `use_direct_io` / `no_host`
/// / `no_alloc` that are not in all upstream versions) — verify when bumping
/// llama.cpp. Any divergence is UB at the FFI boundary.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct llama_model_params {
    // Device list (C: `ggml_backend_dev_t *`); left as `c_void` because ggml
    // types are not bound in this crate.
    pub devices: *mut std::ffi::c_void,
    // C: `const struct llama_model_tensor_buft_override *` — opaque here.
    pub tensor_buft_overrides: *const std::ffi::c_void,
    // Number of layers to offload to the GPU.
    pub n_gpu_layers: i32,
    // C enum `llama_split_mode`, passed as a raw i32.
    pub split_mode: i32,
    // GPU index used when the split mode targets a single device.
    pub main_gpu: i32,
    // Per-device split proportions (array on the C side — confirm length
    // contract against llama.h).
    pub tensor_split: *const f32,
    // Progress callback taking a fraction and the user-data pointer;
    // per llama.h, returning false cancels loading — confirm.
    pub progress_callback: Option<unsafe extern "C" fn(f32, *mut std::ffi::c_void) -> bool>,
    pub progress_callback_user_data: *mut std::ffi::c_void,
    // C: `const struct llama_model_kv_override *` — opaque here.
    pub kv_overrides: *const std::ffi::c_void,
    // Flags mirroring the bool members of the C struct, in declaration order.
    pub vocab_only: bool,
    pub use_mmap: bool,
    pub use_direct_io: bool,
    pub use_mlock: bool,
    pub check_tensors: bool,
    pub use_extra_bufts: bool,
    pub no_host: bool,
    pub no_alloc: bool,
}
82
/// Parameters for creating a context (mirror of C `struct llama_context_params`).
///
/// Layout must stay in lock-step with the llama.h revision this crate
/// targets; do not reorder fields. Field comments summarize llama.h — confirm
/// against the pinned revision.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct llama_context_params {
    pub n_ctx: u32,             // text context size (per llama.h, 0 = model default)
    pub n_batch: u32,           // max logical batch size for llama_decode
    pub n_ubatch: u32,          // max physical (micro-)batch size
    pub n_seq_max: u32,         // max number of parallel sequences
    pub n_threads: i32,         // threads used for generation
    pub n_threads_batch: i32,   // threads used for batch/prompt processing
    pub rope_scaling_type: i32, // C enum llama_rope_scaling_type, raw i32
    pub pooling_type: i32,      // C enum llama_pooling_type, raw i32
    pub attention_type: i32,    // C enum llama_attention_type, raw i32
    pub flash_attn_type: i32,   // C enum llama_flash_attn_type, raw i32
    pub rope_freq_base: f32,
    pub rope_freq_scale: f32,
    pub yarn_ext_factor: f32,
    pub yarn_attn_factor: f32,
    pub yarn_beta_fast: f32,
    pub yarn_beta_slow: f32,
    pub yarn_orig_ctx: u32,
    pub defrag_thold: f32,
    // FIX: `cb_eval` is a `ggml_backend_sched_eval_callback`:
    //   bool (*)(struct ggml_tensor * t, bool ask, void * user_data)
    // i.e. THREE parameters. The previous two-parameter declaration had the
    // wrong ABI arity for any Rust-side callback (same pointer size, so the
    // struct layout is unchanged). The tensor argument stays `*mut c_void`
    // because ggml types are not bound in this crate.
    pub cb_eval: Option<
        unsafe extern "C" fn(*mut std::ffi::c_void, bool, *mut std::ffi::c_void) -> bool,
    >,
    pub cb_eval_user_data: *mut std::ffi::c_void,
    pub type_k: i32, // C enum ggml_type for the K cache, raw i32
    pub type_v: i32, // C enum ggml_type for the V cache, raw i32
    // C `ggml_abort_callback`: per ggml.h, returning true aborts computation.
    pub abort_callback: Option<unsafe extern "C" fn(*mut std::ffi::c_void) -> bool>,
    pub abort_callback_data: *mut std::ffi::c_void,
    pub embeddings: bool,   // extract embeddings (together with logits)
    pub offload_kqv: bool,  // offload KQV ops (incl. KV cache) to GPU
    pub no_perf: bool,      // skip performance timings
    pub op_offload: bool,
    pub swa_full: bool,
    pub kv_unified: bool,
    // Sampler array + length; element type is opaque here.
    pub samplers: *mut std::ffi::c_void,
    pub n_samplers: usize,
}
119
/// Parameters for building a sampler chain
/// (mirror of C `struct llama_sampler_chain_params`).
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct llama_sampler_chain_params {
    /// Skip collecting performance timings for the chain (per llama.h — confirm).
    pub no_perf: bool,
}