use std::ffi::{CStr, CString, c_char};
use std::num::NonZeroU16;
use std::os::raw::c_int;
use std::path::Path;
use std::ptr::{self, NonNull};

use crate::context::LlamaContext;
use crate::context::params::LlamaContextParams;
use crate::llama_backend::LlamaBackend;
use crate::openai::OpenAIChatTemplateParams;
use crate::token::LlamaToken;
use crate::token_type::LlamaTokenAttrs;
use crate::{
    ApplyChatTemplateError, ChatTemplateError, LlamaContextLoadError, LlamaLoraAdapterInitError,
    LlamaModelLoadError, MetaValError, StringToTokenError, TokenToStringError,
};

/// Truncates `buffer` to `length` bytes and converts it into a `String`,
/// failing if the truncated bytes are not valid UTF-8.
fn truncated_buffer_to_string(
    mut buffer: Vec<u8>,
    length: usize,
) -> Result<String, ApplyChatTemplateError> {
    buffer.truncate(length);

    Ok(String::from_utf8(buffer)?)
}

/// Validates that `length` fits in a `c_int` so it can cross the FFI boundary.
fn validate_string_length_for_tokenizer(length: usize) -> Result<c_int, StringToTokenError> {
    Ok(c_int::try_from(length)?)
}

/// Converts `str` into a `CString` and returns it together with its validated byte length.
fn cstring_with_validated_len(str: &str) -> Result<(CString, c_int), StringToTokenError> {
    let c_string = CString::new(str)?;
    let len = validate_string_length_for_tokenizer(c_string.as_bytes().len())?;
    Ok((c_string, len))
}

pub mod add_bos;
pub mod chat_template_result;
pub mod grammar_trigger;
pub mod llama_chat_message;
pub mod llama_chat_template;
pub mod llama_lora_adapter;
pub mod params;
pub mod rope_type;
pub mod split_mode;
pub mod vocab_type;

pub use add_bos::AddBos;
pub use chat_template_result::ChatTemplateResult;
pub use grammar_trigger::{GrammarTrigger, GrammarTriggerType};
pub use llama_chat_message::LlamaChatMessage;
pub use llama_chat_template::LlamaChatTemplate;
pub use llama_lora_adapter::LlamaLoraAdapter;
pub use rope_type::RopeType;
pub use vocab_type::{LlamaTokenTypeFromIntError, VocabType};

use chat_template_result::{new_empty_chat_template_raw_result, parse_chat_template_raw_result};
use params::LlamaModelParams;

/// A safe wrapper around a non-null `llama.cpp` model pointer.
#[derive(Debug)]
#[repr(transparent)]
pub struct LlamaModel {
    pub model: NonNull<llama_cpp_bindings_sys::llama_model>,
}

unsafe impl Send for LlamaModel {}

unsafe impl Sync for LlamaModel {}

impl LlamaModel {
    /// Returns a raw pointer to the model's vocabulary.
    #[must_use]
    pub fn vocab_ptr(&self) -> *const llama_cpp_bindings_sys::llama_vocab {
        unsafe { llama_cpp_bindings_sys::llama_model_get_vocab(self.model.as_ptr()) }
    }

    /// Returns the context size the model was trained on.
    ///
    /// # Errors
    ///
    /// Returns an error if the value reported by llama.cpp does not fit in a `u32`.
    pub fn n_ctx_train(&self) -> Result<u32, std::num::TryFromIntError> {
        let n_ctx_train = unsafe { llama_cpp_bindings_sys::llama_n_ctx_train(self.model.as_ptr()) };

        u32::try_from(n_ctx_train)
    }

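    /// Iterates over every token in the vocabulary, yielding each token
    /// together with the result of decoding it to a string piece.
    ///
    /// A sketch of dumping the first few entries, assuming `model` exists:
    ///
    /// ```ignore
    /// for (token, piece) in model.tokens(false).take(10) {
    ///     println!("{token:?} => {piece:?}");
    /// }
    /// ```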
    pub fn tokens(
        &self,
        decode_special: bool,
    ) -> impl Iterator<Item = (LlamaToken, Result<String, TokenToStringError>)> + '_ {
        (0..self.n_vocab())
            .map(LlamaToken::new)
            .map(move |llama_token| {
                // A fresh decoder per token keeps each piece independent of the others.
                let mut decoder = encoding_rs::UTF_8.new_decoder();
                (
                    llama_token,
                    self.token_to_piece(llama_token, &mut decoder, decode_special, None),
                )
            })
    }

    /// Returns the beginning-of-stream (BOS) token.
    #[must_use]
    pub fn token_bos(&self) -> LlamaToken {
        let token = unsafe { llama_cpp_bindings_sys::llama_token_bos(self.vocab_ptr()) };
        LlamaToken(token)
    }

    /// Returns the end-of-stream (EOS) token.
    #[must_use]
    pub fn token_eos(&self) -> LlamaToken {
        let token = unsafe { llama_cpp_bindings_sys::llama_token_eos(self.vocab_ptr()) };
        LlamaToken(token)
    }

    /// Returns the newline token.
    #[must_use]
    pub fn token_nl(&self) -> LlamaToken {
        let token = unsafe { llama_cpp_bindings_sys::llama_token_nl(self.vocab_ptr()) };
        LlamaToken(token)
    }

    /// Returns `true` if `token` marks the end of generation.
    #[must_use]
    pub fn is_eog_token(&self, token: LlamaToken) -> bool {
        unsafe { llama_cpp_bindings_sys::llama_token_is_eog(self.vocab_ptr(), token.0) }
    }

    /// Returns the decoder start token for encoder-decoder models.
    #[must_use]
    pub fn decode_start_token(&self) -> LlamaToken {
        let token =
            unsafe { llama_cpp_bindings_sys::llama_model_decoder_start_token(self.model.as_ptr()) };
        LlamaToken(token)
    }

    /// Returns the separator (SEP) token.
    #[must_use]
    pub fn token_sep(&self) -> LlamaToken {
        let token = unsafe { llama_cpp_bindings_sys::llama_vocab_sep(self.vocab_ptr()) };
        LlamaToken(token)
    }

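    /// Tokenizes a string, optionally prepending the BOS token. The tokenizer
    /// is invoked a second time whenever the initial capacity estimate turns
    /// out to be too small.
    ///
    /// A sketch, assuming `model` was loaded elsewhere:
    ///
    /// ```ignore
    /// let tokens = model.str_to_token("Hello world", AddBos::Always)?;
    /// assert!(!tokens.is_empty());
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if the string contains an interior NUL byte or a
    /// length does not fit in the C integer types used by the tokenizer.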
    pub fn str_to_token(
        &self,
        str: &str,
        add_bos: AddBos,
    ) -> Result<Vec<LlamaToken>, StringToTokenError> {
        let add_bos = match add_bos {
            AddBos::Always => true,
            AddBos::Never => false,
        };

        // Rough upper-bound guess; the tokenizer reports the exact size if this is too small.
        let tokens_estimation = std::cmp::max(8, (str.len() / 2) + usize::from(add_bos));
        let mut buffer: Vec<LlamaToken> = Vec::with_capacity(tokens_estimation);

        let (c_string, c_string_len) = cstring_with_validated_len(str)?;
        let buffer_capacity = c_int::try_from(buffer.capacity())?;

        let size = unsafe {
            llama_cpp_bindings_sys::llama_tokenize(
                self.vocab_ptr(),
                c_string.as_ptr(),
                c_string_len,
                buffer
                    .as_mut_ptr()
                    .cast::<llama_cpp_bindings_sys::llama_token>(),
                buffer_capacity,
                add_bos,
                true,
            )
        };

        // A negative return value is the required token count; grow the buffer and retry once.
        let size = if size.is_negative() {
            buffer.reserve_exact(usize::try_from(-size)?);
            unsafe {
                llama_cpp_bindings_sys::llama_tokenize(
                    self.vocab_ptr(),
                    c_string.as_ptr(),
                    c_string_len,
                    buffer
                        .as_mut_ptr()
                        .cast::<llama_cpp_bindings_sys::llama_token>(),
                    -size,
                    add_bos,
                    true,
                )
            }
        } else {
            size
        };

        let size = usize::try_from(size)?;

        // SAFETY: `llama_tokenize` initialized exactly `size` tokens in `buffer`.
        unsafe { buffer.set_len(size) }

        Ok(buffer)
    }

    /// Returns the attributes of the given token.
    ///
    /// # Errors
    ///
    /// Returns an error if llama.cpp reports an attribute value this crate does not know about.
    pub fn token_attr(
        &self,
        LlamaToken(id): LlamaToken,
    ) -> Result<LlamaTokenAttrs, crate::token_type::LlamaTokenTypeFromIntError> {
        let token_type =
            unsafe { llama_cpp_bindings_sys::llama_token_get_attr(self.vocab_ptr(), id) };

        LlamaTokenAttrs::try_from(token_type)
    }

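    /// Decodes a single token into a UTF-8 string piece through a streaming
    /// decoder, so multi-byte characters split across tokens survive
    /// detokenization.
    ///
    /// A sketch of detokenizing a sequence, assuming `model` and `tokens`
    /// exist:
    ///
    /// ```ignore
    /// let mut decoder = encoding_rs::UTF_8.new_decoder();
    /// let mut text = String::new();
    /// for token in &tokens {
    ///     text.push_str(&model.token_to_piece(*token, &mut decoder, false, None)?);
    /// }
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if the token cannot be converted to a piece.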
    pub fn token_to_piece(
        &self,
        token: LlamaToken,
        decoder: &mut encoding_rs::Decoder,
        special: bool,
        lstrip: Option<NonZeroU16>,
    ) -> Result<String, TokenToStringError> {
        // Start with a small buffer and retry once with the exact size llama.cpp reports.
        let bytes = match self.token_to_piece_bytes(token, 8, special, lstrip) {
            Err(TokenToStringError::InsufficientBufferSpace(required_size)) => {
                let buffer_size: usize = (-required_size).try_into()?;

                self.token_to_piece_bytes(token, buffer_size, special, lstrip)
            }
            other => other,
        }?;

        let mut output_piece = String::with_capacity(bytes.len());
        let (_result, _decoded_size, _had_replacements) =
            decoder.decode_to_string(&bytes, &mut output_piece, false);

        Ok(output_piece)
    }

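    /// Converts a token to the raw bytes of its piece using a caller-chosen
    /// initial buffer size. A negative llama.cpp return value is surfaced as
    /// [`TokenToStringError::InsufficientBufferSpace`] so callers can retry
    /// with the reported size.
    ///
    /// A sketch, assuming `model` and `token` exist:
    ///
    /// ```ignore
    /// let bytes = model.token_to_piece_bytes(token, 32, false, None)?;
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if the token type is unknown or the buffer is too small.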
    #[allow(clippy::missing_panics_doc)]
    pub fn token_to_piece_bytes(
        &self,
        token: LlamaToken,
        buffer_size: usize,
        special: bool,
        lstrip: Option<NonZeroU16>,
    ) -> Result<Vec<u8>, TokenToStringError> {
        let string = CString::new(vec![b'*'; buffer_size]).expect("no null");
        let len = string.as_bytes().len();
        let len = c_int::try_from(len)?;
        let buf = string.into_raw();
        let lstrip = lstrip.map_or(0, |strip_count| i32::from(strip_count.get()));
        let size = unsafe {
            llama_cpp_bindings_sys::llama_token_to_piece(
                self.vocab_ptr(),
                token.0,
                buf,
                len,
                lstrip,
                special,
            )
        };

        // Retake ownership of the buffer on every path so it is never leaked,
        // including the error returns below.
        let string = unsafe { CString::from_raw(buf) };

        match size {
            0 => Err(TokenToStringError::UnknownTokenType),
            error_code if error_code.is_negative() => {
                Err(TokenToStringError::InsufficientBufferSpace(error_code))
            }
            size => {
                let mut bytes = string.into_bytes();
                let len = usize::try_from(size)?;
                bytes.truncate(len);

                Ok(bytes)
            }
        }
    }

    /// Returns the number of tokens in the model's vocabulary.
    #[must_use]
    pub fn n_vocab(&self) -> i32 {
        unsafe { llama_cpp_bindings_sys::llama_n_vocab(self.vocab_ptr()) }
    }

    /// Returns the type of vocabulary the model uses.
    ///
    /// # Errors
    ///
    /// Returns an error if llama.cpp reports a vocabulary type this crate does not know about.
    pub fn vocab_type(&self) -> Result<VocabType, LlamaTokenTypeFromIntError> {
        let vocab_type = unsafe { llama_cpp_bindings_sys::llama_vocab_type(self.vocab_ptr()) };

        VocabType::try_from(vocab_type)
    }

    /// Returns the size of the model's embedding vectors.
    #[must_use]
    pub fn n_embd(&self) -> c_int {
        unsafe { llama_cpp_bindings_sys::llama_n_embd(self.model.as_ptr()) }
    }

    /// Returns the total size of all tensors in the model, in bytes.
    #[must_use]
    pub fn size(&self) -> u64 {
        unsafe { llama_cpp_bindings_sys::llama_model_size(self.model.as_ptr()) }
    }

    /// Returns the total number of parameters in the model.
    #[must_use]
    pub fn n_params(&self) -> u64 {
        unsafe { llama_cpp_bindings_sys::llama_model_n_params(self.model.as_ptr()) }
    }

    /// Returns `true` if the model is recurrent.
    #[must_use]
    pub fn is_recurrent(&self) -> bool {
        unsafe { llama_cpp_bindings_sys::llama_model_is_recurrent(self.model.as_ptr()) }
    }

    /// Returns the number of layers in the model.
    ///
    /// # Errors
    ///
    /// Returns an error if the value reported by llama.cpp does not fit in a `u32`.
    pub fn n_layer(&self) -> Result<u32, std::num::TryFromIntError> {
        u32::try_from(unsafe { llama_cpp_bindings_sys::llama_model_n_layer(self.model.as_ptr()) })
    }

    /// Returns the number of attention heads.
    ///
    /// # Errors
    ///
    /// Returns an error if the value reported by llama.cpp does not fit in a `u32`.
    pub fn n_head(&self) -> Result<u32, std::num::TryFromIntError> {
        u32::try_from(unsafe { llama_cpp_bindings_sys::llama_model_n_head(self.model.as_ptr()) })
    }

    /// Returns the number of key-value heads.
    ///
    /// # Errors
    ///
    /// Returns an error if the value reported by llama.cpp does not fit in a `u32`.
    pub fn n_head_kv(&self) -> Result<u32, std::num::TryFromIntError> {
        u32::try_from(unsafe { llama_cpp_bindings_sys::llama_model_n_head_kv(self.model.as_ptr()) })
    }

    /// Returns `true` if the model uses a hybrid architecture.
    #[must_use]
    pub fn is_hybrid(&self) -> bool {
        unsafe { llama_cpp_bindings_sys::llama_model_is_hybrid(self.model.as_ptr()) }
    }

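    /// Looks up a metadata value by key.
    ///
    /// A sketch; `general.architecture` is a common GGUF key, but which keys
    /// exist depends on the model file:
    ///
    /// ```ignore
    /// let arch = model.meta_val_str("general.architecture")?;
    /// println!("architecture: {arch}");
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if the key contains a NUL byte, the key is missing, or
    /// the value is not valid UTF-8.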
    pub fn meta_val_str(&self, key: &str) -> Result<String, MetaValError> {
        let key_cstring = CString::new(key)?;
        let key_ptr = key_cstring.as_ptr();

        extract_meta_string(
            |buf_ptr, buf_len| unsafe {
                llama_cpp_bindings_sys::llama_model_meta_val_str(
                    self.model.as_ptr(),
                    key_ptr,
                    buf_ptr,
                    buf_len,
                )
            },
            256,
        )
    }

    /// Returns the number of metadata entries in the model.
    #[must_use]
    pub fn meta_count(&self) -> i32 {
        unsafe { llama_cpp_bindings_sys::llama_model_meta_count(self.model.as_ptr()) }
    }

    /// Returns the metadata key at `index`.
    ///
    /// # Errors
    ///
    /// Returns an error if `index` is out of range or the key is not valid UTF-8.
    pub fn meta_key_by_index(&self, index: i32) -> Result<String, MetaValError> {
        extract_meta_string(
            |buf_ptr, buf_len| unsafe {
                llama_cpp_bindings_sys::llama_model_meta_key_by_index(
                    self.model.as_ptr(),
                    index,
                    buf_ptr,
                    buf_len,
                )
            },
            256,
        )
    }

    /// Returns the metadata value at `index`.
    ///
    /// # Errors
    ///
    /// Returns an error if `index` is out of range or the value is not valid UTF-8.
    pub fn meta_val_str_by_index(&self, index: i32) -> Result<String, MetaValError> {
        extract_meta_string(
            |buf_ptr, buf_len| unsafe {
                llama_cpp_bindings_sys::llama_model_meta_val_str_by_index(
                    self.model.as_ptr(),
                    index,
                    buf_ptr,
                    buf_len,
                )
            },
            256,
        )
    }

    /// Returns the model's RoPE type, or `None` if it does not use RoPE.
    #[must_use]
    pub fn rope_type(&self) -> Option<RopeType> {
        let raw = unsafe { llama_cpp_bindings_sys::llama_model_rope_type(self.model.as_ptr()) };

        rope_type::rope_type_from_raw(raw)
    }

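    /// Returns the chat template embedded in the model's metadata, or the
    /// named template variant when `name` is `Some`.
    ///
    /// A sketch, assuming the model ships a default template:
    ///
    /// ```ignore
    /// let template = model.chat_template(None)?;
    /// ```
    ///
    /// # Errors
    ///
    /// Returns [`ChatTemplateError::MissingTemplate`] if the model has no
    /// template under the requested name.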
    pub fn chat_template(
        &self,
        name: Option<&str>,
    ) -> Result<LlamaChatTemplate, ChatTemplateError> {
        // Keep the CString alive for the duration of the FFI call; taking the
        // pointer out of a by-value match would leave it dangling.
        let name_cstr = name.and_then(|name| CString::new(name).ok());
        let name_ptr = name_cstr.as_ref().map_or(ptr::null(), |name| name.as_ptr());
        let result = unsafe {
            llama_cpp_bindings_sys::llama_model_chat_template(self.model.as_ptr(), name_ptr)
        };

        if result.is_null() {
            Err(ChatTemplateError::MissingTemplate)
        } else {
            let chat_template_cstr = unsafe { CStr::from_ptr(result) };

            Ok(LlamaChatTemplate(chat_template_cstr.to_owned()))
        }
    }

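    /// Loads a model from a GGUF file on disk.
    ///
    /// A sketch with a hypothetical model path:
    ///
    /// ```ignore
    /// let backend = LlamaBackend::init()?;
    /// let params = LlamaModelParams::default();
    /// let model = LlamaModel::load_from_file(&backend, "models/example.gguf", &params)?;
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if the path is not valid UTF-8, the file does not
    /// exist, or llama.cpp fails to load the model.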
    #[tracing::instrument(skip_all, fields(params))]
    pub fn load_from_file(
        _: &LlamaBackend,
        path: impl AsRef<Path>,
        params: &LlamaModelParams,
    ) -> Result<Self, LlamaModelLoadError> {
        let path = path.as_ref();

        let path_str = path
            .to_str()
            .ok_or_else(|| LlamaModelLoadError::PathToStrError(path.to_path_buf()))?;

        if !path.exists() {
            return Err(LlamaModelLoadError::FileNotFound(path.to_path_buf()));
        }

        let cstr = CString::new(path_str)?;
        let llama_model = unsafe {
            llama_cpp_bindings_sys::llama_load_model_from_file(cstr.as_ptr(), params.params)
        };

        let model = match NonNull::new(llama_model) {
            Some(ptr) => ptr,
            // The file may have disappeared between the check above and the load.
            None if !path.exists() => {
                return Err(LlamaModelLoadError::FileNotFound(path.to_path_buf()));
            }
            None => return Err(LlamaModelLoadError::NullResult),
        };

        Ok(Self { model })
    }

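    /// Initializes a LoRA adapter from a `.gguf` file so it can later be
    /// attached to a context.
    ///
    /// A sketch with a hypothetical adapter path:
    ///
    /// ```ignore
    /// let adapter = model.lora_adapter_init("adapters/example-lora.gguf")?;
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if the path is not valid UTF-8, the file does not
    /// exist, or llama.cpp returns a null adapter.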
    pub fn lora_adapter_init(
        &self,
        path: impl AsRef<Path>,
    ) -> Result<LlamaLoraAdapter, LlamaLoraAdapterInitError> {
        let path = path.as_ref();

        let path_str = path
            .to_str()
            .ok_or_else(|| LlamaLoraAdapterInitError::PathToStrError(path.to_path_buf()))?;

        if !path.exists() {
            return Err(LlamaLoraAdapterInitError::FileNotFound(path.to_path_buf()));
        }

        let cstr = CString::new(path_str)?;
        let raw_adapter = unsafe {
            llama_cpp_bindings_sys::llama_adapter_lora_init(self.model.as_ptr(), cstr.as_ptr())
        };

        let Some(adapter) = NonNull::new(raw_adapter) else {
            return Err(LlamaLoraAdapterInitError::NullResult);
        };

        Ok(LlamaLoraAdapter {
            lora_adapter: adapter,
        })
    }

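    /// Creates a new inference context that borrows this model.
    ///
    /// A sketch, assuming `backend` and `model` were obtained via
    /// [`LlamaModel::load_from_file`]:
    ///
    /// ```ignore
    /// let ctx_params = LlamaContextParams::default()
    ///     .with_n_ctx(std::num::NonZeroU32::new(2048));
    /// let context = model.new_context(&backend, ctx_params)?;
    /// ```
    ///
    /// # Errors
    ///
    /// Returns [`LlamaContextLoadError::NullReturn`] if llama.cpp cannot
    /// allocate the context.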
    #[allow(clippy::needless_pass_by_value)]
    pub fn new_context<'model>(
        &'model self,
        _: &LlamaBackend,
        params: LlamaContextParams,
    ) -> Result<LlamaContext<'model>, LlamaContextLoadError> {
        let context_params = params.context_params;
        let context = unsafe {
            llama_cpp_bindings_sys::llama_new_context_with_model(
                self.model.as_ptr(),
                context_params,
            )
        };
        let context = NonNull::new(context).ok_or(LlamaContextLoadError::NullReturn)?;

        Ok(LlamaContext::new(self, context, params.embeddings()))
    }

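    /// Renders a chat transcript into a prompt string with the given template.
    /// The output buffer starts at twice the combined message length; if
    /// llama.cpp reports a larger requirement, the buffer is resized and the
    /// template is applied once more.
    ///
    /// A sketch, assuming `model` and `template` exist:
    ///
    /// ```ignore
    /// let message = LlamaChatMessage::new("user".to_string(), "hello".to_string())?;
    /// let prompt = model.apply_chat_template(&template, &[message], true)?;
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if template application fails or the rendered prompt
    /// is not valid UTF-8.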
    #[tracing::instrument(skip_all)]
    pub fn apply_chat_template(
        &self,
        tmpl: &LlamaChatTemplate,
        chat: &[LlamaChatMessage],
        add_ass: bool,
    ) -> Result<String, ApplyChatTemplateError> {
        let message_length = chat.iter().fold(0, |acc, chat_message| {
            acc + chat_message.role.to_bytes().len() + chat_message.content.to_bytes().len()
        });
        // Twice the raw message length is usually enough for the rendered prompt;
        // if not, llama.cpp reports the exact size and we retry below.
        let mut buff: Vec<u8> = vec![0; message_length * 2];

        let chat: Vec<llama_cpp_bindings_sys::llama_chat_message> = chat
            .iter()
            .map(|chat_message| llama_cpp_bindings_sys::llama_chat_message {
                role: chat_message.role.as_ptr(),
                content: chat_message.content.as_ptr(),
            })
            .collect();

        let tmpl_ptr = tmpl.0.as_ptr();

        let buff_len: i32 = buff.len().try_into()?;

        let res = unsafe {
            llama_cpp_bindings_sys::llama_chat_apply_template(
                tmpl_ptr,
                chat.as_ptr(),
                chat.len(),
                add_ass,
                buff.as_mut_ptr().cast::<c_char>(),
                buff_len,
            )
        };

        if res > buff_len {
            let required_size: usize = res.try_into()?;
            buff.resize(required_size, 0);

            let new_buff_len: i32 = buff.len().try_into()?;

            let res = unsafe {
                llama_cpp_bindings_sys::llama_chat_apply_template(
                    tmpl_ptr,
                    chat.as_ptr(),
                    chat.len(),
                    add_ass,
                    buff.as_mut_ptr().cast::<c_char>(),
                    new_buff_len,
                )
            };
            let final_size: usize = res.try_into()?;

            return truncated_buffer_to_string(buff, final_size);
        }

        let final_size: usize = res.try_into()?;

        truncated_buffer_to_string(buff, final_size)
    }

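    /// Applies the chat template with optional OpenAI-compatible tool
    /// definitions and an optional JSON schema constraint. Tool-call parsing is
    /// enabled only when a non-empty tools JSON string is supplied.
    ///
    /// A sketch; the tools JSON is illustrative and `model`, `template`, and
    /// `messages` are assumed to exist:
    ///
    /// ```ignore
    /// let tools = r#"[{"type":"function","function":{"name":"lookup","parameters":{"type":"object"}}}]"#;
    /// let result = model.apply_chat_template_with_tools_oaicompat(
    ///     &template,
    ///     &messages,
    ///     Some(tools),
    ///     None,
    ///     true,
    /// )?;
    /// println!("{}", result.prompt);
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if an input contains a NUL byte or template
    /// application fails.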
    #[tracing::instrument(skip_all)]
    pub fn apply_chat_template_with_tools_oaicompat(
        &self,
        tmpl: &LlamaChatTemplate,
        messages: &[LlamaChatMessage],
        tools_json: Option<&str>,
        json_schema: Option<&str>,
        add_generation_prompt: bool,
    ) -> Result<ChatTemplateResult, ApplyChatTemplateError> {
        let chat: Vec<llama_cpp_bindings_sys::llama_chat_message> = messages
            .iter()
            .map(|chat_message| llama_cpp_bindings_sys::llama_chat_message {
                role: chat_message.role.as_ptr(),
                content: chat_message.content.as_ptr(),
            })
            .collect();

        let tools_cstr = tools_json.map(CString::new).transpose()?;
        let json_schema_cstr = json_schema.map(CString::new).transpose()?;

        let mut raw_result = new_empty_chat_template_raw_result();

        let rc = unsafe {
            llama_cpp_bindings_sys::llama_rs_apply_chat_template_with_tools_oaicompat(
                self.model.as_ptr(),
                tmpl.0.as_ptr(),
                chat.as_ptr(),
                chat.len(),
                tools_cstr
                    .as_ref()
                    .map_or(ptr::null(), |cstr| cstr.as_ptr()),
                json_schema_cstr
                    .as_ref()
                    .map_or(ptr::null(), |cstr| cstr.as_ptr()),
                add_generation_prompt,
                &raw mut raw_result,
            )
        };

        // Only attempt to parse tool calls when tools were actually supplied.
        let parse_tool_calls = tools_json.is_some_and(|tools| !tools.is_empty());

        unsafe { parse_chat_template_raw_result(rc, &raw mut raw_result, parse_tool_calls) }
    }

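    /// Applies the chat template through the full OpenAI-compatible parameter
    /// set, forwarding each optional field to llama.cpp as a nullable C string.
    ///
    /// A minimal sketch; the JSON payloads are illustrative, and `model` and
    /// `template` are assumed to exist:
    ///
    /// ```ignore
    /// let params = OpenAIChatTemplateParams {
    ///     messages_json: r#"[{"role":"user","content":"hello"}]"#,
    ///     tools_json: None,
    ///     tool_choice: None,
    ///     json_schema: None,
    ///     grammar: None,
    ///     reasoning_format: None,
    ///     chat_template_kwargs: None,
    ///     add_generation_prompt: true,
    ///     use_jinja: true,
    ///     parallel_tool_calls: false,
    ///     enable_thinking: false,
    ///     add_bos: false,
    ///     add_eos: false,
    ///     parse_tool_calls: false,
    /// };
    /// let result = model.apply_chat_template_oaicompat(&template, &params)?;
    /// println!("{}", result.prompt);
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if any field contains a NUL byte or template
    /// application fails.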
    #[tracing::instrument(skip_all)]
    pub fn apply_chat_template_oaicompat(
        &self,
        tmpl: &LlamaChatTemplate,
        params: &OpenAIChatTemplateParams<'_>,
    ) -> Result<ChatTemplateResult, ApplyChatTemplateError> {
        let parse_tool_calls = params.parse_tool_calls;
        let messages_cstr = CString::new(params.messages_json)?;
        let tools_cstr = params.tools_json.map(CString::new).transpose()?;
        let tool_choice_cstr = params.tool_choice.map(CString::new).transpose()?;
        let json_schema_cstr = params.json_schema.map(CString::new).transpose()?;
        let grammar_cstr = params.grammar.map(CString::new).transpose()?;
        let reasoning_cstr = params.reasoning_format.map(CString::new).transpose()?;
        let kwargs_cstr = params.chat_template_kwargs.map(CString::new).transpose()?;

        let mut raw_result = new_empty_chat_template_raw_result();

        let ffi_params = llama_cpp_bindings_sys::llama_rs_chat_template_oaicompat_params {
            messages: messages_cstr.as_ptr(),
            tools: tools_cstr
                .as_ref()
                .map_or(ptr::null(), |cstr| cstr.as_ptr()),
            tool_choice: tool_choice_cstr
                .as_ref()
                .map_or(ptr::null(), |cstr| cstr.as_ptr()),
            json_schema: json_schema_cstr
                .as_ref()
                .map_or(ptr::null(), |cstr| cstr.as_ptr()),
            grammar: grammar_cstr
                .as_ref()
                .map_or(ptr::null(), |cstr| cstr.as_ptr()),
            reasoning_format: reasoning_cstr
                .as_ref()
                .map_or(ptr::null(), |cstr| cstr.as_ptr()),
            chat_template_kwargs: kwargs_cstr
                .as_ref()
                .map_or(ptr::null(), |cstr| cstr.as_ptr()),
            add_generation_prompt: params.add_generation_prompt,
            use_jinja: params.use_jinja,
            parallel_tool_calls: params.parallel_tool_calls,
            enable_thinking: params.enable_thinking,
            add_bos: params.add_bos,
            add_eos: params.add_eos,
        };

        let rc = unsafe {
            llama_cpp_bindings_sys::llama_rs_apply_chat_template_oaicompat(
                self.model.as_ptr(),
                tmpl.0.as_ptr(),
                &raw const ffi_params,
                &raw mut raw_result,
            )
        };

        unsafe { parse_chat_template_raw_result(rc, &raw mut raw_result, parse_tool_calls) }
    }
}

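/// Calls a llama.cpp metadata getter into a temporary buffer, growing the
/// buffer and retrying when the C side reports a length that exceeds the
/// current capacity. The getter must NUL-terminate the string at the reported
/// length; a missing terminator is treated as an error.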
fn extract_meta_string<TCFunction>(
    c_function: TCFunction,
    capacity: usize,
) -> Result<String, MetaValError>
where
    TCFunction: Fn(*mut c_char, usize) -> i32,
{
    let mut buffer = vec![0u8; capacity];
    let result = c_function(buffer.as_mut_ptr().cast::<c_char>(), buffer.len());

    if result < 0 {
        return Err(MetaValError::NegativeReturn(result));
    }

    let returned_len = result.cast_unsigned() as usize;

    // The C side reports the full length; retry with room for it plus the NUL terminator.
    if returned_len >= capacity {
        return extract_meta_string(c_function, returned_len + 1);
    }

    // A well-behaved getter leaves a NUL terminator at the reported length.
    if buffer.get(returned_len) != Some(&0) {
        return Err(MetaValError::NegativeReturn(-1));
    }

    buffer.truncate(returned_len);

    Ok(String::from_utf8(buffer)?)
}

impl Drop for LlamaModel {
    fn drop(&mut self) {
        unsafe { llama_cpp_bindings_sys::llama_free_model(self.model.as_ptr()) }
    }
}

#[cfg(test)]
mod extract_meta_string_tests {
    use super::extract_meta_string;
    use crate::MetaValError;

    #[test]
    fn returns_error_when_null_terminator_missing() {
        let result = extract_meta_string(
            |buf_ptr, buf_len| {
                let buffer =
                    unsafe { std::slice::from_raw_parts_mut(buf_ptr.cast::<u8>(), buf_len) };
                buffer[0] = b'a';
                buffer[1] = b'b';
                buffer[2] = b'c';
                // Report a length of 2, but leave `b'c'` where the NUL terminator belongs.
                2
            },
            4,
        );

        assert_eq!(result.unwrap_err(), MetaValError::NegativeReturn(-1));
    }

    #[test]
    fn returns_error_for_negative_return_value() {
        let result = extract_meta_string(|_buf_ptr, _buf_len| -5, 4);

        assert_eq!(result.unwrap_err(), MetaValError::NegativeReturn(-5));
    }

    #[test]
    fn returns_error_for_invalid_utf8_data() {
        let result = extract_meta_string(
            |buf_ptr, buf_len| {
                let buffer =
                    unsafe { std::slice::from_raw_parts_mut(buf_ptr.cast::<u8>(), buf_len) };
                buffer[0] = 0xFF;
                buffer[1] = 0xFE;
                buffer[2] = 0;
                2
            },
            4,
        );

        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("FromUtf8Error"));
    }

    #[test]
    fn triggers_buffer_resize_when_returned_len_exceeds_capacity() {
        let call_count = std::cell::Cell::new(0);
        let result = extract_meta_string(
            |buf_ptr, buf_len| {
                let count = call_count.get();
                call_count.set(count + 1);
                if count == 0 {
                    // First call: claim the value needs 10 bytes, forcing a resize and retry.
                    10
                } else {
                    // Second call: the buffer is now large enough; write "hi\0".
                    let buffer =
                        unsafe { std::slice::from_raw_parts_mut(buf_ptr.cast::<u8>(), buf_len) };
                    buffer[0] = b'h';
                    buffer[1] = b'i';
                    buffer[2] = 0;
                    2
                }
            },
            4,
        );

        assert_eq!(result.unwrap(), "hi");
    }

    #[test]
    fn cstring_with_validated_len_null_byte_returns_error() {
        let result = super::cstring_with_validated_len("null\0byte");

        assert!(result.is_err());
    }

    #[test]
    fn validate_string_length_overflow_returns_error() {
        let result = super::validate_string_length_for_tokenizer(usize::MAX);

        assert!(result.is_err());
    }

    #[test]
    fn truncated_buffer_to_string_with_invalid_utf8_returns_error() {
        let invalid_utf8 = vec![0xff, 0xfe, 0xfd];
        let result = super::truncated_buffer_to_string(invalid_utf8, 3);

        assert!(result.is_err());
    }
}

#[cfg(test)]
#[cfg(feature = "tests_that_use_llms")]
mod tests {
    use serial_test::serial;

    use super::LlamaModel;
    use crate::llama_backend::LlamaBackend;
    use crate::model::AddBos;
    use crate::model::params::LlamaModelParams;
    use crate::test_model;

    #[test]
    #[serial]
    fn model_loads_with_valid_metadata() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        assert!(model.n_vocab() > 0);
        assert!(model.n_embd() > 0);
        assert!(model.n_params() > 0);
        assert!(model.n_ctx_train().unwrap() > 0);
    }

    #[test]
    #[serial]
    fn special_tokens_exist() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let bos = model.token_bos();
        let eos = model.token_eos();
        assert_ne!(bos, eos);
        assert!(model.is_eog_token(eos));
        assert!(!model.is_eog_token(bos));
    }

    #[test]
    #[serial]
    fn str_to_token_roundtrip() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let tokens = model.str_to_token("hello world", AddBos::Never).unwrap();
        assert!(!tokens.is_empty());
        let mut decoder = encoding_rs::UTF_8.new_decoder();
        let piece = model
            .token_to_piece(tokens[0], &mut decoder, false, None)
            .unwrap();
        assert!(!piece.is_empty());
    }

    #[test]
    #[serial]
    fn chat_template_returns_non_empty() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None);
        assert!(template.is_ok());
    }

    #[test]
    #[serial]
    fn apply_chat_template_produces_prompt() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let message =
            crate::model::LlamaChatMessage::new("user".to_string(), "hello".to_string()).unwrap();
        let prompt = model.apply_chat_template(&template, &[message], true);
        assert!(prompt.is_ok());
        assert!(!prompt.unwrap().is_empty());
    }

    #[test]
    #[serial]
    fn apply_chat_template_oaicompat_produces_result() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let params = crate::openai::OpenAIChatTemplateParams {
            messages_json: r#"[{"role":"user","content":"hello"}]"#,
            tools_json: None,
            tool_choice: None,
            json_schema: None,
            grammar: None,
            reasoning_format: Some("none"),
            chat_template_kwargs: None,
            add_generation_prompt: true,
            use_jinja: true,
            parallel_tool_calls: false,
            enable_thinking: false,
            add_bos: false,
            add_eos: false,
            parse_tool_calls: false,
        };
        let result = model.apply_chat_template_oaicompat(&template, &params);
        assert!(result.is_ok());
        assert!(!result.unwrap().prompt.is_empty());
    }

    #[test]
    #[serial]
    fn meta_count_returns_positive() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        assert!(model.meta_count() > 0);
    }

    #[test]
    #[serial]
    fn tokens_iterator_produces_valid_entries() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let mut count = 0;

        for (token, piece_result) in model.tokens(false) {
            assert!(token.0 >= 0);
            // Decoding may legitimately fail for some tokens; only existence matters here.
            let _ = piece_result;
            count += 1;

            if count >= 100 {
                break;
            }
        }

        assert_eq!(count, 100);
    }

    #[test]
    #[serial]
    fn token_to_piece_bytes_returns_bytes_for_known_token() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let tokens = model.str_to_token("hello", AddBos::Never).unwrap();
        let bytes = model
            .token_to_piece_bytes(tokens[0], 32, false, None)
            .unwrap();

        assert!(!bytes.is_empty());
    }

    #[test]
    #[serial]
    fn n_layer_returns_positive() {
        let (_backend, model) = test_model::load_default_model().unwrap();

        assert!(model.n_layer().unwrap() > 0);
    }

    #[test]
    #[serial]
    fn n_head_returns_positive() {
        let (_backend, model) = test_model::load_default_model().unwrap();

        assert!(model.n_head().unwrap() > 0);
    }

    #[test]
    #[serial]
    fn n_head_kv_returns_positive() {
        let (_backend, model) = test_model::load_default_model().unwrap();

        assert!(model.n_head_kv().unwrap() > 0);
    }

    #[test]
    #[serial]
    fn meta_key_by_index_returns_valid_key() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let key = model.meta_key_by_index(0).unwrap();

        assert!(!key.is_empty());
    }

    #[test]
    #[serial]
    fn meta_val_str_by_index_returns_valid_value() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let value = model.meta_val_str_by_index(0).unwrap();

        assert!(!value.is_empty());
    }

    #[test]
    #[serial]
    fn meta_key_by_index_out_of_range_returns_error() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let result = model.meta_key_by_index(999_999);

        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn meta_val_str_by_index_out_of_range_returns_error() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let result = model.meta_val_str_by_index(999_999);

        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn meta_val_str_returns_value_for_known_key() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let first_key = model.meta_key_by_index(0).unwrap();
        let value = model.meta_val_str(&first_key).unwrap();

        assert!(!value.is_empty());
    }

    #[test]
    #[serial]
    fn model_size_returns_nonzero() {
        let (_backend, model) = test_model::load_default_model().unwrap();

        assert!(model.size() > 0);
    }

    #[test]
    #[serial]
    fn is_recurrent_returns_false_for_transformer() {
        let (_backend, model) = test_model::load_default_model().unwrap();

        assert!(!model.is_recurrent());
    }

    #[test]
    #[serial]
    fn rope_type_does_not_panic() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let _rope_type = model.rope_type();
    }

    #[test]
    #[serial]
    fn load_model_with_invalid_path_returns_error() {
        let backend = LlamaBackend::init().unwrap();
        let model_params = LlamaModelParams::default();
        let result = LlamaModel::load_from_file(&backend, "/nonexistent/model.gguf", &model_params);

        assert_eq!(
            result.unwrap_err(),
            crate::LlamaModelLoadError::FileNotFound(std::path::PathBuf::from(
                "/nonexistent/model.gguf"
            ))
        );
    }

    #[test]
    #[serial]
    fn load_model_with_invalid_file_content_returns_null_result() {
        let backend = LlamaBackend::init().unwrap();
        let model_params = LlamaModelParams::default();
        let dummy_path = std::env::temp_dir().join("llama_test_invalid_model.gguf");
        std::fs::write(&dummy_path, b"not a valid gguf model file").unwrap();

        let result = LlamaModel::load_from_file(&backend, &dummy_path, &model_params);

        assert_eq!(result.unwrap_err(), crate::LlamaModelLoadError::NullResult);
        let _ = std::fs::remove_file(&dummy_path);
    }

    #[cfg(unix)]
    #[test]
    #[serial]
    fn load_model_with_non_utf8_path_returns_path_to_str_error() {
        use std::ffi::OsStr;
        use std::os::unix::ffi::OsStrExt;

        let backend = LlamaBackend::init().unwrap();
        let model_params = LlamaModelParams::default();
        let non_utf8_path = std::path::Path::new(OsStr::from_bytes(b"/tmp/\xff\xfe.gguf"));

        let result = LlamaModel::load_from_file(&backend, non_utf8_path, &model_params);

        assert_eq!(
            result.unwrap_err(),
            crate::LlamaModelLoadError::PathToStrError(non_utf8_path.to_path_buf())
        );
    }

    #[cfg(unix)]
    #[test]
    #[serial]
    fn lora_adapter_init_with_non_utf8_path_returns_error() {
        use std::ffi::OsStr;
        use std::os::unix::ffi::OsStrExt;

        let (_backend, model) = test_model::load_default_model().unwrap();
        let non_utf8_path = std::path::Path::new(OsStr::from_bytes(b"/tmp/\xff\xfe.gguf"));

        let result = model.lora_adapter_init(non_utf8_path);

        assert_eq!(
            result.unwrap_err(),
            crate::LlamaLoraAdapterInitError::PathToStrError(non_utf8_path.to_path_buf())
        );
    }

    #[test]
    #[serial]
    fn lora_adapter_init_with_invalid_path_returns_error() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let result = model.lora_adapter_init("/nonexistent/path/lora.gguf");

        assert_eq!(
            result.unwrap_err(),
            crate::LlamaLoraAdapterInitError::FileNotFound(std::path::PathBuf::from(
                "/nonexistent/path/lora.gguf"
            ))
        );
    }

    #[test]
    #[serial]
    fn new_context_returns_valid_context() {
        let (backend, model) = test_model::load_default_model().unwrap();
        let ctx_params = crate::context::params::LlamaContextParams::default()
            .with_n_ctx(std::num::NonZeroU32::new(256));
        let context = model.new_context(&backend, ctx_params).unwrap();

        assert!(context.n_ctx() > 0);
    }

    #[test]
    #[serial]
    fn token_nl_returns_valid_token() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let nl_token = model.token_nl();

        assert!(nl_token.0 >= 0);
    }

    #[test]
    #[serial]
    fn decode_start_token_returns_valid_token() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let _decode_start = model.decode_start_token();
    }

    #[test]
    #[serial]
    fn token_sep_returns_valid_token() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let _sep_token = model.token_sep();
    }

    #[test]
    #[serial]
    fn token_to_piece_handles_large_token_requiring_buffer_resize() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let mut decoder = encoding_rs::UTF_8.new_decoder();

        for (token, _) in model.tokens(true).take(200) {
            let result = model.token_to_piece(token, &mut decoder, true, None);
            assert!(result.is_ok());
        }
    }

    #[test]
    #[serial]
    fn token_to_piece_bytes_insufficient_buffer_returns_error() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let tokens = model.str_to_token("hello", AddBos::Never).unwrap();
        let result = model.token_to_piece_bytes(tokens[0], 1, false, None);

        assert!(
            result
                .unwrap_err()
                .to_string()
                .contains("Insufficient Buffer Space")
        );
    }

    #[test]
    #[serial]
    fn token_to_piece_with_lstrip() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let mut decoder = encoding_rs::UTF_8.new_decoder();
        let tokens = model.str_to_token("hello", AddBos::Never).unwrap();
        let result =
            model.token_to_piece(tokens[0], &mut decoder, false, std::num::NonZeroU16::new(1));

        assert!(result.is_ok());
    }

    #[test]
    #[serial]
    fn n_vocab_matches_tokens_iterator_count() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let n_vocab = model.n_vocab();
        let count = model.tokens(false).count();

        assert_eq!(count, n_vocab as usize);
    }

    #[test]
    #[serial]
    fn token_attr_returns_valid_attr() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let bos = model.token_bos();
        let _attr = model.token_attr(bos).unwrap();
    }

    #[test]
    #[serial]
    fn vocab_type_returns_valid_type() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let _vocab_type = model.vocab_type().unwrap();
    }

    #[test]
    #[serial]
    fn apply_chat_template_buffer_resize_with_long_messages() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let long_content = "a".repeat(2000);
        let message =
            crate::model::LlamaChatMessage::new("user".to_string(), long_content).unwrap();
        let prompt = model.apply_chat_template(&template, &[message], true);

        assert!(prompt.is_ok());
        assert!(!prompt.unwrap().is_empty());
    }

    #[test]
    #[serial]
    fn meta_val_str_with_long_value_triggers_buffer_resize() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let count = model.meta_count();

        for index in 0..count {
            let key = model.meta_key_by_index(index);
            let value = model.meta_val_str_by_index(index);
            assert!(key.is_ok());
            assert!(value.is_ok());
        }
    }

    #[test]
    #[serial]
    fn str_to_token_with_add_bos_never() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let tokens_with_bos = model.str_to_token("hello", AddBos::Always).unwrap();
        let tokens_without_bos = model.str_to_token("hello", AddBos::Never).unwrap();

        assert!(tokens_with_bos.len() >= tokens_without_bos.len());
    }

    #[test]
    #[serial]
    fn apply_chat_template_with_tools_oaicompat_produces_result() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let message =
            crate::model::LlamaChatMessage::new("user".to_string(), "hello".to_string()).unwrap();
        let result =
            model.apply_chat_template_with_tools_oaicompat(&template, &[message], None, None, true);

        assert!(result.is_ok());
        assert!(!result.unwrap().prompt.is_empty());
    }

    #[test]
    #[serial]
    fn apply_chat_template_with_tools_oaicompat_with_tools_json() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let message =
            crate::model::LlamaChatMessage::new("user".to_string(), "hello".to_string()).unwrap();
        let tools =
            r#"[{"type":"function","function":{"name":"test","parameters":{"type":"object"}}}]"#;
        let result = model.apply_chat_template_with_tools_oaicompat(
            &template,
            &[message],
            Some(tools),
            None,
            true,
        );

        assert!(result.is_ok());
    }

    #[test]
    #[serial]
    fn apply_chat_template_with_tools_oaicompat_with_json_schema() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let message =
            crate::model::LlamaChatMessage::new("user".to_string(), "hello".to_string()).unwrap();
        let schema = r#"{"type":"object","properties":{"name":{"type":"string"}}}"#;
        let result = model.apply_chat_template_with_tools_oaicompat(
            &template,
            &[message],
            None,
            Some(schema),
            true,
        );

        assert!(result.is_ok());
    }

    #[test]
    #[serial]
    fn apply_chat_template_oaicompat_with_tools_and_tool_choice() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let params = crate::openai::OpenAIChatTemplateParams {
            messages_json: r#"[{"role":"user","content":"hello"}]"#,
            tools_json: Some(
                r#"[{"type":"function","function":{"name":"test","parameters":{"type":"object","properties":{}}}}]"#,
            ),
            tool_choice: Some("auto"),
            json_schema: None,
            grammar: None,
            reasoning_format: Some("none"),
            chat_template_kwargs: None,
            add_generation_prompt: true,
            use_jinja: true,
            parallel_tool_calls: false,
            enable_thinking: false,
            add_bos: false,
            add_eos: false,
            parse_tool_calls: true,
        };
        let result = model.apply_chat_template_oaicompat(&template, &params);

        assert!(result.is_ok());
    }

    #[test]
    #[serial]
    fn apply_chat_template_oaicompat_with_json_schema_field() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let params = crate::openai::OpenAIChatTemplateParams {
            messages_json: r#"[{"role":"user","content":"hello"}]"#,
            tools_json: None,
            tool_choice: None,
            json_schema: Some(r#"{"type":"object","properties":{"name":{"type":"string"}}}"#),
            grammar: None,
            reasoning_format: Some("none"),
            chat_template_kwargs: None,
            add_generation_prompt: true,
            use_jinja: true,
            parallel_tool_calls: false,
            enable_thinking: false,
            add_bos: false,
            add_eos: false,
            parse_tool_calls: false,
        };
        let result = model.apply_chat_template_oaicompat(&template, &params);

        assert!(result.is_ok());
    }

    #[test]
    #[serial]
    fn apply_chat_template_oaicompat_with_grammar_field() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let params = crate::openai::OpenAIChatTemplateParams {
            messages_json: r#"[{"role":"user","content":"hello"}]"#,
            tools_json: None,
            tool_choice: None,
            json_schema: None,
            grammar: Some("root ::= \"hello\""),
            reasoning_format: Some("none"),
            chat_template_kwargs: None,
            add_generation_prompt: true,
            use_jinja: true,
            parallel_tool_calls: false,
            enable_thinking: false,
            add_bos: false,
            add_eos: false,
            parse_tool_calls: false,
        };
        let result = model.apply_chat_template_oaicompat(&template, &params);

        assert!(result.is_ok());
    }

    #[test]
    #[serial]
    fn apply_chat_template_oaicompat_with_kwargs_field() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let params = crate::openai::OpenAIChatTemplateParams {
            messages_json: r#"[{"role":"user","content":"hello"}]"#,
            tools_json: None,
            tool_choice: None,
            json_schema: None,
            grammar: None,
            reasoning_format: Some("none"),
            chat_template_kwargs: Some(r#"{"bos_token": "<|im_start|>"}"#),
            add_generation_prompt: true,
            use_jinja: true,
            parallel_tool_calls: false,
            enable_thinking: false,
            add_bos: false,
            add_eos: false,
            parse_tool_calls: false,
        };
        let result = model.apply_chat_template_oaicompat(&template, &params);

        assert!(result.is_ok());
    }

    #[test]
    #[serial]
    fn chat_template_with_nonexistent_name_returns_error() {
        let (_backend, model) = test_model::load_default_model().unwrap();

        let result = model.chat_template(Some("nonexistent_template_name_xyz"));

        assert_eq!(
            result.unwrap_err(),
            crate::ChatTemplateError::MissingTemplate
        );
    }

    #[test]
    #[serial]
    fn lora_adapter_init_with_invalid_gguf_returns_null_result() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let dummy_path = std::env::temp_dir().join("llama_test_dummy_lora.gguf");
        std::fs::write(&dummy_path, b"not a valid gguf").unwrap();

        let result = model.lora_adapter_init(&dummy_path);

        assert_eq!(
            result.unwrap_err(),
            crate::LlamaLoraAdapterInitError::NullResult
        );
        let _ = std::fs::remove_file(&dummy_path);
    }

    #[test]
    #[serial]
    fn str_to_token_with_many_tokens_triggers_buffer_resize() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        // Lots of short numbers tokenize to more tokens than the initial
        // `str.len() / 2` estimate, forcing the second llama_tokenize pass.
        let many_numbers: String = (0..2000).map(|number| format!("{number} ")).collect();

        let tokens = model.str_to_token(&many_numbers, AddBos::Always).unwrap();

        assert!(tokens.len() > many_numbers.len() / 2);
    }

    #[test]
    #[serial]
    fn rope_type_returns_valid_result_for_test_model() {
        let (_backend, model) = test_model::load_default_model().unwrap();

        let _rope_type = model.rope_type();
    }

    #[test]
    #[serial]
    fn meta_val_str_with_null_byte_in_key_returns_error() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let result = model.meta_val_str("key\0with_null");

        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn apply_chat_template_with_tools_null_byte_in_tools_returns_error() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let message =
            crate::model::LlamaChatMessage::new("user".to_string(), "hello".to_string()).unwrap();
        let result = model.apply_chat_template_with_tools_oaicompat(
            &template,
            &[message],
            Some("tools\0null"),
            None,
            true,
        );

        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn apply_chat_template_with_tools_null_byte_in_json_schema_returns_error() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let message =
            crate::model::LlamaChatMessage::new("user".to_string(), "hello".to_string()).unwrap();
        let result = model.apply_chat_template_with_tools_oaicompat(
            &template,
            &[message],
            None,
            Some("schema\0null"),
            true,
        );

        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn apply_chat_template_oaicompat_with_null_byte_in_messages_returns_error() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let params = crate::openai::OpenAIChatTemplateParams {
            messages_json: "messages\0null",
            tools_json: None,
            tool_choice: None,
            json_schema: None,
            grammar: None,
            reasoning_format: None,
            chat_template_kwargs: None,
            add_generation_prompt: true,
            use_jinja: true,
            parallel_tool_calls: false,
            enable_thinking: false,
            add_bos: false,
            add_eos: false,
            parse_tool_calls: false,
        };
        let result = model.apply_chat_template_oaicompat(&template, &params);

        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn apply_chat_template_oaicompat_with_null_byte_in_tools_returns_error() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let params = crate::openai::OpenAIChatTemplateParams {
            messages_json: r#"[{"role":"user","content":"hello"}]"#,
            tools_json: Some("tools\0null"),
            tool_choice: None,
            json_schema: None,
            grammar: None,
            reasoning_format: None,
            chat_template_kwargs: None,
            add_generation_prompt: true,
            use_jinja: true,
            parallel_tool_calls: false,
            enable_thinking: false,
            add_bos: false,
            add_eos: false,
            parse_tool_calls: false,
        };
        let result = model.apply_chat_template_oaicompat(&template, &params);

        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn apply_chat_template_oaicompat_with_null_byte_in_tool_choice_returns_error() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let params = crate::openai::OpenAIChatTemplateParams {
            messages_json: r#"[{"role":"user","content":"hello"}]"#,
            tools_json: None,
            tool_choice: Some("choice\0null"),
            json_schema: None,
            grammar: None,
            reasoning_format: None,
            chat_template_kwargs: None,
            add_generation_prompt: true,
            use_jinja: true,
            parallel_tool_calls: false,
            enable_thinking: false,
            add_bos: false,
            add_eos: false,
            parse_tool_calls: false,
        };
        let result = model.apply_chat_template_oaicompat(&template, &params);

        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn apply_chat_template_oaicompat_with_null_byte_in_json_schema_returns_error() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let params = crate::openai::OpenAIChatTemplateParams {
            messages_json: r#"[{"role":"user","content":"hello"}]"#,
            tools_json: None,
            tool_choice: None,
            json_schema: Some("schema\0null"),
            grammar: None,
            reasoning_format: None,
            chat_template_kwargs: None,
            add_generation_prompt: true,
            use_jinja: true,
            parallel_tool_calls: false,
            enable_thinking: false,
            add_bos: false,
            add_eos: false,
            parse_tool_calls: false,
        };
        let result = model.apply_chat_template_oaicompat(&template, &params);

        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn apply_chat_template_oaicompat_with_null_byte_in_grammar_returns_error() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let params = crate::openai::OpenAIChatTemplateParams {
            messages_json: r#"[{"role":"user","content":"hello"}]"#,
            tools_json: None,
            tool_choice: None,
            json_schema: None,
            grammar: Some("grammar\0null"),
            reasoning_format: None,
            chat_template_kwargs: None,
            add_generation_prompt: true,
            use_jinja: true,
            parallel_tool_calls: false,
            enable_thinking: false,
            add_bos: false,
            add_eos: false,
            parse_tool_calls: false,
        };
        let result = model.apply_chat_template_oaicompat(&template, &params);

        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn apply_chat_template_oaicompat_with_null_byte_in_reasoning_format_returns_error() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let params = crate::openai::OpenAIChatTemplateParams {
            messages_json: r#"[{"role":"user","content":"hello"}]"#,
            tools_json: None,
            tool_choice: None,
            json_schema: None,
            grammar: None,
            reasoning_format: Some("format\0null"),
            chat_template_kwargs: None,
            add_generation_prompt: true,
            use_jinja: true,
            parallel_tool_calls: false,
            enable_thinking: false,
            add_bos: false,
            add_eos: false,
            parse_tool_calls: false,
        };
        let result = model.apply_chat_template_oaicompat(&template, &params);

        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn apply_chat_template_oaicompat_with_null_byte_in_kwargs_returns_error() {
        let (_backend, model) = test_model::load_default_model().unwrap();
        let template = model.chat_template(None).unwrap();
        let params = crate::openai::OpenAIChatTemplateParams {
            messages_json: r#"[{"role":"user","content":"hello"}]"#,
            tools_json: None,
            tool_choice: None,
            json_schema: None,
            grammar: None,
            reasoning_format: None,
            chat_template_kwargs: Some("kwargs\0null"),
            add_generation_prompt: true,
            use_jinja: true,
            parallel_tool_calls: false,
            enable_thinking: false,
            add_bos: false,
            add_eos: false,
            parse_tool_calls: false,
        };
        let result = model.apply_chat_template_oaicompat(&template, &params);

        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn new_context_with_huge_ctx_returns_null_error() {
        let (backend, model) = test_model::load_default_model().unwrap();
        let ctx_params = crate::context::params::LlamaContextParams::default()
            .with_n_ctx(std::num::NonZeroU32::new(u32::MAX));

        let result = model.new_context(&backend, ctx_params);

        assert!(result.is_err());
    }

    #[test]
    #[serial]
    fn sample_returns_result_and_succeeds_with_valid_index() {
        use crate::sampling::LlamaSampler;

        let (backend, model) = test_model::load_default_model().unwrap();
        let ctx_params = crate::context::params::LlamaContextParams::default()
            .with_n_ctx(std::num::NonZeroU32::new(256));
        let mut context = model.new_context(&backend, ctx_params).unwrap();

        let tokens = model.str_to_token("Hello", AddBos::Always).unwrap();
        let mut batch = crate::llama_batch::LlamaBatch::new(512, 1).unwrap();

        batch.add_sequence(&tokens, 0, false).unwrap();

        context.decode(&mut batch).unwrap();

        let mut sampler =
            LlamaSampler::chain_simple([LlamaSampler::temp(0.8), LlamaSampler::greedy()]);

        // Sampling at the index of the last decoded token must succeed.
        let result = sampler.sample(&context, batch.n_tokens() - 1);

        assert!(result.is_ok());
    }

    #[test]
    #[serial]
    fn grammar_sampler_constrains_output_to_yes_or_no() {
        use crate::sampling::LlamaSampler;

        let (backend, model) = test_model::load_default_model().unwrap();

        let ctx_params = crate::context::params::LlamaContextParams::default()
            .with_n_ctx(std::num::NonZeroU32::new(512));
        let mut context = model.new_context(&backend, ctx_params).unwrap();

        let prompt = "<|im_start|>user\nIs the sky blue? Answer yes or no.<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n";
        let tokens = model.str_to_token(prompt, AddBos::Always).unwrap();
        let mut batch = crate::llama_batch::LlamaBatch::new(512, 1).unwrap();

        batch.add_sequence(&tokens, 0, false).unwrap();

        context.decode(&mut batch).unwrap();

        let mut sampler = LlamaSampler::chain_simple([
            LlamaSampler::grammar(&model, r#"root ::= [Yy] [Ee] [Ss] | [Nn] [Oo]"#, "root")
                .unwrap(),
            LlamaSampler::temp(0.8),
            LlamaSampler::greedy(),
        ]);

        let token = sampler.sample(&context, batch.n_tokens() - 1).unwrap();

        assert!(
            !model.is_eog_token(token),
            "Grammar sampler should not allow EOS as first token"
        );

        let mut decoder = encoding_rs::UTF_8.new_decoder();
        let piece = model
            .token_to_piece(token, &mut decoder, true, None)
            .unwrap();
        let first_char = piece.chars().next().unwrap().to_lowercase().next().unwrap();

        assert!(
            first_char == 'y' || first_char == 'n',
            "Grammar should constrain first token to start with y/n, got: '{piece}'"
        );
    }

    #[test]
    #[serial]
    fn json_schema_grammar_sampler_constrains_output_to_json() {
        use crate::sampling::LlamaSampler;

        let (backend, model) = test_model::load_default_model().unwrap();

        let ctx_params = crate::context::params::LlamaContextParams::default()
            .with_n_ctx(std::num::NonZeroU32::new(512));
        let mut context = model.new_context(&backend, ctx_params).unwrap();

        let prompt = "<|im_start|>user\nWhat is 2+2? Respond with a JSON object.<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n";
        let tokens = model.str_to_token(prompt, AddBos::Always).unwrap();
        let mut batch = crate::llama_batch::LlamaBatch::new(512, 1).unwrap();

        batch.add_sequence(&tokens, 0, false).unwrap();

        context.decode(&mut batch).unwrap();

        let grammar_str = crate::json_schema_to_grammar(
            r#"{"type": "object", "properties": {"answer": {"type": "string"}}, "required": ["answer"]}"#
        ).unwrap();

        let mut sampler = LlamaSampler::chain_simple([
            LlamaSampler::grammar(&model, &grammar_str, "root").unwrap(),
            LlamaSampler::temp(0.8),
            LlamaSampler::greedy(),
        ]);

        let token = sampler.sample(&context, batch.n_tokens() - 1).unwrap();

        assert!(
            !model.is_eog_token(token),
            "Grammar sampler should not allow EOS as first token"
        );

        let mut decoder = encoding_rs::UTF_8.new_decoder();
        let piece = model
            .token_to_piece(token, &mut decoder, true, None)
            .unwrap();

        assert!(
            piece.starts_with('{'),
            "JSON schema grammar should constrain first token to start with '{{', got: '{piece}'"
        );
    }

    #[test]
    #[serial]
    fn sample_with_grammar_produces_constrained_output_in_loop() {
        use crate::sampling::LlamaSampler;

        let (backend, model) = test_model::load_default_model().unwrap();

        let ctx_params = crate::context::params::LlamaContextParams::default()
            .with_n_ctx(std::num::NonZeroU32::new(512));
        let mut context = model.new_context(&backend, ctx_params).unwrap();

        let prompt = "<|im_start|>user\nIs the sky blue? yes or no<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n";
        let tokens = model.str_to_token(prompt, AddBos::Always).unwrap();
        let mut batch = crate::llama_batch::LlamaBatch::new(512, 1).unwrap();

        batch.add_sequence(&tokens, 0, false).unwrap();

        context.decode(&mut batch).unwrap();

        let mut sampler = LlamaSampler::chain_simple([
            LlamaSampler::grammar(&model, r#"root ::= "yes" | "no""#, "root").unwrap(),
            LlamaSampler::temp(0.8),
            LlamaSampler::greedy(),
        ]);

        let mut generated = String::new();
        let mut decoder = encoding_rs::UTF_8.new_decoder();
        let mut position = batch.n_tokens();

        for iteration in 0..10 {
            let token = sampler.sample(&context, -1).unwrap();
            let is_eog = model.is_eog_token(token);

            eprintln!("  iteration={iteration} token={} eog={is_eog}", token.0);

            if is_eog {
                break;
            }

            let piece = model
                .token_to_piece(token, &mut decoder, true, None)
                .unwrap();

            eprintln!("  piece='{piece}'");

            generated.push_str(&piece);

            batch.clear();
            batch.add(token, position, &[0], true).unwrap();
            position += 1;

            context.decode(&mut batch).unwrap();
        }

        let lowercase = generated.to_lowercase();

        assert!(
            lowercase == "yes" || lowercase == "no",
            "Grammar loop should produce 'yes' or 'no', got: '{generated}'"
        );
    }

    #[test]
    #[serial]
    fn sample_without_grammar_produces_multiple_tokens() {
        use crate::sampling::LlamaSampler;

        let (backend, model) = test_model::load_default_model().unwrap();

        let ctx_params = crate::context::params::LlamaContextParams::default()
            .with_n_ctx(std::num::NonZeroU32::new(512));
        let mut context = model.new_context(&backend, ctx_params).unwrap();

        let prompt =
            "<|im_start|>user\nSay hello<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n";
        let tokens = model.str_to_token(prompt, AddBos::Always).unwrap();
        let mut batch = crate::llama_batch::LlamaBatch::new(512, 1).unwrap();

        batch.add_sequence(&tokens, 0, false).unwrap();

        context.decode(&mut batch).unwrap();

        let mut sampler =
            LlamaSampler::chain_simple([LlamaSampler::temp(0.8), LlamaSampler::greedy()]);

        let mut token_count = 0;
        let mut position = batch.n_tokens();

        for _ in 0..5 {
            let token = sampler.sample(&context, -1).unwrap();

            if model.is_eog_token(token) {
                break;
            }

            token_count += 1;

            batch.clear();
            batch.add(token, position, &[0], true).unwrap();
            position += 1;

            context.decode(&mut batch).unwrap();
        }

        assert!(
            token_count > 0,
            "Should produce at least one token without grammar"
        );
    }
}