Skip to main content

llama_cpp_bindings/
lib.rs

//! Bindings to the llama.cpp library.
//!
//! As llama.cpp is a very fast moving target, this crate does not attempt to create a stable API
//! with all the rust idioms. Instead it provides safe wrappers around nearly direct bindings to
//! llama.cpp. This makes it easier to keep up with the changes in llama.cpp, but does mean that
//! the API is not as nice as it could be.
//!
//! # Feature Flags
//!
//! - `cuda` enables CUDA gpu support.
//! - `sampler` adds the [`context::sample::sampler`] struct for a more rusty way of sampling.

13pub mod context;
14pub mod error;
15pub mod llama_backend;
16pub mod llama_backend_device;
17pub mod llama_backend_numa_strategy;
18pub mod llama_batch;
19pub mod llama_utility_ggml_time_us;
20pub mod llama_utility_json_schema_to_grammar;
21pub mod llama_utility_llama_time_us;
22pub mod llama_utility_max_devices;
23pub mod llama_utility_mlock_supported;
24pub mod llama_utility_mmap_supported;
25pub mod llama_utility_status_is_ok;
26pub mod llama_utility_status_to_i32;
27#[cfg(feature = "llguidance")]
28pub mod llguidance_sampler;
29pub mod log;
30pub mod log_options;
31pub mod model;
32#[cfg(feature = "mtmd")]
33pub mod mtmd;
34pub mod openai;
35pub mod sampling;
36pub mod timing;
37pub mod token;
38pub mod token_type;
39
40pub use error::{
41    ApplyChatTemplateError, ChatParseError, ChatTemplateError, DecodeError, EmbeddingsError,
42    EncodeError, GrammarError, LlamaContextLoadError, LlamaCppError, LlamaLoraAdapterInitError,
43    LlamaLoraAdapterRemoveError, LlamaLoraAdapterSetError, LlamaModelLoadError, MetaValError,
44    ModelParamsError, NewLlamaChatMessageError, Result, SamplerAcceptError, SamplingError,
45    StringToTokenError, TokenSamplingError, TokenToStringError,
46};
47
48pub use llama_backend_device::{
49    LlamaBackendDevice, LlamaBackendDeviceType, list_llama_ggml_backend_devices,
50};
51
52pub use llama_utility_ggml_time_us::ggml_time_us;
53pub use llama_utility_json_schema_to_grammar::json_schema_to_grammar;
54pub use llama_utility_llama_time_us::llama_time_us;
55pub use llama_utility_max_devices::max_devices;
56pub use llama_utility_mlock_supported::mlock_supported;
57pub use llama_utility_mmap_supported::mmap_supported;
58pub use llama_utility_status_is_ok::status_is_ok;
59pub use llama_utility_status_to_i32::status_to_i32;
60
61pub use log::send_logs_to_tracing;
62pub use log_options::LogOptions;