//! `llama_cpp_v3/backend.rs` — compute-backend selection for prebuilt
//! llama.cpp binaries.

/// Compute-backend flavor of a prebuilt `llama.cpp` binary release.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Backend {
    /// Plain CPU build (no GPU acceleration).
    Cpu,
    /// NVIDIA CUDA build.
    Cuda,
    /// Cross-vendor Vulkan build.
    Vulkan,
    /// AMD HIP/ROCm build.
    Hip,
    /// Intel SYCL / oneAPI build.
    Sycl,
    /// OpenCL build.
    OpenCl,
}

impl Backend {
    /// The string used in the GitHub release filename
    /// (e.g. `llama-bXXXX-bin-win-vulkan-x64.zip`).
    pub fn release_name_component(&self) -> &'static str {
        match self {
            Backend::Cpu => "cpu",
            // Pinned to CUDA 12.4 for now; could be made configurable.
            Backend::Cuda => "cuda-cu12.4",
            Backend::Vulkan => "vulkan",
            // BUGFIX: this previously returned "sycl", which selected the
            // Intel oneAPI artifact for AMD GPUs. Recent llama.cpp Windows
            // releases ship HIP builds as `...-hip-radeon-...`;
            // TODO(review): confirm against the exact release tag downloaded.
            Backend::Hip => "hip-radeon",
            Backend::Sycl => "sycl",
            // Not pre-built in standard releases; kept for completeness.
            Backend::OpenCl => "opencl",
        }
    }

    /// File name of the main llama.cpp shared library on the current
    /// platform. Note the name depends only on the OS, not on the
    /// backend variant.
    pub fn dll_name(&self) -> &'static str {
        if cfg!(windows) {
            "llama.dll"
        } else if cfg!(target_os = "macos") {
            "libllama.dylib"
        } else {
            "libllama.so"
        }
    }
}
34}