//! llama-cpp-v3 0.1.6
//!
//! Safe and ergonomic Rust wrapper for llama.cpp with dynamic loading.
//! See the crate documentation for usage details.
/// Compute backend used to select which prebuilt llama.cpp binary to load.
///
/// The variant determines both the GitHub release artifact name
/// (see [`Backend::release_name_component`]) and, indirectly, which
/// shared library is expected on disk.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Backend {
    /// Plain CPU build (no accelerator).
    Cpu,
    /// NVIDIA CUDA build.
    Cuda,
    /// Vulkan build (cross-vendor GPU).
    Vulkan,
    /// AMD HIP/ROCm build.
    Hip,
    /// Intel SYCL/oneAPI build.
    Sycl,
    /// OpenCL build.
    OpenCl,
}

impl Backend {
    /// The string used in the GitHub release filename
    /// (e.g. `llama-bXXXX-bin-win-vulkan-x64.zip`).
    ///
    /// Note: the CUDA component pins CUDA 12.4; this could be made
    /// configurable if other toolkit versions need to be supported.
    pub fn release_name_component(&self) -> &'static str {
        match self {
            Backend::Cpu => "cpu",
            Backend::Cuda => "cuda-cu12.4", // using CUDA 12.4 for now, could be made configurable
            Backend::Vulkan => "vulkan",
            // Fixed: previously mapped to "sycl", which would fetch the
            // wrong (SYCL) artifact for HIP/ROCm users. llama.cpp HIP
            // artifacts use a "hip" component — verify against the exact
            // release naming (some releases append a GPU-arch suffix).
            Backend::Hip => "hip",
            Backend::Sycl => "sycl",
            Backend::OpenCl => "opencl", // Not pre-built in standard releases but for completeness
        }
    }

    /// The platform-specific filename of the main shared library.
    ///
    /// Resolved at compile time via `cfg!`; the backend variant does not
    /// affect the library filename, only which build of it is downloaded.
    pub fn dll_name(&self) -> &'static str {
        if cfg!(windows) {
            "llama.dll"
        } else if cfg!(target_os = "macos") {
            "libllama.dylib"
        } else {
            "libllama.so"
        }
    }
}