# llama-cpp-sys-4 0.2.2
#
# Low Level Bindings to llama.cpp
# (crate documentation lives on docs.rs)
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.

[package]
edition = "2021"
name = "llama-cpp-sys-4"
version = "0.2.2"
build = "build.rs"
# Declares that build.rs links the native "llama" library; at most one crate
# in a dependency graph may claim this `links` key.
links = "llama"
# Files packaged into the published crate. A leading "/" anchors a pattern at
# the package root; "**/*" matches recursively. Patterns are deduplicated
# here: broader globs (e.g. /llama.cpp/vendor/**/*) already cover narrower
# ones, so the narrower entries are omitted.
include = [
    "wrapper.h",
    "build.rs",
    "/src",
    "/llama.cpp/common/**/*",
    "/llama.cpp/ggml/include/*.h",
    "/llama.cpp/ggml/src/*.h",
    "/llama.cpp/ggml/src/*.c",
    "/llama.cpp/ggml/src/*.cpp",
    "/llama.cpp/ggml/src/ggml-blas/**/*",
    "/llama.cpp/ggml/src/ggml-cann/**/*",
    "/llama.cpp/ggml/src/ggml-cpu/**/*",
    "/llama.cpp/ggml/src/ggml-hexagon/**/*",
    "/llama.cpp/ggml/src/ggml-hip/**/*",
    "/llama.cpp/ggml/src/ggml-metal/**/*",
    "/llama.cpp/ggml/src/ggml-musa/**/*",
    "/llama.cpp/ggml/src/ggml-opencl/**/*",
    "/llama.cpp/ggml/src/ggml-rpc/**/*",
    "/llama.cpp/ggml/src/ggml-sycl/**/*",
    "/llama.cpp/ggml/src/ggml-virtgpu/**/*",
    "/llama.cpp/ggml/src/ggml-vulkan/**/*",
    "/llama.cpp/ggml/src/ggml-webgpu/**/*",
    "/llama.cpp/ggml/src/ggml-zdnn/**/*",
    "/llama.cpp/ggml/src/ggml-zendnn/**/*",
    "/llama.cpp/src/**/*",
    "/llama.cpp/convert_hf_to_gguf.py",
    "/llama.cpp/ggml/src/ggml-cuda.cu",
    "/llama.cpp/ggml/src/ggml-metal.m",
    "/llama.cpp/ggml/src/ggml-metal.metal",
    "/llama.cpp/include/llama.h",
    "/llama.cpp/include/llama-cpp.h",
    "/llama.cpp/ggml/src/ggml-cuda/**/*",
    # Covers vulkan-shaders sources and its CMakeLists.txt.
    "/llama.cpp/ggml/src/vulkan-shaders/**/*",
    "/llama.cpp/ggml/src/llamafile/sgemm.h",
    "/llama.cpp/ggml/src/llamafile/sgemm.cpp",
    # Covers all vendored third-party code (stb, miniaudio, sheredom, ...).
    "/llama.cpp/vendor/**/*",
    "/llama.cpp/pocs",
    "/llama.cpp/tools/CMakeLists.txt",
    "/llama.cpp/tools/mtmd/**/*",
    "/llama.cpp/tools/batched-bench/**/*",
    "/llama.cpp/tools/cli/**/*",
    "/llama.cpp/tools/completion/**/*",
    "/llama.cpp/tools/cvector-generator/**/*",
    "/llama.cpp/tools/export-lora/**/*",
    "/llama.cpp/tools/fit-params/**/*",
    "/llama.cpp/tools/gguf-split/**/*",
    "/llama.cpp/tools/imatrix/**/*",
    "/llama.cpp/tools/llama-bench/**/*",
    "/llama.cpp/tools/parser/**/*",
    "/llama.cpp/tools/perplexity/**/*",
    "/llama.cpp/tools/quantize/**/*",
    "/llama.cpp/tools/results/**/*",
    "/llama.cpp/tools/rpc/**/*",
    "/llama.cpp/tools/server/**/*",
    "/llama.cpp/tools/tokenize/**/*",
    "/llama.cpp/tools/tts/**/*",
    "/llama.cpp/CMakeLists.txt",
    "/llama.cpp/ggml/CMakeLists.txt",
    "/llama.cpp/ggml/src/CMakeLists.txt",
    "/llama.cpp/cmake",
    "/llama.cpp/ggml/cmake",
]
# Disable target auto-discovery; all targets are declared explicitly below.
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Low Level Bindings to llama.cpp"
readme = "README.md"
license = "MIT OR Apache-2.0"
repository = "https://github.com/eugenehp/llama-cpp-rs"

# Build-time toggles consumed by build.rs. All are empty (no transitive
# feature/dependency activation); they only select how the native llama.cpp
# library is compiled/linked. Names suggest optional backends (CUDA, Metal,
# Vulkan, RPC, ...) — confirm exact semantics against build.rs.
[features]
cuda = []
dynamic-link = []
metal = []
mpi = []
mtmd = []
native = []
openmp = []
rpc = []
vulkan = []

# Explicit library target: the crate name's hyphens become underscores in the
# Rust library name, and the entry point is spelled out since autolib is off.
[lib]
name = "llama_cpp_sys_4"
path = "src/lib.rs"

# Intentionally empty: this -sys crate has no runtime Rust dependencies;
# everything it needs is a build-time dependency below.
[dependencies]

# Build-script dependencies (bindings generation + native compilation),
# sorted alphabetically. Inline-table/string specs are equivalent to the
# dotted [build-dependencies.X] header form Cargo emits when normalizing.
[build-dependencies]
bindgen = "0.72.1"
cc = { version = "1.2.56", features = ["parallel"] }
cmake = "0.1"
glob = "0.3.3"