[package]
edition = "2024"
name = "llama-server"
version = "0.1.1"
build = false
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Download, embed, and run llama.cpp in your Rust projects"
readme = "README.md"
license = "MIT"
repository = "https://github.com/hecrj/llama-server"

[lib]
name = "llama_server"
path = "src/lib.rs"

[dependencies.bitflags]
version = "2"

[dependencies.directories]
version = "6"

[dependencies.futures]
version = "0.3"

[dependencies.reqwest]
# NOTE(review): the widely-published reqwest release line is 0.12 —
# confirm a "0.13" release exists on crates.io, or pin to "0.12".
version = "0.13"
features = ["json"]

[dependencies.serde]
version = "1"
features = ["derive"]

[dependencies.sipper]
version = "0.1"

[dependencies.tokio]
version = "1"
features = [
    "rt",
    "fs",
    "io-util",
    "process",
]

[dependencies.zip]
version = "7"

[dev-dependencies.tokio]
version = "1"
features = ["macros"]

[lints.rust]
missing_docs = "deny"
unsafe_code = "deny"
unused_results = "deny"

[lints.rust.rust_2018_idioms]
level = "deny"
priority = -1