[package]
edition = "2021"
rust-version = "1.75"
name = "ai_assistant_core"
version = "0.2.0"
authors = ["Orlando Jose Luque Moraira <orlando.luque@gmail.com>"]
build = false
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Simple, ergonomic Rust client & server for local LLMs (Ollama, LM Studio, OpenAI-compatible). Chat, list models, stream responses, serve your model remotely."
homepage = "https://ai-assistant.runawaybrains.com"
documentation = "https://docs.rs/ai_assistant_core"
readme = "README.md"
keywords = [
    "ai",
    "llm",
    "ollama",
    "lm-studio",
    "openai",
]
categories = [
    "api-bindings",
    "text-processing",
    "network-programming",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/OrlandoLuque/ai_assistant_core"
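
# Feature flags (none enabled by default):
#   - `nat`:   turns on tokio's networking primitives (`tokio/net`); presumably
#              what the `serve_with_nat` example builds on.
#   - `serve`: pulls in the optional HTTP server stack (axum, tower, tower-http)
#              and `tokio/signal` for shutdown signal handling.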
[features]
default = []
nat = ["tokio/net"]
serve = [
    "dep:axum",
    "dep:tower-http",
    "dep:tower",
    "tokio/signal",
]
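
# Target auto-discovery is disabled above (`autolib`, `autobins`, etc. = false),
# so every lib, bin, and example target is declared explicitly below.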
[lib]
name = "ai_assistant_core"
path = "src/lib.rs"
[[bin]]
name = "ai_serve"
path = "src/bin/ai_serve.rs"
required-features = ["serve"]
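
# Examples. Run each with the features it needs (see the `required-features`
# entries below), e.g.:
#   cargo run --example quick_chat
#   cargo run --example serve_model --features serve
#   cargo run --example serve_with_nat --features "serve nat"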
[[example]]
name = "quick_chat"
path = "examples/quick_chat.rs"

[[example]]
name = "serve_model"
path = "examples/serve_model.rs"
# Assumption based on the example's name: it starts the HTTP server and
# therefore needs the `serve` feature to compile.
required-features = ["serve"]

[[example]]
name = "serve_with_nat"
path = "examples/serve_with_nat.rs"
# Assumption, as above: this example also relies on the `nat` networking feature.
required-features = ["serve", "nat"]
[dependencies.axum]
version = "0.8"
optional = true

[dependencies.futures]
version = "0.3"

[dependencies.reqwest]
version = "0.12"
features = [
    "json",
    "stream",
]

[dependencies.serde]
version = "1"
features = ["derive"]

[dependencies.serde_json]
version = "1"

[dependencies.thiserror]
version = "2"

[dependencies.tokio]
version = "1"
features = [
    "rt-multi-thread",
    "macros",
]

[dependencies.tower]
version = "0.5"
optional = true

[dependencies.tower-http]
version = "0.6"
features = ["cors"]
optional = true
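
# Dev-only: tokio's `test-util` feature adds time-pausing/advancing helpers
# (`tokio::time::pause` and friends) for deterministic async tests.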
[dev-dependencies.tokio]
version = "1"
features = [
    "rt-multi-thread",
    "macros",
    "test-util",
]
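
# Run the full test suite with every feature enabled:
#   cargo test --all-features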