vllora_llm 0.1.21

LLM client layer for the Vllora AI Gateway: unified chat-completions over multiple providers (OpenAI, Anthropic, Gemini, Bedrock, LangDB proxy) with optional tracing/telemetry.
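
To depend on this crate from another project, a downstream Cargo.toml would declare it roughly as follows (a minimal sketch, assuming the package is published under the name and version shown here):

    [dependencies.vllora_llm]
    version = "0.1.21"
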
[dependencies.async-openai]
features = ["chat-completion-types", "chat-completion", "completions", "response-types", "responses", "embedding-types", "embedding", "image-types", "image", "moderation", "moderation-types", "_api"]
package = "async-openai-compat"
version = "0.30.5"

[dependencies.async-trait]
version = "0.1"

[dependencies.aws-config]
features = ["behavior-version-latest"]
version = "1.8.12"

[dependencies.aws-credential-types]
version = "1.2.11"

[dependencies.aws-sdk-bedrock]
version = "1.127.0"

[dependencies.aws-sdk-bedrockruntime]
version = "1.120.0"

[dependencies.aws-smithy-runtime-api]
version = "1.9.3"

[dependencies.aws-smithy-types]
features = ["serde-deserialize", "serde-serialize"]
version = "1.3.5"

[dependencies.base64]
version = "0.22.1"

[dependencies.chrono]
features = ["serde"]
version = "0.4"

[dependencies.clust]
package = "langdb_clust"
version = "0.9.11"

[dependencies.futures]
version = "0.3.30"

[dependencies.minijinja]
version = "2.12.0"

[dependencies.opentelemetry]
features = ["metrics"]
version = "0.31"

[dependencies.rand]
version = "0.9.2"

[dependencies.regex]
version = "1.12"

[dependencies.reqwest]
default-features = false
features = ["json", "stream"]
version = "0.12.24"

[dependencies.reqwest-eventsource]
version = "0.6.0"

[dependencies.rmcp]
default-features = true
features = ["reqwest", "client", "server", "transport-streamable-http-client-reqwest", "transport-sse-client-reqwest"]
version = "0.10.0"

[dependencies.schemars]
optional = true
version = "1.0.4"

[dependencies.serde]
features = ["derive"]
version = "1.0"

[dependencies.serde_json]
version = "1.0"

[dependencies.serde_tuple]
version = "1.1.3"

[dependencies.thiserror]
version = "2.0.17"

[dependencies.tokio]
features = ["rt", "rt-multi-thread", "sync", "io-std"]
version = "1.48.0"

[dependencies.tokio-stream]
features = ["io-util"]
version = "0.1.17"

[dependencies.tracing]
features = ["log", "valuable"]
version = "0.1.44"

[dependencies.tracing-futures]
features = ["futures-03"]
version = "0.2.5"

[dependencies.tracing-opentelemetry]
default-features = false
version = "0.32"

[dependencies.tracing-subscriber]
features = ["env-filter", "tracing-log", "valuable"]
version = "0.3.22"

[dependencies.uuid]
features = ["serde", "v4"]
version = "1.19.0"

[dependencies.validator]
features = ["derive"]
version = "0.20"

[dependencies.valuable]
features = ["derive"]
version = "0.1"

[dependencies.vllora_telemetry]
version = "0.1.21"

[features]
default = []
schemars = ["dep:schemars"]
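
The `schemars` feature is disabled by default and gates the optional `schemars` dependency above; a consumer that wants JSON-schema support would opt in explicitly in its own manifest, for example (a consumer-side sketch, not part of this manifest):

    vllora_llm = { version = "0.1.21", features = ["schemars"] }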

[lib]
name = "vllora_llm"
path = "src/lib.rs"

[package]
authors = ["Vllora Team<api@vllora.dev>"]
autobenches = false
autobins = false
autoexamples = false
autolib = false
autotests = false
build = false
categories = ["web-programming", "network-programming"]
description = "LLM client layer for the Vllora AI Gateway: unified chat-completions over multiple providers (OpenAI, Anthropic, Gemini, Bedrock, LangDB proxy) with optional tracing/telemetry."
documentation = "https://vllora.dev/docs"
edition = "2021"
keywords = ["llm", "openai", "anthropic", "gemini", "mcp"]
license = "Apache-2.0"
name = "vllora_llm"
readme = "README.md"
repository = "https://github.com/vllora/vllora"
version = "0.1.21"