[package]
name = "api_openai"
version = "0.3.0"
edition = "2021"
rust-version = "1.70.0"
authors = [
"Kostiantyn Wandalen <wandalen@obox.systems>",
]
license = "MIT"
readme = "readme.md"
documentation = "https://docs.rs/api_openai"
repository = "https://github.com/Wandalen/api_llm/tree/master/api/openai"
homepage = "https://github.com/Wandalen/api_llm/tree/master/api/openai"
description = """
OpenAI's API for accessing large language models (LLMs).
"""
categories = [ "api-bindings", "web-programming" ]
keywords = [ "openai", "llm", "ai", "api", "chat" ]
autoexamples = false

[lints]
workspace = true

[package.metadata.cargo-udeps.ignore]
development = [ "tracing-subscriber" ]

[package.metadata.docs.rs]
features = [ "full" ]
all-features = false

[features]
default = [ "full" ]
full = [ "enabled", "integration", "retry", "circuit_breaker", "rate_limiting", "failover", "health_checks", "enterprise", "caching", "batching", "compression", "streaming_control", "audio", "moderation", "input_validation", "model_comparison", "request_templates", "buffered_streaming" ]
enabled = [
"dep:mod_interface",
"dep:former",
"dep:error_tools",
"dep:derive_tools",
"dep:workspace_tools",
"dep:async-trait",
"dep:url",
"dep:rand",
"dep:chrono",
"dep:uuid",
"dep:regex",
"dep:serde",
"dep:serde_json",
"dep:serde_yaml",
"dep:base64",
"dep:secrecy",
"dep:futures-core",
"dep:futures-util",
"dep:futures",
"dep:backoff",
"dep:tokio",
"dep:bytes",
"dep:eventsource-stream",
"dep:reqwest",
"dep:tracing",
"dep:tokio-tungstenite",
]
integration = []
retry = []
circuit_breaker = []
rate_limiting = []
failover = []
health_checks = []
# Use explicit `dep:` syntax, consistent with the `enabled` feature above,
# so the optional dependencies are not exposed as implicit public features.
caching = [ "dep:sha2", "dep:blake3" ]
compression = [ "dep:flate2" ]
batching = [ "dep:blake3" ]
streaming_control = []
audio = []
moderation = []
input_validation = []
enterprise = []
model_comparison = []
request_templates = []
buffered_streaming = []
performance = [
"retry",
"circuit_breaker",
"rate_limiting",
"failover",
"health_checks",
"caching",
"batching"
]
optimized = [
"performance",
"compression",
"enterprise"
]
minimal = []
dev-optimized = [
"retry",
"circuit_breaker",
"caching"
]

[dependencies]
mod_interface = { workspace = true, optional = true }
former = { workspace = true, optional = true }
error_tools = { workspace = true, optional = true }
derive_tools = { workspace = true, optional = true }
workspace_tools = { workspace = true, features = [ "secrets" ], optional = true }
async-trait = { workspace = true, optional = true }
url = { workspace = true, optional = true }
rand = { workspace = true, optional = true }
chrono = { workspace = true, optional = true }
uuid = { workspace = true, features = ["v4"], optional = true }
regex = { workspace = true, optional = true }
serde = { workspace = true, features = ["derive"], optional = true }
serde_json = { workspace = true, optional = true }
serde_yaml = { workspace = true, optional = true }
base64 = { workspace = true, optional = true }
secrecy = { workspace = true, optional = true }
blake3 = { workspace = true, optional = true }
flate2 = { workspace = true, optional = true }
sha2 = { workspace = true, optional = true }
futures-core = { workspace = true, optional = true }
futures-util = { workspace = true, optional = true }
futures = { workspace = true, optional = true }
backoff = { workspace = true, features = [ "tokio" ], optional = true }
tokio = { workspace = true, features = [ "macros", "sync", "time", "rt-multi-thread" ], optional = true }
bytes = { workspace = true, optional = true }
eventsource-stream = { workspace = true, optional = true }
reqwest = { workspace = true, features = [
"json",
"stream",
"multipart",
"rustls-tls",
], default-features = false, optional = true }
tracing = { workspace = true, optional = true }
tokio-tungstenite = { workspace = true, optional = true }

[dev-dependencies]
tempfile = { workspace = true }
tracing-subscriber = { workspace = true }

[[example]]
name = "openai_chat"
path = "examples/openai_chat.rs"

[[example]]
name = "openai_multi_turn_conversation"
path = "examples/openai_multi_turn_conversation.rs"

[[example]]
name = "openai_interactive_chat"
path = "examples/openai_interactive_chat.rs"

[[example]]
name = "openai_responses_create_with_tools"
path = "examples/openai_responses_create_with_tools.rs"

[[example]]
name = "openai_responses_create_image_input"
path = "examples/openai_responses_create_image_input.rs"

[[example]]
name = "openai_ai_code_reviewer"
path = "examples/openai_ai_code_reviewer.rs"

[[example]]
name = "openai_web_research_assistant"
path = "examples/openai_web_research_assistant.rs"

[[example]]
name = "openai_document_analyzer_vision"
path = "examples/openai_document_analyzer_vision.rs"

[[example]]
name = "openai_batch_content_processor"
path = "examples/openai_batch_content_processor.rs"

[[example]]
name = "openai_cached_interactive_chat"
path = "examples/openai_cached_interactive_chat.rs"

[[example]]
name = "openai_request_batching_demo"
path = "examples/openai_request_batching_demo.rs"