llm_stack_openai/lib.rs

//! `OpenAI` provider for the llm-stack SDK.
//!
//! This crate implements [`Provider`](llm_stack::Provider) for `OpenAI`'s
//! Chat Completions API, supporting both non-streaming and streaming
//! generation with tool calling and structured output.
//!
//! # Quick start
//!
//! ```rust,no_run
//! use llm_stack_openai::{OpenAiConfig, OpenAiProvider};
//! use llm_stack::{ChatMessage, ChatParams, Provider};
//!
//! # async fn example() -> Result<(), llm_stack::LlmError> {
//! let provider = OpenAiProvider::new(OpenAiConfig {
//!     api_key: std::env::var("OPENAI_API_KEY").expect("OPENAI_API_KEY is not set"),
//!     ..Default::default()
//! });
//!
//! let params = ChatParams {
//!     messages: vec![ChatMessage::user("Hello!")],
//!     ..Default::default()
//! };
//!
//! let response = provider.generate(&params).await?;
//! println!("{}", response.text().unwrap_or("no text"));
//! # Ok(())
//! # }
//! ```
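//!
//! # Streaming
//!
//! Streaming generation is also supported. The sketch below is illustrative
//! only: the `generate_stream` method name and the shape of the yielded
//! chunks are assumptions made for this example, not the verified
//! `llm_stack` API; adjust to the actual trait signatures.
//!
//! ```rust,ignore
//! use futures::StreamExt;
//! use llm_stack::{ChatMessage, ChatParams, Provider};
//!
//! # async fn example(provider: impl Provider) -> Result<(), llm_stack::LlmError> {
//! let params = ChatParams {
//!     messages: vec![ChatMessage::user("Stream a haiku.")],
//!     ..Default::default()
//! };
//!
//! // Assumption: `generate_stream` yields incremental chunks as a `Stream`.
//! let mut stream = provider.generate_stream(&params).await?;
//! while let Some(chunk) = stream.next().await {
//!     // Each item is assumed to be a `Result`; propagate transport errors.
//!     if let Some(delta) = chunk?.text() {
//!         print!("{delta}");
//!     }
//! }
//! # Ok(())
//! # }
//! ```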

#![warn(missing_docs)]

mod config;
mod convert;
mod factory;
mod provider;
mod stream;
mod types;

pub use config::OpenAiConfig;
pub use factory::{OpenAiFactory, register_global};
pub use provider::OpenAiProvider;
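
// Hypothetical usage sketch: `register_global` presumably registers
// `OpenAiFactory` with a process-wide llm-stack provider registry. Its exact
// signature is not shown in this file, so the zero-argument call below is an
// assumption.
//
//     llm_stack_openai::register_global();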