// llm_kit_openai/lib.rs
//! # LLM Kit OpenAI
//!
//! OpenAI provider implementation for the LLM Kit.
//!
//! This crate provides integration with OpenAI's chat completion API,
//! following the LLM Kit provider pattern for Rust.
//!
//! ## Features
//!
//! - Chat completions with streaming support
//! - Tool calling (function calling)
//! - Multi-modal inputs (text, images, audio, PDFs)
//! - Reasoning models support (o1, o3, etc.)
//! - Provider-specific options (logprobs, reasoning effort, service tiers, etc.)
//! - Type-safe configuration
//!
//! ## Quick Start
//!
//! ### Using the Client Builder (Recommended)
//!
//! ```no_run
//! use llm_kit_openai::OpenAIClient;
//! use llm_kit_provider::language_model::LanguageModel;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Create provider using the client builder
//!     let provider = OpenAIClient::new()
//!         .api_key("your-api-key") // Or use OPENAI_API_KEY env var
//!         .build();
//!
//!     // Get a language model
//!     let model = provider.chat("gpt-4o");
//!
//!     println!("Model: {}", model.model_id());
//!     println!("Provider: {}", model.provider());
//!     Ok(())
//! }
//! ```
//!
//! ### Using Settings Directly (Alternative)
//!
//! ```no_run
//! use llm_kit_openai::{OpenAIProvider, OpenAIProviderSettings};
//! use llm_kit_provider::language_model::LanguageModel;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Create provider (uses OPENAI_API_KEY from environment)
//!     let provider = OpenAIProvider::new(OpenAIProviderSettings::default());
//!
//!     // Get a language model
//!     let model = provider.chat("gpt-4o");
//!
//!     println!("Model: {}", model.model_id());
//!     println!("Provider: {}", model.provider());
//!     Ok(())
//! }
//! ```
//!
//! ## Configuration
//!
//! ### Using the Client Builder
//!
//! ```rust
//! use llm_kit_openai::OpenAIClient;
//!
//! let provider = OpenAIClient::new()
//!     .api_key("your-api-key")
//!     .base_url("https://api.openai.com/v1")
//!     .organization("org-123")
//!     .project("proj-456")
//!     .header("Custom-Header", "value")
//!     .name("my-openai-provider")
//!     .build();
//! ```
//!
//! ### Using Settings Directly
//!
//! ```rust
//! use llm_kit_openai::{OpenAIProvider, OpenAIProviderSettings};
//!
//! let settings = OpenAIProviderSettings::new()
//!     .with_api_key("your-api-key")
//!     .with_base_url("https://api.openai.com/v1")
//!     .with_organization("org-123")
//!     .with_project("proj-456")
//!     .add_header("Custom-Header", "value")
//!     .with_name("my-openai-provider");
//!
//! let provider = OpenAIProvider::new(settings);
//! ```
//!
//! ## Architecture
//!
//! The implementation follows the LLM Kit provider pattern:
//!
//! - **Provider** (`OpenAIProvider`): Creates model instances
//! - **Language Model** (`OpenAIChatLanguageModel`): Implements text generation and streaming
//! - **Message Conversion**: Converts SDK messages to OpenAI format
//! - **Tool Preparation**: Converts SDK tools to OpenAI function format
//! - **API Types**: Request and response types for the OpenAI API
//!
//! ## Provider-Specific Options
//!
//! OpenAI-specific options can be passed through `provider_options`:
//!
//! ```no_run
//! use llm_kit_openai::chat::OpenAIChatLanguageModelOptions;
//! use serde_json::json;
//!
//! let options = OpenAIChatLanguageModelOptions {
//!     reasoning_effort: Some(llm_kit_openai::chat::openai_chat_options::ReasoningEffort::High),
//!     logprobs: Some(llm_kit_openai::chat::openai_chat_options::LogprobsOption::Number(5)),
//!     ..Default::default()
//! };
//! ```
//!
//! ## Supported Models
//!
//! All OpenAI chat models are supported, including:
//!
//! - GPT-4 family: `gpt-4`, `gpt-4-turbo`, `gpt-4o`, etc.
//! - GPT-3.5: `gpt-3.5-turbo`
//! - Reasoning models: `o1`, `o3-mini`, etc.
//! - GPT-5 family (when available)
//!
//! ## Reasoning Models
//!
//! Reasoning models (o1, o3, etc.) have special handling:
//!
//! - System messages use "developer" role instead of "system"
//! - Unsupported settings (temperature, top_p, etc.) are automatically removed
//! - Uses `max_completion_tokens` instead of `max_tokens`

137#![warn(missing_docs)]
138
139/// Chat completion API implementation
140pub mod chat;
141/// Client builder for creating OpenAI providers
142pub mod client;
143/// OpenAI provider implementation
144pub mod provider;
145/// Settings and configuration for OpenAI providers
146pub mod settings;
147
148// Re-export main types for convenience
149pub use chat::{OpenAIChatLanguageModel, OpenAIChatLanguageModelOptions, OpenAIChatModelId};
150pub use client::OpenAIClient;
151pub use provider::OpenAIProvider;
152pub use settings::OpenAIProviderSettings;