openai_tools/lib.rs
//! # OpenAI Tools for Rust
//!
//! A comprehensive Rust library for interacting with OpenAI's APIs, providing easy-to-use
//! interfaces for chat completions, responses, and various AI-powered functionalities.
//! This crate offers both high-level convenience methods and low-level control for
//! advanced use cases.
//!
//! ## Features
//!
//! ### Core APIs
//! - **Chat Completions API**: Chat with streaming, function calling, and structured output
//! - **Responses API**: Assistant-style interactions with multi-modal input
//! - **Conversations API**: Long-running conversation state management
//! - **Embedding API**: Text to vector embeddings for semantic search
//! - **Realtime API**: WebSocket-based real-time audio/text streaming
//!
//! ### Content & Media APIs
//! - **Images API**: DALL-E image generation, editing, and variations
//! - **Audio API**: Text-to-speech, transcription, and translation
//! - **Moderations API**: Content policy violation detection
//!
//! ### Management APIs
//! - **Models API**: List and retrieve available models
//! - **Files API**: Upload and manage files for fine-tuning/batch
//! - **Batch API**: Async bulk processing with 50% cost savings
//! - **Fine-tuning API**: Custom model training
//!
//! ## Quick Start
//!
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! openai-tools = "0.1.0"
//! tokio = { version = "1.0", features = ["full"] }
//! serde = { version = "1.0", features = ["derive"] }
//! ```
//!
//! Set up your API key:
//!
//! ```bash
//! export OPENAI_API_KEY="your-api-key-here"
//! ```
//!
//! ## Basic Chat Completion
//!
//! ```rust,no_run
//! use openai_tools::chat::request::ChatCompletion;
//! use openai_tools::common::message::Message;
//! use openai_tools::common::role::Role;
//! use openai_tools::common::models::ChatModel;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut chat = ChatCompletion::new();
//!     let messages = vec![
//!         Message::from_string(Role::User, "Hello! How are you?")
//!     ];
//!
//!     let response = chat
//!         .model(ChatModel::Gpt4oMini) // Type-safe model selection
//!         .messages(messages)
//!         .temperature(0.7)
//!         .chat()
//!         .await?;
//!
//!     println!("AI: {}", response.choices[0].message.content.as_ref().unwrap().text.as_ref().unwrap());
//!     Ok(())
//! }
//! ```
//!
//! ## Structured Output with JSON Schema
//!
//! ```rust,no_run
//! use openai_tools::chat::request::ChatCompletion;
//! use openai_tools::common::{message::Message, role::Role, structured_output::Schema, models::ChatModel};
//! use serde::{Deserialize, Serialize};
//!
//! #[derive(Debug, Serialize, Deserialize)]
//! struct PersonInfo {
//!     name: String,
//!     age: u32,
//!     occupation: String,
//! }
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut chat = ChatCompletion::new();
//!
//!     // Create JSON schema
//!     let mut schema = Schema::chat_json_schema("person_info");
//!     schema.add_property("name", "string", "Person's full name");
//!     schema.add_property("age", "number", "Person's age");
//!     schema.add_property("occupation", "string", "Person's job");
//!
//!     let messages = vec![
//!         Message::from_string(Role::User,
//!             "Extract info: John Smith, 30, Software Engineer")
//!     ];
//!
//!     let response = chat
//!         .model(ChatModel::Gpt4oMini)
//!         .messages(messages)
//!         .json_schema(schema)
//!         .chat()
//!         .await?;
//!
//!     let person: PersonInfo = serde_json::from_str(
//!         response.choices[0].message.content.as_ref().unwrap().text.as_ref().unwrap()
//!     )?;
//!
//!     println!("Extracted: {} ({}), {}", person.name, person.age, person.occupation);
//!     Ok(())
//! }
//! ```
//!
//! ## Function Calling with Tools
//!
//! ```rust,no_run
//! use openai_tools::chat::request::ChatCompletion;
//! use openai_tools::common::{message::Message, role::Role, tool::Tool, parameters::ParameterProperty, models::ChatModel};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut chat = ChatCompletion::new();
//!
//!     // Define a weather tool
//!     let weather_tool = Tool::function(
//!         "get_weather",
//!         "Get current weather for a location",
//!         vec![
//!             ("location", ParameterProperty::from_string("City name")),
//!             ("unit", ParameterProperty::from_string("Temperature unit (celsius/fahrenheit)")),
//!         ],
//!         false,
//!     );
//!
//!     let messages = vec![
//!         Message::from_string(Role::User, "What's the weather in Tokyo?")
//!     ];
//!
//!     let response = chat
//!         .model(ChatModel::Gpt4oMini)
//!         .messages(messages)
//!         .tools(vec![weather_tool])
//!         .chat()
//!         .await?;
//!
//!     // Handle tool calls
//!     if let Some(tool_calls) = &response.choices[0].message.tool_calls {
//!         for call in tool_calls {
//!             println!("Tool: {}", call.function.name);
//!             if let Ok(args) = call.function.arguments_as_map() {
//!                 println!("Args: {:?}", args);
//!             }
//!             // Execute the function and continue conversation...
//!         }
//!     }
//!     Ok(())
//! }
//! ```
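//!
//! Completing the loop typically means running your own implementation of the requested
//! function, appending its result to the message history, and sending a second request.
//! The exact shape of a tool-result message is not shown above, so the sketch below is
//! only an assumption: it reuses `Message::from_string` with `Role::Tool`, while the real
//! API may require attaching the originating tool call id and the assistant's tool-call
//! message.
//!
//! ```rust,ignore
//! // Hypothetical continuation of the example above (a sketch, not the crate's
//! // documented API). Suppose our local `get_weather` implementation produced:
//! let tool_result = r#"{"temp_c": 21, "condition": "sunny"}"#;
//!
//! // In a full implementation the assistant message containing the tool call is
//! // echoed back before the tool result; those types are crate-specific and omitted.
//! // Assumption: a result can be sent back as a plain Role::Tool message.
//! let follow_up = vec![
//!     Message::from_string(Role::User, "What's the weather in Tokyo?"),
//!     Message::from_string(Role::Tool, tool_result),
//! ];
//!
//! // Second round trip: the model now answers using the tool output.
//! let final_answer = chat.messages(follow_up).chat().await?;
//! ```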
//!
//! ## Multi-modal Input with Responses API
//!
//! ```rust,no_run
//! use openai_tools::responses::request::Responses;
//! use openai_tools::common::{message::{Message, Content}, role::Role, models::ChatModel};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut responses = Responses::new();
//!
//!     responses
//!         .model(ChatModel::Gpt4oMini)
//!         .instructions("You are an image analysis assistant.");
//!
//!     // Multi-modal message with text and image
//!     let message = Message::from_message_array(
//!         Role::User,
//!         vec![
//!             Content::from_text("What do you see in this image?"),
//!             Content::from_image_file("path/to/image.jpg"),
//!         ],
//!     );
//!
//!     responses.messages(vec![message]);
//!
//!     let response = responses.complete().await?;
//!     let text = response.output_text().unwrap();
//!     println!("Response: {}", text);
//!     Ok(())
//! }
//! ```
//!
//! ## Choosing the Right API
//!
//! | Use Case | Recommended API | Module |
//! |----------|-----------------|--------|
//! | Simple Q&A, chatbot | Chat Completions | [`chat`] |
//! | Multi-turn assistant with state | Responses + Conversations | [`responses`], [`conversations`] |
//! | Real-time voice interaction | Realtime | [`realtime`] |
//! | Semantic search, similarity | Embeddings | [`embedding`] |
//! | Image generation (DALL-E) | Images | [`images`] |
//! | Speech-to-text, TTS | Audio | [`audio`] |
//! | Content moderation | Moderations | [`moderations`] |
//! | Bulk processing (50% off) | Batch | [`batch`] |
//! | Custom model training | Fine-tuning | [`fine_tuning`] |
//!
//! ## Module Structure
//!
//! ### Core APIs
//!
//! - [`chat`] - Chat Completions API (`/v1/chat/completions`)
//!   - [`chat::request`] - `ChatCompletion` builder
//!   - [`chat::response`] - Response types
//!
//! - [`responses`] - Responses API (`/v1/responses`)
//!   - [`responses::request`] - `Responses` builder with CRUD operations
//!   - [`responses::response`] - Response types
//!
//! - [`conversations`] - Conversations API (`/v1/conversations`)
//!   - [`conversations::request`] - `Conversations` client
//!   - [`conversations::response`] - Conversation and item types
//!
//! - [`embedding`] - Embeddings API (`/v1/embeddings`)
//!   - [`embedding::request`] - `Embedding` builder
//!   - [`embedding::response`] - Vector response types
//!
//! - [`realtime`] - Realtime API (WebSocket)
//!   - [`realtime::client`] - `RealtimeClient` and `RealtimeSession`
//!   - [`realtime::events`] - Client/server event types
//!
//! ### Content & Media APIs
//!
//! - [`images`] - Images API (`/v1/images`)
//!   - Generate, edit, create variations with DALL-E
//!
//! - [`audio`] - Audio API (`/v1/audio`)
//!   - Text-to-speech, transcription, translation
//!
//! - [`moderations`] - Moderations API (`/v1/moderations`)
//!   - Content policy violation detection
//!
//! ### Management APIs
//!
//! - [`models`] - Models API (`/v1/models`)
//!   - List and retrieve available models
//!
//! - [`files`] - Files API (`/v1/files`)
//!   - Upload/download files for fine-tuning and batch
//!
//! - [`batch`] - Batch API (`/v1/batches`)
//!   - Async bulk processing with 50% cost savings
//!
//! - [`fine_tuning`] - Fine-tuning API (`/v1/fine_tuning/jobs`)
//!   - Custom model training and management
//!
//! ### Shared Utilities
//!
//! - [`common`] - Shared types across all APIs
//!   - [`common::models`] - Type-safe model enums (`ChatModel`, `EmbeddingModel`, etc.)
//!   - [`common::message`] - Message and content structures
//!   - [`common::role`] - User roles (User, Assistant, System, Tool)
//!   - [`common::tool`] - Function calling definitions
//!   - [`common::auth`] - Authentication (OpenAI, Azure, custom)
//!   - [`common::errors`] - Error types
//!   - [`common::structured_output`] - JSON schema utilities
//!
//! ## Error Handling
//!
//! All operations return `Result<T, OpenAIToolError>`:
//!
//! ```rust,no_run
//! use openai_tools::common::errors::OpenAIToolError;
//! # use openai_tools::chat::request::ChatCompletion;
//!
//! # #[tokio::main]
//! # async fn main() {
//! # let mut chat = ChatCompletion::new();
//! match chat.chat().await {
//!     Ok(response) => {
//!         println!("Success: {:?}", response.choices[0].message.content);
//!     },
//!     // Network/HTTP errors (connection failed, timeout, etc.)
//!     Err(OpenAIToolError::RequestError(e)) => {
//!         eprintln!("Network error: {}", e);
//!     },
//!     // JSON parsing errors (unexpected response format)
//!     Err(OpenAIToolError::SerdeJsonError(e)) => {
//!         eprintln!("JSON parse error: {}", e);
//!     },
//!     // WebSocket errors (Realtime API)
//!     Err(OpenAIToolError::WebSocketError(msg)) => {
//!         eprintln!("WebSocket error: {}", msg);
//!     },
//!     // Realtime API specific errors
//!     Err(OpenAIToolError::RealtimeError { code, message }) => {
//!         eprintln!("Realtime error [{}]: {}", code, message);
//!     },
//!     // Other errors
//!     Err(e) => eprintln!("Error: {}", e),
//! }
//! # }
//! ```
//!
//! For API errors (rate limits, invalid requests), check the HTTP response status
//! in `RequestError`.
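//!
//! A minimal sketch of checking that status, assuming `RequestError` wraps a
//! `reqwest::Error` (the underlying HTTP client is an assumption here, not something
//! stated above):
//!
//! ```rust,ignore
//! if let Err(OpenAIToolError::RequestError(e)) = chat.chat().await {
//!     // reqwest::Error::status() yields the HTTP status when a response was received.
//!     if let Some(status) = e.status() {
//!         if status.as_u16() == 429 {
//!             eprintln!("Rate limited; retry with backoff");
//!         } else {
//!             eprintln!("API error: HTTP {}", status);
//!         }
//!     } else {
//!         eprintln!("Transport error (no response): {}", e);
//!     }
//! }
//! ```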
//!
//! ## Provider Configuration
//!
//! This library supports multiple providers: OpenAI, Azure OpenAI, and OpenAI-compatible APIs.
//!
//! ### OpenAI (Default)
//!
//! ```bash
//! export OPENAI_API_KEY="sk-..."
//! ```
//!
//! ```rust,no_run
//! use openai_tools::chat::request::ChatCompletion;
//!
//! let chat = ChatCompletion::new(); // Uses OPENAI_API_KEY
//! ```
//!
//! ### Azure OpenAI
//!
//! ```bash
//! export AZURE_OPENAI_API_KEY="..."
//! export AZURE_OPENAI_BASE_URL="https://my-resource.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2024-08-01-preview"
//! ```
//!
//! ```rust,no_run
//! use openai_tools::chat::request::ChatCompletion;
//!
//! // From environment variables
//! let chat = ChatCompletion::azure().unwrap();
//!
//! // Or with explicit URL
//! let chat = ChatCompletion::with_url(
//!     "https://my-resource.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2024-08-01-preview",
//!     "api-key"
//! );
//! ```
//!
//! ### OpenAI-Compatible APIs (Ollama, vLLM, LocalAI)
//!
//! ```rust,no_run
//! use openai_tools::chat::request::ChatCompletion;
//!
//! let chat = ChatCompletion::with_url("http://localhost:11434/v1", "ollama");
//! ```
//!
//! ### Auto-Detect Provider
//!
//! ```rust,no_run
//! use openai_tools::chat::request::ChatCompletion;
//!
//! // Uses Azure if AZURE_OPENAI_API_KEY is set, otherwise OpenAI
//! let chat = ChatCompletion::detect_provider().unwrap();
//! ```
//!
//! ## Type-Safe Model Selection
//!
//! All APIs use enum-based model selection for compile-time validation:
//!
//! ```rust,no_run
//! use openai_tools::common::models::{ChatModel, EmbeddingModel, RealtimeModel, FineTuningModel};
//! use openai_tools::chat::request::ChatCompletion;
//! use openai_tools::embedding::request::Embedding;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Chat/Responses API
//! let mut chat = ChatCompletion::new();
//! chat.model(ChatModel::Gpt4oMini); // Cost-effective
//! chat.model(ChatModel::Gpt4o); // Most capable
//! chat.model(ChatModel::O3Mini); // Reasoning model
//!
//! // Embedding API
//! let mut embedding = Embedding::new()?;
//! embedding.model(EmbeddingModel::TextEmbedding3Small);
//!
//! // Custom/fine-tuned models
//! chat.model(ChatModel::custom("ft:gpt-4o-mini:my-org::abc123"));
//! # Ok(())
//! # }
//! ```
//!

pub mod audio;
pub mod batch;
pub mod chat;
pub mod common;
pub mod conversations;
pub mod embedding;
pub mod files;
pub mod fine_tuning;
pub mod images;
pub mod models;
pub mod moderations;
pub mod realtime;
pub mod responses;