openai_tools/lib.rs
//! # OpenAI Tools for Rust
//!
//! A comprehensive Rust library for interacting with OpenAI's APIs, providing easy-to-use
//! interfaces for chat completions, responses, and various AI-powered functionalities.
//! This crate offers both high-level convenience methods and low-level control for
//! advanced use cases.
//!
//! ## Features
//!
//! ### Core APIs
//! - **Chat Completions API**: Chat with streaming, function calling, and structured output
//! - **Responses API**: Assistant-style interactions with multi-modal input
//! - **Conversations API**: Long-running conversation state management
//! - **Embedding API**: Text to vector embeddings for semantic search
//! - **Realtime API**: WebSocket-based real-time audio/text streaming
//!
//! ### Content & Media APIs
//! - **Images API**: DALL-E image generation, editing, and variations
//! - **Audio API**: Text-to-speech, transcription, and translation
//! - **Moderations API**: Content policy violation detection
//!
//! ### Management APIs
//! - **Models API**: List and retrieve available models
//! - **Files API**: Upload and manage files for fine-tuning/batch
//! - **Batch API**: Async bulk processing with 50% cost savings
//! - **Fine-tuning API**: Custom model training
//!
//! ## Quick Start
//!
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! openai-tools = "1.0"
//! tokio = { version = "1.0", features = ["full"] }
//! serde = { version = "1.0", features = ["derive"] }
//! ```
//!
//! Set up your API key:
//!
//! ```bash
//! export OPENAI_API_KEY="your-api-key-here"
//! ```
//!
//! ## Basic Chat Completion
//!
//! ```rust,no_run
//! use openai_tools::chat::request::ChatCompletion;
//! use openai_tools::common::message::Message;
//! use openai_tools::common::role::Role;
//! use openai_tools::common::models::ChatModel;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut chat = ChatCompletion::new();
//!     let messages = vec![
//!         Message::from_string(Role::User, "Hello! How are you?")
//!     ];
//!
//!     let response = chat
//!         .model(ChatModel::Gpt4oMini) // Type-safe model selection
//!         .messages(messages)
//!         .temperature(0.7)
//!         .chat()
//!         .await?;
//!
//!     println!("AI: {}", response.choices[0].message.content.as_ref().unwrap().text.as_ref().unwrap());
//!     Ok(())
//! }
//! ```
//!
//! ## Structured Output with JSON Schema
//!
//! ```rust,no_run
//! use openai_tools::chat::request::ChatCompletion;
//! use openai_tools::common::{message::Message, role::Role, structured_output::Schema, models::ChatModel};
//! use serde::{Deserialize, Serialize};
//!
//! #[derive(Debug, Serialize, Deserialize)]
//! struct PersonInfo {
//!     name: String,
//!     age: u32,
//!     occupation: String,
//! }
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut chat = ChatCompletion::new();
//!
//!     // Create JSON schema
//!     let mut schema = Schema::chat_json_schema("person_info");
//!     schema.add_property("name", "string", "Person's full name");
//!     schema.add_property("age", "number", "Person's age");
//!     schema.add_property("occupation", "string", "Person's job");
//!
//!     let messages = vec![
//!         Message::from_string(Role::User,
//!             "Extract info: John Smith, 30, Software Engineer")
//!     ];
//!
//!     let response = chat
//!         .model(ChatModel::Gpt4oMini)
//!         .messages(messages)
//!         .json_schema(schema)
//!         .chat()
//!         .await?;
//!
//!     let person: PersonInfo = serde_json::from_str(
//!         response.choices[0].message.content.as_ref().unwrap().text.as_ref().unwrap()
//!     )?;
//!
//!     println!("Extracted: {} ({}), {}", person.name, person.age, person.occupation);
//!     Ok(())
//! }
//! ```
//!
//! ## Function Calling with Tools
//!
//! ```rust,no_run
//! use openai_tools::chat::request::ChatCompletion;
//! use openai_tools::common::{message::Message, role::Role, tool::Tool, parameters::ParameterProperty, models::ChatModel};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut chat = ChatCompletion::new();
//!
//!     // Define a weather tool
//!     let weather_tool = Tool::function(
//!         "get_weather",
//!         "Get current weather for a location",
//!         vec![
//!             ("location", ParameterProperty::from_string("City name")),
//!             ("unit", ParameterProperty::from_string("Temperature unit (celsius/fahrenheit)")),
//!         ],
//!         false,
//!     );
//!
//!     let messages = vec![
//!         Message::from_string(Role::User, "What's the weather in Tokyo?")
//!     ];
//!
//!     let response = chat
//!         .model(ChatModel::Gpt4oMini)
//!         .messages(messages)
//!         .tools(vec![weather_tool])
//!         .chat()
//!         .await?;
//!
//!     // Handle tool calls
//!     if let Some(tool_calls) = &response.choices[0].message.tool_calls {
//!         for call in tool_calls {
//!             println!("Tool: {}", call.function.name);
//!             if let Ok(args) = call.function.arguments_as_map() {
//!                 println!("Args: {:?}", args);
//!             }
//!             // Execute the function and continue conversation...
//!         }
//!     }
//!     Ok(())
//! }
//! ```
//!
//! ## Multi-modal Input (Text + Image)
//!
//! Both the Chat Completions API and the Responses API support multi-modal messages.
//! The same `Content` and `Message` types work with both APIs; differences in
//! serialization format are handled automatically.
//!
//! ### Chat Completions API
//!
//! ```rust,no_run
//! use openai_tools::chat::request::ChatCompletion;
//! use openai_tools::common::{message::{Message, Content}, role::Role, models::ChatModel};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut chat = ChatCompletion::new();
//!
//!     let message = Message::from_message_array(
//!         Role::User,
//!         vec![
//!             Content::from_text("What do you see in this image?"),
//!             Content::from_image_url("https://example.com/image.jpg"),
//!         ],
//!     );
//!
//!     let response = chat
//!         .model(ChatModel::Gpt4oMini)
//!         .messages(vec![message])
//!         .chat()
//!         .await?;
//!
//!     println!("AI: {}", response.choices[0].message.content.as_ref().unwrap().text.as_ref().unwrap());
//!     Ok(())
//! }
//! ```
//!
//! ### Responses API
//!
//! ```rust,no_run
//! use openai_tools::responses::request::Responses;
//! use openai_tools::common::{message::{Message, Content}, role::Role, models::ChatModel};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut responses = Responses::new();
//!
//!     responses
//!         .model(ChatModel::Gpt4oMini)
//!         .instructions("You are an image analysis assistant.");
//!
//!     let message = Message::from_message_array(
//!         Role::User,
//!         vec![
//!             Content::from_text("What do you see in this image?"),
//!             Content::from_image_file("path/to/image.jpg"),
//!         ],
//!     );
//!
//!     responses.messages(vec![message]);
//!
//!     let response = responses.complete().await?;
//!     let text = response.output_text().unwrap();
//!     println!("Response: {}", text);
//!     Ok(())
//! }
//! ```
//!
//! ## Choosing the Right API
//!
//! | Use Case | Recommended API | Module |
//! |----------|-----------------|--------|
//! | Simple Q&A, chatbot | Chat Completions | [`chat`] |
//! | Multi-turn assistant with state | Responses + Conversations | [`responses`], [`conversations`] |
//! | Real-time voice interaction | Realtime | [`realtime`] |
//! | Semantic search, similarity | Embeddings | [`embedding`] |
//! | Image generation (DALL-E) | Images | [`images`] |
//! | Speech-to-text, TTS | Audio | [`audio`] |
//! | Content moderation | Moderations | [`moderations`] |
//! | Bulk processing (50% off) | Batch | [`batch`] |
//! | Custom model training | Fine-tuning | [`fine_tuning`] |
//!
//! ## Module Structure
//!
//! ### Core APIs
//!
//! - [`chat`] - Chat Completions API (`/v1/chat/completions`)
//!   - [`chat::request`] - `ChatCompletion` builder
//!   - [`chat::response`] - Response types
//!
//! - [`responses`] - Responses API (`/v1/responses`)
//!   - [`responses::request`] - `Responses` builder with CRUD operations
//!   - [`responses::response`] - Response types
//!
//! - [`conversations`] - Conversations API (`/v1/conversations`)
//!   - [`conversations::request`] - `Conversations` client
//!   - [`conversations::response`] - Conversation and item types
//!
//! - [`embedding`] - Embeddings API (`/v1/embeddings`)
//!   - [`embedding::request`] - `Embedding` builder
//!   - [`embedding::response`] - Vector response types
//!
//! - [`realtime`] - Realtime API (WebSocket)
//!   - [`realtime::client`] - `RealtimeClient` and `RealtimeSession`
//!   - [`realtime::events`] - Client/server event types
//!
//! ### Content & Media APIs
//!
//! - [`images`] - Images API (`/v1/images`)
//!   - Generate, edit, create variations with DALL-E
//!
//! - [`audio`] - Audio API (`/v1/audio`)
//!   - Text-to-speech, transcription, translation
//!
//! - [`moderations`] - Moderations API (`/v1/moderations`)
//!   - Content policy violation detection
//!
//! ### Management APIs
//!
//! - [`models`] - Models API (`/v1/models`)
//!   - List and retrieve available models
//!
//! - [`files`] - Files API (`/v1/files`)
//!   - Upload/download files for fine-tuning and batch
//!
//! - [`batch`] - Batch API (`/v1/batches`)
//!   - Async bulk processing with 50% cost savings
//!
//! - [`fine_tuning`] - Fine-tuning API (`/v1/fine_tuning/jobs`)
//!   - Custom model training and management
//!
//! ### Shared Utilities
//!
//! - [`common`] - Shared types across all APIs
//!   - [`common::models`] - Type-safe model enums (`ChatModel`, `EmbeddingModel`, etc.)
//!   - [`common::message`] - Message and content structures
//!   - [`common::role`] - Message roles (User, Assistant, System, Tool)
//!   - [`common::tool`] - Function calling definitions
//!   - [`common::auth`] - Authentication (OpenAI, Azure, custom)
//!   - [`common::errors`] - Error types
//!   - [`common::structured_output`] - JSON schema utilities
//!
//! ## Error Handling
//!
//! All operations return `Result<T, OpenAIToolError>`:
//!
//! ```rust,no_run
//! use openai_tools::common::errors::OpenAIToolError;
//! # use openai_tools::chat::request::ChatCompletion;
//!
//! # #[tokio::main]
//! # async fn main() {
//! # let mut chat = ChatCompletion::new();
//! match chat.chat().await {
//!     Ok(response) => {
//!         println!("Success: {:?}", response.choices[0].message.content);
//!     },
//!     // Network/HTTP errors (connection failed, timeout, etc.)
//!     Err(OpenAIToolError::RequestError(e)) => {
//!         eprintln!("Network error: {}", e);
//!     },
//!     // JSON parsing errors (unexpected response format)
//!     Err(OpenAIToolError::SerdeJsonError(e)) => {
//!         eprintln!("JSON parse error: {}", e);
//!     },
//!     // WebSocket errors (Realtime API)
//!     Err(OpenAIToolError::WebSocketError(msg)) => {
//!         eprintln!("WebSocket error: {}", msg);
//!     },
//!     // Realtime API specific errors
//!     Err(OpenAIToolError::RealtimeError { code, message }) => {
//!         eprintln!("Realtime error [{}]: {}", code, message);
//!     },
//!     // Other errors
//!     Err(e) => eprintln!("Error: {}", e),
//! }
//! # }
//! ```
//!
//! For API errors (rate limits, invalid requests), check the HTTP response status
//! in `RequestError`.
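//!
//! For example, a rate-limit check might look like the sketch below. It assumes
//! that `RequestError` wraps a `reqwest::Error` (so `status()` is available);
//! adjust the accessor to whatever HTTP error type your version of the crate exposes.
//!
//! ```rust,ignore
//! use openai_tools::common::errors::OpenAIToolError;
//! # use openai_tools::chat::request::ChatCompletion;
//! # async fn run(mut chat: ChatCompletion) {
//! match chat.chat().await {
//!     Ok(response) => println!("{:?}", response.choices[0].message.content),
//!     Err(OpenAIToolError::RequestError(e)) => {
//!         // `status()` is Some(..) only when the server actually responded.
//!         match e.status().map(|s| s.as_u16()) {
//!             Some(429) => eprintln!("Rate limited - back off and retry"),
//!             Some(400) => eprintln!("Invalid request: {}", e),
//!             Some(code) => eprintln!("API error {}: {}", code, e),
//!             None => eprintln!("Transport error: {}", e),
//!         }
//!     }
//!     Err(e) => eprintln!("Error: {}", e),
//! }
//! # }
//! ```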
//!
//! ## Provider Configuration
//!
//! This library supports multiple providers: OpenAI, Azure OpenAI, and OpenAI-compatible APIs.
//!
//! ### OpenAI (Default)
//!
//! ```bash
//! export OPENAI_API_KEY="sk-..."
//! ```
//!
//! ```rust,no_run
//! use openai_tools::chat::request::ChatCompletion;
//!
//! let chat = ChatCompletion::new(); // Uses OPENAI_API_KEY
//! ```
//!
//! ### Azure OpenAI
//!
//! ```bash
//! export AZURE_OPENAI_API_KEY="..."
//! export AZURE_OPENAI_BASE_URL="https://my-resource.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2024-08-01-preview"
//! ```
//!
//! ```rust,no_run
//! use openai_tools::chat::request::ChatCompletion;
//!
//! // From environment variables
//! let chat = ChatCompletion::azure().unwrap();
//!
//! // Or with explicit URL
//! let chat = ChatCompletion::with_url(
//!     "https://my-resource.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2024-08-01-preview",
//!     "api-key"
//! );
//! ```
//!
//! ### OpenAI-Compatible APIs (Ollama, vLLM, LocalAI)
//!
//! ```rust,no_run
//! use openai_tools::chat::request::ChatCompletion;
//!
//! let chat = ChatCompletion::with_url("http://localhost:11434/v1", "ollama");
//! ```
//!
//! ### Auto-Detect Provider
//!
//! ```rust,no_run
//! use openai_tools::chat::request::ChatCompletion;
//!
//! // Uses Azure if AZURE_OPENAI_API_KEY is set, otherwise OpenAI
//! let chat = ChatCompletion::detect_provider().unwrap();
//! ```
//!
//! ## Type-Safe Model Selection
//!
//! All APIs use enum-based model selection for compile-time validation:
//!
//! ```rust,no_run
//! use openai_tools::common::models::{ChatModel, EmbeddingModel, RealtimeModel, FineTuningModel};
//! use openai_tools::chat::request::ChatCompletion;
//! use openai_tools::embedding::request::Embedding;
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Chat/Responses API
//! let mut chat = ChatCompletion::new();
//! chat.model(ChatModel::Gpt4oMini); // Cost-effective
//! chat.model(ChatModel::Gpt4o);     // Most capable
//! chat.model(ChatModel::O3Mini);    // Reasoning model
//!
//! // Embedding API
//! let mut embedding = Embedding::new()?;
//! embedding.model(EmbeddingModel::TextEmbedding3Small);
//!
//! // Custom/fine-tuned models
//! chat.model(ChatModel::custom("ft:gpt-4o-mini:my-org::abc123"));
//! # Ok(())
//! # }
//! ```
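//!
//! As a rough end-to-end sketch of how the embedding builder above might be used,
//! the example below requests vectors for two strings. The `input` and `embed`
//! method names and the `data` field access are illustrative assumptions, not the
//! crate's confirmed API; see the [`embedding`] module docs for the exact builder methods.
//!
//! ```rust,ignore
//! use openai_tools::common::models::EmbeddingModel;
//! use openai_tools::embedding::request::Embedding;
//!
//! # async fn run() -> Result<(), Box<dyn std::error::Error>> {
//! let mut embedding = Embedding::new()?;
//! let response = embedding
//!     .model(EmbeddingModel::TextEmbedding3Small)
//!     .input(vec!["first document", "second document"]) // assumed method name
//!     .embed()                                           // assumed method name
//!     .await?;
//!
//! // Each input string maps to one vector in the response.
//! println!("got {} vectors", response.data.len());
//! # Ok(())
//! # }
//! ```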
//!

pub mod audio;
pub mod batch;
pub mod chat;
pub mod common;
pub mod conversations;
pub mod embedding;
pub mod files;
pub mod fine_tuning;
pub mod images;
pub mod models;
pub mod moderations;
pub mod realtime;
pub mod responses;