// openai_tools/lib.rs

1//! # OpenAI Tools for Rust
2//!
3//! A comprehensive Rust library for interacting with OpenAI's APIs, providing easy-to-use
4//! interfaces for chat completions, responses, and various AI-powered functionalities.
5//! This crate offers both high-level convenience methods and low-level control for
6//! advanced use cases.
7//!
8//! ## Features
9//!
10//! - **Chat Completions API**: Full support for OpenAI's Chat Completions with streaming, function calling, and structured output
11//! - **Responses API**: Advanced assistant-style interactions with multi-modal input support
12//!
13//! ## Quick Start
14//!
15//! Add this to your `Cargo.toml`:
16//!
17//! ```toml
18//! [dependencies]
19//! openai-tools = "0.1.0"
20//! tokio = { version = "1.0", features = ["full"] }
21//! serde = { version = "1.0", features = ["derive"] }
22//! ```
23//!
24//! Set up your API key:
25//!
26//! ```bash
27//! export OPENAI_API_KEY="your-api-key-here"
28//! ```
29//!
30//! ## Basic Chat Completion
31//!
32//! ```rust,no_run
33//! use openai_tools::chat::request::ChatCompletion;
34//! use openai_tools::common::message::Message;
35//! use openai_tools::common::role::Role;
36//!
37//! #[tokio::main]
38//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
39//!     let mut chat = ChatCompletion::new();
40//!     let messages = vec![
41//!         Message::from_string(Role::User, "Hello! How are you?")
42//!     ];
43//!
44//!     let response = chat
45//!         .model_id("gpt-4o-mini")
46//!         .messages(messages)
47//!         .temperature(0.7)
48//!         .chat()
49//!         .await?;
50//!
51//!     println!("AI: {}", response.choices[0].message.content.as_ref().unwrap().text.as_ref().unwrap());
52//!     Ok(())
53//! }
54//! ```
55//!
56//! ## Structured Output with JSON Schema
57//!
58//! ```rust,no_run
59//! use openai_tools::chat::request::ChatCompletion;
60//! use openai_tools::common::{message::Message, role::Role, structured_output::Schema};
61//! use serde::{Deserialize, Serialize};
62//!
63//! #[derive(Debug, Serialize, Deserialize)]
64//! struct PersonInfo {
65//!     name: String,
66//!     age: u32,
67//!     occupation: String,
68//! }
69//!
70//! #[tokio::main]
71//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
72//!     let mut chat = ChatCompletion::new();
73//!     
74//!     // Create JSON schema
75//!     let mut schema = Schema::chat_json_schema("person_info");
76//!     schema.add_property("name", "string", "Person's full name");
77//!     schema.add_property("age", "number", "Person's age");
78//!     schema.add_property("occupation", "string", "Person's job");
79//!     
80//!     let messages = vec![
81//!         Message::from_string(Role::User,
82//!             "Extract info: John Smith, 30, Software Engineer")
83//!     ];
84//!
85//!     let response = chat
86//!         .model_id("gpt-4o-mini")
87//!         .messages(messages)
88//!         .json_schema(schema)
89//!         .chat()
90//!         .await?;
91//!
92//!     let person: PersonInfo = serde_json::from_str(
93//!         response.choices[0].message.content.as_ref().unwrap().text.as_ref().unwrap()
94//!     )?;
95//!     
96//!     println!("Extracted: {} ({}), {}", person.name, person.age, person.occupation);
97//!     Ok(())
98//! }
99//! ```
100//!
101//! ## Function Calling with Tools
102//!
103//! ```rust,no_run
104//! use openai_tools::chat::request::ChatCompletion;
105//! use openai_tools::common::{message::Message, role::Role, tool::Tool, parameters::ParameterProp};
106//!
107//! #[tokio::main]
108//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
109//!     let mut chat = ChatCompletion::new();
110//!     
111//!     // Define a weather tool
112//!     let weather_tool = Tool::function(
113//!         "get_weather",
114//!         "Get current weather for a location",
115//!         vec![
116//!             ("location", ParameterProp::string("City name")),
117//!             ("unit", ParameterProp::string("Temperature unit (celsius/fahrenheit)")),
118//!         ],
119//!         false,
120//!     );
121//!
122//!     let messages = vec![
123//!         Message::from_string(Role::User, "What's the weather in Tokyo?")
124//!     ];
125//!
126//!     let response = chat
127//!         .model_id("gpt-4o-mini")
128//!         .messages(messages)
129//!         .tools(vec![weather_tool])
130//!         .chat()
131//!         .await?;
132//!
133//!     // Handle tool calls
134//!     if let Some(tool_calls) = &response.choices[0].message.tool_calls {
135//!         for call in tool_calls {
136//!             println!("Tool: {}", call.function.name);
137//!             if let Ok(args) = call.function.arguments_as_map() {
138//!                 println!("Args: {:?}", args);
139//!             }
140//!             // Execute the function and continue conversation...
141//!         }
142//!     }
143//!     Ok(())
144//! }
145//! ```
146//!
147//! ## Multi-modal Input with Responses API
148//!
149//! ```rust,no_run
150//! use openai_tools::responses::request::Responses;
151//! use openai_tools::common::{message::{Message, Content}, role::Role};
152//!
153//! #[tokio::main]
154//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
155//!     let mut responses = Responses::new();
156//!     
157//!     responses
158//!         .model_id("gpt-4o-mini")
159//!         .instructions("You are an image analysis assistant.");
160//!     
161//!     // Multi-modal message with text and image
162//!     let message = Message::from_message_array(
163//!         Role::User,
164//!         vec![
165//!             Content::from_text("What do you see in this image?"),
166//!             Content::from_image_file("path/to/image.jpg"),
167//!         ],
168//!     );
169//!     
170//!     responses.messages(vec![message]);
171//!     
172//!     let response = responses.complete().await?;
173//!     if let Some(content) = &response.output[0].content {
174//!         let text = &content[0].text;
175//!         println!("Analysis: {}", text);
176//!     }
177//!     Ok(())
178//! }
179//! ```
180//!
181//! ## Module Structure
182//!
183//! This crate is organized into three main modules:
184//!
185//! - [`chat`] - OpenAI Chat Completions API interface
186//!   - [`chat::request`] - Request building and sending
187//!   - [`chat::response`] - Response data structures
188//!
189//! - [`responses`] - OpenAI Responses API interface (assistant-style interactions)
190//!   - [`responses::request`] - Advanced request handling with multi-modal support
191//!   - [`responses::response`] - Response structures for assistant interactions
192//!
193//! - [`common`] - Shared utilities and data types
194//!   - [`common::message`] - Message and content structures
195//!   - [`common::role`] - User roles (User, Assistant, System, etc.)
196//!   - [`common::tool`] - Function calling and tool definitions
197//!   - [`common::structured_output`] - JSON schema utilities
198//!   - [`common::errors`] - Error types and handling
199//!   - [`common::usage`] - Token usage tracking
200//!
201//! ## Error Handling
202//!
203//! All operations return `Result` types with detailed error information:
204//!
205//! ```rust,no_run
206//! use openai_tools::common::errors::OpenAIToolError;
207//! # use openai_tools::chat::request::ChatCompletion;
208//!
209//! # #[tokio::main]
210//! # async fn main() {
211//! # let mut chat = ChatCompletion::new();
212//! match chat.chat().await {
213//!     Ok(response) => {
214//!         if let Some(content) = &response.choices[0].message.content {
215//!             if let Some(text) = &content.text {
216//!                 println!("Success: {}", text);
217//!             }
218//!         }
219//!     },
220//!     Err(OpenAIToolError::RequestError(e)) => eprintln!("Network error: {}", e),
221//!     Err(OpenAIToolError::SerdeJsonError(e)) => eprintln!("JSON error: {}", e),
222//!     Err(e) => eprintln!("Other error: {}", e),
223//! }
224//! # }
225//! ```
226//!
227//! ## Environment Configuration
228//!
229//! The library automatically loads configuration from environment variables and `.env` files:
230//!
231//! ```bash
232//! # Required
233//! OPENAI_API_KEY=your-api-key-here
234//!
235//! # Optional
//! RUST_LOG=info  # Set log verbosity (e.g. info, debug)
237//! ```
238//!
239
/// OpenAI Chat Completions API interface: request building and response data structures.
pub mod chat;
/// Shared utilities and data types: messages, roles, tools, structured-output schemas,
/// error types, and token-usage tracking.
pub mod common;
/// OpenAI Responses API interface (assistant-style interactions) with multi-modal support.
pub mod responses;