openai_interface/lib.rs
//! A low-level Rust interface for interacting with OpenAI's API.
//!
//! This crate provides a simple, efficient, low-level way to interact with OpenAI's API
//! and OpenAI-compatible endpoints (the examples below use DeepSeek), supporting both
//! streaming and non-streaming responses. It leverages Rust's type system for safety
//! and performance while exposing the full flexibility of the API.
//!
//! # Features
//!
//! - **Chat Completions**: Full support for OpenAI's chat completion API
//! - **Streaming and Non-streaming**: Support for both streaming and non-streaming responses
//! - **Strong Typing**: Complete type definitions for all API requests and responses
//! - **Error Handling**: Comprehensive error handling with detailed error types
//! - **Async/Await**: Built with async/await support for efficient asynchronous operations
//! - **Musl Support**: Designed to work with musl libc for lightweight deployments
//!
//! # Examples
//!
//! ## Non-streaming Chat Completion
//!
//! This example demonstrates how to make a non-streaming request to the chat completion API.
//!
//! ```rust,no_run
//! use std::sync::LazyLock;
//! use openai_interface::chat::request::{Message, RequestBody};
//! use openai_interface::chat::response::no_streaming::ChatCompletion;
//! use openai_interface::rest::post::NoStream;
//!
//! // You need to provide your own DeepSeek API key at /keys/deepseek_domestic_key
//! static DEEPSEEK_API_KEY: LazyLock<&str> =
//!     LazyLock::new(|| include_str!("../keys/deepseek_domestic_key").trim());
//! const DEEPSEEK_CHAT_URL: &str = "https://api.deepseek.com/chat/completions";
//! const DEEPSEEK_MODEL: &str = "deepseek-chat";
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let request = RequestBody {
//!         messages: vec![
//!             Message::System {
//!                 content: "You are a helpful assistant.".to_string(),
//!                 name: None,
//!             },
//!             Message::User {
//!                 content: "Hello, how are you?".to_string(),
//!                 name: None,
//!             },
//!         ],
//!         model: DEEPSEEK_MODEL.to_string(),
//!         stream: false,
//!         ..Default::default()
//!     };
//!
//!     // Send the request and deserialize the complete response.
//!     let chat_completion: ChatCompletion = request
//!         .get_response(DEEPSEEK_CHAT_URL, &*DEEPSEEK_API_KEY)
//!         .await?;
//!     let text = chat_completion.choices[0]
//!         .message
//!         .content
//!         .as_deref()
//!         .unwrap();
//!     println!("{}", text);
//!     Ok(())
//! }
//! ```
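//!
//! Because `RequestBody` implements `Default`, optional API parameters can be set
//! through struct-update syntax. A minimal sketch, assuming your version of the
//! crate exposes OpenAI-style optional fields such as `temperature` and
//! `max_tokens` (these names are hypothetical; check `chat::request::RequestBody`
//! for the exact fields available):
//!
//! ```rust,ignore
//! use openai_interface::chat::request::RequestBody;
//!
//! let request = RequestBody {
//!     model: "deepseek-chat".to_string(),
//!     // Hypothetical optional fields; verify they exist in your crate version.
//!     temperature: Some(0.7),
//!     max_tokens: Some(512),
//!     ..Default::default()
//! };
//! ```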
//!
//! ## Streaming Chat Completion
//!
//! This example demonstrates how to handle streaming responses from the API. As with the
//! non-streaming example, all API parameters can be adjusted directly through the request struct.
//!
//! ```rust,no_run
//! use std::sync::LazyLock;
//!
//! use openai_interface::chat::request::{Message, RequestBody};
//! use openai_interface::chat::response::streaming::{ChatCompletionChunk, CompletionContent};
//! use openai_interface::rest::post::Stream;
//! use futures_util::StreamExt;
//!
//! // You need to provide your own DeepSeek API key at /keys/deepseek_domestic_key
//! static DEEPSEEK_API_KEY: LazyLock<&str> =
//!     LazyLock::new(|| include_str!("../keys/deepseek_domestic_key").trim());
//! const DEEPSEEK_CHAT_URL: &str = "https://api.deepseek.com/chat/completions";
//! const DEEPSEEK_MODEL: &str = "deepseek-chat";
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let request = RequestBody {
//!         messages: vec![
//!             Message::System {
//!                 content: "You are a helpful assistant.".to_string(),
//!                 name: None,
//!             },
//!             Message::User {
//!                 content: "Who are you?".to_string(),
//!                 name: None,
//!             },
//!         ],
//!         model: DEEPSEEK_MODEL.to_string(),
//!         stream: true,
//!         ..Default::default()
//!     };
//!
//!     // Send the request and consume the response as a stream of chunks.
//!     let mut response_stream = request
//!         .get_stream_response(DEEPSEEK_CHAT_URL, *DEEPSEEK_API_KEY)
//!         .await?;
//!
//!     let mut message = String::new();
//!
//!     while let Some(chunk_result) = response_stream.next().await {
//!         let chunk: ChatCompletionChunk = chunk_result?;
//!         let content = match chunk.choices[0].delta.content.as_ref().unwrap() {
//!             CompletionContent::Content(s) => s,
//!             CompletionContent::ReasoningContent(s) => s,
//!         };
//!         // Print each delta as it arrives and accumulate the full reply.
//!         print!("{}", content);
//!         message.push_str(content);
//!     }
//!
//!     println!("\nFull message:\n{}", message);
//!     Ok(())
//! }
//! ```
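//!
//! The example above indexes `choices[0]` and unwraps `delta.content` directly.
//! Some providers may end a stream with a chunk that carries no choices or no
//! content (for example, a usage-only chunk), so production code may prefer to
//! skip such chunks instead of panicking. A minimal sketch, using only the fields
//! shown in the example above:
//!
//! ```rust,ignore
//! while let Some(chunk_result) = response_stream.next().await {
//!     let chunk: ChatCompletionChunk = chunk_result?;
//!     // Skip chunks with no choices or no delta content rather than unwrapping.
//!     if let Some(choice) = chunk.choices.first() {
//!         if let Some(content) = choice.delta.content.as_ref() {
//!             let text = match content {
//!                 CompletionContent::Content(s) => s,
//!                 CompletionContent::ReasoningContent(s) => s,
//!             };
//!             message.push_str(text);
//!         }
//!     }
//! }
//! ```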
//!
//! # Musl Build
//!
//! This crate is designed to work with musl libc, making it suitable for
//! lightweight deployments in containerized environments. Expect longer compile
//! times, as OpenSSL is built from source for musl targets.
//!
//! To build for musl:
//! ```bash
//! rustup target add x86_64-unknown-linux-musl
//! cargo build --target x86_64-unknown-linux-musl
//! ```
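//!
//! You can then verify that the resulting binary has no dynamic dependencies
//! (substitute your own binary name for the placeholder):
//! ```bash
//! # A statically linked binary reports "not a dynamic executable"
//! # (or "statically linked", depending on the ldd implementation).
//! ldd target/x86_64-unknown-linux-musl/debug/<your-binary>
//! ```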

pub mod chat;
pub mod completions;
pub mod errors;
pub mod files;
pub mod rest;
#[cfg(test)]
mod tests {
    use crate::chat::request::{Message, RequestBody};
    use crate::chat::response::no_streaming::ChatCompletion;
    use crate::chat::response::streaming::{ChatCompletionChunk, CompletionContent};
    use crate::rest::post::{NoStream, Stream};
    use futures_util::StreamExt;
    use std::sync::LazyLock;

    // You need to provide your own DeepSeek API key at /keys/deepseek_domestic_key
    static DEEPSEEK_API_KEY: LazyLock<&str> =
        LazyLock::new(|| include_str!("../keys/deepseek_domestic_key").trim());
    const DEEPSEEK_CHAT_URL: &str = "https://api.deepseek.com/chat/completions";
    const DEEPSEEK_MODEL: &str = "deepseek-chat";

    #[tokio::test]
    async fn test_no_streaming() -> Result<(), Box<dyn std::error::Error>> {
        let request = RequestBody {
            messages: vec![
                Message::System {
                    content: "You are a helpful assistant.".to_string(),
                    name: None,
                },
                Message::User {
                    content: "Hello, how are you?".to_string(),
                    name: None,
                },
            ],
            model: DEEPSEEK_MODEL.to_string(),
            stream: false,
            ..Default::default()
        };

        // Send the request
        let chat_completion: ChatCompletion = request
            .get_response(DEEPSEEK_CHAT_URL, &*DEEPSEEK_API_KEY)
            .await?;
        let text = chat_completion.choices[0]
            .message
            .content
            .as_deref()
            .unwrap();
        println!("lib::test_no_streaming message: {}", text);
        Ok(())
    }

    #[tokio::test]
    async fn test_streaming() -> Result<(), Box<dyn std::error::Error>> {
        let request = RequestBody {
            messages: vec![
                Message::System {
                    content: "You are a helpful assistant.".to_string(),
                    name: None,
                },
                Message::User {
                    content: "Who are you?".to_string(),
                    name: None,
                },
            ],
            model: DEEPSEEK_MODEL.to_string(),
            stream: true,
            ..Default::default()
        };

        // Send the request
        let mut response_stream = request
            .get_stream_response(DEEPSEEK_CHAT_URL, *DEEPSEEK_API_KEY)
            .await?;

        let mut message = String::new();

        while let Some(chunk_result) = response_stream.next().await {
            let chunk: ChatCompletionChunk = chunk_result?;
            let content = match chunk.choices[0].delta.content.as_ref().unwrap() {
                CompletionContent::Content(s) => s,
                CompletionContent::ReasoningContent(s) => s,
            };
            println!("lib::test_streaming message: {}", content);
            message.push_str(content);
        }

        println!("lib::test_streaming message: {}", message);
        Ok(())
    }
}