openai_interface/lib.rs
//! A low-level Rust interface for interacting with OpenAI's API.
//!
//! This crate provides a simple, efficient, and low-level way to interact with OpenAI's API,
//! supporting both streaming and non-streaming responses. It leverages Rust's powerful type
//! system for safety and performance, while exposing the full flexibility of the API.
//!
//! # Features
//!
//! - **Chat Completions**: Full support for OpenAI's chat completion API
//! - **Streaming and Non-streaming**: Support for both streaming and non-streaming responses
//! - **Strong Typing**: Complete type definitions for all API requests and responses
//! - **Error Handling**: Comprehensive error handling with detailed error types
//! - **Async/Await**: Built with async/await support for efficient asynchronous operations
//! - **Musl Support**: Designed to work with musl libc for lightweight deployments
//!
//! # Examples
//!
//! ## Non-streaming Chat Completion
//!
//! This example demonstrates how to make a non-streaming request to the chat completion API.
//!
//! ```rust
//! use std::sync::LazyLock;
//! use openai_interface::chat::request::{Message, RequestBody};
//! use openai_interface::chat::response::no_streaming::ChatCompletion;
//! use openai_interface::rest::post::NoStream;
//!
//! // You need to provide your own DeepSeek API key at /keys/deepseek_domestic_key
//! static DEEPSEEK_API_KEY: LazyLock<&str> =
//!     LazyLock::new(|| include_str!("../keys/deepseek_domestic_key").trim());
//! const DEEPSEEK_CHAT_URL: &'static str = "https://api.deepseek.com/chat/completions";
//! const DEEPSEEK_MODEL: &'static str = "deepseek-chat";
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let request = RequestBody {
//!         messages: vec![
//!             Message::System {
//!                 content: "You are a helpful assistant.".to_string(),
//!                 name: None,
//!             },
//!             Message::User {
//!                 content: "Hello, how are you?".to_string(),
//!                 name: None,
//!             },
//!         ],
//!         model: DEEPSEEK_MODEL.to_string(),
//!         stream: false,
//!         ..Default::default()
//!     };
//!
//!     // Send the request and deserialize the complete response.
//!     let chat_completion: ChatCompletion = request
//!         .get_response(DEEPSEEK_CHAT_URL, &*DEEPSEEK_API_KEY)
//!         .await?;
//!     let text = chat_completion.choices[0]
//!         .message
//!         .content
//!         .as_deref()
//!         .unwrap();
//!     println!("{:?}", text);
//!     Ok(())
//! }
//! ```
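//!
//! If the request fails, the error above simply propagates through `?`. Below is a minimal
//! sketch of handling the failure explicitly instead; the `report` helper and its parameters
//! are hypothetical, it reuses only the `get_response` call shown above and assumes nothing
//! about the concrete error type defined in `openai_interface::errors`.
//!
//! ```rust,no_run
//! use openai_interface::chat::request::RequestBody;
//! use openai_interface::chat::response::no_streaming::ChatCompletion;
//! use openai_interface::rest::post::NoStream;
//!
//! // Hypothetical helper: report the outcome instead of bubbling the error up with `?`.
//! async fn report(request: RequestBody, url: &str, key: &str) {
//!     let result: Result<ChatCompletion, _> = request.get_response(url, key).await;
//!     match result {
//!         Ok(completion) => println!("{:?}", completion.choices[0].message.content),
//!         Err(e) => eprintln!("request failed: {e}"),
//!     }
//! }
//! ```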
//!
//! ## Streaming Chat Completion
//!
//! This example demonstrates how to handle streaming responses from the API. As with the
//! non-streaming example, all API parameters can be adjusted directly through the request struct.
//!
//! ```rust
//! use openai_interface::chat::response::streaming::{CompletionContent, ChatCompletionChunk};
//! use openai_interface::chat::request::{Message, RequestBody};
//! use openai_interface::rest::post::Stream;
//! use futures_util::StreamExt;
//!
//! use std::sync::LazyLock;
//!
//! // You need to provide your own DeepSeek API key at /keys/deepseek_domestic_key
//! static DEEPSEEK_API_KEY: LazyLock<&str> =
//!     LazyLock::new(|| include_str!("../keys/deepseek_domestic_key").trim());
//! const DEEPSEEK_CHAT_URL: &'static str = "https://api.deepseek.com/chat/completions";
//! const DEEPSEEK_MODEL: &'static str = "deepseek-chat";
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let request = RequestBody {
//!         messages: vec![
//!             Message::System {
//!                 content: "You are a helpful assistant.".to_string(),
//!                 name: None,
//!             },
//!             Message::User {
//!                 content: "Who are you?".to_string(),
//!                 name: None,
//!             },
//!         ],
//!         model: DEEPSEEK_MODEL.to_string(),
//!         stream: true,
//!         ..Default::default()
//!     };
//!
//!     // Send the request and consume the response as a stream of chunks.
//!     let mut response_stream = request
//!         .get_stream_response(DEEPSEEK_CHAT_URL, *DEEPSEEK_API_KEY)
//!         .await?;
//!
//!     let mut message = String::new();
//!
//!     while let Some(chunk_result) = response_stream.next().await {
//!         let chunk: ChatCompletionChunk = chunk_result?;
//!         let content = match chunk.choices[0].delta.content.as_ref().unwrap() {
//!             CompletionContent::Content(s) => s,
//!             CompletionContent::ReasoningContent(s) => s,
//!         };
//!         println!("{}", content);
//!         message.push_str(content);
//!     }
//!
//!     println!("Full message: {}", message);
//!     Ok(())
//! }
//! ```
//!
//! # Musl Build
//!
//! This crate is designed to work with musl libc, making it suitable for lightweight
//! deployments in containerized environments. Expect longer compile times, since OpenSSL
//! has to be built from source.
//!
//! To build for musl:
//! ```bash
//! rustup target add x86_64-unknown-linux-musl
//! cargo build --target x86_64-unknown-linux-musl
//! ```
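//!
//! Building OpenSSL from source for a musl target also requires a musl-capable C toolchain
//! on the build host. This is an assumption about your environment; the package below is the
//! Debian/Ubuntu name for the toolchain that provides `musl-gcc`:
//!
//! ```bash
//! # Hypothetical prerequisite; the package name varies by distribution.
//! sudo apt-get install musl-tools
//! ```
//!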
pub mod chat;
pub mod completions;
pub mod errors;
pub mod rest;

#[cfg(test)]
mod tests {
    use crate::chat::request::{Message, RequestBody};
    use crate::chat::response::no_streaming::ChatCompletion;
    use crate::chat::response::streaming::{ChatCompletionChunk, CompletionContent};
    use crate::rest::post::{NoStream, Stream};
    use futures_util::StreamExt;
    use std::sync::LazyLock;

    // You need to provide your own DeepSeek API key at /keys/deepseek_domestic_key
    static DEEPSEEK_API_KEY: LazyLock<&str> =
        LazyLock::new(|| include_str!("../keys/deepseek_domestic_key").trim());
    const DEEPSEEK_CHAT_URL: &'static str = "https://api.deepseek.com/chat/completions";
    const DEEPSEEK_MODEL: &'static str = "deepseek-chat";
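    // NOTE: these tests issue real requests to the DeepSeek endpoint above, so they need
    // outbound network access in addition to the API key file.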
    #[tokio::test]
    async fn test_no_streaming() -> Result<(), Box<dyn std::error::Error>> {
        let request = RequestBody {
            messages: vec![
                Message::System {
                    content: "You are a helpful assistant.".to_string(),
                    name: None,
                },
                Message::User {
                    content: "Hello, how are you?".to_string(),
                    name: None,
                },
            ],
            model: DEEPSEEK_MODEL.to_string(),
            stream: false,
            ..Default::default()
        };

        // Send the request
        let chat_completion: ChatCompletion = request
            .get_response(DEEPSEEK_CHAT_URL, &*DEEPSEEK_API_KEY)
            .await?;
        let text = chat_completion.choices[0]
            .message
            .content
            .as_deref()
            .unwrap();
        println!("lib::test_no_streaming message: {}", text);
        Ok(())
    }

    #[tokio::test]
    async fn test_streaming() -> Result<(), Box<dyn std::error::Error>> {
        let request = RequestBody {
            messages: vec![
                Message::System {
                    content: "You are a helpful assistant.".to_string(),
                    name: None,
                },
                Message::User {
                    content: "Who are you?".to_string(),
                    name: None,
                },
            ],
            model: DEEPSEEK_MODEL.to_string(),
            stream: true,
            ..Default::default()
        };

        // Send the request
        let mut response_stream = request
            .get_stream_response(DEEPSEEK_CHAT_URL, *DEEPSEEK_API_KEY)
            .await?;

        let mut message = String::new();

        while let Some(chunk_result) = response_stream.next().await {
            let chunk: ChatCompletionChunk = chunk_result?;
            let content = match chunk.choices[0].delta.content.as_ref().unwrap() {
                CompletionContent::Content(s) => s,
                CompletionContent::ReasoningContent(s) => s,
            };
            println!("lib::test_streaming message: {}", content);
            message.push_str(content);
        }

        println!("lib::test_streaming message: {}", message);
        Ok(())
    }
}