// async_openai/lib.rs

//! Rust library for OpenAI
//!
//! ## Creating client
//!
//! ```
//! use async_openai::{Client, config::OpenAIConfig};
//!
//! // Create an OpenAI client with api key from env var OPENAI_API_KEY and default base url.
//! let client = Client::new();
//!
//! // Above is shortcut for
//! let config = OpenAIConfig::default();
//! let client = Client::with_config(config);
//!
//! // OR use API key from different source and a non default organization
//! let api_key = "sk-..."; // This secret could be from a file, or environment variable.
//! let config = OpenAIConfig::new()
//!     .with_api_key(api_key)
//!     .with_org_id("the-continental");
//!
//! let client = Client::with_config(config);
//!
//! // Use custom reqwest client
//! let http_client = reqwest::ClientBuilder::new().user_agent("async-openai").build().unwrap();
//! let client = Client::new().with_http_client(http_client);
//! ```
//!
//!
//! ## Making requests
//!
//!```
//!# tokio_test::block_on(async {
//! use async_openai::{Client, types::responses::{CreateResponseArgs}};
//!
//! // Create client
//! let client = Client::new();
//!
//! // Create request using builder pattern
//! // Every request struct has companion builder struct with same name + Args suffix
//! let request = CreateResponseArgs::default()
//!     .model("gpt-5-mini")
//!     .input("tell me the recipe of pav bhaji")
//!     .max_output_tokens(512u32)
//!     .build()?;
//!
//! // Call API
//! let response = client
//!     .responses()      // Get the API "group" (responses, images, etc.) from the client
//!     .create(request)  // Make the API call in that "group"
//!     .await?;
//!
//! println!("{:?}", response.output_text());
//! # Ok::<(), Box<dyn std::error::Error>>(())
//! # });
//!```
//!
//! ## Bring Your Own Types
//!
//! To use custom types for inputs and outputs, enable `byot` feature which provides additional generic methods with same name and `_byot` suffix.
//! This feature is available on methods whose return type is not `Bytes`
//!
//!```
//!# #[cfg(feature = "byot")]
//!# tokio_test::block_on(async {
//! use async_openai::Client;
//! use serde_json::{Value, json};
//!
//! let client = Client::new();
//!
//! let response: Value = client
//!        .chat()
//!        .create_byot(json!({
//!            "messages": [
//!                {
//!                    "role": "developer",
//!                    "content": "You are a helpful assistant"
//!                },
//!                {
//!                    "role": "user",
//!                    "content": "What do you think about life?"
//!                }
//!            ],
//!            "model": "gpt-4o",
//!            "store": false
//!        }))
//!        .await?;
//!
//!  if let Some(content) = response["choices"][0]["message"]["content"].as_str() {
//!     println!("{}", content);
//!  }
//! # Ok::<(), Box<dyn std::error::Error>>(())
//! # });
//!```
//!
//! **References: Borrow Instead of Move**
//!
//! With `byot` use reference to request types
//!
//! ```
//! # #[cfg(feature = "byot")]
//! # tokio_test::block_on(async {
//! # use async_openai::{Client, types::responses::{CreateResponse, Response}};
//! # let client = Client::new();
//! # let request = CreateResponse::default();
//! let response: Response = client
//!   .responses()
//!   .create_byot(&request).await?;
//! # Ok::<(), Box<dyn std::error::Error>>(())
//! # });
//! ```
//!
//! ## Rust Types
//!
//! To only use Rust types from the crate - use feature flag `types`.
//!
//! There are granular feature flags like `response-types`, `chat-completion-types`, etc.
//!
//! These granular types are enabled when the corresponding API feature is enabled - for example `response` will enable `response-types`.
//!
//! ## Configurable Requests
//!
//! **Individual Request**
//!
//! Certain individual APIs that need additional query or header parameters - these can be provided by chaining `.query()`, `.header()`, `.headers()` on the API group.
//!
//! For example:
//! ```
//! # tokio_test::block_on(async {
//! # use async_openai::Client;
//! # use async_openai::traits::RequestOptionsBuilder;
//! # let client = Client::new();
//! client
//!   .chat()
//!   // query can be a struct or a map too.
//!   .query(&[("limit", "10")])?
//!   // header for demo
//!   .header("key", "value")?
//!   .list()
//!   .await?;
//! # Ok::<(), Box<dyn std::error::Error>>(())
//! # });
//! ```
//!
//! **All Requests**
//!
//! Use `Config`, `OpenAIConfig` etc. for configuring url, headers or query parameters globally for all requests.
//!
//! ## OpenAI-compatible Providers
//!
//! Even though the scope of the crate is official OpenAI APIs, it is very configurable to work with compatible providers.
//!
//! **Configurable Path**
//!
//! In addition to `.query()`, `.header()`, `.headers()` a path for individual request can be changed by using `.path()`, method on the API group.
//!
//! For example:
//! ```
//! # tokio_test::block_on(async {
//! # use async_openai::{Client, types::chat::CreateChatCompletionRequestArgs};
//! # use async_openai::traits::RequestOptionsBuilder;
//! # let client = Client::new();
//! # let request = CreateChatCompletionRequestArgs::default()
//! #     .model("gpt-4")
//! #     .messages([])
//! #     .build()
//! #     .unwrap();
//! client
//!   .chat()
//!   .path("/v1/messages")?
//!   .create(request)
//!   .await?;
//! # Ok::<(), Box<dyn std::error::Error>>(())
//! # });
//! ```
//!
//! **Dynamic Dispatch**
//!
//! This allows you to use same code (say a `fn`) to call APIs on different OpenAI-compatible providers.
//!
//! For any struct that implements `Config` trait, wrap it in a smart pointer and cast the pointer to `dyn Config`
//! trait object, then create a client with `Box` or `Arc` wrapped configuration.
//!
//! For example:
//! ```
//! use async_openai::{Client, config::{Config, OpenAIConfig}};
//!
//! // Use `Box` or `std::sync::Arc` to wrap the config
//! let config = Box::new(OpenAIConfig::default()) as Box<dyn Config>;
//! // create client
//! let client: Client<Box<dyn Config>> = Client::with_config(config);
//!
//! // A function can now accept a `&Client<Box<dyn Config>>` parameter
//! // which can invoke any openai compatible api
//! fn chat_completion(client: &Client<Box<dyn Config>>) {
//!     todo!()
//! }
//! ```
//!
//! ## Microsoft Azure
//!
//! ```
//! use async_openai::{Client, config::AzureConfig};
//!
//! let config = AzureConfig::new()
//!     .with_api_base("https://my-resource-name.openai.azure.com")
//!     .with_api_version("2023-03-15-preview")
//!     .with_deployment_id("deployment-id")
//!     .with_api_key("...");
//!
//! let client = Client::with_config(config);
//!
//! // Note that `async-openai` only implements OpenAI spec
//! // and doesn't maintain parity with the spec of Azure OpenAI service.
//!
//! ```
//!
//!
//! ## Examples
//! For full working examples for all supported features see [examples](https://github.com/64bit/async-openai/tree/main/examples) directory in the repository.
//!
#![cfg_attr(docsrs, feature(doc_cfg))]

// When both the API layer and `byot` are enabled, pull in the real `byot`
// attribute macro that generates the `_byot` generic method variants.
#[cfg(all(feature = "_api", feature = "byot"))]
pub(crate) use async_openai_macros::byot;

// Without `byot`, alias the no-op passthrough macro under the same name so
// the rest of the crate can annotate methods unconditionally.
#[cfg(all(feature = "_api", not(feature = "byot")))]
pub(crate) use async_openai_macros::byot_passthrough as byot;

// NOTE(review): commented-out fallback for the non-`_api` case — appears
// intentionally retained; confirm whether it can be removed.
// #[cfg(all(not(feature = "_api"), not(feature = "byot")))]
// #[macro_export]
// macro_rules! byot {
//     ($($tt:tt)*) => {
//         $($tt)*
//     };
// }

237#[cfg(feature = "administration")]
238mod admin;
239#[cfg(feature = "assistant")]
240mod assistants;
241#[cfg(feature = "audio")]
242mod audio;
243#[cfg(feature = "batch")]
244mod batches;
245#[cfg(feature = "chat-completion")]
246mod chat;
247#[cfg(feature = "chatkit")]
248mod chatkit;
249#[cfg(feature = "_api")]
250mod client;
251#[cfg(feature = "completions")]
252mod completion;
253#[cfg(feature = "_api")]
254pub mod config;
255#[cfg(feature = "container")]
256mod containers;
257#[cfg(feature = "image")]
258mod download;
259#[cfg(feature = "embedding")]
260mod embedding;
261pub mod error;
262#[cfg(feature = "evals")]
263mod evals;
264#[cfg(feature = "file")]
265mod file;
266#[cfg(feature = "finetuning")]
267mod fine_tuning;
268#[cfg(feature = "image")]
269mod image;
270#[cfg(feature = "_api")]
271mod impls;
272#[cfg(feature = "model")]
273mod model;
274#[cfg(feature = "moderation")]
275mod moderation;
276#[cfg(feature = "realtime")]
277mod realtime;
278#[cfg(feature = "_api")]
279mod request_options;
280#[cfg(feature = "responses")]
281mod responses;
282#[cfg(feature = "_api")]
283pub mod traits;
284pub mod types;
285#[cfg(feature = "upload")]
286mod uploads;
287#[cfg(any(
288    feature = "audio",
289    feature = "file",
290    feature = "upload",
291    feature = "image",
292    feature = "video",
293    feature = "container"
294))]
295mod util;
296#[cfg(feature = "vectorstore")]
297mod vectorstores;
298#[cfg(feature = "video")]
299mod video;
300#[cfg(feature = "webhook")]
301pub mod webhooks;
302
// admin::* would be good - however its expanded here so that docs.rs shows the feature flags
#[cfg(feature = "administration")]
pub use admin::{
    Admin, AdminAPIKeys, AuditLogs, Certificates, GroupRoles, GroupUsers, Groups, Invites,
    ProjectAPIKeys, ProjectCertificates, ProjectGroupRoles, ProjectGroups, ProjectRateLimits,
    ProjectRoles, ProjectServiceAccounts, ProjectUserRoles, ProjectUsers, Projects, Roles, Usage,
    UserRoles, Users,
};
#[cfg(feature = "assistant")]
pub use assistants::{Assistants, Messages, Runs, Steps, Threads};
#[cfg(feature = "audio")]
pub use audio::{Audio, Speech, Transcriptions, Translations};
#[cfg(feature = "batch")]
pub use batches::Batches;
#[cfg(feature = "chat-completion")]
pub use chat::Chat;
#[cfg(feature = "chatkit")]
pub use chatkit::Chatkit;
#[cfg(feature = "_api")]
pub use client::Client;
#[cfg(feature = "completions")]
pub use completion::Completions;
#[cfg(feature = "container")]
pub use containers::{ContainerFiles, Containers};
#[cfg(feature = "embedding")]
pub use embedding::Embeddings;
#[cfg(feature = "evals")]
pub use evals::{EvalRunOutputItems, EvalRuns, Evals};
#[cfg(feature = "file")]
pub use file::Files;
#[cfg(feature = "finetuning")]
pub use fine_tuning::FineTuning;
#[cfg(feature = "image")]
pub use image::Images;
#[cfg(feature = "model")]
pub use model::Models;
#[cfg(feature = "moderation")]
pub use moderation::Moderations;
#[cfg(feature = "realtime")]
pub use realtime::Realtime;
#[cfg(feature = "_api")]
pub use request_options::RequestOptions;
#[cfg(feature = "responses")]
pub use responses::{ConversationItems, Conversations, Responses};
#[cfg(feature = "upload")]
pub use uploads::Uploads;
#[cfg(feature = "vectorstore")]
pub use vectorstores::{VectorStoreFileBatches, VectorStoreFiles, VectorStores};
#[cfg(feature = "video")]
pub use video::Videos;