// async-openai — crate root (lib.rs)
//! Rust library for OpenAI
//!
//! ## Creating client
//!
//! ```
//! use async_openai::{Client, config::OpenAIConfig};
//!
//! // Create an OpenAI client with the api key from env var OPENAI_API_KEY and the default base url.
//! let client = Client::new();
//!
//! // Above is a shortcut for
//! let config = OpenAIConfig::default();
//! let client = Client::with_config(config);
//!
//! // OR use an API key from a different source and a non-default organization
//! let api_key = "sk-..."; // This secret could be from a file, or an environment variable.
//! let config = OpenAIConfig::new()
//!     .with_api_key(api_key)
//!     .with_org_id("the-continental");
//!
//! let client = Client::with_config(config);
//!
//! // Use a custom reqwest client
//! let http_client = reqwest::ClientBuilder::new().user_agent("async-openai").build().unwrap();
//! let client = Client::new().with_http_client(http_client);
//! ```
//!
//!
//! ## Making requests
//!
//!```
//!# tokio_test::block_on(async {
//! use async_openai::{Client, types::responses::{CreateResponseArgs}};
//!
//! // Create client
//! let client = Client::new();
//!
//! // Create request using builder pattern
//! // Every request struct has a companion builder struct with the same name + Args suffix
//! let request = CreateResponseArgs::default()
//!     .model("gpt-5-mini")
//!     .input("tell me the recipe of pav bhaji")
//!     .max_output_tokens(512u32)
//!     .build()?;
//!
//! // Call API
//! let response = client
//!     .responses()      // Get the API "group" (responses, images, etc.) from the client
//!     .create(request)  // Make the API call in that "group"
//!     .await?;
//!
//! println!("{:?}", response.output_text());
//! # Ok::<(), Box<dyn std::error::Error>>(())
//! # });
//!```
//!
//! ## Bring Your Own Types
//!
//! To use custom types for inputs and outputs, enable the `byot` feature, which provides additional generic methods with the same name and a `_byot` suffix.
//! This feature is available on methods whose return type is not `Bytes`.
//!
//!```
//!# #[cfg(feature = "byot")]
//!# tokio_test::block_on(async {
//! use async_openai::Client;
//! use serde_json::{Value, json};
//!
//! let client = Client::new();
//!
//! let response: Value = client
//!        .chat()
//!        .create_byot(json!({
//!            "messages": [
//!                {
//!                    "role": "developer",
//!                    "content": "You are a helpful assistant"
//!                },
//!                {
//!                    "role": "user",
//!                    "content": "What do you think about life?"
//!                }
//!            ],
//!            "model": "gpt-4o",
//!            "store": false
//!        }))
//!        .await?;
//!
//!  if let Some(content) = response["choices"][0]["message"]["content"].as_str() {
//!     println!("{}", content);
//!  }
//! # Ok::<(), Box<dyn std::error::Error>>(())
//! # });
//!```
//!
//! **References: Borrow Instead of Move**
//!
//! With `byot`, use references to request types:
//!
//! ```
//! # #[cfg(feature = "byot")]
//! # tokio_test::block_on(async {
//! # use async_openai::{Client, types::responses::{CreateResponse, Response}};
//! # let client = Client::new();
//! # let request = CreateResponse::default();
//! let response: Response = client
//!   .responses()
//!   .create_byot(&request).await?;
//! # Ok::<(), Box<dyn std::error::Error>>(())
//! # });
//! ```
//!
//! ## Rust Types
//!
//! To only use Rust types from the crate - use the feature flag `types`.
//!
//! There are granular feature flags like `response-types`, `chat-completion-types`, etc.
//!
//! These granular types are enabled when the corresponding API feature is enabled - for example, `responses` will enable `response-types`.
//!
//! ## WASM
//! WASM is supported for all APIs.
//! See [examples/wasm-responses](https://github.com/64bit/async-openai/tree/main/examples/wasm-responses) or [examples/tower-wasm](https://github.com/64bit/async-openai/tree/main/examples/tower-wasm).
//!
//! ## Configurable Requests
//!
//! **Individual Request**
//!
//! Certain individual APIs need additional query or header parameters - these can be provided by chaining `.query()`, `.header()`, `.headers()` on the API group.
//!
//! For example:
//! ```
//! # tokio_test::block_on(async {
//! # use async_openai::Client;
//! # use async_openai::traits::RequestOptionsBuilder;
//! # let client = Client::new();
//! client
//!   .chat()
//!   // query can be a struct or a map too.
//!   .query(&[("limit", "10")])?
//!   // header for demo
//!   .header("key", "value")?
//!   .list()
//!   .await?;
//! # Ok::<(), Box<dyn std::error::Error>>(())
//! # });
//! ```
//!
//! **All Requests**
//!
//! Use `Config`, `OpenAIConfig`, etc. for configuring the url, headers, or query parameters globally for all requests.
//!
//! ## OpenAI-compatible Providers
//!
//! Even though the scope of the crate is official OpenAI APIs, it is very configurable to work with compatible providers.
//!
//! **Configurable Path**
//!
//! In addition to `.query()`, `.header()`, `.headers()`, the path for an individual request can be changed by using the `.path()` method on the API group.
//!
//! For example:
//! ```
//! # tokio_test::block_on(async {
//! # use async_openai::{Client, types::chat::CreateChatCompletionRequestArgs};
//! # use async_openai::traits::RequestOptionsBuilder;
//! # let client = Client::new();
//! # let request = CreateChatCompletionRequestArgs::default()
//! #     .model("gpt-4")
//! #     .messages([])
//! #     .build()
//! #     .unwrap();
//! client
//!   .chat()
//!   .path("/v1/messages")?
//!   .create(request)
//!   .await?;
//! # Ok::<(), Box<dyn std::error::Error>>(())
//! # });
//! ```
//!
//! **Dynamic Dispatch**
//!
//! This allows you to use the same code (say a `fn`) to call APIs on different OpenAI-compatible providers.
//!
//! Create a client with a `Box`- or `Arc`-wrapped configuration.
//!
//! For example:
//! ```
//! use async_openai::{Client, config::{Config, OpenAIConfig}};
//!
//! // Use `Box` or `std::sync::Arc` to wrap the config
//! let config = Box::new(OpenAIConfig::default()) as Box<dyn Config>;
//! // create client
//! let client: Client<Box<dyn Config>> = Client::with_config(config);
//!
//! // A function can now accept a `&Client<Box<dyn Config>>` parameter
//! // which can invoke any openai compatible api
//! fn chat_completion(client: &Client<Box<dyn Config>>) {
//!     todo!()
//! }
//! ```
//!
//! ## Microsoft Azure
//!
//! ```
//! use async_openai::{Client, config::AzureConfig};
//!
//! let config = AzureConfig::new()
//!     .with_api_base("https://my-resource-name.openai.azure.com")
//!     .with_api_version("2023-03-15-preview")
//!     .with_deployment_id("deployment-id")
//!     .with_api_key("...");
//!
//! let client = Client::with_config(config);
//!
//! // Note that `async-openai` only implements the OpenAI spec
//! // and doesn't maintain parity with the spec of the Azure OpenAI service.
//!
//! ```
//!
//!
//! ## Middleware
//!
//! Middleware is supported via the Tower ecosystem. See [`middleware`] for more detail.
//!
//! ## Examples
//! For full working examples of all supported features, see the [examples](https://github.com/64bit/async-openai/tree/main/examples) directory in the repository.
//!
#![cfg_attr(docsrs, feature(doc_cfg))]

// `byot` proc-macro selection:
// - with the `byot` feature, `byot` expands additional generic `_byot` method variants;
// - without it, `byot_passthrough` leaves the annotated items unchanged.
// Both are gated on `_api` since they only decorate API-group methods.
#[cfg(all(feature = "_api", feature = "byot"))]
#[allow(unused_imports)]
pub(crate) use async_openai_macros::byot;

#[cfg(all(feature = "_api", not(feature = "byot")))]
#[allow(unused_imports)]
pub(crate) use async_openai_macros::byot_passthrough as byot;

// #[cfg(all(not(feature = "_api"), not(feature = "byot")))]
// #[macro_export]
// macro_rules! byot {
//     ($($tt:tt)*) => {
//         $($tt)*
//     };
// }
245
246#[cfg(feature = "administration")]
247mod admin;
248#[cfg(feature = "assistant")]
249mod assistants;
250#[cfg(feature = "audio")]
251mod audio;
252#[cfg(feature = "batch")]
253mod batches;
254#[cfg(feature = "chat-completion")]
255mod chat;
256#[cfg(feature = "chatkit")]
257mod chatkit;
258#[cfg(feature = "_api")]
259mod client;
260#[cfg(feature = "completions")]
261mod completion;
262#[cfg(feature = "_api")]
263pub mod config;
264#[cfg(feature = "container")]
265mod containers;
266#[cfg(feature = "image")]
267mod download;
268#[cfg(feature = "embedding")]
269mod embedding;
270pub mod error;
271#[cfg(feature = "evals")]
272mod evals;
273#[cfg(feature = "_api")]
274mod executor;
275#[cfg(feature = "file")]
276mod file;
277#[cfg(feature = "finetuning")]
278mod fine_tuning;
279#[cfg(feature = "image")]
280mod image;
281#[cfg(feature = "_api")]
282mod impls;
283#[cfg(feature = "middleware")]
284pub mod middleware;
285#[cfg(feature = "model")]
286mod model;
287#[cfg(feature = "moderation")]
288mod moderation;
289#[cfg(feature = "realtime")]
290mod realtime;
291#[cfg(feature = "_api")]
292mod request_options;
293#[cfg(feature = "responses")]
294mod responses;
295#[cfg(feature = "_api")]
296#[allow(dead_code)]
297#[path = "middleware/retry/mod.rs"]
298mod retry;
299#[cfg(feature = "skill")]
300mod skills;
301#[cfg(feature = "_api")]
302pub mod traits;
303pub mod types;
304#[cfg(feature = "upload")]
305mod uploads;
306#[cfg(any(
307    feature = "audio",
308    feature = "file",
309    feature = "upload",
310    feature = "image",
311    feature = "video",
312    feature = "container",
313    feature = "skill"
314))]
315mod util;
316#[cfg(feature = "vectorstore")]
317mod vectorstores;
318#[cfg(feature = "video")]
319mod video;
320#[cfg(feature = "webhook")]
321pub mod webhooks;

// admin::* would be good - however it's expanded here so that docs.rs shows the feature flags
#[cfg(feature = "administration")]
pub use admin::{
    Admin, AdminAPIKeys, AuditLogs, Certificates, GroupRoles, GroupUsers, Groups, Invites,
    ProjectAPIKeys, ProjectCertificates, ProjectGroupRoles, ProjectGroups, ProjectRateLimits,
    ProjectRoles, ProjectServiceAccounts, ProjectUserRoles, ProjectUsers, Projects, Roles, Usage,
    UserRoles, Users,
};
#[cfg(feature = "assistant")]
#[deprecated(
    note = "Assistants API is deprecated and will be removed in August 2026. Use the Responses API."
)]
#[allow(deprecated)]
pub use assistants::{Assistants, Messages, Runs, Steps, Threads};
#[cfg(feature = "audio")]
pub use audio::{Audio, Speech, Transcriptions, Translations};
#[cfg(feature = "batch")]
pub use batches::Batches;
#[cfg(feature = "chat-completion")]
pub use chat::Chat;
#[cfg(feature = "chatkit")]
pub use chatkit::Chatkit;
#[cfg(feature = "_api")]
pub use client::Client;
#[cfg(feature = "completions")]
pub use completion::Completions;
#[cfg(feature = "container")]
pub use containers::{ContainerFiles, Containers};
#[cfg(feature = "embedding")]
pub use embedding::Embeddings;
#[cfg(feature = "evals")]
pub use evals::{EvalRunOutputItems, EvalRuns, Evals};
#[cfg(feature = "file")]
pub use file::Files;
#[cfg(feature = "finetuning")]
pub use fine_tuning::FineTuning;
#[cfg(feature = "image")]
pub use image::Images;
#[cfg(feature = "model")]
pub use model::Models;
#[cfg(feature = "moderation")]
pub use moderation::Moderations;
#[cfg(feature = "realtime")]
pub use realtime::Realtime;
#[cfg(feature = "_api")]
pub use request_options::RequestOptions;
#[cfg(feature = "responses")]
pub use responses::{ConversationItems, Conversations, Responses};
#[cfg(feature = "skill")]
pub use skills::{SkillVersions, Skills};
#[cfg(feature = "upload")]
pub use uploads::Uploads;
#[cfg(feature = "vectorstore")]
pub use vectorstores::{VectorStoreFileBatches, VectorStoreFiles, VectorStores};
#[cfg(feature = "video")]
pub use video::Videos;