async_openai/
lib.rs

1//! Rust library for OpenAI
2//!
3//! ## Creating client
4//!
5//! ```
6//! use async_openai::{Client, config::OpenAIConfig};
7//!
8//! // Create an OpenAI client with the API key from env var OPENAI_API_KEY and the default base url.
9//! let client = Client::new();
10//!
11//! // Above is shortcut for
12//! let config = OpenAIConfig::default();
13//! let client = Client::with_config(config);
14//!
15//! // OR use an API key from a different source and a non-default organization
16//! let api_key = "sk-..."; // This secret could be from a file, or environment variable.
17//! let config = OpenAIConfig::new()
18//!     .with_api_key(api_key)
19//!     .with_org_id("the-continental");
20//!
21//! let client = Client::with_config(config);
22//!
23//! // Use custom reqwest client
24//! let http_client = reqwest::ClientBuilder::new().user_agent("async-openai").build().unwrap();
25//! let client = Client::new().with_http_client(http_client);
26//! ```
27//!
28//!
29//! ## Making requests
30//!
31//!```
32//!# tokio_test::block_on(async {
33//! use async_openai::{Client, types::responses::{CreateResponseArgs}};
34//!
35//! // Create client
36//! let client = Client::new();
37//!
38//! // Create request using builder pattern
39//! // Every request struct has companion builder struct with same name + Args suffix
40//! let request = CreateResponseArgs::default()
41//!     .model("gpt-5-mini")
42//!     .input("tell me the recipe of pav bhaji")
43//!     .max_output_tokens(512u32)
44//!     .build()?;
45//!
46//! // Call API
47//! let response = client
48//!     .responses()      // Get the API "group" (responses, images, etc.) from the client
49//!     .create(request)  // Make the API call in that "group"
50//!     .await?;
51//!
52//! println!("{:?}", response.output_text());
53//! # Ok::<(), Box<dyn std::error::Error>>(())
54//! # });
55//!```
56//!
57//! ## Bring Your Own Types
58//!
59//! To use custom types for inputs and outputs, enable the `byot` feature, which provides additional generic methods with the same name and a `_byot` suffix.
60//! This feature is available on methods whose return type is not `Bytes`.
61//!
62//!```
63//!# #[cfg(feature = "byot")]
64//!# tokio_test::block_on(async {
65//! use async_openai::Client;
66//! use serde_json::{Value, json};
67//!
68//! let client = Client::new();
69//!
70//! let response: Value = client
71//!        .chat()
72//!        .create_byot(json!({
73//!            "messages": [
74//!                {
75//!                    "role": "developer",
76//!                    "content": "You are a helpful assistant"
77//!                },
78//!                {
79//!                    "role": "user",
80//!                    "content": "What do you think about life?"
81//!                }
82//!            ],
83//!            "model": "gpt-4o",
84//!            "store": false
85//!        }))
86//!        .await?;
87//!
88//!  if let Some(content) = response["choices"][0]["message"]["content"].as_str() {
89//!     println!("{}", content);
90//!  }
91//! # Ok::<(), Box<dyn std::error::Error>>(())
92//! # });
93//!```
94//!
95//! **References: Borrow Instead of Move**
96//!
97//! With `byot`, use references to request types:
98//!
99//! ```
100//! # #[cfg(feature = "byot")]
101//! # tokio_test::block_on(async {
102//! # use async_openai::{Client, types::responses::{CreateResponse, Response}};
103//! # let client = Client::new();
104//! # let request = CreateResponse::default();
105//! let response: Response = client
106//!   .responses()
107//!   .create_byot(&request).await?;
108//! # Ok::<(), Box<dyn std::error::Error>>(())
109//! # });
110//! ```
111//!
112//! ## Rust Types
113//!
114//! To only use Rust types from the crate - use feature flag `types`.
115//!
116//! There are granular feature flags like `response-types`, `chat-completion-types`, etc.
117//!
118//! These granular types are enabled when the corresponding API feature is enabled - for example `responses` will enable `response-types`.
119//!
120//! ## WASM
121//! For WASM targets, streaming, retries, and file operations are not implemented yet.
122//! See [examples/wasm-responses](https://github.com/64bit/async-openai/tree/main/examples/wasm-responses) for a working example.
123//!
124//! ## Configurable Requests
125//!
126//! **Individual Request**
127//!
128//! Certain individual APIs need additional query or header parameters - these can be provided by chaining `.query()`, `.header()`, or `.headers()` on the API group.
129//!
130//! For example:
131//! ```
132//! # tokio_test::block_on(async {
133//! # use async_openai::Client;
134//! # use async_openai::traits::RequestOptionsBuilder;
135//! # let client = Client::new();
136//! client
137//!   .chat()
138//!   // query can be a struct or a map too.
139//!   .query(&[("limit", "10")])?
140//!   // header for demo
141//!   .header("key", "value")?
142//!   .list()
143//!   .await?;
144//! # Ok::<(), Box<dyn std::error::Error>>(())
145//! # });
146//! ```
147//!
148//! **All Requests**
149//!
150//! Use `Config`, `OpenAIConfig` etc. for configuring url, headers or query parameters globally for all requests.
151//!
152//! ## OpenAI-compatible Providers
153//!
154//! Even though the scope of the crate is official OpenAI APIs, it is very configurable to work with compatible providers.
155//!
156//! **Configurable Path**
157//!
158//! In addition to `.query()`, `.header()`, and `.headers()`, the path for an individual request can be changed by using the `.path()` method on the API group.
159//!
160//! For example:
161//! ```
162//! # tokio_test::block_on(async {
163//! # use async_openai::{Client, types::chat::CreateChatCompletionRequestArgs};
164//! # use async_openai::traits::RequestOptionsBuilder;
165//! # let client = Client::new();
166//! # let request = CreateChatCompletionRequestArgs::default()
167//! #     .model("gpt-4")
168//! #     .messages([])
169//! #     .build()
170//! #     .unwrap();
171//! client
172//!   .chat()
173//!   .path("/v1/messages")?
174//!   .create(request)
175//!   .await?;
176//! # Ok::<(), Box<dyn std::error::Error>>(())
177//! # });
178//! ```
179//!
180//! **Dynamic Dispatch**
181//!
182//! This allows you to use same code (say a `fn`) to call APIs on different OpenAI-compatible providers.
183//!
184//! For any struct that implements `Config` trait, wrap it in a smart pointer and cast the pointer to `dyn Config`
185//! trait object, then create a client with `Box` or `Arc` wrapped configuration.
186//!
187//! For example:
188//! ```
189//! use async_openai::{Client, config::{Config, OpenAIConfig}};
190//!
191//! // Use `Box` or `std::sync::Arc` to wrap the config
192//! let config = Box::new(OpenAIConfig::default()) as Box<dyn Config>;
193//! // create client
194//! let client: Client<Box<dyn Config>> = Client::with_config(config);
195//!
196//! // A function can now accept a `&Client<Box<dyn Config>>` parameter
197//! // which can invoke any OpenAI-compatible API
198//! fn chat_completion(client: &Client<Box<dyn Config>>) {
199//!     todo!()
200//! }
201//! ```
202//!
203//! ## Microsoft Azure
204//!
205//! ```
206//! use async_openai::{Client, config::AzureConfig};
207//!
208//! let config = AzureConfig::new()
209//!     .with_api_base("https://my-resource-name.openai.azure.com")
210//!     .with_api_version("2023-03-15-preview")
211//!     .with_deployment_id("deployment-id")
212//!     .with_api_key("...");
213//!
214//! let client = Client::with_config(config);
215//!
216//! // Note that `async-openai` only implements OpenAI spec
217//! // and doesn't maintain parity with the spec of Azure OpenAI service.
218//!
219//! ```
220//!
221//!
222//! ## Examples
223//! For full working examples for all supported features see [examples](https://github.com/64bit/async-openai/tree/main/examples) directory in the repository.
224//!
225#![cfg_attr(docsrs, feature(doc_cfg))]
226
// When both `_api` and `byot` are enabled, import the real `byot` attribute macro,
// which generates the additional `_byot`-suffixed generic method variants
// (see the "Bring Your Own Types" section in the module docs above).
227#[cfg(all(feature = "_api", feature = "byot"))]
228pub(crate) use async_openai_macros::byot;
229
// Without the `byot` feature, substitute a passthrough macro under the same name
// so `#[byot(...)]` annotations still compile (presumably emitting the annotated
// item unchanged — the macro body lives in `async_openai_macros`).
230#[cfg(all(feature = "_api", not(feature = "byot")))]
231pub(crate) use async_openai_macros::byot_passthrough as byot;
232
233// #[cfg(all(not(feature = "_api"), not(feature = "byot")))]
234// #[macro_export]
235// macro_rules! byot {
236//     ($($tt:tt)*) => {
237//         $($tt)*
238//     };
239// }
240
241#[cfg(feature = "administration")]
242mod admin;
243#[cfg(feature = "assistant")]
244mod assistants;
245#[cfg(feature = "audio")]
246mod audio;
247#[cfg(feature = "batch")]
248mod batches;
249#[cfg(feature = "chat-completion")]
250mod chat;
251#[cfg(feature = "chatkit")]
252mod chatkit;
253#[cfg(feature = "_api")]
254mod client;
255#[cfg(feature = "completions")]
256mod completion;
257#[cfg(feature = "_api")]
258pub mod config;
259#[cfg(feature = "container")]
260mod containers;
261#[cfg(feature = "image")]
262mod download;
263#[cfg(feature = "embedding")]
264mod embedding;
265pub mod error;
266#[cfg(feature = "evals")]
267mod evals;
268#[cfg(feature = "file")]
269mod file;
270#[cfg(feature = "finetuning")]
271mod fine_tuning;
272#[cfg(feature = "image")]
273mod image;
274#[cfg(feature = "_api")]
275mod impls;
276#[cfg(feature = "model")]
277mod model;
278#[cfg(feature = "moderation")]
279mod moderation;
280#[cfg(feature = "realtime")]
281mod realtime;
282#[cfg(feature = "_api")]
283mod request_options;
284#[cfg(feature = "responses")]
285mod responses;
286#[cfg(feature = "_api")]
287pub mod traits;
288pub mod types;
289#[cfg(feature = "upload")]
290mod uploads;
291#[cfg(any(
292    feature = "audio",
293    feature = "file",
294    feature = "upload",
295    feature = "image",
296    feature = "video",
297    feature = "container"
298))]
299mod util;
300#[cfg(feature = "vectorstore")]
301mod vectorstores;
302#[cfg(feature = "video")]
303mod video;
304#[cfg(feature = "webhook")]
305pub mod webhooks;
306
307// admin::* would be good - however it's expanded here so that docs.rs shows the feature flags
308#[cfg(feature = "administration")]
309pub use admin::{
310    Admin, AdminAPIKeys, AuditLogs, Certificates, GroupRoles, GroupUsers, Groups, Invites,
311    ProjectAPIKeys, ProjectCertificates, ProjectGroupRoles, ProjectGroups, ProjectRateLimits,
312    ProjectRoles, ProjectServiceAccounts, ProjectUserRoles, ProjectUsers, Projects, Roles, Usage,
313    UserRoles, Users,
314};
315#[cfg(feature = "assistant")]
316pub use assistants::{Assistants, Messages, Runs, Steps, Threads};
317#[cfg(feature = "audio")]
318pub use audio::{Audio, Speech, Transcriptions, Translations};
319#[cfg(feature = "batch")]
320pub use batches::Batches;
321#[cfg(feature = "chat-completion")]
322pub use chat::Chat;
323#[cfg(feature = "chatkit")]
324pub use chatkit::Chatkit;
325#[cfg(feature = "_api")]
326pub use client::Client;
327#[cfg(feature = "completions")]
328pub use completion::Completions;
329#[cfg(feature = "container")]
330pub use containers::{ContainerFiles, Containers};
331#[cfg(feature = "embedding")]
332pub use embedding::Embeddings;
333#[cfg(feature = "evals")]
334pub use evals::{EvalRunOutputItems, EvalRuns, Evals};
335#[cfg(feature = "file")]
336pub use file::Files;
337#[cfg(feature = "finetuning")]
338pub use fine_tuning::FineTuning;
339#[cfg(feature = "image")]
340pub use image::Images;
341#[cfg(feature = "model")]
342pub use model::Models;
343#[cfg(feature = "moderation")]
344pub use moderation::Moderations;
345#[cfg(feature = "realtime")]
346pub use realtime::Realtime;
347#[cfg(feature = "_api")]
348pub use request_options::RequestOptions;
349#[cfg(feature = "responses")]
350pub use responses::{ConversationItems, Conversations, Responses};
351#[cfg(feature = "upload")]
352pub use uploads::Uploads;
353#[cfg(feature = "vectorstore")]
354pub use vectorstores::{VectorStoreFileBatches, VectorStoreFiles, VectorStores};
355#[cfg(feature = "video")]
356pub use video::Videos;