// async_openai_wasm/lib.rs
1//! Rust library for OpenAI on WASM
2//!
3//! ## Creating client
4//!
5//! ```
6//! use async_openai_wasm::{Client, config::OpenAIConfig};
7//!
8//! // Create an OpenAI client with the api key from env var OPENAI_API_KEY and the default base url.
9//! let client = Client::new();
10//!
11//! // Above is shortcut for
12//! let config = OpenAIConfig::default();
13//! let client = Client::with_config(config);
14//!
15//! // OR use an API key from a different source and a non-default organization
16//! let api_key = "sk-..."; // This secret could be from a file, or environment variable.
17//! let config = OpenAIConfig::new()
18//! .with_api_key(api_key)
19//! .with_org_id("the-continental");
20//!
21//! let client = Client::with_config(config);
22//!
23//! // Use custom reqwest client
24//! let http_client = reqwest::ClientBuilder::new().user_agent("async-openai-wasm").build().unwrap();
25//! let client = Client::new().with_http_client(http_client);
26//! ```
27//!
28//!
29//! ## Making requests
30//!
31//!```
32//!# tokio_test::block_on(async {
33//! use async_openai_wasm::{Client, types::responses::{CreateResponseArgs}};
34//!
35//! // Create client
36//! let client = Client::new();
37//!
38//! // Create request using builder pattern
39//! // Every request struct has companion builder struct with same name + Args suffix
40//! let request = CreateResponseArgs::default()
41//! .model("gpt-5-mini")
42//! .input("tell me the recipe of pav bhaji")
43//! .max_output_tokens(512u32)
44//! .build()?;
45//!
46//! // Call API
47//! let response = client
48//! .responses() // Get the API "group" (responses, images, etc.) from the client
49//! .create(request) // Make the API call in that "group"
50//! .await?;
51//!
52//! println!("{:?}", response.output_text());
53//! # Ok::<(), Box<dyn std::error::Error>>(())
54//! # });
55//!```
56//!
57//! ## Bring Your Own Types
58//!
59//! To use custom types for inputs and outputs, enable `byot` feature which provides additional generic methods with same name and `_byot` suffix.
60//! This feature is available on methods whose return type is not `Bytes`
61//!
62//!```
63//!# #[cfg(feature = "byot")]
64//!# tokio_test::block_on(async {
65//! use async_openai_wasm::Client;
66//! use serde_json::{Value, json};
67//!
68//! let client = Client::new();
69//!
70//! let response: Value = client
71//! .chat()
72//! .create_byot(json!({
73//! "messages": [
74//! {
75//! "role": "developer",
76//! "content": "You are a helpful assistant"
77//! },
78//! {
79//! "role": "user",
80//! "content": "What do you think about life?"
81//! }
82//! ],
83//! "model": "gpt-4o",
84//! "store": false
85//! }))
86//! .await?;
87//!
88//! if let Some(content) = response["choices"][0]["message"]["content"].as_str() {
89//! println!("{}", content);
90//! }
91//! # Ok::<(), Box<dyn std::error::Error>>(())
92//! # });
93//!```
94//!
95//! **References: Borrow Instead of Move**
96//!
97//! With `byot` use reference to request types
98//!
99//! ```
100//! # #[cfg(feature = "byot")]
101//! # tokio_test::block_on(async {
102//! # use async_openai_wasm::{Client, types::responses::{CreateResponse, Response}};
103//! # let client = Client::new();
104//! # let request = CreateResponse::default();
105//! let response: Response = client
106//! .responses()
107//! .create_byot(&request).await?;
108//! # Ok::<(), Box<dyn std::error::Error>>(())
109//! # });
110//! ```
111//!
112//! ## Rust Types
113//!
114//! To only use Rust types from the crate - use feature flag `types`.
115//!
116//! There are granular feature flags like `response-types`, `chat-completion-types`, etc.
117//!
118//! These granular types are enabled when the corresponding API feature is enabled - for example `responses` will enable `response-types`.
119//!
120//! ## Configurable Requests
121//!
122//! **Individual Request**
123//!
124//! Certain individual APIs need additional query or header parameters - these can be provided by chaining `.query()`, `.header()`, `.headers()` on the API group.
125//!
126//! For example:
127//! ```
128//! # tokio_test::block_on(async {
129//! # use async_openai_wasm::Client;
130//! # use async_openai_wasm::traits::RequestOptionsBuilder;
131//! # let client = Client::new();
132//! client
133//! .chat()
134//! // query can be a struct or a map too.
135//! .query(&[("limit", "10")])?
136//! // header for demo
137//! .header("key", "value")?
138//! .list()
139//! .await?;
140//! # Ok::<(), Box<dyn std::error::Error>>(())
141//! # });
142//! ```
143//!
144//! **All Requests**
145//!
146//! Use `Config`, `OpenAIConfig` etc. for configuring url, headers or query parameters globally for all requests.
147//!
148//! ## OpenAI-compatible Providers
149//!
150//! Even though the scope of the crate is official OpenAI APIs, it is very configurable to work with compatible providers.
151//!
152//! **Configurable Path**
153//!
154//! In addition to `.query()`, `.header()`, `.headers()`, the path for an individual request can be changed by using the `.path()` method on the API group.
155//!
156//! For example:
157//! ```
158//! # tokio_test::block_on(async {
159//! # use async_openai_wasm::{Client, types::chat::CreateChatCompletionRequestArgs};
160//! # use async_openai_wasm::traits::RequestOptionsBuilder;
161//! # let client = Client::new();
162//! # let request = CreateChatCompletionRequestArgs::default()
163//! # .model("gpt-4")
164//! # .messages([])
165//! # .build()
166//! # .unwrap();
167//! client
168//! .chat()
169//! .path("/v1/messages")?
170//! .create(request)
171//! .await?;
172//! # Ok::<(), Box<dyn std::error::Error>>(())
173//! # });
174//! ```
175//!
176//! **Dynamic Dispatch**
177//!
178//! This allows you to use the same code (say a `fn`) to call APIs on different OpenAI-compatible providers.
179//!
180//! For any struct that implements `Config` trait, wrap it in a smart pointer and cast the pointer to `dyn Config`
181//! trait object, then create a client with `Box` or `Arc` wrapped configuration.
182//!
183//! For example:
184//! ```
185//! use async_openai_wasm::{Client, config::{Config, OpenAIConfig}};
186//!
187//! // Use `Box` or `std::sync::Arc` to wrap the config
188//! let config = Box::new(OpenAIConfig::default()) as Box<dyn Config>;
189//! // create client
190//! let client: Client<Box<dyn Config>> = Client::with_config(config);
191//!
192//! // A function can now accept a `&Client<Box<dyn Config>>` parameter
193//! // which can invoke any openai compatible api
194//! fn chat_completion(client: &Client<Box<dyn Config>>) {
195//! todo!()
196//! }
197//! ```
198//!
199//! ## Microsoft Azure
200//!
201//! ```
202//! use async_openai_wasm::{Client, config::AzureConfig};
203//!
204//! let config = AzureConfig::new()
205//! .with_api_base("https://my-resource-name.openai.azure.com")
206//! .with_api_version("2023-03-15-preview")
207//! .with_deployment_id("deployment-id")
208//! .with_api_key("...");
209//!
210//! let client = Client::with_config(config);
211//!
212//! // Note that `async-openai-wasm` only implements the OpenAI spec
213//! // and doesn't maintain parity with the spec of the Azure OpenAI service.
214//!
215//! ```
216//!
217//!
218//! ## Examples
219//! For full working examples of the original `async-openai` for all supported features see [examples](https://github.com/64bit/async-openai/tree/main/examples) directory in the repository.
220//! Also see [wasm examples](https://github.com/ifsheldon/async-openai-wasm/tree/main/examples)
221//!
222#![cfg_attr(docsrs, feature(doc_cfg))]
223
224#[cfg(all(feature = "_api", feature = "byot"))]
225pub(crate) use async_openai_wasm_macros::byot;
226
227#[cfg(all(feature = "_api", not(feature = "byot")))]
228pub(crate) use async_openai_wasm_macros::byot_passthrough as byot;
229
230// #[cfg(all(not(feature = "_api"), not(feature = "byot")))]
231// #[macro_export]
232// macro_rules! byot {
233// ($($tt:tt)*) => {
234// $($tt)*
235// };
236// }
237
238#[cfg(feature = "administration")]
239mod admin;
240#[cfg(feature = "assistant")]
241mod assistants;
242#[cfg(feature = "audio")]
243mod audio;
244#[cfg(feature = "batch")]
245mod batches;
246#[cfg(feature = "chat-completion")]
247mod chat;
248#[cfg(feature = "chatkit")]
249mod chatkit;
250#[cfg(feature = "_api")]
251mod client;
252#[cfg(feature = "completions")]
253mod completion;
254#[cfg(feature = "_api")]
255pub mod config;
256#[cfg(feature = "container")]
257mod containers;
258#[cfg(feature = "embedding")]
259mod embedding;
260pub mod error;
261#[cfg(feature = "evals")]
262mod evals;
263#[cfg(feature = "file")]
264mod file;
265#[cfg(feature = "finetuning")]
266mod fine_tuning;
267#[cfg(feature = "image")]
268mod image;
269#[cfg(feature = "_api")]
270mod impls;
271#[cfg(feature = "model")]
272mod model;
273#[cfg(feature = "moderation")]
274mod moderation;
275#[cfg(feature = "realtime")]
276mod realtime;
277#[cfg(feature = "_api")]
278mod request_options;
279#[cfg(feature = "responses")]
280mod responses;
281#[cfg(feature = "_api")]
282pub mod traits;
283pub mod types;
284#[cfg(feature = "upload")]
285mod uploads;
286#[cfg(any(
287 feature = "audio",
288 feature = "file",
289 feature = "upload",
290 feature = "image",
291 feature = "video",
292 feature = "container"
293))]
294mod util;
295#[cfg(feature = "vectorstore")]
296mod vectorstores;
297#[cfg(feature = "video")]
298mod video;
299#[cfg(feature = "webhook")]
300pub mod webhooks;
301
302// admin::* would be good - however its expanded here so that docs.rs shows the feature flags
303#[cfg(feature = "administration")]
304pub use admin::{
305 Admin, AdminAPIKeys, AuditLogs, Certificates, GroupRoles, GroupUsers, Groups, Invites,
306 ProjectAPIKeys, ProjectCertificates, ProjectGroupRoles, ProjectGroups, ProjectRateLimits,
307 ProjectRoles, ProjectServiceAccounts, ProjectUserRoles, ProjectUsers, Projects, Roles, Usage,
308 UserRoles, Users,
309};
310#[cfg(feature = "assistant")]
311pub use assistants::{Assistants, Messages, Runs, Steps, Threads};
312#[cfg(feature = "audio")]
313pub use audio::{Audio, Speech, Transcriptions, Translations};
314#[cfg(feature = "batch")]
315pub use batches::Batches;
316#[cfg(feature = "chat-completion")]
317pub use chat::Chat;
318#[cfg(feature = "chatkit")]
319pub use chatkit::Chatkit;
320#[cfg(feature = "_api")]
321pub use client::{Client, OpenAIEventStream, OpenAIFormEventStream};
322#[cfg(feature = "completions")]
323pub use completion::Completions;
324#[cfg(feature = "container")]
325pub use containers::{ContainerFiles, Containers};
326#[cfg(feature = "embedding")]
327pub use embedding::Embeddings;
328#[cfg(feature = "evals")]
329pub use evals::{EvalRunOutputItems, EvalRuns, Evals};
330#[cfg(feature = "file")]
331pub use file::Files;
332#[cfg(feature = "finetuning")]
333pub use fine_tuning::FineTuning;
334#[cfg(feature = "image")]
335pub use image::Images;
336#[cfg(feature = "model")]
337pub use model::Models;
338#[cfg(feature = "moderation")]
339pub use moderation::Moderations;
340#[cfg(feature = "realtime")]
341pub use realtime::Realtime;
342#[cfg(feature = "_api")]
343pub use request_options::RequestOptions;
344#[cfg(feature = "responses")]
345pub use responses::{ConversationItems, Conversations, Responses};
346#[cfg(feature = "upload")]
347pub use uploads::Uploads;
348#[cfg(feature = "vectorstore")]
349pub use vectorstores::{VectorStoreFileBatches, VectorStoreFiles, VectorStores};
350#[cfg(feature = "video")]
351pub use video::Videos;