1//! Rust library for OpenAI
2//!
3//! ## Creating client
4//!
5//! ```
6//! use novel_openai::Client;
7//! use novel_openai::config::OpenAIConfig;
8//!
9//! // Create an OpenAI client with api key from env var OPENAI_API_KEY and default base url.
10//! let client = Client::new();
11//!
12//! // Above is shortcut for
13//! let config = OpenAIConfig::default();
14//! let client = Client::with_config(config);
15//!
16//! // OR use API key from different source and a non default organization
17//! let api_key = "sk-..."; // This secret could be from a file, or environment variable.
18//! let config = OpenAIConfig::new()
19//!     .with_api_key(api_key)
20//!     .with_org_id("the-continental");
21//!
22//! let client = Client::with_config(config);
23//!
24//! // Use custom reqwest client
25//! let http_client = reqwest::ClientBuilder::new()
26//!     .user_agent("async-openai")
27//!     .build()
28//!     .unwrap();
29//! let client = Client::new().with_http_client(http_client);
30//! ```
31//!
32//!
33//! ## Making requests
34//!
35//! ```
36//! # tokio_test::block_on(async {
37//!
38//! use novel_openai::Client;
39//! use novel_openai::spec::responses::CreateResponseArgs;
40//!
41//! // Create client
42//! let client = Client::new();
43//!
44//! // Create request using builder pattern
45//! // Every request struct has companion builder struct with same name + Args suffix
46//! let request = CreateResponseArgs::default()
47//!     .model("gpt-5-mini")
48//!     .input("tell me the recipe of pav bhaji")
49//!     .max_output_tokens(512u32)
50//!     .build()?;
51//!
52//! // Call API
53//! let response = client
54//!     .responses()      // Get the API "group" (responses, images, etc.) from the client
55//!     .create(request)    // Make the API call in that "group"
56//!     .await?;
57//!
58//! println!("{:?}", response.output_text());
59//! # Ok::<(), Box<dyn std::error::Error>>(())
60//! # });
61//! ```
62//!
63//! ## Bring Your Own Types
64//!
65//! To use custom types for inputs and outputs, enable `byot` feature which provides additional
66//! generic methods with same name and `_byot` suffix. This feature is available on methods whose
67//! return type is not `Bytes`
68//!
69//! ```
70//! # #[cfg(feature = "byot")]
71//! # tokio_test::block_on(async {
72//! use novel_openai::Client;
73//! use serde_json::{Value, json};
74//!
75//! let client = Client::new();
76//!
77//! let response: Value = client
78//!        .chat()
79//!        .create_byot(json!({
80//!            "messages": [
81//!                {
82//!                    "role": "developer",
83//!                    "content": "You are a helpful assistant"
84//!                },
85//!                {
86//!                    "role": "user",
87//!                    "content": "What do you think about life?"
88//!                }
89//!            ],
90//!            "model": "gpt-4o",
91//!            "store": false
92//!        }))
93//!        .await?;
94//!
95//!  if let Some(content) = response["choices"][0]["message"]["content"].as_str() {
96//!     println!("{}", content);
97//!  }
98//! # Ok::<(), Box<dyn std::error::Error>>(())
99//! # });
100//! ```
101//!
102//! **References: Borrow Instead of Move**
103//!
104//! With `byot` use reference to request types
105//!
106//! ```
107//! # #[cfg(feature = "byot")]
108//! # tokio_test::block_on(async {
109//! # use novel_openai::{Client, spec::responses::{CreateResponse, Response}};
110//! # let client = Client::new();
111//! # let request = CreateResponse::default();
112//! let response: Response = client.responses().create_byot(&request).await?;
113//! # Ok::<(), Box<dyn std::error::Error>>(())
114//! # });
115//! ```
116//!
117//! ## Rust Types
118//!
119//! To only use Rust types from the crate - use feature flag `types`.
120//!
121//! There are granular feature flags like `response-types`, `chat-completion-types`, etc.
122//!
123//! These granular types are enabled when the corresponding API feature is enabled - for example
124//! `responses` will enable `response-types`.
125//!
126//! ## WASM
127//! For WASM targets, streaming, retries, and file operations are not implemented yet.
128//! See [examples/wasm-responses](https://github.com/64bit/async-openai/tree/main/examples/wasm-responses) for a working example.
129//!
130//! ## Configurable Requests
131//!
132//! **Individual Request**
133//!
134//! Certain individual APIs need additional query or header parameters - these can be provided
135//! by chaining `.query()`, `.header()`, `.headers()` on the API group.
136//!
137//! For example:
138//! ```
139//! # tokio_test::block_on(async {
140//! # use novel_openai::Client;
141//! # use novel_openai::traits::RequestOptionsBuilder;
142//! # let client = Client::new();
143//! client
144//!   .chat()
145//!   // query can be a struct or a map too.
146//!   .query(&[("limit", "10")])?
147//!   // header for demo
148//!   .header("key", "value")?
149//!   .list()
150//!   .await?;
151//! # Ok::<(), Box<dyn std::error::Error>>(())
152//! # });
153//! ```
154//!
155//! **All Requests**
156//!
157//! Use `Config`, `OpenAIConfig` etc. for configuring url, headers or query parameters globally for
158//! all requests.
159//!
160//! ## OpenAI-compatible Providers
161//!
162//! Even though the scope of the crate is official OpenAI APIs, it is very configurable to work with
163//! compatible providers.
164//!
165//! **Configurable Path**
166//!
167//! In addition to `.query()`, `.header()`, `.headers()`, a path for an individual request can be
168//! changed by using the `.path()` method on the API group.
169//!
170//! For example:
171//! ```
172//! # tokio_test::block_on(async {
173//! # use novel_openai::{Client, spec::chat::CreateChatCompletionRequestArgs};
174//! # use novel_openai::traits::RequestOptionsBuilder;
175//! # let client = Client::new();
176//! # let request = CreateChatCompletionRequestArgs::default()
177//! #     .model("gpt-4")
178//! #     .messages([])
179//! #     .build()
180//! #     .unwrap();
181//! client.chat().path("/v1/messages")?.create(request).await?;
182//! # Ok::<(), Box<dyn std::error::Error>>(())
183//! # });
184//! ```
185//!
186//! **Dynamic Dispatch**
187//!
188//! This allows you to use same code (say a `fn`) to call APIs on different OpenAI-compatible
189//! providers.
190//!
191//! For any struct that implements `Config` trait, wrap it in a smart pointer and cast the pointer
192//! to `dyn Config` trait object, then create a client with `Box` or `Arc` wrapped configuration.
193//!
194//! For example:
195//! ```
196//! use novel_openai::Client;
197//! use novel_openai::config::{Config, OpenAIConfig};
198//!
199//! // Use `Box` or `std::sync::Arc` to wrap the config
200//! let config = Box::new(OpenAIConfig::default()) as Box<dyn Config>;
201//! // create client
202//! let client: Client<Box<dyn Config>> = Client::with_config(config);
203//!
204//! // A function can now accept a `&Client<Box<dyn Config>>` parameter
205//! // which can invoke any openai compatible api
206//! fn chat_completion(client: &Client<Box<dyn Config>>) {
207//!     todo!()
208//! }
209//! ```
210//!
211//! ## Microsoft Azure
212//!
213//! ```
214//! use novel_openai::Client;
215//! use novel_openai::config::AzureConfig;
216//!
217//! let config = AzureConfig::new()
218//!     .with_api_base("https://my-resource-name.openai.azure.com")
219//!     .with_api_version("2023-03-15-preview")
220//!     .with_deployment_id("deployment-id")
221//!     .with_api_key("...");
222//!
223//! let client = Client::with_config(config);
224//!
225//! // Note that `novel_openai` only implements OpenAI spec
226//! // and doesn't maintain parity with the spec of Azure OpenAI service.
227//! ```
228//!
229//!
230//! ## Examples
231//! For full working examples for all supported features see [examples](https://github.com/64bit/async-openai/tree/main/examples) directory in the repository.
232#![cfg_attr(docsrs, feature(doc_cfg))]
233
234#[cfg(all(feature = "_api", feature = "byot"))]
235pub(crate) use novel_openai_macros::byot;
236#[cfg(all(feature = "_api", not(feature = "byot")))]
237pub(crate) use novel_openai_macros::byot_passthrough as byot;
238
239// #[cfg(all(not(feature = "_api"), not(feature = "byot")))]
240// #[macro_export]
241// macro_rules! byot {
242//     ($($tt:tt)*) => {
243//         $($tt)*
244//     };
245// }
246
247#[cfg(feature = "administration")]
248mod admin;
249#[cfg(feature = "assistant")]
250mod assistants;
251#[cfg(feature = "audio")]
252mod audio;
253#[cfg(feature = "batch")]
254mod batches;
255#[cfg(feature = "chat-completion")]
256mod chat;
257#[cfg(feature = "chatkit")]
258mod chatkit;
259#[cfg(feature = "_api")]
260mod client;
261#[cfg(feature = "completions")]
262mod completion;
263#[cfg(feature = "_api")]
264pub mod config;
265#[cfg(feature = "container")]
266mod containers;
267#[cfg(feature = "image")]
268mod download;
269#[cfg(feature = "embedding")]
270mod embedding;
271pub mod error;
272#[cfg(feature = "evals")]
273mod evals;
274#[cfg(feature = "file")]
275mod file;
276#[cfg(feature = "finetuning")]
277mod fine_tuning;
278#[cfg(feature = "image")]
279mod image;
280#[cfg(feature = "_api")]
281mod impls;
282#[cfg(feature = "model")]
283mod model;
284#[cfg(feature = "moderation")]
285mod moderation;
286#[cfg(feature = "realtime")]
287mod realtime;
288#[cfg(feature = "_api")]
289mod request_options;
290#[cfg(feature = "responses")]
291mod responses;
292pub mod spec;
293#[cfg(feature = "_api")]
294pub mod traits;
295#[cfg(feature = "upload")]
296mod uploads;
297#[cfg(any(
298    feature = "audio",
299    feature = "file",
300    feature = "upload",
301    feature = "image",
302    feature = "video",
303    feature = "container"
304))]
305mod util;
306#[cfg(feature = "vectorstore")]
307mod vectorstores;
308#[cfg(feature = "video")]
309mod video;
310#[cfg(feature = "webhook")]
311pub mod webhooks;
312
313// admin::* would be good - however its expanded here so that docs.rs shows the feature flags
314#[cfg(feature = "administration")]
315pub use admin::{
316    Admin, AdminAPIKeys, AuditLogs, Certificates, GroupRoles, GroupUsers, Groups, Invites,
317    ProjectAPIKeys, ProjectCertificates, ProjectGroupRoles, ProjectGroups, ProjectRateLimits,
318    ProjectRoles, ProjectServiceAccounts, ProjectUserRoles, ProjectUsers, Projects, Roles, Usage,
319    UserRoles, Users,
320};
321#[cfg(feature = "assistant")]
322pub use assistants::{Assistants, Messages, Runs, Steps, Threads};
323#[cfg(feature = "audio")]
324pub use audio::{Audio, Speech, Transcriptions, Translations};
325#[cfg(feature = "batch")]
326pub use batches::Batches;
327#[cfg(feature = "chat-completion")]
328pub use chat::Chat;
329#[cfg(feature = "chatkit")]
330pub use chatkit::Chatkit;
331#[cfg(feature = "_api")]
332pub use client::Client;
333#[cfg(feature = "completions")]
334pub use completion::Completions;
335#[cfg(feature = "container")]
336pub use containers::{ContainerFiles, Containers};
337#[cfg(feature = "embedding")]
338pub use embedding::Embeddings;
339#[cfg(feature = "evals")]
340pub use evals::{EvalRunOutputItems, EvalRuns, Evals};
341#[cfg(feature = "file")]
342pub use file::Files;
343#[cfg(feature = "finetuning")]
344pub use fine_tuning::FineTuning;
345#[cfg(feature = "image")]
346pub use image::Images;
347#[cfg(feature = "model")]
348pub use model::Models;
349#[cfg(feature = "moderation")]
350pub use moderation::Moderations;
351#[cfg(feature = "realtime")]
352pub use realtime::Realtime;
353#[cfg(feature = "_api")]
354pub use request_options::RequestOptions;
355#[cfg(feature = "responses")]
356pub use responses::{ConversationItems, Conversations, Responses};
357#[cfg(feature = "upload")]
358pub use uploads::Uploads;
359#[cfg(feature = "vectorstore")]
360pub use vectorstores::{VectorStoreFileBatches, VectorStoreFiles, VectorStores};
361#[cfg(feature = "video")]
362pub use video::Videos;