//! Rust library for OpenAI
//!
//! ## Creating a client
//!
//! ```
//! use async_openai::{Client, config::OpenAIConfig};
//!
//! // Create an OpenAI client with the API key from the env var OPENAI_API_KEY and the default base URL.
//! let client = Client::new();
//!
//! // The above is a shortcut for
//! let config = OpenAIConfig::default();
//! let client = Client::with_config(config);
//!
//! // Or use an API key from a different source and a non-default organization
//! let api_key = "sk-..."; // This secret could come from a file or an environment variable.
//! let config = OpenAIConfig::new()
//!     .with_api_key(api_key)
//!     .with_org_id("the-continental");
//!
//! let client = Client::with_config(config);
//!
//! // Use a custom reqwest client
//! let http_client = reqwest::ClientBuilder::new().user_agent("async-openai").build().unwrap();
//! let client = Client::new().with_http_client(http_client);
//! ```
//!
//! ## Microsoft Azure Endpoints
//!
//! ```
//! use async_openai::{Client, config::AzureConfig};
//!
//! let config = AzureConfig::new()
//!     .with_api_base("https://my-resource-name.openai.azure.com")
//!     .with_api_version("2023-03-15-preview")
//!     .with_deployment_id("deployment-id")
//!     .with_api_key("...");
//!
//! let client = Client::with_config(config);
//!
//! // Note that `async-openai` only implements the OpenAI spec
//! // and doesn't maintain parity with the spec of the Azure OpenAI service.
//! ```
//!
//! ## Making requests
//!
//!```
//!# tokio_test::block_on(async {
//!
//! use async_openai::{Client, types::{CreateCompletionRequestArgs}};
//!
//! // Create client
//! let client = Client::new();
//!
//! // Create request using builder pattern
//! // Every request struct has a companion builder struct with the same name plus an "Args" suffix
//! let request = CreateCompletionRequestArgs::default()
//!     .model("gpt-3.5-turbo-instruct")
//!     .prompt("Tell me the recipe of alfredo pasta")
//!     .max_tokens(40_u32)
//!     .build()
//!     .unwrap();
//!
//! // Call API
//! let response = client
//!     .completions() // Get the API "group" (completions, images, etc.) from the client
//!     .create(request) // Make the API call in that "group"
//!     .await
//!     .unwrap();
//!
//! println!("{}", response.choices.first().unwrap().text);
//! # });
//!```
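//!
//! The same builder-then-call pattern applies to the other API groups. As a
//! sketch (assuming the chat types `CreateChatCompletionRequestArgs` and
//! `ChatCompletionRequestUserMessageArgs` from a recent release, and an
//! arbitrary model name), a chat completion request looks like:
//!
//! ```
//! # tokio_test::block_on(async {
//! use async_openai::{
//!     Client,
//!     types::{ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs},
//! };
//!
//! let client = Client::new();
//!
//! // Build the request: one user message, then the request itself.
//! let request = CreateChatCompletionRequestArgs::default()
//!     .model("gpt-4o-mini")
//!     .messages([ChatCompletionRequestUserMessageArgs::default()
//!         .content("Tell me a one-line joke")
//!         .build()
//!         .unwrap()
//!         .into()])
//!     .build()
//!     .unwrap();
//!
//! // Call the chat "group" on the client.
//! let response = client.chat().create(request).await.unwrap();
//!
//! // `content` is an Option, since some responses (e.g. tool calls) carry no text.
//! println!("{:?}", response.choices.first().unwrap().message.content);
//! # });
//! ```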
//!
//! ## Examples
//! For full working examples of all supported features, see the [examples](https://github.com/64bit/async-openai/tree/main/examples) directory in the repository.
//!
pub use AssistantFiles;
pub use Assistants;
pub use Audio;
pub use AuditLogs;
pub use Batches;
pub use Chat;
pub use Client;
pub use Completions;
pub use Embeddings;
pub use Files;
pub use FineTuning;
pub use Images;
pub use Invites;
pub use MessageFiles;
pub use Messages;
pub use Models;
pub use Moderations;
pub use ProjectAPIKeys;
pub use ProjectServiceAccounts;
pub use ProjectUsers;
pub use Projects;
pub use Runs;
pub use Steps;
pub use Threads;
pub use Uploads;
pub use Users;
pub use VectorStoreFileBatches;
pub use VectorStoreFiles;
pub use VectorStores;