1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
//! The audio API of the OpenAI API.
//!
//! ## NOTE
//! This is only available for the `audio` feature flag.
//!
//! ## Supported APIs
//! - [x] [Speech](https://platform.openai.com/docs/api-reference/audio/createSpeech)
//! - [x] [Transcriptions](https://platform.openai.com/docs/api-reference/audio/createTranscription)
//! - [x] [Translations](https://platform.openai.com/docs/api-reference/audio/createTranslation)
//!
//! ## Supported text response formats
//! - [x] Plain text
//! - [x] JSON
//! - [x] Verbose JSON
//! - [x] SubRip Subtitle
//! - [x] WebVTT
//!
//! ## Examples
//!
//! ### Speech
//! An example to call the speech API with the `audio` feature flag and the `tokio`, `anyhow` and `tokio_stream` crates is as follows:
//!
//! ```no_run
//! use oaapi::Client;
//! use oaapi::audio::SpeechRequestBody;
//! use oaapi::audio::SpeechInput;
//! use oaapi::audio::Voice;
//!
//! use tokio_stream::StreamExt;
//!
//! #[tokio::main]
//! async fn main() -> anyhow::Result<()> {
//! // 1. Create a client with the API key from the environment variable: "OPENAI_API_KEY"
//! let client = Client::from_env()?;
//! // or specify the API key directly.
//! // let client = Client::new(oaapi::ApiKey::new("OPENAI_API_KEY"), None, None);
//!
//! // 2. Create the request body parameters.
//! let request_body = SpeechRequestBody {
//! input: SpeechInput::new("Text to speech.")?,
//! voice: Voice::Alloy,
//! ..Default::default()
//! };
//!
//! // 3. Call the API.
//! let mut stream = client
//! .audio_speech(request_body)
//! .await?;
//!
//! // 4. Read the stream of the speech data.
//! while let Some(chunk) = stream.next().await {
//! // Do something with the chunk.
//! }
//!
//! Ok(())
//! }
//! ```
//!
//! ### Transcriptions
//! An example to call the transcriptions API with the `audio` feature flag and the `tokio` and `anyhow` crates is as follows:
//!
//! ```no_run
//! use oaapi::Client;
//! use oaapi::audio::File;
//! use oaapi::audio::TranscriptionsRequestBody;
//!
//! #[tokio::main]
//! async fn main() -> anyhow::Result<()> {
//! // 1. Create a client with the API key from the environment variable: "OPENAI_API_KEY"
//! let client = Client::from_env()?;
//! // or specify the API key directly.
//! // let client = Client::new(oaapi::ApiKey::new("OPENAI_API_KEY"), None, None);
//!
//! // 2. Load the audio file that you want to transcribe.
//! let file_path = "path/to/audio/file.mp3";
//! let file = tokio::fs::read(file_path).await?;
//! let file = File::new(file_path, file)?;
//!
//! // 3. Create the request body parameters.
//! let request_body = TranscriptionsRequestBody {
//! file,
//! ..Default::default()
//! };
//!
//! // 4. Call the API with specifying the response format.
//! let response = client
//! .audio_transcribe_into_json(request_body)
//! .await?;
//!
//! // 5. Use the response.
//! println!("Result:\n{}", response);
//!
//! Ok(())
//! }
//! ```
//!
//! ### Translations
//! An example to call the translations API with the `audio` feature flag and the `tokio` and `anyhow` crates is as follows:
//!
//! ```no_run
//! use oaapi::Client;
//! use oaapi::audio::File;
//! use oaapi::audio::TranslationsRequestBody;
//!
//! #[tokio::main]
//! async fn main() -> anyhow::Result<()> {
//! // 1. Create a client with the API key from the environment variable: "OPENAI_API_KEY"
//! let client = Client::from_env()?;
//! // or specify the API key directly.
//! // let client = Client::new(oaapi::ApiKey::new("OPENAI_API_KEY"), None, None);
//!
//! // 2. Load the audio file that you want to translate.
//! let file_path = "path/to/audio/file.mp3";
//! let file = tokio::fs::read(file_path).await?;
//! let file = File::new(file_path, file)?;
//!
//! // 3. Create the request body parameters.
//! let request_body = TranslationsRequestBody {
//! file,
//! ..Default::default()
//! };
//!
//! // 4. Call the API with specifying the response format.
//! let response = client
//! .audio_translate_into_verbose_json(request_body)
//! .await?;
//!
//! // 5. Use the response.
//! println!("Result:\n{}", response);
//!
//! Ok(())
//! }
//! ```
// NOTE(review): every re-export below is a bare identifier with no module
// path (e.g. `pub use SpeechRequestBody;`). In Rust 2018+ editions a bare
// `use Name;` resolves `Name` as an external crate, so these almost
// certainly need a `self::`/`crate::…` path prefix pointing at the
// submodules that actually declare them — confirm against the module tree.

// Request body types for the three endpoints shown in the module docs
// (speech, transcriptions, translations).
pub use SpeechRequestBody;
pub use TranscriptionsRequestBody;
pub use TranslationsRequestBody;
// Error types.
pub use AudioApiError;
pub use TextFormatError;
// Request parameter types shared across endpoints.
pub use File;
pub use Iso639_1;
pub use AudioModel;
pub use SpeechModel;
// Response types and per-format response formatters
// (plain text / JSON / verbose JSON / SRT / VTT, per the module docs).
pub use JsonResponse;
pub use JsonResponseFormatter;
pub use PlainTextResponseFormatter;
pub use SpeechResponseFormat;
pub use SrtResponseFormatter;
pub use TextResponseFormat;
pub use TextResponseFormatter;
pub use VerboseJsonResponse;
pub use VerboseJsonResponseFormatter;
pub use VttResponseFormatter;
// Result type aliases.
pub use AudioApiResult;
pub use SpeechStreamResult;
pub use TextFormatResult;
// Additional request parameter types.
pub use SpeechInput;
pub use Speed;
pub use TimestampGranularity;
pub use Voice;
// API call items, one per endpoint / text-response-format combination
// (snake_case names — presumably functions; confirm in the defining modules).
pub use speech;
pub use transcribe_into_json;
pub use transcribe_into_plain_text;
pub use transcribe_into_srt;
pub use transcribe_into_verbose_json;
pub use transcribe_into_vtt;
pub use translate_into_json;
pub use translate_into_plain_text;
pub use translate_into_srt;
pub use translate_into_verbose_json;
pub use translate_into_vtt;