audio_transcriptions.rs

//! Transcribes an audio clip with the whisper-1 model three ways: by
//! file path with JSON output, by file path with raw text output, and
//! from an in-memory byte buffer.

use openai_api_rs::v1::api::OpenAIClient;
use openai_api_rs::v1::audio::{AudioTranscriptionRequest, WHISPER_1};
use std::env;
use std::fs::File;
use std::io::Read;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
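    // Box<dyn Error> lets `?` propagate the different error types used
    // below (env lookup, file I/O, API calls) without manual conversion.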
    // Read the API key from the environment and build the client.
    let api_key = env::var("OPENAI_API_KEY")?;
    let mut client = OpenAIClient::builder().with_api_key(api_key).build()?;

    let file_path = "examples/data/problem.mp3";
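    // The path is relative to the crate root; run from there, e.g.
    // `cargo run --example audio_transcriptions` (assuming this file
    // sits in the crate's examples/ directory).
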
    // First request: transcribe by file path with the whisper-1 model.
    let req = AudioTranscriptionRequest::new(file_path.to_string(), WHISPER_1.to_string());

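    // Per OpenAI's audio API docs, response_format accepts "json",
    // "text", "srt", "verbose_json", and "vtt"; this example uses the
    // first two.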
    let req_json = req.clone().response_format("json".to_string());

    let result = client.audio_transcription(req_json).await?;
    println!("{:?}", result);
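
    // Sketch of using the typed result: assuming the response struct
    // mirrors the API's `{"text": "..."}` payload with a public `text`
    // field, the transcript alone would be:
    //     println!("{}", result.text);
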
    // Second request: the same clip, asking for plain-text output via
    // the raw variant.
    let req_raw = req.clone().response_format("text".to_string());

    let result = client.audio_transcription_raw(req_raw).await?;
    println!("{:?}", result);
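
    // Sketch, assuming the raw variant returns the undeserialized body
    // as bytes: with the "text" format the body is the transcript, so
    //     println!("{}", String::from_utf8_lossy(&result));
    // would print it without Debug escaping.
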
    // Third request: transcribe from an in-memory byte buffer. Here
    // the bytes are read from the same file on disk.
    let mut file = File::open(file_path)?;
    let mut buffer = Vec::new();
    file.read_to_end(&mut buffer)?;
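
    // The byte-based constructor suits audio that never touches disk,
    // e.g. a clip received over the network or synthesized in memory.
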
    let req = AudioTranscriptionRequest::new_bytes(buffer, WHISPER_1.to_string());

    let req_json = req.clone().response_format("json".to_string());

    let result = client.audio_transcription(req_json).await?;
    println!("{:?}", result);

    Ok(())
}