use std::{fs::File, io::BufReader};
use llm::builder::{LLMBackend, LLMBuilder};
use rodio::{Decoder, OutputStream, Sink};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Fall back to a placeholder key so the example still builds without the
    // env var set; the API request below will fail without a real key.
    // `unwrap_or_else` avoids allocating the fallback String when the var exists.
    let api_key = std::env::var("OPENAI_API_KEY").unwrap_or_else(|_| "test_key".into());

    // Configure an OpenAI text-to-speech client via the llm builder.
    let tts = LLMBuilder::new()
        .backend(LLMBackend::OpenAI)
        .api_key(api_key)
        .model("tts-1")
        .voice("ash")
        .build()?;

    let text = "Hello! This is an example of text-to-speech synthesis using OpenAI with LLM crates and rodio in Rust.";

    // Synthesize speech; `audio_data` holds the encoded MP3 bytes.
    let audio_data = tts.speech(text).await?;

    // Persist a copy to disk for inspection/reuse. Pass a reference so the
    // buffer remains available for playback below.
    std::fs::write("output-speech.mp3", &audio_data)?;

    // Play directly from the in-memory bytes instead of re-opening and
    // re-reading the file we just wrote. All failures propagate with `?`
    // (the error types implement std::error::Error) instead of panicking
    // via unwrap().
    let (_stream, stream_handle) = OutputStream::try_default()?;
    let sink = Sink::try_new(&stream_handle)?;
    // Cursor<Vec<u8>> implements Read + Seek, which Decoder::new requires.
    let source = Decoder::new(std::io::Cursor::new(audio_data))?;
    sink.append(source);

    // Block until playback finishes so the process doesn't exit early.
    sink.sleep_until_end();
    Ok(())
}