use llm::builder::{LLMBackend, LLMBuilder};
/// Minimal example: synthesize speech with the ElevenLabs backend and save the
/// result to an MP3 file in the current working directory.
///
/// Reads `ELEVENLABS_API_KEY` from the environment; without a real key the
/// client still builds, but the synthesis request itself will fail.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // `unwrap_or_else` defers building the fallback String so no allocation
    // happens when the env var is set (Clippy `or_fun_call`).
    let api_key =
        std::env::var("ELEVENLABS_API_KEY").unwrap_or_else(|_| "test_key".into());

    // Configure the TTS client: backend, model, and voice id.
    let tts = LLMBuilder::new()
        .backend(LLMBackend::ElevenLabs)
        .api_key(api_key)
        .model("eleven_multilingual_v2")
        .voice("JBFqnCBsd6RMkjVDRZzb")
        .build()?;

    let text = "Hello! This is an example of text-to-speech synthesis using ElevenLabs.";

    // Request synthesis; the returned bytes are written out as MP3 below.
    let audio_data = tts.speech(text).await?;

    std::fs::write("output-speech-elevenlabs.mp3", audio_data)?;
    println!("Audio file generated successfully: output-speech-elevenlabs.mp3");

    Ok(())
}