//! quantum_sdk/audio.rs — audio request/response types and Client audio methods.

1use std::collections::HashMap;
2
3use serde::{Deserialize, Serialize};
4
5use crate::client::Client;
6use crate::error::Result;
7
/// Request body for text-to-speech.
#[derive(Debug, Clone, Serialize, Default)]
pub struct TextToSpeechRequest {
    /// TTS model (e.g. "tts-1", "eleven_multilingual_v2", "grok-3-tts").
    pub model: String,

    /// Text to synthesise into speech.
    pub text: String,

    /// Voice to use (e.g. "alloy", "echo", "nova", "Rachel").
    #[serde(skip_serializing_if = "Option::is_none")]
    pub voice: Option<String>,

    /// Audio format (e.g. "mp3", "wav", "opus"). Default: "mp3".
    /// Serialized on the wire as `format`.
    #[serde(rename = "format", skip_serializing_if = "Option::is_none")]
    pub output_format: Option<String>,

    /// Speech rate (provider-dependent).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub speed: Option<f64>,
}

/// Backwards-compatible alias.
pub type TtsRequest = TextToSpeechRequest;
32
/// Response from text-to-speech.
#[derive(Debug, Clone, Deserialize)]
pub struct TextToSpeechResponse {
    /// Base64-encoded audio data.
    pub audio_base64: String,

    /// Audio format (e.g. "mp3").
    pub format: String,

    /// Audio file size in bytes.
    pub size_bytes: i64,

    /// Model that generated the audio.
    pub model: String,

    /// Total cost in ticks. Defaults to 0 when the backend omits it
    /// (the client backfills it from response metadata).
    #[serde(default)]
    pub cost_ticks: i64,

    /// Unique request identifier. Defaults to "" when omitted.
    #[serde(default)]
    pub request_id: String,
}

/// Backwards-compatible alias.
pub type TtsResponse = TextToSpeechResponse;
59
/// Request body for speech-to-text.
#[derive(Debug, Clone, Serialize, Default)]
pub struct SpeechToTextRequest {
    /// STT model (e.g. "whisper-1", "scribe_v2").
    pub model: String,

    /// Base64-encoded audio data.
    pub audio_base64: String,

    /// Original filename (helps with format detection). Default: "audio.mp3".
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filename: Option<String>,

    /// BCP-47 language code hint (e.g. "en", "de").
    #[serde(skip_serializing_if = "Option::is_none")]
    pub language: Option<String>,
}

/// Backwards-compatible alias.
pub type SttRequest = SpeechToTextRequest;
80
/// Response from speech-to-text.
#[derive(Debug, Clone, Deserialize)]
pub struct SpeechToTextResponse {
    /// Transcribed text.
    pub text: String,

    /// Model that performed transcription.
    pub model: String,

    /// Total cost in ticks (0 when omitted; backfilled by the client).
    #[serde(default)]
    pub cost_ticks: i64,

    /// Unique request identifier ("" when omitted; backfilled by the client).
    #[serde(default)]
    pub request_id: String,
}

/// Backwards-compatible alias.
pub type SttResponse = SpeechToTextResponse;
101
/// Request body for music generation.
#[derive(Debug, Clone, Serialize, Default)]
pub struct MusicRequest {
    /// Music generation model (e.g. "lyria").
    pub model: String,

    /// Describes the music to generate.
    pub prompt: String,

    /// Target duration in seconds (default 30).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub duration_seconds: Option<i32>,
}
115
/// Response from music generation.
#[derive(Debug, Clone, Deserialize)]
pub struct MusicResponse {
    /// Generated music clips (empty when the backend returns none).
    #[serde(default)]
    pub audio_clips: Vec<MusicClip>,

    /// Model that generated the music.
    #[serde(default)]
    pub model: String,

    /// Total cost in ticks.
    #[serde(default)]
    pub cost_ticks: i64,

    /// Unique request identifier.
    #[serde(default)]
    pub request_id: String,
}
135
/// A single generated music clip.
#[derive(Debug, Clone, Deserialize)]
pub struct MusicClip {
    /// Base64-encoded audio data.
    pub base64: String,

    /// Audio format (e.g. "mp3", "wav").
    #[serde(default)]
    pub format: String,

    /// Audio file size in bytes.
    #[serde(default)]
    pub size_bytes: i64,

    /// Clip index within the batch.
    #[serde(default)]
    pub index: i32,
}
154
/// Request body for sound effects generation.
#[derive(Debug, Clone, Serialize, Default)]
pub struct SoundEffectRequest {
    /// Text prompt describing the sound effect.
    pub prompt: String,

    /// Optional duration in seconds (provider default used when omitted).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub duration_seconds: Option<f64>,
}
165
/// Response from sound effects generation.
#[derive(Debug, Clone, Deserialize)]
pub struct SoundEffectResponse {
    /// Base64-encoded audio data.
    pub audio_base64: String,

    /// Audio format (e.g. "mp3").
    pub format: String,

    /// File size in bytes.
    #[serde(default)]
    pub size_bytes: i64,

    /// Model used.
    #[serde(default)]
    pub model: String,

    /// Total cost in ticks.
    #[serde(default)]
    pub cost_ticks: i64,

    /// Unique request identifier.
    #[serde(default)]
    pub request_id: String,
}
191
192// ---------------------------------------------------------------------------
193// Advanced Audio Types
194// ---------------------------------------------------------------------------
195
/// Generic audio response used by multiple advanced audio endpoints.
///
/// All payload fields are optional because different endpoints return
/// different subsets; unknown fields are preserved in `extra`.
#[derive(Debug, Clone, Deserialize)]
pub struct AudioResponse {
    /// Base64-encoded audio data.
    #[serde(default)]
    pub audio_base64: Option<String>,

    /// Audio format (e.g. "mp3", "wav").
    #[serde(default)]
    pub format: Option<String>,

    /// File size in bytes.
    #[serde(default)]
    pub size_bytes: Option<i64>,

    /// Model used.
    #[serde(default)]
    pub model: Option<String>,

    /// Total cost in ticks.
    #[serde(default)]
    pub cost_ticks: i64,

    /// Unique request identifier.
    #[serde(default)]
    pub request_id: String,

    /// Additional response fields not modelled above (captured via
    /// serde's `flatten` so nothing is silently dropped).
    #[serde(flatten)]
    pub extra: HashMap<String, serde_json::Value>,
}
227
/// A single dialogue turn (used for building the request — converted to text + voices
/// by [`DialogueRequest::from_turns`]).
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct DialogueTurn {
    /// Speaker name or identifier.
    pub speaker: String,

    /// Text for this speaker to say.
    pub text: String,

    /// Voice ID to use for this speaker. Turns without a voice contribute
    /// no entry to the voice mapping.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub voice: Option<String>,
}
241
/// Voice mapping for ElevenLabs dialogue.
#[derive(Debug, Clone, Serialize)]
pub struct DialogueVoice {
    /// Provider voice identifier.
    pub voice_id: String,
    /// Speaker name this voice is assigned to.
    pub name: String,
}
248
/// Request body sent to the QAI proxy for dialogue generation.
/// The proxy expects `text` (full script) + `voices` (speaker-to-voice mapping).
#[derive(Debug, Clone, Serialize, Default)]
pub struct DialogueRequest {
    /// Full dialogue script (e.g. "Speaker1: Hello!\nSpeaker2: Hi there!").
    pub text: String,

    /// Voice mappings — each speaker name mapped to a voice_id.
    pub voices: Vec<DialogueVoice>,

    /// Dialogue model.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,

    /// Output audio format.
    #[serde(rename = "output_format", skip_serializing_if = "Option::is_none")]
    pub output_format: Option<String>,

    /// Seed for reproducible generation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub seed: Option<i32>,
}
271
272impl DialogueRequest {
273    /// Build a DialogueRequest from individual turns.
274    /// Converts turns into the text + voices format the API expects.
275    pub fn from_turns(turns: Vec<DialogueTurn>, model: Option<String>) -> Self {
276        // Build the script text: "Speaker: text\n..."
277        let text = turns.iter()
278            .map(|t| format!("{}: {}", t.speaker, t.text))
279            .collect::<Vec<_>>()
280            .join("\n");
281
282        // Deduplicate voices — one entry per unique speaker
283        let mut seen = std::collections::HashSet::new();
284        let voices: Vec<DialogueVoice> = turns.iter()
285            .filter(|t| t.voice.is_some() && seen.insert(t.speaker.clone()))
286            .map(|t| DialogueVoice {
287                voice_id: t.voice.clone().unwrap_or_default(),
288                name: t.speaker.clone(),
289            })
290            .collect();
291
292        Self {
293            text,
294            voices,
295            model,
296            ..Default::default()
297        }
298    }
299}
300
/// Request body for speech-to-speech conversion.
#[derive(Debug, Clone, Serialize, Default)]
pub struct SpeechToSpeechRequest {
    /// Model for conversion.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,

    /// Base64-encoded source audio.
    pub audio_base64: String,

    /// Target voice.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub voice: Option<String>,

    /// Output audio format. Serialized on the wire as `format`.
    #[serde(rename = "format", skip_serializing_if = "Option::is_none")]
    pub output_format: Option<String>,
}
319
/// Request body for voice isolation.
#[derive(Debug, Clone, Serialize, Default)]
pub struct IsolateVoiceRequest {
    /// Base64-encoded audio to isolate voice from.
    pub audio_base64: String,

    /// Output audio format. Serialized on the wire as `format`.
    #[serde(rename = "format", skip_serializing_if = "Option::is_none")]
    pub output_format: Option<String>,
}

/// Backwards-compatible alias.
pub type IsolateRequest = IsolateVoiceRequest;
333
/// Request body for voice remixing.
#[derive(Debug, Clone, Serialize, Default)]
pub struct RemixVoiceRequest {
    /// Base64-encoded source audio.
    pub audio_base64: String,

    /// Target voice for the remix.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub voice: Option<String>,

    /// Model for remixing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,

    /// Output audio format. Serialized on the wire as `format`.
    #[serde(rename = "format", skip_serializing_if = "Option::is_none")]
    pub output_format: Option<String>,
}

/// Backwards-compatible alias.
pub type RemixRequest = RemixVoiceRequest;
355
/// Request body for audio dubbing.
#[derive(Debug, Clone, Serialize, Default)]
pub struct DubRequest {
    /// Base64-encoded source audio or video.
    pub audio_base64: String,

    /// Original filename (helps detect format).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filename: Option<String>,

    /// Target language (BCP-47 code, e.g. "es", "de").
    pub target_language: String,

    /// Source language (auto-detected if omitted).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub source_language: Option<String>,
}
373
/// Request body for audio alignment / forced alignment.
#[derive(Debug, Clone, Serialize, Default)]
pub struct AlignRequest {
    /// Base64-encoded audio data.
    pub audio_base64: String,

    /// Transcript text to align against the audio.
    pub text: String,

    /// Language code.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub language: Option<String>,
}
387
/// A single alignment segment.
#[derive(Debug, Clone, Deserialize)]
pub struct AlignmentSegment {
    /// Aligned text.
    pub text: String,

    /// Start time in seconds.
    pub start: f64,

    /// End time in seconds.
    pub end: f64,
}
400
/// A single word with timing information from forced alignment.
#[derive(Debug, Clone, Deserialize)]
pub struct AlignedWord {
    /// Word text.
    pub text: String,

    /// Start time in seconds.
    pub start_time: f64,

    /// End time in seconds.
    pub end_time: f64,

    /// Alignment confidence score (0.0 when the backend omits it).
    #[serde(default)]
    pub confidence: f64,
}
417
/// Response from audio alignment.
#[derive(Debug, Clone, Deserialize)]
pub struct AlignResponse {
    /// Aligned segments.
    #[serde(default)]
    pub segments: Vec<AlignmentSegment>,

    /// Word-level alignment.
    #[serde(default)]
    pub alignment: Vec<AlignedWord>,

    /// Model used.
    #[serde(default)]
    pub model: String,

    /// Total cost in ticks.
    #[serde(default)]
    pub cost_ticks: i64,

    /// Unique request identifier.
    #[serde(default)]
    pub request_id: String,
}
441
442// ---------------------------------------------------------------------------
443// Typed response structs (parity with Go SDK)
444// ---------------------------------------------------------------------------
445
/// Response from dialogue generation.
#[derive(Debug, Clone, Deserialize)]
pub struct DialogueResponse {
    /// Base64-encoded audio data.
    pub audio_base64: String,
    /// Audio format (e.g. "mp3").
    pub format: String,
    /// File size in bytes.
    #[serde(default)]
    pub size_bytes: i64,
    /// Model used.
    #[serde(default)]
    pub model: String,
    /// Total cost in ticks.
    #[serde(default)]
    pub cost_ticks: i64,
    /// Unique request identifier.
    #[serde(default)]
    pub request_id: String,
}
460
/// Response from speech-to-speech conversion.
#[derive(Debug, Clone, Deserialize)]
pub struct SpeechToSpeechResponse {
    /// Base64-encoded audio data.
    pub audio_base64: String,
    /// Audio format (e.g. "mp3").
    pub format: String,
    /// File size in bytes.
    #[serde(default)]
    pub size_bytes: i64,
    /// Model used.
    #[serde(default)]
    pub model: String,
    /// Total cost in ticks.
    #[serde(default)]
    pub cost_ticks: i64,
    /// Unique request identifier.
    #[serde(default)]
    pub request_id: String,
}
475
/// Response from voice isolation.
#[derive(Debug, Clone, Deserialize)]
pub struct IsolateVoiceResponse {
    /// Base64-encoded audio data.
    pub audio_base64: String,
    /// Audio format (e.g. "mp3").
    pub format: String,
    /// File size in bytes.
    #[serde(default)]
    pub size_bytes: i64,
    /// Total cost in ticks.
    #[serde(default)]
    pub cost_ticks: i64,
    /// Unique request identifier.
    #[serde(default)]
    pub request_id: String,
}
488
/// Response from voice remixing.
#[derive(Debug, Clone, Deserialize)]
pub struct RemixVoiceResponse {
    /// Base64-encoded audio data (absent when the backend returns a voice id only).
    #[serde(default)]
    pub audio_base64: Option<String>,
    /// Audio format (e.g. "mp3").
    pub format: String,
    /// File size in bytes.
    #[serde(default)]
    pub size_bytes: i64,
    /// Identifier of the remixed voice, when the backend creates one.
    #[serde(default)]
    pub voice_id: Option<String>,
    /// Total cost in ticks.
    #[serde(default)]
    pub cost_ticks: i64,
    /// Unique request identifier.
    #[serde(default)]
    pub request_id: String,
}
504
/// Response from dubbing.
#[derive(Debug, Clone, Deserialize)]
pub struct DubResponse {
    /// Provider-side dubbing job identifier.
    pub dubbing_id: String,
    /// Base64-encoded dubbed audio.
    pub audio_base64: String,
    /// Audio format (e.g. "mp3").
    pub format: String,
    /// Target language of the dub (field name per backend schema).
    #[serde(default)]
    pub target_lang: String,
    /// Job status reported by the backend.
    #[serde(default)]
    pub status: String,
    /// Server-side processing time in seconds.
    #[serde(default)]
    pub processing_time_seconds: f64,
    /// Total cost in ticks.
    #[serde(default)]
    pub cost_ticks: i64,
    /// Unique request identifier.
    #[serde(default)]
    pub request_id: String,
}
522
/// Response from voice design.
#[derive(Debug, Clone, Deserialize)]
pub struct VoiceDesignResponse {
    /// Candidate voice previews generated from the description.
    pub previews: Vec<VoicePreview>,
    /// Total cost in ticks.
    #[serde(default)]
    pub cost_ticks: i64,
    /// Unique request identifier.
    #[serde(default)]
    pub request_id: String,
}

/// A single voice preview from voice design.
#[derive(Debug, Clone, Deserialize)]
pub struct VoicePreview {
    /// Identifier of the generated candidate voice.
    pub generated_voice_id: String,
    /// Base64-encoded preview audio.
    pub audio_base64: String,
    /// Audio format (e.g. "mp3").
    pub format: String,
}
540
/// Response from Starfish TTS.
#[derive(Debug, Clone, Deserialize)]
pub struct StarfishTTSResponse {
    /// Base64-encoded audio data (the backend may return a URL instead).
    #[serde(default)]
    pub audio_base64: Option<String>,
    /// URL to the generated audio, when not inlined as base64.
    #[serde(default)]
    pub url: Option<String>,
    /// Audio format (e.g. "mp3").
    pub format: String,
    /// File size in bytes.
    #[serde(default)]
    pub size_bytes: i64,
    /// Audio duration in seconds.
    #[serde(default)]
    pub duration: f64,
    /// Model used.
    #[serde(default)]
    pub model: String,
    /// Total cost in ticks.
    #[serde(default)]
    pub cost_ticks: i64,
    /// Unique request identifier.
    #[serde(default)]
    pub request_id: String,
}
560
/// Advanced music generation request.
#[derive(Debug, Clone, Serialize, Default)]
pub struct MusicAdvancedRequest {
    /// Describes the music to generate.
    pub prompt: String,
    /// Target duration in seconds.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub duration_seconds: Option<i32>,
    /// Music generation model.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Finetune to generate with, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub finetune_id: Option<String>,
}
572
/// A single clip from advanced music generation.
#[derive(Debug, Clone, Deserialize)]
pub struct MusicAdvancedClip {
    /// Base64-encoded audio data.
    #[serde(default)]
    pub base64: String,
    /// Audio format (e.g. "mp3").
    #[serde(default)]
    pub format: String,
    /// File size in bytes (note: field is `size`, not `size_bytes`,
    /// matching the backend schema for this endpoint).
    #[serde(default)]
    pub size: i64,
}

/// Response from advanced music generation.
#[derive(Debug, Clone, Deserialize)]
pub struct MusicAdvancedResponse {
    /// Generated clips.
    #[serde(default)]
    pub clips: Vec<MusicAdvancedClip>,
    /// Model used.
    #[serde(default)]
    pub model: String,
    /// Total cost in ticks.
    #[serde(default)]
    pub cost_ticks: i64,
    /// Unique request identifier.
    #[serde(default)]
    pub request_id: String,
}
596
/// Music finetune info.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MusicFinetuneInfo {
    /// Finetune identifier.
    pub finetune_id: String,
    /// Human-readable finetune name.
    pub name: String,
    /// Optional description.
    #[serde(default)]
    pub description: Option<String>,
    /// Training/availability status.
    #[serde(default)]
    pub status: String,
    /// Underlying model identifier, when available.
    #[serde(default)]
    pub model_id: Option<String>,
    /// Creation timestamp, when available.
    #[serde(default)]
    pub created_at: Option<String>,
}

/// Response from listing music finetunes.
#[derive(Debug, Clone, Deserialize)]
pub struct MusicFinetuneListResponse {
    /// All finetunes belonging to the authenticated user.
    pub finetunes: Vec<MusicFinetuneInfo>,
}
617
/// Request to create a music finetune.
#[derive(Debug, Clone, Serialize)]
pub struct MusicFinetuneCreateRequest {
    /// Human-readable finetune name.
    pub name: String,
    /// Optional description.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// Training samples (presumably base64 audio or sample ids — confirm
    /// against the backend schema before relying on this).
    pub samples: Vec<String>,
}
626
/// Request body for voice design (generating a voice from a description).
#[derive(Debug, Clone, Serialize, Default)]
pub struct VoiceDesignRequest {
    /// Text description of the desired voice.
    /// Serialized on the wire as `voice_description`.
    #[serde(rename = "voice_description")]
    pub description: String,

    /// Sample text to speak with the designed voice.
    /// Serialized on the wire as `sample_text`.
    #[serde(rename = "sample_text")]
    pub text: String,

    /// Output audio format. Serialized on the wire as `format`.
    #[serde(rename = "format", skip_serializing_if = "Option::is_none")]
    pub output_format: Option<String>,
}
642
/// Request body for Starfish TTS.
#[derive(Debug, Clone, Serialize, Default)]
pub struct StarfishTTSRequest {
    /// Text to synthesise.
    pub text: String,

    /// Voice identifier.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub voice: Option<String>,

    /// Output audio format. Serialized on the wire as `format`.
    #[serde(rename = "format", skip_serializing_if = "Option::is_none")]
    pub output_format: Option<String>,

    /// Speech speed multiplier.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub speed: Option<f64>,
}
661
662// ---------------------------------------------------------------------------
663// Eleven Music (advanced music generation with sections, finetunes, etc.)
664// ---------------------------------------------------------------------------
665
/// A section within an Eleven Music generation request.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct MusicSection {
    /// Section kind (e.g. verse/chorus — confirm accepted values against
    /// the ElevenLabs API).
    pub section_type: String,
    /// Lyrics for this section.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub lyrics: Option<String>,
    /// Style to apply to this section.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub style: Option<String>,
    /// Style to explicitly avoid in this section.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub style_exclude: Option<String>,
}
677
/// Request body for advanced music generation (ElevenLabs Eleven Music).
#[derive(Debug, Clone, Serialize, Default)]
pub struct ElevenMusicRequest {
    /// Music generation model.
    pub model: String,
    /// Describes the music to generate.
    pub prompt: String,
    /// Optional per-section structure (verses, choruses, ...).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sections: Option<Vec<MusicSection>>,
    /// Target duration in seconds.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub duration_seconds: Option<i32>,
    /// Language hint.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub language: Option<String>,
    /// Whether the track should include vocals.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub vocals: Option<bool>,
    /// Overall style.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub style: Option<String>,
    /// Style to explicitly avoid.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub style_exclude: Option<String>,
    /// Finetune to generate with.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub finetune_id: Option<String>,
    /// Previous generation to edit (edit mode).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub edit_reference_id: Option<String>,
    /// Natural-language edit instruction (edit mode).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub edit_instruction: Option<String>,
}
702
/// A single music clip from advanced generation.
#[derive(Debug, Clone, Deserialize)]
pub struct ElevenMusicClip {
    /// Base64-encoded audio data.
    #[serde(default)]
    pub base64: String,
    /// Audio format (e.g. "mp3").
    #[serde(default)]
    pub format: String,
    /// File size in bytes (field is `size` per the backend schema).
    #[serde(default)]
    pub size: i64,
}

/// Response from advanced music generation.
/// Backend returns: { clips: [...], model, cost_ticks, request_id }
#[derive(Debug, Clone, Deserialize)]
pub struct ElevenMusicResponse {
    /// Generated music clips.
    #[serde(default)]
    pub clips: Vec<ElevenMusicClip>,
    /// Model used.
    #[serde(default)]
    pub model: String,
    /// Total cost in ticks.
    #[serde(default)]
    pub cost_ticks: i64,
    /// Unique request identifier.
    #[serde(default)]
    pub request_id: String,
}
734
/// Info about a music finetune.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FinetuneInfo {
    /// Finetune identifier.
    pub finetune_id: String,
    /// Human-readable finetune name.
    pub name: String,
    /// Training/availability status.
    #[serde(default)]
    pub status: String,
    /// Creation timestamp, when available.
    #[serde(default)]
    pub created_at: Option<String>,
}

/// Response from listing finetunes.
#[derive(Debug, Clone, Deserialize)]
pub struct ListFinetunesResponse {
    /// All finetunes belonging to the authenticated user.
    pub finetunes: Vec<FinetuneInfo>,
}
751
752// ---------------------------------------------------------------------------
753// Client impl
754// ---------------------------------------------------------------------------
755
756impl Client {
757    /// Generates speech from text.
758    pub async fn speak(&self, req: &TextToSpeechRequest) -> Result<TextToSpeechResponse> {
759        let (mut resp, meta) = self
760            .post_json::<TextToSpeechRequest, TextToSpeechResponse>("/qai/v1/audio/tts", req)
761            .await?;
762        if resp.cost_ticks == 0 {
763            resp.cost_ticks = meta.cost_ticks;
764        }
765        if resp.request_id.is_empty() {
766            resp.request_id = meta.request_id;
767        }
768        Ok(resp)
769    }
770
771    /// Converts speech to text.
772    pub async fn transcribe(&self, req: &SpeechToTextRequest) -> Result<SpeechToTextResponse> {
773        let (mut resp, meta) = self
774            .post_json::<SpeechToTextRequest, SpeechToTextResponse>("/qai/v1/audio/stt", req)
775            .await?;
776        if resp.cost_ticks == 0 {
777            resp.cost_ticks = meta.cost_ticks;
778        }
779        if resp.request_id.is_empty() {
780            resp.request_id = meta.request_id;
781        }
782        Ok(resp)
783    }
784
785    /// Generates sound effects from a text prompt (ElevenLabs).
786    pub async fn sound_effects(&self, req: &SoundEffectRequest) -> Result<SoundEffectResponse> {
787        let (mut resp, meta) = self
788            .post_json::<SoundEffectRequest, SoundEffectResponse>(
789                "/qai/v1/audio/sound-effects",
790                req,
791            )
792            .await?;
793        if resp.cost_ticks == 0 {
794            resp.cost_ticks = meta.cost_ticks;
795        }
796        if resp.request_id.is_empty() {
797            resp.request_id = meta.request_id;
798        }
799        Ok(resp)
800    }
801
802    /// Generates music from a text prompt.
803    pub async fn generate_music(&self, req: &MusicRequest) -> Result<MusicResponse> {
804        let (mut resp, meta) = self
805            .post_json::<MusicRequest, MusicResponse>("/qai/v1/audio/music", req)
806            .await?;
807        if resp.cost_ticks == 0 {
808            resp.cost_ticks = meta.cost_ticks;
809        }
810        if resp.request_id.is_empty() {
811            resp.request_id = meta.request_id;
812        }
813        Ok(resp)
814    }
815
816    /// Generates multi-speaker dialogue audio.
817    pub async fn dialogue(&self, req: &DialogueRequest) -> Result<AudioResponse> {
818        let (mut resp, meta) = self
819            .post_json::<DialogueRequest, AudioResponse>("/qai/v1/audio/dialogue", req)
820            .await?;
821        if resp.cost_ticks == 0 {
822            resp.cost_ticks = meta.cost_ticks;
823        }
824        if resp.request_id.is_empty() {
825            resp.request_id = meta.request_id;
826        }
827        Ok(resp)
828    }
829
830    /// Converts speech to a different voice.
831    pub async fn speech_to_speech(
832        &self,
833        req: &SpeechToSpeechRequest,
834    ) -> Result<AudioResponse> {
835        let (mut resp, meta) = self
836            .post_json::<SpeechToSpeechRequest, AudioResponse>(
837                "/qai/v1/audio/speech-to-speech",
838                req,
839            )
840            .await?;
841        if resp.cost_ticks == 0 {
842            resp.cost_ticks = meta.cost_ticks;
843        }
844        if resp.request_id.is_empty() {
845            resp.request_id = meta.request_id;
846        }
847        Ok(resp)
848    }
849
850    /// Isolates voice from background noise and music.
851    pub async fn isolate_voice(&self, req: &IsolateVoiceRequest) -> Result<AudioResponse> {
852        let (mut resp, meta) = self
853            .post_json::<IsolateVoiceRequest, AudioResponse>("/qai/v1/audio/isolate", req)
854            .await?;
855        if resp.cost_ticks == 0 {
856            resp.cost_ticks = meta.cost_ticks;
857        }
858        if resp.request_id.is_empty() {
859            resp.request_id = meta.request_id;
860        }
861        Ok(resp)
862    }
863
864    /// Remixes audio with a different voice.
865    pub async fn remix_voice(&self, req: &RemixVoiceRequest) -> Result<AudioResponse> {
866        let (mut resp, meta) = self
867            .post_json::<RemixVoiceRequest, AudioResponse>("/qai/v1/audio/remix", req)
868            .await?;
869        if resp.cost_ticks == 0 {
870            resp.cost_ticks = meta.cost_ticks;
871        }
872        if resp.request_id.is_empty() {
873            resp.request_id = meta.request_id;
874        }
875        Ok(resp)
876    }
877
878    /// Dubs audio or video into a target language.
879    pub async fn dub(&self, req: &DubRequest) -> Result<AudioResponse> {
880        let (mut resp, meta) = self
881            .post_json::<DubRequest, AudioResponse>("/qai/v1/audio/dub", req)
882            .await?;
883        if resp.cost_ticks == 0 {
884            resp.cost_ticks = meta.cost_ticks;
885        }
886        if resp.request_id.is_empty() {
887            resp.request_id = meta.request_id;
888        }
889        Ok(resp)
890    }
891
892    /// Performs forced alignment of text against audio.
893    pub async fn align(&self, req: &AlignRequest) -> Result<AlignResponse> {
894        let (mut resp, meta) = self
895            .post_json::<AlignRequest, AlignResponse>("/qai/v1/audio/align", req)
896            .await?;
897        if resp.cost_ticks == 0 {
898            resp.cost_ticks = meta.cost_ticks;
899        }
900        if resp.request_id.is_empty() {
901            resp.request_id = meta.request_id;
902        }
903        Ok(resp)
904    }
905
906    /// Designs a new voice from a text description and generates sample audio.
907    pub async fn voice_design(&self, req: &VoiceDesignRequest) -> Result<AudioResponse> {
908        let (mut resp, meta) = self
909            .post_json::<VoiceDesignRequest, AudioResponse>("/qai/v1/audio/voice-design", req)
910            .await?;
911        if resp.cost_ticks == 0 {
912            resp.cost_ticks = meta.cost_ticks;
913        }
914        if resp.request_id.is_empty() {
915            resp.request_id = meta.request_id;
916        }
917        Ok(resp)
918    }
919
920    /// Generates speech using Starfish TTS (HeyGen).
921    pub async fn starfish_tts(&self, req: &StarfishTTSRequest) -> Result<AudioResponse> {
922        let (mut resp, meta) = self
923            .post_json::<StarfishTTSRequest, AudioResponse>("/qai/v1/audio/starfish-tts", req)
924            .await?;
925        if resp.cost_ticks == 0 {
926            resp.cost_ticks = meta.cost_ticks;
927        }
928        if resp.request_id.is_empty() {
929            resp.request_id = meta.request_id;
930        }
931        Ok(resp)
932    }
933
934    /// Generates music via ElevenLabs Eleven Music (advanced: sections, finetunes, edits).
935    pub async fn generate_music_advanced(
936        &self,
937        req: &ElevenMusicRequest,
938    ) -> Result<ElevenMusicResponse> {
939        let (mut resp, meta) = self
940            .post_json::<ElevenMusicRequest, ElevenMusicResponse>(
941                "/qai/v1/audio/music/advanced",
942                req,
943            )
944            .await?;
945        if resp.cost_ticks == 0 {
946            resp.cost_ticks = meta.cost_ticks;
947        }
948        if resp.request_id.is_empty() {
949            resp.request_id = meta.request_id;
950        }
951        Ok(resp)
952    }
953
954    /// Lists all music finetunes for the authenticated user.
955    pub async fn list_finetunes(&self) -> Result<ListFinetunesResponse> {
956        let (resp, _) = self
957            .get_json::<ListFinetunesResponse>("/qai/v1/audio/finetunes")
958            .await?;
959        Ok(resp)
960    }
961
962    /// Creates a new music finetune from audio sample files.
963    pub async fn create_finetune(
964        &self,
965        name: &str,
966        files: Vec<crate::voices::CloneVoiceFile>,
967    ) -> Result<FinetuneInfo> {
968        let mut form = reqwest::multipart::Form::new().text("name", name.to_string());
969
970        for file in files {
971            let part = reqwest::multipart::Part::bytes(file.data)
972                .file_name(file.filename)
973                .mime_str(&file.mime_type)
974                .map_err(|e| crate::error::Error::Http(e.into()))?;
975            form = form.part("files", part);
976        }
977
978        let (resp, _) = self
979            .post_multipart::<FinetuneInfo>("/qai/v1/audio/finetunes", form)
980            .await?;
981        Ok(resp)
982    }
983
984    /// Deletes a music finetune by ID.
985    pub async fn delete_finetune(&self, id: &str) -> Result<serde_json::Value> {
986        let path = format!("/qai/v1/audio/finetunes/{id}");
987        let (resp, _) = self.delete_json::<serde_json::Value>(&path).await?;
988        Ok(resp)
989    }
990}