// vapi_client/models/fallback_open_ai_voice.rs
/*
 * Vapi API
 *
 * Voice AI for developers.
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */
10
11use crate::models;
12use serde::{Deserialize, Serialize};
13
/// Fallback voice configuration for the OpenAI text-to-speech provider.
///
/// Serialized with camelCase wire names via per-field `serde(rename)`;
/// optional fields are omitted from the JSON payload when `None`.
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct FallbackOpenAiVoice {
    /// This is the voice provider that will be used.
    #[serde(rename = "provider")]
    pub provider: Provider,
    /// The voice to use; allowed values are defined by [`models::OpenAiVoiceVoiceId`].
    #[serde(rename = "voiceId")]
    pub voice_id: models::OpenAiVoiceVoiceId,
    /// This is the model that will be used for text-to-speech.
    #[serde(rename = "model", skip_serializing_if = "Option::is_none")]
    pub model: Option<Model>,
    /// This is a prompt that allows you to control the voice of your generated audio. Does not work with 'tts-1' or 'tts-1-hd' models.
    #[serde(rename = "instructions", skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,
    /// This is the speed multiplier that will be used.
    #[serde(rename = "speed", skip_serializing_if = "Option::is_none")]
    pub speed: Option<f64>,
    /// This is the plan for chunking the model output before it is sent to the voice provider.
    #[serde(rename = "chunkPlan", skip_serializing_if = "Option::is_none")]
    pub chunk_plan: Option<models::ChunkPlan>,
}
34
35impl FallbackOpenAiVoice {
36    pub fn new(provider: Provider, voice_id: models::OpenAiVoiceVoiceId) -> FallbackOpenAiVoice {
37        FallbackOpenAiVoice {
38            provider,
39            voice_id,
40            model: None,
41            instructions: None,
42            speed: None,
43            chunk_plan: None,
44        }
45    }
46}
/// This is the voice provider that will be used.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Provider {
    /// Serialized on the wire as `"openai"`; the only provider this fallback supports.
    #[serde(rename = "openai")]
    Openai,
}
53
54impl Default for Provider {
55    fn default() -> Provider {
56        Self::Openai
57    }
58}
/// This is the model that will be used for text-to-speech.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Model {
    /// Serialized as `"tts-1"`.
    #[serde(rename = "tts-1")]
    Tts1,
    /// Serialized as `"tts-1-hd"`.
    #[serde(rename = "tts-1-hd")]
    Tts1Hd,
    /// Serialized as `"gpt-4o-mini-tts"`.
    #[serde(rename = "gpt-4o-mini-tts")]
    Gpt4oMiniTts,
}
69
70impl Default for Model {
71    fn default() -> Model {
72        Self::Tts1
73    }
74}
75