vapi_client/models/fallback_open_ai_voice.rs

/*
 * Vapi API
 *
 * Voice AI for developers.
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */

use crate::models;
use serde::{Deserialize, Serialize};

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct FallbackOpenAiVoice {
    /// This is the flag to toggle voice caching for the assistant.
    #[serde(rename = "cachingEnabled", skip_serializing_if = "Option::is_none")]
    pub caching_enabled: Option<bool>,
    /// This is the voice provider that will be used.
    #[serde(rename = "provider")]
    pub provider: ProviderTrue,
    #[serde(rename = "voiceId")]
    pub voice_id: models::OpenAiVoiceVoiceId,
    /// This is the model that will be used for text-to-speech.
    #[serde(rename = "model", skip_serializing_if = "Option::is_none")]
    pub model: Option<ModelTrue>,
    /// This is a prompt that allows you to control the voice of your generated audio. Does not work with 'tts-1' or 'tts-1-hd' models.
    #[serde(rename = "instructions", skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,
    /// This is the speed multiplier that will be used.
    #[serde(rename = "speed", skip_serializing_if = "Option::is_none")]
    pub speed: Option<f64>,
    /// This is the plan for chunking the model output before it is sent to the voice provider.
    #[serde(rename = "chunkPlan", skip_serializing_if = "Option::is_none")]
    pub chunk_plan: Option<models::ChunkPlan>,
}

impl FallbackOpenAiVoice {
    pub fn new(
        provider: ProviderTrue,
        voice_id: models::OpenAiVoiceVoiceId,
    ) -> FallbackOpenAiVoice {
        FallbackOpenAiVoice {
            caching_enabled: None,
            provider,
            voice_id,
            model: None,
            instructions: None,
            speed: None,
            chunk_plan: None,
        }
    }
}

/// This is the voice provider that will be used.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ProviderTrue {
    #[serde(rename = "openai")]
    Openai,
}

impl Default for ProviderTrue {
    fn default() -> ProviderTrue {
        Self::Openai
    }
}

/// This is the model that will be used for text-to-speech.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ModelTrue {
    #[serde(rename = "tts-1")]
    Tts1,
    #[serde(rename = "tts-1-hd")]
    Tts1Hd,
    #[serde(rename = "gpt-4o-mini-tts")]
    Gpt4oMiniTts,
}

impl Default for ModelTrue {
    fn default() -> ModelTrue {
        Self::Tts1
    }
}
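
// Illustrative usage sketch (not part of the generated file): the test below
// assumes `serde_json` is available to the crate, as is typical for
// openapi-generator Rust clients, and uses `OpenAiVoiceVoiceId::default()`
// only because the concrete voice-ID variants live in a sibling generated module.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn serializes_with_camel_case_keys_and_skips_unset_options() {
        // `new` takes only the required fields; every optional field starts as `None`.
        let mut voice =
            FallbackOpenAiVoice::new(ProviderTrue::Openai, models::OpenAiVoiceVoiceId::default());
        voice.speed = Some(1.25);

        let json = serde_json::to_value(&voice).expect("model should serialize");

        // Keys follow the `#[serde(rename = ...)]` attributes.
        assert_eq!(json["provider"], "openai");
        assert_eq!(json["speed"], 1.25);
        // Options left as `None` are omitted entirely via `skip_serializing_if`.
        assert!(json.get("model").is_none());
        assert!(json.get("cachingEnabled").is_none());
    }
}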