vapi_client/models/open_ai_voice.rs

/*
 * Vapi API
 *
 * Voice AI for developers.
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */

use crate::models;
use serde::{Deserialize, Serialize};

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct OpenAiVoice {
    /// This is the flag to toggle voice caching for the assistant.
    #[serde(rename = "cachingEnabled", skip_serializing_if = "Option::is_none")]
    pub caching_enabled: Option<bool>,
    /// This is the voice provider that will be used.
    #[serde(rename = "provider")]
    pub provider: ProviderTrue,
    /// This is the provider-specific voice ID that will be used.
    #[serde(rename = "voiceId")]
    pub voice_id: models::OpenAiVoiceVoiceId,
    /// This is the model that will be used for text-to-speech.
    #[serde(rename = "model", skip_serializing_if = "Option::is_none")]
    pub model: Option<ModelTrue>,
    /// This is a prompt that allows you to control the voice of your generated audio. Does not work with 'tts-1' or 'tts-1-hd' models.
    #[serde(rename = "instructions", skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,
    /// This is the speed multiplier that will be used.
    #[serde(rename = "speed", skip_serializing_if = "Option::is_none")]
    pub speed: Option<f64>,
    /// This is the plan for chunking the model output before it is sent to the voice provider.
    #[serde(rename = "chunkPlan", skip_serializing_if = "Option::is_none")]
    pub chunk_plan: Option<models::ChunkPlan>,
    /// This is the plan for voice provider fallbacks in the event that the primary voice provider fails.
    #[serde(rename = "fallbackPlan", skip_serializing_if = "Option::is_none")]
    pub fallback_plan: Option<models::FallbackPlan>,
}

impl OpenAiVoice {
    pub fn new(provider: ProviderTrue, voice_id: models::OpenAiVoiceVoiceId) -> OpenAiVoice {
        OpenAiVoice {
            caching_enabled: None,
            provider,
            voice_id,
            model: None,
            instructions: None,
            speed: None,
            chunk_plan: None,
            fallback_plan: None,
        }
    }
}
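
// Illustrative usage sketch (not part of the generated code): builds an
// OpenAiVoice with the required fields via `new()` and then sets a few of the
// optional fields directly. Assumes `models::OpenAiVoiceVoiceId` implements
// `Default`, which the `#[derive(Default)]` on `OpenAiVoice` already requires.
// The function name is hypothetical and only exists for demonstration.
#[allow(dead_code)]
fn example_openai_voice() -> OpenAiVoice {
    let mut voice =
        OpenAiVoice::new(ProviderTrue::Openai, models::OpenAiVoiceVoiceId::default());
    // Optional fields are left as `None` by `new()`; set only what is needed.
    voice.model = Some(ModelTrue::Gpt4oMiniTts);
    voice.speed = Some(1.25);
    voice.caching_enabled = Some(true);
    voice
}
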
/// This is the voice provider that will be used.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ProviderTrue {
    #[serde(rename = "openai")]
    Openai,
}

impl Default for ProviderTrue {
    fn default() -> ProviderTrue {
        Self::Openai
    }
}

/// This is the model that will be used for text-to-speech.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ModelTrue {
    #[serde(rename = "tts-1")]
    Tts1,
    #[serde(rename = "tts-1-hd")]
    Tts1Hd,
    #[serde(rename = "gpt-4o-mini-tts")]
    Gpt4oMiniTts,
}

impl Default for ModelTrue {
    fn default() -> ModelTrue {
        Self::Tts1
    }
}
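
// Illustrative serde round-trip sketch (not part of the generated code):
// checks that the `rename` attributes map the Rust identifiers to their wire
// names and that unset optional fields are omitted. Assumes `serde_json` is
// available as a dev-dependency; the assertions only cover values this file
// defines, not the provider-specific voice ID.
#[cfg(test)]
mod wire_format_example {
    use super::*;

    #[test]
    fn serializes_renamed_fields_and_skips_none() {
        let mut voice =
            OpenAiVoice::new(ProviderTrue::Openai, models::OpenAiVoiceVoiceId::default());
        voice.model = Some(ModelTrue::Gpt4oMiniTts);

        let json = serde_json::to_value(&voice).expect("serialization should succeed");
        assert_eq!(json["provider"], "openai");
        assert_eq!(json["model"], "gpt-4o-mini-tts");
        // `skip_serializing_if = "Option::is_none"` drops unset optional fields.
        assert!(json.get("cachingEnabled").is_none());
        assert!(json.get("speed").is_none());
    }
}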