// vapi_client/models/server_message_voice_request.rs
1/*
2 * Vapi API
3 *
4 * Voice AI for developers.
5 *
6 * The version of the OpenAPI document: 1.0
7 *
8 * Generated by: https://openapi-generator.tech
9 */
10
11use crate::models;
12use serde::{Deserialize, Serialize};
13
/// Payload delivered to a custom voice server: asks it to synthesize `text` as raw PCM audio
/// at `sample_rate`. Optional fields are live call metadata and are omitted from the JSON
/// payload when `None`.
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerMessageVoiceRequest {
    /// The phone number associated with the call, if any.
    #[serde(rename = "phoneNumber", skip_serializing_if = "Option::is_none")]
    pub phone_number: Option<Box<models::ServerMessageAssistantRequestPhoneNumber>>,
    /// This is the type of the message. "voice-request" is sent when using `assistant.voice={ "type": "custom-voice" }`. Here is what the request will look like: POST https://{assistant.voice.server.url} Content-Type: application/json { "message": { "type": "voice-request", "text": "Hello, world!", "sampleRate": 24000, ...other metadata about the call... } } The expected response is 1-channel 16-bit raw PCM audio at the sample rate specified in the request. Here is how the response will be piped to the transport: ``` response.on('data', (chunk: Buffer) => { outputStream.write(chunk); }); ```
    #[serde(rename = "type")]
    pub r#type: Type,
    /// This is the ISO-8601 formatted timestamp of when the message was sent.
    #[serde(rename = "timestamp", skip_serializing_if = "Option::is_none")]
    pub timestamp: Option<String>,
    /// This is a live version of the `call.artifact`. This matches what is stored on `call.artifact` after the call.
    #[serde(rename = "artifact", skip_serializing_if = "Option::is_none")]
    pub artifact: Option<Box<models::Artifact>>,
    /// This is the assistant that is currently active. This is provided for convenience. This matches one of the following: - `call.assistant`, - `call.assistantId`, - `call.squad[n].assistant`, - `call.squad[n].assistantId`, - `call.squadId->[n].assistant`, - `call.squadId->[n].assistantId`.
    #[serde(rename = "assistant", skip_serializing_if = "Option::is_none")]
    pub assistant: Option<Box<models::CreateAssistantDto>>,
    /// This is the customer associated with the call. This matches one of the following: - `call.customer`, - `call.customerId`.
    #[serde(rename = "customer", skip_serializing_if = "Option::is_none")]
    pub customer: Option<Box<models::CreateCustomerDto>>,
    /// This is the call object. This matches what was returned in POST /call. Note: This might get stale during the call. To get the latest call object, especially after the call is ended, use GET /call/:id.
    #[serde(rename = "call", skip_serializing_if = "Option::is_none")]
    pub call: Option<Box<models::Call>>,
    /// This is the text to be synthesized.
    #[serde(rename = "text")]
    pub text: String,
    /// This is the sample rate to be synthesized.
    // NOTE(review): serialized as a JSON number; the documented example uses an integral
    // value (24000), but the wire type here is f64 — confirm against the Vapi schema.
    #[serde(rename = "sampleRate")]
    pub sample_rate: f64,
}
43
44impl ServerMessageVoiceRequest {
45 pub fn new(r#type: Type, text: String, sample_rate: f64) -> ServerMessageVoiceRequest {
46 ServerMessageVoiceRequest {
47 phone_number: None,
48 r#type,
49 timestamp: None,
50 artifact: None,
51 assistant: None,
52 customer: None,
53 call: None,
54 text,
55 sample_rate,
56 }
57 }
58}
/// This is the type of the message. "voice-request" is sent when using `assistant.voice={ "type": "custom-voice" }`. Here is what the request will look like: POST https://{assistant.voice.server.url} Content-Type: application/json { "message": { "type": "voice-request", "text": "Hello, world!", "sampleRate": 24000, ...other metadata about the call... } } The expected response is 1-channel 16-bit raw PCM audio at the sample rate specified in the request. Here is how the response will be piped to the transport: ``` response.on('data', (chunk: Buffer) => { outputStream.write(chunk); }); ```
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Type {
    /// Serialized on the wire as the string `"voice-request"`.
    #[serde(rename = "voice-request")]
    VoiceRequest,
}
65
66impl Default for Type {
67 fn default() -> Type {
68 Self::VoiceRequest
69 }
70}
71