vapi_client/models/server_message_voice_request.rs
/*
 * Vapi API
 *
 * API for building voice assistants
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */

use serde::{Deserialize, Serialize};
use utoipa::ToSchema;

use crate::models;

#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct ServerMessageVoiceRequest {
    #[serde(rename = "phoneNumber", skip_serializing_if = "Option::is_none")]
    pub phone_number: Option<models::ServerMessageAssistantRequestPhoneNumber>,
    /// This is the type of the message. "voice-request" is sent when using
    /// `assistant.voice={ "type": "custom-voice" }`.
    ///
    /// Here is what the request will look like:
    ///
    /// ```text
    /// POST https://{assistant.voice.server.url}
    /// Content-Type: application/json
    ///
    /// {
    ///   "message": {
    ///     "type": "voice-request",
    ///     "text": "Hello, world!",
    ///     "sampleRate": 24000,
    ///     ...other metadata about the call...
    ///   }
    /// }
    /// ```
    ///
    /// The expected response is 1-channel 16-bit raw PCM audio at the sample rate
    /// specified in the request. Here is how the response will be piped to the
    /// transport:
    ///
    /// ```text
    /// response.on('data', (chunk: Buffer) => { outputStream.write(chunk); });
    /// ```
    #[serde(rename = "type")]
    pub r#type: Type,
    /// This is the ISO-8601 formatted timestamp of when the message was sent.
    #[serde(rename = "timestamp", skip_serializing_if = "Option::is_none")]
    pub timestamp: Option<f64>,
    /// This is a live version of the `call.artifact`. This matches what is stored
    /// on `call.artifact` after the call.
    #[serde(rename = "artifact", skip_serializing_if = "Option::is_none")]
    pub artifact: Option<models::Artifact>,
    /// This is the assistant that is currently active. This is provided for
    /// convenience.
    ///
    /// This matches one of the following:
    /// - `call.assistant`
    /// - `call.assistantId`
    /// - `call.squad[n].assistant`
    /// - `call.squad[n].assistantId`
    /// - `call.squadId->[n].assistant`
    /// - `call.squadId->[n].assistantId`
    #[serde(rename = "assistant", skip_serializing_if = "Option::is_none")]
    pub assistant: Option<models::CreateAssistantDto>,
    /// This is the customer associated with the call.
    ///
    /// This matches one of the following:
    /// - `call.customer`
    /// - `call.customerId`
    #[serde(rename = "customer", skip_serializing_if = "Option::is_none")]
    pub customer: Option<models::CreateCustomerDto>,
    /// This is the call object. This matches what was returned in POST /call.
    ///
    /// Note: This might get stale during the call. To get the latest call object,
    /// especially after the call is ended, use GET /call/:id.
    #[serde(rename = "call", skip_serializing_if = "Option::is_none")]
    pub call: Option<models::Call>,
    /// This is the text to be synthesized.
    #[serde(rename = "text")]
    pub text: String,
    /// This is the sample rate to be synthesized.
    #[serde(rename = "sampleRate")]
    pub sample_rate: f64,
}

impl ServerMessageVoiceRequest {
    pub fn new(r#type: Type, text: String, sample_rate: f64) -> ServerMessageVoiceRequest {
        ServerMessageVoiceRequest {
            phone_number: None,
            r#type,
            timestamp: None,
            artifact: None,
            assistant: None,
            customer: None,
            call: None,
            text,
            sample_rate,
        }
    }
}
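
// A minimal serialization sketch, not part of the generated model: it assumes
// `serde_json` is available as a dev-dependency (a common companion to serde,
// though not declared in this file) and shows how the serde attributes above
// shape the wire format: camelCase keys and omission of unset optional fields.
#[cfg(test)]
mod serialization_sketch {
    use super::*;

    #[test]
    fn serializes_to_camel_case_and_skips_unset_optionals() {
        let message = ServerMessageVoiceRequest::new(
            Type::VoiceRequest,
            "Hello, world!".to_string(),
            24000.0,
        );
        let json = serde_json::to_value(&message).expect("model should serialize");

        // `rename` maps the Rust field names onto the API's camelCase keys.
        assert_eq!(json["type"], "voice-request");
        assert_eq!(json["text"], "Hello, world!");
        assert_eq!(json["sampleRate"], 24000.0);
        // `skip_serializing_if = "Option::is_none"` drops unset fields entirely.
        assert!(json.get("timestamp").is_none());
        assert!(json.get("call").is_none());
    }
}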
/// This is the type of the message. "voice-request" is sent when using
/// `assistant.voice={ "type": "custom-voice" }`.
///
/// See the documentation on [`ServerMessageVoiceRequest`]'s `type` field for the
/// full request/response contract.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, ToSchema)]
pub enum Type {
    #[serde(rename = "voice-request")]
    VoiceRequest,
}

impl Default for Type {
    fn default() -> Type {
        Self::VoiceRequest
    }
}
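
// A minimal handling sketch, not part of the generated model: it deserializes a
// payload shaped like the documented voice-request body and derives the response
// byte rate for 1-channel 16-bit PCM. The inline JSON and the byte-rate
// arithmetic are illustrative assumptions, not a full voice server.
#[cfg(test)]
mod voice_request_sketch {
    use super::*;

    #[test]
    fn parses_incoming_payload_and_sizes_pcm_response() {
        // Only the required fields of the documented POST body are included;
        // the optional fields deserialize to `None` when absent.
        let payload = r#"{
            "type": "voice-request",
            "text": "Hello, world!",
            "sampleRate": 24000
        }"#;
        let request: ServerMessageVoiceRequest =
            serde_json::from_str(payload).expect("payload should deserialize");

        assert_eq!(request.r#type, Type::VoiceRequest);
        assert_eq!(request.text, "Hello, world!");

        // 1 channel of 16-bit samples is 2 bytes per sample, so one second of
        // audio in the expected response is `sampleRate * 2` bytes.
        let bytes_per_second = request.sample_rate as usize * 2;
        assert_eq!(bytes_per_second, 48_000);
    }
}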