// vapi_client/models/function_tool_with_tool_call.rs

/*
 * Vapi API
 *
 * Voice AI for developers.
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */

use crate::models;
use serde::{Deserialize, Serialize};

/// A `function` tool paired with the concrete tool call that invoked it.
///
/// All fields except `type` and `toolCall` are optional and are omitted from
/// the serialized JSON when unset (`skip_serializing_if = "Option::is_none"`).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct FunctionToolWithToolCall {
    /// These are the messages that will be spoken to the user as the tool is
    /// running. For some tools, this is auto-filled based on special fields
    /// like `tool.destinations`. For others like the function tool, these can
    /// be custom configured.
    #[serde(rename = "messages", skip_serializing_if = "Option::is_none")]
    pub messages: Option<Vec<models::CreateDtmfToolDtoMessagesInner>>,
    /// The type of tool. "function" for Function tool.
    // `type` is a Rust keyword, hence the raw identifier plus serde rename.
    #[serde(rename = "type")]
    pub r#type: TypeTrue,
    /// This determines if the tool is async.
    ///
    /// If async, the assistant will move forward without waiting for your
    /// server to respond. This is useful if you just want to trigger something
    /// on your server.
    ///
    /// If sync, the assistant will wait for your server to respond. This is
    /// useful if you want the assistant to respond with the result from your
    /// server.
    ///
    /// Defaults to synchronous (`false`).
    // `async` is also a Rust keyword — raw identifier plus serde rename.
    #[serde(rename = "async", skip_serializing_if = "Option::is_none")]
    pub r#async: Option<bool>,
    /// This is the server where a `tool-calls` webhook will be sent.
    ///
    /// Notes:
    /// - Webhook is sent to this server when a tool call is made.
    /// - Webhook contains the call, assistant, and phone number objects.
    /// - Webhook contains the variables set on the assistant.
    /// - Webhook is sent to the first available URL in this order:
    ///   {{tool.server.url}}, {{assistant.server.url}},
    ///   {{phoneNumber.server.url}}, {{org.server.url}}.
    /// - Webhook expects a response with tool call result.
    #[serde(rename = "server", skip_serializing_if = "Option::is_none")]
    pub server: Option<models::Server>,
    // The tool call this tool instance is associated with; always serialized.
    #[serde(rename = "toolCall")]
    pub tool_call: models::ToolCall,
    /// This is the function definition of the tool.
    ///
    /// For `endCall`, `transferCall`, and `dtmf` tools, this is auto-filled
    /// based on tool-specific fields like `tool.destinations`. But, even in
    /// those cases, you can provide a custom function definition for advanced
    /// use cases.
    ///
    /// An example of an advanced use case is if you want to customize the
    /// message that's spoken for the `endCall` tool. You can specify a function
    /// where it returns an argument "reason". Then, in the `messages` array,
    /// you can have many "request-complete" messages. One of these messages
    /// will be triggered if the `messages[].conditions` matches the "reason"
    /// argument.
    #[serde(rename = "function", skip_serializing_if = "Option::is_none")]
    pub function: Option<models::OpenAiFunction>,
}
35impl FunctionToolWithToolCall {
36    pub fn new(r#type: TypeTrue, tool_call: models::ToolCall) -> FunctionToolWithToolCall {
37        FunctionToolWithToolCall {
38            messages: None,
39            r#type,
40            r#async: None,
41            server: None,
42            tool_call,
43            function: None,
44        }
45    }
46}
/// The type of tool. "function" for Function tool.
///
/// Single-variant enum: the API only ever carries the literal string
/// `"function"` for this field.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum TypeTrue {
    /// Serialized to / deserialized from the wire value `"function"`.
    #[serde(rename = "function")]
    Function,
}
54impl Default for TypeTrue {
55    fn default() -> TypeTrue {
56        Self::Function
57    }
58}