1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
/*
* Windmill API
*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* The version of the OpenAPI document: 1.681.0
* Contact: contact@windmill.dev
* Generated by: https://openapi-generator.tech
*/
use crate::models;
use serde::{Deserialize, Serialize};
/// AiAgentInputTransforms : Input parameters for the AI agent mapped to their values
///
/// Each field is a serde-(de)serializable transform; required fields are plain
/// `Box`ed values, optional ones are `Option<Box<...>>` and are omitted from the
/// serialized output when `None` (`skip_serializing_if`).
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct AiAgentInputTransforms {
/// Provider configuration for the agent. NOTE(review): semantics inferred from
/// the field name and [`models::ProviderTransform`] type — confirm against the API spec.
#[serde(rename = "provider")]
pub provider: Box<models::ProviderTransform>,
/// Output format type. Valid values: 'text' (default) - plain text response, 'image' - image generation
#[serde(rename = "output_type")]
pub output_type: Box<models::InputTransform>,
/// The user's prompt/message to the AI agent. Supports variable interpolation with flow.input syntax.
#[serde(rename = "user_message")]
pub user_message: Box<models::InputTransform>,
/// System instructions that guide the AI's behavior, persona, and response style. Optional.
#[serde(rename = "system_prompt", skip_serializing_if = "Option::is_none")]
pub system_prompt: Option<Box<models::InputTransform>>,
/// Boolean. If true, stream the AI response incrementally. Streaming events include: token_delta, tool_call, tool_call_arguments, tool_execution, tool_result
#[serde(rename = "streaming", skip_serializing_if = "Option::is_none")]
pub streaming: Option<Box<models::InputTransform>>,
/// Conversation-memory configuration for the agent. Optional. NOTE(review):
/// semantics inferred from the field name and [`models::MemoryTransform`] type — confirm against the API spec.
#[serde(rename = "memory", skip_serializing_if = "Option::is_none")]
pub memory: Option<Box<models::MemoryTransform>>,
/// JSON Schema object defining structured output format. Used when you need the AI to return data in a specific shape. Supports standard JSON Schema properties: type, properties, required, items, enum, pattern, minLength, maxLength, minimum, maximum, etc. Example: { type: 'object', properties: { name: { type: 'string' }, age: { type: 'integer' } }, required: ['name'] }
#[serde(rename = "output_schema", skip_serializing_if = "Option::is_none")]
pub output_schema: Option<Box<models::InputTransform>>,
/// Array of file references (images or PDFs) for the AI agent. Format: Array<{ bucket: string, key: string }> - S3 object references Example: [{ bucket: 'my-bucket', key: 'documents/report.pdf' }]
#[serde(rename = "user_attachments", skip_serializing_if = "Option::is_none")]
pub user_attachments: Option<Box<models::InputTransform>>,
/// Integer. Maximum number of tokens the AI will generate in its response. Range: 1 to 4,294,967,295. Typical values: 256-4096 for most use cases.
#[serde(rename = "max_completion_tokens", skip_serializing_if = "Option::is_none")]
pub max_completion_tokens: Option<Box<models::InputTransform>>,
/// Float. Controls randomness/creativity of responses. Range: 0.0 to 2.0 (provider-dependent) - 0.0 = deterministic, focused responses - 0.7 = balanced (common default) - 1.0+ = more creative/random
#[serde(rename = "temperature", skip_serializing_if = "Option::is_none")]
pub temperature: Option<Box<models::InputTransform>>,
}
impl AiAgentInputTransforms {
    /// Builds the input-transform set for an AI agent from the three required
    /// transforms (`provider`, `output_type`, `user_message`).
    ///
    /// Every optional field (`system_prompt`, `streaming`, `memory`,
    /// `output_schema`, `user_attachments`, `max_completion_tokens`,
    /// `temperature`) starts out unset (`None`) and can be assigned afterwards.
    pub fn new(provider: models::ProviderTransform, output_type: models::InputTransform, user_message: models::InputTransform) -> AiAgentInputTransforms {
        // Required transforms are heap-allocated up front; the struct stores them boxed.
        let provider = Box::new(provider);
        let output_type = Box::new(output_type);
        let user_message = Box::new(user_message);
        AiAgentInputTransforms {
            provider,
            output_type,
            user_message,
            system_prompt: None,
            streaming: None,
            memory: None,
            output_schema: None,
            user_attachments: None,
            max_completion_tokens: None,
            temperature: None,
        }
    }
}