//! genai_types/messages.rs — message, completion, and proxy types for the Anthropic proxy actor.

1use crate::models::ModelInfo;
2use crate::tool_choice::ToolChoice;
3use mcp_protocol::tool::{Tool, ToolContent};
4use serde::{Deserialize, Serialize};
5
6/// Different types of content that can be in a message
7#[derive(Serialize, Deserialize, Debug, Clone)]
8pub enum MessageContent {
9    Text {
10        text: String,
11    },
12
13    ToolUse {
14        id: String,
15        name: String,
16        input: serde_json::Value,
17    },
18
19    ToolResult {
20        tool_use_id: String,
21        content: Vec<ToolContent>,
22        is_error: Option<bool>,
23    },
24}
25
/// A single message in a conversation with Claude
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Message {
    /// Role of the message sender (user, assistant, system)
    // NOTE(review): stringly-typed by design here; an enum would break the
    // existing serialized format, so any validation is left to the caller.
    pub role: String,

    /// Content of the message as vector of MessageContent objects
    pub content: Vec<MessageContent>,
}
35
36impl Message {
37    /// Create a new message with structured content
38    pub fn new_structured(role: impl Into<String>, content: Vec<MessageContent>) -> Self {
39        Self {
40            role: role.into(),
41            content,
42        }
43    }
44}
45
46/// Request to generate a completion from Claude
47#[derive(Serialize, Deserialize, Debug, Clone)]
48pub struct CompletionRequest {
49    /// The Claude model to use
50    pub model: String,
51
52    /// List of messages in the conversation
53    pub messages: Vec<Message>,
54
55    /// Maximum number of tokens to generate
56    pub max_tokens: u32,
57
58    /// Temperature parameter (0.0 to 1.0)
59    pub temperature: Option<f32>,
60
61    /// System prompt to use
62    pub system: Option<String>,
63
64    /// Tools to make available to Claude
65    pub tools: Option<Vec<Tool>>,
66
67    /// Tool choice configuration
68    pub tool_choice: Option<ToolChoice>,
69
70    /// Whether to disable parallel tool use
71    pub disable_parallel_tool_use: Option<bool>,
72}
73
/// Information about token usage
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Usage {
    /// Number of tokens consumed by the request input
    pub input_tokens: u32,

    /// Number of tokens produced in the generated output
    pub output_tokens: u32,
}
81
/// Response from a completion request
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct CompletionResponse {
    /// Generated content blocks
    pub content: Vec<MessageContent>,

    /// ID of the message
    pub id: String,

    /// Model used for generation
    pub model: String,

    /// Role of the responder; always "assistant"
    pub role: String,

    /// Why generation stopped
    // NOTE(review): the upstream API documents stop_reason as nullable while
    // streaming; a null here would fail deserialization. Making this
    // `Option<StopReason>` would break existing callers — confirm whether
    // this type is ever used on streaming responses.
    pub stop_reason: StopReason,

    /// The custom stop sequence that was hit, if any
    pub stop_sequence: Option<String>,

    /// Message type
    #[serde(rename = "type")]
    pub message_type: String,

    /// Token usage information
    pub usage: Usage,
}
108
109/// Reason why generation stopped
110#[derive(Serialize, Deserialize, Debug, Clone)]
111pub enum StopReason {
112    /// Generation stopped because the end of a turn was reached
113    #[serde(rename = "end_turn")]
114    EndTurn,
115
116    /// Generation stopped because the maximum token limit was reached
117    #[serde(rename = "max_tokens")]
118    MaxTokens,
119
120    /// Generation stopped because a stop sequence was encountered
121    #[serde(rename = "stop_sequence")]
122    StopSequence,
123
124    /// Generation stopped because a tool was used
125    #[serde(rename = "tool_use")]
126    ToolUse,
127}
128
/// Request format for the anthropic-proxy actor
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum ProxyRequest {
    /// Ask the proxy for the list of available models
    ListModels,

    /// Ask the proxy to forward a completion request to the API
    GenerateCompletion { request: CompletionRequest },
}
136
/// Response format from the anthropic-proxy actor
// NOTE(review): variants pair with ProxyRequest — ListModels answers
// ProxyRequest::ListModels, Completion answers GenerateCompletion; Error is
// returned in place of either on failure.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum ProxyResponse {
    /// List of available models
    ListModels { models: Vec<ModelInfo> },

    /// Generated completion
    Completion { completion: CompletionResponse },

    /// Error response
    Error { error: String },
}