//! genai_types/messages.rs — message, completion, and proxy protocol types.

1use crate::models::ModelInfo;
2use crate::tool_choice::ToolChoice;
3use mcp_protocol::tool::{Tool, ToolContent};
4use serde::{Deserialize, Serialize};
5
6/// Different types of content that can be in a message
/// Different types of content that can be in a message.
///
/// Internally tagged on the wire: the `"type"` field selects the variant
/// (`"text"`, `"tool_use"`, `"tool_result"`) and the remaining fields sit
/// alongside it in the same JSON object.
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum MessageContent {
    /// Plain text content.
    Text {
        text: String,
    },

    /// A request by the model to invoke a tool.
    ToolUse {
        /// Identifier used to correlate the matching `ToolResult`.
        id: String,
        /// Name of the tool to invoke.
        name: String,
        /// Tool arguments as free-form JSON.
        input: serde_json::Value,
    },

    /// The outcome of a previously requested tool invocation.
    ToolResult {
        /// The `id` of the `ToolUse` this result answers.
        tool_use_id: String,
        /// Output blocks produced by the tool.
        content: Vec<ToolContent>,
        /// Whether the invocation failed; presumably `None` means success —
        /// TODO confirm against consumers.
        is_error: Option<bool>,
    },
}
26
27/// A single message in a conversation with Claude
/// A single message in a conversation with Claude.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Message {
    /// Role of the message sender (user, assistant, system).
    /// Kept as a free-form string rather than an enum.
    pub role: String,

    /// Content of the message as a vector of [`MessageContent`] blocks.
    pub content: Vec<MessageContent>,
}
36
37impl Message {
38    /// Create a new message with structured content
39    pub fn new_structured(role: impl Into<String>, content: Vec<MessageContent>) -> Self {
40        Self {
41            role: role.into(),
42            content,
43        }
44    }
45}
46
47/// Request to generate a completion from Claude
/// Request to generate a completion from Claude.
///
/// NOTE(review): `None` optional fields serialize as explicit JSON `null`
/// rather than being omitted (no `skip_serializing_if`) — confirm the
/// receiving API accepts nulls before changing this.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct CompletionRequest {
    /// The Claude model to use.
    pub model: String,

    /// List of messages in the conversation, oldest first.
    pub messages: Vec<Message>,

    /// Maximum number of tokens to generate.
    pub max_tokens: u32,

    /// Sampling temperature (0.0 to 1.0); `None` uses the API default.
    pub temperature: Option<f32>,

    /// System prompt to use, if any.
    pub system: Option<String>,

    /// Tools to make available to Claude, if any.
    pub tools: Option<Vec<Tool>>,

    /// Tool choice configuration, if any.
    pub tool_choice: Option<ToolChoice>,

    /// Whether to disable parallel tool use; `None` uses the API default.
    pub disable_parallel_tool_use: Option<bool>,
}
74
75/// Information about token usage
/// Information about token usage for a single request/response pair.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Usage {
    /// Number of tokens consumed by the input (prompt) side.
    pub input_tokens: u32,

    /// Number of tokens generated in the output.
    pub output_tokens: u32,
}
82
83/// Response from a completion request
/// Response from a completion request.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct CompletionResponse {
    /// Generated content blocks.
    pub content: Vec<MessageContent>,

    /// ID of the message.
    pub id: String,

    /// Model used for generation.
    pub model: String,

    /// Role of the message sender; always "assistant" for responses.
    pub role: String,

    /// Why generation stopped.
    pub stop_reason: StopReason,

    /// The stop sequence that ended generation, when one was hit.
    pub stop_sequence: Option<String>,

    /// Message type (serialized as the JSON field `"type"`).
    #[serde(rename = "type")]
    pub message_type: String,

    /// Token usage information.
    pub usage: Usage,
}
109
110/// Reason why generation stopped
111#[derive(Serialize, Deserialize, Debug, Clone)]
112pub enum StopReason {
113    /// Generation stopped because the end of a turn was reached
114    #[serde(rename = "end_turn")]
115    EndTurn,
116
117    /// Generation stopped because the maximum token limit was reached
118    #[serde(rename = "max_tokens")]
119    MaxTokens,
120
121    /// Generation stopped because a stop sequence was encountered
122    #[serde(rename = "stop_sequence")]
123    StopSequence,
124
125    /// Generation stopped because a tool was used
126    #[serde(rename = "tool_use")]
127    ToolUse,
128}
129
130/// Request format for the anthropic-proxy actor
/// Request format for the anthropic-proxy actor.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum ProxyRequest {
    /// Ask the proxy for the list of available models.
    ListModels,

    /// Ask the proxy to generate a completion for the given request.
    GenerateCompletion { request: CompletionRequest },
}
137
138/// Response format from the anthropic-proxy actor
/// Response format from the anthropic-proxy actor.
///
/// Errors are carried in-band as the `Error` variant rather than as a
/// `Result`, so callers must match on the variant.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum ProxyResponse {
    /// List of available models.
    ListModels { models: Vec<ModelInfo> },

    /// Generated completion.
    Completion { completion: CompletionResponse },

    /// Error response with a human-readable message.
    Error { error: String },
}