// turbomcp_protocol/types/sampling.rs

1//! LLM sampling types (MCP 2025-06-18)
2//!
3//! This module contains types for server-initiated LLM sampling,
4//! allowing servers to request LLM interactions from clients.
5
6use serde::{Deserialize, Serialize};
7use std::collections::HashMap;
8
9use super::{content::Content, core::Role};
10
/// Include context options for sampling
///
/// Controls how much MCP server context the client should make available
/// to the LLM when fulfilling a sampling request. Serializes in camelCase
/// per MCP 2025-06-18: `"none"`, `"thisServer"`, `"allServers"`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub enum IncludeContext {
    /// Do not include any server context
    None,
    /// Include context from the requesting server only
    ThisServer,
    /// Include context from all connected servers
    AllServers,
}
22
/// Sampling message structure
///
/// A single turn in the conversation passed to the LLM: a role
/// (user/assistant) paired with content.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SamplingMessage {
    /// Message role
    pub role: Role,
    /// Message content
    pub content: Content,
    /// Optional message metadata
    ///
    /// NOTE(review): `metadata` is not part of the MCP 2025-06-18
    /// `SamplingMessage` schema — appears to be a local extension; it is
    /// omitted from the wire format when `None`. Confirm against spec.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, serde_json::Value>>,
}
34
35/// Create message request (for LLM sampling)
36#[derive(Debug, Clone, Serialize, Deserialize)]
37pub struct CreateMessageRequest {
38    /// Messages to include in the sampling request
39    pub messages: Vec<SamplingMessage>,
40    /// Model preferences (optional)
41    #[serde(skip_serializing_if = "Option::is_none")]
42    pub model_preferences: Option<ModelPreferences>,
43    /// System prompt (optional)
44    #[serde(rename = "systemPrompt", skip_serializing_if = "Option::is_none")]
45    pub system_prompt: Option<String>,
46    /// Include context from other servers
47    #[serde(rename = "includeContext", skip_serializing_if = "Option::is_none")]
48    pub include_context: Option<IncludeContext>,
49    /// Temperature for sampling (0.0 to 2.0)
50    #[serde(skip_serializing_if = "Option::is_none")]
51    pub temperature: Option<f64>,
52    /// Maximum number of tokens to generate (required by MCP spec)
53    #[serde(rename = "maxTokens")]
54    pub max_tokens: u32,
55    /// Stop sequences
56    #[serde(rename = "stopSequences", skip_serializing_if = "Option::is_none")]
57    pub stop_sequences: Option<Vec<String>>,
58    /// Optional metadata per MCP 2025-06-18 specification
59    #[serde(skip_serializing_if = "Option::is_none")]
60    pub _meta: Option<serde_json::Value>,
61}
62
63/// Model hint for selection (MCP 2025-06-18 compliant)
64#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
65pub struct ModelHint {
66    /// Model name hint (substring matching)
67    /// Examples: "claude-3-5-sonnet", "sonnet", "claude"
68    #[serde(skip_serializing_if = "Option::is_none")]
69    pub name: Option<String>,
70}
71
72impl ModelHint {
73    /// Create a new model hint with a name
74    pub fn new(name: impl Into<String>) -> Self {
75        Self {
76            name: Some(name.into()),
77        }
78    }
79}
80
81/// Model preferences for sampling (MCP 2025-06-18 compliant)
82///
83/// The spec changed from tier-based to priority-based system.
84/// Priorities are 0.0-1.0 where 0 = not important, 1 = most important.
85#[derive(Debug, Clone, Serialize, Deserialize)]
86pub struct ModelPreferences {
87    /// Optional hints for model selection (evaluated in order)
88    #[serde(skip_serializing_if = "Option::is_none")]
89    pub hints: Option<Vec<ModelHint>>,
90
91    /// Cost priority (0.0 = not important, 1.0 = most important)
92    #[serde(rename = "costPriority", skip_serializing_if = "Option::is_none")]
93    pub cost_priority: Option<f64>,
94
95    /// Speed priority (0.0 = not important, 1.0 = most important)
96    #[serde(rename = "speedPriority", skip_serializing_if = "Option::is_none")]
97    pub speed_priority: Option<f64>,
98
99    /// Intelligence priority (0.0 = not important, 1.0 = most important)
100    #[serde(
101        rename = "intelligencePriority",
102        skip_serializing_if = "Option::is_none"
103    )]
104    pub intelligence_priority: Option<f64>,
105}
106
107/// Create message result
108#[derive(Debug, Clone, Serialize, Deserialize)]
109pub struct CreateMessageResult {
110    /// The role of the message (required by MCP specification)
111    pub role: super::core::Role,
112    /// The generated message content
113    pub content: Content,
114    /// Model used for generation (required by MCP specification)
115    pub model: String,
116    /// Stop reason (if applicable)
117    ///
118    /// Uses the StopReason enum with camelCase serialization for MCP 2025-06-18 compliance.
119    #[serde(rename = "stopReason", skip_serializing_if = "Option::is_none")]
120    pub stop_reason: Option<StopReason>,
121    /// Optional metadata per MCP 2025-06-18 specification
122    #[serde(skip_serializing_if = "Option::is_none")]
123    pub _meta: Option<serde_json::Value>,
124}
125
/// Stop reason for generation
///
/// Per MCP 2025-06-18 spec, these values use camelCase serialization for
/// interoperability (e.g. `"endTurn"`, `"maxTokens"`).
///
/// NOTE(review): this enum is closed — a stopReason string outside these
/// five values will fail deserialization. Confirm whether the spec allows
/// free-form stop reasons; if so, a catch-all variant may be needed.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)]
#[serde(rename_all = "camelCase")]
pub enum StopReason {
    /// Generation completed naturally
    EndTurn,
    /// Hit maximum token limit
    MaxTokens,
    /// Hit a stop sequence
    StopSequence,
    /// Content filtering triggered
    ContentFilter,
    /// Tool use required
    ToolUse,
}
143
144/// Usage statistics for sampling
145#[derive(Debug, Clone, Serialize, Deserialize)]
146pub struct UsageStats {
147    /// Input tokens consumed
148    #[serde(rename = "inputTokens", skip_serializing_if = "Option::is_none")]
149    pub input_tokens: Option<u32>,
150    /// Output tokens generated
151    #[serde(rename = "outputTokens", skip_serializing_if = "Option::is_none")]
152    pub output_tokens: Option<u32>,
153    /// Total tokens used
154    #[serde(rename = "totalTokens", skip_serializing_if = "Option::is_none")]
155    pub total_tokens: Option<u32>,
156}