/// Parameters for a `sampling/createMessage` request.
///
/// Mirrors the JSON schema on this page: `maxTokens` and `messages` are the
/// only required wire-format properties; every other field is optional
/// (`Option<...>` or a possibly-empty `Vec`).
pub struct CreateMessageRequestParams {
/// A request to include context from one or more MCP servers (including the
/// caller), to be attached to the prompt. The client MAY ignore this request.
///
/// Default is "none". Values "thisServer" and "allServers" are
/// soft-deprecated; servers SHOULD only use them if the client declares
/// `ClientCapabilities.sampling.context`, and they may be removed in future
/// spec releases.
pub include_context: Option<IncludeContext>,
/// The requested maximum number of tokens to sample (to prevent runaway
/// completions). The client MAY choose to sample fewer tokens than the
/// requested maximum. Required on the wire (`maxTokens`).
pub max_tokens: i64,
/// The messages to sample from. Required on the wire.
pub messages: Vec<SamplingMessage>,
/// The request's `_meta` object. Per the schema it may carry a
/// `progressToken` requesting out-of-band progress notifications
/// (`notifications/progress`); the receiver is not obligated to send them.
pub meta: Option<MessageMeta>,
/// Optional metadata to pass through to the LLM provider. The format of
/// this metadata is provider-specific.
pub metadata: Option<Map<String, Value>>,
/// The server's preferences for which model to select. The client MAY
/// ignore these preferences.
pub model_preferences: Option<ModelPreferences>,
/// Sequences at which sampling should stop. NOTE(review): the schema gives
/// no prose description for `stopSequences`; semantics assumed from the
/// name — confirm against the MCP spec.
pub stop_sequences: Vec<String>,
/// An optional system prompt the server wants to use for sampling. The
/// client MAY modify or omit this prompt.
pub system_prompt: Option<String>,
/// If specified, the caller is requesting task-augmented execution: the
/// request returns a CreateTaskResult immediately and the actual result is
/// retrieved later via `tasks/result`. Subject to capability negotiation —
/// receivers MUST declare support for task augmentation of this request
/// type in their capabilities.
pub task: Option<TaskMetadata>,
/// Sampling temperature (schema type: number; no further description given
/// in the schema).
pub temperature: Option<f64>,
/// Controls how the model uses tools. The client MUST return an error if
/// this field is provided but `ClientCapabilities.sampling.tools` is not
/// declared. Default is `{ mode: "auto" }`.
pub tool_choice: Option<ToolChoice>,
/// Tools that the model may use during generation. The client MUST return
/// an error if this field is provided but
/// `ClientCapabilities.sampling.tools` is not declared.
pub tools: Vec<Tool>,
}
Expand description
Parameters for a sampling/createMessage request.
JSON schema
{
"description": "Parameters for a sampling/createMessage request.",
"type": "object",
"required": [
"maxTokens",
"messages"
],
"properties": {
"_meta": {
"description": "See [General fields: _meta](https://modelcontextprotocol.io/specification/2025-11-25/basic/index#meta) for notes on _meta usage.",
"type": "object",
"properties": {
"progressToken": {
"description": "If specified, the caller is requesting out-of-band progress notifications for this request (as represented by notifications/progress). The value of this parameter is an opaque token that will be attached to any subsequent notifications. The receiver is not obligated to provide these notifications.",
"$ref": "#/$defs/ProgressToken"
}
},
"additionalProperties": {}
},
"includeContext": {
"description": "A request to include context from one or more MCP servers (including the caller), to be attached to the prompt.\nThe client MAY ignore this request.\n\nDefault is \"none\". Values \"thisServer\" and \"allServers\" are soft-deprecated. Servers SHOULD only use these values if the client\ndeclares ClientCapabilities.sampling.context. These values may be removed in future spec releases.",
"type": "string",
"enum": [
"allServers",
"none",
"thisServer"
]
},
"maxTokens": {
"description": "The requested maximum number of tokens to sample (to prevent runaway completions).\n\nThe client MAY choose to sample fewer tokens than the requested maximum.",
"type": "integer"
},
"messages": {
"type": "array",
"items": {
"$ref": "#/$defs/SamplingMessage"
}
},
"metadata": {
"description": "Optional metadata to pass through to the LLM provider. The format of this metadata is provider-specific.",
"type": "object",
"additionalProperties": true
},
"modelPreferences": {
"description": "The server's preferences for which model to select. The client MAY ignore these preferences.",
"$ref": "#/$defs/ModelPreferences"
},
"stopSequences": {
"type": "array",
"items": {
"type": "string"
}
},
"systemPrompt": {
"description": "An optional system prompt the server wants to use for sampling. The client MAY modify or omit this prompt.",
"type": "string"
},
"task": {
"description": "If specified, the caller is requesting task-augmented execution for this request.\nThe request will return a CreateTaskResult immediately, and the actual result can be\nretrieved later via tasks/result.\n\nTask augmentation is subject to capability negotiation - receivers MUST declare support\nfor task augmentation of specific request types in their capabilities.",
"$ref": "#/$defs/TaskMetadata"
},
"temperature": {
"type": "number"
},
"toolChoice": {
"description": "Controls how the model uses tools.\nThe client MUST return an error if this field is provided but ClientCapabilities.sampling.tools is not declared.\nDefault is { mode: \"auto\" }.",
"$ref": "#/$defs/ToolChoice"
},
"tools": {
"description": "Tools that the model may use during generation.\nThe client MUST return an error if this field is provided but ClientCapabilities.sampling.tools is not declared.",
"type": "array",
"items": {
"$ref": "#/$defs/Tool"
}
}
}
}
Fields§
§include_context: Option<IncludeContext>
A request to include context from one or more MCP servers (including the caller), to be attached to the prompt. The client MAY ignore this request. Default is “none”. Values “thisServer” and “allServers” are soft-deprecated. Servers SHOULD only use these values if the client declares ClientCapabilities.sampling.context. These values may be removed in future spec releases.
§max_tokens: i64
The requested maximum number of tokens to sample (to prevent runaway completions). The client MAY choose to sample fewer tokens than the requested maximum.
§messages: Vec<SamplingMessage>
§meta: Option<MessageMeta>
§metadata: Option<Map<String, Value>>
Optional metadata to pass through to the LLM provider. The format of this metadata is provider-specific.
§model_preferences: Option<ModelPreferences>
The server’s preferences for which model to select. The client MAY ignore these preferences.
§stop_sequences: Vec<String>
§system_prompt: Option<String>
An optional system prompt the server wants to use for sampling. The client MAY modify or omit this prompt.
§task: Option<TaskMetadata>
If specified, the caller is requesting task-augmented execution for this request. The request will return a CreateTaskResult immediately, and the actual result can be retrieved later via tasks/result. Task augmentation is subject to capability negotiation - receivers MUST declare support for task augmentation of specific request types in their capabilities.
§temperature: Option<f64>
§tool_choice: Option<ToolChoice>
Controls how the model uses tools. The client MUST return an error if this field is provided but ClientCapabilities.sampling.tools is not declared. Default is { mode: “auto” }.
§tools: Vec<Tool>
Tools that the model may use during generation. The client MUST return an error if this field is provided but ClientCapabilities.sampling.tools is not declared.
Trait Implementations§
Source§impl Clone for CreateMessageRequestParams
impl Clone for CreateMessageRequestParams
Source§fn clone(&self) -> CreateMessageRequestParams
fn clone(&self) -> CreateMessageRequestParams
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more