reasonkit/m2/
connector.rs1use crate::error::Error;
3use crate::m2::types::{
4 CompositeConstraints, Evidence, ExecutionMetrics, InterleavedProtocol, M2Config, ProtocolInput,
5 ProtocolOutput as M2ProtocolOutput,
6};
7use anyhow::{Context, Result};
8use reqwest::Client;
9use serde_json::json;
10use tracing::{debug, error, info, instrument};
11
/// HTTP-backed connector that executes M2 protocols against the endpoint
/// named in `M2Config` (routed to a local Ollama backend when the URL looks
/// local, otherwise served by a stub — see `execute_interleaved_thinking`).
#[derive(Debug)]
pub struct M2Connector {
    // Reused reqwest client; connection pooling is handled internally.
    client: Client,
    // Backend configuration. NOTE(review): only `config.endpoint` is read in
    // this file — confirm which other M2Config fields matter to callers.
    config: M2Config,
}
17
18impl M2Connector {
19 pub fn new(config: M2Config) -> Self {
20 Self {
21 client: Client::new(),
22 config,
23 }
24 }
25
26 #[instrument(skip(self, protocol, input))]
27 pub async fn execute_interleaved_thinking(
28 &self,
29 protocol: &InterleavedProtocol,
30 _constraints: &CompositeConstraints,
31 input: &ProtocolInput,
32 ) -> Result<M2Result, Error> {
33 let endpoint = &self.config.endpoint;
34
35 if endpoint.contains("ollama") || endpoint.contains("localhost") {
36 return self.execute_via_ollama(protocol, input).await.map_err(|e| {
37 error!("Ollama execution failed: {}", e);
38 Error::M2ExecutionError(format!("Ollama execution failed: {}", e))
39 });
40 }
41
42 info!("Executing M2 connector stub for endpoint: {}", endpoint);
44 Ok(M2Result {
45 output: M2ProtocolOutput {
46 result: serde_json::Value::Null.to_string(),
47 evidence: vec![],
48 confidence: 0.0,
49 },
50 metrics: ExecutionMetrics::default(),
51 })
52 }
53
54 async fn execute_via_ollama(
55 &self,
56 protocol: &InterleavedProtocol,
57 input: &ProtocolInput,
58 ) -> Result<M2Result> {
59 let model = "minimax-m2.1:cloud"; let prompt = format!(
61 "Execute the following protocol:\nName: {}\nDescription: {}\n\nInput: {}",
62 protocol.name, protocol.description, input
63 );
64
65 let body = json!({
66 "model": model,
67 "prompt": prompt,
68 "stream": false
69 });
70
71 debug!("Sending request to Ollama at {}", self.config.endpoint);
72
73 let response = self
74 .client
75 .post(&self.config.endpoint)
76 .json(&body)
77 .send()
78 .await
79 .with_context(|| {
80 format!(
81 "Failed to send request to Ollama endpoint: {}",
82 self.config.endpoint
83 )
84 })?;
85
86 if !response.status().is_success() {
87 let status = response.status();
88 let error_body = response
89 .text()
90 .await
91 .unwrap_or_else(|e| format!("<failed to read error body: {}>", e));
92 return Err(anyhow::anyhow!(
93 "Ollama API error: {} - {}",
94 status,
95 error_body
96 ));
97 }
98
99 let response_json: serde_json::Value = response
100 .json()
101 .await
102 .context("Failed to parse Ollama response as JSON")?;
103
104 let response_text = response_json
106 .get("response")
107 .and_then(|v| v.as_str())
108 .unwrap_or("")
109 .to_string();
110
111 Ok(M2Result {
114 output: M2ProtocolOutput {
115 result: response_text.clone(),
116 evidence: vec![Evidence {
117 content: "Generated via Ollama".to_string(),
118 source: "minimax-m2.1".to_string(),
119 confidence: 0.8, }],
121 confidence: 0.8,
122 },
123 metrics: ExecutionMetrics {
124 duration_ms: 0, token_usage: Default::default(), ..Default::default()
127 },
128 })
129 }
130}
131
/// Outcome of a single M2 protocol execution: the protocol output plus the
/// execution metrics gathered while producing it.
#[derive(Debug)]
pub struct M2Result {
    // Result text, supporting evidence, and confidence from the backend.
    pub output: M2ProtocolOutput,
    // Timing/usage metrics; defaulted on the stub path in this file.
    pub metrics: ExecutionMetrics,
}