// mcp_langbase_reasoning/langbase/types.rs

1//! Langbase API types for pipe communication.
2//!
3//! This module provides request/response types for the Langbase Pipes API.
4
5use serde::{Deserialize, Serialize};
6use std::collections::HashMap;
7use tracing::warn;
8
9#[cfg(test)]
10#[path = "types_tests.rs"]
11mod types_tests;
12
/// Message in a Langbase conversation.
///
/// Serializes as `{"role": "...", "content": "..."}`; the role value is
/// lowercased on the wire via `MessageRole`'s `rename_all = "lowercase"`.
/// Construct via the [`Message::system`] / [`Message::user`] /
/// [`Message::assistant`] helpers rather than struct literals.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Message {
    /// Role of the message sender.
    pub role: MessageRole,
    /// Content of the message.
    pub content: String,
}
21
22/// Message role in a conversation.
23#[derive(Debug, Clone, Serialize, Deserialize)]
24#[serde(rename_all = "lowercase")]
25pub enum MessageRole {
26    /// System instruction message.
27    System,
28    /// User input message.
29    User,
30    /// Assistant response message.
31    Assistant,
32}
33
34/// Request to run a Langbase pipe.
35#[derive(Debug, Clone, Serialize)]
36pub struct PipeRequest {
37    /// Pipe name (required by Langbase API).
38    pub name: String,
39    /// Conversation messages to send.
40    pub messages: Vec<Message>,
41    /// Disable streaming (default: false for non-streaming response).
42    #[serde(default)]
43    pub stream: bool,
44    /// Optional template variables.
45    #[serde(skip_serializing_if = "Option::is_none")]
46    pub variables: Option<HashMap<String, String>>,
47    /// Optional thread ID for conversation continuity.
48    #[serde(rename = "threadId", skip_serializing_if = "Option::is_none")]
49    pub thread_id: Option<String>,
50}
51
/// Response from a Langbase pipe.
///
/// Deserialize-only counterpart of [`PipeRequest`]. `thread_id` and `raw`
/// are optional because the API may omit them (e.g. when conversation
/// storage is disabled) — presumably; confirm against the Langbase API docs.
#[derive(Debug, Clone, Deserialize)]
pub struct PipeResponse {
    /// Whether the request succeeded.
    pub success: bool,
    /// Completion text from the model.
    pub completion: String,
    /// Thread ID for conversation continuity (wire name `threadId`).
    #[serde(rename = "threadId")]
    pub thread_id: Option<String>,
    /// Raw model response details, when the API includes them.
    pub raw: Option<RawResponse>,
}
65
/// Raw model response details.
///
/// Nested inside [`PipeResponse::raw`]; all fields are optional since the
/// upstream provider may not report them.
#[derive(Debug, Clone, Deserialize)]
pub struct RawResponse {
    /// Model name used for completion.
    pub model: Option<String>,
    /// Token usage information.
    pub usage: Option<Usage>,
}
74
/// Token usage information.
///
/// Field names follow the OpenAI-style `usage` object (snake_case keys);
/// each is optional because providers may omit individual counters.
#[derive(Debug, Clone, Deserialize)]
pub struct Usage {
    /// Number of prompt tokens.
    pub prompt_tokens: Option<u32>,
    /// Number of completion tokens.
    pub completion_tokens: Option<u32>,
    /// Total tokens used.
    pub total_tokens: Option<u32>,
}
85
86impl Message {
87    /// Create a system message
88    pub fn system(content: impl Into<String>) -> Self {
89        Self {
90            role: MessageRole::System,
91            content: content.into(),
92        }
93    }
94
95    /// Create a user message
96    pub fn user(content: impl Into<String>) -> Self {
97        Self {
98            role: MessageRole::User,
99            content: content.into(),
100        }
101    }
102
103    /// Create an assistant message
104    pub fn assistant(content: impl Into<String>) -> Self {
105        Self {
106            role: MessageRole::Assistant,
107            content: content.into(),
108        }
109    }
110}
111
112impl PipeRequest {
113    /// Create a new pipe request with name and messages
114    pub fn new(name: impl Into<String>, messages: Vec<Message>) -> Self {
115        Self {
116            name: name.into(),
117            messages,
118            stream: false, // Disable streaming for synchronous responses
119            variables: None,
120            thread_id: None,
121        }
122    }
123
124    /// Add variables to the request
125    pub fn with_variables(mut self, variables: HashMap<String, String>) -> Self {
126        self.variables = Some(variables);
127        self
128    }
129
130    /// Add a single variable
131    pub fn with_variable(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
132        self.variables
133            .get_or_insert_with(HashMap::new)
134            .insert(key.into(), value.into());
135        self
136    }
137
138    /// Set the thread ID for conversation continuity
139    pub fn with_thread_id(mut self, thread_id: impl Into<String>) -> Self {
140        self.thread_id = Some(thread_id.into());
141        self
142    }
143}
144
/// Structured reasoning response from a pipe.
///
/// Usually parsed from the pipe's JSON completion via
/// [`ReasoningResponse::from_completion`], which falls back to wrapping the
/// raw text when the completion is not valid JSON.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReasoningResponse {
    /// The reasoning thought/content.
    pub thought: String,
    /// Confidence score (0.0-1.0). Range is documented, not enforced by the
    /// type — values come straight from the model output.
    pub confidence: f64,
    /// Optional metadata; defaults to `None` when absent from the JSON.
    #[serde(default)]
    pub metadata: Option<serde_json::Value>,
}
156
/// Request to create a new Langbase pipe.
///
/// Serialize-only. Every optional field is omitted from the JSON body when
/// `None`, so the server applies its own defaults. Build via
/// [`CreatePipeRequest::new`] plus the `with_*` builder methods.
#[derive(Debug, Clone, Serialize)]
pub struct CreatePipeRequest {
    /// Pipe name (unique identifier).
    pub name: String,
    /// Optional description.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// Visibility status.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<PipeStatus>,
    /// Model to use (e.g., "openai:gpt-4o-mini").
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Whether to update if exists.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub upsert: Option<bool>,
    /// Whether to enable streaming.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>,
    /// Whether to output JSON.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub json: Option<bool>,
    /// Whether to store conversations.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub store: Option<bool>,
    /// Model temperature.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f64>,
    /// Maximum tokens to generate.
    // NOTE(review): serialized as snake_case `max_tokens`; confirm this is
    // the key the Langbase create-pipe endpoint expects (other fields like
    // `threadId` elsewhere in this file are camelCase).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<u32>,
    /// Initial messages/prompts.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub messages: Option<Vec<Message>>,
}
193
194/// Pipe visibility status.
195#[derive(Debug, Clone, Serialize, Deserialize)]
196#[serde(rename_all = "lowercase")]
197pub enum PipeStatus {
198    /// Publicly accessible.
199    Public,
200    /// Private access only.
201    Private,
202}
203
/// Response from creating a pipe.
///
/// Deserialize-only counterpart of [`CreatePipeRequest`].
// NOTE(review): `api_key` is a secret and this type derives `Debug`, so
// debug-logging a `CreatePipeResponse` will print the key — consider
// redacting at call sites.
#[derive(Debug, Clone, Deserialize)]
pub struct CreatePipeResponse {
    /// Pipe name.
    pub name: String,
    /// Pipe description.
    pub description: Option<String>,
    /// Visibility status (raw string, not parsed into `PipeStatus`).
    pub status: String,
    /// Owner's login name.
    pub owner_login: String,
    /// Pipe URL.
    pub url: String,
    /// Pipe type (wire key `type`, renamed to avoid the Rust keyword).
    #[serde(rename = "type")]
    pub pipe_type: String,
    /// API key for the pipe.
    pub api_key: String,
}
223
224impl CreatePipeRequest {
225    /// Create a new pipe request with just a name
226    pub fn new(name: impl Into<String>) -> Self {
227        Self {
228            name: name.into(),
229            description: None,
230            status: None,
231            model: None,
232            upsert: None,
233            stream: None,
234            json: None,
235            store: None,
236            temperature: None,
237            max_tokens: None,
238            messages: None,
239        }
240    }
241
242    /// Set description
243    pub fn with_description(mut self, description: impl Into<String>) -> Self {
244        self.description = Some(description.into());
245        self
246    }
247
248    /// Set status (public/private)
249    pub fn with_status(mut self, status: PipeStatus) -> Self {
250        self.status = Some(status);
251        self
252    }
253
254    /// Set model (e.g., "openai:gpt-4o-mini")
255    pub fn with_model(mut self, model: impl Into<String>) -> Self {
256        self.model = Some(model.into());
257        self
258    }
259
260    /// Enable upsert (update if exists)
261    pub fn with_upsert(mut self, upsert: bool) -> Self {
262        self.upsert = Some(upsert);
263        self
264    }
265
266    /// Enable JSON output mode
267    pub fn with_json_output(mut self, json: bool) -> Self {
268        self.json = Some(json);
269        self
270    }
271
272    /// Set temperature
273    pub fn with_temperature(mut self, temperature: f64) -> Self {
274        self.temperature = Some(temperature);
275        self
276    }
277
278    /// Set max tokens
279    pub fn with_max_tokens(mut self, max_tokens: u32) -> Self {
280        self.max_tokens = Some(max_tokens);
281        self
282    }
283
284    /// Set system/user messages
285    pub fn with_messages(mut self, messages: Vec<Message>) -> Self {
286        self.messages = Some(messages);
287        self
288    }
289}
290
291impl ReasoningResponse {
292    /// Parse a reasoning response from pipe completion text
293    pub fn from_completion(completion: &str) -> Self {
294        match serde_json::from_str::<ReasoningResponse>(completion) {
295            Ok(parsed) => parsed,
296            Err(e) => {
297                warn!(
298                    error = %e,
299                    completion_preview = %completion.chars().take(200).collect::<String>(),
300                    "Failed to parse reasoning response as JSON, using raw completion as thought"
301                );
302                // Fall back to treating the entire completion as the thought
303                Self {
304                    thought: completion.to_string(),
305                    confidence: 0.8,
306                    metadata: None,
307                }
308            }
309        }
310    }
311}
312
313// ============================================================================
314// Phase 4: Bias & Fallacy Detection Response Types
315// ============================================================================
316
/// A single detected bias from the analysis.
///
/// Produced as part of [`BiasDetectionResponse::detections`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DetectedBias {
    /// Name of the cognitive bias (e.g., "confirmation_bias", "anchoring_bias")
    pub bias_type: String,
    /// Severity level from 1 (minor) to 5 (critical).
    /// Range is documented, not enforced — values come from model output.
    pub severity: i32,
    /// Confidence in this detection (0.0-1.0)
    pub confidence: f64,
    /// Explanation of why this is a bias
    pub explanation: String,
    /// Suggested remediation; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub remediation: Option<String>,
    /// Text excerpt showing the bias; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub excerpt: Option<String>,
}
335
/// Response from bias detection pipe.
///
/// Parsed from the pipe's JSON completion via
/// [`BiasDetectionResponse::from_completion`], which degrades gracefully on
/// non-JSON output.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BiasDetectionResponse {
    /// List of detected biases
    pub detections: Vec<DetectedBias>,
    /// Overall reasoning quality score (0.0-1.0, higher = better)
    pub reasoning_quality: f64,
    /// Overall assessment summary
    pub overall_assessment: String,
    /// Additional metadata; defaults to `None` when absent and is omitted
    /// when serializing `None`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
}
349
350impl BiasDetectionResponse {
351    /// Parse a bias detection response from pipe completion text
352    pub fn from_completion(completion: &str) -> Self {
353        match serde_json::from_str::<BiasDetectionResponse>(completion) {
354            Ok(parsed) => parsed,
355            Err(e) => {
356                warn!(
357                    error = %e,
358                    completion_preview = %completion.chars().take(200).collect::<String>(),
359                    "Failed to parse bias detection response as JSON, returning empty result"
360                );
361                Self {
362                    detections: vec![],
363                    reasoning_quality: 0.5,
364                    overall_assessment: completion.to_string(),
365                    metadata: None,
366                }
367            }
368        }
369    }
370}
371
/// A single detected logical fallacy from the analysis.
///
/// Produced as part of [`FallacyDetectionResponse::detections`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DetectedFallacy {
    /// Name of the fallacy (e.g., "ad_hominem", "straw_man", "false_dichotomy")
    pub fallacy_type: String,
    /// Category: "formal" or "informal".
    // NOTE(review): stringly-typed; values come from model output, so any
    // other string is representable here.
    pub category: String,
    /// Severity level from 1 (minor) to 5 (critical).
    /// Range is documented, not enforced — values come from model output.
    pub severity: i32,
    /// Confidence in this detection (0.0-1.0)
    pub confidence: f64,
    /// Explanation of why this is a fallacy
    pub explanation: String,
    /// Suggested remediation; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub remediation: Option<String>,
    /// Text excerpt showing the fallacy; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub excerpt: Option<String>,
}
392
/// Response from fallacy detection pipe.
///
/// Parsed from the pipe's JSON completion via
/// [`FallacyDetectionResponse::from_completion`], which degrades gracefully
/// on non-JSON output.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FallacyDetectionResponse {
    /// List of detected fallacies
    pub detections: Vec<DetectedFallacy>,
    /// Overall argument validity score (0.0-1.0, higher = more valid)
    pub argument_validity: f64,
    /// Overall assessment summary
    pub overall_assessment: String,
    /// Additional metadata; defaults to `None` when absent and is omitted
    /// when serializing `None`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub metadata: Option<serde_json::Value>,
}
406
407impl FallacyDetectionResponse {
408    /// Parse a fallacy detection response from pipe completion text
409    pub fn from_completion(completion: &str) -> Self {
410        match serde_json::from_str::<FallacyDetectionResponse>(completion) {
411            Ok(parsed) => parsed,
412            Err(e) => {
413                warn!(
414                    error = %e,
415                    completion_preview = %completion.chars().take(200).collect::<String>(),
416                    "Failed to parse fallacy detection response as JSON, returning empty result"
417                );
418                Self {
419                    detections: vec![],
420                    argument_validity: 0.5,
421                    overall_assessment: completion.to_string(),
422                    metadata: None,
423                }
424            }
425        }
426    }
427}