// gent/runtime/llm.rs

//! LLM client abstraction for GENT

use crate::errors::GentResult;
use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
8/// Role in a chat conversation
9#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
10pub enum Role {
11    /// System message (sets agent behavior)
12    System,
13    /// User message (input)
14    User,
15    /// Assistant message (LLM response)
16    Assistant,
17    /// Tool result message
18    Tool,
19}
20
/// A tool call made by the LLM.
///
/// Produced by the model inside an [`LLMResponse`] and echoed back via a
/// [`ToolResult`] whose `call_id` matches `id`.
// NOTE(review): `Eq` is intentionally not derived — `JsonValue`
// (serde_json::Value) is only `PartialEq` because it can contain floats.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ToolCall {
    /// Unique ID for this tool call
    pub id: String,
    /// Name of the tool to call
    pub name: String,
    /// Arguments to pass to the tool (as JSON)
    pub arguments: JsonValue,
}
31
32/// Result from a tool execution
33#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
34pub struct ToolResult {
35    /// ID of the tool call this result corresponds to
36    pub call_id: String,
37    /// Content returned by the tool
38    pub content: String,
39    /// Whether this is an error result
40    pub is_error: bool,
41}
42
/// Definition of a tool that can be called.
///
/// Passed to [`LLMClient::chat`] to advertise the tools available to the
/// model for the current request.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ToolDefinition {
    /// Name of the tool
    pub name: String,
    /// Description of what the tool does
    pub description: String,
    /// JSON Schema for the tool's parameters
    // NOTE(review): presumably a full JSON Schema object — the schema is
    // not validated here; confirm against the consuming client.
    pub parameters: JsonValue,
}
53
/// A message in a chat conversation.
///
/// The two `Option` fields are role-specific and omitted from the
/// serialized form when `None` (via `skip_serializing_if`):
/// `tool_call_id` is set only on [`Role::Tool`] messages and
/// `tool_calls` only on [`Role::Assistant`] messages — see the
/// constructors on [`Message`].
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Message {
    /// Role of the message sender
    pub role: Role,
    /// Content of the message
    pub content: String,
    /// Tool call ID (only for Role::Tool messages)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_call_id: Option<String>,
    /// Tool calls (only for Role::Assistant messages)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_calls: Option<Vec<ToolCall>>,
}
68
69impl Message {
70    /// Create a new message
71    pub fn new(role: Role, content: impl Into<String>) -> Self {
72        Self {
73            role,
74            content: content.into(),
75            tool_call_id: None,
76            tool_calls: None,
77        }
78    }
79
80    /// Create a system message
81    pub fn system(content: impl Into<String>) -> Self {
82        Self::new(Role::System, content)
83    }
84
85    /// Create a user message
86    pub fn user(content: impl Into<String>) -> Self {
87        Self::new(Role::User, content)
88    }
89
90    /// Create an assistant message
91    pub fn assistant(content: impl Into<String>) -> Self {
92        Self::new(Role::Assistant, content)
93    }
94
95    /// Create a tool result message
96    pub fn tool_result(result: ToolResult) -> Self {
97        Self {
98            role: Role::Tool,
99            content: result.content,
100            tool_call_id: Some(result.call_id),
101            tool_calls: None,
102        }
103    }
104
105    /// Create an assistant message with tool calls
106    pub fn assistant_with_tool_calls(tool_calls: Vec<ToolCall>) -> Self {
107        Self {
108            role: Role::Assistant,
109            content: String::new(),
110            tool_call_id: None,
111            tool_calls: Some(tool_calls),
112        }
113    }
114}
115
/// Response from an LLM.
///
/// Exactly what [`LLMClient::chat`] returns: optional text plus zero or
/// more requested tool calls. Use the constructors on [`LLMResponse`]
/// to build the three valid shapes (text only, tools only, or both).
#[derive(Debug, Clone, PartialEq)]
pub struct LLMResponse {
    /// The response content (optional if tool calls are present)
    pub content: Option<String>,
    /// Tool calls requested by the LLM
    pub tool_calls: Vec<ToolCall>,
}
124
125impl LLMResponse {
126    /// Create a new LLM response with content
127    pub fn new(content: impl Into<String>) -> Self {
128        Self {
129            content: Some(content.into()),
130            tool_calls: vec![],
131        }
132    }
133
134    /// Create a response with tool calls
135    pub fn with_tool_calls(tool_calls: Vec<ToolCall>) -> Self {
136        Self {
137            content: None,
138            tool_calls,
139        }
140    }
141
142    /// Create a response with both content and tool calls
143    pub fn with_content_and_tools(content: impl Into<String>, tool_calls: Vec<ToolCall>) -> Self {
144        Self {
145            content: Some(content.into()),
146            tool_calls,
147        }
148    }
149}
150
/// Trait for LLM clients.
///
/// Implementors must be `Send + Sync` so a client can be shared across
/// async tasks. See [`MockLLMClient`] for a canned-response test double.
#[async_trait]
pub trait LLMClient: Send + Sync {
    /// Send a chat request to the LLM
    ///
    /// # Arguments
    /// * `messages` - The conversation history
    /// * `tools` - Available tool definitions
    /// * `model` - Optional model override (uses client default if None)
    /// * `json_mode` - Enable JSON mode (response_format: json_object)
    ///
    /// # Errors
    /// Returns a `GentResult` error on failure; the concrete failure
    /// modes are implementation-specific (e.g. transport or provider
    /// errors for a real client).
    async fn chat(
        &self,
        messages: Vec<Message>,
        tools: Vec<ToolDefinition>,
        model: Option<&str>,
        json_mode: bool,
    ) -> GentResult<LLMResponse>;
}
169
/// Mock LLM client for testing.
///
/// Ignores every request and replays a fixed reply: the configured tool
/// calls if any are set, otherwise the canned text response (see the
/// `chat` impl below).
#[derive(Debug, Clone)]
pub struct MockLLMClient {
    /// The response to return
    response: String,
    /// Tool calls to return (if any)
    tool_calls: Vec<ToolCall>,
}
178
179impl MockLLMClient {
180    /// Create a new mock client with default response
181    pub fn new() -> Self {
182        Self {
183            response: "Hello! I'm a friendly assistant. How can I help you today?".to_string(),
184            tool_calls: vec![],
185        }
186    }
187
188    /// Create a mock client with a custom response
189    pub fn with_response(response: impl Into<String>) -> Self {
190        Self {
191            response: response.into(),
192            tool_calls: vec![],
193        }
194    }
195
196    /// Create a mock client with tool calls
197    pub fn with_tool_calls(tool_calls: Vec<ToolCall>) -> Self {
198        Self {
199            response: String::new(),
200            tool_calls,
201        }
202    }
203
204    /// Get the configured response
205    pub fn response(&self) -> &str {
206        &self.response
207    }
208}
209
impl Default for MockLLMClient {
    /// Equivalent to [`MockLLMClient::new`]: the default friendly
    /// greeting and no tool calls.
    fn default() -> Self {
        Self::new()
    }
}
215
216#[async_trait]
217impl LLMClient for MockLLMClient {
218    async fn chat(
219        &self,
220        _messages: Vec<Message>,
221        _tools: Vec<ToolDefinition>,
222        _model: Option<&str>,
223        _json_mode: bool,
224    ) -> GentResult<LLMResponse> {
225        if !self.tool_calls.is_empty() {
226            Ok(LLMResponse::with_tool_calls(self.tool_calls.clone()))
227        } else {
228            Ok(LLMResponse::new(&self.response))
229        }
230    }
231}