lib_client_ollama/
types.rs

1//! Data types for the Ollama API.
2
3use serde::{Deserialize, Serialize};
4
5/// Message role.
6#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
7#[serde(rename_all = "lowercase")]
8pub enum Role {
9    System,
10    User,
11    Assistant,
12    Tool,
13}
14
/// A message in the conversation.
///
/// `tool_calls` is omitted from the serialized JSON when `None`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Message {
    /// Message role (system, user, assistant, or tool).
    pub role: Role,
    /// Message text content; empty for assistant messages that carry
    /// only tool calls.
    pub content: String,
    /// Tool calls made by the assistant, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_calls: Option<Vec<ToolCall>>,
}
26
27impl Message {
28    /// Create a system message.
29    pub fn system(content: impl Into<String>) -> Self {
30        Self {
31            role: Role::System,
32            content: content.into(),
33            tool_calls: None,
34        }
35    }
36
37    /// Create a user message.
38    pub fn user(content: impl Into<String>) -> Self {
39        Self {
40            role: Role::User,
41            content: content.into(),
42            tool_calls: None,
43        }
44    }
45
46    /// Create an assistant message.
47    pub fn assistant(content: impl Into<String>) -> Self {
48        Self {
49            role: Role::Assistant,
50            content: content.into(),
51            tool_calls: None,
52        }
53    }
54
55    /// Create an assistant message with tool calls.
56    pub fn assistant_with_tool_calls(tool_calls: Vec<ToolCall>) -> Self {
57        Self {
58            role: Role::Assistant,
59            content: String::new(),
60            tool_calls: Some(tool_calls),
61        }
62    }
63
64    /// Create a tool result message.
65    pub fn tool(content: impl Into<String>) -> Self {
66        Self {
67            role: Role::Tool,
68            content: content.into(),
69            tool_calls: None,
70        }
71    }
72}
73
/// Tool call made by the assistant.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolCall {
    /// Function call details (name and JSON arguments).
    pub function: FunctionCall,
}
80
81impl ToolCall {
82    /// Create a new tool call.
83    pub fn new(name: impl Into<String>, arguments: serde_json::Value) -> Self {
84        Self {
85            function: FunctionCall {
86                name: name.into(),
87                arguments,
88            },
89        }
90    }
91}
92
/// Function call details.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FunctionCall {
    /// Function name.
    pub name: String,
    /// Arguments as an arbitrary JSON value.
    pub arguments: serde_json::Value,
}
101
/// Tool definition offered to the model.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Tool {
    /// Tool type; always "function" (serialized as the JSON key "type",
    /// renamed because `type` is a Rust keyword).
    #[serde(rename = "type")]
    pub tool_type: String,
    /// Function definition (name, description, parameter schema).
    pub function: FunctionDefinition,
}
111
112impl Tool {
113    /// Create a new function tool.
114    pub fn function(
115        name: impl Into<String>,
116        description: impl Into<String>,
117        parameters: serde_json::Value,
118    ) -> Self {
119        Self {
120            tool_type: "function".to_string(),
121            function: FunctionDefinition {
122                name: name.into(),
123                description: description.into(),
124                parameters,
125            },
126        }
127    }
128}
129
/// Function definition for a tool.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FunctionDefinition {
    /// Function name.
    pub name: String,
    /// Human-readable function description.
    pub description: String,
    /// JSON schema describing the function's parameters.
    pub parameters: serde_json::Value,
}
140
/// Options for generation.
///
/// All fields are optional; unset fields are omitted from the
/// serialized JSON so the server uses its own defaults.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct Options {
    /// Temperature for sampling.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,
    /// Top-p (nucleus) sampling.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,
    /// Top-k sampling.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_k: Option<i32>,
    /// Maximum number of tokens to predict.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub num_predict: Option<i32>,
    /// Stop sequences that end generation.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stop: Option<Vec<String>>,
    /// Seed for reproducible sampling.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub seed: Option<i64>,
}
163
164impl Options {
165    /// Create new options with temperature.
166    pub fn with_temperature(mut self, temperature: f32) -> Self {
167        self.temperature = Some(temperature);
168        self
169    }
170
171    /// Set top-p sampling.
172    pub fn with_top_p(mut self, top_p: f32) -> Self {
173        self.top_p = Some(top_p);
174        self
175    }
176
177    /// Set number of tokens to predict.
178    pub fn with_num_predict(mut self, num_predict: i32) -> Self {
179        self.num_predict = Some(num_predict);
180        self
181    }
182
183    /// Set stop sequences.
184    pub fn with_stop(mut self, stop: Vec<String>) -> Self {
185        self.stop = Some(stop);
186        self
187    }
188}
189
/// Request to generate a chat completion.
#[derive(Debug, Clone, Serialize)]
pub struct ChatRequest {
    /// Model name.
    pub model: String,
    /// Messages in the conversation, oldest first.
    pub messages: Vec<Message>,
    /// Whether to stream the response.
    pub stream: bool,
    /// Generation options; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub options: Option<Options>,
    /// Tools the model may call; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,
    /// Output format (e.g., "json"); omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub format: Option<String>,
}
209
210impl ChatRequest {
211    /// Create a new chat request.
212    pub fn new(model: impl Into<String>, messages: Vec<Message>) -> Self {
213        Self {
214            model: model.into(),
215            messages,
216            stream: false,
217            options: None,
218            tools: None,
219            format: None,
220        }
221    }
222
223    /// Set generation options.
224    pub fn with_options(mut self, options: Options) -> Self {
225        self.options = Some(options);
226        self
227    }
228
229    /// Set available tools.
230    pub fn with_tools(mut self, tools: Vec<Tool>) -> Self {
231        self.tools = Some(tools);
232        self
233    }
234
235    /// Set output format.
236    pub fn with_format(mut self, format: impl Into<String>) -> Self {
237        self.format = Some(format.into());
238        self
239    }
240
241    /// Enable streaming.
242    pub fn with_stream(mut self, stream: bool) -> Self {
243        self.stream = stream;
244        self
245    }
246}
247
/// Response from a chat completion.
#[derive(Debug, Clone, Deserialize)]
pub struct ChatResponse {
    /// Model used.
    pub model: String,
    /// Creation timestamp (string as returned by the server).
    pub created_at: String,
    /// Response message from the assistant.
    pub message: Message,
    /// Whether generation is done.
    pub done: bool,
    /// Reason for stopping, when provided.
    pub done_reason: Option<String>,
    /// Total duration in nanoseconds.
    pub total_duration: Option<u64>,
    /// Model load duration in nanoseconds.
    pub load_duration: Option<u64>,
    /// Number of prompt tokens evaluated.
    pub prompt_eval_count: Option<usize>,
    /// Prompt evaluation duration in nanoseconds.
    pub prompt_eval_duration: Option<u64>,
    /// Number of tokens generated.
    pub eval_count: Option<usize>,
    /// Generation duration in nanoseconds.
    pub eval_duration: Option<u64>,
}
274
275impl ChatResponse {
276    /// Get the response content.
277    pub fn content(&self) -> &str {
278        &self.message.content
279    }
280
281    /// Get tool calls if any.
282    pub fn tool_calls(&self) -> Option<&Vec<ToolCall>> {
283        self.message.tool_calls.as_ref()
284    }
285
286    /// Check if the response contains tool calls.
287    pub fn has_tool_calls(&self) -> bool {
288        self.message.tool_calls.is_some()
289    }
290}
291
/// Request to generate a completion (non-chat).
#[derive(Debug, Clone, Serialize)]
pub struct GenerateRequest {
    /// Model name.
    pub model: String,
    /// Prompt text.
    pub prompt: String,
    /// Whether to stream the response.
    pub stream: bool,
    /// Generation options; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub options: Option<Options>,
    /// System prompt; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub system: Option<String>,
    /// Output format (e.g., "json"); omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub format: Option<String>,
}
311
312impl GenerateRequest {
313    /// Create a new generate request.
314    pub fn new(model: impl Into<String>, prompt: impl Into<String>) -> Self {
315        Self {
316            model: model.into(),
317            prompt: prompt.into(),
318            stream: false,
319            options: None,
320            system: None,
321            format: None,
322        }
323    }
324
325    /// Set generation options.
326    pub fn with_options(mut self, options: Options) -> Self {
327        self.options = Some(options);
328        self
329    }
330
331    /// Set system prompt.
332    pub fn with_system(mut self, system: impl Into<String>) -> Self {
333        self.system = Some(system.into());
334        self
335    }
336}
337
/// Response from a generate request.
#[derive(Debug, Clone, Deserialize)]
pub struct GenerateResponse {
    /// Model used.
    pub model: String,
    /// Creation timestamp (string as returned by the server).
    pub created_at: String,
    /// Generated response text.
    pub response: String,
    /// Whether generation is done.
    pub done: bool,
    /// Total duration in nanoseconds.
    pub total_duration: Option<u64>,
    /// Number of prompt tokens evaluated.
    pub prompt_eval_count: Option<usize>,
    /// Number of tokens generated.
    pub eval_count: Option<usize>,
}
356
/// Model information, as returned by the model-list endpoint.
#[derive(Debug, Clone, Deserialize)]
pub struct ModelInfo {
    /// Model name.
    pub name: String,
    /// Model modification time (string as returned by the server).
    pub modified_at: String,
    /// Model size in bytes.
    pub size: u64,
    /// Model digest.
    pub digest: String,
    /// Additional model details, when provided.
    pub details: Option<ModelDetails>,
}
371
/// Model details; every field is optional.
#[derive(Debug, Clone, Deserialize)]
pub struct ModelDetails {
    /// Model file format.
    pub format: Option<String>,
    /// Model family.
    pub family: Option<String>,
    /// Parameter size (e.g., "7B"-style string).
    pub parameter_size: Option<String>,
    /// Quantization level.
    pub quantization_level: Option<String>,
}
384
/// List of available models.
#[derive(Debug, Clone, Deserialize)]
pub struct ModelList {
    /// The models known to the server.
    pub models: Vec<ModelInfo>,
}
391
/// Error response from the API.
#[derive(Debug, Clone, Deserialize)]
pub struct ErrorResponse {
    /// Error message text.
    pub error: String,
}
397}