1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
//! Basic example demonstrating OllamaApiAgent usage.
//!
//! This example shows how to use OllamaApiAgent for local LLM inference
//! with Ollama server.
//!
//! # Prerequisites
//!
//! 1. Install Ollama: https://ollama.ai/download
//! 2. Pull a model: `ollama pull llama3`
//! 3. Start the server: `ollama serve` (usually runs automatically)
//!
//! # Run
//!
//! ```bash
//! cargo run --example ollama_basic --features ollama-api
//! ```
use llm_toolkit::agent::Agent;
use llm_toolkit::agent::impls::OllamaApiAgent;
/// Walks through `OllamaApiAgent` usage: default construction, a health
/// check, model listing, builder- and env-based configuration, and two
/// prompt executions (with and without a system prompt).
///
/// Exits early (after printing a hint) when no Ollama server is reachable,
/// since every subsequent step requires a live endpoint.
#[tokio::main(flavor = "current_thread")]
async fn main() {
    println!("=== Ollama API Agent Example ===\n");

    // --- 1. Default configuration (localhost:11434, llama3) -----------------
    println!("1. Default Configuration (localhost:11434, llama3)");
    let agent = OllamaApiAgent::new();
    println!(" Model: {}", agent.model());
    println!(" Endpoint: {}", agent.endpoint());

    // Guard clause: bail out if the server isn't up — nothing below can work.
    if !agent.is_healthy().await {
        println!(" Status: Ollama server is NOT running");
        println!(" Please start Ollama: ollama serve\n");
        return;
    }
    println!(" Status: Ollama server is healthy\n");

    // --- 2. Models currently hosted by the server ----------------------------
    println!("2. Available Models on Server");
    match agent.list_models().await {
        Err(e) => println!(" Error listing models: {}\n", e),
        Ok(models) => {
            for model_name in &models {
                println!(" - {}", model_name);
            }
            println!();
        }
    }

    // --- 3. Builder-style customisation --------------------------------------
    println!("3. Custom Configuration");
    let concise_agent = OllamaApiAgent::new()
        .with_model("llama3")
        .with_system_prompt("You are a helpful assistant. Be concise.");
    println!(" Model: {}", concise_agent.model());
    println!(" System prompt: Set\n");

    // --- 4. Configuration picked up from the environment ---------------------
    // NOTE(review): presumably reads OLLAMA_MODEL / OLLAMA_HOST as the output
    // text suggests — confirm against `from_env`'s implementation.
    println!("4. Environment Variable Configuration");
    let env_configured = OllamaApiAgent::from_env();
    println!(
        " Model (from OLLAMA_MODEL or default): {}",
        env_configured.model()
    );
    println!(
        " Endpoint (from OLLAMA_HOST or default): {}\n",
        env_configured.endpoint()
    );

    // --- 5. Run a single prompt through the default agent --------------------
    println!("5. Execute a Prompt");
    println!(" Prompt: 'What is Rust programming language? Answer in one sentence.'\n");
    let prompt = "What is Rust programming language? Answer in one sentence.";
    match agent.execute(prompt.into()).await {
        Ok(response) => println!(" Response: {}\n", response.trim()),
        Err(e) => println!(" Error: {}\n", e),
    }

    // --- 6. Same, but with a system prompt steering the output ---------------
    println!("6. With System Prompt");
    let structured_agent = OllamaApiAgent::new()
        .with_model("llama3")
        .with_system_prompt("You are a JSON generator. Always respond with valid JSON only.");
    println!(" Prompt: 'Generate a user object with name and age fields'\n");
    match structured_agent
        .execute("Generate a user object with name and age fields".into())
        .await
    {
        Ok(response) => println!(" Response: {}\n", response.trim()),
        Err(e) => println!(" Error: {}\n", e),
    }

    println!("=== Example Complete ===");
}