pub struct LLMConfig {
    pub model: String,
    pub api_key: Option<String>,
    pub base_url: Option<String>,
    pub max_tokens: Option<u32>,
    pub temperature: Option<f32>,
    pub top_p: Option<f32>,
    pub top_k: Option<u32>,
    pub system: Option<String>,
    pub timeout_seconds: Option<u64>,
    pub embedding_encoding_format: Option<String>,
    pub embedding_dimensions: Option<u32>,
    pub enable_parallel_tool_use: Option<bool>,
    pub reasoning: Option<bool>,
    pub reasoning_effort: Option<String>,
    pub reasoning_budget_tokens: Option<u32>,
    pub api_version: Option<String>,
    pub deployment_id: Option<String>,
    pub voice: Option<String>,
    pub xai_search_mode: Option<String>,
    pub xai_search_source_type: Option<String>,
    pub xai_search_excluded_websites: Option<Vec<String>>,
    pub xai_search_max_results: Option<u32>,
    pub xai_search_from_date: Option<String>,
    pub xai_search_to_date: Option<String>,
    pub openai_enable_web_search: Option<bool>,
    pub openai_web_search_context_size: Option<String>,
    pub openai_web_search_user_location_type: Option<String>,
    pub openai_web_search_user_location_approximate_country: Option<String>,
    pub openai_web_search_user_location_approximate_city: Option<String>,
    pub openai_web_search_user_location_approximate_region: Option<String>,
    pub resilient_enable: Option<bool>,
    pub resilient_attempts: Option<usize>,
    pub resilient_base_delay_ms: Option<u64>,
    pub resilient_max_delay_ms: Option<u64>,
    pub resilient_jitter: Option<bool>,
}
Configuration for LLM providers with all builder options.
This struct provides comprehensive configuration for any LLM provider, matching all options available in the LLMBuilder.
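A minimal end-to-end sketch (Agent and TaskRequest are the types used in the repository examples below; imports and the surrounding async fn are omitted):

// Build a config, create an agent from it, and run one task.
let config = LLMConfig::new("openai::gpt-4")
    .with_api_key(std::env::var("OPENAI_API_KEY").unwrap_or_default())
    .with_temperature(0.7)
    .with_max_tokens(256);

let mut agent = Agent::new_with_config("Docs Assistant", config)?;
let response = agent.run(TaskRequest::new("Say hello in one sentence.")).await;
println!("{:?}", response.result());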
Implementations
impl LLMConfig
pub fn new(model: impl Into<String>) -> Self
Create a new LLMConfig with just the model name
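The model string follows the provider::model convention used throughout the repository examples below; a sketch, assuming the remaining Option fields start out as None:

// "ollama::llama3.2" selects the Ollama provider and the llama3.2 model.
let config = LLMConfig::new("ollama::llama3.2");
assert!(config.api_key.is_none()); // assumed default for unset options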
Examples found in repository:
examples/08_llm_providers.rs (line 173)
async fn advanced_configuration() -> Result<(), Box<dyn std::error::Error>> {
    println!("\n✨ Using LLMConfig for advanced configuration:\n");

    // Method 1: Using Agent::new_with_config() (Recommended for explicit API keys)
    println!("📘 Using Agent::new_with_config() with explicit API key:");
    if let Ok(api_key) = std::env::var("OPENAI_API_KEY") {
        let config = LLMConfig::new("openai::gpt-4")
            .with_api_key(api_key)
            .with_temperature(0.7)
            .with_max_tokens(150)
            .with_top_p(0.9);

        if let Ok(mut agent) = Agent::new_with_config("Configured Assistant", config) {
            let task1 = TaskRequest::new("Explain quantum computing in one sentence.");
            let response = agent.run(task1).await;
            println!(" Response: {:?}\n", response.result());
        }
    } else {
        println!(" Skipped (OPENAI_API_KEY not set)\n");
    }

    // Method 2: Using Agent::new() then with_llm_config() (Alternative approach)
    println!("📗 Using Agent::new() then with_llm_config():");
    let mut agent = Agent::new("Configured Assistant", "openai::gpt-4");

    let config = LLMConfig::new("openai::gpt-4")
        .with_temperature(0.8)
        .with_max_tokens(200);

    if let Ok(_) = agent.with_llm_config(config) {
        let task2 = TaskRequest::new("What is machine learning in one sentence?");
        let response = agent.run(task2).await;
        println!(" Response: {:?}\n", response.result());
    }

    // Anthropic with reasoning
    println!("📗 Anthropic with extended thinking:");
    let mut claude_agent = Agent::new("Thinking Claude", "anthropic::claude-3-opus-20240229");

    let claude_config = LLMConfig::new("anthropic::claude-3-opus-20240229")
        .with_api_key(std::env::var("ANTHROPIC_API_KEY").unwrap_or_default())
        .with_temperature(0.5)
        .with_max_tokens(200)
        .with_reasoning(true);

    if let Ok(_) = claude_agent.with_llm_config(claude_config) {
        let task2 = TaskRequest::new("Explain quantum computing in one sentence.");
        let response = claude_agent.run(task2).await;
        println!(" Response: {:?}\n", response.result());
    }

    // Ollama with custom system prompt
    println!("📙 Ollama with custom system prompt:");
    let mut ollama_agent = Agent::new("Custom Ollama", "ollama::llama3.2");

    let ollama_config = LLMConfig::new("ollama::llama3.2")
        .with_system("You are a concise and technical AI assistant.")
        .with_max_tokens(100)
        .with_temperature(0.3);

    if let Ok(_) = ollama_agent.with_llm_config(ollama_config) {
        let task3 = TaskRequest::new("Explain quantum computing in one sentence.");
        let response = ollama_agent.run(task3).await;
        println!(" Response: {:?}\n", response.result());
    }

    Ok(())
}

/// Example 3: Provider-specific features
async fn provider_specific_features() -> Result<(), Box<dyn std::error::Error>> {
    println!("\n✨ Provider-specific features:\n");

    // Azure OpenAI
    println!("☁️ Azure OpenAI with deployment configuration:");
    let mut azure_agent = Agent::new("Azure Assistant", "azure::gpt-4");

    let azure_config = LLMConfig::new("azure::gpt-4")
        .with_api_key(std::env::var("AZURE_OPENAI_API_KEY").unwrap_or_default())
        .with_deployment_id("your-deployment-id")
        .with_api_version("2024-02-01")
        .with_base_url("https://your-resource.openai.azure.com");

    if let Ok(_) = azure_agent.with_llm_config(azure_config) {
        println!(" ✓ Azure agent configured with deployment settings");
    }

    // OpenAI with web search
    println!("\n🌐 OpenAI with web search enabled:");
    let mut search_agent = Agent::new("Search Assistant", "openai::gpt-4");

    let search_config = LLMConfig::new("openai::gpt-4")
        .with_api_key(std::env::var("OPENAI_API_KEY").unwrap_or_default())
        .with_openai_web_search(true);

    if let Ok(_) = search_agent.with_llm_config(search_config) {
        let task1 = TaskRequest::new("What are the latest developments in AI?");
        let response = search_agent.run(task1).await;
        println!(" Response (with web search): {:?}\n", response.result());
    }

    // DeepSeek
    println!("🔍 DeepSeek for code-focused tasks:");
    let mut deepseek_agent = Agent::new("DeepSeek Assistant", "deepseek::deepseek-coder");

    let deepseek_config = LLMConfig::new("deepseek::deepseek-coder")
        .with_api_key(std::env::var("DEEPSEEK_API_KEY").unwrap_or_default())
        .with_temperature(0.2);

    if let Ok(_) = deepseek_agent.with_llm_config(deepseek_config) {
        let code_task = TaskRequest::new("Write a Rust function to calculate fibonacci numbers");
        let response = deepseek_agent.run(code_task).await;
        println!(" Response: {:?}\n", response.result());
    }

    // Mistral
    println!("🌟 Mistral for European AI:");
    let mut mistral_agent = Agent::new("Mistral Assistant", "mistral::mistral-large-latest");

    let mistral_config = LLMConfig::new("mistral::mistral-large-latest")
        .with_api_key(std::env::var("MISTRAL_API_KEY").unwrap_or_default())
        .with_temperature(0.7)
        .with_max_tokens(500);

    if let Ok(_) = mistral_agent.with_llm_config(mistral_config) {
        println!(" ✓ Mistral agent configured");
    }

    Ok(())
}

/// Example 4: Compare providers
async fn compare_providers() -> Result<(), Box<dyn std::error::Error>> {
    println!("\n✨ Comparing responses across providers:\n");

    let providers = vec![
        ("OpenAI GPT-4", "openai::gpt-4", std::env::var("OPENAI_API_KEY").ok()),
        ("Anthropic Claude", "anthropic::claude-3-sonnet-20240229", std::env::var("ANTHROPIC_API_KEY").ok()),
        ("Ollama Llama", "ollama::llama3.2", None),
        ("Google Gemini", "google::gemini-pro", std::env::var("GOOGLE_API_KEY").ok()),
        ("Groq Mixtral", "groq::mixtral-8x7b-32768", std::env::var("GROQ_API_KEY").ok()),
    ];

    for (name, model, api_key) in providers {
        println!("🤖 {}:", name);

        let mut agent = Agent::new(name, model);

        // Configure with LLMConfig if API key is available
        if let Some(key) = api_key {
            let config = LLMConfig::new(model)
                .with_api_key(key)
                .with_temperature(0.7)
                .with_max_tokens(100);

            if let Ok(_) = agent.with_llm_config(config) {
                let task = TaskRequest::new("Explain the concept of 'ownership' in Rust in one sentence.");
                let response = agent.run(task).await;
                println!(" ✓ {:?}", response.result());
            }
        } else {
            // Try without explicit API key (for Ollama or env vars)
            let task = TaskRequest::new("Explain the concept of 'ownership' in Rust in one sentence.");
            let response = agent.run(task).await;
            println!(" ✓ {:?}", response.result());
        }

        println!();
    }

    Ok(())
}
More examples:
examples/09_test_groq.rs (line 18)
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🧪 Testing Groq LLM Provider with API Key\n");

    // Test 1: Using Agent::new_with_config() - Recommended approach
    println!("{}", "=".repeat(60));
    println!("Test 1: Using Agent::new_with_config() with explicit API key");
    println!("{}", "=".repeat(60));

    match std::env::var("GROQ_API_KEY") {
        Ok(api_key) => {
            println!("✓ GROQ_API_KEY found in environment\n");

            let groq_config = LLMConfig::new("groq::llama-3.3-70b-versatile")
                .with_api_key(api_key)
                .with_temperature(0.7)
                .with_max_tokens(100);

            match Agent::new_with_config("Groq Agent", groq_config) {
                Ok(mut agent) => {
                    println!("✓ Groq agent created successfully!\n");

                    println!("Running task: 'What is 2 + 2?'");
                    let task = TaskRequest::new("What is 2 + 2? Answer in one short sentence.");
                    println!("Sending request to Groq API...\n");

                    let response = agent.run(task).await;

                    println!("Response received:");
                    println!("================");
                    println!("{:?}", response.result());
                    println!("================\n");

                    println!("🎉 Test 1 completed successfully!");
                }
                Err(e) => {
                    println!("✗ Failed to create Groq agent: {}\n", e);
                }
            }
        }
        Err(_) => {
            println!("✗ GROQ_API_KEY not found in environment");
            println!(" Please set GROQ_API_KEY to run this test\n");
        }
    }

    // Test 2: Using Agent::new() with environment variable - Simpler approach
    println!("\n{}", "=".repeat(60));
    println!("Test 2: Using Agent::new() with environment variable");
    println!("{}", "=".repeat(60));

    if std::env::var("GROQ_API_KEY").is_ok() {
        println!("✓ GROQ_API_KEY is set, agent will use it automatically\n");

        match std::panic::catch_unwind(|| {
            Agent::new("Simple Groq Agent", "groq::llama-3.3-70b-versatile")
        }) {
            Ok(mut agent) => {
                println!("✓ Groq agent created successfully with auto API key!\n");

                println!("Running task: 'What is the capital of France?'");
                let task = TaskRequest::new("What is the capital of France? Answer in one word.");

                let response = agent.run(task).await;

                println!("Response received:");
                println!("================");
                println!("{:?}", response.result());
                println!("================\n");

                println!("🎉 Test 2 completed successfully!");
            }
            Err(_) => {
                println!("✗ Failed to create agent (unexpected panic)\n");
            }
        }
    } else {
        println!("✗ GROQ_API_KEY not set, skipping this test\n");
    }

    println!("\n{}", "=".repeat(60));
    println!("All tests completed!");
    println!("{}", "=".repeat(60));

    Ok(())
}
pub fn with_api_key(self, api_key: impl Into<String>) -> Self
Set API key
Examples found in repository: examples/08_llm_providers.rs (line 174) and examples/09_test_groq.rs (line 19); see the full listings under new above.
pub fn with_base_url(self, base_url: impl Into<String>) -> Self
Set base URL
Examples found in repository: examples/08_llm_providers.rs (line 248); see the full listing under new above.
pub fn with_max_tokens(self, max_tokens: u32) -> Self
Set max tokens
Examples found in repository: examples/08_llm_providers.rs (line 176) and examples/09_test_groq.rs (line 21); see the full listings under new above.
pub fn with_temperature(self, temperature: f32) -> Self
Set temperature
Examples found in repository: examples/08_llm_providers.rs (line 175) and examples/09_test_groq.rs (line 20); see the full listings under new above.
pub fn with_top_p(self, top_p: f32) -> Self
Set top_p
Examples found in repository: examples/08_llm_providers.rs (line 177); see the full listing under new above.
pub fn with_top_k(self, top_k: u32) -> Self
Set top_k
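No repository example exercises with_top_k; a minimal sketch combining it with the other sampling options (values are illustrative):

// Restrict sampling to the 40 most likely tokens on providers that support top_k.
let config = LLMConfig::new("ollama::llama3.2")
    .with_temperature(0.7)
    .with_top_p(0.9)
    .with_top_k(40);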
pub fn with_system(self, system: impl Into<String>) -> Self
Set system prompt
Examples found in repository: examples/08_llm_providers.rs (line 223); see the full listing under new above.
pub fn with_timeout_seconds(self, timeout: u64) -> Self
Set timeout in seconds
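A minimal sketch (the 30-second value is illustrative):

// Give up on requests that take longer than 30 seconds.
let config = LLMConfig::new("openai::gpt-4")
    .with_timeout_seconds(30);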
pub fn with_reasoning(self, enabled: bool) -> Self
Enable reasoning (for supported providers)
Examples found in repository: examples/08_llm_providers.rs (line 210); see the full listing under new above.
pub fn with_reasoning_effort(self, effort: impl Into<String>) -> Self
Set reasoning effort
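A sketch; the model id and the accepted effort strings here are assumptions, since valid values depend on the provider:

// Hypothetical reasoning model and effort value; pair with with_reasoning(true).
let config = LLMConfig::new("openai::o1")
    .with_reasoning(true)
    .with_reasoning_effort("high");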
pub fn with_deployment_id(self, deployment_id: impl Into<String>) -> Self
Set Azure deployment ID
Examples found in repository: examples/08_llm_providers.rs (line 246); see the full listing under new above.
pub fn with_api_version(self, api_version: impl Into<String>) -> Self
Set Azure API version
Examples found in repository: examples/08_llm_providers.rs (line 247); see the full listing under new above.
pub fn with_openai_web_search(self, enabled: bool) -> Self
Enable OpenAI web search
Examples found in repository: examples/08_llm_providers.rs (line 260); see the full listing under new above.
pub fn with_resilience(self, enabled: bool, attempts: usize) -> Self
Enable resilience with retry/backoff
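A minimal sketch: enable retries with three attempts; the resilient_base_delay_ms, resilient_max_delay_ms, and resilient_jitter fields above hold the backoff settings this works with.

// Retry transient failures up to 3 attempts with the crate's default backoff.
let config = LLMConfig::new("groq::llama-3.3-70b-versatile")
    .with_resilience(true, 3);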
Trait Implementations
impl<'de> Deserialize<'de> for LLMConfig
fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>
where
    __D: Deserializer<'de>,
Deserialize this value from the given Serde deserializer. Read more
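Because LLMConfig implements Deserialize, it can be loaded from a config file; a sketch assuming serde_json and the default serde field names shown above:

// Option fields may be omitted; only `model` is required here.
let json = r#"{"model": "openai::gpt-4", "temperature": 0.7, "max_tokens": 150}"#;
let config: LLMConfig = serde_json::from_str(json)?;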
impl From<LLMConfig> for LLMProviderConfig
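The From impl means an LLMConfig converts directly into the provider-level config type:

let provider_config: LLMProviderConfig = LLMConfig::new("openai::gpt-4").into();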
Auto Trait Implementations
impl Freeze for LLMConfig
impl RefUnwindSafe for LLMConfig
impl Send for LLMConfig
impl Sync for LLMConfig
impl Unpin for LLMConfig
impl UnwindSafe for LLMConfig
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
impl<T> CloneToUninit for T
where
    T: Clone,
impl<T> Instrument for T
fn instrument(self, span: Span) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true; converts self into a Right variant otherwise. Read more
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true; converts self into a Right variant otherwise. Read more