use ceylon_runtime::core::agent::AgentContext;
use ceylon_runtime::core::error::Result;
use ceylon_runtime::llm::LlmAgent;

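// Entry point: build an Ollama-backed agent, then run three sample prompts.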
#[tokio::main]
async fn main() -> Result<()> {
    println!("=== Ceylon Runtime - LLM Ollama Example ===\n");

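    // The model identifier pairs a provider prefix with a model tag
    // ("ollama::gemma3:latest"); adjust the tag if you pulled a different build.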
    let mut agent = LlmAgent::builder("gemma_agent", "ollama::gemma3:latest")
        .with_system_prompt(
            "You are a helpful AI assistant. Be concise and informative in your responses.",
        )
        .with_temperature(0.7)
        .with_max_tokens(1024)
        .build()?;

    println!("✓ LLM Agent created successfully with Ollama gemma3:latest\n");

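    // A single mutable context is reused for all three prompts below, so each
    // exchange runs against the same agent state.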
    let mut ctx = AgentContext::new("gemma_demo_mesh".to_string(), None);

    println!("--- Example 1: Simple Greeting ---");
    let prompt1 = "Hello! What are you capable of?";
    println!("User: {}", prompt1);

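    // The first request doubles as a connectivity check: if Ollama is not
    // running or the model has not been pulled, print setup hints and bail out.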
    match agent.send_message_and_get_response(prompt1, &mut ctx).await {
        Ok(response) => {
            println!("Assistant: {}\n", response);
        }
        Err(e) => {
            eprintln!("Error: {}\n", e);
            eprintln!("Make sure Ollama is running and the gemma3:latest model is available.");
            eprintln!("Pull the model with: ollama pull gemma3:latest");
            return Err(e);
        }
    }

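    // Later requests reuse the same agent and context; failures here are
    // unexpected, so the error is returned without extra hints.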
58 println!("--- Example 2: Technical Question ---");
60 let prompt2 = "Explain what an AI agent is in 2-3 sentences.";
61 println!("User: {}", prompt2);
62
63 match agent.send_message_and_get_response(prompt2, &mut ctx).await {
64 Ok(response) => {
65 println!("Assistant: {}\n", response);
66 }
67 Err(e) => {
68 eprintln!("Error: {}\n", e);
69 return Err(e);
70 }
71 }
72
73 println!("--- Example 3: Creative Task ---");
75 let prompt3 = "Write a haiku about programming.";
76 println!("User: {}", prompt3);
77
78 match agent.send_message_and_get_response(prompt3, &mut ctx).await {
79 Ok(response) => {
80 println!("Assistant: {}\n", response);
81 }
82 Err(e) => {
83 eprintln!("Error: {}\n", e);
84 return Err(e);
85 }
86 }
87
88 println!("=== Example completed successfully! ===");
89 Ok(())
90}