pub struct LlmAgent { /* private fields */ }
An agent that uses an LLM (Large Language Model) to process messages.
The LlmAgent integrates with the Ceylon Mesh agent system and delegates
message processing to an LLM. It supports:
- Configurable system prompts
- Model parameters (temperature, max tokens, etc.)
- Tool calling integration with the existing ToolInvoker
- Multiple LLM providers (OpenAI, Anthropic, Ollama, etc.)
- Optional memory module integration
§Examples
use runtime::llm::LlmAgent;

// Create agent with Ollama (local, no API key needed)
let agent = LlmAgent::builder("my_agent", "ollama::llama2")
    .with_system_prompt("You are a helpful assistant.")
    .build()
    .expect("Failed to create agent");

// Create agent with OpenAI
let agent = LlmAgent::builder("gpt_agent", "openai::gpt-4")
    .with_api_key(std::env::var("OPENAI_API_KEY").unwrap())
    .with_temperature(0.7)
    .build()
    .expect("Failed to create agent");
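// Anthropic is also listed as a supported provider. The model id below is
// illustrative only (an assumption, not taken from this page); any
// "provider::model" string your configured providers accept will work.
let agent = LlmAgent::builder("claude_agent", "anthropic::claude-3-opus")
    .with_api_key(std::env::var("ANTHROPIC_API_KEY").unwrap())
    .build()
    .expect("Failed to create agent");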
Implementations§

impl LlmAgent
pub fn builder(
    name: impl Into<String>,
    model: impl Into<String>,
) -> LlmAgentBuilder
Create a builder for constructing an LlmAgent.
§Arguments
- name - The name of the agent
- model - The model in "provider::model" format (e.g., "openai::gpt-4", "ollama::llama2")
§Examples
use runtime::llm::LlmAgent;

let agent = LlmAgent::builder("my_agent", "ollama::llama2")
    .build()
    .expect("Failed to create agent");

Examples found in repository
examples/llm_multi_agent_mesh.rs (line 46)
45 fn new(mesh: Arc<LocalMesh>) -> Result<Self> {
46 let llm_agent = LlmAgent::builder("researcher", "ollama::gemma3:latest")
47 .with_system_prompt(
48 "You are a research assistant. When given a topic, provide detailed \
49 information and key facts about it. Be thorough but focused. \
50 Limit your response to 3-4 paragraphs.",
51 )
52 .with_temperature(0.7)
53 .with_max_tokens(1024)
54 .build()?;
55
56 Ok(Self { llm_agent, mesh })
57 }
58}
59
60#[async_trait]
61impl Agent for ResearcherAgent {
62 fn name(&self) -> String {
63 "researcher".to_string()
64 }
65
66 async fn on_start(&mut self, _ctx: &mut AgentContext) -> Result<()> {
67 println!("[Researcher] Started and ready for research tasks.");
68 Ok(())
69 }
70
71 async fn on_message(&mut self, msg: Message, ctx: &mut AgentContext) -> Result<()> {
72 let topic = String::from_utf8_lossy(&msg.payload);
73 println!("\n[Researcher] Received research request: {}", topic);
74
75 // Use LLM to research the topic
76 let research_prompt = format!(
77 "Research the following topic and provide key information: {}",
78 topic
79 );
80
81 match self
82 .llm_agent
83 .send_message_and_get_response(&research_prompt, ctx)
84 .await
85 {
86 Ok(research_result) => {
87 println!("[Researcher] Research complete. Sending to summarizer...");
88
89 // Send research results to the summarizer
90 let research_msg =
91 Message::new("research_result", research_result.into_bytes(), self.name());
92 self.mesh.send(research_msg, "summarizer").await?;
93 }
94 Err(e) => {
95 eprintln!("[Researcher] Error during research: {}", e);
96 // Send error message to summarizer
97 let error_msg = Message::new(
98 "error",
99 format!("Research failed: {}", e).into_bytes(),
100 self.name(),
101 );
102 self.mesh.send(error_msg, "summarizer").await?;
103 }
104 }
105
106 Ok(())
107 }
108}
109
110// --- Summarizer Agent Wrapper ---
111// Wraps an LlmAgent to summarize research findings
112struct SummarizerAgent {
113 llm_agent: LlmAgent,
114 completion_notify: Arc<Notify>,
115}
116
117impl SummarizerAgent {
118 fn new(completion_notify: Arc<Notify>) -> Result<Self> {
119 let llm_agent = LlmAgent::builder("summarizer", "ollama::gemma3:latest")
120 .with_system_prompt(
121 "You are a summarization expert. When given research content, \
122 create a clear and concise summary with bullet points highlighting \
123 the most important facts. Keep the summary to 5-7 bullet points.",
124 )
125 .with_temperature(0.5)
126 .with_max_tokens(512)
127 .build()?;
128
129 Ok(Self {
130 llm_agent,
131 completion_notify,
132 })
133 }

More examples
examples/llm_ollama.rs (line 28)
23async fn main() -> Result<()> {
24 println!("=== Ceylon Runtime - LLM Ollama Example ===\n");
25
26 // Create an LLM agent using Ollama with gemma3:latest model
27 // No API key is required for Ollama (local inference)
28 let mut agent = LlmAgent::builder("gemma_agent", "ollama::gemma3:latest")
29 .with_system_prompt(
30 "You are a helpful AI assistant. Be concise and informative in your responses.",
31 )
32 .with_temperature(0.7)
33 .with_max_tokens(1024)
34 .build()?;
35
36 println!("✓ LLM Agent created successfully with Ollama gemma3:latest\n");
37
38 // Create an agent context for the conversation
39 let mut ctx = AgentContext::new("gemma_demo_mesh".to_string(), None);
40
41 // Example 1: Simple greeting
42 println!("--- Example 1: Simple Greeting ---");
43 let prompt1 = "Hello! What are you capable of?";
44 println!("User: {}", prompt1);
45
46 match agent.send_message_and_get_response(prompt1, &mut ctx).await {
47 Ok(response) => {
48 println!("Assistant: {}\n", response);
49 }
50 Err(e) => {
51 eprintln!("Error: {}\n", e);
52 eprintln!("Make sure Ollama is running and gemma3:latest model is available.");
53 eprintln!("Pull the model with: ollama pull gemma3:latest");
54 return Err(e);
55 }
56 }
57
58 // Example 2: Technical question
59 println!("--- Example 2: Technical Question ---");
60 let prompt2 = "Explain what an AI agent is in 2-3 sentences.";
61 println!("User: {}", prompt2);
62
63 match agent.send_message_and_get_response(prompt2, &mut ctx).await {
64 Ok(response) => {
65 println!("Assistant: {}\n", response);
66 }
67 Err(e) => {
68 eprintln!("Error: {}\n", e);
69 return Err(e);
70 }
71 }
72
73 // Example 3: Creative task
74 println!("--- Example 3: Creative Task ---");
75 let prompt3 = "Write a haiku about programming.";
76 println!("User: {}", prompt3);
77
78 match agent.send_message_and_get_response(prompt3, &mut ctx).await {
79 Ok(response) => {
80 println!("Assistant: {}\n", response);
81 }
82 Err(e) => {
83 eprintln!("Error: {}\n", e);
84 return Err(e);
85 }
86 }
87
88 println!("=== Example completed successfully! ===");
89 Ok(())
90 }

pub fn new_with_config(
    name: impl Into<String>,
    config: LLMConfig,
    system_prompt: impl Into<String>,
    memory: Option<Arc<dyn Memory>>,
) -> Result<Self>
Create an LlmAgent with a comprehensive LLMConfig.
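A minimal sketch of this constructor, assuming LLMConfig implements Default (an assumption; construct the config however the LLMConfig docs describe) and running inside a function that returns Result:

use runtime::llm::{LlmAgent, LLMConfig};

// Assumption: LLMConfig::default() exists; substitute the constructor the
// LLMConfig docs actually provide.
let config = LLMConfig::default();

let agent = LlmAgent::new_with_config(
    "configured_agent",
    config,
    "You are a helpful assistant.",
    None, // pass Some(...) with an Arc<dyn Memory> to enable memory
)?;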
pub fn with_react(&mut self, config: ReActConfig)

Enable ReAct (Reason + Act) mode.
pub async fn send_message_react(
    &mut self,
    message: impl Into<String>,
    ctx: &mut AgentContext,
) -> Result<ReActResult>

Send a message using ReAct reasoning mode.
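A minimal sketch of the ReAct flow, assuming ReActConfig implements Default (an assumption; see the ReActConfig docs) and an async context that returns Result. AgentContext is constructed the same way as in the llm_ollama.rs example above.

use runtime::llm::{LlmAgent, ReActConfig};

let mut agent = LlmAgent::builder("react_agent", "ollama::llama2")
    .with_system_prompt("You are a tool-using assistant.")
    .build()?;

// Enable ReAct mode before sending messages.
// Assumption: ReActConfig::default() exists.
agent.with_react(ReActConfig::default());

let mut ctx = AgentContext::new("react_mesh".to_string(), None);

// Runs the Reason + Act loop; ReActResult's fields are not documented on
// this page, so the result is only bound here.
let _result = agent.send_message_react("What is 17 * 24?", &mut ctx).await?;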
pub async fn send_message_and_get_response(
    &mut self,
    message: impl Into<String>,
    ctx: &mut AgentContext,
) -> Result<String>

Send a message and get the LLM's response.

This is a convenience method for Python bindings and direct usage. It processes the message with the LLM and returns the response text.
Examples found in repository
examples/llm_multi_agent_mesh.rs (line 83)
71 async fn on_message(&mut self, msg: Message, ctx: &mut AgentContext) -> Result<()> {
72 let topic = String::from_utf8_lossy(&msg.payload);
73 println!("\n[Researcher] Received research request: {}", topic);
74
75 // Use LLM to research the topic
76 let research_prompt = format!(
77 "Research the following topic and provide key information: {}",
78 topic
79 );
80
81 match self
82 .llm_agent
83 .send_message_and_get_response(&research_prompt, ctx)
84 .await
85 {
86 Ok(research_result) => {
87 println!("[Researcher] Research complete. Sending to summarizer...");
88
89 // Send research results to the summarizer
90 let research_msg =
91 Message::new("research_result", research_result.into_bytes(), self.name());
92 self.mesh.send(research_msg, "summarizer").await?;
93 }
94 Err(e) => {
95 eprintln!("[Researcher] Error during research: {}", e);
96 // Send error message to summarizer
97 let error_msg = Message::new(
98 "error",
99 format!("Research failed: {}", e).into_bytes(),
100 self.name(),
101 );
102 self.mesh.send(error_msg, "summarizer").await?;
103 }
104 }
105
106 Ok(())
107 }
108}
109
110// --- Summarizer Agent Wrapper ---
111// Wraps an LlmAgent to summarize research findings
112struct SummarizerAgent {
113 llm_agent: LlmAgent,
114 completion_notify: Arc<Notify>,
115}
116
117impl SummarizerAgent {
118 fn new(completion_notify: Arc<Notify>) -> Result<Self> {
119 let llm_agent = LlmAgent::builder("summarizer", "ollama::gemma3:latest")
120 .with_system_prompt(
121 "You are a summarization expert. When given research content, \
122 create a clear and concise summary with bullet points highlighting \
123 the most important facts. Keep the summary to 5-7 bullet points.",
124 )
125 .with_temperature(0.5)
126 .with_max_tokens(512)
127 .build()?;
128
129 Ok(Self {
130 llm_agent,
131 completion_notify,
132 })
133 }
134}
135
136#[async_trait]
137impl Agent for SummarizerAgent {
138 fn name(&self) -> String {
139 "summarizer".to_string()
140 }
141
142 async fn on_start(&mut self, _ctx: &mut AgentContext) -> Result<()> {
143 println!("[Summarizer] Started and ready to summarize.");
144 Ok(())
145 }
146
147 async fn on_message(&mut self, msg: Message, ctx: &mut AgentContext) -> Result<()> {
148 let content = String::from_utf8_lossy(&msg.payload);
149 println!("\n[Summarizer] Received content from {}", msg.sender);
150
151 if msg.topic == "error" {
152 println!("[Summarizer] Received error: {}", content);
153 self.completion_notify.notify_one();
154 return Ok(());
155 }
156
157 // Use LLM to summarize the research
158 let summary_prompt = format!(
159 "Please summarize the following research content into clear bullet points:\n\n{}",
160 content
161 );
162
163 match self
164 .llm_agent
165 .send_message_and_get_response(&summary_prompt, ctx)
166 .await
167 {
168 Ok(summary) => {
169 println!("\n========================================");
170 println!(" FINAL SUMMARY");
171 println!("========================================\n");
172 println!("{}", summary);
173 println!("\n========================================\n");
174 }
175 Err(e) => {
176 eprintln!("[Summarizer] Error during summarization: {}", e);
177 }
178 }
179
180 // Signal completion
181 self.completion_notify.notify_one();
182 Ok(())
183 }

More examples
examples/llm_ollama.rs (line 46): the same listing shown under builder above.

pub fn last_response(&self) -> Option<String>
Get the last assistant response from the conversation history.
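A short sketch (inside an async function returning Result, with agent and ctx set up as in the examples above):

agent.send_message_and_get_response("Hello!", &mut ctx).await?;

// Returns None if no assistant response has been recorded yet.
if let Some(last) = agent.last_response() {
    println!("Last assistant reply: {}", last);
}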
Trait Implementations§
impl Agent for LlmAgent
fn on_message<'life0, 'life1, 'async_trait>(
    &'life0 mut self,
    msg: CeylonMessage,
    ctx: &'life1 mut AgentContext,
) -> Pin<Box<dyn Future<Output = Result<()>> + Send + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
    'life1: 'async_trait,

Called when a binary message is received.
fn on_generic_message<'life0, 'life1, 'async_trait>(
    &'life0 mut self,
    msg: GenericMessage,
    ctx: &'life1 mut AgentContext,
) -> Pin<Box<dyn Future<Output = Result<GenericResponse>> + Send + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
    'life1: 'async_trait,

Handle a generic string message and return a response.
fn tool_invoker(&self) -> Option<&ToolInvoker>

Get the tool invoker for this agent (if it has actions).
fn tool_invoker_mut(&mut self) -> Option<&mut ToolInvoker>

Get mutable tool invoker for dynamic tool registration.
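A minimal sketch of the access pattern; ToolInvoker's registration methods are not shown on this page, so none are assumed here:

// Returns None when the agent has no tool invoker configured.
if let Some(invoker) = agent.tool_invoker_mut() {
    // Register or mutate tools here via ToolInvoker's own API.
    let _ = invoker;
}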
Auto Trait Implementations§
impl Freeze for LlmAgent
impl !RefUnwindSafe for LlmAgent
impl Send for LlmAgent
impl Sync for LlmAgent
impl Unpin for LlmAgent
impl !UnwindSafe for LlmAgent
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.

impl<T> Instrument for T

fn instrument(self, span: Span) -> Instrumented<Self>

fn in_current_span(self) -> Instrumented<Self>

impl<T> IntoEither for T

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise.

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise.