pub struct ChatMessage {
pub role: Role,
pub content: String,
pub name: Option<String>,
pub tool_calls: Option<Vec<ToolCall>>,
pub tool_call_id: Option<String>,
}
Expand description
Represents a single message in a chat conversation.
Fields§
role: Role — The role of the message sender.
content: String — The content of the message.
name: Option<String> — The name of the message sender.
tool_calls: Option<Vec<ToolCall>> — Any tool calls requested by the assistant.
tool_call_id: Option<String> — The ID of the tool call this message is a response to.
Implementations§
Source §
impl ChatMessage
impl ChatMessage
Source
pub fn system(content: impl Into<String>) -> Self
pub fn system(content: impl Into<String>) -> Self
Creates a new system message.
Examples found in repository?
examples/direct_llm_usage.rs (line 68)
52async fn simple_call() -> helios_engine::Result<()> {
53 // Create a configuration for the LLM.
54 let llm_config = LLMConfig {
55 model_name: "gpt-3.5-turbo".to_string(),
56 base_url: "https://api.openai.com/v1".to_string(),
57 api_key: std::env::var("OPENAI_API_KEY")
58 .unwrap_or_else(|_| "your-api-key-here".to_string()),
59 temperature: 0.7,
60 max_tokens: 2048,
61 };
62
63 // Create a new LLM client.
64 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
65
66 // Prepare the messages to send to the LLM.
67 let messages = vec![
68 ChatMessage::system("You are a helpful assistant that gives concise answers."),
69 ChatMessage::user("What is the capital of France? Answer in one sentence."),
70 ];
71
72 // Make the call to the LLM.
73 println!("Sending request...");
74 match client.chat(messages, None, None, None, None).await {
75 Ok(response) => {
76 println!("✓ Response: {}", response.content);
77 }
78 Err(e) => {
79 println!("✗ Error: {}", e);
80 println!(" (Make sure to set OPENAI_API_KEY environment variable)");
81 }
82 }
83
84 Ok(())
85}
More examples
examples/streaming_chat.rs (line 36)
14async fn main() -> helios_engine::Result<()> {
15 println!("🚀 Helios Engine - Streaming Example");
16 println!("=====================================\n");
17
18 // Set up the LLM configuration.
19 let llm_config = LLMConfig {
20 model_name: "gpt-3.5-turbo".to_string(),
21 base_url: "https://api.openai.com/v1".to_string(),
22 api_key: std::env::var("OPENAI_API_KEY")
23 .unwrap_or_else(|_| "your-api-key-here".to_string()),
24 temperature: 0.7,
25 max_tokens: 2048,
26 };
27
28 // Create a new LLM client.
29 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
30
31 // --- Example 1: Simple streaming response ---
32 println!("Example 1: Simple Streaming Response");
33 println!("======================================\n");
34
35 let messages = vec![
36 ChatMessage::system("You are a helpful assistant."),
37 ChatMessage::user("Write a short poem about coding."),
38 ];
39
40 print!("Assistant: ");
41 io::stdout().flush()?;
42
43 // Stream the response from the model, printing each chunk as it arrives.
44 let response = client
45 .chat_stream(messages, None, None, None, None, |chunk| {
46 print!("{}", chunk);
47 io::stdout().flush().unwrap();
48 })
49 .await?;
50
51 println!("\n\n");
52
53 // --- Example 2: Interactive streaming chat ---
54 println!("Example 2: Interactive Streaming Chat");
55 println!("======================================\n");
56
57 let mut session = ChatSession::new().with_system_prompt("You are a helpful coding assistant.");
58
59 let questions = vec![
60 "What is Rust?",
61 "What are its main benefits?",
62 "Show me a simple example.",
63 ];
64
65 for question in questions {
66 println!("User: {}", question);
67 session.add_user_message(question);
68
69 print!("Assistant: ");
70 io::stdout().flush()?;
71
72 // Stream the response, maintaining the conversation context.
73 let response = client
74 .chat_stream(session.get_messages(), None, None, None, None, |chunk| {
75 print!("{}", chunk);
76 io::stdout().flush().unwrap();
77 })
78 .await?;
79
80 session.add_assistant_message(&response.content);
81 println!("\n");
82 }
83
84 // --- Example 3: Streaming with thinking tags ---
85 println!("\nExample 3: Streaming with Thinking Tags");
86 println!("=========================================\n");
87 println!("When using models that support thinking tags (like o1),");
88 println!("you can detect and display them during streaming.\n");
89
90 /// A helper struct to track and display thinking tags in streamed responses.
91 struct ThinkingTracker {
92 in_thinking: bool,
93 thinking_buffer: String,
94 }
95
96 impl ThinkingTracker {
97 /// Creates a new `ThinkingTracker`.
98 fn new() -> Self {
99 Self {
100 in_thinking: false,
101 thinking_buffer: String::new(),
102 }
103 }
104
105 /// Processes a chunk of a streamed response and returns the processed output.
106 fn process_chunk(&mut self, chunk: &str) -> String {
107 let mut output = String::new();
108 let mut chars = chunk.chars().peekable();
109
110 while let Some(c) = chars.next() {
111 if c == '<' {
112 let remaining: String = chars.clone().collect();
113 if remaining.starts_with("thinking>") {
114 self.in_thinking = true;
115 self.thinking_buffer.clear();
116 output.push_str("\n💭 [Thinking");
117 for _ in 0..9 {
118 chars.next();
119 }
120 continue;
121 } else if remaining.starts_with("/thinking>") {
122 self.in_thinking = false;
123 output.push_str("]\n");
124 for _ in 0..10 {
125 chars.next();
126 }
127 continue;
128 }
129 }
130
131 if self.in_thinking {
132 self.thinking_buffer.push(c);
133 if self.thinking_buffer.len() % 3 == 0 {
134 output.push('.');
135 }
136 } else {
137 output.push(c);
138 }
139 }
140
141 output
142 }
143 }
144
145 let messages = vec![ChatMessage::user(
146 "Solve this problem: What is 15 * 234 + 89?",
147 )];
148
149 let mut tracker = ThinkingTracker::new();
150 print!("Assistant: ");
151 io::stdout().flush()?;
152
153 // Stream the response, processing thinking tags as they arrive.
154 let _response = client
155 .chat_stream(messages, None, None, None, None, |chunk| {
156 let output = tracker.process_chunk(chunk);
157 print!("{}", output);
158 io::stdout().flush().unwrap();
159 })
160 .await?;
161
162 println!("\n\n Streaming examples completed!");
163 println!("\nKey benefits of streaming:");
164 println!(" • Real-time response display");
165 println!(" • Better user experience for long responses");
166 println!(" • Ability to show thinking/reasoning process");
167 println!(" • Early cancellation possible (future feature)");
168
169 Ok(())
170}
Source
pub fn sys(content: impl Into<String>) -> Self
pub fn sys(content: impl Into<String>) -> Self
Creates a new system message. Alias for system().
Examples found in repository?
examples/ultra_simple.rs (line 71)
9async fn main() -> helios_engine::Result<()> {
10 println!("🚀 Ultra Simple Helios Example\n");
11
12 // ========== SIMPLEST AGENT CREATION ==========
13 println!("1️⃣ Creating an agent - shortest possible syntax:\n");
14
15 // One-liner: Create agent with auto config
16 let mut agent = Agent::builder("Helper")
17 .auto_config()
18 .prompt("You are helpful and concise.")
19 .build()
20 .await?;
21
22 println!("✓ Agent created!\n");
23
24 // ========== SIMPLEST CHAT ==========
25 println!("2️⃣ Asking questions - simplest possible:\n");
26
27 // Use .ask() instead of .chat() for more natural syntax
28 let answer = agent.ask("What is 2+2?").await?;
29 println!("Q: What is 2+2?\nA: {}\n", answer);
30
31 // ========== SIMPLEST CONFIG ==========
32 println!("3️⃣ Creating config with shortest syntax:\n");
33
34 // Ultra-short config creation
35 let _config = Config::builder()
36 .m("gpt-4") // .m() is shorthand for .model()
37 .key("your-api-key") // .key() is shorthand for .api_key()
38 .temp(0.8) // .temp() is shorthand for .temperature()
39 .tokens(1024) // .tokens() is shorthand for .max_tokens()
40 .build();
41
42 println!("✓ Config created with ultra-short syntax!\n");
43
44 // ========== SIMPLEST AGENT WITH TOOLS ==========
45 println!("4️⃣ Agent with tools - simplest way:\n");
46
47 let mut calc_agent = Agent::builder("Calculator")
48 .auto_config()
49 .prompt("You are a math expert.")
50 .with_tool(Box::new(CalculatorTool)) // Add single tool
51 .build()
52 .await?;
53
54 let result = calc_agent.ask("Calculate 15 * 7 + 5").await?;
55 println!("Q: Calculate 15 * 7 + 5\nA: {}\n", result);
56
57 // ========== SIMPLEST QUICK AGENT ==========
58 println!("5️⃣ Quick agent - one method call:\n");
59
60 // Agent::quick() creates agent in ONE LINE with auto config!
61 let mut quick_agent = Agent::quick("QuickBot").await?;
62 let quick_answer = quick_agent.ask("Say hello!").await?;
63 println!("Response: {}\n", quick_answer);
64
65 // ========== SIMPLEST CHAT MESSAGES ==========
66 println!("6️⃣ Creating messages - super short syntax:\n");
67
68 use helios_engine::ChatMessage;
69
70 // Short aliases for message creation
71 let _sys_msg = ChatMessage::sys("You are helpful"); // .sys() not .system()
72 let _user_msg = ChatMessage::msg("Hello there"); // .msg() not .user()
73 let _reply_msg = ChatMessage::reply("Hi! How can I help?"); // .reply() not .assistant()
74
75 println!("✓ Messages created with ultra-short syntax!\n");
76
77 // ========== SHORTEST AUTOFOREST ==========
78 println!("7️⃣ AutoForest - simplest multi-agent orchestration:\n");
79
80 use helios_engine::AutoForest;
81
82 let mut forest = AutoForest::new(Config::builder().m("gpt-4").build())
83 .with_tools(vec![Box::new(CalculatorTool)])
84 .build()
85 .await?;
86
87 // Use .run() for shortest syntax
88 let forest_result = forest.run("Analyze these numbers: 10, 20, 30, 40").await?;
89 println!("Forest Result:\n{}\n", forest_result);
90
91 // ========== COMPARISON TABLE ==========
92 println!("📊 Syntax Comparison - Short vs Long:\n");
93 println!("┌─────────────────────┬──────────────────────┬─────────────────┐");
94 println!("│ Operation │ Short Syntax │ Long Syntax │");
95 println!("├─────────────────────┼──────────────────────┼─────────────────┤");
96 println!("│ Create Agent │ Agent::quick() │ Agent::builder()│");
97 println!("│ Ask Question │ .ask() │ .chat() │");
98 println!("│ System Prompt │ .prompt() │ .system_prompt() │");
99 println!("│ Config Model │ .m() │ .model() │");
100 println!("│ Config Key │ .key() │ .api_key() │");
101 println!("│ Config Temp │ .temp() │ .temperature() │");
102 println!("│ Config Tokens │ .tokens() │ .max_tokens() │");
103 println!("│ System Message │ ChatMessage::sys() │ ChatMessage::system()");
104 println!("│ User Message │ ChatMessage::msg() │ ChatMessage::user()");
105 println!("│ Assistant Message │ ChatMessage::reply() │ ChatMessage::assistant()");
106 println!("│ AutoForest Execute │ .run() │ .execute_task() │");
107 println!("└─────────────────────┴──────────────────────┴─────────────────┘\n");
108
109 println!("✅ All examples completed!");
110 println!("💡 Tip: Mix and match short and long syntax based on your preference!");
111
112 Ok(())
113}
Source
pub fn user(content: impl Into<String>) -> Self
pub fn user(content: impl Into<String>) -> Self
Creates a new user message.
Examples found in repository?
examples/direct_llm_usage.rs (line 69)
52async fn simple_call() -> helios_engine::Result<()> {
53 // Create a configuration for the LLM.
54 let llm_config = LLMConfig {
55 model_name: "gpt-3.5-turbo".to_string(),
56 base_url: "https://api.openai.com/v1".to_string(),
57 api_key: std::env::var("OPENAI_API_KEY")
58 .unwrap_or_else(|_| "your-api-key-here".to_string()),
59 temperature: 0.7,
60 max_tokens: 2048,
61 };
62
63 // Create a new LLM client.
64 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
65
66 // Prepare the messages to send to the LLM.
67 let messages = vec![
68 ChatMessage::system("You are a helpful assistant that gives concise answers."),
69 ChatMessage::user("What is the capital of France? Answer in one sentence."),
70 ];
71
72 // Make the call to the LLM.
73 println!("Sending request...");
74 match client.chat(messages, None, None, None, None).await {
75 Ok(response) => {
76 println!("✓ Response: {}", response.content);
77 }
78 Err(e) => {
79 println!("✗ Error: {}", e);
80 println!(" (Make sure to set OPENAI_API_KEY environment variable)");
81 }
82 }
83
84 Ok(())
85}
More examples
examples/streaming_chat.rs (line 37)
14async fn main() -> helios_engine::Result<()> {
15 println!("🚀 Helios Engine - Streaming Example");
16 println!("=====================================\n");
17
18 // Set up the LLM configuration.
19 let llm_config = LLMConfig {
20 model_name: "gpt-3.5-turbo".to_string(),
21 base_url: "https://api.openai.com/v1".to_string(),
22 api_key: std::env::var("OPENAI_API_KEY")
23 .unwrap_or_else(|_| "your-api-key-here".to_string()),
24 temperature: 0.7,
25 max_tokens: 2048,
26 };
27
28 // Create a new LLM client.
29 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
30
31 // --- Example 1: Simple streaming response ---
32 println!("Example 1: Simple Streaming Response");
33 println!("======================================\n");
34
35 let messages = vec![
36 ChatMessage::system("You are a helpful assistant."),
37 ChatMessage::user("Write a short poem about coding."),
38 ];
39
40 print!("Assistant: ");
41 io::stdout().flush()?;
42
43 // Stream the response from the model, printing each chunk as it arrives.
44 let response = client
45 .chat_stream(messages, None, None, None, None, |chunk| {
46 print!("{}", chunk);
47 io::stdout().flush().unwrap();
48 })
49 .await?;
50
51 println!("\n\n");
52
53 // --- Example 2: Interactive streaming chat ---
54 println!("Example 2: Interactive Streaming Chat");
55 println!("======================================\n");
56
57 let mut session = ChatSession::new().with_system_prompt("You are a helpful coding assistant.");
58
59 let questions = vec![
60 "What is Rust?",
61 "What are its main benefits?",
62 "Show me a simple example.",
63 ];
64
65 for question in questions {
66 println!("User: {}", question);
67 session.add_user_message(question);
68
69 print!("Assistant: ");
70 io::stdout().flush()?;
71
72 // Stream the response, maintaining the conversation context.
73 let response = client
74 .chat_stream(session.get_messages(), None, None, None, None, |chunk| {
75 print!("{}", chunk);
76 io::stdout().flush().unwrap();
77 })
78 .await?;
79
80 session.add_assistant_message(&response.content);
81 println!("\n");
82 }
83
84 // --- Example 3: Streaming with thinking tags ---
85 println!("\nExample 3: Streaming with Thinking Tags");
86 println!("=========================================\n");
87 println!("When using models that support thinking tags (like o1),");
88 println!("you can detect and display them during streaming.\n");
89
90 /// A helper struct to track and display thinking tags in streamed responses.
91 struct ThinkingTracker {
92 in_thinking: bool,
93 thinking_buffer: String,
94 }
95
96 impl ThinkingTracker {
97 /// Creates a new `ThinkingTracker`.
98 fn new() -> Self {
99 Self {
100 in_thinking: false,
101 thinking_buffer: String::new(),
102 }
103 }
104
105 /// Processes a chunk of a streamed response and returns the processed output.
106 fn process_chunk(&mut self, chunk: &str) -> String {
107 let mut output = String::new();
108 let mut chars = chunk.chars().peekable();
109
110 while let Some(c) = chars.next() {
111 if c == '<' {
112 let remaining: String = chars.clone().collect();
113 if remaining.starts_with("thinking>") {
114 self.in_thinking = true;
115 self.thinking_buffer.clear();
116 output.push_str("\n💭 [Thinking");
117 for _ in 0..9 {
118 chars.next();
119 }
120 continue;
121 } else if remaining.starts_with("/thinking>") {
122 self.in_thinking = false;
123 output.push_str("]\n");
124 for _ in 0..10 {
125 chars.next();
126 }
127 continue;
128 }
129 }
130
131 if self.in_thinking {
132 self.thinking_buffer.push(c);
133 if self.thinking_buffer.len() % 3 == 0 {
134 output.push('.');
135 }
136 } else {
137 output.push(c);
138 }
139 }
140
141 output
142 }
143 }
144
145 let messages = vec![ChatMessage::user(
146 "Solve this problem: What is 15 * 234 + 89?",
147 )];
148
149 let mut tracker = ThinkingTracker::new();
150 print!("Assistant: ");
151 io::stdout().flush()?;
152
153 // Stream the response, processing thinking tags as they arrive.
154 let _response = client
155 .chat_stream(messages, None, None, None, None, |chunk| {
156 let output = tracker.process_chunk(chunk);
157 print!("{}", output);
158 io::stdout().flush().unwrap();
159 })
160 .await?;
161
162 println!("\n\n Streaming examples completed!");
163 println!("\nKey benefits of streaming:");
164 println!(" • Real-time response display");
165 println!(" • Better user experience for long responses");
166 println!(" • Ability to show thinking/reasoning process");
167 println!(" • Early cancellation possible (future feature)");
168
169 Ok(())
170}
Source
pub fn msg(content: impl Into<String>) -> Self
pub fn msg(content: impl Into<String>) -> Self
Creates a new user message. Alias for user().
Examples found in repository?
examples/ultra_simple.rs (line 72)
9async fn main() -> helios_engine::Result<()> {
10 println!("🚀 Ultra Simple Helios Example\n");
11
12 // ========== SIMPLEST AGENT CREATION ==========
13 println!("1️⃣ Creating an agent - shortest possible syntax:\n");
14
15 // One-liner: Create agent with auto config
16 let mut agent = Agent::builder("Helper")
17 .auto_config()
18 .prompt("You are helpful and concise.")
19 .build()
20 .await?;
21
22 println!("✓ Agent created!\n");
23
24 // ========== SIMPLEST CHAT ==========
25 println!("2️⃣ Asking questions - simplest possible:\n");
26
27 // Use .ask() instead of .chat() for more natural syntax
28 let answer = agent.ask("What is 2+2?").await?;
29 println!("Q: What is 2+2?\nA: {}\n", answer);
30
31 // ========== SIMPLEST CONFIG ==========
32 println!("3️⃣ Creating config with shortest syntax:\n");
33
34 // Ultra-short config creation
35 let _config = Config::builder()
36 .m("gpt-4") // .m() is shorthand for .model()
37 .key("your-api-key") // .key() is shorthand for .api_key()
38 .temp(0.8) // .temp() is shorthand for .temperature()
39 .tokens(1024) // .tokens() is shorthand for .max_tokens()
40 .build();
41
42 println!("✓ Config created with ultra-short syntax!\n");
43
44 // ========== SIMPLEST AGENT WITH TOOLS ==========
45 println!("4️⃣ Agent with tools - simplest way:\n");
46
47 let mut calc_agent = Agent::builder("Calculator")
48 .auto_config()
49 .prompt("You are a math expert.")
50 .with_tool(Box::new(CalculatorTool)) // Add single tool
51 .build()
52 .await?;
53
54 let result = calc_agent.ask("Calculate 15 * 7 + 5").await?;
55 println!("Q: Calculate 15 * 7 + 5\nA: {}\n", result);
56
57 // ========== SIMPLEST QUICK AGENT ==========
58 println!("5️⃣ Quick agent - one method call:\n");
59
60 // Agent::quick() creates agent in ONE LINE with auto config!
61 let mut quick_agent = Agent::quick("QuickBot").await?;
62 let quick_answer = quick_agent.ask("Say hello!").await?;
63 println!("Response: {}\n", quick_answer);
64
65 // ========== SIMPLEST CHAT MESSAGES ==========
66 println!("6️⃣ Creating messages - super short syntax:\n");
67
68 use helios_engine::ChatMessage;
69
70 // Short aliases for message creation
71 let _sys_msg = ChatMessage::sys("You are helpful"); // .sys() not .system()
72 let _user_msg = ChatMessage::msg("Hello there"); // .msg() not .user()
73 let _reply_msg = ChatMessage::reply("Hi! How can I help?"); // .reply() not .assistant()
74
75 println!("✓ Messages created with ultra-short syntax!\n");
76
77 // ========== SHORTEST AUTOFOREST ==========
78 println!("7️⃣ AutoForest - simplest multi-agent orchestration:\n");
79
80 use helios_engine::AutoForest;
81
82 let mut forest = AutoForest::new(Config::builder().m("gpt-4").build())
83 .with_tools(vec![Box::new(CalculatorTool)])
84 .build()
85 .await?;
86
87 // Use .run() for shortest syntax
88 let forest_result = forest.run("Analyze these numbers: 10, 20, 30, 40").await?;
89 println!("Forest Result:\n{}\n", forest_result);
90
91 // ========== COMPARISON TABLE ==========
92 println!("📊 Syntax Comparison - Short vs Long:\n");
93 println!("┌─────────────────────┬──────────────────────┬─────────────────┐");
94 println!("│ Operation │ Short Syntax │ Long Syntax │");
95 println!("├─────────────────────┼──────────────────────┼─────────────────┤");
96 println!("│ Create Agent │ Agent::quick() │ Agent::builder()│");
97 println!("│ Ask Question │ .ask() │ .chat() │");
98 println!("│ System Prompt │ .prompt() │ .system_prompt() │");
99 println!("│ Config Model │ .m() │ .model() │");
100 println!("│ Config Key │ .key() │ .api_key() │");
101 println!("│ Config Temp │ .temp() │ .temperature() │");
102 println!("│ Config Tokens │ .tokens() │ .max_tokens() │");
103 println!("│ System Message │ ChatMessage::sys() │ ChatMessage::system()");
104 println!("│ User Message │ ChatMessage::msg() │ ChatMessage::user()");
105 println!("│ Assistant Message │ ChatMessage::reply() │ ChatMessage::assistant()");
106 println!("│ AutoForest Execute │ .run() │ .execute_task() │");
107 println!("└─────────────────────┴──────────────────────┴─────────────────┘\n");
108
109 println!("✅ All examples completed!");
110 println!("💡 Tip: Mix and match short and long syntax based on your preference!");
111
112 Ok(())
113}
Source
pub fn reply(content: impl Into<String>) -> Self
pub fn reply(content: impl Into<String>) -> Self
Creates a new assistant message. Alias for assistant().
Examples found in repository?
examples/ultra_simple.rs (line 73)
9async fn main() -> helios_engine::Result<()> {
10 println!("🚀 Ultra Simple Helios Example\n");
11
12 // ========== SIMPLEST AGENT CREATION ==========
13 println!("1️⃣ Creating an agent - shortest possible syntax:\n");
14
15 // One-liner: Create agent with auto config
16 let mut agent = Agent::builder("Helper")
17 .auto_config()
18 .prompt("You are helpful and concise.")
19 .build()
20 .await?;
21
22 println!("✓ Agent created!\n");
23
24 // ========== SIMPLEST CHAT ==========
25 println!("2️⃣ Asking questions - simplest possible:\n");
26
27 // Use .ask() instead of .chat() for more natural syntax
28 let answer = agent.ask("What is 2+2?").await?;
29 println!("Q: What is 2+2?\nA: {}\n", answer);
30
31 // ========== SIMPLEST CONFIG ==========
32 println!("3️⃣ Creating config with shortest syntax:\n");
33
34 // Ultra-short config creation
35 let _config = Config::builder()
36 .m("gpt-4") // .m() is shorthand for .model()
37 .key("your-api-key") // .key() is shorthand for .api_key()
38 .temp(0.8) // .temp() is shorthand for .temperature()
39 .tokens(1024) // .tokens() is shorthand for .max_tokens()
40 .build();
41
42 println!("✓ Config created with ultra-short syntax!\n");
43
44 // ========== SIMPLEST AGENT WITH TOOLS ==========
45 println!("4️⃣ Agent with tools - simplest way:\n");
46
47 let mut calc_agent = Agent::builder("Calculator")
48 .auto_config()
49 .prompt("You are a math expert.")
50 .with_tool(Box::new(CalculatorTool)) // Add single tool
51 .build()
52 .await?;
53
54 let result = calc_agent.ask("Calculate 15 * 7 + 5").await?;
55 println!("Q: Calculate 15 * 7 + 5\nA: {}\n", result);
56
57 // ========== SIMPLEST QUICK AGENT ==========
58 println!("5️⃣ Quick agent - one method call:\n");
59
60 // Agent::quick() creates agent in ONE LINE with auto config!
61 let mut quick_agent = Agent::quick("QuickBot").await?;
62 let quick_answer = quick_agent.ask("Say hello!").await?;
63 println!("Response: {}\n", quick_answer);
64
65 // ========== SIMPLEST CHAT MESSAGES ==========
66 println!("6️⃣ Creating messages - super short syntax:\n");
67
68 use helios_engine::ChatMessage;
69
70 // Short aliases for message creation
71 let _sys_msg = ChatMessage::sys("You are helpful"); // .sys() not .system()
72 let _user_msg = ChatMessage::msg("Hello there"); // .msg() not .user()
73 let _reply_msg = ChatMessage::reply("Hi! How can I help?"); // .reply() not .assistant()
74
75 println!("✓ Messages created with ultra-short syntax!\n");
76
77 // ========== SHORTEST AUTOFOREST ==========
78 println!("7️⃣ AutoForest - simplest multi-agent orchestration:\n");
79
80 use helios_engine::AutoForest;
81
82 let mut forest = AutoForest::new(Config::builder().m("gpt-4").build())
83 .with_tools(vec![Box::new(CalculatorTool)])
84 .build()
85 .await?;
86
87 // Use .run() for shortest syntax
88 let forest_result = forest.run("Analyze these numbers: 10, 20, 30, 40").await?;
89 println!("Forest Result:\n{}\n", forest_result);
90
91 // ========== COMPARISON TABLE ==========
92 println!("📊 Syntax Comparison - Short vs Long:\n");
93 println!("┌─────────────────────┬──────────────────────┬─────────────────┐");
94 println!("│ Operation │ Short Syntax │ Long Syntax │");
95 println!("├─────────────────────┼──────────────────────┼─────────────────┤");
96 println!("│ Create Agent │ Agent::quick() │ Agent::builder()│");
97 println!("│ Ask Question │ .ask() │ .chat() │");
98 println!("│ System Prompt │ .prompt() │ .system_prompt() │");
99 println!("│ Config Model │ .m() │ .model() │");
100 println!("│ Config Key │ .key() │ .api_key() │");
101 println!("│ Config Temp │ .temp() │ .temperature() │");
102 println!("│ Config Tokens │ .tokens() │ .max_tokens() │");
103 println!("│ System Message │ ChatMessage::sys() │ ChatMessage::system()");
104 println!("│ User Message │ ChatMessage::msg() │ ChatMessage::user()");
105 println!("│ Assistant Message │ ChatMessage::reply() │ ChatMessage::assistant()");
106 println!("│ AutoForest Execute │ .run() │ .execute_task() │");
107 println!("└─────────────────────┴──────────────────────┴─────────────────┘\n");
108
109 println!("✅ All examples completed!");
110 println!("💡 Tip: Mix and match short and long syntax based on your preference!");
111
112 Ok(())
113}
Trait Implementations§
Source §
impl Clone for ChatMessage
impl Clone for ChatMessage
Source §
fn clone(&self) -> ChatMessage
fn clone(&self) -> ChatMessage
Returns a duplicate of the value. Read more
1.0.0 · Source §
fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more
Source §
impl Debug for ChatMessage
impl Debug for ChatMessage
Source §
impl<'de> Deserialize<'de> for ChatMessage
impl<'de> Deserialize<'de> for ChatMessage
Source §
fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>
where
    __D: Deserializer<'de>,
fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>
where
    __D: Deserializer<'de>,
Deserialize this value from the given Serde deserializer. Read more
Auto Trait Implementations§
impl Freeze for ChatMessage
impl RefUnwindSafe for ChatMessage
impl Send for ChatMessage
impl Sync for ChatMessage
impl Unpin for ChatMessage
impl UnwindSafe for ChatMessage
Blanket Implementations§
Source §
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more