pub struct ChatMessage {
pub role: Role,
pub content: String,
pub name: Option<String>,
pub tool_calls: Option<Vec<ToolCall>>,
pub tool_call_id: Option<String>,
}

Fields

role: Role
content: String
name: Option<String>
tool_calls: Option<Vec<ToolCall>>
tool_call_id: Option<String>
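In practice a ChatMessage is rarely populated field by field; the role-specific constructors documented below do it for you. The following is a minimal sketch, assuming ChatMessage is importable from the crate root (as the repository examples suggest) and that the constructors leave the optional fields set to None; neither detail is stated on this page.

use helios_engine::ChatMessage;

fn build_conversation() -> Vec<ChatMessage> {
    // Minimal sketch: a system prompt followed by a user question.
    // Only `content` is filled in; `name`, `tool_calls` and `tool_call_id`
    // are assumed to remain `None` when these constructors are used.
    vec![
        ChatMessage::system("You are a helpful assistant."),
        ChatMessage::user("What is the capital of France?"),
    ]
}

The same pattern appears in the repository examples below, where the resulting Vec<ChatMessage> is passed to LLMClient::chat or LLMClient::chat_stream.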
Implementations

impl ChatMessage
pub fn system(content: impl Into<String>) -> Self

Creates a message with the system role from the given content.
Examples found in repository

examples/direct_llm_usage.rs (line 66)
50 async fn simple_call() -> helios_engine::Result<()> {
51     // Create configuration
52     let llm_config = LLMConfig {
53         model_name: "gpt-3.5-turbo".to_string(),
54         base_url: "https://api.openai.com/v1".to_string(),
55         api_key: std::env::var("OPENAI_API_KEY")
56             .unwrap_or_else(|_| "your-api-key-here".to_string()),
57         temperature: 0.7,
58         max_tokens: 2048,
59     };
60
61     // Create client
62     let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
63
64     // Prepare messages
65     let messages = vec![
66         ChatMessage::system("You are a helpful assistant that gives concise answers."),
67         ChatMessage::user("What is the capital of France? Answer in one sentence."),
68     ];
69
70     // Make the call
71     println!("Sending request...");
72     match client.chat(messages, None).await {
73         Ok(response) => {
74             println!("✓ Response: {}", response.content);
75         }
76         Err(e) => {
77             println!("✗ Error: {}", e);
78             println!("   (Make sure to set OPENAI_API_KEY environment variable)");
79         }
80     }
81
82     Ok(())
83 }

More examples
examples/local_streaming.rs (line 38)
12 async fn main() -> helios_engine::Result<()> {
13     println!("🚀 Helios Engine - Local Model Streaming Example");
14     println!("=================================================\n");
15
16     // Configure local model
17     let local_config = LocalConfig {
18         huggingface_repo: "unsloth/Qwen2.5-0.5B-Instruct-GGUF".to_string(),
19         model_file: "Qwen2.5-0.5B-Instruct-Q4_K_M.gguf".to_string(),
20         context_size: 2048,
21         temperature: 0.7,
22         max_tokens: 512,
23     };
24
25     println!("📥 Loading local model...");
26     println!("   Repository: {}", local_config.huggingface_repo);
27     println!("   Model: {}\n", local_config.model_file);
28
29     let client = LLMClient::new(helios_engine::llm::LLMProviderType::Local(local_config)).await?;
30
31     println!("✓ Model loaded successfully!\n");
32
33     // Example 1: Simple streaming
34     println!("Example 1: Simple Streaming Response");
35     println!("======================================\n");
36
37     let messages = vec![
38         ChatMessage::system("You are a helpful coding assistant."),
39         ChatMessage::user("Write a short explanation of what Rust is."),
40     ];
41
42     print!("Assistant: ");
43     io::stdout().flush()?;
44
45     let _response = client
46         .chat_stream(messages, None, |chunk| {
47             print!("{}", chunk);
48             io::stdout().flush().unwrap();
49         })
50         .await?;
51
52     println!("\n");
53
54     // Example 2: Multiple questions with streaming
55     println!("Example 2: Interactive Streaming");
56     println!("==================================\n");
57
58     let questions = vec![
59         "What are the main benefits of Rust?",
60         "Give me a simple code example.",
61     ];
62
63     let mut session = helios_engine::ChatSession::new()
64         .with_system_prompt("You are a helpful programming assistant.");
65
66     for question in questions {
67         println!("User: {}", question);
68         session.add_user_message(question);
69
70         print!("Assistant: ");
71         io::stdout().flush()?;
72
73         let response = client
74             .chat_stream(session.get_messages(), None, |chunk| {
75                 print!("{}", chunk);
76                 io::stdout().flush().unwrap();
77             })
78             .await?;
79
80         session.add_assistant_message(&response.content);
81         println!("\n");
82     }
83
84     println!("✅ Local model streaming completed successfully!");
85     println!("\n💡 Features:");
86     println!("   • Token-by-token streaming for local models");
87     println!("   • Real-time response display (no more instant full responses)");
88     println!("   • Same streaming API for both local and remote models");
89     println!("   • Improved user experience with progressive output");
90
91     Ok(())
92 }

examples/streaming_chat.rs (line 30)
10 async fn main() -> helios_engine::Result<()> {
11     println!("🚀 Helios Engine - Streaming Example");
12     println!("=====================================\n");
13
14     // Setup LLM configuration
15     let llm_config = LLMConfig {
16         model_name: "gpt-3.5-turbo".to_string(),
17         base_url: "https://api.openai.com/v1".to_string(),
18         api_key: std::env::var("OPENAI_API_KEY")
19             .unwrap_or_else(|_| "your-api-key-here".to_string()),
20         temperature: 0.7,
21         max_tokens: 2048,
22     };
23
24     let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
25
26     println!("Example 1: Simple Streaming Response");
27     println!("======================================\n");
28
29     let messages = vec![
30         ChatMessage::system("You are a helpful assistant."),
31         ChatMessage::user("Write a short poem about coding."),
32     ];
33
34     print!("Assistant: ");
35     io::stdout().flush()?;
36
37     let response = client
38         .chat_stream(messages, None, |chunk| {
39             print!("{}", chunk);
40             io::stdout().flush().unwrap();
41         })
42         .await?;
43
44     println!("\n\n");
45
46     println!("Example 2: Interactive Streaming Chat");
47     println!("======================================\n");
48
49     let mut session = ChatSession::new().with_system_prompt("You are a helpful coding assistant.");
50
51     let questions = vec![
52         "What is Rust?",
53         "What are its main benefits?",
54         "Show me a simple example.",
55     ];
56
57     for question in questions {
58         println!("User: {}", question);
59         session.add_user_message(question);
60
61         print!("Assistant: ");
62         io::stdout().flush()?;
63
64         let response = client
65             .chat_stream(session.get_messages(), None, |chunk| {
66                 print!("{}", chunk);
67                 io::stdout().flush().unwrap();
68             })
69             .await?;
70
71         session.add_assistant_message(&response.content);
72         println!("\n");
73     }
74
75     println!("\nExample 3: Streaming with Thinking Tags");
76     println!("=========================================\n");
77     println!("When using models that support thinking tags (like o1),");
78     println!("you can detect and display them during streaming.\n");
79
80     struct ThinkingTracker {
81         in_thinking: bool,
82         thinking_buffer: String,
83     }
84
85     impl ThinkingTracker {
86         fn new() -> Self {
87             Self {
88                 in_thinking: false,
89                 thinking_buffer: String::new(),
90             }
91         }
92
93         fn process_chunk(&mut self, chunk: &str) -> String {
94             let mut output = String::new();
95             let mut chars = chunk.chars().peekable();
96
97             while let Some(c) = chars.next() {
98                 if c == '<' {
99                     let remaining: String = chars.clone().collect();
100                     if remaining.starts_with("thinking>") {
101                         self.in_thinking = true;
102                         self.thinking_buffer.clear();
103                         output.push_str("\n💭 [Thinking");
104                         for _ in 0..9 {
105                             chars.next();
106                         }
107                         continue;
108                     } else if remaining.starts_with("/thinking>") {
109                         self.in_thinking = false;
110                         output.push_str("]\n");
111                         for _ in 0..10 {
112                             chars.next();
113                         }
114                         continue;
115                     }
116                 }
117
118                 if self.in_thinking {
119                     self.thinking_buffer.push(c);
120                     if self.thinking_buffer.len() % 3 == 0 {
121                         output.push('.');
122                     }
123                 } else {
124                     output.push(c);
125                 }
126             }
127
128             output
129         }
130     }
131
132     let messages = vec![ChatMessage::user(
133         "Solve this problem: What is 15 * 234 + 89?",
134     )];
135
136     let mut tracker = ThinkingTracker::new();
137     print!("Assistant: ");
138     io::stdout().flush()?;
139
140     let _response = client
141         .chat_stream(messages, None, |chunk| {
142             let output = tracker.process_chunk(chunk);
143             print!("{}", output);
144             io::stdout().flush().unwrap();
145         })
146         .await?;
147
148     println!("\n\n✅ Streaming examples completed!");
149     println!("\nKey benefits of streaming:");
150     println!("  • Real-time response display");
151     println!("  • Better user experience for long responses");
152     println!("  • Ability to show thinking/reasoning process");
153     println!("  • Early cancellation possible (future feature)");
154
155     Ok(())
156 }

pub fn user(content: impl Into<String>) -> Self
Creates a message with the user role from the given content.
Examples found in repository

examples/direct_llm_usage.rs (line 67)
examples/local_streaming.rs (line 39)
examples/streaming_chat.rs (line 31)
pub fn assistant(content: impl Into<String>) -> Self

Creates a message with the assistant role from the given content.
pub fn tool(content: impl Into<String>, tool_call_id: impl Into<String>) -> Self

Creates a message with the tool role, carrying the output of the tool call identified by tool_call_id.
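Unlike system and user, the assistant and tool constructors have no repository examples on this page. The sketch below shows where they would typically sit in a conversation log; the tool-call id "call_1" is invented for illustration, and the assumption that tool_call_id must echo the id of a ToolCall previously issued by the assistant is not confirmed here.

use helios_engine::ChatMessage;

fn record_turns() -> Vec<ChatMessage> {
    let mut messages = vec![ChatMessage::user("What is 15 * 234 + 89?")];

    // Store a plain assistant reply in the transcript.
    messages.push(ChatMessage::assistant("15 * 234 + 89 = 3599."));

    // Answer a pending tool call: the hypothetical id "call_1" is assumed to
    // match the id of the ToolCall the assistant requested earlier.
    messages.push(ChatMessage::tool("{\"result\": 3599}", "call_1"));

    messages
}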
Trait Implementations

impl Clone for ChatMessage

fn clone(&self) -> ChatMessage
Returns a duplicate of the value.

fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source.

impl Debug for ChatMessage

impl<'de> Deserialize<'de> for ChatMessage

fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>
where
    __D: Deserializer<'de>,
Deserialize this value from the given Serde deserializer.
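Because ChatMessage implements Deserialize, a message can be reconstructed from JSON, for example when reloading a stored transcript. A minimal sketch, assuming serde_json is available, that the field names follow the struct definition unchanged, and that Role deserializes from lowercase strings such as "user"; none of this is confirmed by this page.

use helios_engine::ChatMessage;

fn parse_message() -> ChatMessage {
    // Hypothetical stored message; the lowercase "user" role string and the
    // explicit nulls for the optional fields are assumptions about the wire format.
    let json = r#"{"role":"user","content":"Hello!","name":null,"tool_calls":null,"tool_call_id":null}"#;
    serde_json::from_str(json).expect("valid ChatMessage JSON")
}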
Auto Trait Implementations
impl Freeze for ChatMessage
impl RefUnwindSafe for ChatMessage
impl Send for ChatMessage
impl Sync for ChatMessage
impl Unpin for ChatMessage
impl UnwindSafe for ChatMessage
Blanket Implementations

impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.