pub struct ChatMessage {
pub role: Role,
pub content: String,
pub name: Option<String>,
pub tool_calls: Option<Vec<ToolCall>>,
pub tool_call_id: Option<String>,
}

Fields

role: Role
content: String
name: Option<String>
tool_calls: Option<Vec<ToolCall>>
tool_call_id: Option<String>
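The fields follow the OpenAI-style chat message shape, and all of them are public, so a message can also be built by hand when the convenience constructors below do not fit. A minimal sketch, assuming Role exposes a User variant and that ChatMessage and Role are importable from the crate root:

use helios_engine::{ChatMessage, Role}; // assumed re-export paths

// Equivalent to ChatMessage::user("Hello!"), spelled out field by field.
let msg = ChatMessage {
    role: Role::User,   // assumed variant name
    content: "Hello!".to_string(),
    name: None,         // optional participant name
    tool_calls: None,   // set on assistant messages that request tool calls
    tool_call_id: None, // set on tool-role messages carrying a tool result
};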
Implementations

impl ChatMessage

pub fn system(content: impl Into<String>) -> Self
Examples found in repository:
examples/direct_llm_usage.rs (line 66)
50 async fn simple_call() -> helios_engine::Result<()> {
51 // Create configuration
52 let llm_config = LLMConfig {
53 model_name: "gpt-3.5-turbo".to_string(),
54 base_url: "https://api.openai.com/v1".to_string(),
55 api_key: std::env::var("OPENAI_API_KEY")
56 .unwrap_or_else(|_| "your-api-key-here".to_string()),
57 temperature: 0.7,
58 max_tokens: 2048,
59 };
60
61 // Create client
62 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
63
64 // Prepare messages
65 let messages = vec![
66 ChatMessage::system("You are a helpful assistant that gives concise answers."),
67 ChatMessage::user("What is the capital of France? Answer in one sentence."),
68 ];
69
70 // Make the call
71 println!("Sending request...");
72 match client.chat(messages, None).await {
73 Ok(response) => {
74 println!("✓ Response: {}", response.content);
75 }
76 Err(e) => {
77 println!("✗ Error: {}", e);
78 println!(" (Make sure to set OPENAI_API_KEY environment variable)");
79 }
80 }
81
82 Ok(())
83 }

More examples:
examples/local_streaming.rs (line 38)
12 async fn main() -> helios_engine::Result<()> {
13 println!("🚀 Helios Engine - Local Model Streaming Example");
14 println!("=================================================\n");
15
16 // Configure local model
17 let local_config = LocalConfig {
18 huggingface_repo: "unsloth/Qwen2.5-0.5B-Instruct-GGUF".to_string(),
19 model_file: "Qwen2.5-0.5B-Instruct-Q4_K_M.gguf".to_string(),
20 context_size: 2048,
21 temperature: 0.7,
22 max_tokens: 512,
23 };
24
25 println!("📥 Loading local model...");
26 println!(" Repository: {}", local_config.huggingface_repo);
27 println!(" Model: {}\n", local_config.model_file);
28
29 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Local(local_config)).await?;
30
31 println!("✓ Model loaded successfully!\n");
32
33 // Example 1: Simple streaming
34 println!("Example 1: Simple Streaming Response");
35 println!("======================================\n");
36
37 let messages = vec![
38 ChatMessage::system("You are a helpful coding assistant."),
39 ChatMessage::user("Write a short explanation of what Rust is."),
40 ];
41
42 print!("Assistant: ");
43 io::stdout().flush()?;
44
45 let _response = client
46 .chat_stream(messages, None, |chunk| {
47 print!("{}", chunk);
48 io::stdout().flush().unwrap();
49 })
50 .await?;
51
52 println!("\n");
53
54 // Example 2: Multiple questions with streaming
55 println!("Example 2: Interactive Streaming");
56 println!("==================================\n");
57
58 let questions = vec![
59 "What are the main benefits of Rust?",
60 "Give me a simple code example.",
61 ];
62
63 let mut session = helios_engine::ChatSession::new()
64 .with_system_prompt("You are a helpful programming assistant.");
65
66 for question in questions {
67 println!("User: {}", question);
68 session.add_user_message(question);
69
70 print!("Assistant: ");
71 io::stdout().flush()?;
72
73 let response = client
74 .chat_stream(session.get_messages(), None, |chunk| {
75 print!("{}", chunk);
76 io::stdout().flush().unwrap();
77 })
78 .await?;
79
80 session.add_assistant_message(&response.content);
81 println!("\n");
82 }
83
84 println!("✅ Local model streaming completed successfully!");
85 println!("\n💡 Features:");
86 println!(" • Token-by-token streaming for local models");
87 println!(" • Real-time response display (no more instant full responses)");
88 println!(" • Same streaming API for both local and remote models");
89 println!(" • Improved user experience with progressive output");
90
91 Ok(())
92 }

examples/streaming_chat.rs (line 32)
12 async fn main() -> helios_engine::Result<()> {
13 println!("🚀 Helios Engine - Streaming Example");
14 println!("=====================================\n");
15
16 // Setup LLM configuration
17 let llm_config = LLMConfig {
18 model_name: "gpt-3.5-turbo".to_string(),
19 base_url: "https://api.openai.com/v1".to_string(),
20 api_key: std::env::var("OPENAI_API_KEY")
21 .unwrap_or_else(|_| "your-api-key-here".to_string()),
22 temperature: 0.7,
23 max_tokens: 2048,
24 };
25
26 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
27
28 println!("Example 1: Simple Streaming Response");
29 println!("======================================\n");
30
31 let messages = vec![
32 ChatMessage::system("You are a helpful assistant."),
33 ChatMessage::user("Write a short poem about coding."),
34 ];
35
36 print!("Assistant: ");
37 io::stdout().flush()?;
38
39 let response = client
40 .chat_stream(messages, None, |chunk| {
41 print!("{}", chunk);
42 io::stdout().flush().unwrap();
43 })
44 .await?;
45
46 println!("\n\n");
47
48 println!("Example 2: Interactive Streaming Chat");
49 println!("======================================\n");
50
51 let mut session = ChatSession::new().with_system_prompt("You are a helpful coding assistant.");
52
53 let questions = vec![
54 "What is Rust?",
55 "What are its main benefits?",
56 "Show me a simple example.",
57 ];
58
59 for question in questions {
60 println!("User: {}", question);
61 session.add_user_message(question);
62
63 print!("Assistant: ");
64 io::stdout().flush()?;
65
66 let response = client
67 .chat_stream(session.get_messages(), None, |chunk| {
68 print!("{}", chunk);
69 io::stdout().flush().unwrap();
70 })
71 .await?;
72
73 session.add_assistant_message(&response.content);
74 println!("\n");
75 }
76
77 println!("\nExample 3: Streaming with Thinking Tags");
78 println!("=========================================\n");
79 println!("When using models that support thinking tags (like o1),");
80 println!("you can detect and display them during streaming.\n");
81
82 struct ThinkingTracker {
83 in_thinking: bool,
84 thinking_buffer: String,
85 }
86
87 impl ThinkingTracker {
88 fn new() -> Self {
89 Self {
90 in_thinking: false,
91 thinking_buffer: String::new(),
92 }
93 }
94
95 fn process_chunk(&mut self, chunk: &str) -> String {
96 let mut output = String::new();
97 let mut chars = chunk.chars().peekable();
98
99 while let Some(c) = chars.next() {
100 if c == '<' {
101 let remaining: String = chars.clone().collect();
102 if remaining.starts_with("thinking>") {
103 self.in_thinking = true;
104 self.thinking_buffer.clear();
105 output.push_str("\n💭 [Thinking");
106 for _ in 0..9 {
107 chars.next();
108 }
109 continue;
110 } else if remaining.starts_with("/thinking>") {
111 self.in_thinking = false;
112 output.push_str("]\n");
113 for _ in 0..10 {
114 chars.next();
115 }
116 continue;
117 }
118 }
119
120 if self.in_thinking {
121 self.thinking_buffer.push(c);
122 if self.thinking_buffer.len() % 3 == 0 {
123 output.push('.');
124 }
125 } else {
126 output.push(c);
127 }
128 }
129
130 output
131 }
132 }
133
134 let messages = vec![ChatMessage::user(
135 "Solve this problem: What is 15 * 234 + 89?",
136 )];
137
138 let mut tracker = ThinkingTracker::new();
139 print!("Assistant: ");
140 io::stdout().flush()?;
141
142 let _response = client
143 .chat_stream(messages, None, |chunk| {
144 let output = tracker.process_chunk(chunk);
145 print!("{}", output);
146 io::stdout().flush().unwrap();
147 })
148 .await?;
149
150 println!("\n\n✅ Streaming examples completed!");
151 println!("\nKey benefits of streaming:");
152 println!(" • Real-time response display");
153 println!(" • Better user experience for long responses");
154 println!(" • Ability to show thinking/reasoning process");
155 println!(" • Early cancellation possible (future feature)");
156
157 Ok(())
158 }

pub fn user(content: impl Into<String>) -> Self
Examples found in repository: examples/direct_llm_usage.rs (line 67), examples/local_streaming.rs (line 39), and examples/streaming_chat.rs (line 33). These are the same three examples listed for system() above; ChatMessage::user supplies the user-role messages in each.

pub fn assistant(content: impl Into<String>) -> Self
pub fn tool(content: impl Into<String>, tool_call_id: impl Into<String>) -> Self
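No repository examples are shown for assistant and tool, so here is a minimal sketch of a tool-call round trip. The "call_123" id and the JSON payload are placeholders; in practice the id would come from a tool_calls entry on a prior assistant response:

use helios_engine::ChatMessage;

let mut messages = vec![
    ChatMessage::system("You are a weather assistant."),
    ChatMessage::user("What's the weather in Paris?"),
];

// Record the assistant turn, then attach the tool's output keyed by the
// id of the call it answers.
messages.push(ChatMessage::assistant("Let me check the forecast."));
messages.push(ChatMessage::tool(
    r#"{"temperature_c": 18, "condition": "cloudy"}"#, // placeholder tool output
    "call_123",                                        // placeholder tool_call_id
));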
Trait Implementations
impl Clone for ChatMessage

fn clone(&self) -> ChatMessage

Returns a duplicate of the value.
fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source.

impl Debug for ChatMessage
impl<'de> Deserialize<'de> for ChatMessage

fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>
where
    __D: Deserializer<'de>,

Deserialize this value from the given Serde deserializer.
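Because ChatMessage implements Deserialize, it can be parsed directly from an OpenAI-style JSON body. A minimal sketch, assuming serde_json is available and that Role deserializes from lowercase role strings such as "assistant":

use helios_engine::ChatMessage;

let json = r#"{
    "role": "assistant",
    "content": "The capital of France is Paris."
}"#;

// Optional fields (name, tool_calls, tool_call_id) are omitted here and are
// expected to come back as None.
let msg: ChatMessage = serde_json::from_str(json).expect("valid chat message JSON");
assert_eq!(msg.content, "The capital of France is Paris.");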
Auto Trait Implementations
impl Freeze for ChatMessage
impl RefUnwindSafe for ChatMessage
impl Send for ChatMessage
impl Sync for ChatMessage
impl Unpin for ChatMessage
impl UnwindSafe for ChatMessage
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.