pub struct ChatMessage {
pub role: Role,
pub content: String,
pub name: Option<String>,
pub tool_calls: Option<Vec<ToolCall>>,
pub tool_call_id: Option<String>,
}

Fields

role: Role
content: String
name: Option<String>
tool_calls: Option<Vec<ToolCall>>
tool_call_id: Option<String>
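For illustration, a minimal sketch of constructing a message literally. All fields are public, so a struct literal works; the Role::User variant name and the Role re-export are assumptions here, and the constructors below are usually the more convenient path.

use helios_engine::{ChatMessage, Role};

// Role::User is an assumed variant name; check the Role enum.
let msg = ChatMessage {
    role: Role::User,
    content: "Hello!".to_string(),
    name: None,         // optional speaker name
    tool_calls: None,   // set on assistant messages that request tool calls
    tool_call_id: None, // set on tool messages answering a call
};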
Implementations

impl ChatMessage
pub fn system(content: impl Into<String>) -> Self

Creates a new system message with the given content.
Examples found in repository
examples/direct_llm_usage.rs (line 67)
51 async fn simple_call() -> helios_engine::Result<()> {
52 // Create configuration
53 let llm_config = LLMConfig {
54 model_name: "gpt-3.5-turbo".to_string(),
55 base_url: "https://api.openai.com/v1".to_string(),
56 api_key: std::env::var("OPENAI_API_KEY")
57 .unwrap_or_else(|_| "your-api-key-here".to_string()),
58 temperature: 0.7,
59 max_tokens: 2048,
60 };
61
62 // Create client
63 let client = LLMClient::new(llm_config);
64
65 // Prepare messages
66 let messages = vec![
67 ChatMessage::system("You are a helpful assistant that gives concise answers."),
68 ChatMessage::user("What is the capital of France? Answer in one sentence."),
69 ];
70
71 // Make the call
72 println!("Sending request...");
73 match client.chat(messages, None).await {
74 Ok(response) => {
75 println!("✓ Response: {}", response.content);
76 }
77 Err(e) => {
78 println!("✗ Error: {}", e);
79 println!(" (Make sure to set OPENAI_API_KEY environment variable)");
80 }
81 }
82
83 Ok(())
84 }

More examples
examples/streaming_chat.rs (line 31)
11 async fn main() -> helios_engine::Result<()> {
12 println!("🚀 Helios Engine - Streaming Example");
13 println!("=====================================\n");
14
15 // Setup LLM configuration
16 let llm_config = LLMConfig {
17 model_name: "gpt-3.5-turbo".to_string(),
18 base_url: "https://api.openai.com/v1".to_string(),
19 api_key: std::env::var("OPENAI_API_KEY")
20 .unwrap_or_else(|_| "your-api-key-here".to_string()),
21 temperature: 0.7,
22 max_tokens: 2048,
23 };
24
25 let client = LLMClient::new(llm_config);
26
27 println!("Example 1: Simple Streaming Response");
28 println!("======================================\n");
29
30 let messages = vec![
31 ChatMessage::system("You are a helpful assistant."),
32 ChatMessage::user("Write a short poem about coding."),
33 ];
34
35 print!("Assistant: ");
36 io::stdout().flush()?;
37
38 let response = client.chat_stream(messages, None, |chunk| {
39 print!("{}", chunk);
40 io::stdout().flush().unwrap();
41 }).await?;
42
43 println!("\n\n");
44
45 println!("Example 2: Interactive Streaming Chat");
46 println!("======================================\n");
47
48 let mut session = ChatSession::new()
49 .with_system_prompt("You are a helpful coding assistant.");
50
51 let questions = vec![
52 "What is Rust?",
53 "What are its main benefits?",
54 "Show me a simple example.",
55 ];
56
57 for question in questions {
58 println!("User: {}", question);
59 session.add_user_message(question);
60
61 print!("Assistant: ");
62 io::stdout().flush()?;
63
64 let response = client.chat_stream(session.get_messages(), None, |chunk| {
65 print!("{}", chunk);
66 io::stdout().flush().unwrap();
67 }).await?;
68
69 session.add_assistant_message(&response.content);
70 println!("\n");
71 }
72
73 println!("\nExample 3: Streaming with Thinking Tags");
74 println!("=========================================\n");
75 println!("When using models that support thinking tags (like o1),");
76 println!("you can detect and display them during streaming.\n");
77
78 struct ThinkingTracker {
79 in_thinking: bool,
80 thinking_buffer: String,
81 }
82
83 impl ThinkingTracker {
84 fn new() -> Self {
85 Self {
86 in_thinking: false,
87 thinking_buffer: String::new(),
88 }
89 }
90
91 fn process_chunk(&mut self, chunk: &str) -> String {
92 let mut output = String::new();
93 let mut chars = chunk.chars().peekable();
94
95 while let Some(c) = chars.next() {
96 if c == '<' {
97 let remaining: String = chars.clone().collect();
98 if remaining.starts_with("thinking>") {
99 self.in_thinking = true;
100 self.thinking_buffer.clear();
101 output.push_str("\n💭 [Thinking");
102 for _ in 0..9 {
103 chars.next();
104 }
105 continue;
106 } else if remaining.starts_with("/thinking>") {
107 self.in_thinking = false;
108 output.push_str("]\n");
109 for _ in 0..10 {
110 chars.next();
111 }
112 continue;
113 }
114 }
115
116 if self.in_thinking {
117 self.thinking_buffer.push(c);
118 if self.thinking_buffer.len() % 3 == 0 {
119 output.push('.');
120 }
121 } else {
122 output.push(c);
123 }
124 }
125
126 output
127 }
128 }
129
130 let messages = vec![
131 ChatMessage::user("Solve this problem: What is 15 * 234 + 89?"),
132 ];
133
134 let mut tracker = ThinkingTracker::new();
135 print!("Assistant: ");
136 io::stdout().flush()?;
137
138 let _response = client.chat_stream(messages, None, |chunk| {
139 let output = tracker.process_chunk(chunk);
140 print!("{}", output);
141 io::stdout().flush().unwrap();
142 }).await?;
143
144 println!("\n\n✅ Streaming examples completed!");
145 println!("\nKey benefits of streaming:");
146 println!(" • Real-time response display");
147 println!(" • Better user experience for long responses");
148 println!(" • Ability to show thinking/reasoning process");
149 println!(" • Early cancellation possible (future feature)");
150
151 Ok(())
152 }

pub fn user(content: impl Into<String>) -> Self

Creates a new user message with the given content.
Examples found in repository

examples/direct_llm_usage.rs (line 68) and examples/streaming_chat.rs (line 32); these are the same snippets shown above for system().

pub fn assistant(content: impl Into<String>) -> Self

Creates a new assistant message with the given content.
pub fn tool(content: impl Into<String>, tool_call_id: impl Into<String>) -> Self

Creates a new tool message containing a tool's output, linked to the originating tool call by tool_call_id.
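For illustration, a minimal sketch of a tool round trip with these constructors. The tool-call id ("call_abc123") and the JSON payload are invented for the example; in practice the id must match the id of the ToolCall the assistant emitted.

use helios_engine::ChatMessage;

fn build_tool_round_trip() -> Vec<ChatMessage> {
    let mut messages = vec![
        ChatMessage::system("You are a weather assistant."),
        ChatMessage::user("What's the weather in Paris?"),
    ];

    // Suppose the model answered with a tool call whose id was
    // "call_abc123". After running the tool, append its output; the id
    // ties the result back to the call it answers.
    messages.push(ChatMessage::tool(r#"{"temp_c": 21}"#, "call_abc123"));

    messages
}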
Trait Implementations
impl Clone for ChatMessage

fn clone(&self) -> ChatMessage
Returns a duplicate of the value.
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source.

impl Debug for ChatMessage
impl<'de> Deserialize<'de> for ChatMessage
fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>
where
    __D: Deserializer<'de>,
Deserialize this value from the given Serde deserializer.
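A minimal sketch of using this impl to parse a ChatMessage from JSON. It assumes the serde field names match the struct fields above and that Role accepts lowercase role strings ("user", "system", and so on); both points are worth verifying against the Role docs.

use helios_engine::ChatMessage;

fn parse_message() -> serde_json::Result<ChatMessage> {
    // Field names mirror the struct definition; the Option fields are
    // given explicitly as null here.
    let json = r#"{
        "role": "user",
        "content": "What is the capital of France?",
        "name": null,
        "tool_calls": null,
        "tool_call_id": null
    }"#;
    serde_json::from_str(json)
}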
Auto Trait Implementations
impl Freeze for ChatMessage
impl RefUnwindSafe for ChatMessage
impl Send for ChatMessage
impl Sync for ChatMessage
impl Unpin for ChatMessage
impl UnwindSafe for ChatMessage
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.