pub struct ChatSession {
pub messages: Vec<ChatMessage>,
pub system_prompt: Option<String>,
}

Fields
messages: Vec<ChatMessage>
system_prompt: Option<String>

Implementations
impl ChatSession
pub fn new() -> Self
Creates a new, empty chat session.
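The repository listings below are fairly long, so here is a minimal sketch of the full ChatSession lifecycle first. It assumes ChatSession is importable from the crate root, as the listings suggest, and hard-codes the assistant reply instead of fetching one from a model.

use helios_engine::ChatSession;

fn main() {
    // Build a session with an optional system prompt (builder-style).
    let mut session = ChatSession::new()
        .with_system_prompt("You are a helpful assistant.");

    // Record one user/assistant turn.
    session.add_user_message("What is 15 * 23?");
    session.add_assistant_message("15 * 23 = 345.");

    // get_messages() yields the history to hand to an LLM client.
    let history = session.get_messages();
    assert!(!history.is_empty());

    // clear() discards the accumulated conversation.
    session.clear();
}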
Examples found in repository:
examples/direct_llm_usage.rs (line 99)
86 async fn conversation_with_context() -> helios_engine::Result<()> {
87 let llm_config = LLMConfig {
88 model_name: "gpt-3.5-turbo".to_string(),
89 base_url: "https://api.openai.com/v1".to_string(),
90 api_key: std::env::var("OPENAI_API_KEY")
91 .unwrap_or_else(|_| "your-api-key-here".to_string()),
92 temperature: 0.7,
93 max_tokens: 2048,
94 };
95
96 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
97
98 // Use ChatSession to manage conversation
99 let mut session = ChatSession::new()
100 .with_system_prompt("You are a helpful math tutor. Give brief, clear explanations.");
101
102 // First turn
103 println!("Turn 1:");
104 session.add_user_message("What is 15 * 23?");
105 print!(" User: What is 15 * 23?\n ");
106
107 match client.chat(session.get_messages(), None).await {
108 Ok(response) => {
109 session.add_assistant_message(&response.content);
110 println!("Assistant: {}", response.content);
111 }
112 Err(e) => {
113 println!("Error: {}", e);
114 return Ok(());
115 }
116 }
117
118 // Second turn (with context from first turn)
119 println!("\nTurn 2:");
120 session.add_user_message("Now divide that by 5.");
121 print!(" User: Now divide that by 5.\n ");
122
123 match client.chat(session.get_messages(), None).await {
124 Ok(response) => {
125 session.add_assistant_message(&response.content);
126 println!("Assistant: {}", response.content);
127 }
128 Err(e) => {
129 println!("Error: {}", e);
130 }
131 }
132
133 println!("\n💡 Notice how the assistant remembered the result from the first calculation!");
134
135 Ok(())
136 }
137
138 /// Example 3: Information about using different providers
139 fn different_providers_info() {
140 println!("You can use Helios with various LLM providers:\n");
141
142 println!("🔵 OpenAI:");
143 println!(" LLMConfig {{");
144 println!(" model_name: \"gpt-4\".to_string(),");
145 println!(" base_url: \"https://api.openai.com/v1\".to_string(),");
146 println!(" api_key: env::var(\"OPENAI_API_KEY\").unwrap(),");
147 println!(" temperature: 0.7,");
148 println!(" max_tokens: 2048,");
149 println!(" }}\n");
150
151 println!("🟢 Local LM Studio:");
152 println!(" LLMConfig {{");
153 println!(" model_name: \"local-model\".to_string(),");
154 println!(" base_url: \"http://localhost:1234/v1\".to_string(),");
155 println!(" api_key: \"not-needed\".to_string(),");
156 println!(" temperature: 0.7,");
157 println!(" max_tokens: 2048,");
158 println!(" }}\n");
159
160 println!("🦙 Ollama:");
161 println!(" LLMConfig {{");
162 println!(" model_name: \"llama2\".to_string(),");
163 println!(" base_url: \"http://localhost:11434/v1\".to_string(),");
164 println!(" api_key: \"not-needed\".to_string(),");
165 println!(" temperature: 0.7,");
166 println!(" max_tokens: 2048,");
167 println!(" }}\n");
168
169 println!("🔷 Azure OpenAI:");
170 println!(" LLMConfig {{");
171 println!(" model_name: \"gpt-35-turbo\".to_string(),");
172 println!(" base_url: \"https://your-resource.openai.azure.com/...\".to_string(),");
173 println!(" api_key: env::var(\"AZURE_OPENAI_KEY\").unwrap(),");
174 println!(" temperature: 0.7,");
175 println!(" max_tokens: 2048,");
176 println!(" }}\n");
177 }
178
179 /// Example 4: Interactive chat session
180 async fn interactive_chat() -> helios_engine::Result<()> {
181 let llm_config = LLMConfig {
182 model_name: "gpt-3.5-turbo".to_string(),
183 base_url: "https://api.openai.com/v1".to_string(),
184 api_key: std::env::var("OPENAI_API_KEY")
185 .unwrap_or_else(|_| "your-api-key-here".to_string()),
186 temperature: 0.7,
187 max_tokens: 2048,
188 };
189
190 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
191 let mut session =
192 ChatSession::new().with_system_prompt("You are a friendly and helpful AI assistant.");
193
194 println!("Chat started! Type 'exit' or 'quit' to end the conversation.\n");
195
196 loop {
197 print!("You: ");
198 io::stdout().flush()?;
199
200 let mut input = String::new();
201 io::stdin().read_line(&mut input)?;
202 let input = input.trim();
203
204 if input.is_empty() {
205 continue;
206 }
207
208 if input == "exit" || input == "quit" {
209 println!("\n👋 Goodbye!");
210 break;
211 }
212
213 // Special commands
214 if input == "clear" {
215 session.clear();
216 println!("🧹 Conversation cleared!\n");
217 continue;
218 }
219
220 if input == "history" {
221 println!("\n📜 Conversation history:");
222 for (i, msg) in session.messages.iter().enumerate() {
223 println!(" {}. {:?}: {}", i + 1, msg.role, msg.content);
224 }
225 println!();
226 continue;
227 }
228
229 session.add_user_message(input);
230
231 print!("Assistant: ");
232 io::stdout().flush()?;
233
234 match client.chat(session.get_messages(), None).await {
235 Ok(response) => {
236 session.add_assistant_message(&response.content);
237 println!("{}\n", response.content);
238 }
239 Err(e) => {
240 println!("\n❌ Error: {}", e);
241 println!(" (Make sure OPENAI_API_KEY is set correctly)\n");
242 // Remove the last user message since it failed
243 session.messages.pop();
244 }
245 }
246 }
247
248 Ok(())
249 }

More examples:
examples/streaming_chat.rs (line 49)
10 async fn main() -> helios_engine::Result<()> {
11 println!("🚀 Helios Engine - Streaming Example");
12 println!("=====================================\n");
13
14 // Setup LLM configuration
15 let llm_config = LLMConfig {
16 model_name: "gpt-3.5-turbo".to_string(),
17 base_url: "https://api.openai.com/v1".to_string(),
18 api_key: std::env::var("OPENAI_API_KEY")
19 .unwrap_or_else(|_| "your-api-key-here".to_string()),
20 temperature: 0.7,
21 max_tokens: 2048,
22 };
23
24 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
25
26 println!("Example 1: Simple Streaming Response");
27 println!("======================================\n");
28
29 let messages = vec![
30 ChatMessage::system("You are a helpful assistant."),
31 ChatMessage::user("Write a short poem about coding."),
32 ];
33
34 print!("Assistant: ");
35 io::stdout().flush()?;
36
37 let response = client
38 .chat_stream(messages, None, |chunk| {
39 print!("{}", chunk);
40 io::stdout().flush().unwrap();
41 })
42 .await?;
43
44 println!("\n\n");
45
46 println!("Example 2: Interactive Streaming Chat");
47 println!("======================================\n");
48
49 let mut session = ChatSession::new().with_system_prompt("You are a helpful coding assistant.");
50
51 let questions = vec![
52 "What is Rust?",
53 "What are its main benefits?",
54 "Show me a simple example.",
55 ];
56
57 for question in questions {
58 println!("User: {}", question);
59 session.add_user_message(question);
60
61 print!("Assistant: ");
62 io::stdout().flush()?;
63
64 let response = client
65 .chat_stream(session.get_messages(), None, |chunk| {
66 print!("{}", chunk);
67 io::stdout().flush().unwrap();
68 })
69 .await?;
70
71 session.add_assistant_message(&response.content);
72 println!("\n");
73 }
74
75 println!("\nExample 3: Streaming with Thinking Tags");
76 println!("=========================================\n");
77 println!("When using models that support thinking tags (like o1),");
78 println!("you can detect and display them during streaming.\n");
79
80 struct ThinkingTracker {
81 in_thinking: bool,
82 thinking_buffer: String,
83 }
84
85 impl ThinkingTracker {
86 fn new() -> Self {
87 Self {
88 in_thinking: false,
89 thinking_buffer: String::new(),
90 }
91 }
92
93 fn process_chunk(&mut self, chunk: &str) -> String {
94 let mut output = String::new();
95 let mut chars = chunk.chars().peekable();
96
97 while let Some(c) = chars.next() {
98 if c == '<' {
99 let remaining: String = chars.clone().collect();
100 if remaining.starts_with("thinking>") {
101 self.in_thinking = true;
102 self.thinking_buffer.clear();
103 output.push_str("\n💭 [Thinking");
104 for _ in 0..9 {
105 chars.next();
106 }
107 continue;
108 } else if remaining.starts_with("/thinking>") {
109 self.in_thinking = false;
110 output.push_str("]\n");
111 for _ in 0..10 {
112 chars.next();
113 }
114 continue;
115 }
116 }
117
118 if self.in_thinking {
119 self.thinking_buffer.push(c);
120 if self.thinking_buffer.len() % 3 == 0 {
121 output.push('.');
122 }
123 } else {
124 output.push(c);
125 }
126 }
127
128 output
129 }
130 }
131
132 let messages = vec![ChatMessage::user(
133 "Solve this problem: What is 15 * 234 + 89?",
134 )];
135
136 let mut tracker = ThinkingTracker::new();
137 print!("Assistant: ");
138 io::stdout().flush()?;
139
140 let _response = client
141 .chat_stream(messages, None, |chunk| {
142 let output = tracker.process_chunk(chunk);
143 print!("{}", output);
144 io::stdout().flush().unwrap();
145 })
146 .await?;
147
148 println!("\n\n✅ Streaming examples completed!");
149 println!("\nKey benefits of streaming:");
150 println!(" • Real-time response display");
151 println!(" • Better user experience for long responses");
152 println!(" • Ability to show thinking/reasoning process");
153 println!(" • Early cancellation possible (future feature)");
154
155 Ok(())
156 }

pub fn with_system_prompt(self, prompt: impl Into<String>) -> Self
Sets the system prompt, consuming and returning the session (builder-style).
Examples found in repository: examples/direct_llm_usage.rs (line 100) and examples/streaming_chat.rs (line 49); these are the same listings shown under new() above.

pub fn add_message(&mut self, message: ChatMessage)
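add_message appends a pre-built ChatMessage and has no repository example. A minimal sketch, assuming both types are importable from the crate root and using the ChatMessage::system/ChatMessage::user constructors shown in the listings above; replaying a stored transcript is one plausible use:

use helios_engine::{ChatMessage, ChatSession};

fn replay_transcript() -> ChatSession {
    let mut session = ChatSession::new();
    // Re-insert previously stored messages verbatim.
    session.add_message(ChatMessage::system("You are a helpful assistant."));
    session.add_message(ChatMessage::user("Hello!"));
    session
}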
pub fn add_user_message(&mut self, content: impl Into<String>)
Appends a user message to the conversation.
Examples found in repository: examples/direct_llm_usage.rs (line 104) and examples/streaming_chat.rs (line 59); these are the same listings shown under new() above.

pub fn add_assistant_message(&mut self, content: impl Into<String>)
Appends an assistant message to the conversation.
Examples found in repository: examples/direct_llm_usage.rs (line 109) and examples/streaming_chat.rs (line 71); these are the same listings shown under new() above.

pub fn get_messages(&self) -> Vec<ChatMessage>
Returns the session's messages, ready to pass to LLMClient::chat or chat_stream.
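A condensed sketch of the call pattern used on every turn in the listings (a fragment: it assumes an async context with client: LLMClient and a mutable session; whether the returned Vec includes the system prompt as a leading message is not shown here):

// Inside an async fn returning helios_engine::Result<()>:
let response = client.chat(session.get_messages(), None).await?;
session.add_assistant_message(&response.content);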
Examples found in repository: examples/direct_llm_usage.rs (line 107) and examples/streaming_chat.rs (line 65); these are the same listings shown under new() above.

pub fn clear(&mut self)
Clears the conversation history.
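A minimal sketch mirroring the "clear" command in the interactive example referenced below. The final assertion assumes clear() empties the public messages field; whether the system prompt survives a clear is not shown by the listings.

let mut session = ChatSession::new();
session.add_user_message("Let's talk about Rust.");
session.clear(); // start over
assert!(session.messages.is_empty());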
Examples found in repository: examples/direct_llm_usage.rs (line 215); this is the interactive_chat listing already shown under new() above.

Trait Implementations
impl Clone for ChatSession

fn clone(&self) -> ChatSession
Returns a duplicate of the value.

fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source.

impl Debug for ChatSession

Auto Trait Implementations

impl Freeze for ChatSession
impl RefUnwindSafe for ChatSession
impl Send for ChatSession
impl Sync for ChatSession
impl Unpin for ChatSession
impl UnwindSafe for ChatSession

Blanket Implementations

impl<T> BorrowMut<T> for T where T: ?Sized

fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.