// examples/streaming_chat.rs
//
// Streaming chat examples for Helios Engine.
use helios_engine::config::LLMConfig;
use helios_engine::{ChatMessage, ChatSession, LLMClient};
use std::io::{self, Write};

9#[tokio::main]
10async fn main() -> helios_engine::Result<()> {
11 println!("š Helios Engine - Streaming Example");
12 println!("=====================================\n");
13
14 let llm_config = LLMConfig {
16 model_name: "gpt-3.5-turbo".to_string(),
17 base_url: "https://api.openai.com/v1".to_string(),
18 api_key: std::env::var("OPENAI_API_KEY")
19 .unwrap_or_else(|_| "your-api-key-here".to_string()),
20 temperature: 0.7,
21 max_tokens: 2048,
22 };
23
24 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
25
26 println!("Example 1: Simple Streaming Response");
27 println!("======================================\n");
28
29 let messages = vec![
30 ChatMessage::system("You are a helpful assistant."),
31 ChatMessage::user("Write a short poem about coding."),
32 ];
33
34 print!("Assistant: ");
35 io::stdout().flush()?;
36
37 let response = client
38 .chat_stream(messages, None, |chunk| {
39 print!("{}", chunk);
40 io::stdout().flush().unwrap();
41 })
42 .await?;
43
44 println!("\n\n");
45
46 println!("Example 2: Interactive Streaming Chat");
47 println!("======================================\n");
48
49 let mut session = ChatSession::new().with_system_prompt("You are a helpful coding assistant.");
50
51 let questions = vec![
52 "What is Rust?",
53 "What are its main benefits?",
54 "Show me a simple example.",
55 ];
56
57 for question in questions {
58 println!("User: {}", question);
59 session.add_user_message(question);
60
61 print!("Assistant: ");
62 io::stdout().flush()?;
63
64 let response = client
65 .chat_stream(session.get_messages(), None, |chunk| {
66 print!("{}", chunk);
67 io::stdout().flush().unwrap();
68 })
69 .await?;
70
71 session.add_assistant_message(&response.content);
72 println!("\n");
73 }
74
75 println!("\nExample 3: Streaming with Thinking Tags");
76 println!("=========================================\n");
77 println!("When using models that support thinking tags (like o1),");
78 println!("you can detect and display them during streaming.\n");
79
80 struct ThinkingTracker {
81 in_thinking: bool,
82 thinking_buffer: String,
83 }
84
85 impl ThinkingTracker {
86 fn new() -> Self {
87 Self {
88 in_thinking: false,
89 thinking_buffer: String::new(),
90 }
91 }
92
93 fn process_chunk(&mut self, chunk: &str) -> String {
94 let mut output = String::new();
95 let mut chars = chunk.chars().peekable();
96
97 while let Some(c) = chars.next() {
98 if c == '<' {
99 let remaining: String = chars.clone().collect();
100 if remaining.starts_with("thinking>") {
101 self.in_thinking = true;
102 self.thinking_buffer.clear();
103 output.push_str("\nš [Thinking");
104 for _ in 0..9 {
105 chars.next();
106 }
107 continue;
108 } else if remaining.starts_with("/thinking>") {
109 self.in_thinking = false;
110 output.push_str("]\n");
111 for _ in 0..10 {
112 chars.next();
113 }
114 continue;
115 }
116 }
117
118 if self.in_thinking {
119 self.thinking_buffer.push(c);
120 if self.thinking_buffer.len() % 3 == 0 {
121 output.push('.');
122 }
123 } else {
124 output.push(c);
125 }
126 }
127
128 output
129 }
130 }
131
132 let messages = vec![ChatMessage::user(
133 "Solve this problem: What is 15 * 234 + 89?",
134 )];
135
136 let mut tracker = ThinkingTracker::new();
137 print!("Assistant: ");
138 io::stdout().flush()?;
139
140 let _response = client
141 .chat_stream(messages, None, |chunk| {
142 let output = tracker.process_chunk(chunk);
143 print!("{}", output);
144 io::stdout().flush().unwrap();
145 })
146 .await?;
147
148 println!("\n\nā
Streaming examples completed!");
149 println!("\nKey benefits of streaming:");
150 println!(" ⢠Real-time response display");
151 println!(" ⢠Better user experience for long responses");
152 println!(" ⢠Ability to show thinking/reasoning process");
153 println!(" ⢠Early cancellation possible (future feature)");
154
155 Ok(())
156}