Struct ChatSession
pub struct ChatSession {
    pub messages: Vec<ChatMessage>,
    pub system_prompt: Option<String>,
    pub metadata: HashMap<String, String>,
}

Represents a chat session, including the conversation history and metadata.
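
A minimal usage sketch (import paths are assumed to be crate-root re-exports; adjust to the actual module layout):

use helios_engine::{ChatMessage, ChatSession};

// Build up a short conversation.
let mut session = ChatSession::new()
    .with_system_prompt("You are a concise assistant.");
session.add_user_message("Hello!");
session.add_assistant_message("Hi! How can I help?");

// get_messages returns the conversation, including the system prompt,
// ready to hand to an LLM client.
for msg in session.get_messages() {
    println!("{:?}: {}", msg.role, msg.content);
}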

Fields

messages: Vec<ChatMessage>

The messages in the chat session.

system_prompt: Option<String>

The system prompt for the chat session.

metadata: HashMap<String, String>

Metadata associated with the chat session.

Implementations


impl ChatSession


pub fn new() -> Self

Creates a new, empty chat session.
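
For example:

let mut session = ChatSession::new();
session.add_user_message("Hello!");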

Examples found in repository
examples/direct_llm_usage.rs (line 103)
88async fn conversation_with_context() -> helios_engine::Result<()> {
89    // Create a configuration for the LLM.
90    let llm_config = LLMConfig {
91        model_name: "gpt-3.5-turbo".to_string(),
92        base_url: "https://api.openai.com/v1".to_string(),
93        api_key: std::env::var("OPENAI_API_KEY")
94            .unwrap_or_else(|_| "your-api-key-here".to_string()),
95        temperature: 0.7,
96        max_tokens: 2048,
97    };
98
99    // Create a new LLM client.
100    let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
101
102    // Use a `ChatSession` to manage the conversation history.
103    let mut session = ChatSession::new()
104        .with_system_prompt("You are a helpful math tutor. Give brief, clear explanations.");
105
106    // --- First turn ---
107    println!("Turn 1:");
108    session.add_user_message("What is 15 * 23?");
109    print!("  User: What is 15 * 23?\n  ");
110
111    match client
112        .chat(session.get_messages(), None, None, None, None)
113        .await
114    {
115        Ok(response) => {
116            session.add_assistant_message(&response.content);
117            println!("Assistant: {}", response.content);
118        }
119        Err(e) => {
120            println!("Error: {}", e);
121            return Ok(());
122        }
123    }
124
125    // --- Second turn (with context from the first turn) ---
126    println!("\nTurn 2:");
127    session.add_user_message("Now divide that by 5.");
128    print!("  User: Now divide that by 5.\n  ");
129
130    match client
131        .chat(session.get_messages(), None, None, None, None)
132        .await
133    {
134        Ok(response) => {
135            session.add_assistant_message(&response.content);
136            println!("Assistant: {}", response.content);
137        }
138        Err(e) => {
139            println!("Error: {}", e);
140        }
141    }
142
143    println!("\n💡 Notice how the assistant remembered the result from the first calculation!");
144
145    Ok(())
146}
147
148/// Provides information about using different LLM providers.
149fn different_providers_info() {
150    println!("You can use Helios with various LLM providers:\n");
151
152    println!("🔵 OpenAI:");
153    println!("   LLMConfig {{");
154    println!("       model_name: \"gpt-4\".to_string(),");
155    println!("       base_url: \"https://api.openai.com/v1\".to_string(),");
156    println!("       api_key: env::var(\"OPENAI_API_KEY\").unwrap(),");
157    println!("       temperature: 0.7,");
158    println!("       max_tokens: 2048,");
159    println!("   }}\n");
160
161    println!("🟢 Local LM Studio:");
162    println!("   LLMConfig {{");
163    println!("       model_name: \"local-model\".to_string(),");
164    println!("       base_url: \"http://localhost:1234/v1\".to_string(),");
165    println!("       api_key: \"not-needed\".to_string(),");
166    println!("       temperature: 0.7,");
167    println!("       max_tokens: 2048,");
168    println!("   }}\n");
169
170    println!("🦙 Ollama:");
171    println!("   LLMConfig {{");
172    println!("       model_name: \"llama2\".to_string(),");
173    println!("       base_url: \"http://localhost:11434/v1\".to_string(),");
174    println!("       api_key: \"not-needed\".to_string(),");
175    println!("       temperature: 0.7,");
176    println!("       max_tokens: 2048,");
177    println!("   }}\n");
178
179    println!("🔷 Azure OpenAI:");
180    println!("   LLMConfig {{");
181    println!("       model_name: \"gpt-35-turbo\".to_string(),");
182    println!("       base_url: \"https://your-resource.openai.azure.com/...\".to_string(),");
183    println!("       api_key: env::var(\"AZURE_OPENAI_KEY\").unwrap(),");
184    println!("       temperature: 0.7,");
185    println!("       max_tokens: 2048,");
186    println!("   }}\n");
187}
188
189/// Starts an interactive chat session with the LLM.
190async fn interactive_chat() -> helios_engine::Result<()> {
191    // Create a configuration for the LLM.
192    let llm_config = LLMConfig {
193        model_name: "gpt-3.5-turbo".to_string(),
194        base_url: "https://api.openai.com/v1".to_string(),
195        api_key: std::env::var("OPENAI_API_KEY")
196            .unwrap_or_else(|_| "your-api-key-here".to_string()),
197        temperature: 0.7,
198        max_tokens: 2048,
199    };
200
201    // Create a new LLM client.
202    let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
203    let mut session =
204        ChatSession::new().with_system_prompt("You are a friendly and helpful AI assistant.");
205
206    println!("Chat started! Type 'exit' or 'quit' to end the conversation.\n");
207
208    loop {
209        print!("You: ");
210        io::stdout().flush()?;
211
212        let mut input = String::new();
213        io::stdin().read_line(&mut input)?;
214        let input = input.trim();
215
216        if input.is_empty() {
217            continue;
218        }
219
220        if input == "exit" || input == "quit" {
221            println!("\n👋 Goodbye!");
222            break;
223        }
224
225        // Handle special commands.
226        if input == "clear" {
227            session.clear();
228            println!("🧹 Conversation cleared!\n");
229            continue;
230        }
231
232        if input == "history" {
233            println!("\n📜 Conversation history:");
234            for (i, msg) in session.messages.iter().enumerate() {
235                println!("  {}. {:?}: {}", i + 1, msg.role, msg.content);
236            }
237            println!();
238            continue;
239        }
240
241        session.add_user_message(input);
242
243        print!("Assistant: ");
244        io::stdout().flush()?;
245
246        match client
247            .chat(session.get_messages(), None, None, None, None)
248            .await
249        {
250            Ok(response) => {
251                session.add_assistant_message(&response.content);
252                println!("{}\n", response.content);
253            }
254            Err(e) => {
255                println!("\n❌ Error: {}", e);
256                println!("   (Make sure OPENAI_API_KEY is set correctly)\n");
257                // Remove the last user message since it failed.
258                session.messages.pop();
259            }
260        }
261    }
262
263    Ok(())
264}
More examples
examples/streaming_chat.rs (line 57)
14async fn main() -> helios_engine::Result<()> {
15    println!("🚀 Helios Engine - Streaming Example");
16    println!("=====================================\n");
17
18    // Set up the LLM configuration.
19    let llm_config = LLMConfig {
20        model_name: "gpt-3.5-turbo".to_string(),
21        base_url: "https://api.openai.com/v1".to_string(),
22        api_key: std::env::var("OPENAI_API_KEY")
23            .unwrap_or_else(|_| "your-api-key-here".to_string()),
24        temperature: 0.7,
25        max_tokens: 2048,
26    };
27
28    // Create a new LLM client.
29    let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
30
31    // --- Example 1: Simple streaming response ---
32    println!("Example 1: Simple Streaming Response");
33    println!("======================================\n");
34
35    let messages = vec![
36        ChatMessage::system("You are a helpful assistant."),
37        ChatMessage::user("Write a short poem about coding."),
38    ];
39
40    print!("Assistant: ");
41    io::stdout().flush()?;
42
43    // Stream the response from the model, printing each chunk as it arrives.
44    let response = client
45        .chat_stream(messages, None, None, None, None, |chunk| {
46            print!("{}", chunk);
47            io::stdout().flush().unwrap();
48        })
49        .await?;
50
51    println!("\n\n");
52
53    // --- Example 2: Interactive streaming chat ---
54    println!("Example 2: Interactive Streaming Chat");
55    println!("======================================\n");
56
57    let mut session = ChatSession::new().with_system_prompt("You are a helpful coding assistant.");
58
59    let questions = vec![
60        "What is Rust?",
61        "What are its main benefits?",
62        "Show me a simple example.",
63    ];
64
65    for question in questions {
66        println!("User: {}", question);
67        session.add_user_message(question);
68
69        print!("Assistant: ");
70        io::stdout().flush()?;
71
72        // Stream the response, maintaining the conversation context.
73        let response = client
74            .chat_stream(session.get_messages(), None, None, None, None, |chunk| {
75                print!("{}", chunk);
76                io::stdout().flush().unwrap();
77            })
78            .await?;
79
80        session.add_assistant_message(&response.content);
81        println!("\n");
82    }
83
84    // --- Example 3: Streaming with thinking tags ---
85    println!("\nExample 3: Streaming with Thinking Tags");
86    println!("=========================================\n");
87    println!("When using models that support thinking tags (like o1),");
88    println!("you can detect and display them during streaming.\n");
89
90    /// A helper struct to track and display thinking tags in streamed responses.
91    struct ThinkingTracker {
92        in_thinking: bool,
93        thinking_buffer: String,
94    }
95
96    impl ThinkingTracker {
97        /// Creates a new `ThinkingTracker`.
98        fn new() -> Self {
99            Self {
100                in_thinking: false,
101                thinking_buffer: String::new(),
102            }
103        }
104
105        /// Processes a chunk of a streamed response and returns the processed output.
106        fn process_chunk(&mut self, chunk: &str) -> String {
107            let mut output = String::new();
108            let mut chars = chunk.chars().peekable();
109
110            while let Some(c) = chars.next() {
111                if c == '<' {
112                    let remaining: String = chars.clone().collect();
113                    if remaining.starts_with("thinking>") {
114                        self.in_thinking = true;
115                        self.thinking_buffer.clear();
116                        output.push_str("\n💭 [Thinking");
117                        for _ in 0..9 {
118                            chars.next();
119                        }
120                        continue;
121                    } else if remaining.starts_with("/thinking>") {
122                        self.in_thinking = false;
123                        output.push_str("]\n");
124                        for _ in 0..10 {
125                            chars.next();
126                        }
127                        continue;
128                    }
129                }
130
131                if self.in_thinking {
132                    self.thinking_buffer.push(c);
133                    if self.thinking_buffer.len() % 3 == 0 {
134                        output.push('.');
135                    }
136                } else {
137                    output.push(c);
138                }
139            }
140
141            output
142        }
143    }
144
145    let messages = vec![ChatMessage::user(
146        "Solve this problem: What is 15 * 234 + 89?",
147    )];
148
149    let mut tracker = ThinkingTracker::new();
150    print!("Assistant: ");
151    io::stdout().flush()?;
152
153    // Stream the response, processing thinking tags as they arrive.
154    let _response = client
155        .chat_stream(messages, None, None, None, None, |chunk| {
156            let output = tracker.process_chunk(chunk);
157            print!("{}", output);
158            io::stdout().flush().unwrap();
159        })
160        .await?;
161
162    println!("\n\n✅ Streaming examples completed!");
163    println!("\nKey benefits of streaming:");
164    println!("  • Real-time response display");
165    println!("  • Better user experience for long responses");
166    println!("  • Ability to show thinking/reasoning process");
167    println!("  • Early cancellation possible (future feature)");
168
169    Ok(())
170}

pub fn with_system_prompt(self, prompt: impl Into<String>) -> Self

Sets the system prompt for the chat session.
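
For example (the method consumes and returns the session, so it is typically chained onto new):

let session = ChatSession::new()
    .with_system_prompt("You are a helpful math tutor. Give brief, clear explanations.");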

Examples found in repository
examples/direct_llm_usage.rs (line 104) and examples/streaming_chat.rs (line 57); these are the same excerpts shown in full under ChatSession::new above.

pub fn add_message(&mut self, message: ChatMessage)

Adds a message to the chat session.
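
A short sketch using the ChatMessage constructors seen in the repository examples:

let mut session = ChatSession::new();
// Push a pre-built message onto the conversation history.
session.add_message(ChatMessage::user("What is Rust?"));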


pub fn add_user_message(&mut self, content: impl Into<String>)

Adds a user message to the chat session.
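
For example:

let mut session = ChatSession::new();
session.add_user_message("What is 15 * 23?");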

Examples found in repository
examples/direct_llm_usage.rs (line 108) and examples/streaming_chat.rs (line 67); these are the same excerpts shown in full under ChatSession::new above.

pub fn add_assistant_message(&mut self, content: impl Into<String>)

Adds an assistant message to the chat session.
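
Typically called with the model's reply so that later turns keep the full context. A small sketch with a placeholder reply:

let mut session = ChatSession::new();
session.add_user_message("What is 15 * 23?");
session.add_assistant_message("15 * 23 = 345.");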

Examples found in repository
examples/direct_llm_usage.rs (line 116) and examples/streaming_chat.rs (line 80); these are the same excerpts shown in full under ChatSession::new above.

pub fn get_messages(&self) -> Vec<ChatMessage>

Returns all messages in the chat session, including the system prompt.
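
The returned vector is typically passed straight to the client, as in the repository examples referenced below (here, client is an LLMClient and session a ChatSession assumed to be in scope):

let response = client
    .chat(session.get_messages(), None, None, None, None)
    .await?;
session.add_assistant_message(&response.content);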

Examples found in repository?
examples/direct_llm_usage.rs (line 112)
88async fn conversation_with_context() -> helios_engine::Result<()> {
89    // Create a configuration for the LLM.
90    let llm_config = LLMConfig {
91        model_name: "gpt-3.5-turbo".to_string(),
92        base_url: "https://api.openai.com/v1".to_string(),
93        api_key: std::env::var("OPENAI_API_KEY")
94            .unwrap_or_else(|_| "your-api-key-here".to_string()),
95        temperature: 0.7,
96        max_tokens: 2048,
97    };
98
99    // Create a new LLM client.
100    let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
101
102    // Use a `ChatSession` to manage the conversation history.
103    let mut session = ChatSession::new()
104        .with_system_prompt("You are a helpful math tutor. Give brief, clear explanations.");
105
106    // --- First turn ---
107    println!("Turn 1:");
108    session.add_user_message("What is 15 * 23?");
109    print!("  User: What is 15 * 23?\n  ");
110
111    match client
112        .chat(session.get_messages(), None, None, None, None)
113        .await
114    {
115        Ok(response) => {
116            session.add_assistant_message(&response.content);
117            println!("Assistant: {}", response.content);
118        }
119        Err(e) => {
120            println!("Error: {}", e);
121            return Ok(());
122        }
123    }
124
125    // --- Second turn (with context from the first turn) ---
126    println!("\nTurn 2:");
127    session.add_user_message("Now divide that by 5.");
128    print!("  User: Now divide that by 5.\n  ");
129
130    match client
131        .chat(session.get_messages(), None, None, None, None)
132        .await
133    {
134        Ok(response) => {
135            session.add_assistant_message(&response.content);
136            println!("Assistant: {}", response.content);
137        }
138        Err(e) => {
139            println!("Error: {}", e);
140        }
141    }
142
143    println!("\n💡 Notice how the assistant remembered the result from the first calculation!");
144
145    Ok(())
146}
147
148/// Provides information about using different LLM providers.
149fn different_providers_info() {
150    println!("You can use Helios with various LLM providers:\n");
151
152    println!("🔵 OpenAI:");
153    println!("   LLMConfig {{");
154    println!("       model_name: \"gpt-4\".to_string(),");
155    println!("       base_url: \"https://api.openai.com/v1\".to_string(),");
156    println!("       api_key: env::var(\"OPENAI_API_KEY\").unwrap(),");
157    println!("       temperature: 0.7,");
158    println!("       max_tokens: 2048,");
159    println!("   }}\n");
160
161    println!("🟢 Local LM Studio:");
162    println!("   LLMConfig {{");
163    println!("       model_name: \"local-model\".to_string(),");
164    println!("       base_url: \"http://localhost:1234/v1\".to_string(),");
165    println!("       api_key: \"not-needed\".to_string(),");
166    println!("       temperature: 0.7,");
167    println!("       max_tokens: 2048,");
168    println!("   }}\n");
169
170    println!("🦙 Ollama:");
171    println!("   LLMConfig {{");
172    println!("       model_name: \"llama2\".to_string(),");
173    println!("       base_url: \"http://localhost:11434/v1\".to_string(),");
174    println!("       api_key: \"not-needed\".to_string(),");
175    println!("       temperature: 0.7,");
176    println!("       max_tokens: 2048,");
177    println!("   }}\n");
178
179    println!("🔷 Azure OpenAI:");
180    println!("   LLMConfig {{");
181    println!("       model_name: \"gpt-35-turbo\".to_string(),");
182    println!("       base_url: \"https://your-resource.openai.azure.com/...\".to_string(),");
183    println!("       api_key: env::var(\"AZURE_OPENAI_KEY\").unwrap(),");
184    println!("       temperature: 0.7,");
185    println!("       max_tokens: 2048,");
186    println!("   }}\n");
187}
188
189/// Starts an interactive chat session with the LLM.
190async fn interactive_chat() -> helios_engine::Result<()> {
191    // Create a configuration for the LLM.
192    let llm_config = LLMConfig {
193        model_name: "gpt-3.5-turbo".to_string(),
194        base_url: "https://api.openai.com/v1".to_string(),
195        api_key: std::env::var("OPENAI_API_KEY")
196            .unwrap_or_else(|_| "your-api-key-here".to_string()),
197        temperature: 0.7,
198        max_tokens: 2048,
199    };
200
201    // Create a new LLM client.
202    let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
203    let mut session =
204        ChatSession::new().with_system_prompt("You are a friendly and helpful AI assistant.");
205
206    println!("Chat started! Type 'exit' or 'quit' to end the conversation.\n");
207
208    loop {
209        print!("You: ");
210        io::stdout().flush()?;
211
212        let mut input = String::new();
213        io::stdin().read_line(&mut input)?;
214        let input = input.trim();
215
216        if input.is_empty() {
217            continue;
218        }
219
220        if input == "exit" || input == "quit" {
221            println!("\n👋 Goodbye!");
222            break;
223        }
224
225        // Handle special commands.
226        if input == "clear" {
227            session.clear();
228            println!("🧹 Conversation cleared!\n");
229            continue;
230        }
231
232        if input == "history" {
233            println!("\n📜 Conversation history:");
234            for (i, msg) in session.messages.iter().enumerate() {
235                println!("  {}. {:?}: {}", i + 1, msg.role, msg.content);
236            }
237            println!();
238            continue;
239        }
240
241        session.add_user_message(input);
242
243        print!("Assistant: ");
244        io::stdout().flush()?;
245
246        match client
247            .chat(session.get_messages(), None, None, None, None)
248            .await
249        {
250            Ok(response) => {
251                session.add_assistant_message(&response.content);
252                println!("{}\n", response.content);
253            }
254            Err(e) => {
255                println!("\n❌ Error: {}", e);
256                println!("   (Make sure OPENAI_API_KEY is set correctly)\n");
257                // Remove the last user message since it failed.
258                session.messages.pop();
259            }
260        }
261    }
262
263    Ok(())
264}
More examples
examples/streaming_chat.rs (line 74)
14async fn main() -> helios_engine::Result<()> {
15    println!("🚀 Helios Engine - Streaming Example");
16    println!("=====================================\n");
17
18    // Set up the LLM configuration.
19    let llm_config = LLMConfig {
20        model_name: "gpt-3.5-turbo".to_string(),
21        base_url: "https://api.openai.com/v1".to_string(),
22        api_key: std::env::var("OPENAI_API_KEY")
23            .unwrap_or_else(|_| "your-api-key-here".to_string()),
24        temperature: 0.7,
25        max_tokens: 2048,
26    };
27
28    // Create a new LLM client.
29    let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
30
31    // --- Example 1: Simple streaming response ---
32    println!("Example 1: Simple Streaming Response");
33    println!("======================================\n");
34
35    let messages = vec![
36        ChatMessage::system("You are a helpful assistant."),
37        ChatMessage::user("Write a short poem about coding."),
38    ];
39
40    print!("Assistant: ");
41    io::stdout().flush()?;
42
43    // Stream the response from the model, printing each chunk as it arrives.
44    let response = client
45        .chat_stream(messages, None, None, None, None, |chunk| {
46            print!("{}", chunk);
47            io::stdout().flush().unwrap();
48        })
49        .await?;
50
51    println!("\n\n");
52
53    // --- Example 2: Interactive streaming chat ---
54    println!("Example 2: Interactive Streaming Chat");
55    println!("======================================\n");
56
57    let mut session = ChatSession::new().with_system_prompt("You are a helpful coding assistant.");
58
59    let questions = vec![
60        "What is Rust?",
61        "What are its main benefits?",
62        "Show me a simple example.",
63    ];
64
65    for question in questions {
66        println!("User: {}", question);
67        session.add_user_message(question);
68
69        print!("Assistant: ");
70        io::stdout().flush()?;
71
72        // Stream the response, maintaining the conversation context.
73        let response = client
74            .chat_stream(session.get_messages(), None, None, None, None, |chunk| {
75                print!("{}", chunk);
76                io::stdout().flush().unwrap();
77            })
78            .await?;
79
80        session.add_assistant_message(&response.content);
81        println!("\n");
82    }
83
84    // --- Example 3: Streaming with thinking tags ---
85    println!("\nExample 3: Streaming with Thinking Tags");
86    println!("=========================================\n");
87    println!("When using models that support thinking tags (like o1),");
88    println!("you can detect and display them during streaming.\n");
89
90    /// A helper struct to track and display thinking tags in streamed responses.
91    struct ThinkingTracker {
92        in_thinking: bool,
93        thinking_buffer: String,
94    }
95
96    impl ThinkingTracker {
97        /// Creates a new `ThinkingTracker`.
98        fn new() -> Self {
99            Self {
100                in_thinking: false,
101                thinking_buffer: String::new(),
102            }
103        }
104
105        /// Processes a chunk of a streamed response and returns the processed output.
106        fn process_chunk(&mut self, chunk: &str) -> String {
107            let mut output = String::new();
108            let mut chars = chunk.chars().peekable();
109
110            while let Some(c) = chars.next() {
111                if c == '<' {
112                    let remaining: String = chars.clone().collect();
113                    if remaining.starts_with("thinking>") {
114                        self.in_thinking = true;
115                        self.thinking_buffer.clear();
116                        output.push_str("\n💭 [Thinking");
117                        for _ in 0..9 {
118                            chars.next();
119                        }
120                        continue;
121                    } else if remaining.starts_with("/thinking>") {
122                        self.in_thinking = false;
123                        output.push_str("]\n");
124                        for _ in 0..10 {
125                            chars.next();
126                        }
127                        continue;
128                    }
129                }
130
131                if self.in_thinking {
132                    self.thinking_buffer.push(c);
133                    if self.thinking_buffer.len() % 3 == 0 {
134                        output.push('.');
135                    }
136                } else {
137                    output.push(c);
138                }
139            }
140
141            output
142        }
143    }
144
145    let messages = vec![ChatMessage::user(
146        "Solve this problem: What is 15 * 234 + 89?",
147    )];
148
149    let mut tracker = ThinkingTracker::new();
150    print!("Assistant: ");
151    io::stdout().flush()?;
152
153    // Stream the response, processing thinking tags as they arrive.
154    let _response = client
155        .chat_stream(messages, None, None, None, None, |chunk| {
156            let output = tracker.process_chunk(chunk);
157            print!("{}", output);
158            io::stdout().flush().unwrap();
159        })
160        .await?;
161
162    println!("\n\n✅ Streaming examples completed!");
163    println!("\nKey benefits of streaming:");
164    println!("  • Real-time response display");
165    println!("  • Better user experience for long responses");
166    println!("  • Ability to show thinking/reasoning process");
167    println!("  • Early cancellation possible (future feature)");
168
169    Ok(())
170}
Source

pub fn clear(&mut self)

Clears all messages from the chat session.

Examples found in repository?
examples/direct_llm_usage.rs (line 227)
225        // Handle special commands.
226        if input == "clear" {
227            session.clear();
228            println!("🧹 Conversation cleared!\n");
229            continue;
230        }
Source

pub fn set_metadata(&mut self, key: impl Into<String>, value: impl Into<String>)

Sets a metadata key-value pair for the session.
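
A minimal sketch of attaching metadata to a session and reading it back (the "user_id" key is purely illustrative; both parameters accept anything that converts into a String):

let mut session = ChatSession::new();
session.set_metadata("user_id", "42");
// Reading the value back via get_metadata, which returns Option<&String>.
assert_eq!(session.get_metadata("user_id"), Some(&"42".to_string()));
assert_eq!(session.get_metadata("missing"), None);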

Source

pub fn get_metadata(&self, key: &str) -> Option<&String>

Gets a metadata value by key.
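
A small usage sketch, assuming the session populated in the set_metadata example above; a missing key simply yields None:

if let Some(value) = session.get_metadata("user_id") {
    println!("user_id = {}", value);
}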

Source

pub fn remove_metadata(&mut self, key: &str) -> Option<String>

Removes a metadata key-value pair.
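
A short sketch of the removal path (the "trace_id" key is hypothetical): the first call returns the owned value that was stored, the second returns None because the key is gone.

session.set_metadata("trace_id", "abc123");
assert_eq!(session.remove_metadata("trace_id"), Some("abc123".to_string()));
assert_eq!(session.remove_metadata("trace_id"), None);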

Source

pub fn get_summary(&self) -> String

Returns a summary of the chat session.
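
A hedged sketch of obtaining the summary; the exact format of the returned String is not specified here, so this example only builds a small session and prints whatever get_summary produces:

let mut session = ChatSession::new().with_system_prompt("You are a helpful assistant.");
session.add_user_message("Hello!");
println!("{}", session.get_summary());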

Trait Implementations§

Source§

impl Clone for ChatSession

Source§

fn clone(&self) -> ChatSession

Returns a duplicate of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
Source§

impl Debug for ChatSession

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
Source§

impl Default for ChatSession

Source§

fn default() -> Self

Creates a new, empty chat session.
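
A one-line sketch: Default is expected to behave like ChatSession::new(), yielding a session with no messages.

let session = ChatSession::default();
assert!(session.messages.is_empty());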

Auto Trait Implementations§

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T> FromRef<T> for T
where T: Clone,

Source§

fn from_ref(input: &T) -> T

Converts to this type from a reference to the input type.
Source§

impl<T> Instrument for T

Source§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper. Read more
Source§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> PolicyExt for T
where T: ?Sized,

Source§

fn and<P, B, E>(self, other: P) -> And<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow only if self and other return Action::Follow. Read more
Source§

fn or<P, B, E>(self, other: P) -> Or<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow if either self or other returns Action::Follow. Read more
Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<T> WithSubscriber for T

Source§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper. Read more