pub struct LLMClient { /* private fields */ }

Implementations
impl LLMClient

pub async fn new(provider_type: LLMProviderType) -> Result<Self>
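A minimal sketch of constructing a client against a local, OpenAI-compatible endpoint (Ollama here), before the full repository example below. Import paths are assumed; the config values mirror the provider notes printed by that example.

use helios_engine::llm::{LLMClient, LLMProviderType}; // paths as used in the examples below
use helios_engine::LLMConfig; // re-export path assumed

// Sketch: reuse the Remote provider against a local Ollama server, which
// accepts a placeholder API key.
async fn local_client() -> helios_engine::Result<LLMClient> {
    let config = LLMConfig {
        model_name: "llama2".to_string(),
        base_url: "http://localhost:11434/v1".to_string(),
        api_key: "not-needed".to_string(),
        temperature: 0.7,
        max_tokens: 2048,
    };
    LLMClient::new(LLMProviderType::Remote(config)).await
}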
Examples found in repository
examples/direct_llm_usage.rs (line 62)
50async fn simple_call() -> helios_engine::Result<()> {
51 // Create configuration
52 let llm_config = LLMConfig {
53 model_name: "gpt-3.5-turbo".to_string(),
54 base_url: "https://api.openai.com/v1".to_string(),
55 api_key: std::env::var("OPENAI_API_KEY")
56 .unwrap_or_else(|_| "your-api-key-here".to_string()),
57 temperature: 0.7,
58 max_tokens: 2048,
59 };
60
61 // Create client
62 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
63
64 // Prepare messages
65 let messages = vec![
66 ChatMessage::system("You are a helpful assistant that gives concise answers."),
67 ChatMessage::user("What is the capital of France? Answer in one sentence."),
68 ];
69
70 // Make the call
71 println!("Sending request...");
72 match client.chat(messages, None).await {
73 Ok(response) => {
74 println!("✓ Response: {}", response.content);
75 }
76 Err(e) => {
77 println!("✗ Error: {}", e);
78 println!(" (Make sure to set OPENAI_API_KEY environment variable)");
79 }
80 }
81
82 Ok(())
83}
84
85/// Example 2: Multi-turn conversation with context
86async fn conversation_with_context() -> helios_engine::Result<()> {
87 let llm_config = LLMConfig {
88 model_name: "gpt-3.5-turbo".to_string(),
89 base_url: "https://api.openai.com/v1".to_string(),
90 api_key: std::env::var("OPENAI_API_KEY")
91 .unwrap_or_else(|_| "your-api-key-here".to_string()),
92 temperature: 0.7,
93 max_tokens: 2048,
94 };
95
96 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
97
98 // Use ChatSession to manage conversation
99 let mut session = ChatSession::new()
100 .with_system_prompt("You are a helpful math tutor. Give brief, clear explanations.");
101
102 // First turn
103 println!("Turn 1:");
104 session.add_user_message("What is 15 * 23?");
105 print!(" User: What is 15 * 23?\n ");
106
107 match client.chat(session.get_messages(), None).await {
108 Ok(response) => {
109 session.add_assistant_message(&response.content);
110 println!("Assistant: {}", response.content);
111 }
112 Err(e) => {
113 println!("Error: {}", e);
114 return Ok(());
115 }
116 }
117
118 // Second turn (with context from first turn)
119 println!("\nTurn 2:");
120 session.add_user_message("Now divide that by 5.");
121 print!(" User: Now divide that by 5.\n ");
122
123 match client.chat(session.get_messages(), None).await {
124 Ok(response) => {
125 session.add_assistant_message(&response.content);
126 println!("Assistant: {}", response.content);
127 }
128 Err(e) => {
129 println!("Error: {}", e);
130 }
131 }
132
133 println!("\n💡 Notice how the assistant remembered the result from the first calculation!");
134
135 Ok(())
136}
137
138/// Example 3: Information about using different providers
139fn different_providers_info() {
140 println!("You can use Helios with various LLM providers:\n");
141
142 println!("🔵 OpenAI:");
143 println!(" LLMConfig {{");
144 println!(" model_name: \"gpt-4\".to_string(),");
145 println!(" base_url: \"https://api.openai.com/v1\".to_string(),");
146 println!(" api_key: env::var(\"OPENAI_API_KEY\").unwrap(),");
147 println!(" temperature: 0.7,");
148 println!(" max_tokens: 2048,");
149 println!(" }}\n");
150
151 println!("🟢 Local LM Studio:");
152 println!(" LLMConfig {{");
153 println!(" model_name: \"local-model\".to_string(),");
154 println!(" base_url: \"http://localhost:1234/v1\".to_string(),");
155 println!(" api_key: \"not-needed\".to_string(),");
156 println!(" temperature: 0.7,");
157 println!(" max_tokens: 2048,");
158 println!(" }}\n");
159
160 println!("🦙 Ollama:");
161 println!(" LLMConfig {{");
162 println!(" model_name: \"llama2\".to_string(),");
163 println!(" base_url: \"http://localhost:11434/v1\".to_string(),");
164 println!(" api_key: \"not-needed\".to_string(),");
165 println!(" temperature: 0.7,");
166 println!(" max_tokens: 2048,");
167 println!(" }}\n");
168
169 println!("🔷 Azure OpenAI:");
170 println!(" LLMConfig {{");
171 println!(" model_name: \"gpt-35-turbo\".to_string(),");
172 println!(" base_url: \"https://your-resource.openai.azure.com/...\".to_string(),");
173 println!(" api_key: env::var(\"AZURE_OPENAI_KEY\").unwrap(),");
174 println!(" temperature: 0.7,");
175 println!(" max_tokens: 2048,");
176 println!(" }}\n");
177}
178
179/// Example 4: Interactive chat session
180async fn interactive_chat() -> helios_engine::Result<()> {
181 let llm_config = LLMConfig {
182 model_name: "gpt-3.5-turbo".to_string(),
183 base_url: "https://api.openai.com/v1".to_string(),
184 api_key: std::env::var("OPENAI_API_KEY")
185 .unwrap_or_else(|_| "your-api-key-here".to_string()),
186 temperature: 0.7,
187 max_tokens: 2048,
188 };
189
190 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
191 let mut session =
192 ChatSession::new().with_system_prompt("You are a friendly and helpful AI assistant.");
193
194 println!("Chat started! Type 'exit' or 'quit' to end the conversation.\n");
195
196 loop {
197 print!("You: ");
198 io::stdout().flush()?;
199
200 let mut input = String::new();
201 io::stdin().read_line(&mut input)?;
202 let input = input.trim();
203
204 if input.is_empty() {
205 continue;
206 }
207
208 if input == "exit" || input == "quit" {
209 println!("\n👋 Goodbye!");
210 break;
211 }
212
213 // Special commands
214 if input == "clear" {
215 session.clear();
216 println!("🧹 Conversation cleared!\n");
217 continue;
218 }
219
220 if input == "history" {
221 println!("\n📜 Conversation history:");
222 for (i, msg) in session.messages.iter().enumerate() {
223 println!(" {}. {:?}: {}", i + 1, msg.role, msg.content);
224 }
225 println!();
226 continue;
227 }
228
229 session.add_user_message(input);
230
231 print!("Assistant: ");
232 io::stdout().flush()?;
233
234 match client.chat(session.get_messages(), None).await {
235 Ok(response) => {
236 session.add_assistant_message(&response.content);
237 println!("{}\n", response.content);
238 }
239 Err(e) => {
240 println!("\n❌ Error: {}", e);
241 println!(" (Make sure OPENAI_API_KEY is set correctly)\n");
242 // Remove the last user message since it failed
243 session.messages.pop();
244 }
245 }
246 }
247
248 Ok(())
249}

More examples
examples/streaming_chat.rs (line 24)
10async fn main() -> helios_engine::Result<()> {
11 println!("🚀 Helios Engine - Streaming Example");
12 println!("=====================================\n");
13
14 // Setup LLM configuration
15 let llm_config = LLMConfig {
16 model_name: "gpt-3.5-turbo".to_string(),
17 base_url: "https://api.openai.com/v1".to_string(),
18 api_key: std::env::var("OPENAI_API_KEY")
19 .unwrap_or_else(|_| "your-api-key-here".to_string()),
20 temperature: 0.7,
21 max_tokens: 2048,
22 };
23
24 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
25
26 println!("Example 1: Simple Streaming Response");
27 println!("======================================\n");
28
29 let messages = vec![
30 ChatMessage::system("You are a helpful assistant."),
31 ChatMessage::user("Write a short poem about coding."),
32 ];
33
34 print!("Assistant: ");
35 io::stdout().flush()?;
36
37 let response = client
38 .chat_stream(messages, None, |chunk| {
39 print!("{}", chunk);
40 io::stdout().flush().unwrap();
41 })
42 .await?;
43
44 println!("\n\n");
45
46 println!("Example 2: Interactive Streaming Chat");
47 println!("======================================\n");
48
49 let mut session = ChatSession::new().with_system_prompt("You are a helpful coding assistant.");
50
51 let questions = vec![
52 "What is Rust?",
53 "What are its main benefits?",
54 "Show me a simple example.",
55 ];
56
57 for question in questions {
58 println!("User: {}", question);
59 session.add_user_message(question);
60
61 print!("Assistant: ");
62 io::stdout().flush()?;
63
64 let response = client
65 .chat_stream(session.get_messages(), None, |chunk| {
66 print!("{}", chunk);
67 io::stdout().flush().unwrap();
68 })
69 .await?;
70
71 session.add_assistant_message(&response.content);
72 println!("\n");
73 }
74
75 println!("\nExample 3: Streaming with Thinking Tags");
76 println!("=========================================\n");
77 println!("When using models that support thinking tags (like o1),");
78 println!("you can detect and display them during streaming.\n");
79
80 struct ThinkingTracker {
81 in_thinking: bool,
82 thinking_buffer: String,
83 }
84
85 impl ThinkingTracker {
86 fn new() -> Self {
87 Self {
88 in_thinking: false,
89 thinking_buffer: String::new(),
90 }
91 }
92
93 fn process_chunk(&mut self, chunk: &str) -> String {
94 let mut output = String::new();
95 let mut chars = chunk.chars().peekable();
96
97 while let Some(c) = chars.next() {
98 if c == '<' {
99 let remaining: String = chars.clone().collect();
100 if remaining.starts_with("thinking>") {
101 self.in_thinking = true;
102 self.thinking_buffer.clear();
103 output.push_str("\n💭 [Thinking");
104 for _ in 0..9 {
105 chars.next();
106 }
107 continue;
108 } else if remaining.starts_with("/thinking>") {
109 self.in_thinking = false;
110 output.push_str("]\n");
111 for _ in 0..10 {
112 chars.next();
113 }
114 continue;
115 }
116 }
117
118 if self.in_thinking {
119 self.thinking_buffer.push(c);
120 if self.thinking_buffer.len() % 3 == 0 {
121 output.push('.');
122 }
123 } else {
124 output.push(c);
125 }
126 }
127
128 output
129 }
130 }
131
132 let messages = vec![ChatMessage::user(
133 "Solve this problem: What is 15 * 234 + 89?",
134 )];
135
136 let mut tracker = ThinkingTracker::new();
137 print!("Assistant: ");
138 io::stdout().flush()?;
139
140 let _response = client
141 .chat_stream(messages, None, |chunk| {
142 let output = tracker.process_chunk(chunk);
143 print!("{}", output);
144 io::stdout().flush().unwrap();
145 })
146 .await?;
147
148 println!("\n\n✅ Streaming examples completed!");
149 println!("\nKey benefits of streaming:");
150 println!(" • Real-time response display");
151 println!(" • Better user experience for long responses");
152 println!(" • Ability to show thinking/reasoning process");
153 println!(" • Early cancellation possible (future feature)");
154
155 Ok(())
156}

pub fn provider_type(&self) -> &LLMProviderType
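The repository examples do not exercise provider_type. A minimal sketch (import paths assumed; only the Remote variant appears on this page, so other variants fall to a catch-all):

use helios_engine::llm::{LLMClient, LLMProviderType}; // paths as used in the examples above

// Sketch: report which provider a client was built with.
fn describe(client: &LLMClient) {
    match client.provider_type() {
        LLMProviderType::Remote(cfg) => {
            println!("remote provider: {} at {}", cfg.model_name, cfg.base_url);
        }
        _ => println!("non-remote provider"),
    }
}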
impl LLMClient

pub async fn chat(
    &self,
    messages: Vec<ChatMessage>,
    tools: Option<Vec<ToolDefinition>>,
) -> Result<ChatMessage>
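Every repository example passes None for tools. A sketch of supplying tool definitions (import paths assumed; the definitions are taken as a parameter because ToolDefinition's fields are not documented on this page):

use helios_engine::llm::{ChatMessage, LLMClient, ToolDefinition}; // paths assumed

// Sketch: forward pre-built tool definitions alongside the conversation.
// Whether the reply contains a tool call depends on the model and provider.
async fn chat_with_tools(
    client: &LLMClient,
    tools: Vec<ToolDefinition>,
) -> helios_engine::Result<ChatMessage> {
    let messages = vec![ChatMessage::user("What is the weather in Paris?")];
    client.chat(messages, Some(tools)).await
}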
Examples found in repository
examples/direct_llm_usage.rs (line 72); see the full listing under new() above.
pub async fn chat_stream<F>(
    &self,
    messages: Vec<ChatMessage>,
    tools: Option<Vec<ToolDefinition>>,
    on_chunk: F,
) -> Result<ChatMessage>
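The bound on F is not shown in this rendering; the repository example below passes a closure that receives string slices, so something like FnMut(&str) is implied. A minimal sketch under that assumption (import paths assumed):

use helios_engine::llm::{ChatMessage, LLMClient}; // paths assumed

// Sketch: accumulate streamed chunks into a String; the complete message is
// also returned once the stream finishes.
async fn stream_to_string(client: &LLMClient) -> helios_engine::Result<String> {
    let messages = vec![ChatMessage::user("Explain borrowing in one paragraph.")];
    let mut transcript = String::new();
    client
        .chat_stream(messages, None, |chunk| transcript.push_str(chunk))
        .await?;
    Ok(transcript)
}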
Examples found in repository
examples/streaming_chat.rs (lines 38-41); see the full listing under new() above.

Trait Implementations
impl LLMProvider for LLMClient

fn generate<'life0, 'async_trait>(
    &'life0 self,
    request: LLMRequest,
) -> Pin<Box<dyn Future<Output = Result<LLMResponse>> + Send + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
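Because LLMClient implements LLMProvider, it can be handed to code that is generic over providers. A minimal sketch (import paths assumed; LLMRequest construction is left to the caller since its fields are not documented on this page):

use helios_engine::llm::{LLMProvider, LLMRequest, LLMResponse}; // paths assumed
use helios_engine::Result;

// Sketch: generic over any provider, so an LLMClient (or a test double)
// can service a prepared request.
async fn run<P: LLMProvider>(provider: &P, request: LLMRequest) -> Result<LLMResponse> {
    provider.generate(request).await
}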
Auto Trait Implementations
impl Freeze for LLMClient
impl !RefUnwindSafe for LLMClient
impl Send for LLMClient
impl Sync for LLMClient
impl Unpin for LLMClient
impl !UnwindSafe for LLMClient
Blanket Implementations

impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.