pub struct LLMClient { /* private fields */ }

A client for sending chat requests to an LLM endpoint, configured through LLMConfig.

Implementations§
impl LLMClient
pub fn new(config: LLMConfig) -> Self

Creates a new LLMClient from the given LLMConfig.
Examples found in repository
examples/direct_llm_usage.rs (line 63)
51 async fn simple_call() -> helios_engine::Result<()> {
52 // Create configuration
53 let llm_config = LLMConfig {
54 model_name: "gpt-3.5-turbo".to_string(),
55 base_url: "https://api.openai.com/v1".to_string(),
56 api_key: std::env::var("OPENAI_API_KEY")
57 .unwrap_or_else(|_| "your-api-key-here".to_string()),
58 temperature: 0.7,
59 max_tokens: 2048,
60 };
61
62 // Create client
63 let client = LLMClient::new(llm_config);
64
65 // Prepare messages
66 let messages = vec![
67 ChatMessage::system("You are a helpful assistant that gives concise answers."),
68 ChatMessage::user("What is the capital of France? Answer in one sentence."),
69 ];
70
71 // Make the call
72 println!("Sending request...");
73 match client.chat(messages, None).await {
74 Ok(response) => {
75 println!("✓ Response: {}", response.content);
76 }
77 Err(e) => {
78 println!("✗ Error: {}", e);
79 println!(" (Make sure to set OPENAI_API_KEY environment variable)");
80 }
81 }
82
83 Ok(())
84 }
85
86 /// Example 2: Multi-turn conversation with context
87 async fn conversation_with_context() -> helios_engine::Result<()> {
88 let llm_config = LLMConfig {
89 model_name: "gpt-3.5-turbo".to_string(),
90 base_url: "https://api.openai.com/v1".to_string(),
91 api_key: std::env::var("OPENAI_API_KEY")
92 .unwrap_or_else(|_| "your-api-key-here".to_string()),
93 temperature: 0.7,
94 max_tokens: 2048,
95 };
96
97 let client = LLMClient::new(llm_config);
98
99 // Use ChatSession to manage conversation
100 let mut session = ChatSession::new()
101 .with_system_prompt("You are a helpful math tutor. Give brief, clear explanations.");
102
103 // First turn
104 println!("Turn 1:");
105 session.add_user_message("What is 15 * 23?");
106 print!(" User: What is 15 * 23?\n ");
107
108 match client.chat(session.get_messages(), None).await {
109 Ok(response) => {
110 session.add_assistant_message(&response.content);
111 println!("Assistant: {}", response.content);
112 }
113 Err(e) => {
114 println!("Error: {}", e);
115 return Ok(());
116 }
117 }
118
119 // Second turn (with context from first turn)
120 println!("\nTurn 2:");
121 session.add_user_message("Now divide that by 5.");
122 print!(" User: Now divide that by 5.\n ");
123
124 match client.chat(session.get_messages(), None).await {
125 Ok(response) => {
126 session.add_assistant_message(&response.content);
127 println!("Assistant: {}", response.content);
128 }
129 Err(e) => {
130 println!("Error: {}", e);
131 }
132 }
133
134 println!("\n💡 Notice how the assistant remembered the result from the first calculation!");
135
136 Ok(())
137 }
138
139 /// Example 3: Information about using different providers
140 fn different_providers_info() {
141 println!("You can use Helios with various LLM providers:\n");
142
143 println!("🔵 OpenAI:");
144 println!(" LLMConfig {{");
145 println!(" model_name: \"gpt-4\".to_string(),");
146 println!(" base_url: \"https://api.openai.com/v1\".to_string(),");
147 println!(" api_key: env::var(\"OPENAI_API_KEY\").unwrap(),");
148 println!(" temperature: 0.7,");
149 println!(" max_tokens: 2048,");
150 println!(" }}\n");
151
152 println!("🟢 Local LM Studio:");
153 println!(" LLMConfig {{");
154 println!(" model_name: \"local-model\".to_string(),");
155 println!(" base_url: \"http://localhost:1234/v1\".to_string(),");
156 println!(" api_key: \"not-needed\".to_string(),");
157 println!(" temperature: 0.7,");
158 println!(" max_tokens: 2048,");
159 println!(" }}\n");
160
161 println!("🦙 Ollama:");
162 println!(" LLMConfig {{");
163 println!(" model_name: \"llama2\".to_string(),");
164 println!(" base_url: \"http://localhost:11434/v1\".to_string(),");
165 println!(" api_key: \"not-needed\".to_string(),");
166 println!(" temperature: 0.7,");
167 println!(" max_tokens: 2048,");
168 println!(" }}\n");
169
170 println!("🔷 Azure OpenAI:");
171 println!(" LLMConfig {{");
172 println!(" model_name: \"gpt-35-turbo\".to_string(),");
173 println!(" base_url: \"https://your-resource.openai.azure.com/...\".to_string(),");
174 println!(" api_key: env::var(\"AZURE_OPENAI_KEY\").unwrap(),");
175 println!(" temperature: 0.7,");
176 println!(" max_tokens: 2048,");
177 println!(" }}\n");
178 }
179
180 /// Example 4: Interactive chat session
181 async fn interactive_chat() -> helios_engine::Result<()> {
182 let llm_config = LLMConfig {
183 model_name: "gpt-3.5-turbo".to_string(),
184 base_url: "https://api.openai.com/v1".to_string(),
185 api_key: std::env::var("OPENAI_API_KEY")
186 .unwrap_or_else(|_| "your-api-key-here".to_string()),
187 temperature: 0.7,
188 max_tokens: 2048,
189 };
190
191 let client = LLMClient::new(llm_config);
192 let mut session = ChatSession::new()
193 .with_system_prompt("You are a friendly and helpful AI assistant.");
194
195 println!("Chat started! Type 'exit' or 'quit' to end the conversation.\n");
196
197 loop {
198 print!("You: ");
199 io::stdout().flush()?;
200
201 let mut input = String::new();
202 io::stdin().read_line(&mut input)?;
203 let input = input.trim();
204
205 if input.is_empty() {
206 continue;
207 }
208
209 if input == "exit" || input == "quit" {
210 println!("\n👋 Goodbye!");
211 break;
212 }
213
214 // Special commands
215 if input == "clear" {
216 session.clear();
217 println!("🧹 Conversation cleared!\n");
218 continue;
219 }
220
221 if input == "history" {
222 println!("\n📜 Conversation history:");
223 for (i, msg) in session.messages.iter().enumerate() {
224 println!(" {}. {:?}: {}", i + 1, msg.role, msg.content);
225 }
226 println!();
227 continue;
228 }
229
230 session.add_user_message(input);
231
232 print!("Assistant: ");
233 io::stdout().flush()?;
234
235 match client.chat(session.get_messages(), None).await {
236 Ok(response) => {
237 session.add_assistant_message(&response.content);
238 println!("{}\n", response.content);
239 }
240 Err(e) => {
241 println!("\n❌ Error: {}", e);
242 println!(" (Make sure OPENAI_API_KEY is set correctly)\n");
243 // Remove the last user message since it failed
244 session.messages.pop();
245 }
246 }
247 }
248
249 Ok(())
250 }

More examples
examples/streaming_chat.rs (line 25)
11 async fn main() -> helios_engine::Result<()> {
12 println!("🚀 Helios Engine - Streaming Example");
13 println!("=====================================\n");
14
15 // Setup LLM configuration
16 let llm_config = LLMConfig {
17 model_name: "gpt-3.5-turbo".to_string(),
18 base_url: "https://api.openai.com/v1".to_string(),
19 api_key: std::env::var("OPENAI_API_KEY")
20 .unwrap_or_else(|_| "your-api-key-here".to_string()),
21 temperature: 0.7,
22 max_tokens: 2048,
23 };
24
25 let client = LLMClient::new(llm_config);
26
27 println!("Example 1: Simple Streaming Response");
28 println!("======================================\n");
29
30 let messages = vec![
31 ChatMessage::system("You are a helpful assistant."),
32 ChatMessage::user("Write a short poem about coding."),
33 ];
34
35 print!("Assistant: ");
36 io::stdout().flush()?;
37
38 let response = client.chat_stream(messages, None, |chunk| {
39 print!("{}", chunk);
40 io::stdout().flush().unwrap();
41 }).await?;
42
43 println!("\n\n");
44
45 println!("Example 2: Interactive Streaming Chat");
46 println!("======================================\n");
47
48 let mut session = ChatSession::new()
49 .with_system_prompt("You are a helpful coding assistant.");
50
51 let questions = vec![
52 "What is Rust?",
53 "What are its main benefits?",
54 "Show me a simple example.",
55 ];
56
57 for question in questions {
58 println!("User: {}", question);
59 session.add_user_message(question);
60
61 print!("Assistant: ");
62 io::stdout().flush()?;
63
64 let response = client.chat_stream(session.get_messages(), None, |chunk| {
65 print!("{}", chunk);
66 io::stdout().flush().unwrap();
67 }).await?;
68
69 session.add_assistant_message(&response.content);
70 println!("\n");
71 }
72
73 println!("\nExample 3: Streaming with Thinking Tags");
74 println!("=========================================\n");
75 println!("When using models that support thinking tags (like o1),");
76 println!("you can detect and display them during streaming.\n");
77
78 struct ThinkingTracker {
79 in_thinking: bool,
80 thinking_buffer: String,
81 }
82
83 impl ThinkingTracker {
84 fn new() -> Self {
85 Self {
86 in_thinking: false,
87 thinking_buffer: String::new(),
88 }
89 }
90
91 fn process_chunk(&mut self, chunk: &str) -> String {
92 let mut output = String::new();
93 let mut chars = chunk.chars().peekable();
94
95 while let Some(c) = chars.next() {
96 if c == '<' {
97 let remaining: String = chars.clone().collect();
98 if remaining.starts_with("thinking>") {
99 self.in_thinking = true;
100 self.thinking_buffer.clear();
101 output.push_str("\n💭 [Thinking");
102 for _ in 0..9 {
103 chars.next();
104 }
105 continue;
106 } else if remaining.starts_with("/thinking>") {
107 self.in_thinking = false;
108 output.push_str("]\n");
109 for _ in 0..10 {
110 chars.next();
111 }
112 continue;
113 }
114 }
115
116 if self.in_thinking {
117 self.thinking_buffer.push(c);
118 if self.thinking_buffer.len() % 3 == 0 {
119 output.push('.');
120 }
121 } else {
122 output.push(c);
123 }
124 }
125
126 output
127 }
128 }
129
130 let messages = vec![
131 ChatMessage::user("Solve this problem: What is 15 * 234 + 89?"),
132 ];
133
134 let mut tracker = ThinkingTracker::new();
135 print!("Assistant: ");
136 io::stdout().flush()?;
137
138 let _response = client.chat_stream(messages, None, |chunk| {
139 let output = tracker.process_chunk(chunk);
140 print!("{}", output);
141 io::stdout().flush().unwrap();
142 }).await?;
143
144 println!("\n\n✅ Streaming examples completed!");
145 println!("\nKey benefits of streaming:");
146 println!(" • Real-time response display");
147 println!(" • Better user experience for long responses");
148 println!(" • Ability to show thinking/reasoning process");
149 println!(" • Early cancellation possible (future feature)");
150
151 Ok(())
152 }

pub fn config(&self) -> &LLMConfig

Returns a shared reference to the LLMConfig this client was created with.
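No repository example exercises this accessor, so here is a minimal sketch. It reuses the LLMConfig fields shown in the examples above and assumes, as they do, that the types are importable from the crate root:

use helios_engine::{LLMClient, LLMConfig};

fn main() {
    let llm_config = LLMConfig {
        model_name: "gpt-3.5-turbo".to_string(),
        base_url: "https://api.openai.com/v1".to_string(),
        api_key: std::env::var("OPENAI_API_KEY").unwrap_or_default(),
        temperature: 0.7,
        max_tokens: 2048,
    };
    let client = LLMClient::new(llm_config);

    // config() hands back a shared reference to the settings the client was built with.
    assert_eq!(client.config().model_name, "gpt-3.5-turbo");
    println!("base_url: {}", client.config().base_url);
}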
impl LLMClient
pub async fn chat(
    &self,
    messages: Vec<ChatMessage>,
    tools: Option<Vec<ToolDefinition>>,
) -> Result<ChatMessage>

Sends the given messages, optionally together with tool definitions, to the configured model and returns the assistant's reply as a ChatMessage.
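The repository examples below always pass None for tools. If you already have tool definitions, hand them over as Some(...); a hedged sketch follows (how ToolDefinition values are constructed is not covered on this page, so they are assumed to be built elsewhere, and the crate-root imports are likewise an assumption):

use helios_engine::{ChatMessage, LLMClient, ToolDefinition};

async fn chat_with_tools(
    client: &LLMClient,
    messages: Vec<ChatMessage>,
    tools: Vec<ToolDefinition>,
) -> helios_engine::Result<ChatMessage> {
    // Identical to a plain chat() call, except the model is also given the tool definitions.
    client.chat(messages, Some(tools)).await
}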
Examples found in repository
examples/direct_llm_usage.rs (line 73)
51 async fn simple_call() -> helios_engine::Result<()> {
52 // Create configuration
53 let llm_config = LLMConfig {
54 model_name: "gpt-3.5-turbo".to_string(),
55 base_url: "https://api.openai.com/v1".to_string(),
56 api_key: std::env::var("OPENAI_API_KEY")
57 .unwrap_or_else(|_| "your-api-key-here".to_string()),
58 temperature: 0.7,
59 max_tokens: 2048,
60 };
61
62 // Create client
63 let client = LLMClient::new(llm_config);
64
65 // Prepare messages
66 let messages = vec![
67 ChatMessage::system("You are a helpful assistant that gives concise answers."),
68 ChatMessage::user("What is the capital of France? Answer in one sentence."),
69 ];
70
71 // Make the call
72 println!("Sending request...");
73 match client.chat(messages, None).await {
74 Ok(response) => {
75 println!("✓ Response: {}", response.content);
76 }
77 Err(e) => {
78 println!("✗ Error: {}", e);
79 println!(" (Make sure to set OPENAI_API_KEY environment variable)");
80 }
81 }
82
83 Ok(())
84 }
85
86 /// Example 2: Multi-turn conversation with context
87 async fn conversation_with_context() -> helios_engine::Result<()> {
88 let llm_config = LLMConfig {
89 model_name: "gpt-3.5-turbo".to_string(),
90 base_url: "https://api.openai.com/v1".to_string(),
91 api_key: std::env::var("OPENAI_API_KEY")
92 .unwrap_or_else(|_| "your-api-key-here".to_string()),
93 temperature: 0.7,
94 max_tokens: 2048,
95 };
96
97 let client = LLMClient::new(llm_config);
98
99 // Use ChatSession to manage conversation
100 let mut session = ChatSession::new()
101 .with_system_prompt("You are a helpful math tutor. Give brief, clear explanations.");
102
103 // First turn
104 println!("Turn 1:");
105 session.add_user_message("What is 15 * 23?");
106 print!(" User: What is 15 * 23?\n ");
107
108 match client.chat(session.get_messages(), None).await {
109 Ok(response) => {
110 session.add_assistant_message(&response.content);
111 println!("Assistant: {}", response.content);
112 }
113 Err(e) => {
114 println!("Error: {}", e);
115 return Ok(());
116 }
117 }
118
119 // Second turn (with context from first turn)
120 println!("\nTurn 2:");
121 session.add_user_message("Now divide that by 5.");
122 print!(" User: Now divide that by 5.\n ");
123
124 match client.chat(session.get_messages(), None).await {
125 Ok(response) => {
126 session.add_assistant_message(&response.content);
127 println!("Assistant: {}", response.content);
128 }
129 Err(e) => {
130 println!("Error: {}", e);
131 }
132 }
133
134 println!("\n💡 Notice how the assistant remembered the result from the first calculation!");
135
136 Ok(())
137 }
138
139 /// Example 3: Information about using different providers
140 fn different_providers_info() {
141 println!("You can use Helios with various LLM providers:\n");
142
143 println!("🔵 OpenAI:");
144 println!(" LLMConfig {{");
145 println!(" model_name: \"gpt-4\".to_string(),");
146 println!(" base_url: \"https://api.openai.com/v1\".to_string(),");
147 println!(" api_key: env::var(\"OPENAI_API_KEY\").unwrap(),");
148 println!(" temperature: 0.7,");
149 println!(" max_tokens: 2048,");
150 println!(" }}\n");
151
152 println!("🟢 Local LM Studio:");
153 println!(" LLMConfig {{");
154 println!(" model_name: \"local-model\".to_string(),");
155 println!(" base_url: \"http://localhost:1234/v1\".to_string(),");
156 println!(" api_key: \"not-needed\".to_string(),");
157 println!(" temperature: 0.7,");
158 println!(" max_tokens: 2048,");
159 println!(" }}\n");
160
161 println!("🦙 Ollama:");
162 println!(" LLMConfig {{");
163 println!(" model_name: \"llama2\".to_string(),");
164 println!(" base_url: \"http://localhost:11434/v1\".to_string(),");
165 println!(" api_key: \"not-needed\".to_string(),");
166 println!(" temperature: 0.7,");
167 println!(" max_tokens: 2048,");
168 println!(" }}\n");
169
170 println!("🔷 Azure OpenAI:");
171 println!(" LLMConfig {{");
172 println!(" model_name: \"gpt-35-turbo\".to_string(),");
173 println!(" base_url: \"https://your-resource.openai.azure.com/...\".to_string(),");
174 println!(" api_key: env::var(\"AZURE_OPENAI_KEY\").unwrap(),");
175 println!(" temperature: 0.7,");
176 println!(" max_tokens: 2048,");
177 println!(" }}\n");
178 }
179
180 /// Example 4: Interactive chat session
181 async fn interactive_chat() -> helios_engine::Result<()> {
182 let llm_config = LLMConfig {
183 model_name: "gpt-3.5-turbo".to_string(),
184 base_url: "https://api.openai.com/v1".to_string(),
185 api_key: std::env::var("OPENAI_API_KEY")
186 .unwrap_or_else(|_| "your-api-key-here".to_string()),
187 temperature: 0.7,
188 max_tokens: 2048,
189 };
190
191 let client = LLMClient::new(llm_config);
192 let mut session = ChatSession::new()
193 .with_system_prompt("You are a friendly and helpful AI assistant.");
194
195 println!("Chat started! Type 'exit' or 'quit' to end the conversation.\n");
196
197 loop {
198 print!("You: ");
199 io::stdout().flush()?;
200
201 let mut input = String::new();
202 io::stdin().read_line(&mut input)?;
203 let input = input.trim();
204
205 if input.is_empty() {
206 continue;
207 }
208
209 if input == "exit" || input == "quit" {
210 println!("\n👋 Goodbye!");
211 break;
212 }
213
214 // Special commands
215 if input == "clear" {
216 session.clear();
217 println!("🧹 Conversation cleared!\n");
218 continue;
219 }
220
221 if input == "history" {
222 println!("\n📜 Conversation history:");
223 for (i, msg) in session.messages.iter().enumerate() {
224 println!(" {}. {:?}: {}", i + 1, msg.role, msg.content);
225 }
226 println!();
227 continue;
228 }
229
230 session.add_user_message(input);
231
232 print!("Assistant: ");
233 io::stdout().flush()?;
234
235 match client.chat(session.get_messages(), None).await {
236 Ok(response) => {
237 session.add_assistant_message(&response.content);
238 println!("{}\n", response.content);
239 }
240 Err(e) => {
241 println!("\n❌ Error: {}", e);
242 println!(" (Make sure OPENAI_API_KEY is set correctly)\n");
243 // Remove the last user message since it failed
244 session.messages.pop();
245 }
246 }
247 }
248
249 Ok(())
250 }

pub async fn chat_stream<F>(
    &self,
    messages: Vec<ChatMessage>,
    tools: Option<Vec<ToolDefinition>>,
    on_chunk: F,
) -> Result<ChatMessage>

Like chat, but streams the response: on_chunk is invoked with each piece of text as it arrives, and the complete assistant ChatMessage is returned once the stream finishes.
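Besides printing chunks as the repository examples below do, the callback can also capture local state, for example to accumulate the streamed text. A minimal sketch, under the same crate-root import assumption as above:

use helios_engine::{ChatMessage, LLMClient};

async fn stream_to_string(
    client: &LLMClient,
    messages: Vec<ChatMessage>,
) -> helios_engine::Result<String> {
    let mut buffer = String::new();
    // chat_stream also returns the finished ChatMessage; here only the
    // incrementally collected text is kept.
    let _final_message = client
        .chat_stream(messages, None, |chunk| buffer.push_str(chunk))
        .await?;
    Ok(buffer)
}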
Examples found in repository
examples/streaming_chat.rs (lines 38-41)
11 async fn main() -> helios_engine::Result<()> {
12 println!("🚀 Helios Engine - Streaming Example");
13 println!("=====================================\n");
14
15 // Setup LLM configuration
16 let llm_config = LLMConfig {
17 model_name: "gpt-3.5-turbo".to_string(),
18 base_url: "https://api.openai.com/v1".to_string(),
19 api_key: std::env::var("OPENAI_API_KEY")
20 .unwrap_or_else(|_| "your-api-key-here".to_string()),
21 temperature: 0.7,
22 max_tokens: 2048,
23 };
24
25 let client = LLMClient::new(llm_config);
26
27 println!("Example 1: Simple Streaming Response");
28 println!("======================================\n");
29
30 let messages = vec![
31 ChatMessage::system("You are a helpful assistant."),
32 ChatMessage::user("Write a short poem about coding."),
33 ];
34
35 print!("Assistant: ");
36 io::stdout().flush()?;
37
38 let response = client.chat_stream(messages, None, |chunk| {
39 print!("{}", chunk);
40 io::stdout().flush().unwrap();
41 }).await?;
42
43 println!("\n\n");
44
45 println!("Example 2: Interactive Streaming Chat");
46 println!("======================================\n");
47
48 let mut session = ChatSession::new()
49 .with_system_prompt("You are a helpful coding assistant.");
50
51 let questions = vec![
52 "What is Rust?",
53 "What are its main benefits?",
54 "Show me a simple example.",
55 ];
56
57 for question in questions {
58 println!("User: {}", question);
59 session.add_user_message(question);
60
61 print!("Assistant: ");
62 io::stdout().flush()?;
63
64 let response = client.chat_stream(session.get_messages(), None, |chunk| {
65 print!("{}", chunk);
66 io::stdout().flush().unwrap();
67 }).await?;
68
69 session.add_assistant_message(&response.content);
70 println!("\n");
71 }
72
73 println!("\nExample 3: Streaming with Thinking Tags");
74 println!("=========================================\n");
75 println!("When using models that support thinking tags (like o1),");
76 println!("you can detect and display them during streaming.\n");
77
78 struct ThinkingTracker {
79 in_thinking: bool,
80 thinking_buffer: String,
81 }
82
83 impl ThinkingTracker {
84 fn new() -> Self {
85 Self {
86 in_thinking: false,
87 thinking_buffer: String::new(),
88 }
89 }
90
91 fn process_chunk(&mut self, chunk: &str) -> String {
92 let mut output = String::new();
93 let mut chars = chunk.chars().peekable();
94
95 while let Some(c) = chars.next() {
96 if c == '<' {
97 let remaining: String = chars.clone().collect();
98 if remaining.starts_with("thinking>") {
99 self.in_thinking = true;
100 self.thinking_buffer.clear();
101 output.push_str("\n💭 [Thinking");
102 for _ in 0..9 {
103 chars.next();
104 }
105 continue;
106 } else if remaining.starts_with("/thinking>") {
107 self.in_thinking = false;
108 output.push_str("]\n");
109 for _ in 0..10 {
110 chars.next();
111 }
112 continue;
113 }
114 }
115
116 if self.in_thinking {
117 self.thinking_buffer.push(c);
118 if self.thinking_buffer.len() % 3 == 0 {
119 output.push('.');
120 }
121 } else {
122 output.push(c);
123 }
124 }
125
126 output
127 }
128 }
129
130 let messages = vec![
131 ChatMessage::user("Solve this problem: What is 15 * 234 + 89?"),
132 ];
133
134 let mut tracker = ThinkingTracker::new();
135 print!("Assistant: ");
136 io::stdout().flush()?;
137
138 let _response = client.chat_stream(messages, None, |chunk| {
139 let output = tracker.process_chunk(chunk);
140 print!("{}", output);
141 io::stdout().flush().unwrap();
142 }).await?;
143
144 println!("\n\n✅ Streaming examples completed!");
145 println!("\nKey benefits of streaming:");
146 println!(" • Real-time response display");
147 println!(" • Better user experience for long responses");
148 println!(" • Ability to show thinking/reasoning process");
149 println!(" • Early cancellation possible (future feature)");
150
151 Ok(())
152 }

Trait Implementations§
impl LLMProvider for LLMClient
fn generate<'life0, 'async_trait>(
    &'life0 self,
    request: LLMRequest,
) -> Pin<Box<dyn Future<Output = Result<LLMResponse>> + Send + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,

In practice this is an async method: it takes an LLMRequest and resolves to a Result<LLMResponse>.
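No example on this page goes through the trait, so here is a hedged sketch of staying generic over providers. It assumes LLMProvider, LLMRequest and LLMResponse are importable from the crate root, and leaves LLMRequest construction out since its fields are not shown here:

use helios_engine::{LLMProvider, LLMRequest, LLMResponse};

// Accepts LLMClient or any other LLMProvider implementation.
async fn generate_with<P: LLMProvider>(
    provider: &P,
    request: LLMRequest,
) -> helios_engine::Result<LLMResponse> {
    provider.generate(request).await
}

Because generate returns a boxed future, the same call should also work through a &dyn LLMProvider trait object.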
Auto Trait Implementations§
impl Freeze for LLMClient
impl !RefUnwindSafe for LLMClient
impl Send for LLMClient
impl Sync for LLMClient
impl Unpin for LLMClient
impl !UnwindSafe for LLMClient
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.