// Example: direct_llm_usage.rs
use helios_engine::config::LLMConfig;
use helios_engine::{ChatMessage, ChatSession, LLMClient};
use std::io::{self, Write};
10
11#[tokio::main]
12async fn main() -> helios_engine::Result<()> {
13 println!("๐ Helios Direct LLM Usage Examples\n");
14
15 println!("๐ Example 1: Simple Single Call");
17 println!("{}", "=".repeat(50));
18 simple_call().await?;
19 println!();
20
21 println!("๐ฌ Example 2: Conversation with Context");
23 println!("{}", "=".repeat(50));
24 conversation_with_context().await?;
25 println!();
26
27 println!("๐ Example 3: Using Different Providers");
29 println!("{}", "=".repeat(50));
30 different_providers_info();
31 println!();
32
33 println!("๐ฎ Example 4: Interactive Chat");
35 println!("{}", "=".repeat(50));
36 println!("Would you like to start an interactive chat? (y/n)");
37
38 let mut choice = String::new();
39 io::stdin().read_line(&mut choice)?;
40
41 if choice.trim().to_lowercase() == "y" {
42 interactive_chat().await?;
43 } else {
44 println!("Skipping interactive chat.\n");
45 }
46
47 println!("โ
All examples completed!");
48 Ok(())
49}
50
51async fn simple_call() -> helios_engine::Result<()> {
53 let llm_config = LLMConfig {
55 model_name: "gpt-3.5-turbo".to_string(),
56 base_url: "https://api.openai.com/v1".to_string(),
57 api_key: std::env::var("OPENAI_API_KEY")
58 .unwrap_or_else(|_| "your-api-key-here".to_string()),
59 temperature: 0.7,
60 max_tokens: 2048,
61 };
62
63 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
65
66 let messages = vec![
68 ChatMessage::system("You are a helpful assistant that gives concise answers."),
69 ChatMessage::user("What is the capital of France? Answer in one sentence."),
70 ];
71
72 println!("Sending request...");
74 match client.chat(messages, None, None, None, None).await {
75 Ok(response) => {
76 println!("โ Response: {}", response.content);
77 }
78 Err(e) => {
79 println!("โ Error: {}", e);
80 println!(" (Make sure to set OPENAI_API_KEY environment variable)");
81 }
82 }
83
84 Ok(())
85}
86
87async fn conversation_with_context() -> helios_engine::Result<()> {
89 let llm_config = LLMConfig {
91 model_name: "gpt-3.5-turbo".to_string(),
92 base_url: "https://api.openai.com/v1".to_string(),
93 api_key: std::env::var("OPENAI_API_KEY")
94 .unwrap_or_else(|_| "your-api-key-here".to_string()),
95 temperature: 0.7,
96 max_tokens: 2048,
97 };
98
99 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
101
102 let mut session = ChatSession::new()
104 .with_system_prompt("You are a helpful math tutor. Give brief, clear explanations.");
105
106 println!("Turn 1:");
108 session.add_user_message("What is 15 * 23?");
109 print!(" User: What is 15 * 23?\n ");
110
111 match client
112 .chat(session.get_messages(), None, None, None, None)
113 .await
114 {
115 Ok(response) => {
116 session.add_assistant_message(&response.content);
117 println!("Assistant: {}", response.content);
118 }
119 Err(e) => {
120 println!("Error: {}", e);
121 return Ok(());
122 }
123 }
124
125 println!("\nTurn 2:");
127 session.add_user_message("Now divide that by 5.");
128 print!(" User: Now divide that by 5.\n ");
129
130 match client
131 .chat(session.get_messages(), None, None, None, None)
132 .await
133 {
134 Ok(response) => {
135 session.add_assistant_message(&response.content);
136 println!("Assistant: {}", response.content);
137 }
138 Err(e) => {
139 println!("Error: {}", e);
140 }
141 }
142
143 println!("\n๐ก Notice how the assistant remembered the result from the first calculation!");
144
145 Ok(())
146}
147
/// Prints sample `LLMConfig` snippets for several OpenAI-compatible
/// providers (OpenAI, LM Studio, Ollama, Azure OpenAI). Output only; no
/// network calls are made.
fn different_providers_info() {
    // (header, model_name, base_url, api_key) — only these four fields
    // differ between providers; temperature and max_tokens are shared.
    let providers = [
        (
            "๐ต OpenAI:",
            "\"gpt-4\".to_string(),",
            "\"https://api.openai.com/v1\".to_string(),",
            "env::var(\"OPENAI_API_KEY\").unwrap(),",
        ),
        (
            "๐ข Local LM Studio:",
            "\"local-model\".to_string(),",
            "\"http://localhost:1234/v1\".to_string(),",
            "\"not-needed\".to_string(),",
        ),
        (
            "๐ฆ Ollama:",
            "\"llama2\".to_string(),",
            "\"http://localhost:11434/v1\".to_string(),",
            "\"not-needed\".to_string(),",
        ),
        (
            "๐ท Azure OpenAI:",
            "\"gpt-35-turbo\".to_string(),",
            "\"https://your-resource.openai.azure.com/...\".to_string(),",
            "env::var(\"AZURE_OPENAI_KEY\").unwrap(),",
        ),
    ];

    println!("You can use Helios with various LLM providers:\n");

    for &(header, model_name, base_url, api_key) in &providers {
        println!("{}", header);
        println!(" LLMConfig {{");
        println!(" model_name: {}", model_name);
        println!(" base_url: {}", base_url);
        println!(" api_key: {}", api_key);
        println!(" temperature: 0.7,");
        println!(" max_tokens: 2048,");
        println!(" }}\n");
    }
}
188
189async fn interactive_chat() -> helios_engine::Result<()> {
191 let llm_config = LLMConfig {
193 model_name: "gpt-3.5-turbo".to_string(),
194 base_url: "https://api.openai.com/v1".to_string(),
195 api_key: std::env::var("OPENAI_API_KEY")
196 .unwrap_or_else(|_| "your-api-key-here".to_string()),
197 temperature: 0.7,
198 max_tokens: 2048,
199 };
200
201 let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
203 let mut session =
204 ChatSession::new().with_system_prompt("You are a friendly and helpful AI assistant.");
205
206 println!("Chat started! Type 'exit' or 'quit' to end the conversation.\n");
207
208 loop {
209 print!("You: ");
210 io::stdout().flush()?;
211
212 let mut input = String::new();
213 io::stdin().read_line(&mut input)?;
214 let input = input.trim();
215
216 if input.is_empty() {
217 continue;
218 }
219
220 if input == "exit" || input == "quit" {
221 println!("\n๐ Goodbye!");
222 break;
223 }
224
225 if input == "clear" {
227 session.clear();
228 println!("๐งน Conversation cleared!\n");
229 continue;
230 }
231
232 if input == "history" {
233 println!("\n๐ Conversation history:");
234 for (i, msg) in session.messages.iter().enumerate() {
235 println!(" {}. {:?}: {}", i + 1, msg.role, msg.content);
236 }
237 println!();
238 continue;
239 }
240
241 session.add_user_message(input);
242
243 print!("Assistant: ");
244 io::stdout().flush()?;
245
246 match client
247 .chat(session.get_messages(), None, None, None, None)
248 .await
249 {
250 Ok(response) => {
251 session.add_assistant_message(&response.content);
252 println!("{}\n", response.content);
253 }
254 Err(e) => {
255 println!("\nโ Error: {}", e);
256 println!(" (Make sure OPENAI_API_KEY is set correctly)\n");
257 session.messages.pop();
259 }
260 }
261 }
262
263 Ok(())
264}