direct_llm_usage/
direct_llm_usage.rs

//! # Example: Direct LLM Usage
//!
//! This example demonstrates how to use the Helios Engine as a library to make
//! direct calls to LLM models, without using the `Agent` abstraction. This is
//! useful for simple use cases where you just need to interact with an LLM directly.

use helios_engine::config::LLMConfig;
use helios_engine::{ChatMessage, ChatSession, LLMClient};
use std::io::{self, Write};
10
11#[tokio::main]
12async fn main() -> helios_engine::Result<()> {
13    println!("๐Ÿš€ Helios Direct LLM Usage Examples\n");
14
15    // --- Example 1: Simple single call ---
16    println!("๐Ÿ“ Example 1: Simple Single Call");
17    println!("{}", "=".repeat(50));
18    simple_call().await?;
19    println!();
20
21    // --- Example 2: Conversation with context ---
22    println!("๐Ÿ’ฌ Example 2: Conversation with Context");
23    println!("{}", "=".repeat(50));
24    conversation_with_context().await?;
25    println!();
26
27    // --- Example 3: Different providers ---
28    println!("๐ŸŒ Example 3: Using Different Providers");
29    println!("{}", "=".repeat(50));
30    different_providers_info();
31    println!();
32
33    // --- Example 4: Interactive chat ---
34    println!("๐ŸŽฎ Example 4: Interactive Chat");
35    println!("{}", "=".repeat(50));
36    println!("Would you like to start an interactive chat? (y/n)");
37
38    let mut choice = String::new();
39    io::stdin().read_line(&mut choice)?;
40
41    if choice.trim().to_lowercase() == "y" {
42        interactive_chat().await?;
43    } else {
44        println!("Skipping interactive chat.\n");
45    }
46
47    println!("โœ… All examples completed!");
48    Ok(())
49}
50
51/// Makes a simple, single call to the LLM.
52async fn simple_call() -> helios_engine::Result<()> {
53    // Create a configuration for the LLM.
54    let llm_config = LLMConfig {
55        model_name: "gpt-3.5-turbo".to_string(),
56        base_url: "https://api.openai.com/v1".to_string(),
57        api_key: std::env::var("OPENAI_API_KEY")
58            .unwrap_or_else(|_| "your-api-key-here".to_string()),
59        temperature: 0.7,
60        max_tokens: 2048,
61    };
62
63    // Create a new LLM client.
64    let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
65
66    // Prepare the messages to send to the LLM.
67    let messages = vec![
68        ChatMessage::system("You are a helpful assistant that gives concise answers."),
69        ChatMessage::user("What is the capital of France? Answer in one sentence."),
70    ];
71
72    // Make the call to the LLM.
73    println!("Sending request...");
74    match client.chat(messages, None, None, None, None).await {
75        Ok(response) => {
76            println!("โœ“ Response: {}", response.content);
77        }
78        Err(e) => {
79            println!("โœ— Error: {}", e);
80            println!("  (Make sure to set OPENAI_API_KEY environment variable)");
81        }
82    }
83
84    Ok(())
85}
86
87/// Demonstrates a multi-turn conversation with context.
88async fn conversation_with_context() -> helios_engine::Result<()> {
89    // Create a configuration for the LLM.
90    let llm_config = LLMConfig {
91        model_name: "gpt-3.5-turbo".to_string(),
92        base_url: "https://api.openai.com/v1".to_string(),
93        api_key: std::env::var("OPENAI_API_KEY")
94            .unwrap_or_else(|_| "your-api-key-here".to_string()),
95        temperature: 0.7,
96        max_tokens: 2048,
97    };
98
99    // Create a new LLM client.
100    let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
101
102    // Use a `ChatSession` to manage the conversation history.
103    let mut session = ChatSession::new()
104        .with_system_prompt("You are a helpful math tutor. Give brief, clear explanations.");
105
106    // --- First turn ---
107    println!("Turn 1:");
108    session.add_user_message("What is 15 * 23?");
109    print!("  User: What is 15 * 23?\n  ");
110
111    match client
112        .chat(session.get_messages(), None, None, None, None)
113        .await
114    {
115        Ok(response) => {
116            session.add_assistant_message(&response.content);
117            println!("Assistant: {}", response.content);
118        }
119        Err(e) => {
120            println!("Error: {}", e);
121            return Ok(());
122        }
123    }
124
125    // --- Second turn (with context from the first turn) ---
126    println!("\nTurn 2:");
127    session.add_user_message("Now divide that by 5.");
128    print!("  User: Now divide that by 5.\n  ");
129
130    match client
131        .chat(session.get_messages(), None, None, None, None)
132        .await
133    {
134        Ok(response) => {
135            session.add_assistant_message(&response.content);
136            println!("Assistant: {}", response.content);
137        }
138        Err(e) => {
139            println!("Error: {}", e);
140        }
141    }
142
143    println!("\n๐Ÿ’ก Notice how the assistant remembered the result from the first calculation!");
144
145    Ok(())
146}
147
/// Prints ready-to-copy `LLMConfig` snippets for several popular providers.
fn different_providers_info() {
    println!("You can use Helios with various LLM providers:\n");

    // (heading, model_name expr, base_url expr, api_key expr) per provider;
    // temperature and max_tokens are identical across all of them.
    let providers = [
        (
            "🔵 OpenAI:",
            "\"gpt-4\".to_string()",
            "\"https://api.openai.com/v1\".to_string()",
            "env::var(\"OPENAI_API_KEY\").unwrap()",
        ),
        (
            "🟢 Local LM Studio:",
            "\"local-model\".to_string()",
            "\"http://localhost:1234/v1\".to_string()",
            "\"not-needed\".to_string()",
        ),
        (
            "🦙 Ollama:",
            "\"llama2\".to_string()",
            "\"http://localhost:11434/v1\".to_string()",
            "\"not-needed\".to_string()",
        ),
        (
            "🔷 Azure OpenAI:",
            "\"gpt-35-turbo\".to_string()",
            "\"https://your-resource.openai.azure.com/...\".to_string()",
            "env::var(\"AZURE_OPENAI_KEY\").unwrap()",
        ),
    ];

    for (heading, model, url, key) in providers {
        println!("{}", heading);
        println!("   LLMConfig {{");
        println!("       model_name: {},", model);
        println!("       base_url: {},", url);
        println!("       api_key: {},", key);
        println!("       temperature: 0.7,");
        println!("       max_tokens: 2048,");
        println!("   }}\n");
    }
}
188
189/// Starts an interactive chat session with the LLM.
190async fn interactive_chat() -> helios_engine::Result<()> {
191    // Create a configuration for the LLM.
192    let llm_config = LLMConfig {
193        model_name: "gpt-3.5-turbo".to_string(),
194        base_url: "https://api.openai.com/v1".to_string(),
195        api_key: std::env::var("OPENAI_API_KEY")
196            .unwrap_or_else(|_| "your-api-key-here".to_string()),
197        temperature: 0.7,
198        max_tokens: 2048,
199    };
200
201    // Create a new LLM client.
202    let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
203    let mut session =
204        ChatSession::new().with_system_prompt("You are a friendly and helpful AI assistant.");
205
206    println!("Chat started! Type 'exit' or 'quit' to end the conversation.\n");
207
208    loop {
209        print!("You: ");
210        io::stdout().flush()?;
211
212        let mut input = String::new();
213        io::stdin().read_line(&mut input)?;
214        let input = input.trim();
215
216        if input.is_empty() {
217            continue;
218        }
219
220        if input == "exit" || input == "quit" {
221            println!("\n๐Ÿ‘‹ Goodbye!");
222            break;
223        }
224
225        // Handle special commands.
226        if input == "clear" {
227            session.clear();
228            println!("๐Ÿงน Conversation cleared!\n");
229            continue;
230        }
231
232        if input == "history" {
233            println!("\n๐Ÿ“œ Conversation history:");
234            for (i, msg) in session.messages.iter().enumerate() {
235                println!("  {}. {:?}: {}", i + 1, msg.role, msg.content);
236            }
237            println!();
238            continue;
239        }
240
241        session.add_user_message(input);
242
243        print!("Assistant: ");
244        io::stdout().flush()?;
245
246        match client
247            .chat(session.get_messages(), None, None, None, None)
248            .await
249        {
250            Ok(response) => {
251                session.add_assistant_message(&response.content);
252                println!("{}\n", response.content);
253            }
254            Err(e) => {
255                println!("\nโŒ Error: {}", e);
256                println!("   (Make sure OPENAI_API_KEY is set correctly)\n");
257                // Remove the last user message since it failed.
258                session.messages.pop();
259            }
260        }
261    }
262
263    Ok(())
264}