direct_llm_usage/
direct_llm_usage.rs

//! Example: Using Helios as a crate for direct LLM calls
//!
//! This example demonstrates how to use Helios as a library to make
//! direct calls to LLM models without using the Agent abstraction.

use helios_engine::config::LLMConfig;
use helios_engine::{ChatMessage, ChatSession, LLMClient};
use std::io::{self, Write};
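
// To try this against a real endpoint (assuming the file is registered as a
// Cargo example named `direct_llm_usage`):
//
//     OPENAI_API_KEY=sk-... cargo run --example direct_llm_usage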

#[tokio::main]
async fn main() -> helios_engine::Result<()> {
    println!("🚀 Helios Direct LLM Usage Examples\n");

    // Example 1: Simple single call
    println!("📝 Example 1: Simple Single Call");
    println!("{}", "=".repeat(50));
    simple_call().await?;
    println!();

    // Example 2: Conversation with context
    println!("💬 Example 2: Conversation with Context");
    println!("{}", "=".repeat(50));
    conversation_with_context().await?;
    println!();

    // Example 3: Different providers
    println!("🌐 Example 3: Using Different Providers");
    println!("{}", "=".repeat(50));
    different_providers_info();
    println!();

    // Example 4: Interactive chat (optional - comment out if not needed)
    println!("🎮 Example 4: Interactive Chat");
    println!("{}", "=".repeat(50));
    println!("Would you like to start an interactive chat? (y/n)");

    let mut choice = String::new();
    io::stdin().read_line(&mut choice)?;

    if choice.trim().to_lowercase() == "y" {
        interactive_chat().await?;
    } else {
        println!("Skipping interactive chat.\n");
    }

    println!("✅ All examples completed!");
    Ok(())
}

/// Example 1: Simple single call to the LLM
async fn simple_call() -> helios_engine::Result<()> {
    // Create configuration
    let llm_config = LLMConfig {
        model_name: "gpt-3.5-turbo".to_string(),
        base_url: "https://api.openai.com/v1".to_string(),
        api_key: std::env::var("OPENAI_API_KEY")
            .unwrap_or_else(|_| "your-api-key-here".to_string()),
        temperature: 0.7,
        max_tokens: 2048,
    };

    // Create client
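    // (`Remote` sends requests to an OpenAI-compatible HTTP endpoint; the
    // same `LLMConfig` shape also works with the local servers shown in
    // Example 3.)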
    let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;

    // Prepare messages
    let messages = vec![
        ChatMessage::system("You are a helpful assistant that gives concise answers."),
        ChatMessage::user("What is the capital of France? Answer in one sentence."),
    ];

    // Make the call
    println!("Sending request...");
    match client.chat(messages, None).await {
        Ok(response) => {
            println!("✓ Response: {}", response.content);
        }
        Err(e) => {
            println!("✗ Error: {}", e);
            println!("  (Make sure to set the OPENAI_API_KEY environment variable)");
        }
    }

    Ok(())
}

/// Example 2: Multi-turn conversation with context
async fn conversation_with_context() -> helios_engine::Result<()> {
    let llm_config = LLMConfig {
        model_name: "gpt-3.5-turbo".to_string(),
        base_url: "https://api.openai.com/v1".to_string(),
        api_key: std::env::var("OPENAI_API_KEY")
            .unwrap_or_else(|_| "your-api-key-here".to_string()),
        temperature: 0.7,
        max_tokens: 2048,
    };

    let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;

    // Use ChatSession to manage conversation
    let mut session = ChatSession::new()
        .with_system_prompt("You are a helpful math tutor. Give brief, clear explanations.");

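    // Each request below sends session.get_messages(), i.e. the system prompt
    // plus every prior turn, which is how the model keeps context between
    // turns. Note that this also means token usage grows as the session does.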
    // First turn
    println!("Turn 1:");
    session.add_user_message("What is 15 * 23?");
    print!("  User: What is 15 * 23?\n  ");

    match client.chat(session.get_messages(), None).await {
        Ok(response) => {
            session.add_assistant_message(&response.content);
            println!("Assistant: {}", response.content);
        }
        Err(e) => {
            println!("Error: {}", e);
            return Ok(());
        }
    }

    // Second turn (with context from first turn)
    println!("\nTurn 2:");
    session.add_user_message("Now divide that by 5.");
    print!("  User: Now divide that by 5.\n  ");

    match client.chat(session.get_messages(), None).await {
        Ok(response) => {
            session.add_assistant_message(&response.content);
            println!("Assistant: {}", response.content);
        }
        Err(e) => {
            println!("Error: {}", e);
        }
    }

    println!("\n💡 Notice how the assistant remembered the result from the first calculation!");

    Ok(())
}

/// Example 3: Information about using different providers
fn different_providers_info() {
    println!("You can use Helios with various LLM providers:\n");

    println!("🔵 OpenAI:");
    println!("   LLMConfig {{");
    println!("       model_name: \"gpt-4\".to_string(),");
    println!("       base_url: \"https://api.openai.com/v1\".to_string(),");
    println!("       api_key: env::var(\"OPENAI_API_KEY\").unwrap(),");
    println!("       temperature: 0.7,");
    println!("       max_tokens: 2048,");
    println!("   }}\n");

    println!("🟢 Local LM Studio:");
    println!("   LLMConfig {{");
    println!("       model_name: \"local-model\".to_string(),");
    println!("       base_url: \"http://localhost:1234/v1\".to_string(),");
    println!("       api_key: \"not-needed\".to_string(),");
    println!("       temperature: 0.7,");
    println!("       max_tokens: 2048,");
    println!("   }}\n");

    println!("🦙 Ollama:");
    println!("   LLMConfig {{");
    println!("       model_name: \"llama2\".to_string(),");
    println!("       base_url: \"http://localhost:11434/v1\".to_string(),");
    println!("       api_key: \"not-needed\".to_string(),");
    println!("       temperature: 0.7,");
    println!("       max_tokens: 2048,");
    println!("   }}\n");

    println!("🔷 Azure OpenAI:");
    println!("   LLMConfig {{");
    println!("       model_name: \"gpt-35-turbo\".to_string(),");
    println!("       base_url: \"https://your-resource.openai.azure.com/...\".to_string(),");
    println!("       api_key: env::var(\"AZURE_OPENAI_KEY\").unwrap(),");
    println!("       temperature: 0.7,");
    println!("       max_tokens: 2048,");
    println!("   }}\n");
}

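// A minimal sketch of one of the configurations above as real code: the
// Ollama variant, via its OpenAI-compatible endpoint. The field values mirror
// the printed example; `ollama_config` is just a local name for this sketch,
// not part of the Helios API.
#[allow(dead_code)]
fn ollama_config() -> LLMConfig {
    LLMConfig {
        model_name: "llama2".to_string(),
        base_url: "http://localhost:11434/v1".to_string(),
        // Ollama ignores the API key, but the field still needs a value.
        api_key: "not-needed".to_string(),
        temperature: 0.7,
        max_tokens: 2048,
    }
}
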
/// Example 4: Interactive chat session
async fn interactive_chat() -> helios_engine::Result<()> {
    let llm_config = LLMConfig {
        model_name: "gpt-3.5-turbo".to_string(),
        base_url: "https://api.openai.com/v1".to_string(),
        api_key: std::env::var("OPENAI_API_KEY")
            .unwrap_or_else(|_| "your-api-key-here".to_string()),
        temperature: 0.7,
        max_tokens: 2048,
    };

    let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
    let mut session =
        ChatSession::new().with_system_prompt("You are a friendly and helpful AI assistant.");

    println!("Chat started! Type 'exit' or 'quit' to end, 'clear' to reset the conversation, or 'history' to review it.\n");

    loop {
        print!("You: ");
        io::stdout().flush()?;

        let mut input = String::new();
        io::stdin().read_line(&mut input)?;
        let input = input.trim();

        if input.is_empty() {
            continue;
        }

        if input == "exit" || input == "quit" {
            println!("\n👋 Goodbye!");
            break;
        }

        // Special commands
        if input == "clear" {
            session.clear();
            println!("🧹 Conversation cleared!\n");
            continue;
        }

        if input == "history" {
            println!("\n📜 Conversation history:");
            for (i, msg) in session.messages.iter().enumerate() {
                println!("  {}. {:?}: {}", i + 1, msg.role, msg.content);
            }
            println!();
            continue;
        }

        session.add_user_message(input);

        print!("Assistant: ");
        io::stdout().flush()?;

        match client.chat(session.get_messages(), None).await {
            Ok(response) => {
                session.add_assistant_message(&response.content);
                println!("{}\n", response.content);
            }
            Err(e) => {
                println!("\n❌ Error: {}", e);
                println!("   (Make sure OPENAI_API_KEY is set correctly)\n");
                // Remove the last user message since it failed
                session.messages.pop();
            }
        }
    }

    Ok(())
}