direct_llm_usage/
direct_llm_usage.rs

//! Example: Using Helios as a crate for direct LLM calls
//!
//! This example demonstrates how to use Helios as a library to make
//! direct calls to LLM models without using the Agent abstraction.
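//!
//! To run (assuming this file is registered as a Cargo example named
//! `direct_llm_usage`; adjust the name to match your Cargo.toml):
//!
//! ```sh
//! OPENAI_API_KEY=sk-... cargo run --example direct_llm_usage
//! ```
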
use helios_engine::{LLMClient, ChatMessage, ChatSession};
use helios_engine::config::LLMConfig;
use std::io::{self, Write};

#[tokio::main]
async fn main() -> helios_engine::Result<()> {
    println!("🚀 Helios Direct LLM Usage Examples\n");

    // Example 1: Simple single call
    println!("📝 Example 1: Simple Single Call");
    println!("{}", "=".repeat(50));
    simple_call().await?;
    println!();

    // Example 2: Conversation with context
    println!("💬 Example 2: Conversation with Context");
    println!("{}", "=".repeat(50));
    conversation_with_context().await?;
    println!();

    // Example 3: Different providers
    println!("🌐 Example 3: Using Different Providers");
    println!("{}", "=".repeat(50));
    different_providers_info();
    println!();

    // Example 4: Interactive chat (optional; skipped unless confirmed below)
    println!("🎮 Example 4: Interactive Chat");
    println!("{}", "=".repeat(50));
    println!("Would you like to start an interactive chat? (y/n)");

    let mut choice = String::new();
    io::stdin().read_line(&mut choice)?;

    if choice.trim().to_lowercase() == "y" {
        interactive_chat().await?;
    } else {
        println!("Skipping interactive chat.\n");
    }

    println!("✅ All examples completed!");
    Ok(())
}
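
// The three example functions below each build the same `LLMConfig`
// inline so every snippet stays self-contained. In real code you would
// likely factor the configuration out; a minimal sketch of such a helper
// (not called by the examples; values mirror those used below):
#[allow(dead_code)]
fn openai_config_from_env() -> LLMConfig {
    LLMConfig {
        model_name: "gpt-3.5-turbo".to_string(),
        base_url: "https://api.openai.com/v1".to_string(),
        api_key: std::env::var("OPENAI_API_KEY")
            .unwrap_or_else(|_| "your-api-key-here".to_string()),
        temperature: 0.7,
        max_tokens: 2048,
    }
}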

/// Example 1: Simple single call to the LLM
async fn simple_call() -> helios_engine::Result<()> {
    // Create configuration
    let llm_config = LLMConfig {
        model_name: "gpt-3.5-turbo".to_string(),
        base_url: "https://api.openai.com/v1".to_string(),
        api_key: std::env::var("OPENAI_API_KEY")
            .unwrap_or_else(|_| "your-api-key-here".to_string()),
        temperature: 0.7,
        max_tokens: 2048,
    };

    // Create client
    let client = LLMClient::new(llm_config);

    // Prepare messages
    let messages = vec![
        ChatMessage::system("You are a helpful assistant that gives concise answers."),
        ChatMessage::user("What is the capital of France? Answer in one sentence."),
    ];

    // Make the call
    println!("Sending request...");
    match client.chat(messages, None).await {
        Ok(response) => {
            println!("✓ Response: {}", response.content);
        }
        Err(e) => {
            println!("✗ Error: {}", e);
            println!("  (Make sure the OPENAI_API_KEY environment variable is set.)");
        }
    }

    Ok(())
}

/// Example 2: Multi-turn conversation with context
async fn conversation_with_context() -> helios_engine::Result<()> {
    let llm_config = LLMConfig {
        model_name: "gpt-3.5-turbo".to_string(),
        base_url: "https://api.openai.com/v1".to_string(),
        api_key: std::env::var("OPENAI_API_KEY")
            .unwrap_or_else(|_| "your-api-key-here".to_string()),
        temperature: 0.7,
        max_tokens: 2048,
    };

    let client = LLMClient::new(llm_config);

    // Use ChatSession to manage the conversation history
    let mut session = ChatSession::new()
        .with_system_prompt("You are a helpful math tutor. Give brief, clear explanations.");

    // First turn
    println!("Turn 1:");
    session.add_user_message("What is 15 * 23?");
    print!("  User: What is 15 * 23?\n  ");

    match client.chat(session.get_messages(), None).await {
        Ok(response) => {
            session.add_assistant_message(&response.content);
            println!("Assistant: {}", response.content);
        }
        Err(e) => {
            println!("Error: {}", e);
            return Ok(());
        }
    }

    // Second turn (with context from the first turn)
    println!("\nTurn 2:");
    session.add_user_message("Now divide that by 5.");
    print!("  User: Now divide that by 5.\n  ");

    match client.chat(session.get_messages(), None).await {
        Ok(response) => {
            session.add_assistant_message(&response.content);
            println!("Assistant: {}", response.content);
        }
        Err(e) => {
            println!("Error: {}", e);
        }
    }

    println!("\n💡 Notice how the assistant remembered the result from the first calculation!");

    Ok(())
}
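
// A minimal sketch of the context-accumulation pattern used above,
// assuming `get_messages()` returns the full history (system prompt
// included) in insertion order. Not called by the examples.
#[allow(dead_code)]
fn context_accumulation_sketch() {
    let mut session = ChatSession::new()
        .with_system_prompt("You are a helpful math tutor.");
    session.add_user_message("What is 15 * 23?");
    session.add_assistant_message("15 * 23 = 345.");
    session.add_user_message("Now divide that by 5.");
    // Passing `session.get_messages()` to `LLMClient::chat` resends every
    // prior turn, which is how the model can resolve "that" to 345.
    let _full_history = session.get_messages();
}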

/// Example 3: Information about using different providers
fn different_providers_info() {
    println!("You can use Helios with various LLM providers:\n");

    println!("🔵 OpenAI:");
    println!("   LLMConfig {{");
    println!("       model_name: \"gpt-4\".to_string(),");
    println!("       base_url: \"https://api.openai.com/v1\".to_string(),");
    println!("       api_key: env::var(\"OPENAI_API_KEY\").unwrap(),");
    println!("       temperature: 0.7,");
    println!("       max_tokens: 2048,");
    println!("   }}\n");

    println!("🟢 Local LM Studio:");
    println!("   LLMConfig {{");
    println!("       model_name: \"local-model\".to_string(),");
    println!("       base_url: \"http://localhost:1234/v1\".to_string(),");
    println!("       api_key: \"not-needed\".to_string(),");
    println!("       temperature: 0.7,");
    println!("       max_tokens: 2048,");
    println!("   }}\n");

    println!("🦙 Ollama:");
    println!("   LLMConfig {{");
    println!("       model_name: \"llama2\".to_string(),");
    println!("       base_url: \"http://localhost:11434/v1\".to_string(),");
    println!("       api_key: \"not-needed\".to_string(),");
    println!("       temperature: 0.7,");
    println!("       max_tokens: 2048,");
    println!("   }}\n");

    println!("🔷 Azure OpenAI:");
    println!("   LLMConfig {{");
    println!("       model_name: \"gpt-35-turbo\".to_string(),");
    println!("       base_url: \"https://your-resource.openai.azure.com/...\".to_string(),");
    println!("       api_key: env::var(\"AZURE_OPENAI_KEY\").unwrap(),");
    println!("       temperature: 0.7,");
    println!("       max_tokens: 2048,");
    println!("   }}\n");
}
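
// The provider settings above are only printed as text; here is one of
// them (the Ollama variant) as compilable code. The endpoint and model
// name are the illustrative values from the printout, not verified
// defaults for your installation.
#[allow(dead_code)]
fn ollama_config() -> LLMConfig {
    LLMConfig {
        model_name: "llama2".to_string(),
        base_url: "http://localhost:11434/v1".to_string(),
        api_key: "not-needed".to_string(),
        temperature: 0.7,
        max_tokens: 2048,
    }
}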

/// Example 4: Interactive chat session
async fn interactive_chat() -> helios_engine::Result<()> {
    let llm_config = LLMConfig {
        model_name: "gpt-3.5-turbo".to_string(),
        base_url: "https://api.openai.com/v1".to_string(),
        api_key: std::env::var("OPENAI_API_KEY")
            .unwrap_or_else(|_| "your-api-key-here".to_string()),
        temperature: 0.7,
        max_tokens: 2048,
    };

    let client = LLMClient::new(llm_config);
    let mut session = ChatSession::new()
        .with_system_prompt("You are a friendly and helpful AI assistant.");

    println!("Chat started! Type 'exit' or 'quit' to end the conversation,");
    println!("'clear' to reset it, or 'history' to review it.\n");

    loop {
        print!("You: ");
        io::stdout().flush()?;

        let mut input = String::new();
        io::stdin().read_line(&mut input)?;
        let input = input.trim();

        if input.is_empty() {
            continue;
        }

        if input == "exit" || input == "quit" {
            println!("\n👋 Goodbye!");
            break;
        }

        // Special commands
        if input == "clear" {
            session.clear();
            println!("🧹 Conversation cleared!\n");
            continue;
        }

        if input == "history" {
            println!("\n📜 Conversation history:");
            for (i, msg) in session.messages.iter().enumerate() {
                println!("  {}. {:?}: {}", i + 1, msg.role, msg.content);
            }
            println!();
            continue;
        }

        session.add_user_message(input);

        print!("Assistant: ");
        io::stdout().flush()?;

        match client.chat(session.get_messages(), None).await {
            Ok(response) => {
                session.add_assistant_message(&response.content);
                println!("{}\n", response.content);
            }
            Err(e) => {
                println!("\n❌ Error: {}", e);
                println!("   (Make sure OPENAI_API_KEY is set correctly.)\n");
                // Roll back the failed user message so the stored history
                // stays consistent with what the model actually saw.
                session.messages.pop();
            }
        }
    }

    Ok(())
}