use helios_engine::config::LLMConfig;
use helios_engine::{ChatMessage, ChatSession, LLMClient};
use std::io::{self, Write};
/// Entry point: runs each of the direct-LLM usage examples in turn.
///
/// Examples 1, 2 and 4 talk to a remote OpenAI-compatible endpoint and
/// therefore need `OPENAI_API_KEY` to be set; example 3 only prints
/// reference text and never touches the network.
#[tokio::main]
async fn main() -> helios_engine::Result<()> {
    println!("๐ Helios Direct LLM Usage Examples\n");

    banner("๐ Example 1: Simple Single Call");
    simple_call().await?;
    println!();

    banner("๐ฌ Example 2: Conversation with Context");
    conversation_with_context().await?;
    println!();

    banner("๐ Example 3: Using Different Providers");
    different_providers_info();
    println!();

    banner("๐ฎ Example 4: Interactive Chat");
    println!("Would you like to start an interactive chat? (y/n)");
    let mut choice = String::new();
    io::stdin().read_line(&mut choice)?;
    if choice.trim().to_lowercase() == "y" {
        interactive_chat().await?;
    } else {
        println!("Skipping interactive chat.\n");
    }

    // BUG FIX: this literal was split across two source lines by a mangled
    // "✅" (U+2705) whose trailing byte decoded as a line separator, so the
    // closing message printed with a stray newline in the middle. Restored
    // to a single line (emoji reconstructed from the surrounding pattern).
    println!("✅ All examples completed!");
    Ok(())
}

/// Prints a section title followed by a 50-character `=` rule.
/// Factors out the banner pattern that was repeated before each example.
fn banner(title: &str) {
    println!("{}", title);
    println!("{}", "=".repeat(50));
}
/// Demonstrates a one-shot chat completion against a remote
/// OpenAI-compatible endpoint.
///
/// The API key comes from `OPENAI_API_KEY`, falling back to a placeholder
/// so the example still runs (and fails gracefully) without credentials.
async fn simple_call() -> helios_engine::Result<()> {
    let api_key =
        std::env::var("OPENAI_API_KEY").unwrap_or_else(|_| "your-api-key-here".to_string());
    let config = LLMConfig {
        model_name: "gpt-3.5-turbo".to_string(),
        base_url: "https://api.openai.com/v1".to_string(),
        api_key,
        temperature: 0.7,
        max_tokens: 2048,
    };
    let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(config)).await?;

    // One system prompt plus a single user turn.
    let messages = vec![
        ChatMessage::system("You are a helpful assistant that gives concise answers."),
        ChatMessage::user("What is the capital of France? Answer in one sentence."),
    ];

    println!("Sending request...");
    let outcome = client.chat(messages, None, None, None, None).await;
    match outcome {
        Ok(response) => println!("โ Response: {}", response.content),
        Err(e) => {
            println!("โ Error: {}", e);
            println!(" (Make sure to set OPENAI_API_KEY environment variable)");
        }
    }
    Ok(())
}
async fn conversation_with_context() -> helios_engine::Result<()> {
let llm_config = LLMConfig {
model_name: "gpt-3.5-turbo".to_string(),
base_url: "https://api.openai.com/v1".to_string(),
api_key: std::env::var("OPENAI_API_KEY")
.unwrap_or_else(|_| "your-api-key-here".to_string()),
temperature: 0.7,
max_tokens: 2048,
};
let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(llm_config)).await?;
let mut session = ChatSession::new()
.with_system_prompt("You are a helpful math tutor. Give brief, clear explanations.");
println!("Turn 1:");
session.add_user_message("What is 15 * 23?");
print!(" User: What is 15 * 23?\n ");
match client
.chat(session.get_messages(), None, None, None, None)
.await
{
Ok(response) => {
session.add_assistant_message(&response.content);
println!("Assistant: {}", response.content);
}
Err(e) => {
println!("Error: {}", e);
return Ok(());
}
}
println!("\nTurn 2:");
session.add_user_message("Now divide that by 5.");
print!(" User: Now divide that by 5.\n ");
match client
.chat(session.get_messages(), None, None, None, None)
.await
{
Ok(response) => {
session.add_assistant_message(&response.content);
println!("Assistant: {}", response.content);
}
Err(e) => {
println!("Error: {}", e);
}
}
println!("\n๐ก Notice how the assistant remembered the result from the first calculation!");
Ok(())
}
/// Prints sample `LLMConfig` snippets for several common providers.
/// Purely informational — nothing here touches the network.
fn different_providers_info() {
    println!("You can use Helios with various LLM providers:\n");

    // (section label, model_name expr, base_url expr, api_key expr).
    // temperature and max_tokens are identical for every provider, so
    // only the varying fields are tabulated.
    let providers: [(&str, &str, &str, &str); 4] = [
        (
            "๐ต OpenAI:",
            "\"gpt-4\".to_string()",
            "\"https://api.openai.com/v1\".to_string()",
            "env::var(\"OPENAI_API_KEY\").unwrap()",
        ),
        (
            "๐ข Local LM Studio:",
            "\"local-model\".to_string()",
            "\"http://localhost:1234/v1\".to_string()",
            "\"not-needed\".to_string()",
        ),
        (
            "๐ฆ Ollama:",
            "\"llama2\".to_string()",
            "\"http://localhost:11434/v1\".to_string()",
            "\"not-needed\".to_string()",
        ),
        (
            "๐ท Azure OpenAI:",
            "\"gpt-35-turbo\".to_string()",
            "\"https://your-resource.openai.azure.com/...\".to_string()",
            "env::var(\"AZURE_OPENAI_KEY\").unwrap()",
        ),
    ];

    for &(label, model, url, key) in providers.iter() {
        println!("{}", label);
        println!(" LLMConfig {{");
        println!(" model_name: {},", model);
        println!(" base_url: {},", url);
        println!(" api_key: {},", key);
        println!(" temperature: 0.7,");
        println!(" max_tokens: 2048,");
        println!(" }}\n");
    }
}
/// Runs a simple REPL-style chat loop against the remote model.
///
/// Built-in commands: `exit`/`quit` end the loop, `clear` wipes the
/// session, `history` dumps the accumulated messages; anything else is
/// sent to the model as a user turn.
async fn interactive_chat() -> helios_engine::Result<()> {
    let config = LLMConfig {
        model_name: "gpt-3.5-turbo".to_string(),
        base_url: "https://api.openai.com/v1".to_string(),
        api_key: std::env::var("OPENAI_API_KEY")
            .unwrap_or_else(|_| "your-api-key-here".to_string()),
        temperature: 0.7,
        max_tokens: 2048,
    };
    let client = LLMClient::new(helios_engine::llm::LLMProviderType::Remote(config)).await?;
    let mut session =
        ChatSession::new().with_system_prompt("You are a friendly and helpful AI assistant.");

    println!("Chat started! Type 'exit' or 'quit' to end the conversation.\n");

    loop {
        print!("You: ");
        io::stdout().flush()?;

        let mut line = String::new();
        io::stdin().read_line(&mut line)?;
        let input = line.trim();

        // Handle the built-in commands before talking to the model.
        match input {
            "" => continue,
            "exit" | "quit" => {
                println!("\n๐ Goodbye!");
                break;
            }
            "clear" => {
                session.clear();
                println!("๐งน Conversation cleared!\n");
                continue;
            }
            "history" => {
                println!("\n๐ Conversation history:");
                for (i, msg) in session.messages.iter().enumerate() {
                    println!(" {}. {:?}: {}", i + 1, msg.role, msg.content);
                }
                println!();
                continue;
            }
            _ => {}
        }

        session.add_user_message(input);
        print!("Assistant: ");
        io::stdout().flush()?;

        match client
            .chat(session.get_messages(), None, None, None, None)
            .await
        {
            Ok(response) => {
                session.add_assistant_message(&response.content);
                println!("{}\n", response.content);
            }
            Err(e) => {
                println!("\nโ Error: {}", e);
                println!(" (Make sure OPENAI_API_KEY is set correctly)\n");
                // Drop the failed user message so it isn't resent next turn.
                session.messages.pop();
            }
        }
    }
    Ok(())
}