use std::io::Write;

use kernex_core::context::Context;
use kernex_core::traits::Provider;
use kernex_providers::ollama::OllamaProvider;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
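    // Point the provider at a local Ollama server (11434 is Ollama's default port)
    // and select the model to query.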
    let provider = OllamaProvider::from_config(
        "http://localhost:11434".to_string(),
        "llama3.2".to_string(),
        None,
    )?;

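    // Fail fast with setup instructions if the Ollama server is unreachable.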
    if !provider.is_available().await {
        eprintln!("Error: Ollama not available.");
        eprintln!();
        eprintln!("To fix this:");
        eprintln!("  1. Install Ollama: https://ollama.com");
        eprintln!("  2. Pull a model: ollama pull llama3.2");
        eprintln!("  3. Start the server: ollama serve");
        std::process::exit(1);
    }

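    // Prompt for one line of input; stdout is flushed explicitly so the "> "
    // prompt appears before read_line blocks (print! does not flush on its own).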
println!("Connected to Ollama. Type a message:");
print!("> ");
use std::io::Write;
std::io::stdout().flush()?;
    let mut input = String::new();
    std::io::stdin().read_line(&mut input)?;
    let input = input.trim();
    if input.is_empty() {
        println!("No input provided. Exiting.");
        return Ok(());
    }

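    // Wrap the user's message in a Context and attach a system prompt
    // before asking the provider for a completion.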
    let mut context = Context::new(input);
    context.system_prompt = "You are a helpful assistant. Keep responses concise.".to_string();

    let response = provider.complete(&context).await?;
    println!("\n{}", response.text);

    if let Some(model) = &response.metadata.model {
        eprintln!(
            "\n[Model: {} | Tokens: {} | Time: {}ms]",
            model,
            response.metadata.tokens_used.unwrap_or(0),
            response.metadata.processing_time_ms,
        );
    }

    Ok(())
}