//! Example demonstrating the ubiquity_core LLM service: configuration via the
//! builder and presets, completion requests, health checks, streaming
//! responses, and environment-based configuration.

use futures::StreamExt;
use ubiquity_core::{
    llm::{LLMRequest, LLMServiceFactory, Message, MessageRole},
    llm_config::{LLMConfigBuilder, LLMConfigFromEnv, LLMConfigPresets},
    LLMProvider,
};
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("=== Ubiquity LLM Service Example ===\n");
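
    // Build a configuration explicitly with the builder, then hand it to the
    // factory to construct a provider-specific service. The Mock provider is
    // used here so the example runs without real credentials.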
    println!("1. Creating LLM service with configuration builder:");
    let config = LLMConfigBuilder::new()
        .provider(LLMProvider::Mock)
        .api_key("mock-key")
        .model("test-model")
        .temperature(0.7)
        .max_tokens(2048)
        .timeout(Duration::from_secs(30))
        .build()?;
    let service = LLMServiceFactory::create(&config).await?;
    println!(" Created {:?} service", service.provider());
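
    // Presets supply tuned defaults (temperature, max_tokens) for common
    // workloads; only the provider and credentials need to be filled in.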
    println!("\n2. Using configuration presets:");
    let fast_config = LLMConfigPresets::fast()
        .provider(LLMProvider::Mock)
        .api_key("mock-key")
        .build()?;
    println!(
        " Fast preset: temp={}, max_tokens={}",
        fast_config.temperature, fast_config.max_tokens
    );
    let code_config = LLMConfigPresets::code_generation()
        .provider(LLMProvider::Mock)
        .api_key("mock-key")
        .build()?;
    println!(
        " Code generation preset: temp={}, max_tokens={}",
        code_config.temperature, code_config.max_tokens
    );
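
    // A completion request pairs a conversation (system + user messages) with
    // per-request overrides for temperature and max_tokens.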
    println!("\n3. Making a completion request:");
    let request = LLMRequest {
        messages: vec![
            Message {
                role: MessageRole::System,
                content: "You are a helpful AI assistant.".to_string(),
            },
            Message {
                role: MessageRole::User,
                content: "What is the meaning of life?".to_string(),
            },
        ],
        temperature: Some(0.8),
        max_tokens: Some(100),
        stop_sequences: None,
        stream: false,
        system_prompt: None,
        extra_params: None,
    };
    // Clone so the same request can be reused for the streaming example below.
    let response = service.complete(request.clone()).await?;
    println!(" Response: {}", response.content);
    if let Some(usage) = &response.usage {
        println!(
            " Token usage: {} prompt + {} completion = {} total",
            usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
        );
    }
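
    // Health checks verify the provider is reachable before sending traffic.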
    println!("\n4. Performing health check:");
    match service.health_check().await {
        Ok(()) => println!(" Service is healthy"),
        Err(e) => println!(" Service health check failed: {}", e),
    }
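
    // Struct update syntax reuses the earlier request, flipping only the
    // stream flag; the response then arrives as incremental chunks.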
    println!("\n5. Streaming example:");
    let stream_request = LLMRequest {
        stream: true,
        ..request
    };
    let mut stream = service.stream(stream_request).await?;
    println!(" Streaming response:");
    while let Some(chunk) = stream.next().await {
        match chunk {
            Ok(chunk) => {
                print!("{}", chunk.delta);
                if chunk.is_final {
                    println!("\n Stream completed");
                    if let Some(usage) = chunk.usage {
                        println!(" Final token usage: {} total", usage.total_tokens);
                    }
                }
            }
            Err(e) => {
                println!("\n Stream error: {}", e);
                break;
            }
        }
    }
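
    // In deployment, credentials usually come from the environment rather than
    // being hard-coded; any() looks for a configuration for any supported
    // provider.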
    println!("\n6. Loading configuration from environment:");
    match LLMConfigFromEnv::any() {
        Ok(env_config) => {
            println!(" Found {:?} configuration in environment", env_config.provider);
        }
        Err(e) => {
            println!(" No configuration found in environment: {}", e);
            println!(" Set CLAUDE_API_KEY, OPENAI_API_KEY, or LOCAL_LLM_URL to enable this step");
        }
    }

    println!("\n=== Example completed ===");
    Ok(())
}