use std::io::Write;

use futures::StreamExt;
use tiycore::{
models::get_model,
provider::get_provider,
stream::AssistantMessageEventStream,
types::{Context, Model, Provider, StreamOptions, UserMessage},
};
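/// Read `primary` from the environment, falling back to `fallback`.
/// Empty values are treated as unset.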
fn env_or(primary: &str, fallback: &str) -> Option<String> {
    std::env::var(primary)
        .ok()
        .filter(|v| !v.is_empty())
        .or_else(|| std::env::var(fallback).ok().filter(|v| !v.is_empty()))
}
fn main() {
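    // Initialize logging; verbosity is controlled via RUST_LOG.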
tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.init();
println!("=== tiycore Basic Usage Example ===\n");
println!("--- Making LLM Request ---");
let api_key = env_or("LLM_API_KEY", "OPENAI_API_KEY");
let base_url = env_or("LLM_BASE_URL", "OPENAI_BASE_URL");
let model_id = std::env::var("LLM_MODEL")
.ok()
.filter(|v| !v.is_empty())
.unwrap_or_else(|| "gpt-4o-mini".to_string());
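    // Use the registered "openai" model entry when available; otherwise
    // describe a custom model, routed here through the Zenmux provider.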
let model = get_model("openai", &model_id).unwrap_or_else(|| {
Model::builder()
.id(&model_id)
.name(&model_id)
.provider(Provider::Zenmux)
.context_window(128000)
.max_tokens(4096)
.build()
.expect("Failed to build custom model")
});
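    // Build the request context: a system prompt plus a single user message.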
    let prompt = "What is the capital of France?";
    let context = Context {
        system_prompt: Some("You are a helpful assistant. Answer in short sentences.".to_string()),
        messages: vec![tiycore::types::Message::User(UserMessage::text(prompt))],
        tools: None,
    };
println!(" Model: {} ({})", model.name, model.id);
println!(" Provider: {}", model.provider);
println!(
" Base URL: {}",
base_url
.as_deref()
.or(model.base_url.as_deref())
.unwrap_or("(default)")
);
println!(" Prompt: \"{}\"", "What is the capital of France?");
let provider = get_provider(&model.provider)
.unwrap_or_else(|| panic!("No provider registered for: {}", model.provider));
match api_key {
Some(key) => {
println!("\n Making request...");
let options = StreamOptions {
temperature: Some(0.7),
max_tokens: Some(8192),
api_key: Some(key),
base_url,
..Default::default()
};
println!("\n Response:");
println!(" --------");
            let rt = tokio::runtime::Runtime::new().expect("failed to create Tokio runtime");
let result = rt.block_on(async {
                // Consume events as they arrive so text deltas print
                // incrementally instead of buffering the whole stream first.
                let mut stream = provider.stream(&model, &context, options);
                while let Some(event) = stream.next().await {
                    match event {
                        tiycore::types::AssistantMessageEvent::TextDelta { delta, .. } => {
                            print!("{}", delta);
                            // Flush stdout so partial output shows up immediately.
                            std::io::stdout().flush()?;
                        }
tiycore::types::AssistantMessageEvent::Done { message, .. } => {
println!("\n --------");
println!(" Stop reason: {:?}", message.stop_reason);
println!(
" Usage: {} input, {} output tokens",
message.usage.input, message.usage.output
);
}
tiycore::types::AssistantMessageEvent::Error { error, .. } => {
println!("\n Error: {:?}", error.error_message);
}
_ => {}
}
}
Ok::<(), Box<dyn std::error::Error>>(())
});
if let Err(e) = result {
println!(" Request error: {}", e);
}
}
None => {
println!("\n Note: No API key set, skipping actual API call.");
println!(" To make actual requests, set environment variables:");
println!(" export LLM_API_KEY=your_key");
println!(" export LLM_BASE_URL=https://your-proxy.com/v1 # optional");
println!(" export LLM_MODEL=gpt-4o-mini # optional");
println!(" Or use provider-specific variables:");
println!(" export OPENAI_API_KEY=your_key");
println!(" export OPENAI_BASE_URL=https://your-proxy.com/v1");
}
}
println!("\n=== Example Complete ===");
}
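/// Synchronous helper: drives an event stream to completion on a fresh Tokio
/// runtime, printing usage on completion and returning the accumulated text.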
#[allow(dead_code)]
fn process_stream_sync(
    mut stream: AssistantMessageEventStream,
) -> Result<String, Box<dyn std::error::Error>> {
let rt = tokio::runtime::Runtime::new()?;
rt.block_on(async {
let mut full_response = String::new();
        // Stream events one at a time instead of buffering them all first.
        while let Some(event) = stream.next().await {
match event {
tiycore::types::AssistantMessageEvent::TextDelta { delta, .. } => {
full_response.push_str(&delta);
}
tiycore::types::AssistantMessageEvent::Done { message, .. } => {
println!(" Stop reason: {:?}", message.stop_reason);
println!(
" Usage: {} input, {} output tokens",
message.usage.input, message.usage.output
);
}
tiycore::types::AssistantMessageEvent::Error { error, .. } => {
return Err(format!("API error: {:?}", error.error_message).into());
}
_ => {}
}
}
Ok(full_response)
})
}