basic_usage.rs

use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("πŸš€ AI-lib Basic Usage Example");
    println!("================================");

    // To switch model providers, just change the Provider value
    let client = AiClient::new(Provider::Groq)?;
    println!("βœ… Created client with provider: {:?}", client.current_provider());

    // Fetch the list of supported models
    let models = client.list_models().await?;
    println!("πŸ“‹ Available models: {:?}", models);

    // Build the chat completion request
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: "Hello! Please introduce yourself briefly.".to_string(),
        }],
    ).with_temperature(0.7)
     .with_max_tokens(100);

    println!("πŸ“€ Sending request to model: {}", request.model);

    // Send the request
    let response = client.chat_completion(request).await?;

    println!("πŸ“₯ Received response:");
    println!("   ID: {}", response.id);
    println!("   Model: {}", response.model);
    println!("   Content: {}", response.choices[0].message.content);
    println!("   Usage: {} tokens", response.usage.total_tokens);

    Ok(())
}
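
Switching providers only requires changing the Provider variant passed to AiClient::new; the rest of the request flow stays the same. A minimal sketch, assuming Provider::OpenAI is one of the enum's variants (check ai_lib::Provider for the exact names) and that the matching API key is available in the environment; the model name is only illustrative:

// Hypothetical sketch: the same request flow against a different provider.
let client = AiClient::new(Provider::OpenAI)?; // assumes this variant exists
let request = ChatCompletionRequest::new(
    "gpt-4o-mini".to_string(), // illustrative model name
    vec![Message {
        role: Role::User,
        content: "Hello! Please introduce yourself briefly.".to_string(),
    }],
);
let response = client.chat_completion(request).await?;
println!("{}", response.choices[0].message.content);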