basic_usage/
basic_usage.rs

use ai_lib::types::common::Content;
use ai_lib::{AiClient, ChatCompletionRequest, Message, Provider, Role};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("πŸš€ AI-lib Basic Usage Example");
    println!("================================");

    // To switch model providers, only the Provider value needs to change
    let client = AiClient::new(Provider::Groq)?;
    println!(
        "βœ… Created client with provider: {:?}",
        client.current_provider()
    );

    // Fetch the list of supported models
    let models = client.list_models().await?;
    println!("πŸ“‹ Available models: {:?}", models);

    // Build the chat request
    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello! Please introduce yourself briefly.".to_string()),
            function_call: None,
        }],
    )
    .with_temperature(0.7)
    .with_max_tokens(100);

    println!("πŸ“€ Sending request to model: {}", request.model);

    // Send the request
    let response = client.chat_completion(request).await?;

    println!("πŸ“₯ Received response:");
    println!("   ID: {}", response.id);
    println!("   Model: {}", response.model);
    println!(
        "   Content: {}",
        response.choices[0].message.content.as_text()
    );
    println!("   Usage: {} tokens", response.usage.total_tokens);

    Ok(())
}
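
Running the example typically requires the provider's API key to be present in the environment and a cargo example target; the exact key name (e.g. GROQ_API_KEY) and an invocation such as `cargo run --example basic_usage` are assumptions about the repository layout, not stated in the listing above.

The listing propagates every failure with `?` and exits. A caller that should keep running may prefer to inspect the error instead. The sketch below is a minimal variant that wraps only the `chat_completion` call in an explicit `match`; it uses no APIs beyond those already shown, and its only extra assumption is that the error value can be printed with `Display`, consistent with it converting into `Box<dyn std::error::Error>` via `?` above.

use ai_lib::types::common::Content;
use ai_lib::{AiClient, ChatCompletionRequest, Message, Provider, Role};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Same client construction as in the example; swap the Provider variant to switch vendors.
    let client = AiClient::new(Provider::Groq)?;

    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Say hello in one sentence.".to_string()),
            function_call: None,
        }],
    )
    .with_max_tokens(32);

    // Handle the call explicitly instead of bubbling the error up with `?`.
    match client.chat_completion(request).await {
        Ok(response) => println!("{}", response.choices[0].message.content.as_text()),
        Err(e) => eprintln!("chat_completion failed: {e}"),
    }

    Ok(())
}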