openai_test/openai_test.rs

use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🤖 OpenAI Provider Test");
    println!("=====================");

    // Bail out early if the API key is not configured.
    match std::env::var("OPENAI_API_KEY") {
        Ok(_) => println!("✅ OPENAI_API_KEY detected"),
        Err(_) => {
            println!("❌ OPENAI_API_KEY environment variable is not set");
            println!("   Please set it: export OPENAI_API_KEY=your_api_key");
            return Ok(());
        }
    }

    // Create the client for the OpenAI provider via the config-driven factory.
    let client = AiClient::new(Provider::OpenAI)?;
    println!("✅ OpenAI client created successfully");

    println!("\n📋 Fetching OpenAI model list...");
    match client.list_models().await {
        Ok(models) => {
            println!("✅ Retrieved {} models", models.len());
            println!("   Common models:");
            // Only show GPT-family models to keep the output short.
            for model in models.iter().filter(|m| m.contains("gpt")) {
                println!("   • {}", model);
            }
        }
        Err(e) => println!("❌ Failed to fetch model list: {}", e),
    }

    println!("\n💬 Testing chat completion...");
    // Build a small request against gpt-3.5-turbo with a short, deterministic-ish prompt.
    let request = ChatCompletionRequest::new(
        "gpt-3.5-turbo".to_string(),
        vec![Message {
            role: Role::User,
            content: "Hello! Please respond with 'Hello from OpenAI!' to confirm the connection.".to_string(),
        }],
    )
    .with_max_tokens(20)
    .with_temperature(0.7);

    match client.chat_completion(request).await {
        Ok(response) => {
            println!("✅ Chat completion succeeded!");
            println!("   Model: {}", response.model);
            println!("   Response: {}", response.choices[0].message.content);
            println!(
                "   Token usage: {} (prompt: {}, completion: {})",
                response.usage.total_tokens,
                response.usage.prompt_tokens,
                response.usage.completion_tokens
            );
        }
        Err(e) => println!("❌ Chat completion failed: {}", e),
    }

    println!("\n🎯 OpenAI config-driven test complete!");
    println!("   This demonstrates the strength of the config-driven architecture:");
    println!("   • No OpenAI-specific code had to be written");
    println!("   • Only a configuration entry in ProviderConfigs is needed");
    println!("   • All OpenAI-compatible features are supported automatically");

    Ok(())
}
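
Because the client is configuration-driven, pointing the same flow at a different provider is mostly a matter of swapping the Provider variant. Below is a minimal sketch, assuming this version of ai-lib exposes a Provider::Groq variant and reads its key from the environment the same way the OpenAI flow does; the model name is only a placeholder for illustration, so check the crate's Provider enum and provider docs for the exact names.

use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Assumption: Provider::Groq exists in this ai-lib version and the client
    // picks up its API key from the environment, mirroring the OpenAI flow above.
    let client = AiClient::new(Provider::Groq)?;

    let request = ChatCompletionRequest::new(
        "llama3-8b-8192".to_string(), // placeholder model name, not verified
        vec![Message {
            role: Role::User,
            content: "Hello from another provider!".to_string(),
        }],
    );

    let response = client.chat_completion(request).await?;
    println!("{}", response.choices[0].message.content);
    Ok(())
}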