openai_test/openai_test.rs

use ai_lib::types::common::Content;
use ai_lib::{AiClient, ChatCompletionRequest, Message, Provider, Role};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🤖 OpenAI Provider Test");
    println!("=====================");

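    // Exit early if the OpenAI API key is not configured.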
    match std::env::var("OPENAI_API_KEY") {
        Ok(_) => println!("✅ OPENAI_API_KEY detected"),
        Err(_) => {
            println!("❌ OPENAI_API_KEY environment variable is not set");
            println!("   Please set it: export OPENAI_API_KEY=your_api_key");
            return Ok(());
        }
    }

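    // Create the client from the generic Provider enum; no OpenAI-specific code is needed.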
    let client = AiClient::new(Provider::OpenAI)?;
    println!("✅ OpenAI client created successfully");

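    // List the available models and print the GPT family entries.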
    println!("\n📋 Fetching OpenAI model list...");
    match client.list_models().await {
        Ok(models) => {
            println!("✅ Fetched {} models", models.len());
            println!("   Common models:");
            for model in models.iter().filter(|m| m.contains("gpt")) {
                println!("   • {}", model);
            }
        }
        Err(e) => println!("❌ Failed to fetch model list: {}", e),
    }

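    // Build a minimal chat completion request to verify end-to-end connectivity.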
    println!("\n💬 Testing chat completion...");
    let request = ChatCompletionRequest::new(
        "gpt-3.5-turbo".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text(
                "Hello! Please respond with 'Hello from OpenAI!' to confirm the connection."
                    .to_string(),
            ),
            function_call: None,
        }],
    )
    .with_max_tokens(20)
    .with_temperature(0.7);

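    // Send the request and report the model, response text, and token usage.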
    match client.chat_completion(request).await {
        Ok(response) => {
            println!("✅ Chat completion succeeded!");
            println!("   Model: {}", response.model);
            println!("   Response: {}", response.choices[0].message.content.as_text());
            println!(
                "   Token usage: {} (prompt: {}, completion: {})",
                response.usage.total_tokens,
                response.usage.prompt_tokens,
                response.usage.completion_tokens
            );
        }
        Err(e) => println!("❌ Chat completion failed: {}", e),
    }

    println!("\n🎯 OpenAI config-driven test complete!");
    println!("   This shows the strength of the configuration-driven architecture:");
    println!("   • No OpenAI-specific code had to be written");
    println!("   • Only a configuration entry in ProviderConfigs was needed");
    println!("   • All OpenAI-compatible features are supported automatically");

    Ok(())
}