debug_request/
debug_request.rs

use ai_lib::types::common::Content;
use ai_lib::{AiClient, ChatCompletionRequest, Message, Provider, Role};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("🔍 Debug Request Format");
    println!("======================");

    // Build a minimal single-message request so its fields are easy to inspect.
    let request = ChatCompletionRequest::new(
        "gpt-3.5-turbo".to_string(),
        vec![Message {
            role: Role::User,
            content: Content::Text("Hello!".to_string()),
            function_call: None,
        }],
    )
    .with_max_tokens(10);

    // Echo the request fields before sending anything over the wire.
    println!("📤 Original Request:");
    println!("   Model: {}", request.model);
    println!("   Message count: {}", request.messages.len());
    println!(
        "   Message[0]: {:?} - {}",
        request.messages[0].role,
        request.messages[0].content.as_text()
    );
    println!("   max_tokens: {:?}", request.max_tokens);

    // Send the same request to OpenAI and report what comes back.
    println!("\n🤖 Testing OpenAI...");
    match AiClient::new(Provider::OpenAI) {
        Ok(client) => match client.chat_completion(request.clone()).await {
            Ok(response) => {
                println!("✅ Success!");
                println!(
                    "   Response: {}",
                    response.choices[0].message.content.as_text()
                );
            }
            Err(e) => {
                println!("❌ Failed: {}", e);

                if e.to_string().contains("400") {
                    println!("   This usually indicates an incorrect request format");
                    println!("   Let's check whether the request contains the necessary fields...");
                }
            }
        },
        Err(e) => println!("❌ Client creation failed: {}", e),
    }

    Ok(())
}
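
Because AiClient::new takes the provider as a value, the OpenAI-specific branch above can be factored into a small reusable probe and pointed at other providers when chasing a 400. Below is a minimal sketch that uses only the calls already shown in this example; the helper name try_provider is ours, not part of ai-lib.

use ai_lib::{AiClient, ChatCompletionRequest, Provider};

// Hypothetical helper: replay the same request against any provider and print
// the outcome, so a suspected request-format problem can be compared across backends.
async fn try_provider(provider: Provider, request: ChatCompletionRequest) {
    match AiClient::new(provider) {
        Ok(client) => match client.chat_completion(request).await {
            Ok(response) => println!("✅ {}", response.choices[0].message.content.as_text()),
            Err(e) => println!("❌ {}", e),
        },
        Err(e) => println!("❌ Client creation failed: {}", e),
    }
}

Inside main this would be called as try_provider(Provider::OpenAI, request.clone()).await, and again with any other Provider variant the crate exposes.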