//! OpenAI chat-completions API client (`cargo_ai/openai_api_client.rs`).

1// External Crates
2use reqwest::ClientBuilder; // HTTP client builder
3use serde::{Deserialize, Serialize}; // Data format (e.g.,JSON, TOML) (de)serialization
4use std::time::Duration; // Duration for timeout handling
5
6#[derive(Serialize, Debug)]
7pub struct Request {
8    pub model: String,          // Model name for OpenAI
9    pub messages: Vec<Message>, // List of messages for chat format
10    pub temperature: f64,       // Temperature setting
11    pub response_format: serde_json::Value,
12}
13
14#[derive(Serialize, Deserialize, Debug)]
15pub struct Message {
16    pub role: String,    // "user", "assistant", or "system"
17    pub content: String, // The actual prompt or message content
18}
19
20#[derive(Deserialize, Debug)]
21#[allow(dead_code)] // Currently not using all response fields
22pub struct Response {
23    pub id: String,           // Unique identifier for the chat session
24    pub object: String,       // Object type, usually "chat.completion"
25    pub created: u64,         // Timestamp when the response was created
26    pub model: String,        // Model name used for the response
27    pub choices: Vec<Choice>, // List of choices (contains the actual answer)
28    pub usage: Usage,         // Information about token usage
29}
30
31#[derive(Deserialize, Debug)]
32#[allow(dead_code)] // Currently not using all response fields
33pub struct Choice {
34    pub message: Message,              // Contains the assistant's message
35    pub finish_reason: Option<String>, // Reason for stopping (e.g., "stop")
36    pub index: usize,                  // Index of the choice
37}
38
39#[derive(Serialize, Deserialize, Debug)]
40pub struct Usage {
41    pub prompt_tokens: u32,     // Number of tokens in the prompt
42    pub completion_tokens: u32, // Number of tokens in the completion
43    pub total_tokens: u32,      // Total number of tokens used
44}
45
46pub async fn send_request(
47    url: &String,
48    model: &String,
49    prompt: &String,
50    timeout_in_sec: u64,
51    token: &String,
52    response_format: serde_json::Value,
53) -> Result<String, Box<dyn std::error::Error>> {
54    let client = ClientBuilder::new()
55        .timeout(Duration::from_secs(timeout_in_sec))
56        .build()?; // 30 sec Default too short for some requests.
57
58    let temperature = if model.starts_with("gpt-5") {
59        1.0
60    } else {
61        crate::DEFAULT_TEMPERATURE
62    };
63
64    let role = String::from("user");
65
66    let message = Message {
67        role,
68        content: prompt.clone(),
69    };
70
71    let messages = vec![message];
72
73    // When `structured` is true, request JSON-only output (OpenAI: response_format = json_object).
74    // This is equivalent to Ollama's `format: "json"` and enforces valid JSON shape at the transport level.
75    let request = Request {
76        model: model.clone(),
77        messages,
78        temperature,
79        response_format: response_format.clone(),
80    };
81
82    // Print the request JSON before sending
83    // println!("OpenAI request JSON:\n{}", serde_json::to_string_pretty(&request)?);
84
85    let http_resp = client
86        .post(url)
87        .header("Authorization", format!("Bearer {}", token))
88        .header("Content-Type", "application/json")
89        .json(&request)
90        .send()
91        .await?;
92
93    // TEMP: print raw server response (without consuming parse-ability)
94    let body_bytes = http_resp.bytes().await?;
95    // println!("Raw OpenAI response:\n{}", String::from_utf8_lossy(&body_bytes));
96
97    // Parse as usual from the captured bytes
98    let response: Response = match serde_json::from_slice(&body_bytes) {
99        Ok(resp) => resp,
100        Err(e) => {
101            let raw = String::from_utf8_lossy(&body_bytes);
102            return Err(format!("Failed to parse JSON: {}\nRaw response:\n{}", e, raw).into());
103        }
104    };
105
106    let response_content = response
107        .choices
108        .get(0)
109        .ok_or("No ChatGPT Response Index 0 Choice.")?
110        .message
111        .content
112        .clone();
113
114    Ok(response_content)
115}