// git_commit_helper/llm.rs

// ************************************************************************** //
//                                                                            //
//                                                        :::      ::::::::   //
//   llm.rs                                             :+:      :+:    :+:   //
//                                                    +:+ +:+         +:+     //
//   By: dfine <coding@dfine.tech>                  +#+  +:+       +#+        //
//                                                +#+#+#+#+#+   +#+           //
//   Created: 2025/05/10 19:12:36 by dfine             #+#    #+#             //
//   Updated: 2025/05/10 19:12:37 by dfine            ###   ########.fr       //
//                                                                            //
// ************************************************************************** //
12
13use async_openai::{
14    Client as OpenAIClient,
15    config::OpenAIConfig,
16    types::{ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs},
17};
18use std::error::Error;
19
20/// Sends a prompt to the OpenAI chat API and returns the generated response as a string.
21///
22/// This function uses the `async-openai` crate to interact with a chat completion endpoint
23/// (e.g., GPT-4, GPT-4o, GPT-3.5-turbo). The base URL can be overridden via the
24/// `OPENAI_BASE_URL` environment variable.
25///
26/// # Arguments
27///
28/// * `prompt` - The text prompt to send to the model.
29/// * `model` - The model ID to use (e.g., `"gpt-4o"`, `"gpt-3.5-turbo"`).
30/// * `max_token` - Maximum number of tokens allowed in the response.
31///
32/// # Returns
33///
34/// A `Result` containing the generated string response on success, or an error on failure.
35///
36/// # Errors
37///
38/// This function will return an error if the request fails, the environment variable
39/// is misconfigured, or if the response cannot be parsed correctly.
40///
41/// # Example
42///
43/// ```no_run
44/// use git_commit_helper::call_openai;
45///
46/// #[tokio::main]
47/// async fn main() {
48///     let prompt = "Summarize the following diff...";
49///     let model = "gpt-4o";
50///     let max_token = 2048;
51///
52///     match call_openai(prompt, model, max_token).await {
53///         Ok(response) => println!("LLM response: {}", response),
54///         Err(e) => eprintln!("Error calling OpenAI: {}", e),
55///     }
56/// }
57/// ```
58pub async fn call_openai(
59    prompt: &str,
60    model: &str,
61    max_token: u32,
62) -> Result<String, Box<dyn Error>> {
63    let base_url = std::env::var("OPENAI_BASE_URL")
64        .unwrap_or_else(|_| "https://api.openai.com/v1".to_string());
65    let config = OpenAIConfig::default().with_api_base(base_url);
66    let client = OpenAIClient::with_config(config);
67    let request = CreateChatCompletionRequestArgs::default()
68        .max_tokens(max_token)
69        .model(model)
70        .messages([ChatCompletionRequestUserMessageArgs::default()
71            .content(prompt)
72            .build()?
73            .into()])
74        .build()?;
75    let response = client.chat().create(request).await?;
76    Ok(response
77        .choices
78        .first()
79        .and_then(|c| c.message.content.clone())
80        .unwrap_or_default())
81}