1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
use async_openai::types::{ChatCompletionRequestSystemMessageArgs, ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs};
use async_openai::config::OpenAIConfig;
use async_openai::Client;
use anyhow::{Context, Result};

use crate::config;
use crate::model::Model;

#[derive(Debug, Clone, PartialEq)]
/// The assistant's reply extracted from a chat-completion API call.
pub struct Response {
  // Plain text content of the first choice returned by the model.
  pub response: String
}

#[derive(Debug, Clone, PartialEq)]
/// Parameters for a single chat-completion request.
pub struct Request {
  // User message sent to the model.
  pub prompt:     String,
  // System message that frames the conversation.
  pub system:     String,
  // Upper bound on tokens the model may generate.
  // NOTE(review): downstream API field is u16-sized — values above 65535
  // cannot be represented; see `call` for how this is handled.
  pub max_tokens: usize,
  // Which model to use; converted via `Display` when building the request.
  pub model:      Model
}

/// Sends a chat-completion request to the OpenAI API and returns the
/// assistant's reply as plain text.
///
/// The API key is read from the application config (`config::APP`).
///
/// # Errors
/// Returns an error when the API key is not configured, `max_tokens` does
/// not fit the API's `u16` field, the request cannot be built, the HTTP
/// call fails, or the response contains no choices / no text content.
pub async fn call(request: Request) -> Result<Response> {
  let api_key = config::APP
    .openai_api_key
    .clone()
    .context("Failed to get OpenAI API key, please run `git-ai config set openai-api-key <your key>`")?;

  let config = OpenAIConfig::new().with_api_key(api_key);
  let client = Client::with_config(config);

  // The API's max_tokens field is u16; fail loudly instead of letting an
  // `as` cast silently truncate values above 65535.
  let max_tokens = u16::try_from(request.max_tokens)
    .context("max_tokens exceeds the maximum supported value (65535)")?;

  let request = CreateChatCompletionRequestArgs::default()
    .max_tokens(max_tokens)
    .model(request.model.to_string())
    .messages([
      ChatCompletionRequestSystemMessageArgs::default()
        .content(request.system)
        .build()?
        .into(),
      ChatCompletionRequestUserMessageArgs::default()
        .content(request.prompt)
        .build()?
        .into()
    ])
    .build()?;

  let chat = client.chat().create(request).await?;

  // Lazy context: only format the (potentially large) response debug dump
  // when there actually are no choices.
  let choice = chat
    .choices
    .first()
    .with_context(|| format!("Failed to get response: {chat:?}"))?;

  let response = choice
    .message
    .content
    .clone()
    .context("Failed to get response text")?;

  Ok(Response { response })
}