mini_langchain/llm/
openai.rs

// see https://github.com/64bit/async-openai/blob/main/examples/tool-call/src/main.rs
pub use async_openai::{
    Client,
    config::{Config, OpenAIConfig},
};
use serde::{Serialize, Deserialize};
use serde_json::{json, Value};
use futures::{future::BoxFuture, stream::BoxStream};

use crate::message::Message;
use crate::tools::stream::StreamData;
use crate::llm::{
    traits::LLM,
    tokens::TokenUsage,
    error::LLMError,
    GenerateResult,
    LLMResult,
};

/// A tool/function definition in the shape the OpenAI tools API expects.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OpenAIFunction {
    /// Always "function" for tool calls; serialized as `type`.
    /// Owned `String` rather than `&'static str` so `Deserialize` can be derived.
    #[serde(rename = "type")]
    pub f_type: String,
    pub name: String,
    pub description: String,
    /// JSON Schema describing the function's arguments.
    pub parameters: Value,
    pub strict: bool,
}

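// Usage sketch: a hypothetical `get_weather` tool definition. The name, fields,
// and schema are illustrative only; `parameters` follows the JSON Schema shape
// the tools API expects (strict mode requires `additionalProperties: false`
// and every property listed in `required`).
fn _example_weather_function() -> OpenAIFunction {
    OpenAIFunction {
        f_type: "function".to_string(),
        name: "get_weather".to_string(),
        description: "Get the current weather for a city.".to_string(),
        parameters: json!({
            "type": "object",
            "properties": {
                "city": { "type": "string", "description": "City name" }
            },
            "required": ["city"],
            "additionalProperties": false
        }),
        strict: true,
    }
}
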
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CompletionOptions {
    pub model: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// How many completions to generate for each prompt.
    /// **Note:** Because this parameter generates many completions, it can quickly
    /// consume your token quota. Use it carefully and ensure you have reasonable
    /// settings for `max_tokens` and `stop`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub n: Option<u8>, // min: 1, max: 128, default: 1

    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>, // nullable: true

    /// A unique identifier representing your end-user, which can help OpenAI
    /// monitor and detect abuse.
    /// [Learn more](https://platform.openai.com/docs/usage-policies/end-user-ids).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user: Option<String>,
}

pub struct OpenAI {
    pub client: Client<OpenAIConfig>,
    pub options: Option<CompletionOptions>,
}

impl OpenAI {
    /// Build a client configured from the `OPENAI_API_KEY` environment variable.
    pub fn new() -> Self {
        Self {
            client: Client::new(),
            options: None,
        }
    }

    /// Build a client with an explicit API key.
    pub fn with_api_key(api_key: impl Into<String>) -> Self {
        let config = OpenAIConfig::new().with_api_key(api_key);
        Self {
            client: Client::with_config(config),
            options: None,
        }
    }

    /// Attach completion options (model, temperature, ...) to this client.
    pub fn with_options(mut self, options: CompletionOptions) -> Self {
        self.options = Some(options);
        self
    }
}

impl Default for OpenAI {
    fn default() -> Self {
        Self::new()
    }
}

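// Usage sketch: configure a client with an explicit key and options.
// The key and model name here are placeholders, not real values.
fn _example_client() -> OpenAI {
    OpenAI::with_api_key("sk-...")
        .with_options(CompletionOptions {
            model: "gpt-4o-mini".to_string(),
            max_tokens: Some(256),
            temperature: Some(0.7),
            n: None,
            stream: Some(false),
            user: None,
        })
}
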
impl LLM for OpenAI {
    fn generate<'a>(&'a self, _messages: &'a [Message]) -> BoxFuture<'a, LLMResult<GenerateResult>> {
        // TODO: call the chat completions endpoint and map the response
        // (text + TokenUsage) into a GenerateResult; see the sketch below.
        unimplemented!()
    }

    fn stream<'a>(&'a self, _messages: &'a [Message]) -> BoxStream<'a, LLMResult<StreamData>> {
        // TODO: stream chunks from the chat completions endpoint as StreamData,
        // mapping failures into LLMError.
        unimplemented!()
    }
}

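// Sketch of the eventual `generate` body, using async-openai's chat API.
// The model name is a placeholder and error mapping into LLMError is elided;
// a real implementation would also convert `crate::message::Message` values
// into `ChatCompletionRequestMessage`s instead of taking a bare prompt.
async fn _example_generate(
    client: &Client<OpenAIConfig>,
    prompt: &str,
) -> Result<String, Box<dyn std::error::Error>> {
    use async_openai::types::{
        ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs,
    };

    let request = CreateChatCompletionRequestArgs::default()
        .model("gpt-4o-mini")
        .messages([ChatCompletionRequestUserMessageArgs::default()
            .content(prompt)
            .build()?
            .into()])
        .build()?;

    let response = client.chat().create(request).await?;
    // The first choice's content may be absent (e.g. for pure tool calls).
    Ok(response
        .choices
        .first()
        .and_then(|c| c.message.content.clone())
        .unwrap_or_default())
}
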
pub struct OpenAIRequest {
    pub messages: Vec<Message>,
    pub model: String,
    pub tools: Vec<OpenAIFunction>,
    pub tool_choice: Option<String>, // "auto" | "none"
}
115