use crate::Retry;
use http_req::{
    request::{Method, Request},
    uri::Uri,
};
use serde::{Deserialize, Serialize, Serializer};
use std::collections::HashMap;
use urlencoding::encode;
/// Response struct for the chat completion.
#[derive(Debug, Deserialize)]
pub struct ChatResponse {
/// Whether a new conversation has been created.
pub restarted: bool,
/// The response from ChatGPT.
pub choice: String,
}
impl Default for ChatResponse {
fn default() -> ChatResponse {
ChatResponse {
restarted: true,
choice: String::new(),
}
}
}
/// Models available for chat completion.
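///
/// Each variant serializes to the matching OpenAI model name. A quick
/// sketch (assuming this enum is exposed as `openai_flows::chat::ChatModel`):
///
/// ```no_run
/// use openai_flows::chat::ChatModel;
///
/// let name = serde_json::to_string(&ChatModel::GPT4).unwrap();
/// assert_eq!(name, "\"gpt-4\"");
/// ```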
#[derive(Debug, Clone, Copy)]
pub enum ChatModel {
GPT4_32K,
GPT4,
GPT35Turbo16K,
GPT35Turbo,
}
impl Serialize for ChatModel {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match self {
ChatModel::GPT4_32K => serializer.serialize_str("gpt-4-32k"),
ChatModel::GPT4 => serializer.serialize_str("gpt-4"),
ChatModel::GPT35Turbo16K => serializer.serialize_str("gpt-3.5-turbo-16k"),
ChatModel::GPT35Turbo => serializer.serialize_str("gpt-3.5-turbo"),
}
}
}
impl Default for ChatModel {
fn default() -> ChatModel {
ChatModel::GPT35Turbo
}
}
/// Struct for setting the chat completion options.
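///
/// A minimal construction sketch; unset fields fall back to their
/// `Default` values (the `openai_flows::chat` module path is an assumption):
///
/// ```no_run
/// use openai_flows::chat::{ChatModel, ChatOptions};
///
/// let options = ChatOptions {
///     model: ChatModel::GPT4,
///     restart: false,
///     system_prompt: Some("You are a helpful assistant."),
///     ..Default::default()
/// };
/// ```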
#[derive(Debug, Default, Serialize)]
pub struct ChatOptions<'a> {
/// The ID or name of the model to use for completion.
pub model: ChatModel,
/// When true, a new conversation will be created.
pub restart: bool,
/// The prompt of the system role.
#[serde(skip_serializing_if = "Option::is_none")]
pub system_prompt: Option<&'a str>,
/// What sampling temperature to use, between 0 and 2.
#[serde(skip_serializing_if = "Option::is_none")]
pub temperature: Option<f32>,
/// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
#[serde(skip_serializing_if = "Option::is_none")]
pub top_p: Option<f32>,
/// Up to 4 sequences where the API will stop generating further tokens.
#[serde(skip_serializing_if = "Option::is_none")]
pub stop: Option<Vec<String>>,
/// The maximum number of tokens to generate in the chat completion.
#[serde(skip_serializing_if = "Option::is_none")]
pub max_tokens: Option<u16>,
/// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
#[serde(skip_serializing_if = "Option::is_none")]
pub presence_penalty: Option<f32>,
/// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
#[serde(skip_serializing_if = "Option::is_none")]
pub frequency_penalty: Option<f32>,
/// Modify the likelihood of specified tokens appearing in the completion.
#[serde(skip_serializing_if = "Option::is_none")]
pub logit_bias: Option<HashMap<String, i8>>,
}
impl crate::OpenAIFlows {
/// Create chat completion with the provided sentence.
/// It uses the model set in `options` (OpenAI's [GPT-3.5](https://platform.openai.com/docs/models/gpt-3-5) by default) to carry on a conversation.
///
/// `conversation_id` is the identifier of the conversation.
/// The history will be fetched and attached to the `sentence` as a whole prompt for ChatGPT.
///
/// `sentence` is a `&str` that represents the current utterance of the conversation.
///
/// If you haven't connected your OpenAI account with [Flows.network platform](https://flows.network),
/// you will receive an error in the flow's building log or running log.
///
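/// A usage sketch inside an async flow function (the `openai_flows` crate
/// paths and the `OpenAIFlows::new()` constructor are assumptions based on
/// this file's layout):
///
/// ```no_run
/// use openai_flows::{chat::ChatOptions, OpenAIFlows};
///
/// async fn reply(user_input: &str) {
///     let openai = OpenAIFlows::new();
///     let options = ChatOptions::default();
///     match openai
///         .chat_completion("my-conversation", user_input, &options)
///         .await
///     {
///         Ok(resp) => println!("{}", resp.choice),
///         Err(e) => eprintln!("chat_completion failed: {e}"),
///     }
/// }
/// ```
///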
pub async fn chat_completion(
&self,
conversation_id: &str,
sentence: &str,
options: &ChatOptions<'_>,
) -> Result<ChatResponse, String> {
self.keep_trying(|account| {
chat_completion_inner(account, conversation_id, sentence, options)
})
}
}
fn chat_completion_inner(
account: &str,
conversation_id: &str,
sentence: &str,
options: &ChatOptions,
) -> Retry<ChatResponse> {
unsafe {
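        // These `_get_*` helpers come from the flows.network host
        // environment, hence the enclosing `unsafe` block.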
let flows_user = crate::_get_flows_user();
let flow_id = crate::_get_flow_id();
let mut writer = Vec::new();
let uri = format!(
"{}/{}/{}/chat_completion_08?account={}&conversation={}",
crate::OPENAI_API_PREFIX.as_str(),
flows_user,
flow_id,
encode(account),
encode(conversation_id),
);
let uri = Uri::try_from(uri.as_str()).unwrap();
let body = serde_json::to_vec(&serde_json::json!({
"sentence": sentence,
"params": options
}))
.unwrap_or_default();
match Request::new(&uri)
.method(Method::POST)
.header("Content-Type", "application/json")
.header("Content-Length", &body.len())
.body(&body)
.send(&mut writer)
{
Ok(res) => {
match res.status_code().is_success() {
                    true => Retry::No(
                        serde_json::from_slice::<ChatResponse>(&writer)
                            .map_err(|e| format!("failed to parse response: {}", e)),
                    ),
false => {
match res.status_code().into() {
409 | 429 | 503 => {
// 409 TryAgain 429 RateLimitError
// 503 ServiceUnavailable
Retry::Yes(String::from_utf8_lossy(&writer).into_owned())
}
_ => Retry::No(Err(String::from_utf8_lossy(&writer).into_owned())),
}
}
}
}
Err(e) => Retry::No(Err(e.to_string())),
}
}
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum ChatRole {
User,
Assistant,
}
#[derive(Debug, Deserialize)]
pub struct ChatMessage {
    /// Who authored the message.
    pub role: ChatRole,
    /// The text of the message.
    pub content: String,
}
/// Fetch the message history of `conversation_id`.
///
/// The result is a `Vec<ChatMessage>` whose length is capped at `limit`.
/// When `limit` is 0, the full history is returned.
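///
/// A usage sketch (the `openai_flows::chat` module path is an assumption):
///
/// ```no_run
/// use openai_flows::chat::chat_history;
///
/// // Fetch up to the last 5 messages; `None` means the history could not
/// // be fetched or parsed.
/// if let Some(messages) = chat_history("my-conversation", 5) {
///     for m in messages {
///         println!("{:?}: {}", m.role, m.content);
///     }
/// }
/// ```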
pub fn chat_history(conversation_id: &str, limit: u8) -> Option<Vec<ChatMessage>> {
unsafe {
let flows_user = crate::_get_flows_user();
let flow_id = crate::_get_flow_id();
let mut writer = Vec::new();
let uri = format!(
"{}/{}/{}/chat_history?conversation={}&limit={}",
crate::OPENAI_API_PREFIX.as_str(),
flows_user,
flow_id,
encode(conversation_id),
limit
);
let uri = Uri::try_from(uri.as_str()).unwrap();
match Request::new(&uri).method(Method::GET).send(&mut writer) {
Ok(res) => match res.status_code().is_success() {
true => serde_json::from_slice::<Vec<ChatMessage>>(&writer).ok(),
false => None,
},
Err(_) => None,
}
}
}