// rig/providers/together/completion.rs
use crate::{
    completion::{self, CompletionError},
    http_client, json_utils,
    providers::openai,
};

use super::client::{Client, together_ai_api_types::ApiResponse};
use crate::completion::CompletionRequest;
use crate::streaming::StreamingCompletionResponse;
use serde::{Deserialize, Serialize};
use serde_json::json;
use tracing::{Instrument, info_span};

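// Model identifiers accepted by Together AI's chat completions endpoint.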
pub const YI_34B_CHAT: &str = "zero-one-ai/Yi-34B-Chat";
pub const OLMO_7B_INSTRUCT: &str = "allenai/OLMo-7B-Instruct";
pub const CHRONOS_HERMES_13B: &str = "Austism/chronos-hermes-13b";
pub const ML318BR: &str = "carson/ml318br";
pub const DOLPHIN_2_5_MIXTRAL_8X7B: &str = "cognitivecomputations/dolphin-2.5-mixtral-8x7b";
pub const DBRX_INSTRUCT: &str = "databricks/dbrx-instruct";
pub const DEEPSEEK_LLM_67B_CHAT: &str = "deepseek-ai/deepseek-llm-67b-chat";
pub const DEEPSEEK_CODER_33B_INSTRUCT: &str = "deepseek-ai/deepseek-coder-33b-instruct";
pub const PLATYPUS2_70B_INSTRUCT: &str = "garage-bAInd/Platypus2-70B-instruct";
pub const GEMMA_2_9B_IT: &str = "google/gemma-2-9b-it";
pub const GEMMA_2B_IT: &str = "google/gemma-2b-it";
pub const GEMMA_2_27B_IT: &str = "google/gemma-2-27b-it";
pub const GEMMA_7B_IT: &str = "google/gemma-7b-it";
pub const LLAMA_3_70B_INSTRUCT_GRADIENT_1048K: &str =
    "gradientai/Llama-3-70B-Instruct-Gradient-1048k";
pub const MYTHOMAX_L2_13B: &str = "Gryphe/MythoMax-L2-13b";
pub const MYTHOMAX_L2_13B_LITE: &str = "Gryphe/MythoMax-L2-13b-Lite";
pub const LLAVA_NEXT_MISTRAL_7B: &str = "llava-hf/llava-v1.6-mistral-7b-hf";
pub const ZEPHYR_7B_BETA: &str = "HuggingFaceH4/zephyr-7b-beta";
pub const KOALA_7B: &str = "togethercomputer/Koala-7B";
pub const VICUNA_7B_V1_3: &str = "lmsys/vicuna-7b-v1.3";
pub const VICUNA_13B_V1_5_16K: &str = "lmsys/vicuna-13b-v1.5-16k";
pub const VICUNA_13B_V1_5: &str = "lmsys/vicuna-13b-v1.5";
pub const VICUNA_13B_V1_3: &str = "lmsys/vicuna-13b-v1.3";
pub const KOALA_13B: &str = "togethercomputer/Koala-13B";
pub const VICUNA_7B_V1_5: &str = "lmsys/vicuna-7b-v1.5";
pub const CODE_LLAMA_34B_INSTRUCT: &str = "codellama/CodeLlama-34b-Instruct-hf";
pub const LLAMA_3_8B_CHAT_HF_INT4: &str = "togethercomputer/Llama-3-8b-chat-hf-int4";
pub const LLAMA_3_2_90B_VISION_INSTRUCT_TURBO: &str =
    "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo";
pub const LLAMA_3_2_11B_VISION_INSTRUCT_TURBO: &str =
    "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo";
pub const LLAMA_3_2_3B_INSTRUCT_TURBO: &str = "meta-llama/Llama-3.2-3B-Instruct-Turbo";
pub const LLAMA_3_8B_CHAT_HF_INT8: &str = "togethercomputer/Llama-3-8b-chat-hf-int8";
pub const LLAMA_3_1_70B_INSTRUCT_TURBO: &str = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo";
pub const LLAMA_2_13B_CHAT: &str = "meta-llama/Llama-2-13b-chat-hf";
pub const LLAMA_3_70B_INSTRUCT_LITE: &str = "meta-llama/Meta-Llama-3-70B-Instruct-Lite";
pub const LLAMA_3_8B_CHAT_HF: &str = "meta-llama/Llama-3-8b-chat-hf";
pub const LLAMA_3_70B_CHAT_HF: &str = "meta-llama/Llama-3-70b-chat-hf";
pub const LLAMA_3_8B_INSTRUCT_TURBO: &str = "meta-llama/Meta-Llama-3-8B-Instruct-Turbo";
pub const LLAMA_3_8B_INSTRUCT_LITE: &str = "meta-llama/Meta-Llama-3-8B-Instruct-Lite";
pub const LLAMA_3_1_405B_INSTRUCT_LITE_PRO: &str =
    "meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro";
pub const LLAMA_2_7B_CHAT: &str = "meta-llama/Llama-2-7b-chat-hf";
pub const LLAMA_3_1_405B_INSTRUCT_TURBO: &str = "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo";
pub const LLAMA_VISION_FREE: &str = "meta-llama/Llama-Vision-Free";
pub const LLAMA_3_70B_INSTRUCT_TURBO: &str = "meta-llama/Meta-Llama-3-70B-Instruct-Turbo";
pub const LLAMA_3_1_8B_INSTRUCT_TURBO: &str = "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo";
pub const CODE_LLAMA_7B_INSTRUCT_TOGETHER: &str = "togethercomputer/CodeLlama-7b-Instruct";
pub const CODE_LLAMA_34B_INSTRUCT_TOGETHER: &str = "togethercomputer/CodeLlama-34b-Instruct";
pub const CODE_LLAMA_13B_INSTRUCT: &str = "codellama/CodeLlama-13b-Instruct-hf";
pub const CODE_LLAMA_13B_INSTRUCT_TOGETHER: &str = "togethercomputer/CodeLlama-13b-Instruct";
pub const LLAMA_2_13B_CHAT_TOGETHER: &str = "togethercomputer/llama-2-13b-chat";
pub const LLAMA_2_7B_CHAT_TOGETHER: &str = "togethercomputer/llama-2-7b-chat";
pub const LLAMA_3_8B_INSTRUCT: &str = "meta-llama/Meta-Llama-3-8B-Instruct";
pub const LLAMA_3_70B_INSTRUCT: &str = "meta-llama/Meta-Llama-3-70B-Instruct";
pub const CODE_LLAMA_70B_INSTRUCT: &str = "codellama/CodeLlama-70b-Instruct-hf";
pub const LLAMA_2_70B_CHAT_TOGETHER: &str = "togethercomputer/llama-2-70b-chat";
pub const LLAMA_3_1_8B_INSTRUCT_REFERENCE: &str = "meta-llama/Meta-Llama-3.1-8B-Instruct-Reference";
pub const LLAMA_3_1_70B_INSTRUCT_REFERENCE: &str =
    "meta-llama/Meta-Llama-3.1-70B-Instruct-Reference";
pub const WIZARDLM_2_8X22B: &str = "microsoft/WizardLM-2-8x22B";
pub const MISTRAL_7B_INSTRUCT_V0_1: &str = "mistralai/Mistral-7B-Instruct-v0.1";
pub const MISTRAL_7B_INSTRUCT_V0_2: &str = "mistralai/Mistral-7B-Instruct-v0.2";
pub const MISTRAL_7B_INSTRUCT_V0_3: &str = "mistralai/Mistral-7B-Instruct-v0.3";
pub const MIXTRAL_8X7B_INSTRUCT_V0_1: &str = "mistralai/Mixtral-8x7B-Instruct-v0.1";
pub const MIXTRAL_8X22B_INSTRUCT_V0_1: &str = "mistralai/Mixtral-8x22B-Instruct-v0.1";
pub const NOUS_HERMES_2_MIXTRAL_8X7B_DPO: &str = "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO";
pub const NOUS_HERMES_LLAMA2_70B: &str = "NousResearch/Nous-Hermes-Llama2-70b";
pub const NOUS_HERMES_2_MIXTRAL_8X7B_SFT: &str = "NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT";
pub const NOUS_HERMES_LLAMA2_13B: &str = "NousResearch/Nous-Hermes-Llama2-13b";
pub const NOUS_HERMES_2_MISTRAL_DPO: &str = "NousResearch/Nous-Hermes-2-Mistral-7B-DPO";
pub const NOUS_HERMES_LLAMA2_7B: &str = "NousResearch/Nous-Hermes-llama-2-7b";
pub const NOUS_CAPYBARA_V1_9: &str = "NousResearch/Nous-Capybara-7B-V1p9";
pub const HERMES_2_THETA_LLAMA_3_70B: &str = "NousResearch/Hermes-2-Theta-Llama-3-70B";
pub const OPENCHAT_3_5: &str = "openchat/openchat-3.5-1210";
pub const OPENORCA_MISTRAL_7B_8K: &str = "Open-Orca/Mistral-7B-OpenOrca";
pub const QWEN_2_72B_INSTRUCT: &str = "Qwen/Qwen2-72B-Instruct";
pub const QWEN2_5_72B_INSTRUCT_TURBO: &str = "Qwen/Qwen2.5-72B-Instruct-Turbo";
pub const QWEN2_5_7B_INSTRUCT_TURBO: &str = "Qwen/Qwen2.5-7B-Instruct-Turbo";
pub const QWEN1_5_110B_CHAT: &str = "Qwen/Qwen1.5-110B-Chat";
pub const QWEN1_5_72B_CHAT: &str = "Qwen/Qwen1.5-72B-Chat";
pub const QWEN_2_1_5B_INSTRUCT: &str = "Qwen/Qwen2-1.5B-Instruct";
pub const QWEN_2_7B_INSTRUCT: &str = "Qwen/Qwen2-7B-Instruct";
pub const QWEN1_5_14B_CHAT: &str = "Qwen/Qwen1.5-14B-Chat";
pub const QWEN1_5_1_8B_CHAT: &str = "Qwen/Qwen1.5-1.8B-Chat";
pub const QWEN1_5_32B_CHAT: &str = "Qwen/Qwen1.5-32B-Chat";
pub const QWEN1_5_7B_CHAT: &str = "Qwen/Qwen1.5-7B-Chat";
pub const QWEN1_5_0_5B_CHAT: &str = "Qwen/Qwen1.5-0.5B-Chat";
pub const QWEN1_5_4B_CHAT: &str = "Qwen/Qwen1.5-4B-Chat";
pub const SNORKEL_MISTRAL_PAIRRM_DPO: &str = "snorkelai/Snorkel-Mistral-PairRM-DPO";
pub const SNOWFLAKE_ARCTIC_INSTRUCT: &str = "Snowflake/snowflake-arctic-instruct";
pub const ALPACA_7B: &str = "togethercomputer/alpaca-7b";
pub const OPENHERMES_2_MISTRAL_7B: &str = "teknium/OpenHermes-2-Mistral-7B";
pub const OPENHERMES_2_5_MISTRAL_7B: &str = "teknium/OpenHermes-2p5-Mistral-7B";
pub const GUANACO_65B: &str = "togethercomputer/guanaco-65b";
pub const GUANACO_13B: &str = "togethercomputer/guanaco-13b";
pub const GUANACO_33B: &str = "togethercomputer/guanaco-33b";
pub const GUANACO_7B: &str = "togethercomputer/guanaco-7b";
pub const REMM_SLERP_L2_13B: &str = "Undi95/ReMM-SLERP-L2-13B";
pub const TOPPY_M_7B: &str = "Undi95/Toppy-M-7B";
pub const SOLAR_10_7B_INSTRUCT_V1: &str = "upstage/SOLAR-10.7B-Instruct-v1.0";
pub const SOLAR_10_7B_INSTRUCT_V1_INT4: &str = "togethercomputer/SOLAR-10.7B-Instruct-v1.0-int4";
pub const WIZARDLM_13B_V1_2: &str = "WizardLM/WizardLM-13B-V1.2";

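/// A Together AI chat completion model. `model` holds one of the Together
/// model identifiers, such as the constants defined above.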
#[derive(Clone)]
pub struct CompletionModel<T = reqwest::Client> {
    pub(crate) client: Client<T>,
    pub model: String,
}

impl<T> CompletionModel<T> {
    pub fn new(client: Client<T>, model: &str) -> Self {
        Self {
            client,
            model: model.to_string(),
        }
    }

    /// Builds the JSON body for the chat completions endpoint from a rig
    /// [`completion::CompletionRequest`].
    pub(crate) fn create_completion_request(
        &self,
        completion_request: completion::CompletionRequest,
    ) -> Result<serde_json::Value, CompletionError> {
        // The system preamble, if present, leads the message history.
        let mut full_history: Vec<openai::Message> = match &completion_request.preamble {
            Some(preamble) => vec![openai::Message::system(preamble)],
            None => vec![],
        };
        // Attach any normalized documents ahead of the chat history.
        if let Some(docs) = completion_request.normalized_documents() {
            let docs: Vec<openai::Message> = docs.try_into()?;
            full_history.extend(docs);
        }
        // Each rig message may convert into several OpenAI-style messages.
        let chat_history: Vec<openai::Message> = completion_request
            .chat_history
            .into_iter()
            .map(|message| message.try_into())
            .collect::<Result<Vec<Vec<openai::Message>>, _>>()?
            .into_iter()
            .flatten()
            .collect();

        full_history.extend(chat_history);

        let tool_choice = completion_request
            .tool_choice
            .map(ToolChoice::try_from)
            .transpose()?;

        // Only include the tool fields when tools are actually supplied.
        let mut request = if completion_request.tools.is_empty() {
            json!({
                "model": self.model,
                "messages": full_history,
                "temperature": completion_request.temperature,
            })
        } else {
            json!({
                "model": self.model,
                "messages": full_history,
                "temperature": completion_request.temperature,
                "tools": completion_request.tools.into_iter().map(openai::ToolDefinition::from).collect::<Vec<_>>(),
                "tool_choice": tool_choice,
            })
        };
        // Caller-supplied additional parameters extend or override the defaults.
        request = if let Some(params) = completion_request.additional_params {
            json_utils::merge(request, params)
        } else {
            request
        };
        Ok(request)
    }
}
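
// Usage sketch (illustrative only): the `Client` constructor and the request
// value below are assumptions about the surrounding crate, not part of this
// module.
//
//     let client = Client::new("TOGETHER_API_KEY");
//     let model = CompletionModel::new(client, MISTRAL_7B_INSTRUCT_V0_3);
//     let body = model.create_completion_request(completion_request)?;
//     // `body` is the JSON payload POSTed to `/v1/chat/completions`, e.g.
//     // {"model": "mistralai/Mistral-7B-Instruct-v0.3", "messages": [...]}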

impl completion::CompletionModel for CompletionModel<reqwest::Client> {
    type Response = openai::CompletionResponse;
    type StreamingResponse = openai::StreamingCompletionResponse;

    #[cfg_attr(feature = "worker", worker::send)]
    async fn completion(
        &self,
        completion_request: completion::CompletionRequest,
    ) -> Result<completion::CompletionResponse<openai::CompletionResponse>, CompletionError> {
        let preamble = completion_request.preamble.clone();
        let request = self.create_completion_request(completion_request)?;
        let messages_as_json_string =
            serde_json::to_string(request.get("messages").unwrap()).unwrap();

        let span = if tracing::Span::current().is_disabled() {
            info_span!(
                target: "rig::completions",
                "chat",
                gen_ai.operation.name = "chat",
                gen_ai.provider.name = "together",
                gen_ai.request.model = self.model,
                gen_ai.system_instructions = preamble,
                gen_ai.response.id = tracing::field::Empty,
                gen_ai.response.model = tracing::field::Empty,
                gen_ai.usage.output_tokens = tracing::field::Empty,
                gen_ai.usage.input_tokens = tracing::field::Empty,
                gen_ai.input.messages = &messages_as_json_string,
                gen_ai.output.messages = tracing::field::Empty,
            )
        } else {
            tracing::Span::current()
        };

        tracing::debug!(target: "rig::completion", "TogetherAI completion request: {messages_as_json_string}");

        async move {
            let response = self
                .client
                .reqwest_post("/v1/chat/completions")
                .json(&request)
                .send()
                .await
                .map_err(|e| CompletionError::HttpError(http_client::Error::Instance(e.into())))?;

            if response.status().is_success() {
                let t = response.text().await.map_err(|e| {
                    CompletionError::HttpError(http_client::Error::Instance(e.into()))
                })?;
                tracing::debug!(target: "rig::completion", "TogetherAI completion response: {t}");

                match serde_json::from_str::<ApiResponse<openai::CompletionResponse>>(&t)? {
                    ApiResponse::Ok(response) => {
                        let span = tracing::Span::current();
                        span.record(
                            "gen_ai.output.messages",
                            serde_json::to_string(&response.choices).unwrap(),
                        );
                        span.record("gen_ai.response.id", &response.id);
                        span.record("gen_ai.response.model", &response.model);
                        if let Some(ref usage) = response.usage {
                            span.record("gen_ai.usage.input_tokens", usage.prompt_tokens);
                            // Output tokens are derived: the usage payload only
                            // reports prompt and total token counts.
                            span.record(
                                "gen_ai.usage.output_tokens",
                                usage.total_tokens - usage.prompt_tokens,
                            );
                        }
                        response.try_into()
                    }
                    ApiResponse::Error(err) => Err(CompletionError::ProviderError(err.error)),
                }
            } else {
                Err(CompletionError::ProviderError(
                    response.text().await.map_err(|e| {
                        CompletionError::HttpError(http_client::Error::Instance(e.into()))
                    })?,
                ))
            }
        }
        .instrument(span)
        .await
    }

    #[cfg_attr(feature = "worker", worker::send)]
    async fn stream(
        &self,
        request: CompletionRequest,
    ) -> Result<StreamingCompletionResponse<Self::StreamingResponse>, CompletionError> {
        // Delegates to the inherent `stream` implementation defined elsewhere
        // in this provider (inherent methods take precedence over this trait
        // method, so this call does not recurse).
        CompletionModel::stream(self, request).await
    }
}
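
// Calling sketch (illustrative; assumes a `completion::CompletionRequest`
// assembled elsewhere in the crate):
//
//     let response = model.completion(request).await?;
//     // The parsed assistant choice and the raw provider payload are both
//     // available on the returned `completion::CompletionResponse`.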

#[derive(Debug, Serialize, Deserialize)]
#[serde(untagged, rename_all = "snake_case")]
pub enum ToolChoice {
    None,
    Auto,
    Function(Vec<ToolChoiceFunctionKind>),
}

impl TryFrom<crate::message::ToolChoice> for ToolChoice {
    type Error = CompletionError;

    fn try_from(value: crate::message::ToolChoice) -> Result<Self, Self::Error> {
        let res = match value {
            crate::message::ToolChoice::None => Self::None,
            crate::message::ToolChoice::Auto => Self::Auto,
            crate::message::ToolChoice::Specific { function_names } => {
                let vec: Vec<ToolChoiceFunctionKind> = function_names
                    .into_iter()
                    .map(|name| ToolChoiceFunctionKind::Function { name })
                    .collect();

                Self::Function(vec)
            }
            // Any other rig tool-choice variant has no Together equivalent.
            choice => {
                return Err(CompletionError::ProviderError(format!(
                    "Unsupported tool choice type: {choice:?}"
                )));
            }
        };

        Ok(res)
    }
}

#[derive(Debug, Serialize, Deserialize)]
// `rename_all` lowercases the variant tag so this serializes as
// `{"type": "function", "function": {"name": ...}}`, the OpenAI-compatible
// wire format; without it the tag would be emitted as `"Function"`.
#[serde(tag = "type", content = "function", rename_all = "snake_case")]
pub enum ToolChoiceFunctionKind {
    Function { name: String },
}
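
// A minimal serialization sketch of the tool-choice wire format. Note that
// with `#[serde(untagged)]`, serde emits the unit variants (`None`, `Auto`)
// as JSON `null`; `rename_all` does not apply to untagged unit variants.
#[cfg(test)]
mod tool_choice_serde_tests {
    use super::*;

    #[test]
    fn function_variant_serializes_as_tagged_array() {
        let choice = ToolChoice::Function(vec![ToolChoiceFunctionKind::Function {
            name: "get_weather".to_owned(),
        }]);
        assert_eq!(
            serde_json::to_value(&choice).unwrap(),
            serde_json::json!([{ "type": "function", "function": { "name": "get_weather" } }])
        );
    }
}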