//! `synaptic_together` — Together AI chat-model provider.
//!
//! Wraps the OpenAI-compatible client (`synaptic_openai`) with
//! Together-specific model identifiers and configuration.
use std::sync::Arc;
pub use synaptic_core::{ChatModel, ChatRequest, ChatResponse, ChatStream, Message, SynapticError};
use synaptic_models::ProviderBackend;
use synaptic_openai::{OpenAiChatModel, OpenAiConfig};
5
/// Model identifiers available on the Together AI platform.
///
/// The string each variant maps to is defined in [`TogetherModel::as_str`];
/// use [`TogetherModel::Custom`] for any model id without a named variant.
///
/// `Hash` is derived (all variants are hashable) so models can be used
/// directly as `HashMap`/`HashSet` keys.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum TogetherModel {
    /// `meta-llama/Llama-3.3-70B-Instruct-Turbo`
    Llama3_3_70bInstructTurbo,
    /// `meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo`
    Llama3_1_8bInstructTurbo,
    /// `meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo`
    Llama3_1_405bInstructTurbo,
    /// `deepseek-ai/DeepSeek-R1`
    DeepSeekR1,
    /// `Qwen/Qwen2.5-72B-Instruct-Turbo`
    Qwen2_5_72bInstructTurbo,
    /// `mistralai/Mixtral-8x7B-Instruct-v0.1`
    Mixtral8x7bInstruct,
    /// Any other Together model id, passed through verbatim.
    Custom(String),
}
16
17impl TogetherModel {
18    pub fn as_str(&self) -> &str {
19        match self {
20            TogetherModel::Llama3_3_70bInstructTurbo => "meta-llama/Llama-3.3-70B-Instruct-Turbo",
21            TogetherModel::Llama3_1_8bInstructTurbo => {
22                "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"
23            }
24            TogetherModel::Llama3_1_405bInstructTurbo => {
25                "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo"
26            }
27            TogetherModel::DeepSeekR1 => "deepseek-ai/DeepSeek-R1",
28            TogetherModel::Qwen2_5_72bInstructTurbo => "Qwen/Qwen2.5-72B-Instruct-Turbo",
29            TogetherModel::Mixtral8x7bInstruct => "mistralai/Mixtral-8x7B-Instruct-v0.1",
30            TogetherModel::Custom(s) => s.as_str(),
31        }
32    }
33}
34
35impl std::fmt::Display for TogetherModel {
36    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
37        write!(f, "{}", self.as_str())
38    }
39}
40
/// Configuration for a Together AI chat model.
///
/// Converted into an `OpenAiConfig` (Together exposes an OpenAI-compatible
/// API) before use; optional fields set to `None` are simply not forwarded.
#[derive(Debug, Clone)]
pub struct TogetherConfig {
    /// Together AI API key used to authenticate requests.
    pub api_key: String,
    /// Model identifier string, e.g. "meta-llama/Llama-3.3-70B-Instruct-Turbo".
    pub model: String,
    /// Maximum number of tokens to generate; left unset when `None`.
    pub max_tokens: Option<u32>,
    /// Sampling temperature; left unset when `None`.
    pub temperature: Option<f64>,
    /// Nucleus-sampling (top-p) parameter; left unset when `None`.
    pub top_p: Option<f64>,
    /// Stop sequences; left unset when `None`.
    pub stop: Option<Vec<String>>,
}
50
51impl TogetherConfig {
52    pub fn new(api_key: impl Into<String>, model: TogetherModel) -> Self {
53        Self {
54            api_key: api_key.into(),
55            model: model.to_string(),
56            max_tokens: None,
57            temperature: None,
58            top_p: None,
59            stop: None,
60        }
61    }
62    pub fn new_custom(api_key: impl Into<String>, model: impl Into<String>) -> Self {
63        Self {
64            api_key: api_key.into(),
65            model: model.into(),
66            max_tokens: None,
67            temperature: None,
68            top_p: None,
69            stop: None,
70        }
71    }
72    pub fn with_max_tokens(mut self, v: u32) -> Self {
73        self.max_tokens = Some(v);
74        self
75    }
76    pub fn with_temperature(mut self, v: f64) -> Self {
77        self.temperature = Some(v);
78        self
79    }
80    pub fn with_top_p(mut self, v: f64) -> Self {
81        self.top_p = Some(v);
82        self
83    }
84    pub fn with_stop(mut self, v: Vec<String>) -> Self {
85        self.stop = Some(v);
86        self
87    }
88}
89
90impl From<TogetherConfig> for OpenAiConfig {
91    fn from(c: TogetherConfig) -> Self {
92        let mut cfg =
93            OpenAiConfig::new(c.api_key, c.model).with_base_url("https://api.together.xyz/v1");
94        if let Some(v) = c.max_tokens {
95            cfg = cfg.with_max_tokens(v);
96        }
97        if let Some(v) = c.temperature {
98            cfg = cfg.with_temperature(v);
99        }
100        if let Some(v) = c.top_p {
101            cfg = cfg.with_top_p(v);
102        }
103        if let Some(v) = c.stop {
104            cfg = cfg.with_stop(v);
105        }
106        cfg
107    }
108}
109
/// Chat model backed by the Together AI API.
///
/// Thin wrapper around an `OpenAiChatModel` configured for Together's
/// OpenAI-compatible endpoint; all calls are delegated to it.
pub struct TogetherChatModel {
    /// Underlying OpenAI-protocol client that performs the actual requests.
    inner: OpenAiChatModel,
}
113
114impl TogetherChatModel {
115    pub fn new(config: TogetherConfig, backend: Arc<dyn ProviderBackend>) -> Self {
116        Self {
117            inner: OpenAiChatModel::new(config.into(), backend),
118        }
119    }
120}
121
#[async_trait::async_trait]
impl ChatModel for TogetherChatModel {
    /// Delegates a one-shot chat request to the wrapped OpenAI-compatible model.
    async fn chat(&self, request: ChatRequest) -> Result<ChatResponse, SynapticError> {
        self.inner.chat(request).await
    }
    /// Delegates streaming chat to the wrapped model; the returned stream
    /// borrows `self` for its lifetime.
    fn stream_chat(&self, request: ChatRequest) -> ChatStream<'_> {
        self.inner.stream_chat(request)
    }
}