//! Perplexity provider for the Synaptic chat-model abstraction: a thin
//! wrapper that points the OpenAI-compatible client at the Perplexity API.
1use std::sync::Arc;
2pub use synaptic_core::{ChatModel, ChatRequest, ChatResponse, ChatStream, Message, SynapticError};
3use synaptic_models::ProviderBackend;
4use synaptic_openai::{OpenAiChatModel, OpenAiConfig};
5
/// Well-known Perplexity model identifiers, with [`PerplexityModel::Custom`]
/// as an escape hatch for model names not covered by a variant.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PerplexityModel {
    SonarLarge,
    SonarSmall,
    SonarHuge,
    SonarReasoningPro,
    Custom(String),
}

impl PerplexityModel {
    /// Returns the wire-format model identifier sent to the Perplexity API.
    // NOTE(review): the `sonar-*-online` ids should be checked against the
    // current Perplexity model catalogue — it changes over time. Not altered
    // here because callers may depend on these exact strings.
    pub fn as_str(&self) -> &str {
        match self {
            Self::SonarLarge => "sonar-large-online",
            Self::SonarSmall => "sonar-small-online",
            Self::SonarHuge => "sonar-huge-online",
            Self::SonarReasoningPro => "sonar-reasoning-pro",
            Self::Custom(name) => name.as_str(),
        }
    }
}

impl std::fmt::Display for PerplexityModel {
    /// Formats the model as its API identifier string.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.as_str())
    }
}
32
/// Configuration for a Perplexity chat model.
///
/// Convertible into an [`OpenAiConfig`] (Perplexity exposes an
/// OpenAI-compatible API), which re-targets the base URL at
/// `https://api.perplexity.ai`.
#[derive(Debug, Clone)]
pub struct PerplexityConfig {
    // API key for the Perplexity API.
    pub api_key: String,
    // Model identifier sent with each request (see `PerplexityModel::as_str`).
    pub model: String,
    // Optional cap on the number of tokens generated per response.
    pub max_tokens: Option<u32>,
    // Optional sampling temperature.
    pub temperature: Option<f64>,
    // Optional nucleus-sampling probability mass.
    pub top_p: Option<f64>,
    // Optional stop sequences that terminate generation.
    pub stop: Option<Vec<String>>,
}
42
43impl PerplexityConfig {
44    pub fn new(api_key: impl Into<String>, model: PerplexityModel) -> Self {
45        Self {
46            api_key: api_key.into(),
47            model: model.to_string(),
48            max_tokens: None,
49            temperature: None,
50            top_p: None,
51            stop: None,
52        }
53    }
54    pub fn new_custom(api_key: impl Into<String>, model: impl Into<String>) -> Self {
55        Self {
56            api_key: api_key.into(),
57            model: model.into(),
58            max_tokens: None,
59            temperature: None,
60            top_p: None,
61            stop: None,
62        }
63    }
64    pub fn with_max_tokens(mut self, v: u32) -> Self {
65        self.max_tokens = Some(v);
66        self
67    }
68    pub fn with_temperature(mut self, v: f64) -> Self {
69        self.temperature = Some(v);
70        self
71    }
72    pub fn with_top_p(mut self, v: f64) -> Self {
73        self.top_p = Some(v);
74        self
75    }
76    pub fn with_stop(mut self, v: Vec<String>) -> Self {
77        self.stop = Some(v);
78        self
79    }
80}
81
82impl From<PerplexityConfig> for OpenAiConfig {
83    fn from(c: PerplexityConfig) -> Self {
84        let mut cfg =
85            OpenAiConfig::new(c.api_key, c.model).with_base_url("https://api.perplexity.ai");
86        if let Some(v) = c.max_tokens {
87            cfg = cfg.with_max_tokens(v);
88        }
89        if let Some(v) = c.temperature {
90            cfg = cfg.with_temperature(v);
91        }
92        if let Some(v) = c.top_p {
93            cfg = cfg.with_top_p(v);
94        }
95        if let Some(v) = c.stop {
96            cfg = cfg.with_stop(v);
97        }
98        cfg
99    }
100}
101
102pub struct PerplexityChatModel {
103    inner: OpenAiChatModel,
104}
105
106impl PerplexityChatModel {
107    pub fn new(config: PerplexityConfig, backend: Arc<dyn ProviderBackend>) -> Self {
108        Self {
109            inner: OpenAiChatModel::new(config.into(), backend),
110        }
111    }
112}
113
#[async_trait::async_trait]
impl ChatModel for PerplexityChatModel {
    /// Delegates a single chat completion to the inner OpenAI-protocol client.
    async fn chat(&self, request: ChatRequest) -> Result<ChatResponse, SynapticError> {
        self.inner.chat(request).await
    }
    /// Delegates streaming chat to the inner OpenAI-protocol client.
    fn stream_chat(&self, request: ChatRequest) -> ChatStream<'_> {
        self.inner.stream_chat(request)
    }
}