1use crate::llm::{Client, Model, Response};
2use crate::settings::LlmBackendSettings;
3use anyhow::Result;
4use llm_connector::types::Tool;
5use llm_connector::StreamFormat;
6use tokio_stream::wrappers::UnboundedReceiverStream;
7
/// High-level LLM service facade: bundles the backend [`Client`] with the
/// default model name taken from the application configuration.
pub struct Service {
    // Backend client used for all chat / streaming / model-listing calls.
    client: Client,
    // Default model id, used whenever a caller passes `None` as the model.
    // `dead_code` allowed because it is only read by methods that are
    // themselves currently marked `#[allow(dead_code)]`.
    #[allow(dead_code)]
    model: String,
}
20
21impl Service {
22 pub fn new(config: &LlmBackendSettings) -> Result<Self> {
24 let client = Client::new(config)?;
25 let model = match config {
26 LlmBackendSettings::OpenAI { model, .. } => model.clone(),
27 LlmBackendSettings::Anthropic { model, .. } => model.clone(),
28 LlmBackendSettings::Ollama { model, .. } => model.clone(),
29 LlmBackendSettings::Aliyun { model, .. } => model.clone(),
30 LlmBackendSettings::Zhipu { model, .. } => model.clone(),
31 LlmBackendSettings::Volcengine { model, .. } => model.clone(),
32 LlmBackendSettings::Tencent { model, .. } => model.clone(),
33 LlmBackendSettings::Longcat { model, .. } => model.clone(),
34 };
35
36 Ok(Self { client, model })
37 }
38
39 #[allow(dead_code)]
43 pub async fn chat(
44 &self,
45 model: Option<&str>,
46 messages: Vec<llm_connector::types::Message>,
47 tools: Option<Vec<Tool>>,
48 ) -> Result<Response> {
49 let model = model.unwrap_or(&self.model);
50 self.client.chat(model, messages, tools).await
51 }
52
53 #[allow(dead_code)]
57 pub async fn chat_stream_ollama(
58 &self,
59 model: Option<&str>,
60 messages: Vec<llm_connector::types::Message>,
61 format: StreamFormat,
62 ) -> Result<UnboundedReceiverStream<String>> {
63 let model = model.unwrap_or(&self.model);
64 self.client.chat_stream_with_format(model, messages, format).await
65 }
66
67 #[allow(dead_code)]
71 pub async fn chat_stream_openai(
72 &self,
73 model: Option<&str>,
74 messages: Vec<llm_connector::types::Message>,
75 tools: Option<Vec<Tool>>,
76 format: StreamFormat,
77 ) -> Result<UnboundedReceiverStream<String>> {
78 let model = model.unwrap_or(&self.model);
79 self.client.chat_stream_openai(model, messages, tools, format).await
80 }
81
82 pub async fn list_models(&self) -> Result<Vec<Model>> {
84 self.client.list_models().await
85 }
86
87 #[allow(dead_code)]
89 pub async fn validate_model(&self, model: &str) -> Result<bool> {
90 let available_models = self.client.list_models().await?;
91 Ok(available_models.iter().any(|m| m.id == model))
92 }
93}