agentroot_core/llm/
http_embedder.rs

//! HTTP-based embedder using an external LLM service.
2
3use super::{Embedder, LLMClient};
4use crate::config::LLMServiceConfig;
5use crate::error::Result;
6use async_trait::async_trait;
7use std::sync::Arc;
8
9/// Embedder that uses external HTTP service (vLLM, OpenAI, etc.)
10pub struct HttpEmbedder {
11    client: Arc<dyn LLMClient>,
12}
13
14impl HttpEmbedder {
15    /// Create from LLM client
16    pub fn new(client: Arc<dyn LLMClient>) -> Self {
17        Self { client }
18    }
19
20    /// Create from configuration
21    pub fn from_config(config: LLMServiceConfig) -> Result<Self> {
22        let client = super::VLLMClient::new(config)?;
23        Ok(Self {
24            client: Arc::new(client),
25        })
26    }
27
28    /// Create from environment variables
29    pub fn from_env() -> Result<Self> {
30        let client = super::VLLMClient::from_env()?;
31        Ok(Self {
32            client: Arc::new(client),
33        })
34    }
35}
36
37#[async_trait]
38impl Embedder for HttpEmbedder {
39    async fn embed(&self, text: &str) -> Result<Vec<f32>> {
40        self.client.embed(text).await
41    }
42
43    async fn embed_batch(&self, texts: &[String]) -> Result<Vec<Vec<f32>>> {
44        self.client.embed_batch(texts).await
45    }
46
47    fn dimensions(&self) -> usize {
48        self.client.embedding_dimensions()
49    }
50
51    fn model_name(&self) -> &str {
52        self.client.model_name()
53    }
54}