language_barrier_core/llm_service.rs
use async_trait::async_trait;
use reqwest::Client;
use std::sync::Arc;
use tracing::{debug, error, info, trace};

use crate::{Chat, Message, ModelInfo, Result, provider::HTTPProvider};

/// A service that can generate the next message in a conversation.
///
/// `LLMService` replaces the previous `SingleRequestExecutor` pattern with a
/// more flexible trait-based approach, allowing for different implementations
/// (HTTP, local inference, etc.).
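///
/// # Examples
///
/// A minimal sketch of a non-HTTP implementation: a canned-reply service that
/// could back tests. It assumes a `Message::assistant` constructor analogous
/// to `Message::user`; everything else comes from this module.
///
/// ```no_run
/// use async_trait::async_trait;
/// use language_barrier_core::{Chat, Message, ModelInfo, Result};
/// use language_barrier_core::llm_service::LLMService;
///
/// struct CannedService;
///
/// #[async_trait]
/// impl<M: ModelInfo> LLMService<M> for CannedService {
///     async fn generate_next_message(&self, _chat: &Chat) -> Result<Message> {
///         // Ignores the chat and always returns the same reply
///         // (assumes a `Message::assistant` constructor).
///         Ok(Message::assistant("Hello from a canned service!"))
///     }
/// }
/// ```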
#[async_trait]
pub trait LLMService<M: ModelInfo> {
    /// Generates the next message in the conversation.
    ///
    /// Takes a `Chat` instance and returns a `Result` containing the next message.
    async fn generate_next_message(&self, chat: &Chat) -> Result<Message>;
}

/// An LLM service implementation that sends requests over HTTP.
///
/// This implementation of `LLMService` uses HTTP to communicate with language
/// model providers. It replaces the previous `SingleRequestExecutor`, providing
/// the same functionality but with a more flexible trait-based design.
///
/// # Examples
///
/// ```no_run
/// use language_barrier_core::{Chat, Message, model::Claude};
/// use language_barrier_core::llm_service::{HTTPLlmService, LLMService};
/// use language_barrier_core::provider::anthropic::AnthropicProvider;
/// use std::sync::Arc;
///
/// #[tokio::main]
/// async fn main() -> language_barrier_core::Result<()> {
///     // Create a provider
///     let provider = AnthropicProvider::new();
///
///     // Create a service with the model and provider
///     let service = HTTPLlmService::new(
///         Claude::Opus3,
///         Arc::new(provider)
///     );
///
///     // Create a chat and generate a response
///     let chat = Chat::default()
///         .with_system_prompt("You are a helpful assistant.")
///         .add_message(Message::user("Hello, how are you?"));
///
///     let response = service.generate_next_message(&chat).await?;
///
///     Ok(())
/// }
/// ```
pub struct HTTPLlmService<M: ModelInfo> {
    model: M,
    provider: Arc<dyn HTTPProvider<M>>,
}

impl<M: ModelInfo> HTTPLlmService<M> {
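    /// Creates a new service that generates messages with `model` via the
    /// given `provider`.
    ///
    /// The provider is stored behind an `Arc`, so a single provider instance
    /// can be shared across several services.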
    pub fn new(model: M, provider: Arc<dyn HTTPProvider<M>>) -> Self {
        HTTPLlmService { model, provider }
    }
}

#[async_trait]
impl<M: ModelInfo> LLMService<M> for HTTPLlmService<M> {
    async fn generate_next_message(&self, chat: &Chat) -> Result<Message> {
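        // NOTE: a fresh reqwest::Client is built on every call. Client is
        // internally reference-counted and cheap to clone, so callers issuing
        // many requests may prefer to create one Client up front and share it
        // to reuse its connection pool.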
        let client = Client::new();

        // Ask the provider to turn the chat into a provider-specific HTTP request
        let request = match self.provider.accept(self.model, chat) {
            Ok(req) => {
                debug!(
                    "Request created successfully: {} {}",
                    req.method(),
                    req.url()
                );
                trace!("Request headers: {:#?}", req.headers());
                req
            }
            Err(e) => {
                error!("Failed to create request: {}", e);
                return Err(e);
            }
        };

        // Send request and get response
        debug!("Sending HTTP request");
        let response = match client.execute(request).await {
            Ok(resp) => {
                info!("Received response with status: {}", resp.status());
                trace!("Response headers: {:#?}", resp.headers());
                resp
            }
            Err(e) => {
                error!("HTTP request failed: {}", e);
                return Err(e.into());
            }
        };

        // Get response text
        debug!("Reading response body");
        let response_text = match response.text().await {
            Ok(text) => {
                trace!("Response body: {}", text);
                text
            }
            Err(e) => {
                error!("Failed to read response body: {}", e);
                return Err(e.into());
            }
        };

        // Parse response using provider
        debug!("Parsing response");
        let message = match self.provider.parse(response_text) {
            Ok(msg) => {
                info!("Successfully parsed response into message");
                debug!("Message role: {}", msg.role_str());
                // Message content is now accessed through pattern matching
                msg
            }
            Err(e) => {
                error!("Failed to parse response: {}", e);
                return Err(e);
            }
        };

        Ok(message)
    }
}