// latchlm_core/lib.rs

1// This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
2// If a copy of the MPL was not distributed with this file, You can obtain one at
3// https://mozilla.org/MPL/2.0/.
4
5//! # LatchLM Core
6//!
7//! Core traits and types for the LatchLM ecosystem.
8//!
9//! This crate provides the foundation for the LatchLM ecosystem by defining
10//! the core abstractions used across all provider implementations.
11
12pub mod error;
13pub use error::*;
14
15use futures::stream::BoxStream;
16use serde::{Deserialize, Serialize};
17use std::{borrow::Cow, future::Future, pin::Pin, sync::Arc};
18
/// A boxed, pinned `Future` type used by the [`AiProvider`] trait.
///
/// This type alias represents a boxed, pinned future that is `Send`,
/// which allows it to be returned from trait methods that cannot use
/// `async fn` directly (the trait stays object-safe).
pub type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;
24
/// A trait representing a specific AI model for a provider.
///
/// Implementors of this trait represent specific model variants supported by an LLM provider.
/// Each model must be convertible to a string identifier (via [`AsRef<str>`]) that can be
/// used in API requests.
///
/// # Example
/// ```
/// use latchlm_core::{AiModel, ModelId};
///
/// pub enum MyModel {
///     Variant1,
///     Variant2,
/// }
///
/// impl AsRef<str> for MyModel {
///     fn as_ref(&self) -> &str {
///         match self {
///             MyModel::Variant1 => "mymodel-variant-1",
///             MyModel::Variant2 => "mymodel-variant-2",
///         }
///     }
/// }
///
/// impl AiModel for MyModel {
///     fn as_any(&self) -> &dyn std::any::Any {
///         self
///     }
///
///     fn model_id(&self) -> ModelId<'_> {
///         match self {
///             MyModel::Variant1 => ModelId { id: "mymodel-variant-1".into(), name: "My Model Variant 1".into()},
///             MyModel::Variant2 => ModelId { id: "mymodel-variant-2".into(), name: "My Model Variant 2".into()},
///         }
///     }
/// }
/// ```
pub trait AiModel: AsRef<str> + Send + Sync + 'static {
    /// Returns the model as a [`std::any::Any`] reference, enabling
    /// downcasting back to the concrete model type (see `dyn AiModel::downcast`).
    fn as_any(&self) -> &dyn std::any::Any;
    /// Returns the model's identifier pair: API id and human-readable name.
    fn model_id(&self) -> ModelId<'_>;
}
65
66impl dyn AiModel {
67    /// Downcasts the model to a specific type.
68    pub fn downcast<M: 'static + Clone>(&self) -> Option<M> {
69        self.as_any().downcast_ref::<M>().cloned()
70    }
71}
72
/// A unique identifier for an LLM model.
///
/// Both fields are [`Cow`] so that implementations can return borrowed
/// (e.g. `'static`) string literals without allocating, while still
/// supporting owned values (such as ids received over the wire).
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
pub struct ModelId<'a> {
    /// The technical identifier used in API requests (e.g. "mymodel-variant-1")
    pub id: Cow<'a, str>,
    /// A human-readable name suitable for display to users
    pub name: Cow<'a, str>,
}
81
82impl std::fmt::Display for ModelId<'_> {
83    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
84        write!(f, "{}", self.name)
85    }
86}
87
/// A request for an LLM.
///
/// Currently carries only the prompt text; providers translate this into
/// their own wire format.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct AiRequest {
    /// The input text to be processed by the model
    pub text: String,
}
94
/// Response from an LLM API provider.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct AiResponse {
    /// The text response produced by the model
    pub text: String,
    /// Token usage data reported by the provider (fields may be `None`)
    pub token_usage: TokenUsage,
}
103
/// Token usage information returned by LLM providers.
///
/// Every field is optional; a field is `None` when the provider did not
/// report that value. `Default` yields all-`None` (no usage information).
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
pub struct TokenUsage {
    /// Number of tokens in the input prompt
    pub input_tokens: Option<u64>,
    /// Number of tokens in the output response
    pub output_tokens: Option<u64>,
    /// Total tokens used during the interaction
    pub total_tokens: Option<u64>,
}
114
/// A trait representing an LLM API provider.
///
/// Implementors of this trait provide the functionality to interact with specific
/// LLM API providers through a unified interface.
///
/// Blanket implementations are provided for `&T`, `&mut T`, `Box<T>` and `Arc<T>`
/// where `T: AiProvider`.
pub trait AiProvider: Send + Sync {
    /// Sends a message to the specified model and returns the AI's response.
    ///
    /// # Arguments
    ///
    /// * `model` - The identifier of the model to use.
    /// * `request` - The request to send to the model.
    ///
    /// # Returns
    ///
    /// A future yielding either an [`AiResponse`] or an `Error`.
    ///
    /// # Errors
    ///
    /// Returns an `Error` if the request fails, the response status is not successful,
    /// or if the response cannot be parsed.
    fn send_request(
        &self,
        model: &dyn AiModel,
        request: AiRequest,
    ) -> BoxFuture<'_, Result<AiResponse>>;

    /// Sends a message to the specified model and returns a stream of AI responses.
    ///
    /// # Arguments
    ///
    /// * `model` - The identifier of the model to use.
    /// * `request` - The request to send to the model.
    ///
    /// # Returns
    ///
    /// A stream whose items are each either an [`AiResponse`] chunk or an `Error`.
    ///
    /// # Errors
    ///
    /// The stream yields an `Error` item if the request fails, the response status
    /// is not successful, or if a response chunk cannot be parsed.
    fn send_streaming(
        &self,
        model: &dyn AiModel,
        request: AiRequest,
    ) -> BoxStream<'_, Result<AiResponse>>;
}
165
166impl<T> AiProvider for &T
167where
168    T: AiProvider + ?Sized,
169{
170    fn send_request(
171        &self,
172        model: &dyn AiModel,
173        request: AiRequest,
174    ) -> BoxFuture<'_, Result<AiResponse>> {
175        (**self).send_request(model, request)
176    }
177
178    fn send_streaming(
179        &self,
180        model: &dyn AiModel,
181        request: AiRequest,
182    ) -> BoxStream<'_, Result<AiResponse>> {
183        (**self).send_streaming(model, request)
184    }
185}
186
187impl<T> AiProvider for &mut T
188where
189    T: AiProvider + ?Sized,
190{
191    fn send_request(
192        &self,
193        model: &dyn AiModel,
194        request: AiRequest,
195    ) -> BoxFuture<'_, Result<AiResponse>> {
196        (**self).send_request(model, request)
197    }
198
199    fn send_streaming(
200        &self,
201        model: &dyn AiModel,
202        request: AiRequest,
203    ) -> BoxStream<'_, Result<AiResponse>> {
204        (**self).send_streaming(model, request)
205    }
206}
207
208impl<T> AiProvider for Box<T>
209where
210    T: AiProvider + ?Sized,
211{
212    fn send_request(
213        &self,
214        model: &dyn AiModel,
215        request: AiRequest,
216    ) -> BoxFuture<'_, Result<AiResponse>> {
217        (**self).send_request(model, request)
218    }
219
220    fn send_streaming(
221        &self,
222        model: &dyn AiModel,
223        request: AiRequest,
224    ) -> BoxStream<'_, Result<AiResponse>> {
225        (**self).send_streaming(model, request)
226    }
227}
228
229impl<T> AiProvider for Arc<T>
230where
231    T: AiProvider + ?Sized,
232{
233    fn send_request(
234        &self,
235        model: &dyn AiModel,
236        request: AiRequest,
237    ) -> BoxFuture<'_, Result<AiResponse>> {
238        (**self).send_request(model, request)
239    }
240
241    fn send_streaming(
242        &self,
243        model: &dyn AiModel,
244        request: AiRequest,
245    ) -> BoxStream<'_, Result<AiResponse>> {
246        (**self).send_streaming(model, request)
247    }
248}