llm_chain/traits.rs
//! # Traits Module
//!
//! Welcome to the `traits` module! This is where llm-chain houses its public traits, which define the essential behavior of steps and executors. These traits are the backbone of our library, and they provide the foundation for creating and working with different models in llm-chain.
//!
//! Here's a brief overview of the key concepts:
//! - **Steps**: These are the building blocks that make up chains. A step defines the parameters for a model invocation, including the prompt that is sent to the LLM (Large Language Model).
//! - **Executors**: These are responsible for performing the steps. They take the prompt produced by a step, invoke the model with it, and return the resulting output.
//!
//! By implementing these traits, you can set up a new model and use it in your application. Your step defines the input to the model, and your executor invokes the model and returns the output. The output of the executor is then passed to the next step in the chain, and so on.
//!
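//! A minimal sketch of that flow (hypothetical names: `MyExecutor` stands in for any concrete
//! `Executor` implementation, and `options` and `prompt` for prepared `Options` and `Prompt`
//! values):
//!
//! ```ignore
//! // Create the executor and run a single step against the model.
//! let exec = MyExecutor::new()?;
//! let output = exec.execute(&options, &prompt).await?;
//! ```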

use std::{error::Error, fmt::Debug};

use crate::{
    options::Options,
    output::Output,
    prompt::Prompt,
    schema::{Document, EmptyMetadata},
    tokens::{PromptTokensError, TokenCount, Tokenizer, TokenizerError},
};
use async_trait::async_trait;

#[derive(thiserror::Error, Debug)]
#[error("unable to create executor")]
pub enum ExecutorCreationError {
    #[error("unable to create executor: {0}")]
    InnerError(#[from] Box<dyn Error + Send + Sync>),
    #[error("Field must be set: {0}")]
    FieldRequiredError(String),
}

#[derive(thiserror::Error, Debug)]
/// An error indicating that the model was not successfully run.
pub enum ExecutorError {
    #[error("Unable to run model: {0}")]
    /// An error occurring in the underlying executor code that doesn't fit any other category.
    InnerError(#[from] Box<dyn Error + Send + Sync>),
    #[error("Invalid options when calling the executor")]
    /// An error indicating that the model was invoked with invalid options.
    InvalidOptions,
    #[error(transparent)]
    /// An error tokenizing the prompt.
    PromptTokens(PromptTokensError),
    #[error("the context was too small to fit your input")]
    ContextTooSmall,
}

#[async_trait]
/// The `Executor` trait represents an executor that performs a single step in a chain. It takes a
/// step, executes it, and returns the output.
pub trait Executor: Sized {
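    /// The tokenizer type returned by [`Self::get_tokenizer`].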
    type StepTokenizer<'a>: Tokenizer
    where
        Self: 'a;

    /// Create a new executor with the given options. If you don't need to set any options, you can use the `new` method instead.
    ///
    /// # Parameters
    ///
    /// * `options`: The options to set.
    fn new_with_options(options: Options) -> Result<Self, ExecutorCreationError>;

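    /// Create a new executor with the default (empty) set of options.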
    fn new() -> Result<Self, ExecutorCreationError> {
        Self::new_with_options(Options::empty().clone())
    }

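    /// Executes a single step: sends the given prompt to the model with the supplied options and
    /// returns the model's output.
    ///
    /// # Parameters
    ///
    /// * `options`: The per-invocation options for this call.
    /// * `prompt`: The prompt to send to the model.
    ///
    /// # Returns
    ///
    /// A `Result` containing the model's `Output`, or an `ExecutorError` if the invocation fails.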
    async fn execute(&self, options: &Options, prompt: &Prompt) -> Result<Output, ExecutorError>;

    /// Calculates the number of tokens used by the step given a set of parameters.
    ///
    /// The step and the parameters together are used to form the full prompt, which is then
    /// tokenized, and the token count is returned.
    ///
    /// # Parameters
    ///
    /// * `options`: The per-invocation options that affect the token allowance.
    /// * `prompt`: The prompt passed to the step.
    ///
    /// # Returns
    ///
    /// A `Result` containing the token count, or an error if there was a problem.
    fn tokens_used(
        &self,
        options: &Options,
        prompt: &Prompt,
    ) -> Result<TokenCount, PromptTokensError>;

    /// Returns the maximum number of input tokens allowed by the model used.
    ///
    /// # Parameters
    ///
    /// * `options`: The per-invocation options that affect the token allowance.
    ///
    /// # Returns
    ///
    /// The maximum token count allowed for the step.
    fn max_tokens_allowed(&self, options: &Options) -> i32;

    /// Returns a possible answer prefix inserted by the model during certain prompt modes.
    ///
    /// # Parameters
    ///
    /// * `prompt`: The prompt passed to the step.
    ///
    /// # Returns
    ///
    /// An `Option` containing the prefix `String` if one exists, or `None` if there is no prefix.
    fn answer_prefix(&self, prompt: &Prompt) -> Option<String>;

    /// Creates a tokenizer for the model used by this executor.
    ///
    /// # Parameters
    ///
    /// * `options`: The per-invocation options, used to select the appropriate tokenizer.
    ///
    /// # Returns
    ///
    /// A `Result` containing a tokenizer, or an error if there was a problem.
    fn get_tokenizer(&self, options: &Options) -> Result<Self::StepTokenizer<'_>, TokenizerError>;
}

/// This marker trait is needed so the concrete `VectorStore::Error` can have a derived `From<Embeddings::Error>`.
pub trait EmbeddingsError {}

#[async_trait]
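/// The `Embeddings` trait is implemented by types that can turn texts and queries into embedding
/// vectors, for example so that a [`VectorStore`] can index and search them.
///
/// A minimal sketch of an implementation (the `ZeroEmbeddings` type and its `NoError` error type
/// are hypothetical placeholders, not part of llm-chain):
///
/// ```ignore
/// use async_trait::async_trait;
/// use llm_chain::traits::{Embeddings, EmbeddingsError};
///
/// #[derive(Debug, thiserror::Error)]
/// #[error("embedding failed")]
/// struct NoError;
/// impl EmbeddingsError for NoError {}
///
/// // A stub that returns all-zero vectors of a fixed dimension.
/// struct ZeroEmbeddings {
///     dims: usize,
/// }
///
/// #[async_trait]
/// impl Embeddings for ZeroEmbeddings {
///     type Error = NoError;
///
///     async fn embed_texts(&self, texts: Vec<String>) -> Result<Vec<Vec<f32>>, Self::Error> {
///         // One vector per input text.
///         Ok(texts.iter().map(|_| vec![0.0; self.dims]).collect())
///     }
///
///     async fn embed_query(&self, _query: String) -> Result<Vec<f32>, Self::Error> {
///         Ok(vec![0.0; self.dims])
///     }
/// }
/// ```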
pub trait Embeddings {
    type Error: Send + Debug + Error + EmbeddingsError;
    async fn embed_texts(&self, texts: Vec<String>) -> Result<Vec<Vec<f32>>, Self::Error>;
    async fn embed_query(&self, query: String) -> Result<Vec<f32>, Self::Error>;
}

/// This marker trait is needed so users of `VectorStore` can derive `From<VectorStore::Error>`.
pub trait VectorStoreError {}

#[async_trait]
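/// The `VectorStore` trait is implemented by stores that index texts or [`Document`]s with an
/// [`Embeddings`] implementation and retrieve them again by similarity search.
///
/// A minimal usage sketch (`store` is a placeholder for any concrete `VectorStore`
/// implementation):
///
/// ```ignore
/// // Index a few texts; the store returns one `String` per added text (typically an id).
/// let ids = store.add_texts(vec!["first".into(), "second".into()]).await?;
///
/// // Retrieve the 3 stored documents most similar to the query.
/// let hits = store.similarity_search("a query".into(), 3).await?;
/// ```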
pub trait VectorStore<E, M = EmptyMetadata>
where
    E: Embeddings,
    M: serde::Serialize + serde::de::DeserializeOwned,
{
    type Error: Debug + Error + VectorStoreError;
    async fn add_texts(&self, texts: Vec<String>) -> Result<Vec<String>, Self::Error>;
    async fn add_documents(&self, documents: Vec<Document<M>>) -> Result<Vec<String>, Self::Error>;
    async fn similarity_search(
        &self,
        query: String,
        limit: u32,
    ) -> Result<Vec<Document<M>>, Self::Error>;
}