Trait llm_weaver::Loom
pub trait Loom<T: Config> {
// Provided methods
fn weave<'async_trait, TID>(
prompt_config: LlmConfig<T, T::PromptModel>,
summary_model_config: LlmConfig<T, T::SummaryModel>,
tapestry_id: TID,
system: String,
msgs: Vec<ContextMessage<T>>
) -> Pin<Box<dyn Future<Output = Result<<<T as Config>::PromptModel as Llm<T>>::Response>> + Send + 'async_trait>>
where TID: 'async_trait + TapestryId,
Self: Send + 'async_trait { ... }
fn generate_summary<'life0, 'async_trait>(
summary_model_config: LlmConfig<T, T::SummaryModel>,
tapestry_fragment: &'life0 TapestryFragment<T>
) -> Pin<Box<dyn Future<Output = Result<String>> + Send + 'async_trait>>
where Self: Send + 'async_trait,
'life0: 'async_trait { ... }
fn build_context_message(
role: WrapperRole,
content: String,
account_id: Option<String>
) -> ContextMessage<T> { ... }
}
The machine that drives all of the core methods that should be used across any service that needs to prompt an LLM and receive a response.
This trait is implemented over the Config trait.
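Since every method is provided, adopting the trait only needs an empty impl block. A minimal sketch, assuming a Config implementation named MyConfig already exists; MyConfig and MyLoom are illustrative names, not part of the crate:

use llm_weaver::Loom; // import path assumed

// Unit struct that will drive prompting; all of Loom's methods have default bodies,
// so an empty impl is enough (MyConfig: Config is assumed to be defined elsewhere).
struct MyLoom;

impl Loom<MyConfig> for MyLoom {}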
Provided Methods
fn weave<'async_trait, TID>(
prompt_config: LlmConfig<T, T::PromptModel>,
summary_model_config: LlmConfig<T, T::SummaryModel>,
tapestry_id: TID,
system: String,
msgs: Vec<ContextMessage<T>>
) -> Pin<Box<dyn Future<Output = Result<<<T as Config>::PromptModel as Llm<T>>::Response>> + Send + 'async_trait>>
where
TID: 'async_trait + TapestryId,
Self: Send + 'async_trait,
Prompt LLM Weaver for a response for a given TapestryId.
Prompts the LLM with the current TapestryFragment instance and the new msgs.
A summary of the current TapestryFragment instance will be generated if the total number of tokens in its context_messages exceeds the maximum number of tokens allowed for the current Config::PromptModel or the custom max tokens. This threshold is affected by Config::TOKEN_THRESHOLD_PERCENTILE.
Parameters
- tapestry_id: The TapestryId to prompt and save context messages to.
- system: The system message to prompt the current Config::PromptModel with.
- msgs: The list of ContextMessages to prompt the LLM with.
- prompt_params: The Config::PromptParameters to use when prompting the LLM.
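A minimal invocation sketch, assuming the hypothetical MyConfig / MyLoom types from the impl sketch above; the import paths, the system message text, and the way the caller obtains the LlmConfig values and the WrapperRole are assumptions, not part of the documented API:

use llm_weaver::{Config, ContextMessage, LlmConfig, Loom, TapestryId, WrapperRole}; // paths assumed

// Prompt once on behalf of a tapestry, wrapping the user's input as a ContextMessage.
async fn prompt_once(
    prompt_config: LlmConfig<MyConfig, <MyConfig as Config>::PromptModel>,
    summary_config: LlmConfig<MyConfig, <MyConfig as Config>::SummaryModel>,
    tapestry_id: impl TapestryId,
    user_role: WrapperRole,
    user_input: String,
) {
    let msgs: Vec<ContextMessage<MyConfig>> =
        vec![<MyLoom as Loom<MyConfig>>::build_context_message(user_role, user_input, None)];

    // `weave` prompts the PromptModel and, if the token threshold is exceeded,
    // summarizes the stored TapestryFragment with the SummaryModel first.
    let response = <MyLoom as Loom<MyConfig>>::weave(
        prompt_config,
        summary_config,
        tapestry_id,
        "You are a helpful assistant.".to_string(), // system message (illustrative)
        msgs,
    )
    .await
    .expect("weave failed");

    let _ = response; // the PromptModel's Response type; handle as needed
}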
fn generate_summary<'life0, 'async_trait>(
summary_model_config: LlmConfig<T, T::SummaryModel>,
tapestry_fragment: &'life0 TapestryFragment<T>
) -> Pin<Box<dyn Future<Output = Result<String>> + Send + 'async_trait>>
where
Self: Send + 'async_trait,
'life0: 'async_trait,
Generates the summary of the current TapestryFragment instance.
This will use the GPT-4 32K model to generate the summary, allowing the maximum possible number of GPT-4 8K model tokens to be stored in the tapestry fragment.
Returns the summary message as a string.
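A short sketch of calling generate_summary directly, again assuming the hypothetical MyConfig / MyLoom types; how the TapestryFragment is loaded from storage is not shown:

use llm_weaver::{Config, LlmConfig, Loom, TapestryFragment}; // paths assumed

// Summarize an already-loaded fragment with the configured SummaryModel.
async fn summarize(
    summary_config: LlmConfig<MyConfig, <MyConfig as Config>::SummaryModel>,
    fragment: &TapestryFragment<MyConfig>,
) -> String {
    <MyLoom as Loom<MyConfig>>::generate_summary(summary_config, fragment)
        .await
        .expect("summary generation failed")
}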
fn build_context_message(
role: WrapperRole,
content: String,
account_id: Option<String>
) -> ContextMessage<T>
Helper method to build a ContextMessage.
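A small sketch, with the same hypothetical MyConfig / MyLoom types; the account id value is illustrative:

use llm_weaver::{ContextMessage, Loom, WrapperRole}; // paths assumed

// Wrap raw content into a ContextMessage tagged with a role and an optional account id.
fn make_message(role: WrapperRole, content: String) -> ContextMessage<MyConfig> {
    <MyLoom as Loom<MyConfig>>::build_context_message(
        role,
        content,
        Some("account-123".to_string()), // optional account identifier (illustrative)
    )
}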