Trait llm_weaver::Loom

pub trait Loom<T: Config> {
    // Provided methods
    fn weave<'async_trait, TID>(
        prompt_llm_config: LlmConfig<T, T::PromptModel>,
        summary_llm_config: LlmConfig<T, T::SummaryModel>,
        tapestry_id: TID,
        instructions: String,
        msgs: Vec<ContextMessage<T>>
    ) -> Pin<Box<dyn Future<Output = Result<(<<T as Config>::PromptModel as Llm<T>>::Response, u64, bool)>> + Send + 'async_trait>>
       where TID: 'async_trait + TapestryId,
             Self: Send + 'async_trait { ... }
    fn generate_summary<'life0, 'async_trait>(
        summary_model_config: LlmConfig<T, T::SummaryModel>,
        tapestry_fragment: &'life0 TapestryFragment<T>,
        summary_max_tokens: SummaryModelTokens<T>
    ) -> Pin<Box<dyn Future<Output = Result<String>> + Send + 'async_trait>>
       where 'life0: 'async_trait { ... }
    fn build_context_message(
        role: WrapperRole,
        content: String,
        account_id: Option<String>
    ) -> ContextMessage<T> { ... }
    fn count_tokens_in_messages(
        msgs: impl Iterator<Item = &ContextMessage<T>>
    ) -> <T::PromptModel as Llm<T>>::Tokens { ... }
}

The machine that drives all of the core methods that should be used by any service that needs to prompt an LLM and receive a response.

This is implemented over the Config trait.
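
For illustration only, a minimal sketch of an implementor. Since every method is provided, an empty impl block is enough; MyLoom and MyConfig are hypothetical placeholder names, not items defined by llm_weaver:

// Hypothetical sketch: `MyConfig` is assumed to be some type implementing `Config`.
// Because all of `Loom`'s methods are provided, no method bodies are required here.
struct MyLoom;

impl Loom<MyConfig> for MyLoom {}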

Provided Methods§

fn weave<'async_trait, TID>(
    prompt_llm_config: LlmConfig<T, T::PromptModel>,
    summary_llm_config: LlmConfig<T, T::SummaryModel>,
    tapestry_id: TID,
    instructions: String,
    msgs: Vec<ContextMessage<T>>
) -> Pin<Box<dyn Future<Output = Result<(<<T as Config>::PromptModel as Llm<T>>::Response, u64, bool)>> + Send + 'async_trait>>
   where TID: 'async_trait + TapestryId,
         Self: Send + 'async_trait

Prompt LLM Weaver for a response for the given TapestryId.

Prompts the LLM with the current TapestryFragment instance and the new msgs.

A summary of the current TapestryFragment instance will be generated if the total number of tokens in its context_messages exceeds the maximum number of tokens allowed for the current Config::PromptModel (or a custom maximum). This threshold is affected by Config::TOKEN_THRESHOLD_PERCENTILE.

§Parameters

- prompt_llm_config: the LlmConfig for the Config::PromptModel used to prompt the LLM.
- summary_llm_config: the LlmConfig for the Config::SummaryModel used when a summary must be generated.
- tapestry_id: the TapestryId to prompt a response for.
- instructions: instructions passed to the LLM alongside the prompt.
- msgs: the new ContextMessages to prompt the LLM with.
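
A minimal usage sketch, reusing the hypothetical MyLoom and MyConfig placeholders from the example above and assuming the crate's Result alias; the function and variable names are illustrative only:

async fn prompt_tapestry(
    prompt_llm_config: LlmConfig<MyConfig, <MyConfig as Config>::PromptModel>,
    summary_llm_config: LlmConfig<MyConfig, <MyConfig as Config>::SummaryModel>,
    tapestry_id: impl TapestryId,
    new_msgs: Vec<ContextMessage<MyConfig>>,
) -> Result<()> {
    // `weave` resolves to the prompt model's response plus a `u64` and a `bool`,
    // as shown in the signature above.
    let (response, _count, _flag) = MyLoom::weave(
        prompt_llm_config,
        summary_llm_config,
        tapestry_id,
        "Respond as concisely as possible.".to_string(),
        new_msgs,
    )
    .await?;
    // ... use `response` ...
    Ok(())
}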

fn generate_summary<'life0, 'async_trait>(
    summary_model_config: LlmConfig<T, T::SummaryModel>,
    tapestry_fragment: &'life0 TapestryFragment<T>,
    summary_max_tokens: SummaryModelTokens<T>
) -> Pin<Box<dyn Future<Output = Result<String>> + Send + 'async_trait>>
   where 'life0: 'async_trait

Generates the summary of the current TapestryFragment instance.

Returns the summary message as a string.
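
A sketch of a call site, inside an async function and again using the hypothetical MyLoom/MyConfig placeholders; summary_llm_config, fragment, and max_summary_tokens are assumed to already exist in the caller's scope:

// `fragment` is a `TapestryFragment<MyConfig>` and `max_summary_tokens` is a
// `SummaryModelTokens<MyConfig>`; both are assumptions for this sketch.
let summary: String = MyLoom::generate_summary(
    summary_llm_config,
    &fragment,
    max_summary_tokens,
)
.await?;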

fn build_context_message( role: WrapperRole, content: String, account_id: Option<String> ) -> ContextMessage<T>

Helper method to build a ContextMessage.
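
For example (a sketch; the role, content, and account id values shown are arbitrary illustrations, not values prescribed by the trait):

// `role` is assumed to be a `WrapperRole` constructed elsewhere.
let msg: ContextMessage<MyConfig> = MyLoom::build_context_message(
    role,
    "Hello, Loom!".to_string(),
    Some("account-123".to_string()),
);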

fn count_tokens_in_messages( msgs: impl Iterator<Item = &ContextMessage<T>> ) -> <T::PromptModel as Llm<T>>::Tokens

Helper method to count the number of tokens in msgs, measured in the Config::PromptModel's token type.
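
For example (a sketch; msgs is assumed to be a Vec<ContextMessage<MyConfig>>):

// Count the prompt-model tokens across all messages in the batch.
let total_tokens = MyLoom::count_tokens_in_messages(msgs.iter());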

Object Safety§

This trait is not object safe.

Implementors§