pub trait Chain: Sync + Send {
    // Required method
    fn call<'life0, 'async_trait>(
        &'life0 self,
        input_variables: PromptArgs,
    ) -> Pin<Box<dyn Future<Output = Result<GenerateResult, ChainError>> + Send + 'async_trait>>
    where
        Self: 'async_trait,
        'life0: 'async_trait;

    // Provided methods
    fn invoke<'life0, 'async_trait>(
        &'life0 self,
        input_variables: PromptArgs,
    ) -> Pin<Box<dyn Future<Output = Result<String, ChainError>> + Send + 'async_trait>>
    where
        Self: 'async_trait,
        'life0: 'async_trait { ... }

    fn execute<'life0, 'async_trait>(
        &'life0 self,
        input_variables: PromptArgs,
    ) -> Pin<Box<dyn Future<Output = Result<HashMap<String, Value>, ChainError>> + Send + 'async_trait>>
    where
        Self: 'async_trait,
        'life0: 'async_trait { ... }

    fn stream<'life0, 'async_trait>(
        &'life0 self,
        _input_variables: PromptArgs,
    ) -> Pin<Box<dyn Future<Output = Result<Pin<Box<dyn Stream<Item = Result<StreamData, ChainError>> + Send>>, ChainError>> + Send + 'async_trait>>
    where
        Self: 'async_trait,
        'life0: 'async_trait { ... }

    fn get_input_keys(&self) -> Vec<String> { ... }
    fn get_output_keys(&self) -> Vec<String> { ... }
}
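These signatures show the async-trait expansion that rustdoc renders; an implementor writes a plain async fn instead. Below is a minimal sketch of a custom Chain, assuming the async-trait crate and that GenerateResult derives Default and exposes a generation field (verify both against your langchain-rust version):

use async_trait::async_trait;
// Chain, ChainError, PromptArgs, and GenerateResult come from langchain_rust
// (exact module paths vary by version).

struct EchoChain;

#[async_trait]
impl Chain for EchoChain {
    // Only call() is required; invoke, execute, and stream have default bodies.
    async fn call(&self, input_variables: PromptArgs) -> Result<GenerateResult, ChainError> {
        // PromptArgs is a hashmap of named template variables.
        let input = input_variables
            .get("input")
            .and_then(|v| v.as_str())
            .unwrap_or_default()
            .to_string();
        let mut result = GenerateResult::default(); // assumes a derived Default
        result.generation = input; // echo the input back as the generation
        Ok(result)
    }
}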
Required Methods

fn call<'life0, 'async_trait>(
    &'life0 self,
    input_variables: PromptArgs,
) -> Pin<Box<dyn Future<Output = Result<GenerateResult, ChainError>> + Send + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
Call the Chain and receive as output the result of the generation process, along with additional information like token consumption. The input is a set of variables passed as a PromptArgs hashmap.
Example
// Assumed imports for this example (module paths vary across langchain-rust versions):
// use langchain_rust::chain::{Chain, ConversationalChainBuilder};
// use langchain_rust::llm::openai::{OpenAI, OpenAIModel};
// use langchain_rust::memory::SimpleMemory;
// use langchain_rust::prompt_args;

let llm = OpenAI::default().with_model(OpenAIModel::Gpt35);
let memory = SimpleMemory::new();
let chain = ConversationalChainBuilder::new()
    .llm(llm)
    .memory(memory.into())
    .build()
    .expect("Error building ConversationalChain");

let input_variables = prompt_args! {
    "input" => "I'm from Peru",
};

match chain.call(input_variables).await {
    Ok(result) => {
        println!("Result: {:?}", result);
    },
    Err(e) => panic!("Error calling Chain: {:?}", e),
};
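The GenerateResult printed above bundles the generated text with usage metadata. A hedged sketch of inspecting it inside the Ok arm (the generation and tokens field names are assumptions drawn from typical langchain-rust versions):

// Field names are assumptions; check GenerateResult in your version.
println!("Generation: {}", result.generation);
if let Some(usage) = &result.tokens {
    println!("Total tokens: {}", usage.total_tokens);
}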
Provided Methods

fn invoke<'life0, 'async_trait>(
    &'life0 self,
    input_variables: PromptArgs,
) -> Pin<Box<dyn Future<Output = Result<String, ChainError>> + Send + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
Invoke the Chain and receive just the generation result as a String. The input is a set of variables passed as a PromptArgs hashmap.
Example
// Same assumed imports as the call example above.
let llm = OpenAI::default().with_model(OpenAIModel::Gpt35);
let memory = SimpleMemory::new();
let chain = ConversationalChainBuilder::new()
    .llm(llm)
    .memory(memory.into())
    .build()
    .expect("Error building ConversationalChain");

let input_variables = prompt_args! {
    "input" => "I'm from Peru",
};

match chain.invoke(input_variables).await {
    Ok(result) => {
        println!("Result: {:?}", result);
    },
    Err(e) => panic!("Error invoking Chain: {:?}", e),
};
fn execute<'life0, 'async_trait>(
    &'life0 self,
    input_variables: PromptArgs,
) -> Pin<Box<dyn Future<Output = Result<HashMap<String, Value>, ChainError>> + Send + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
Execute the Chain and return the result of the generation process, along with additional information like token consumption, formatted as a HashMap. The input is a set of variables passed as a PromptArgs hashmap. The key for the generated output is specified by the get_output_keys method (the default key is output).
Example
// Same assumed imports as the call example above.
let llm = OpenAI::default().with_model(OpenAIModel::Gpt35);
let memory = SimpleMemory::new();
let chain = ConversationalChainBuilder::new()
    .llm(llm)
    .memory(memory.into())
    .output_key("name") // the generation will be stored under "name"
    .build()
    .expect("Error building ConversationalChain");

let input_variables = prompt_args! {
    "input" => "I'm from Peru",
};

match chain.execute(input_variables).await {
    Ok(result) => {
        println!("Result: {:?}", result);
    },
    Err(e) => panic!("Error executing Chain: {:?}", e),
};
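Because the builder sets the output key to "name", the generated text lands under that key in the returned HashMap. A small follow-up sketch for the Ok arm (the values are serde_json Values, inferred from the HashMap<String, Value> return type above):

// Read the generation back out under the configured output key.
if let Some(value) = result.get("name") {
    println!("Name: {}", value);
}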
fn stream<'life0, 'async_trait>(
    &'life0 self,
    _input_variables: PromptArgs,
) -> Pin<Box<dyn Future<Output = Result<Pin<Box<dyn Stream<Item = Result<StreamData, ChainError>> + Send>>, ChainError>> + Send + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
Stream the Chain and get an asynchronous stream of chain generations. The input is a set of variables passed as a PromptArgs hashmap. If the chain has memory, the stream method will not be able to update the memory automatically, because it cannot know how to extract the final output message out of the stream.
Example
// Assumed imports for this example (module paths vary across langchain-rust versions):
// use langchain_rust::chain::{Chain, LLMChainBuilder};
// use langchain_rust::llm::openai::OpenAI;
// use langchain_rust::prompt::HumanMessagePromptTemplate;
// use langchain_rust::schemas::Message;
// use langchain_rust::{fmt_message, fmt_template, message_formatter, prompt_args, template_fstring};
// use futures::StreamExt; // provides stream.next()

let open_ai = OpenAI::default();

let prompt = message_formatter![
    fmt_message!(Message::new_system_message(
        "You are a world-class technical documentation writer."
    )),
    fmt_template!(HumanMessagePromptTemplate::new(template_fstring!(
        "{input}", "input"
    ))),
];

let chain = LLMChainBuilder::new()
    .prompt(prompt)
    .llm(open_ai.clone())
    .build()
    .unwrap();

let mut stream = chain
    .stream(prompt_args! {
        "input" => "Who is the writer of 20,000 Leagues Under the Sea?",
    })
    .await
    .unwrap();

while let Some(result) = stream.next().await {
    match result {
        Ok(value) => {
            println!("Content: {}", value.content);
        },
        Err(e) => panic!("Error invoking LLMChain: {:?}", e),
    }
}
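Since stream cannot update memory on its own, a common variant of the loop above accumulates the streamed chunks so that the full generation can be written into memory by hand afterwards. A sketch (only the accumulation is shown, since memory APIs vary by version):

let mut full_output = String::new();
while let Some(result) = stream.next().await {
    match result {
        Ok(value) => full_output.push_str(&value.content),
        Err(e) => panic!("Error streaming LLMChain: {:?}", e),
    }
}
// full_output now holds the complete generation, ready to be stored
// in the chain's memory manually.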