pub struct QuestionAnsweringModel { /* private fields */ }

Implementations
impl QuestionAnsweringModel

pub fn new(
    question_answering_config: QuestionAnsweringConfig
) -> Result<QuestionAnsweringModel, RustBertError>
Build a new QuestionAnsweringModel.
Arguments
question_answering_config - QuestionAnsweringConfig object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
Example
use rust_bert::pipelines::question_answering::QuestionAnsweringModel;

let qa_model = QuestionAnsweringModel::new(Default::default())?;
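Beyond the defaults, a QuestionAnsweringConfig can be customised before building the model. A minimal sketch, assuming device is a public field on QuestionAnsweringConfig (check the crate version in use for the exact fields):

use rust_bert::pipelines::question_answering::{QuestionAnsweringConfig, QuestionAnsweringModel};
use tch::Device;

// Start from the default configuration and only override device placement.
// `device` is assumed here to be a public field on QuestionAnsweringConfig.
let mut config = QuestionAnsweringConfig::default();
config.device = Device::cuda_if_available();
let qa_model = QuestionAnsweringModel::new(config)?;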
pub fn predict(
    &self,
    qa_inputs: &[QaInput],
    top_k: i64,
    batch_size: usize
) -> Vec<Vec<Answer>>
Perform extractive question answering given a list of QaInputs.
Arguments
qa_inputs - &[QaInput] array of question answering inputs (context and question pairs)
top_k - return the top-k answers for each QaInput. Set to 1 to return only the best answer.
batch_size - maximum batch size for the model forward pass.
Returns
Vec<Vec<Answer>> - Vector (same length as qa_inputs) of vectors (each of length top_k) containing the extracted answers.
Example
use rust_bert::pipelines::question_answering::{QaInput, QuestionAnsweringModel};

// Load the model with the default resources
let qa_model = QuestionAnsweringModel::new(Default::default())?;

// Question / context pairs to extract answers from
let question_1 = String::from("Where does Amy live ?");
let context_1 = String::from("Amy lives in Amsterdam");
let question_2 = String::from("Where does Eric live");
let context_2 = String::from("While Amy lives in Amsterdam, Eric is in The Hague.");
let qa_input_1 = QaInput {
    question: question_1,
    context: context_1,
};
let qa_input_2 = QaInput {
    question: question_2,
    context: context_2,
};

// Keep only the best answer per input (top_k = 1), with a maximum batch size of 32
let answers = qa_model.predict(&[qa_input_1, qa_input_2], 1, 32);
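The returned answers can then be inspected per input. A minimal sketch, assuming Answer exposes public answer (the extracted span) and score fields (treat the field names as assumptions if your crate version differs):

for (i, input_answers) in answers.iter().enumerate() {
    for answer in input_answers {
        // `answer.answer` and `answer.score` are assumed public fields of Answer
        println!("input {}: \"{}\" (score {:.3})", i, answer.answer, answer.score);
    }
}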