pub struct QuestionAnsweringModel { /* private fields */ }
Expand description
Implementations§
source§
impl QuestionAnsweringModel
impl QuestionAnsweringModel
source
pub fn new(
question_answering_config: QuestionAnsweringConfig
) -> Result<QuestionAnsweringModel, RustBertError>
pub fn new( question_answering_config: QuestionAnsweringConfig ) -> Result<QuestionAnsweringModel, RustBertError>
Build a new QuestionAnsweringModel
Arguments
question_answering_config - QuestionAnsweringConfig object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
Example
use rust_bert::pipelines::question_answering::QuestionAnsweringModel;
let qa_model = QuestionAnsweringModel::new(Default::default())?;
source
pub fn new_with_tokenizer(
question_answering_config: QuestionAnsweringConfig,
tokenizer: TokenizerOption
) -> Result<QuestionAnsweringModel, RustBertError>
pub fn new_with_tokenizer( question_answering_config: QuestionAnsweringConfig, tokenizer: TokenizerOption ) -> Result<QuestionAnsweringModel, RustBertError>
Build a new QuestionAnsweringModel with a provided tokenizer.
Arguments
question_answering_config - QuestionAnsweringConfig object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
tokenizer - TokenizerOption tokenizer to use for question answering.
Example
use rust_bert::pipelines::common::{ModelType, TokenizerOption};
use rust_bert::pipelines::question_answering::QuestionAnsweringModel;
let tokenizer = TokenizerOption::from_file(
ModelType::Bert,
"path/to/vocab.txt",
None,
false,
None,
None,
)?;
let qa_model = QuestionAnsweringModel::new_with_tokenizer(Default::default(), tokenizer)?;
source
pub fn get_tokenizer(&self) -> &TokenizerOption
pub fn get_tokenizer(&self) -> &TokenizerOption
Get a reference to the model tokenizer.
source
pub fn get_tokenizer_mut(&mut self) -> &mut TokenizerOption
pub fn get_tokenizer_mut(&mut self) -> &mut TokenizerOption
Get a mutable reference to the model tokenizer.
source
pub fn predict(
&self,
qa_inputs: &[QaInput],
top_k: i64,
batch_size: usize
) -> Vec<Vec<Answer>>
pub fn predict( &self, qa_inputs: &[QaInput], top_k: i64, batch_size: usize ) -> Vec<Vec<Answer>>
Perform extractive question answering given a list of QaInputs
Arguments
qa_inputs - &[QaInput] Array of Question Answering inputs (context and question pairs)
top_k - return the top-k answers for each QaInput. Set to 1 to return only the best answer.
batch_size - maximum batch size for the model forward pass.
Returns
Vec<Vec<Answer>> Vector (same length as qa_inputs) of vectors (each of length top_k) containing the extracted answers.
Example
use rust_bert::pipelines::question_answering::{QaInput, QuestionAnsweringModel};
let qa_model = QuestionAnsweringModel::new(Default::default())?;
let question_1 = String::from("Where does Amy live ?");
let context_1 = String::from("Amy lives in Amsterdam");
let question_2 = String::from("Where does Eric live");
let context_2 = String::from("While Amy lives in Amsterdam, Eric is in The Hague.");
let qa_input_1 = QaInput {
question: question_1,
context: context_1,
};
let qa_input_2 = QaInput {
question: question_2,
context: context_2,
};
let answers = qa_model.predict(&[qa_input_1, qa_input_2], 1, 32);
Auto Trait Implementations§
impl RefUnwindSafe for QuestionAnsweringModel
impl Send for QuestionAnsweringModel
impl !Sync for QuestionAnsweringModel
impl Unpin for QuestionAnsweringModel
impl UnwindSafe for QuestionAnsweringModel
Blanket Implementations§
source§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
source§
fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more