rust_bert/models/roberta/mod.rs
//! # RoBERTa: A Robustly Optimized BERT Pretraining Approach (Liu et al.)
//!
//! Implementation of the RoBERTa language model ([https://arxiv.org/abs/1907.11692](https://arxiv.org/abs/1907.11692) Liu, Ott, Goyal, Du, Joshi, Chen, Levy, Lewis, Zettlemoyer, Stoyanov, 2019).
//! The base model is implemented in the `bert_model::BertModel` struct. Several language model heads have also been implemented, including:
//! - Masked language model: `roberta_model::RobertaForMaskedLM`
//! - Multiple choice: `roberta_model::RobertaForMultipleChoice`
//! - Question answering: `roberta_model::RobertaForQuestionAnswering`
//! - Sequence classification: `roberta_model::RobertaForSequenceClassification`
//! - Token classification (e.g. NER, POS tagging): `roberta_model::RobertaForTokenClassification`
//!
//! # Model set-up and pre-trained weights loading
//!
//! The example below illustrates a masked language model; the set-up is similar for the other models.
//! All models expect the following resources:
//! - Configuration file with a structure following the [Transformers library](https://github.com/huggingface/transformers)
//! - Model weights with a structure and parameter names following the [Transformers library](https://github.com/huggingface/transformers). A conversion using the Python utility scripts is required to convert the `.bin` weights to the `.ot` format.
//! - `RobertaTokenizer` using a `vocab.txt` vocabulary and `merges.txt` BPE merges
//!
//! Pretrained models are available and can be downloaded at run time using `RemoteResource`s, as sketched below.
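//!
//! A minimal sketch of the remote set-up, using the resource pointers re-exported by this
//! module (the constant name `ROBERTA` is the assumed pretrained pointer; other pretrained
//! checkpoints follow the same pattern):
//!
//! ```no_run
//! # fn main() -> anyhow::Result<()> {
//! use rust_bert::resources::{RemoteResource, ResourceProvider};
//! use rust_bert::roberta::{
//!     RobertaConfigResources, RobertaMergesResources, RobertaModelResources,
//!     RobertaVocabResources,
//! };
//!
//! // Each resource is downloaded and cached locally on the first call.
//! let config_path = RemoteResource::from_pretrained(RobertaConfigResources::ROBERTA).get_local_path()?;
//! let vocab_path = RemoteResource::from_pretrained(RobertaVocabResources::ROBERTA).get_local_path()?;
//! let merges_path = RemoteResource::from_pretrained(RobertaMergesResources::ROBERTA).get_local_path()?;
//! let weights_path = RemoteResource::from_pretrained(RobertaModelResources::ROBERTA).get_local_path()?;
//! # Ok(())
//! # }
//! ```
//!
//! The local-resource equivalent below shows the full model set-up: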
//!
//! ```no_run
//! # fn main() -> anyhow::Result<()> {
//! #
//! use tch::{nn, Device};
//! # use std::path::PathBuf;
//! use rust_bert::bert::BertConfig;
//! use rust_bert::resources::{LocalResource, ResourceProvider};
//! use rust_bert::roberta::RobertaForMaskedLM;
//! use rust_bert::Config;
//! use rust_tokenizers::tokenizer::RobertaTokenizer;
//!
//! let config_resource = LocalResource {
//!     local_path: PathBuf::from("path/to/config.json"),
//! };
//! let vocab_resource = LocalResource {
//!     local_path: PathBuf::from("path/to/vocab.txt"),
//! };
//! let merges_resource = LocalResource {
//!     local_path: PathBuf::from("path/to/merges.txt"),
//! };
//! let weights_resource = LocalResource {
//!     local_path: PathBuf::from("path/to/model.ot"),
//! };
//! let config_path = config_resource.get_local_path()?;
//! let vocab_path = vocab_resource.get_local_path()?;
//! let merges_path = merges_resource.get_local_path()?;
//! let weights_path = weights_resource.get_local_path()?;
//!
//! let device = Device::cuda_if_available();
//! let mut vs = nn::VarStore::new(device);
//! let tokenizer: RobertaTokenizer = RobertaTokenizer::from_file(
//!     vocab_path.to_str().unwrap(),
//!     merges_path.to_str().unwrap(),
//!     true, // lower_case
//!     true, // add_prefix_space
//! )?;
//! let config = BertConfig::from_file(config_path);
//! let roberta_model = RobertaForMaskedLM::new(&vs.root(), &config);
//! // Load the pre-trained weights into the freshly initialised variable store.
//! vs.load(weights_path)?;
//!
//! # Ok(())
//! # }
//! ```
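//!
//! Once the weights are loaded, inference follows the usual `forward_t` pattern. A minimal
//! sketch, assuming the `forward_t` signature documented on `RobertaForMaskedLM` (the input
//! sentence is made up, the hidden set-up reuses the paths from the example above, and
//! `Tensor::of_slice` is named `Tensor::from_slice` in newer `tch` releases):
//!
//! ```no_run
//! # use rust_bert::bert::BertConfig;
//! # use rust_bert::roberta::RobertaForMaskedLM;
//! # use rust_bert::Config;
//! # use rust_tokenizers::tokenizer::{RobertaTokenizer, Tokenizer, TruncationStrategy};
//! # use tch::{nn, no_grad, Device, Tensor};
//! # fn main() -> anyhow::Result<()> {
//! # let device = Device::cuda_if_available();
//! # let mut vs = nn::VarStore::new(device);
//! # let tokenizer = RobertaTokenizer::from_file("path/to/vocab.txt", "path/to/merges.txt", true, true)?;
//! # let config = BertConfig::from_file("path/to/config.json");
//! # let roberta_model = RobertaForMaskedLM::new(&vs.root(), &config);
//! # vs.load("path/to/model.ot")?;
//! // Tokenize the input and build a (1, sequence length) tensor of token ids.
//! let input = tokenizer.encode(
//!     "The capital of France is <mask>.",
//!     None,
//!     256,
//!     &TruncationStrategy::LongestFirst,
//!     0,
//! );
//! let input_tensor = Tensor::of_slice(&input.token_ids).unsqueeze(0).to(device);
//!
//! // Forward pass without gradient tracking; `train: false` disables dropout.
//! let model_output = no_grad(|| {
//!     roberta_model.forward_t(Some(&input_tensor), None, None, None, None, None, None, false)
//! });
//! // Logits over the vocabulary: (batch size, sequence length, vocabulary size).
//! let _prediction_scores = model_output.prediction_scores;
//! # Ok(())
//! # }
//! ```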

mod embeddings;
mod roberta_model;

pub use embeddings::RobertaEmbeddings;
pub use roberta_model::{
    RobertaConfig, RobertaConfigResources, RobertaForMaskedLM, RobertaForMultipleChoice,
    RobertaForQuestionAnswering, RobertaForSentenceEmbeddings, RobertaForSequenceClassification,
    RobertaForTokenClassification, RobertaMergesResources, RobertaModelResources,
    RobertaVocabResources,
};