Struct rust_bert::gpt2::GPT2LMHeadModel
pub struct GPT2LMHeadModel { /* fields omitted */ }
GPT2 Language Modeling head
GPT2 model with a decoding head (linear layer without bias). The weights of the linear layer are tied to the word embeddings. It is made of the following blocks:

transformer - Base Gpt2Model
Implementations
Build a new GPT2LMHeadModel
Arguments
p - Variable store path for the root of the GPT2 model
config - Gpt2Config object defining the model architecture
Example
use rust_bert::gpt2::{GPT2LMHeadModel, Gpt2Config};
use rust_bert::Config;
use std::path::Path;
use tch::{nn, Device};
let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let p = nn::VarStore::new(device);
let config = Gpt2Config::from_file(config_path);
let gpt2: GPT2LMHeadModel = GPT2LMHeadModel::new(&p.root() / "gpt2", &config);
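The variable store is created with randomly initialized weights. A typical next step, shown here as a hedged sketch, is to populate it with pretrained weights via tch's VarStore::load; the "path/to/model.ot" file is a placeholder and must contain variables matching the names created under the "gpt2" path:

let mut p = nn::VarStore::new(device);
let gpt2: GPT2LMHeadModel = GPT2LMHeadModel::new(&p.root() / "gpt2", &config);
// Assumption: the weights were exported in tch's .ot format with variable
// names matching those created by GPT2LMHeadModel::new under "gpt2".
p.load(Path::new("path/to/model.ot")).expect("failed to load weights");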
Trait Implementations
fn forward_t(
&self,
input_ids: Option<&Tensor>,
layer_past: Cache,
attention_mask: Option<&Tensor>,
token_type_ids: Option<&Tensor>,
position_ids: Option<&Tensor>,
input_embeds: Option<&Tensor>,
_encoder_outputs: Option<&Tensor>,
_decoder_input_ids: Option<&Tensor>,
train: bool
) -> Result<LMModelOutput, RustBertError>
Forward pass through the model
Arguments
input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
layer_past - Optional vector of size n_layer containing the past keys and values of each layer of shape (2, batch size, number of heads, past_sequence_length, hidden size per head). When provided, these are concatenated with the current input keys and values.
attention_mask - Optional mask of shape (batch size, sequence_length). Masked positions have value 0, non-masked value 1. If None, set to 1.
input_embeds - Optional pre-computed input embeddings of shape (batch size, sequence_length, hidden_size). If None, input ids must be provided (see input_ids)
token_type_ids - Optional token type ids used to indicate the portion of the input the token belongs to. If not None, token type embeddings will be added to the token and position embeddings.
position_ids - Optional position ids of shape (batch size, sequence_length). If None, will be incremented starting from the length of the past input.
_encoder_outputs - Optional tensor of shape (batch size, source_sequence_length, encoder_hidden_dim). Unused for GPT2.
_decoder_input_ids - Optional tensor of shape (batch size, target_sequence_length). Unused for GPT2.
train - Boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.
Returns
LMModelOutput containing:

lm_logits - Tensor of shape (batch size, sequence_length, vocab_size) representing the logits for each vocab item and position
cache - Gpt2Cache made of Option<Vec<Tensor>> of length n_layer containing the past keys and values of each layer of shape (2, batch size, number of heads, past_sequence_length, hidden size per head)
Example
use rust_bert::gpt2::{GPT2LMHeadModel, Gpt2Config};
use rust_bert::pipelines::generation_utils::{Cache, LMHeadModel};
use rust_bert::Config;
use std::path::Path;
use tch::Kind::{Double, Int64};
use tch::{nn, no_grad, Device, Tensor};

// Placeholder path; a real Gpt2Config JSON file is required to run this.
let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let config = Gpt2Config::from_file(config_path);
let gpt2_model: GPT2LMHeadModel = GPT2LMHeadModel::new(&vs.root(), &config);

let (batch_size, sequence_length, past_sequence_length) = (64, 128, 56);
// Random token ids in [0, vocab_size) standing in for a tokenized batch.
let input_tensor = Tensor::randint(config.vocab_size, &[batch_size, sequence_length], (Int64, device));
// One past key/value tensor per layer.
let mut past: Vec<Tensor> = Vec::with_capacity(config.n_layer as usize);
for _ in 0..config.n_layer as usize {
past.push(Tensor::rand(
&[
2,
batch_size,
config.n_head,
past_sequence_length,
config.n_embd / config.n_head,
],
(Double, device),
))
}
let attention_mask = Tensor::zeros(&[batch_size, sequence_length], (Int64, device));
let token_type_ids = Tensor::ones(&[batch_size, sequence_length], (Int64, device));
let position_ids = Tensor::arange(sequence_length, (Int64, device))
.expand(&[batch_size, sequence_length], true);
let model_output = no_grad(|| {
gpt2_model
.forward_t(
Some(&input_tensor),
Cache::GPT2Cache(Some(past)),
Some(&attention_mask),
Some(&token_type_ids),
Some(&position_ids),
None,
None,
None,
false,
)
.unwrap()
});
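A minimal sketch of consuming the LMModelOutput fields documented in the Returns section, assuming the model_output value from the example above: the last-position logits yield a greedy next-token prediction, and the returned cache can be fed back as layer_past on a subsequent call.

// Logits for the final position of each sequence: (batch size, vocab_size).
let next_token_logits = model_output.lm_logits.select(1, -1);
// Greedy decoding: pick the highest-scoring vocabulary item per sequence.
let next_tokens = next_token_logits.argmax(-1, false);
// `model_output.cache` holds the updated Cache::GPT2Cache, which can be
// passed as `layer_past` in the next forward pass to avoid recomputation.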
Generate text based on a vector of prompt texts.
Generate token indices without decoding (useful for token-level operations before returning final text or as a validation step during training).
fn generate_from_ids_and_past(
&self,
input_ids: Tensor,
attention_mask: Option<Tensor>,
generate_options: Option<GenerateOptions<'_>>
) -> Vec<GeneratedIndicesOutput>
Generate token indices given a list of indices (useful when the input has been pre-tokenized). Returns a list of output tokens that need to be decoded using a tokenizer.
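A hedged sketch of calling this method with the signature shown above. The gpt2_generator value and the input token ids are illustrative assumptions; constructing a generator is not covered on this page.

use tch::Tensor;

// `gpt2_generator` stands for any value implementing this trait (assumption).
let input_ids = Tensor::of_slice(&[318i64, 247, 1049]).unsqueeze(0); // (1, 3)
let generated = gpt2_generator.generate_from_ids_and_past(input_ids, None, None);
for output in generated {
    // Assumption: GeneratedIndicesOutput exposes the generated token ids in
    // an `indices` field; these still need decoding with a tokenizer.
    println!("{:?}", output.indices);
}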
Returns a reference to the text generator’s tokenizer.
Auto Trait Implementations
impl RefUnwindSafe for GPT2LMHeadModel
impl Send for GPT2LMHeadModel
impl !Sync for GPT2LMHeadModel
impl Unpin for GPT2LMHeadModel
impl UnwindSafe for GPT2LMHeadModel
Blanket Implementations
Mutably borrows from an owned value.
Instruments this type with the provided Span, returning an Instrumented wrapper.
type Output = T
Should always be Self