Trait rust_bert::pipelines::generation::LMHeadModel

pub trait LMHeadModel {
    fn forward_t(
        &self,
        input_ids: &Option<Tensor>,
        layer_past: Cache,
        attention_mask: &Option<Tensor>,
        token_type_ids: &Option<Tensor>,
        position_ids: &Option<Tensor>,
        input_embeds: &Option<Tensor>,
        encoder_outputs: Option<&Tensor>,
        decoder_input_ids: &Option<Tensor>,
        train: bool
    ) -> Result<(Tensor, Option<Tensor>, Cache, Option<Vec<Tensor>>, Option<Vec<Tensor>>), &'static str>; }

Language Model trait

Shared trait between language generation models (e.g. GPT2, GPT, BART) used in language generation pipelines.
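As an illustration of the contract, the following is a minimal sketch of a hand-rolled implementation. UniformLM is a hypothetical toy model (not part of the crate) that ignores everything except the input shape and returns uniform logits, with no cache, hidden states or attention weights.

use rust_bert::pipelines::generation::{Cache, LMHeadModel};
use tch::{Device, Kind, Tensor};

// Hypothetical toy model used only to illustrate the trait contract.
struct UniformLM {
    vocab_size: i64,
}

impl LMHeadModel for UniformLM {
    fn forward_t(
        &self,
        input_ids: &Option<Tensor>,
        _layer_past: Cache,
        _attention_mask: &Option<Tensor>,
        _token_type_ids: &Option<Tensor>,
        _position_ids: &Option<Tensor>,
        _input_embeds: &Option<Tensor>,
        _encoder_outputs: Option<&Tensor>,
        _decoder_input_ids: &Option<Tensor>,
        _train: bool,
    ) -> Result<(Tensor, Option<Tensor>, Cache, Option<Vec<Tensor>>, Option<Vec<Tensor>>), &'static str> {
        let input_ids = input_ids.as_ref().ok_or("input_ids must be provided")?;
        let (batch_size, sequence_length) = input_ids
            .size2()
            .map_err(|_| "input_ids must be 2-dimensional")?;
        // Uniform logits: every vocabulary item is equally likely at every position.
        let logits = Tensor::zeros(
            &[batch_size, sequence_length, self.vocab_size],
            (Kind::Float, Device::Cpu),
        );
        // No encoder output, no cache, no hidden states, no attention weights.
        Ok((logits, None, Cache::None, None, None))
    }
}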

Required methods

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    layer_past: Cache,
    attention_mask: &Option<Tensor>,
    token_type_ids: &Option<Tensor>,
    position_ids: &Option<Tensor>,
    input_embeds: &Option<Tensor>,
    encoder_outputs: Option<&Tensor>,
    decoder_input_ids: &Option<Tensor>,
    train: bool
) -> Result<(Tensor, Option<Tensor>, Cache, Option<Vec<Tensor>>, Option<Vec<Tensor>>), &'static str>

Forward pass through the model. Example provided for GPT2.

Arguments

  • input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
  • layer_past - Cache wrapping, for models that support it (e.g. GPT2), an optional vector of size n_layer containing the past keys and values of each layer of shape (2, batch size, number of heads, past_sequence_length, hidden size per head). When provided, these are concatenated with the current input keys and values (a sketch of reusing the returned cache follows the example below).
  • attention_mask - Optional mask of shape (batch size, sequence_length). Masked positions have value 0, non-masked positions have value 1. If None, defaults to 1 (no masking)
  • input_embeds - Optional pre-computed input embeddings of shape (batch size, sequence_length, hidden_size). If None, input ids must be provided (see input_ids)
  • token_type_ids - Optional token type ids used to indicate the portion of the input the token belongs to. If not None, token type embeddings will be added to the token and position embeddings.
  • position_ids - Optional position ids of shape (batch size, sequence_length). If None, position ids are assigned incrementally, starting from the length of the past input.
  • train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.

Returns

  • output - Tensor of shape (batch size, sequence_length, vocab_size) representing the logits for each vocab item and position
  • encoder_hidden_states - Option<Tensor> containing the encoder last hidden state for encoder-decoder models (e.g. BART), None otherwise
  • past - Cache wrapping the updated past state; for GPT2, an Option<Vec<Tensor>> of length n_layer containing the past keys and values of each layer of shape (2, batch size, number of heads, past_sequence_length, hidden size per head)
  • hidden_states - Option<Vec<Tensor>> of length num_hidden_layers with shape (batch size, sequence_length, hidden_size)
  • attentions - Option<Vec<Tensor>> of length num_hidden_layers with shape (batch size, sequence_length, hidden_size)

Example

use std::path::Path;
use tch::{nn, no_grad, Device, Tensor, Kind::{Double, Int64}};
use rust_bert::gpt2::{GPT2LMHeadModel, Gpt2Config};
use rust_bert::pipelines::generation::{Cache, LMHeadModel};
use rust_bert::Config;

// Setup: paths below are placeholders; pre-trained weights must be loaded into `vs` separately.
let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let config = Gpt2Config::from_file(config_path);
let gpt2_model = GPT2LMHeadModel::new(&vs.root(), &config);

let (batch_size, sequence_length, past_sequence_length) = (64, 128, 56);
let input_tensor = Tensor::rand(&[batch_size, sequence_length], (Int64, device));
let mut past: Vec<Tensor> = Vec::with_capacity(config.n_layer as usize);
for _ in 0..config.n_layer as usize {
    past.push(Tensor::rand(
        &[
            2,
            batch_size,
            config.n_head,
            past_sequence_length,
            config.n_embd / config.n_head,
        ],
        (Double, device),
    ))
}
let attention_mask = Tensor::zeros(&[batch_size, sequence_length], (Int64, device));
let token_type_ids = Tensor::ones(&[batch_size, sequence_length], (Int64, device));
let position_ids = Tensor::arange(sequence_length, (Int64, device))
    .expand(&[batch_size, sequence_length], true);

let (output, encoder_output, past, hidden_states, attentions) = no_grad(|| {
    gpt2_model
        .forward_t(
            &Some(input_tensor),
            Cache::GPT2Cache(Some(past)),
            &Some(attention_mask),
            &Some(token_type_ids),
            &Some(position_ids),
            &None,
            None,
            &None,
            false,
        )
        .unwrap()
});
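The Cache returned by forward_t can be passed back as layer_past for the next decoding step so that the keys and values of the past sequence are not recomputed. A minimal sketch continuing the example above (the token id used here is a placeholder):

let next_token_ids = Tensor::full(&[batch_size, 1], 50256, (Int64, device)); // placeholder token id
let (next_output, _, next_cache, _, _) = no_grad(|| {
    gpt2_model
        .forward_t(
            &Some(next_token_ids),
            past, // cache returned by the previous call
            &None,
            &None,
            &None,
            &None,
            None,
            &None,
            false,
        )
        .unwrap()
});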

Implementors

impl LMHeadModel for BartForConditionalGeneration

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    cache: Cache,
    attention_mask: &Option<Tensor>,
    _token_type_ids: &Option<Tensor>,
    _position_ids: &Option<Tensor>,
    _input_embeds: &Option<Tensor>,
    encoder_outputs: Option<&Tensor>,
    decoder_input_ids: &Option<Tensor>,
    train: bool
) -> Result<(Tensor, Option<Tensor>, Cache, Option<Vec<Tensor>>, Option<Vec<Tensor>>), &'static str>

Forward pass through the model

Arguments

  • input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
  • layer_past - Unused for BART
  • attention_mask - Optional mask of shape (batch size, sequence_length). Masked positions have value 0, non-masked positions have value 1. If None, defaults to 1 (no masking)
  • input_embeds - Unused for BART
  • token_type_ids - Unused for BART
  • position_ids - Unused for BART
  • encoder_outputs - Optional tensor of shape (batch size, source_sequence_length, encoder_hidden_dim) corresponding to the encoder last hidden state. When provided, the encoder hidden state will not be recalculated. Useful for generation tasks.
  • decoder_input_ids - Optional input tensor of shape (batch size, target_sequence_length). Must be provided when running in generation mode (e.g. initialized with a BOS token; see the sketch after the example below)
  • train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.

Returns

  • lm_logits - Tensor of shape (batch size, sequence_length, vocab_size) representing the logits for each vocab item and position
  • past - None
  • encoder_hidden_states - Option<Tensor> Hidden states for the encoder
  • hidden_states - None
  • attentions - None

Example

use tch::{no_grad, Device, Tensor, Kind::Int64};
use rust_bert::bart::{BartForConditionalGeneration, BartConfig};
use rust_bert::pipelines::generation::LMHeadModel;

// `bart_model` is assumed to be a BartForConditionalGeneration that was built and loaded
// beforehand (see the BartForConditionalGeneration documentation for the construction steps).
let device = Device::Cpu;
let (batch_size, source_sequence_length, target_sequence_length) = (64, 128, 56);
let input_tensor = Tensor::rand(&[batch_size, source_sequence_length], (Int64, device));
let target_tensor = Tensor::rand(&[batch_size, target_sequence_length], (Int64, device));
let encoder_attention_mask = Tensor::ones(&[batch_size, source_sequence_length], (Int64, device));
let decoder_attention_mask = Tensor::ones(&[batch_size, target_sequence_length], (Int64, device));

let (
    decoder_output,
    encoder_hidden_states,
    cache,
    all_encoder_hidden_states,
    all_encoder_attentions,
    all_decoder_hidden_states,
    all_decoder_attentions,
) = no_grad(|| {
    bart_model.forward_t(
        Some(&input_tensor),
        Some(&encoder_attention_mask),
        None,
        Some(&target_tensor),
        Some(&decoder_attention_mask),
        None,
        false,
    )
});
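For generation mode, as referenced from the decoder_input_ids argument above, the decoder input would instead be initialized with a single BOS token per batch element. A minimal sketch (the token id 0 is a placeholder; use the BOS id from the actual vocabulary):

let bos_token_id: i64 = 0; // placeholder; take the BOS id from the model vocabulary
let decoder_start_ids = Tensor::full(&[batch_size, 1], bos_token_id, (Int64, device));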

impl LMHeadModel for GPT2LMHeadModel

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    layer_past: Cache,
    attention_mask: &Option<Tensor>,
    token_type_ids: &Option<Tensor>,
    position_ids: &Option<Tensor>,
    input_embeds: &Option<Tensor>,
    _encoder_outputs: Option<&Tensor>,
    _decoder_input_ids: &Option<Tensor>,
    train: bool
) -> Result<(Tensor, Option<Tensor>, Cache, Option<Vec<Tensor>>, Option<Vec<Tensor>>), &'static str>

Forward pass through the model

Arguments

  • input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
  • layer_past - Cache wrapping an optional vector of size n_layer containing the past keys and values of each layer of shape (2, batch size, number of heads, past_sequence_length, hidden size per head). When provided, these are concatenated with the current input keys and values.
  • attention_mask - Optional mask of shape (batch size, sequence_length). Masked positions have value 0, non-masked positions have value 1. If None, defaults to 1 (no masking)
  • input_embeds - Optional pre-computed input embeddings of shape (batch size, sequence_length, hidden_size). If None, input ids must be provided (see input_ids)
  • token_type_ids - Optional token type ids used to indicate the portion of the input the token belongs to. If not None, token type embeddings will be added to the token and position embeddings.
  • position_ids - Optional position ids of shape (batch size, sequence_length). If None, position ids are assigned incrementally, starting from the length of the past input.
  • _encoder_outputs - Optional tensor of shape (batch size, source_sequence_length, encoder_hidden_dim). Unused for GPT2
  • _decoder_input_ids - Optional tensor of shape (batch size, target_sequence_length). Unused for GPT2
  • train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.

Returns

  • output - Tensor of shape (batch size, sequence_length, vocab_size) representing the logits for each vocab item and position
  • encoder_hidden_states - None
  • past - Option<Vec<Tensor>> of length n_layer containing the past keys and values of each layer of shape (2, batch size, number of heads, past_sequence_length, hidden size per head)
  • hidden_states - Option<Vec<Tensor>> of length num_hidden_layers with shape (batch size, sequence_length, hidden_size)
  • attentions - Option<Vec<Tensor>> of length num_hidden_layers with shape (batch size, sequence_length, hidden_size)

Example

use std::path::Path;
use tch::{nn, no_grad, Device, Tensor, Kind::{Double, Int64}};
use rust_bert::gpt2::{GPT2LMHeadModel, Gpt2Config};
use rust_bert::pipelines::generation::{Cache, LMHeadModel};
use rust_bert::Config;

// Setup: paths below are placeholders; pre-trained weights must be loaded into `vs` separately.
let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let config = Gpt2Config::from_file(config_path);
let gpt2_model = GPT2LMHeadModel::new(&vs.root(), &config);

let (batch_size, sequence_length, past_sequence_length) = (64, 128, 56);
let input_tensor = Tensor::rand(&[batch_size, sequence_length], (Int64, device));
let mut past: Vec<Tensor> = Vec::with_capacity(config.n_layer as usize);
for _ in 0..config.n_layer as usize {
    past.push(Tensor::rand(
        &[
            2,
            batch_size,
            config.n_head,
            past_sequence_length,
            config.n_embd / config.n_head,
        ],
        (Double, device),
    ))
}
let attention_mask = Tensor::zeros(&[batch_size, sequence_length], (Int64, device));
let token_type_ids = Tensor::ones(&[batch_size, sequence_length], (Int64, device));
let position_ids = Tensor::arange(sequence_length, (Int64, device))
    .expand(&[batch_size, sequence_length], true);

let (output, _, past, hidden_states, attentions) = no_grad(|| {
    gpt2_model
        .forward_t(
            &Some(input_tensor),
            Cache::GPT2Cache(Some(past)),
            &Some(attention_mask),
            &Some(token_type_ids),
            &Some(position_ids),
            &None,
            None,
            &None,
            false,
        )
        .unwrap()
});
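The returned logits can be turned into a greedy next-token prediction. A minimal sketch continuing the example above:

// Take the logits of the last position and pick the highest-scoring vocabulary index
// for each batch element.
let next_token_ids = output.select(1, sequence_length - 1).argmax(-1, false);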

impl LMHeadModel for MarianForConditionalGeneration

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    cache: Cache,
    attention_mask: &Option<Tensor>,
    _token_type_ids: &Option<Tensor>,
    _position_ids: &Option<Tensor>,
    _input_embeds: &Option<Tensor>,
    encoder_outputs: Option<&Tensor>,
    decoder_input_ids: &Option<Tensor>,
    train: bool
) -> Result<(Tensor, Option<Tensor>, Cache, Option<Vec<Tensor>>, Option<Vec<Tensor>>), &'static str>

Forward pass through the model

Arguments

  • input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
  • layer_past - Unused for Marian
  • attention_mask - Optional mask of shape (batch size, sequence_length). Masked positions have value 0, non-masked positions have value 1. If None, defaults to 1 (no masking)
  • input_embeds - Unused for Marian
  • token_type_ids - Unused for Marian
  • position_ids - Unused for Marian
  • encoder_outputs - Optional tensor of shape (batch size, source_sequence_length, encoder_hidden_dim) corresponding to the encoder last hidden state. When provided, the encoder hidden state will not be recalculated. Useful for generation tasks.
  • decoder_input_ids - Optional input tensor of shape (batch size, target_sequence_length). Must be provided when running in generation mode (e.g. initialized with a BOS token)
  • train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.

Returns

  • lm_logits - Tensor of shape (batch size, sequence_length, vocab_size) representing the logits for each vocab item and position
  • past - None
  • encoder_hidden_states - Option<Tensor> Hidden states for the encoder
  • hidden_states - None
  • attentions - None

Example

use tch::{no_grad, Device, Tensor, Kind::Int64};
use rust_bert::bart::BartConfig;
use rust_bert::marian::MarianForConditionalGeneration;

// `marian_model` is assumed to be a MarianForConditionalGeneration that was built and
// loaded beforehand (see the MarianForConditionalGeneration documentation).
let device = Device::Cpu;
let (batch_size, source_sequence_length, target_sequence_length) = (64, 128, 56);
let input_tensor = Tensor::rand(&[batch_size, source_sequence_length], (Int64, device));
let target_tensor = Tensor::rand(&[batch_size, target_sequence_length], (Int64, device));
let encoder_attention_mask =
    Tensor::ones(&[batch_size, source_sequence_length], (Int64, device));
let decoder_attention_mask =
    Tensor::ones(&[batch_size, target_sequence_length], (Int64, device));

let (
    decoder_output,
    encoder_hidden_states,
    cache,
    all_encoder_hidden_states,
    all_encoder_attentions,
    all_decoder_hidden_states,
    all_decoder_attentions,
) = no_grad(|| {
    marian_model.forward_t(
        Some(&input_tensor),
        Some(&encoder_attention_mask),
        None,
        Some(&target_tensor),
        Some(&decoder_attention_mask),
        None,
        false,
    )
});

impl LMHeadModel for OpenAIGPTLMHeadModel

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    _layer_past: Cache,
    attention_mask: &Option<Tensor>,
    token_type_ids: &Option<Tensor>,
    position_ids: &Option<Tensor>,
    input_embeds: &Option<Tensor>,
    _encoder_outputs: Option<&Tensor>,
    _decoder_input_ids: &Option<Tensor>,
    train: bool
) -> Result<(Tensor, Option<Tensor>, Cache, Option<Vec<Tensor>>, Option<Vec<Tensor>>), &'static str>

Forward pass through the model

Arguments

  • input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
  • _layer_past - Unused for GPT
  • attention_mask - Optional mask of shape (batch size, sequence_length). Masked positions have value 0, non-masked positions have value 1. If None, defaults to 1 (no masking)
  • input_embeds - Optional pre-computed input embeddings of shape (batch size, sequence_length, hidden_size). If None, input ids must be provided (see input_ids)
  • token_type_ids - Optional token type ids used to indicate the portion of the input the token belongs to. If not None, token type embeddings will be added to the token and position embeddings.
  • position_ids - Optional position ids of shape (batch size, sequence_length). If None, position ids are assigned incrementally, starting from the length of the past input.
  • _encoder_outputs - Unused for GPT
  • _decoder_input_ids - Unused for GPT
  • train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.

Returns

  • output - Tensor of shape (batch size, sequence_length, vocab_size) representing the logits for each vocab item and position
  • encoder_hidden_states - None
  • past - None
  • hidden_states - Option<Vec<Tensor>> of length num_hidden_layers with shape (batch size, sequence_length, hidden_size)
  • attentions - Option<Vec<Tensor>> of length num_hidden_layers with shape (batch size, sequence_length, hidden_size)

Example

use std::path::Path;
use tch::{nn, no_grad, Device, Tensor, Kind::Int64};
use rust_bert::gpt2::Gpt2Config;
use rust_bert::openai_gpt::OpenAIGPTLMHeadModel;
use rust_bert::pipelines::generation::{Cache, LMHeadModel};
use rust_bert::Config;

// Setup: the path below is a placeholder; pre-trained weights must be loaded into `vs` separately.
let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let config = Gpt2Config::from_file(config_path);
let gpt_model = OpenAIGPTLMHeadModel::new(&vs.root(), &config);

let (batch_size, sequence_length, past_sequence_length) = (64, 128, 56);
let input_tensor = Tensor::rand(&[batch_size, sequence_length], (Int64, device));
let attention_mask = Tensor::zeros(&[batch_size, sequence_length], (Int64, device));
let token_type_ids = Tensor::ones(&[batch_size, sequence_length], (Int64, device));
let position_ids = Tensor::arange(sequence_length, (Int64, device))
    .expand(&[batch_size, sequence_length], true);

let (output, _, _, hidden_states, attentions) = no_grad(|| {
    gpt_model
        .forward_t(
            &Some(input_tensor),
            Cache::None,
            &Some(attention_mask),
            &Some(token_type_ids),
            &Some(position_ids),
            &None,
            None,
            &None,
            false,
        )
        .unwrap()
});
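Since this implementation returns no cache (past is None), autoregressive decoding has to re-encode the full running sequence at every step. A minimal greedy-decoding sketch continuing the example above (the prompt ids and the number of steps are placeholders; this is an illustration, not the crate's generation pipeline):

let mut generated = Tensor::ones(&[batch_size, 4], (Int64, device)); // placeholder prompt ids
for _ in 0..5 {
    let current_length = generated.size()[1];
    let (step_logits, _, _, _, _) = no_grad(|| {
        gpt_model
            .forward_t(
                &Some(generated.copy()),
                Cache::None,
                &None,
                &None,
                &None,
                &None,
                None,
                &None,
                false,
            )
            .unwrap()
    });
    // Pick the highest-scoring token at the last position and append it to the sequence.
    let next_tokens = step_logits
        .select(1, current_length - 1)
        .argmax(-1, false)
        .unsqueeze(1);
    generated = Tensor::cat(&[generated, next_tokens], 1);
}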