Struct rust_bert::gpt2::GPT2LMHeadModel

pub struct GPT2LMHeadModel { /* fields omitted */ }

GPT2 Language Modeling head

GPT2 model with a decoding head (linear layer without bias). The weights of the linear layer are tied to the word embeddings; a short sketch of this tying follows the list below. It is made of the following blocks:

  • transformer: Base Gpt2Model
  • lm_head: Linear layer without bias tied to the weights of the token id embeddings
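
Because of this tying, the head stores no weights of its own: logits are obtained by multiplying the hidden states with the transpose of the token embedding matrix. A minimal sketch of the idea with tch-rs (tied_lm_logits is a hypothetical helper for illustration, not part of the crate's API):

use tch::{nn, Tensor};

// Hypothetical helper: reuse the token embedding matrix wte.ws of shape
// (vocab_size, hidden_size) as the output projection.
fn tied_lm_logits(hidden_states: &Tensor, wte: &nn::Embedding) -> Tensor {
    // (batch, seq, hidden) x (hidden, vocab) -> (batch, seq, vocab)
    hidden_states.matmul(&wte.ws.transpose(0, 1))
}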

Implementations

impl GPT2LMHeadModel

pub fn new<'p, P>(p: P, config: &Gpt2Config) -> GPT2LMHeadModel where
    P: Borrow<Path<'p>>, 

Build a new GPT2LMHeadModel

Arguments

  • p - Variable store path for the root of the GPT2 model
  • config - Gpt2Config object defining the model architecture

Example

use rust_bert::gpt2::{GPT2LMHeadModel, Gpt2Config};
use rust_bert::Config;
use std::path::Path;
use tch::{nn, Device};

let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let p = nn::VarStore::new(device);
let config = Gpt2Config::from_file(config_path);
let gpt2: GPT2LMHeadModel = GPT2LMHeadModel::new(&p.root() / "gpt2", &config);
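
The &p.root() / "gpt2" expression uses the / operator that tch-rs defines on variable-store paths to create a "gpt2" sub-path, so all of the model's weights are registered under that prefix in the store.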

Trait Implementations

impl LMHeadModel for GPT2LMHeadModel

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    layer_past: Cache,
    attention_mask: &Option<Tensor>,
    token_type_ids: &Option<Tensor>,
    position_ids: &Option<Tensor>,
    input_embeds: &Option<Tensor>,
    _encoder_outputs: Option<&Tensor>,
    _decoder_input_ids: &Option<Tensor>,
    train: bool
) -> Result<LMModelOutput, RustBertError>

Forward pass through the model

Arguments

  • input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
  • layer_past - Optional vector of size n_layer containing the past keys and values of each layer of shape (2, batch size, number of heads, past_sequence_length, hidden size per head). When provided, these are concatenated with the current input keys and values.
  • attention_mask - Optional mask of shape (batch size, sequence_length). Masked positions have value 0, non-masked positions value 1. If None, all positions default to 1 (no masking).
  • token_type_ids - Optional token type ids used to indicate the portion of the input the token belongs to. If not None, token type embeddings will be added to the token and position embeddings.
  • position_ids - Optional position ids of shape (batch size, sequence_length). If None, positions are incremented starting from the length of the past input.
  • input_embeds - Optional pre-computed input embeddings of shape (batch size, sequence_length, hidden_size). If None, input ids must be provided (see input_ids)
  • _encoder_outputs - Optional tensor of shape (batch size, source_sequence_length, encoder_hidden_dim). Unused for GPT2
  • _decoder_input_ids - Optional tensor of shape (batch size, target_sequence_length). Unused for GPT2
  • train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.

Returns

  • LMModelOutput containing:
    • lm_logits - Tensor of shape (batch size, sequence_length, vocab_size) representing the logits for each vocab item and position
    • cache - Gpt2Cache made of Option<Vec<Tensor>> of length n_layer containing the past keys and values of each layer of shape (2, batch size, number of heads, past_sequence_length, hidden size per head)

Example

use rust_bert::gpt2::{GPT2LMHeadModel, Gpt2Config};
use rust_bert::pipelines::generation_utils::{Cache, LMHeadModel};
use rust_bert::Config;
use std::path::Path;
use tch::kind::Kind::{Double, Int64};
use tch::{nn, no_grad, Device, Tensor};

let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let config = Gpt2Config::from_file(config_path);
let gpt2_model: GPT2LMHeadModel = GPT2LMHeadModel::new(&vs.root(), &config);

let (batch_size, sequence_length, past_sequence_length) = (64, 128, 56);
// Random token ids standing in for a tokenized batch
let input_tensor = Tensor::randint(config.vocab_size, &[batch_size, sequence_length], (Int64, device));
// One past key/value tensor per layer, as produced by a previous forward pass
let mut past: Vec<Tensor> = Vec::with_capacity(config.n_layer as usize);
for _ in 0..config.n_layer as usize {
    past.push(Tensor::rand(
        &[
            2,
            batch_size,
            config.n_head,
            past_sequence_length,
            config.n_embd / config.n_head,
        ],
        (Double, device),
    ))
}
let attention_mask = Tensor::zeros(&[batch_size, sequence_length], (Int64, device));
let token_type_ids = Tensor::ones(&[batch_size, sequence_length], (Int64, device));
let position_ids = Tensor::arange(sequence_length, (Int64, device))
    .expand(&[batch_size, sequence_length], true);

let model_output = no_grad(|| {
    gpt2_model
        .forward_t(
            &Some(input_tensor),
            Cache::GPT2Cache(Some(past)),
            &Some(attention_mask),
            &Some(token_type_ids),
            &Some(position_ids),
            &None,
            None,
            &None,
            false,
        )
        .unwrap()
});
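
The returned LMModelOutput can drive incremental decoding. A minimal follow-up sketch under the setup above (the greedy next-token selection is an illustration, not an API of the crate):

// Logits for the last position of each sequence: (batch size, vocab_size)
let next_token_logits = model_output.lm_logits.select(1, -1);
// Greedy choice of the next token for each sequence in the batch
let next_tokens = next_token_logits.argmax(-1, false);
// Feed only the new tokens back, reusing the returned cache so attention
// over the previous positions is not recomputed
let next_output = no_grad(|| {
    gpt2_model
        .forward_t(
            &Some(next_tokens.unsqueeze(-1)),
            model_output.cache,
            &None,
            &None,
            &None,
            &None,
            None,
            &None,
            false,
        )
        .unwrap()
});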

impl LanguageGenerator<GPT2LMHeadModel, Gpt2Vocab, Gpt2Tokenizer> for GPT2Generator

fn generate<'a, S>(
    &self,
    prompt_texts: Option<S>,
    attention_mask: Option<Tensor>,
    min_length: impl Into<Option<i64>>,
    max_length: impl Into<Option<i64>>,
    decoder_start_token_id: impl Into<Option<i64>>
) -> Vec<String> where
    S: AsRef<[&'a str]>, 

Generate text based on a vector of prompt texts.
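
A hedged usage sketch for this method; GPT2Generator::new and GenerateConfig are taken from the crate's generation pipeline, and their module paths and signatures may differ between rust_bert versions:

use rust_bert::gpt2::GPT2Generator;
use rust_bert::pipelines::generation_utils::{GenerateConfig, LanguageGenerator};

fn generate_example() -> Result<(), rust_bert::RustBertError> {
    // Default configuration downloads pretrained GPT-2 weights on first use
    let generate_config = GenerateConfig::default();
    let generator = GPT2Generator::new(generate_config)?;
    // Up to 32 tokens per prompt, no attention mask or decoder start token
    let output = generator.generate(Some(&["The dog", "The cat was"]), None, 0, 32, None);
    for sentence in output {
        println!("{}", sentence);
    }
    Ok(())
}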

fn generate_indices<'a, S>(
    &self,
    prompt_texts: Option<S>,
    attention_mask: Option<Tensor>,
    min_length: impl Into<Option<i64>>,
    max_length: impl Into<Option<i64>>,
    decoder_start_token_id: impl Into<Option<i64>>
) -> Vec<Vec<i64>> where
    S: AsRef<[&'a str]>, 

Generate token indices without decoding (useful for token-level operations before returning final text, or as a validation step during training).

fn generate_from_ids_and_past(
    &self,
    input_ids: Tensor,
    attention_mask: Option<Tensor>,
    min_length: impl Into<Option<i64>>,
    max_length: impl Into<Option<i64>>,
    decoder_start_token_id: impl Into<Option<i64>>
) -> Vec<Vec<i64>>

Auto Trait Implementations

Blanket Implementations

impl<T> Any for T where
    T: 'static + ?Sized

pub fn type_id(&self) -> TypeId

Gets the TypeId of self.

impl<T> Borrow<T> for T where
    T: ?Sized

pub fn borrow(&self) -> &T

Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T where
    T: ?Sized

pub fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.

impl<T> From<T> for T

pub fn from(t: T) -> T

Performs the conversion.

impl<T> Instrument for T

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper.

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper.

impl<T, U> Into<U> for T where
    U: From<T>,

pub fn into(self) -> U

Performs the conversion.

impl<T> Pointable for T

pub const ALIGN: usize

The alignment of the pointer.

type Init = T

The type for initializers.

pub unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a pointer with the given initializer.

pub unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer.

pub unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer.

pub unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer.

impl<T> Same<T> for T

type Output = T

Should always be Self

impl<T, U> TryFrom<U> for T where
    U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

pub fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T where
    U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

pub fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.

impl<V, T> VZip<V> for T where
    V: MultiLane<T>,

pub fn vzip(self) -> V