Struct rust_bert::bart::BartForSequenceClassification

pub struct BartForSequenceClassification { /* fields omitted */ }

BART Model for sequence classification

BART model with a classification head. It is made of the following blocks (a pooling sketch follows the list):

  • base_model: BartModel Base BART model
  • classification_head: BartClassificationHead made of 2 linear layers mapping hidden states to a target class
  • eos_token_id: token id for the EOS token carrying the pooled representation for classification
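
The pooled representation is the decoder hidden state at each sequence's EOS position. A minimal, hypothetical sketch of this pooling (not rust-bert's internal code), assuming unpadded inputs where EOS is the final token:

use tch::Tensor;

fn pool_at_eos(decoder_hidden: &Tensor) -> Tensor {
    // decoder_hidden: (batch size, target_sequence_length, hidden_size)
    let seq_len = decoder_hidden.size()[1];
    // Hidden state of the last (EOS) token -> (batch size, hidden_size),
    // which the 2-layer classification head then maps to class logits
    decoder_hidden.select(1, seq_len - 1)
}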

Methods

impl BartForSequenceClassification

pub fn new(p: &Path, config: &BartConfig) -> BartForSequenceClassification

Build a new BartForSequenceClassification

Arguments

  • p - Variable store path for the root of the BART model
  • config - BartConfig object defining the model architecture

Example

use tch::{nn, Device};
use rust_bert::Config;
use std::path::Path;
use rust_bert::bart::{BartConfig, BartForSequenceClassification};

let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let p = nn::VarStore::new(device);
let config = BartConfig::from_file(config_path);
let bart: BartForSequenceClassification = BartForSequenceClassification::new(&(&p.root() / "bart"), &config);

pub fn forward_t(
    &mut self,
    input_ids: &Tensor,
    attention_mask: Option<&Tensor>,
    encoder_outputs: Option<(Tensor, Option<Vec<Tensor>>, Option<Vec<Tensor>>)>,
    decoder_input_ids: Option<&Tensor>,
    decoder_attention_mask: Option<&Tensor>,
    train: bool
) -> (Tensor, Tensor, Option<Vec<Tensor>>, Option<Vec<Tensor>>, Option<Vec<Tensor>>, Option<Vec<Tensor>>)

Forward pass through the model

Arguments

  • input_ids - Input tensor of shape (batch size, source_sequence_length). Required for classification (the EOS positions of these ids carry the pooled representation)
  • attention_mask - Optional attention mask of shape (batch size, source_sequence_length) for the encoder positions. Positions with a mask with value 0 will be masked.
  • encoder_outputs - Optional tuple made of a tensor of shape (batch size, source_sequence_length, encoder_hidden_dim) and optional vectors of tensors of length num_encoder_layers with shape (batch size, source_sequence_length, hidden_size). These correspond to the encoder last hidden state and optional hidden states/attention weights for encoder layers. When provided, the encoder hidden state will not be recalculated. Useful for generation tasks.
  • decoder_input_ids - Optional input tensor of shape (batch size, target_sequence_length). Must be provided when running in generation mode (e.g. initialized with a BOS token)
  • decoder_attention_mask - Optional attention mask of shape (batch size, target_sequence_length) for the decoder positions. Positions with a mask with value 0 will be masked.
  • train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.

Returns

  • logits - Tensor of shape (batch size, num_classes) representing the logits for each batch item and class
  • encoder_hidden_states - Tensor of shape (batch size, source_sequence_length, hidden_size) representing the activations of the last encoder hidden state
  • all_encoder_hidden_states - Option<Vec<Tensor>> of length num_encoder_layers with shape (batch size, source_sequence_length, hidden_size)
  • all_encoder_attentions - Option<Vec<Tensor>> of length num_encoder_layers containing the attention weights for each encoder layer
  • all_decoder_hidden_states - Option<Vec<Tensor>> of length num_decoder_layers with shape (batch size, target_sequence_length, hidden_size)
  • all_decoder_attentions - Option<Vec<Tensor>> of length num_decoder_layers containing the attention weights for each decoder layer

Example

use std::path::Path;
use tch::{nn, no_grad, Device, Tensor, Kind::Int64};
use rust_bert::Config;
use rust_bert::bart::{BartConfig, BartForSequenceClassification};

let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let config = BartConfig::from_file(config_path);
let mut bart_model = BartForSequenceClassification::new(&vs.root(), &config);

let (batch_size, source_sequence_length, target_sequence_length) = (64, 128, 56);
let input_tensor = Tensor::randint(1000, &[batch_size, source_sequence_length], (Int64, device));
let target_tensor = Tensor::randint(1000, &[batch_size, target_sequence_length], (Int64, device));
let encoder_attention_mask = Tensor::ones(&[batch_size, source_sequence_length], (Int64, device));
let decoder_attention_mask = Tensor::ones(&[batch_size, target_sequence_length], (Int64, device));

let (logits, encoder_hidden_states,
     all_encoder_hidden_states, all_encoder_attentions,
     all_decoder_hidden_states, all_decoder_attentions) = no_grad(|| {
  bart_model
       .forward_t(&input_tensor,
                  Some(&encoder_attention_mask),
                  None,
                  Some(&target_tensor),
                  Some(&decoder_attention_mask),
                  false)
});

pub fn reset_cache(&mut self)

Resets the decoder cached keys and values. Should be run for every new generation using the model.
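
A brief usage sketch, assuming a local config.json; the input shapes and token id range are illustrative:

use std::path::Path;
use tch::{nn, no_grad, Device, Tensor, Kind::Int64};
use rust_bert::Config;
use rust_bert::bart::{BartConfig, BartForSequenceClassification};

let config = BartConfig::from_file(Path::new("path/to/config.json"));
let vs = nn::VarStore::new(Device::Cpu);
let mut bart = BartForSequenceClassification::new(&vs.root(), &config);

let batch = Tensor::randint(1000, &[2, 16], (Int64, Device::Cpu));
for _generation in 0..2 {
    // Discard decoder cached keys/values left over from the previous generation
    bart.reset_cache();
    let _output = no_grad(|| bart.forward_t(&batch, None, None, None, None, false));
}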
