Trait rust_bert::bert::BertEmbedding

pub trait BertEmbedding {
    fn new<'p, P>(p: P, config: &BertConfig) -> Self
    where
        P: Borrow<Path<'p>>;

    fn forward_t(
        &self,
        input_ids: Option<Tensor>,
        token_type_ids: Option<Tensor>,
        position_ids: Option<Tensor>,
        input_embeds: Option<Tensor>,
        train: bool,
    ) -> Result<Tensor, &'static str>;
}

BertEmbedding trait (for use in BertModel or RoBERTaModel)

Defines an interface for the embedding layers in BERT-based models
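
The trait is what lets `BertModel<T>` be generic over its embedding layer, so the same model code serves both the BERT and RoBERTa variants. A minimal sketch of code written against the trait (the `build_embeddings` helper below is illustrative, not part of the crate):

use std::borrow::Borrow;

use rust_bert::bert::{BertConfig, BertEmbedding};
use tch::nn;

// Generic over any embedding layer implementing `BertEmbedding`:
// the caller picks `BertEmbeddings` or `RobertaEmbeddings` via `T`.
fn build_embeddings<'p, P, T>(p: P, config: &BertConfig) -> T
where
    P: Borrow<nn::Path<'p>>,
    T: BertEmbedding,
{
    T::new(p, config)
}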

Required methods

fn new<'p, P>(p: P, config: &BertConfig) -> Self where
    P: Borrow<Path<'p>>, 

fn forward_t(
    &self,
    input_ids: Option<Tensor>,
    token_type_ids: Option<Tensor>,
    position_ids: Option<Tensor>,
    input_embeds: Option<Tensor>,
    train: bool
) -> Result<Tensor, &'static str>


Implementors

impl BertEmbedding for BertEmbeddings

fn new<'p, P>(p: P, config: &BertConfig) -> BertEmbeddings where
    P: Borrow<Path<'p>>, 

Build a new BertEmbeddings

Arguments

  • p - Variable store path for the root of the BertEmbeddings model
  • config - BertConfig object defining the model architecture and vocab/hidden size

Example

use rust_bert::bert::{BertConfig, BertEmbedding, BertEmbeddings};
use rust_bert::Config;
use std::path::Path;
use tch::{nn, Device};

let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let p = nn::VarStore::new(device);
let config = BertConfig::from_file(config_path);
let bert_embeddings = BertEmbeddings::new(&p.root() / "bert_embeddings", &config);

fn forward_t(
    &self,
    input_ids: Option<Tensor>,
    token_type_ids: Option<Tensor>,
    position_ids: Option<Tensor>,
    input_embeds: Option<Tensor>,
    train: bool
) -> Result<Tensor, &'static str>

Forward pass through the embedding layer

Arguments

  • input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
  • token_type_ids - Optional segment id of shape (batch size, sequence_length). Convention is a value of 0 for the first sentence (including [SEP]) and 1 for the second sentence. If None, set to 0.
  • position_ids - Optional position ids of shape (batch size, sequence_length). If None, will be incremented from 0.
  • input_embeds - Optional pre-computed input embeddings of shape (batch size, sequence_length, hidden_size). If None, input ids must be provided (see input_ids)
  • train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.

Returns

  • embedded_output - Tensor of shape (batch size, sequence_length, hidden_size)

Example

use rust_bert::bert::BertEmbedding;
use tch::{no_grad, Kind::Int64, Tensor};

// `device`, `config` and `bert_embeddings` as created in the example above
let (batch_size, sequence_length) = (64, 128);
// random token ids in [0, vocab_size)
let input_tensor =
    Tensor::randint(config.vocab_size, &[batch_size, sequence_length], (Int64, device));
let token_type_ids = Tensor::zeros(&[batch_size, sequence_length], (Int64, device));
let position_ids = Tensor::arange(sequence_length, (Int64, device))
    .expand(&[batch_size, sequence_length], true);

let embedded_output = no_grad(|| {
    bert_embeddings
        .forward_t(
            Some(input_tensor),
            Some(token_type_ids),
            Some(position_ids),
            None,
            false,
        )
        .unwrap()
});
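
The `input_embeds` path can be exercised instead of `input_ids`; a minimal sketch reusing the setup above (the random `input_embeds` tensor stands in for externally computed embeddings):

use tch::Kind;

let input_embeds = Tensor::rand(
    &[batch_size, sequence_length, config.hidden_size],
    (Kind::Float, device),
);
let embedded_output = no_grad(|| {
    bert_embeddings
        // input_ids must be None when pre-computed embeddings are given
        .forward_t(None, None, None, Some(input_embeds), false)
        .unwrap()
});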

impl BertEmbedding for RobertaEmbeddings

fn new<'p, P>(p: P, config: &BertConfig) -> RobertaEmbeddings where
    P: Borrow<Path<'p>>, 

Build a new RobertaEmbeddings

Arguments

  • p - Variable store path for the root of the RobertaEmbeddings model
  • config - BertConfig object defining the model architecture and vocab/hidden size

Example

use rust_bert::bert::{BertConfig, BertEmbedding};
use rust_bert::roberta::RobertaEmbeddings;
use rust_bert::Config;
use std::path::Path;
use tch::{nn, Device};

let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let p = nn::VarStore::new(device);
let config = BertConfig::from_file(config_path);
let roberta_embeddings = RobertaEmbeddings::new(&p.root() / "roberta_embeddings", &config);

fn forward_t(
    &self,
    input_ids: Option<Tensor>,
    token_type_ids: Option<Tensor>,
    position_ids: Option<Tensor>,
    input_embeds: Option<Tensor>,
    train: bool
) -> Result<Tensor, &'static str>
[src]

Forward pass through the embedding layer. This differs from the original BERT embeddings in how the position ids are calculated when not provided: instead of counting from 0, RoBERTa derives them from the input ids, numbering real tokens from the padding index + 1 and assigning the padding index itself to padding positions.
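
As a point of reference, a minimal sketch of that convention (the `roberta_position_ids` helper is illustrative, not part of the crate; RoBERTa's `<pad>` token id is assumed to be 1):

use tch::{Kind, Tensor};

// Real tokens are numbered from padding_idx + 1; padding keeps padding_idx.
// E.g. with padding_idx = 1: [[0, 5, 7, 1, 1]] -> [[2, 3, 4, 1, 1]]
fn roberta_position_ids(input_ids: &Tensor, padding_idx: i64) -> Tensor {
    // 1 where the token is real, 0 where it is padding
    let mask = input_ids.ne(padding_idx).to_kind(Kind::Int64);
    // cumulative count of real tokens, shifted past the padding index
    mask.cumsum(1, Kind::Int64) * &mask + padding_idx
}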

Arguments

  • input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
  • token_type_ids - Optional segment id of shape (batch size, sequence_length). Convention is a value of 0 for the first sentence (including [SEP]) and 1 for the second sentence. If None, set to 0.
  • position_ids - Optional position ids of shape (batch size, sequence_length). If None, they are derived from the input ids following the padding-offset convention described above.
  • input_embeds - Optional pre-computed input embeddings of shape (batch size, sequence_length, hidden_size). If None, input ids must be provided (see input_ids)
  • train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.

Returns

  • embedded_output - Tensor of shape (batch size, sequence_length, hidden_size)

Example

use rust_bert::bert::BertEmbedding;
use tch::{no_grad, Kind::Int64, Tensor};

// `device`, `config` and `roberta_embeddings` as created in the example above
let (batch_size, sequence_length) = (64, 128);
// random token ids in [0, vocab_size)
let input_tensor =
    Tensor::randint(config.vocab_size, &[batch_size, sequence_length], (Int64, device));
let token_type_ids = Tensor::zeros(&[batch_size, sequence_length], (Int64, device));
let position_ids = Tensor::arange(sequence_length, (Int64, device))
    .expand(&[batch_size, sequence_length], true);

let embedded_output = no_grad(|| {
    roberta_embeddings
        .forward_t(
            Some(input_tensor),
            Some(token_type_ids),
            Some(position_ids),
            None,
            false,
        )
        .unwrap()
});
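
Given the padding-offset convention described above, it is often simpler to pass None for position_ids and token_type_ids and let the layer derive padding-aware positions itself; a minimal sketch reusing the setup above (a fresh `input_tensor` is needed, since the previous one was moved into the call):

let input_tensor =
    Tensor::randint(config.vocab_size, &[batch_size, sequence_length], (Int64, device));
let embedded_output = no_grad(|| {
    roberta_embeddings
        // position ids and token type ids left as None are computed internally
        .forward_t(Some(input_tensor), None, None, None, false)
        .unwrap()
});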