Trait rust_bert::pipelines::generation_utils::LMHeadModel

pub trait LMHeadModel {
    fn forward_t(
        &self,
        input_ids: &Option<Tensor>,
        layer_past: Cache,
        attention_mask: &Option<Tensor>,
        token_type_ids: &Option<Tensor>,
        position_ids: &Option<Tensor>,
        input_embeds: &Option<Tensor>,
        encoder_outputs: Option<&Tensor>,
        decoder_input_ids: &Option<Tensor>,
        train: bool
    ) -> Result<LMModelOutput, RustBertError>;
}

Language Model trait

Shared trait between language generation models (e.g. GPT2, GPT, BART) used in language generation pipelines.

Required methods

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    layer_past: Cache,
    attention_mask: &Option<Tensor>,
    token_type_ids: &Option<Tensor>,
    position_ids: &Option<Tensor>,
    input_embeds: &Option<Tensor>,
    encoder_outputs: Option<&Tensor>,
    decoder_input_ids: &Option<Tensor>,
    train: bool
) -> Result<LMModelOutput, RustBertError>

Forward pass through the model. Example provided for GPT2.

Arguments

  • input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
  • layer_past - Optional vector of size n_layer containing the past keys and values of each layer of shape (2, batch size, number of heads, past_sequence_length, hidden size per head). When provided, these are concatenated with the current input keys and values.
  • attention_mask - Optional mask of shape (batch size, sequence_length). Masked positions have value 0, non-masked positions have value 1. If None, defaults to all ones (no masking)
  • input_embeds - Optional pre-computed input embeddings of shape (batch size, sequence_length, hidden_size). If None, input ids must be provided (see input_ids)
  • token_type_ids - Optional token type ids used to indicate the portion of the input the token belongs to. If not None, token type embeddings will be added to the token and position embeddings.
  • position_ids - Optional position ids of shape (batch size, sequence_length). If None, positions are counted starting from the length of the past input.
  • encoder_outputs - Optional tensor of shape (batch size, source_sequence_length, hidden_size) holding pre-computed encoder hidden states. Used by encoder-decoder implementors (e.g. BART, T5); unused for decoder-only models.
  • decoder_input_ids - Optional input tensor of shape (batch size, target_sequence_length). Used by encoder-decoder implementors; unused for decoder-only models.
  • train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.

Returns

  • LMModelOutput containing:
    • lm_logits - Tensor of shape (batch size, sequence_length, vocab_size) representing the logits for each vocab item and position
    • cache - Cache variant (model-dependent) containing the past keys and values of each layer, e.g. Option<Vec<Tensor>> of length n_layer with shape (2, batch size, number of heads, past_sequence_length, hidden size per head) for GPT2
    • all_hidden_states - Option<Vec<Tensor>> of length num_hidden_layers with shape (batch size, sequence_length, hidden_size)
    • all_attentions - Option<Vec<Tensor>> of length num_hidden_layers with shape (batch size, sequence_length, hidden_size)

Example

use rust_bert::gpt2::{GPT2LMHeadModel, Gpt2Config};
use rust_bert::pipelines::generation_utils::{Cache, LMHeadModel};
use rust_bert::Config;
use std::path::Path;
use tch::kind::Kind::{Double, Int64};
use tch::{nn, no_grad, Device, Tensor};

// Setup: placeholder config path; weights are randomly initialized.
let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let config = Gpt2Config::from_file(config_path);
let gpt2_model = GPT2LMHeadModel::new(&vs.root(), &config);

let (batch_size, sequence_length, past_sequence_length) = (64, 128, 56);
// Random token ids standing in for a tokenized batch
let input_tensor = Tensor::randint(config.vocab_size, &[batch_size, sequence_length], (Int64, device));
// One past key/value tensor per layer, shaped as described in layer_past above
let mut past: Vec<Tensor> = Vec::with_capacity(config.n_layer as usize);
for _ in 0..config.n_layer as usize {
    past.push(Tensor::rand(
        &[
            2,
            batch_size,
            config.n_head,
            past_sequence_length,
            config.n_embd / config.n_head,
        ],
        (Double, device),
    ))
}
// All ones: no positions are masked out
let attention_mask = Tensor::ones(&[batch_size, sequence_length], (Int64, device));
let token_type_ids = Tensor::ones(&[batch_size, sequence_length], (Int64, device));
let position_ids = Tensor::arange(sequence_length, (Int64, device))
    .expand(&[batch_size, sequence_length], true);

let model_output = no_grad(|| {
    gpt2_model
        .forward_t(
            &Some(input_tensor),
            Cache::GPT2Cache(Some(past)),
            &Some(attention_mask),
            &Some(token_type_ids),
            &Some(position_ids),
            &None,
            None,
            &None,
            false,
        )
        .unwrap()
});
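The logits for the final position can then be read from the returned LMModelOutput (a short usage sketch building on the example above):

// lm_logits has shape (batch size, sequence_length, vocab_size);
// select the last position to get scores for the next token.
let next_token_logits = model_output.lm_logits.select(1, sequence_length - 1);
// next_token_logits: (batch size, vocab_size)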

Implementors

impl LMHeadModel for BartForConditionalGeneration

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    cache: Cache,
    attention_mask: &Option<Tensor>,
    _token_type_ids: &Option<Tensor>,
    _position_ids: &Option<Tensor>,
    _input_embeds: &Option<Tensor>,
    encoder_outputs: Option<&Tensor>,
    decoder_input_ids: &Option<Tensor>,
    train: bool
) -> Result<LMModelOutput, RustBertError>

Forward pass through the model

Arguments

  • input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
  • cache - Optional vector of length num_layers containing tuples of optional LayerStates holding the last calculated key and value pairs for the decoder. This avoids recomputing attention weights at past positions and speeds up decoding.
  • attention_mask - Optional mask of shape (batch size, sequence_length). Masked positions have value 0, non-masked positions have value 1. If None, defaults to all ones (no masking)
  • input_embeds - Unused for BART
  • token_type_ids - Unused for BART
  • position_ids - Unused for BART
  • encoder_outputs - Optional tensor of shape (batch size, source_sequence_length, hidden_size). When provided, the encoder hidden state will not be recalculated. Useful for generation tasks.
  • decoder_input_ids - Optional input tensor of shape (batch size, target_sequence_length). Must be provided when running in generation mode (e.g. initialized with a BOS token)
  • train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.

Returns

  • LMModelOutput containing:
    • lm_logits - Tensor of shape (batch size, sequence_length, vocab_size) representing the logits for each vocab item and position
    • cache - BARTCache made of Option<Vec<(Option<LayerState>, Option<LayerState>)>> of length n_layer, containing the cached keys and values for both the self-attention and the encoder cross-attention of each decoder layer.

Example

use rust_bert::bart::{BartConfig, BartForConditionalGeneration};
use rust_bert::pipelines::generation_utils::{Cache, LMHeadModel};
use rust_bert::Config;
use std::path::Path;
use tch::kind::Kind::Int64;
use tch::{nn, no_grad, Device, Tensor};

// Setup: placeholder config path; weights are randomly initialized.
let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let config = BartConfig::from_file(config_path);
let bart_model = BartForConditionalGeneration::new(&vs.root(), &config);

let (batch_size, source_sequence_length, target_sequence_length) = (64, 128, 56);
let input_tensor = Tensor::randint(config.vocab_size, &[batch_size, source_sequence_length], (Int64, device));
let target_tensor = Tensor::randint(config.vocab_size, &[batch_size, target_sequence_length], (Int64, device));
let encoder_attention_mask = Tensor::ones(&[batch_size, source_sequence_length], (Int64, device));
let decoder_attention_mask = Tensor::ones(&[batch_size, target_sequence_length], (Int64, device));

// Note: this calls the model's inherent forward_t; the trait method of the same
// name takes the nine-argument list documented above.
let model_output = no_grad(|| {
    bart_model.forward_t(
        Some(&input_tensor),
        Some(&encoder_attention_mask),
        None,
        Some(&target_tensor),
        Some(&decoder_attention_mask),
        None,
        false,
    )
});
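For completeness, the same forward pass can be issued through the trait method itself. Because the inherent forward_t takes precedence in method resolution, the trait method is called with fully qualified syntax. This is a sketch reusing the tensors above, assuming Cache::BARTCache is the variant accepted by this implementation when no decoder state is cached; the same pattern applies to the Marian, Pegasus and T5 implementations (with their respective Cache variants).

let model_output = no_grad(|| {
    LMHeadModel::forward_t(
        &bart_model,
        &Some(input_tensor),
        Cache::BARTCache(None), // no cached decoder state
        &Some(encoder_attention_mask),
        &None,
        &None,
        &None,
        None,
        &Some(target_tensor),
        false,
    )
    .unwrap()
});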

impl LMHeadModel for GPT2LMHeadModel

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    layer_past: Cache,
    attention_mask: &Option<Tensor>,
    token_type_ids: &Option<Tensor>,
    position_ids: &Option<Tensor>,
    input_embeds: &Option<Tensor>,
    _encoder_outputs: Option<&Tensor>,
    _decoder_input_ids: &Option<Tensor>,
    train: bool
) -> Result<LMModelOutput, RustBertError>

Forward pass through the model

Arguments

  • input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
  • layer_past - Optional vector of size n_layer containing the past keys and values of each layer of shape (2, batch size, number of heads, past_sequence_length, hidden size per head). When provided, these are concatenated with the current input keys and values.
  • attention_mask - Optional mask of shape (batch size, sequence_length). Masked positions have value 0, non-masked positions have value 1. If None, defaults to all ones (no masking)
  • input_embeds - Optional pre-computed input embeddings of shape (batch size, sequence_length, hidden_size). If None, input ids must be provided (see input_ids)
  • token_type_ids - Optional token type ids used to indicate the portion of the input the token belongs to. If not None, token type embeddings will be added to the token and position embeddings.
  • position_ids - Optional position ids of shape (batch size, sequence_length). If None, will be incremented starting from the length of the past input.
  • _encoder_outputs - Optional tensor of shape (batch size, source_sequence_length, encoder_hidden_dim). Unused for GPT2
  • _decoder_input_ids - Optional tensor of shape (batch size, target_sequence_length). Unused for GPT2
  • train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.

Returns

  • LMModelOutput containing:
    • lm_logits - Tensor of shape (batch size, sequence_length, vocab_size) representing the logits for each vocab item and position
    • cache - GPT2Cache made of Option<Vec<Tensor>> of length n_layer containing the past keys and values of each layer of shape (2, batch size, number of heads, past_sequence_length, hidden size per head)

Example

use rust_bert::gpt2::{GPT2LMHeadModel, Gpt2Config};
use rust_bert::pipelines::generation_utils::{Cache, LMHeadModel};
use rust_bert::Config;
use std::path::Path;
use tch::kind::Kind::{Double, Int64};
use tch::{nn, no_grad, Device, Tensor};

// Setup: placeholder config path; weights are randomly initialized.
let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let config = Gpt2Config::from_file(config_path);
let gpt2_model = GPT2LMHeadModel::new(&vs.root(), &config);

let (batch_size, sequence_length, past_sequence_length) = (64, 128, 56);
// Random token ids standing in for a tokenized batch
let input_tensor = Tensor::randint(config.vocab_size, &[batch_size, sequence_length], (Int64, device));
// One past key/value tensor per layer, shaped as described in layer_past above
let mut past: Vec<Tensor> = Vec::with_capacity(config.n_layer as usize);
for _ in 0..config.n_layer as usize {
    past.push(Tensor::rand(
        &[
            2,
            batch_size,
            config.n_head,
            past_sequence_length,
            config.n_embd / config.n_head,
        ],
        (Double, device),
    ))
}
// All ones: no positions are masked out
let attention_mask = Tensor::ones(&[batch_size, sequence_length], (Int64, device));
let token_type_ids = Tensor::ones(&[batch_size, sequence_length], (Int64, device));
let position_ids = Tensor::arange(sequence_length, (Int64, device))
    .expand(&[batch_size, sequence_length], true);

let model_output = no_grad(|| {
    gpt2_model
        .forward_t(
            &Some(input_tensor),
            Cache::GPT2Cache(Some(past)),
            &Some(attention_mask),
            &Some(token_type_ids),
            &Some(position_ids),
            &None,
            None,
            &None,
            false,
        )
        .unwrap()
});
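The returned cache can be fed back into a subsequent call to continue decoding without recomputing attention keys and values for past positions. A sketch reusing the model and output from the example above; the next token ids are placeholders:

let next_token_ids = Tensor::randint(config.vocab_size, &[batch_size, 1], (Int64, device));
let next_output = no_grad(|| {
    gpt2_model
        .forward_t(
            &Some(next_token_ids),
            model_output.cache, // past keys and values from the previous call
            &None,
            &None,
            &None, // position ids resume from the cached length when None
            &None,
            None,
            &None,
            false,
        )
        .unwrap()
});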

impl LMHeadModel for GptNeoForCausalLM

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    layer_past: Cache,
    attention_mask: &Option<Tensor>,
    token_type_ids: &Option<Tensor>,
    position_ids: &Option<Tensor>,
    input_embeds: &Option<Tensor>,
    _encoder_outputs: Option<&Tensor>,
    _decoder_input_ids: &Option<Tensor>,
    train: bool
) -> Result<LMModelOutput, RustBertError>
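No example is provided for this implementation. Below is a minimal sketch of a cache-free call through the trait, assuming a gpt_neo_model: GptNeoForCausalLM has already been constructed (see the gpt_neo module for loading details) and that Cache::None is accepted when no past state is supplied, as for the other decoder-only implementors. The upper bound of 1000 for the random ids is a placeholder for the configured vocabulary size.

use rust_bert::pipelines::generation_utils::{Cache, LMHeadModel};
use tch::kind::Kind::Int64;
use tch::{no_grad, Device, Tensor};

let device = Device::Cpu;
let (batch_size, sequence_length) = (2, 16);
let input_tensor = Tensor::randint(1000, &[batch_size, sequence_length], (Int64, device));

let model_output = no_grad(|| {
    gpt_neo_model
        .forward_t(
            &Some(input_tensor),
            Cache::None, // no past keys and values
            &None,       // attention_mask: defaults to no masking
            &None,       // token_type_ids
            &None,       // position_ids
            &None,       // input_embeds
            None,        // encoder_outputs: unused for GPT-Neo
            &None,       // decoder_input_ids: unused for GPT-Neo
            false,
        )
        .unwrap()
});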

impl LMHeadModel for MarianForConditionalGeneration

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    cache: Cache,
    attention_mask: &Option<Tensor>,
    _token_type_ids: &Option<Tensor>,
    _position_ids: &Option<Tensor>,
    _input_embeds: &Option<Tensor>,
    encoder_outputs: Option<&Tensor>,
    decoder_input_ids: &Option<Tensor>,
    train: bool
) -> Result<LMModelOutput, RustBertError>

Forward pass through the model

Arguments

  • input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
  • cache - Optional vector of length num_layers containing tuples of optional LayerStates holding the last calculated key and value pairs for the decoder. This avoids recomputing attention weights at past positions and speeds up decoding.
  • attention_mask - Optional mask of shape (batch size, sequence_length). Masked positions have value 0, non-masked positions have value 1. If None, defaults to all ones (no masking)
  • input_embeds - Unused for Marian
  • token_type_ids - Unused for Marian
  • position_ids - Unused for Marian
  • encoder_outputs - Optional tensor of shape (batch size, source_sequence_length, hidden_size). When provided, the encoder hidden state will not be recalculated. Useful for generation tasks.
  • decoder_input_ids - Optional input tensor of shape (batch size, target_sequence_length). Must be provided when running in generation mode (e.g. initialized with a BOS token)
  • train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.

Returns

  • LMModelOutput containing:
    • lm_logits - Tensor of shape (batch size, sequence_length, vocab_size) representing the logits for each vocab item and position
    • cache - BARTCache made of Option<Vec<(Option<LayerState>, Option<LayerState>)>> of length n_layer, containing the cached keys and values for both the self-attention and the encoder cross-attention of each decoder layer.

Example

use rust_bert::bart::BartConfig;
use rust_bert::marian::MarianForConditionalGeneration;
use rust_bert::Config;
use std::path::Path;
use tch::kind::Kind::Int64;
use tch::{nn, no_grad, Device, Tensor};

// Setup: placeholder config path; weights are randomly initialized.
let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let config = BartConfig::from_file(config_path);
let marian_model = MarianForConditionalGeneration::new(&vs.root(), &config);

let (batch_size, source_sequence_length, target_sequence_length) = (64, 128, 56);
let input_tensor = Tensor::randint(config.vocab_size, &[batch_size, source_sequence_length], (Int64, device));
let target_tensor = Tensor::randint(config.vocab_size, &[batch_size, target_sequence_length], (Int64, device));
let encoder_attention_mask = Tensor::ones(&[batch_size, source_sequence_length], (Int64, device));
let decoder_attention_mask = Tensor::ones(&[batch_size, target_sequence_length], (Int64, device));

// Calls the model's inherent forward_t (see the note in the BART example above).
let model_output = no_grad(|| {
    marian_model.forward_t(
        Some(&input_tensor),
        Some(&encoder_attention_mask),
        None,
        Some(&target_tensor),
        Some(&decoder_attention_mask),
        None,
        false,
    )
});

impl LMHeadModel for OpenAIGPTLMHeadModel

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    _layer_past: Cache,
    attention_mask: &Option<Tensor>,
    token_type_ids: &Option<Tensor>,
    position_ids: &Option<Tensor>,
    input_embeds: &Option<Tensor>,
    _encoder_outputs: Option<&Tensor>,
    _decoder_input_ids: &Option<Tensor>,
    train: bool
) -> Result<LMModelOutput, RustBertError>

Forward pass through the model

Arguments

  • input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
  • _layer_past - Unused for GPT
  • attention_mask - Optional mask of shape (batch size, sequence_length). Masked positions have value 0, non-masked positions have value 1. If None, defaults to all ones (no masking)
  • input_embeds - Optional pre-computed input embeddings of shape (batch size, sequence_length, hidden_size). If None, input ids must be provided (see input_ids)
  • token_type_ids - Optional token type ids used to indicate the portion of the input the token belongs to. If not None, token type embeddings will be added to the token and position embeddings.
  • position_ids - Optional position ids of shape (batch size, sequence_length). If None, will be incremented starting from the length of the past input.
  • _encoder_outputs - Unused for GPT
  • _decoder_input_ids - Unused for GPT
  • train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.

Returns

  • LMModelOutput containing:
    • lm_logits - Tensor of shape (batch size, sequence_length, vocab_size) representing the logits for each vocab item and position
    • cache - None
    • encoder_hidden_states - None
    • all_hidden_states - Option<Vec<Tensor>> of length num_hidden_layers with shape (batch size, sequence_length, hidden_size)
    • all_attentions - Option<Vec<Tensor>> of length num_hidden_layers with shape (batch size, sequence_length, hidden_size)

Example

use rust_bert::gpt2::Gpt2Config;
use rust_bert::openai_gpt::OpenAIGPTLMHeadModel;
use rust_bert::pipelines::generation_utils::{Cache, LMHeadModel};
use rust_bert::Config;
use std::path::Path;
use tch::kind::Kind::Int64;
use tch::{nn, no_grad, Device, Tensor};

// Setup: placeholder config path; weights are randomly initialized.
let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let config = Gpt2Config::from_file(config_path);
let gpt_model = OpenAIGPTLMHeadModel::new(&vs.root(), &config);

let (batch_size, sequence_length) = (64, 128);
let input_tensor = Tensor::randint(config.vocab_size, &[batch_size, sequence_length], (Int64, device));
// All ones: no positions are masked out
let attention_mask = Tensor::ones(&[batch_size, sequence_length], (Int64, device));
let token_type_ids = Tensor::ones(&[batch_size, sequence_length], (Int64, device));
let position_ids = Tensor::arange(sequence_length, (Int64, device))
    .expand(&[batch_size, sequence_length], true);

let model_output = no_grad(|| {
    gpt_model
        .forward_t(
            &Some(input_tensor),
            Cache::None, // GPT does not use a cache
            &Some(attention_mask),
            &Some(token_type_ids),
            &Some(position_ids),
            &None,
            None,
            &None,
            false,
        )
        .unwrap()
});

impl LMHeadModel for PegasusForConditionalGeneration

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    cache: Cache,
    attention_mask: &Option<Tensor>,
    _token_type_ids: &Option<Tensor>,
    _position_ids: &Option<Tensor>,
    _input_embeds: &Option<Tensor>,
    encoder_outputs: Option<&Tensor>,
    decoder_input_ids: &Option<Tensor>,
    train: bool
) -> Result<LMModelOutput, RustBertError>

Forward pass through the model

Arguments

  • input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
  • cache - Optional vector of length num_layers containing tuples of optional LayerStates holding the last calculated key and value pairs for the decoder. This avoids recomputing attention weights at past positions and speeds up decoding.
  • attention_mask - Optional mask of shape (batch size, sequence_length). Masked positions have value 0, non-masked positions have value 1. If None, defaults to all ones (no masking)
  • input_embeds - Unused for Pegasus
  • token_type_ids - Unused for Pegasus
  • position_ids - Unused for Pegasus
  • encoder_outputs - Optional tensor of shape (batch size, source_sequence_length, hidden_size). When provided, the encoder hidden state will not be recalculated. Useful for generation tasks.
  • decoder_input_ids - Optional input tensor of shape (batch size, target_sequence_length). Must be provided when running in generation mode (e.g. initialized with a BOS token)
  • train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.

Returns

  • LMModelOutput containing:
    • lm_logits - Tensor of shape (batch size, sequence_length, vocab_size) representing the logits for each vocab item and position
    • cache - BARTCache made of Option<Vec<(Option<LayerState>, Option<LayerState>)>> of length n_layer, containing the cached keys and values for both the self-attention and the encoder cross-attention of each decoder layer.

Example

use rust_bert::pegasus::{PegasusConfig, PegasusForConditionalGeneration};
use rust_bert::Config;
use std::path::Path;
use tch::kind::Kind::Int64;
use tch::{nn, no_grad, Device, Tensor};

// Setup: placeholder config path; weights are randomly initialized.
let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let config = PegasusConfig::from_file(config_path);
let pegasus_model = PegasusForConditionalGeneration::new(&vs.root(), &config);

let (batch_size, source_sequence_length, target_sequence_length) = (64, 128, 56);
let input_tensor = Tensor::randint(config.vocab_size, &[batch_size, source_sequence_length], (Int64, device));
let target_tensor = Tensor::randint(config.vocab_size, &[batch_size, target_sequence_length], (Int64, device));
let encoder_attention_mask = Tensor::ones(&[batch_size, source_sequence_length], (Int64, device));
let decoder_attention_mask = Tensor::ones(&[batch_size, target_sequence_length], (Int64, device));

// Calls the model's inherent forward_t (see the note in the BART example above).
let model_output = no_grad(|| {
    pegasus_model.forward_t(
        Some(&input_tensor),
        Some(&encoder_attention_mask),
        None,
        Some(&target_tensor),
        Some(&decoder_attention_mask),
        None,
        false,
    )
});

impl LMHeadModel for ProphetNetForConditionalGeneration

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    cache: Cache,
    attention_mask: &Option<Tensor>,
    _token_type_ids: &Option<Tensor>,
    _position_ids: &Option<Tensor>,
    input_embeds: &Option<Tensor>,
    encoder_outputs: Option<&Tensor>,
    decoder_input_ids: &Option<Tensor>,
    train: bool
) -> Result<LMModelOutput, RustBertError>
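No example is provided for this implementation either. A minimal sketch of a call through the trait, assuming a prophet_net_model: ProphetNetForConditionalGeneration has already been constructed and that Cache::None is accepted when no decoder state is cached; the id upper bound of 1000 is a placeholder for the configured vocabulary size:

use rust_bert::pipelines::generation_utils::{Cache, LMHeadModel};
use tch::kind::Kind::Int64;
use tch::{no_grad, Device, Tensor};

let device = Device::Cpu;
let (batch_size, source_sequence_length, target_sequence_length) = (2, 16, 8);
let input_tensor = Tensor::randint(1000, &[batch_size, source_sequence_length], (Int64, device));
let target_tensor = Tensor::randint(1000, &[batch_size, target_sequence_length], (Int64, device));

let model_output = no_grad(|| {
    prophet_net_model
        .forward_t(
            &Some(input_tensor),
            Cache::None,          // no cached decoder state
            &None,                // attention_mask: defaults to no masking
            &None,                // token_type_ids: unused
            &None,                // position_ids: unused
            &None,                // input_embeds: computed from input_ids
            None,                 // encoder_outputs: recomputed from input_ids
            &Some(target_tensor), // decoder input ids
            false,
        )
        .unwrap()
});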

impl LMHeadModel for ReformerModelWithLMHead

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    cache: Cache,
    attention_mask: &Option<Tensor>,
    _token_type_ids: &Option<Tensor>,
    _position_ids: &Option<Tensor>,
    _input_embeds: &Option<Tensor>,
    _encoder_outputs: Option<&Tensor>,
    _decoder_input_ids: &Option<Tensor>,
    train: bool
) -> Result<LMModelOutput, RustBertError>
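Likewise for Reformer, a minimal sketch assuming a reformer_model: ReformerModelWithLMHead has already been constructed and that Cache::None is accepted when nothing is cached; 1000 is a placeholder vocabulary size:

use rust_bert::pipelines::generation_utils::{Cache, LMHeadModel};
use tch::kind::Kind::Int64;
use tch::{no_grad, Device, Tensor};

let device = Device::Cpu;
let (batch_size, sequence_length) = (2, 16);
let input_tensor = Tensor::randint(1000, &[batch_size, sequence_length], (Int64, device));

// Arguments unused by this implementation are passed as None.
let model_output = no_grad(|| {
    reformer_model
        .forward_t(
            &Some(input_tensor),
            Cache::None,
            &None,
            &None,
            &None,
            &None,
            None,
            &None,
            false,
        )
        .unwrap()
});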

impl LMHeadModel for T5ForConditionalGeneration

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    cache: Cache,
    attention_mask: &Option<Tensor>,
    _token_type_ids: &Option<Tensor>,
    _position_ids: &Option<Tensor>,
    _input_embeds: &Option<Tensor>,
    encoder_outputs: Option<&Tensor>,
    decoder_input_ids: &Option<Tensor>,
    train: bool
) -> Result<LMModelOutput, RustBertError>

Forward pass through the model

Arguments

  • input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
  • cache - Optional vector of length num_layers containing tuples of optional LayerStates holding the last calculated key and value pairs for the decoder. This avoids recomputing attention weights at past positions and speeds up decoding.
  • attention_mask - Optional mask of shape (batch size, sequence_length). Masked positions have value 0, non-masked positions have value 1. If None, defaults to all ones (no masking)
  • input_embeds - Unused for T5
  • token_type_ids - Unused for T5
  • position_ids - Unused for T5
  • encoder_outputs - Optional tensor of shape (batch size, source_sequence_length, hidden_size). When provided, the encoder hidden state will not be recalculated. Useful for generation tasks.
  • decoder_input_ids - Optional input tensor of shape (batch size, target_sequence_length).
  • train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.

Returns

  • LMModelOutput containing:
    • lm_logits - Tensor of shape (batch size, sequence_length, vocab_size) representing the logits for each vocab item and position
    • cache - T5Cache made of Option<Vec<(Option<LayerState>, Option<LayerState>)>> of length n_layer, containing the cached keys and values for both the self-attention and the encoder cross-attention of each decoder layer.

Example

use rust_bert::t5::{T5Config, T5ForConditionalGeneration};
use rust_bert::Config;
use std::path::Path;
use tch::kind::Kind::Int64;
use tch::{nn, no_grad, Device, Tensor};

// Setup: placeholder config path; weights are randomly initialized.
let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let config = T5Config::from_file(config_path);
let t5_model = T5ForConditionalGeneration::new(&vs.root(), &config);

let (batch_size, source_sequence_length, target_sequence_length) = (64, 128, 56);
let input_tensor = Tensor::randint(config.vocab_size, &[batch_size, source_sequence_length], (Int64, device));
let target_tensor = Tensor::randint(config.vocab_size, &[batch_size, target_sequence_length], (Int64, device));
let encoder_attention_mask = Tensor::ones(&[batch_size, source_sequence_length], (Int64, device));
let decoder_attention_mask = Tensor::ones(&[batch_size, target_sequence_length], (Int64, device));

// Calls the model's inherent forward_t (see the note in the BART example above).
let model_output = no_grad(|| {
    t5_model.forward_t(
        Some(&input_tensor),
        Some(&encoder_attention_mask),
        None,
        Some(&target_tensor),
        Some(&decoder_attention_mask),
        None,
        None,
        None,
        false,
    )
});

impl LMHeadModel for XLNetLMHeadModel

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    layer_past: Cache,
    attention_mask: &Option<Tensor>,
    _token_type_ids: &Option<Tensor>,
    _position_ids: &Option<Tensor>,
    _input_embeds: &Option<Tensor>,
    _encoder_outputs: Option<&Tensor>,
    decoder_input_ids: &Option<Tensor>,
    train: bool
) -> Result<LMModelOutput, RustBertError>

Forward pass through the model

Arguments

  • input_ids - Optional input tensor of shape (batch size, sequence_length). This or input_embeds must be provided.
  • attention_mask - Optional attention mask of shape (batch size, sequence_length). Positions with a mask value of 0 will be masked.
  • perm_mask - Optional tensor of shape (batch size, sequence_length, sequence_length). Mask to indicate the attention pattern for each input token (only used for pre-training over permutations, rather than simple token masking).
  • target_mapping - Optional tensor of shape (batch size, num_tokens, sequence_length) indicating the position of the masked words to predict.
  • token_type_ids - Optional tensor (batch size, sequence_length) indicating the sentence ID of the token (0: first sentence, 1: second sentence).
  • input_embeds - Optional input tensor of shape (batch size, sequence_length, embeddings dimension). This or input_ids must be provided.
  • old_layer_states - Optional vector of length num_layers containing optional LayerStates containing the last calculated content for the attention layers. This avoids recomputing attention weights at past positions and speeds up decoding.
  • train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.

Returns

  • LMModelOutput containing:
    • lm_logits - Tensor of shape (batch size, sequence_length, vocab_size) representing the logits for each vocab item and position
    • cache - XLNetCache made of Option<Vec<Option<LayerState>>> of length n_layers and shape (past_sequence_length, batch size, hidden_size) containing the previous content

Example

use rust_bert::xlnet::{XLNetConfig, XLNetLMHeadModel};
use rust_bert::Config;
use std::path::Path;
use tch::kind::Kind::{Float, Int64};
use tch::{nn, no_grad, Device, Tensor};

// Setup: placeholder config path; weights are randomly initialized.
let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let config = XLNetConfig::from_file(config_path);
let xlnet_model = XLNetLMHeadModel::new(&vs.root(), &config);

let (batch_size, sequence_length) = (64, 128);
let input_tensor = Tensor::randint(config.vocab_size, &[batch_size, sequence_length], (Int64, device));
let attention_mask = Tensor::ones(&[batch_size, sequence_length], (Int64, device));
// Predict a single masked position (index 3) for each sequence in the batch
let target_mapping = Tensor::zeros(&[batch_size, 1, sequence_length], (Float, device));
let _ = target_mapping.narrow(2, 3, 1).fill_(1.0);

// Calls the model's inherent forward_t, whose argument list matches the
// Arguments documented above.
let model_output = no_grad(|| {
    xlnet_model.forward_t(
        Some(&input_tensor),
        Some(&attention_mask),
        None,
        Some(&target_mapping),
        None,
        None,
        None,
        false,
    )
});