Struct rust_bert::gpt2::GPT2LMHeadModel
GPT2 Language Modeling head
GPT2 model with a decoding head (linear layer without bias). The weights of the linear layer are tied to the word embeddings. It is made of the following blocks:

transformer - Base Gpt2Model
lm_head - Linear layer without bias tied to the weights of the token id embeddings
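To illustrate the tied-weights design, here is a minimal conceptual sketch in tch of how a bias-free LM head can reuse the token embedding matrix. This is not the rust_bert internals; hidden_states is a placeholder for the transformer output, and the dimensions are the standard GPT2-base values.

use tch::{nn, Device, Kind, Tensor};

let vs = nn::VarStore::new(Device::Cpu);
let (vocab_size, n_embd) = (50257, 768);

// Token embedding table: one row per vocabulary item.
let wte = nn::embedding(&vs.root() / "wte", vocab_size, n_embd, Default::default());

// Placeholder for the transformer output (batch size 1, sequence length 4).
let hidden_states = Tensor::rand(&[1, 4, n_embd], (Kind::Float, Device::Cpu));

// "Linear layer without bias tied to the embeddings": project the hidden states
// with the embedding matrix itself instead of a separate output weight matrix.
let lm_logits = hidden_states.linear::<Tensor>(&wte.ws, None);
assert_eq!(lm_logits.size(), vec![1, 4, vocab_size]);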
Implementations
impl GPT2LMHeadModel

pub fn new<'p, P>(p: P, config: &Gpt2Config) -> GPT2LMHeadModel
where
    P: Borrow<Path<'p>>,
Build a new GPT2LMHeadModel
Arguments
p - Variable store path for the root of the GPT2 model
config - Gpt2Config object defining the model architecture
Example
use rust_bert::gpt2::{GPT2LMHeadModel, Gpt2Config};
use rust_bert::Config;
use std::path::Path;
use tch::{nn, Device};

let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let p = nn::VarStore::new(device);
let config = Gpt2Config::from_file(config_path);
let gpt2: GPT2LMHeadModel = GPT2LMHeadModel::new(&p.root() / "gpt2", &config);
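The constructor only defines the variable layout in the store; pretrained weights are typically loaded into it afterwards. A minimal sketch continuing the example above, assuming a weights file already converted to the tch .ot format (the path is hypothetical):

// The variable store must be mutable to load weights into it.
let mut p = nn::VarStore::new(device);
let config = Gpt2Config::from_file(config_path);
let gpt2 = GPT2LMHeadModel::new(&p.root() / "gpt2", &config);

// Hypothetical path to a weights file converted to the tch `.ot` format.
p.load("path/to/model.ot").expect("could not load weights");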
Trait Implementations
impl LMHeadModel for GPT2LMHeadModel

fn forward_t(
    &self,
    input_ids: &Option<Tensor>,
    layer_past: Cache,
    attention_mask: &Option<Tensor>,
    token_type_ids: &Option<Tensor>,
    position_ids: &Option<Tensor>,
    input_embeds: &Option<Tensor>,
    _encoder_outputs: Option<&Tensor>,
    _decoder_input_ids: &Option<Tensor>,
    train: bool,
) -> Result<LMModelOutput, RustBertError>
Forward pass through the model
Arguments
input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds).
layer_past - Optional vector of size n_layer containing the past keys and values of each layer of shape (2, batch size, number of heads, past_sequence_length, hidden size per head). When provided, these are concatenated with the current input keys and values.
attention_mask - Optional mask of shape (batch size, sequence_length). Masked positions have value 0, non-masked positions have value 1. If None, set to 1.
input_embeds - Optional pre-computed input embeddings of shape (batch size, sequence_length, hidden_size). If None, input ids must be provided (see input_ids).
token_type_ids - Optional token type ids used to indicate the portion of the input the token belongs to. If not None, token type embeddings will be added to the token and position embeddings.
position_ids - Optional position ids of shape (batch size, sequence_length). If None, will be incremented starting from the length of the past input.
_encoder_outputs - Optional tensor of shape (batch size, source_sequence_length, encoder_hidden_dim). Unused for GPT2.
_decoder_input_ids - Optional tensor of shape (batch size, target_sequence_length). Unused for GPT2.
train - Boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.
Returns
LMModelOutput containing:

lm_logits - Tensor of shape (batch size, sequence_length, vocab_size) representing the logits for each vocabulary item and position
cache - Gpt2Cache made of Option<Vec<Tensor>> of length n_layer containing the past keys and values of each layer of shape (2, batch size, number of heads, past_sequence_length, hidden size per head)
Example
use rust_bert::gpt2::{GPT2LMHeadModel, Gpt2Config};
use rust_bert::pipelines::generation_utils::{Cache, LMHeadModel};
use rust_bert::Config;
use std::path::Path;
use tch::kind::Kind::{Double, Int64};
use tch::{nn, no_grad, Device, Tensor};

let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let config = Gpt2Config::from_file(config_path);
let gpt2_model = GPT2LMHeadModel::new(&vs.root() / "gpt2", &config);

let (batch_size, sequence_length, past_sequence_length) = (64, 128, 56);
// Random token ids in [0, vocab_size).
let input_tensor = Tensor::randint(
    config.vocab_size,
    &[batch_size, sequence_length],
    (Int64, device),
);
let mut past: Vec<Tensor> = Vec::with_capacity(config.n_layer as usize);
for _ in 0..config.n_layer as usize {
    past.push(Tensor::rand(
        &[
            2,
            batch_size,
            config.n_head,
            past_sequence_length,
            config.n_embd / config.n_head,
        ],
        (Double, device),
    ))
}
let attention_mask = Tensor::zeros(&[batch_size, sequence_length], (Int64, device));
let token_type_ids = Tensor::ones(&[batch_size, sequence_length], (Int64, device));
let position_ids = Tensor::arange(sequence_length, (Int64, device))
    .expand(&[batch_size, sequence_length], true);

let model_output = no_grad(|| {
    gpt2_model
        .forward_t(
            &Some(input_tensor),
            Cache::GPT2Cache(Some(past)),
            &Some(attention_mask),
            &Some(token_type_ids),
            &Some(position_ids),
            &None,
            None,
            &None,
            false,
        )
        .unwrap()
});
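To continue decoding from this output, the returned cache can be fed back as layer_past for the next step. A minimal sketch with greedy next-token selection, continuing from model_output above (only the new tokens are passed; as noted under position_ids, positions are inferred from the cached sequence length):

// Logits for the last position of each sequence: (batch size, vocab_size).
let next_token_logits = model_output.lm_logits.select(1, -1);

// Greedy choice of the next token, reshaped to (batch size, 1).
let next_tokens = next_token_logits.argmax(-1, false).unsqueeze(-1);

// Feed only the new tokens and reuse the cached keys and values.
let next_output = no_grad(|| {
    gpt2_model
        .forward_t(
            &Some(next_tokens),
            model_output.cache,
            &None,
            &None,
            &None,
            &None,
            None,
            &None,
            false,
        )
        .unwrap()
});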
impl LanguageGenerator<GPT2LMHeadModel, Gpt2Vocab, Gpt2Tokenizer> for GPT2Generator

fn generate<'a, S>(
    &self,
    prompt_texts: Option<S>,
    attention_mask: Option<Tensor>,
    min_length: impl Into<Option<i64>>,
    max_length: impl Into<Option<i64>>,
    decoder_start_token_id: impl Into<Option<i64>>,
) -> Vec<String>
where
    S: AsRef<[&'a str]>,

Generate text from optional prompt texts, returning the decoded output strings.

fn generate_indices<'a, S>(
    &self,
    prompt_texts: Option<S>,
    attention_mask: Option<Tensor>,
    min_length: impl Into<Option<i64>>,
    max_length: impl Into<Option<i64>>,
    decoder_start_token_id: impl Into<Option<i64>>,
) -> Vec<Vec<i64>>
where
    S: AsRef<[&'a str]>,

Generate sequences of token indices from optional prompt texts, without decoding them to text.

fn generate_from_ids_and_past(
    &self,
    input_ids: Tensor,
    attention_mask: Option<Tensor>,
    min_length: impl Into<Option<i64>>,
    max_length: impl Into<Option<i64>>,
    decoder_start_token_id: impl Into<Option<i64>>,
) -> Vec<Vec<i64>>

Generate sequences of token indices from pre-tokenized input ids and an optional attention mask.
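A usage sketch for generate, assuming GPT2Generator::new accepts a pipelines::generation_utils::GenerateConfig as in this crate version (the default config fetches pretrained GPT2 resources; the sampling settings here are illustrative):

use rust_bert::gpt2::GPT2Generator;
use rust_bert::pipelines::generation_utils::{GenerateConfig, LanguageGenerator};

// Illustrative generation settings; remaining fields keep their defaults.
let generate_config = GenerateConfig {
    max_length: 30,
    do_sample: true,
    num_beams: 5,
    temperature: 1.1,
    num_return_sequences: 3,
    ..Default::default()
};
let gpt2_generator = GPT2Generator::new(generate_config).expect("model setup failed");

// Batch of two prompts; None arguments fall back to the generator's configuration.
let output = gpt2_generator.generate(
    Some(&["The dog", "The cat was"]),
    None, // attention_mask
    None, // min_length
    None, // max_length
    None, // decoder_start_token_id
);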
Auto Trait Implementations
impl RefUnwindSafe for GPT2LMHeadModel
impl Send for GPT2LMHeadModel
impl !Sync for GPT2LMHeadModel
impl Unpin for GPT2LMHeadModel
impl UnwindSafe for GPT2LMHeadModel
Blanket Implementations
impl<T> Any for T
where
    T: 'static + ?Sized,
impl<T> Borrow<T> for T
where
    T: ?Sized,

pub fn borrow(&self) -> &T
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

pub fn borrow_mut(&mut self) -> &mut T
impl<T> From<T> for T
impl<T> Instrument for T

pub fn instrument(self, span: Span) -> Instrumented<Self>

pub fn in_current_span(self) -> Instrumented<Self>
impl<T, U> Into<U> for T
where
    U: From<T>,
impl<T> Pointable for T
pub const ALIGN: usize
type Init = T
The type for initializers.
pub unsafe fn init(init: <T as Pointable>::Init) -> usize
pub unsafe fn deref<'a>(ptr: usize) -> &'a T
pub unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T
pub unsafe fn drop(ptr: usize)
impl<T> Same<T> for T
type Output = T
Should always be Self
impl<T, U> TryFrom<U> for T
where
    U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

pub fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>
impl<T, U> TryInto<U> for T
where
    U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

pub fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>
impl<V, T> VZip<V> for T
where
    V: MultiLane<T>,