Struct rust_bert::openai_gpt::OpenAIGPTLMHeadModel

pub struct OpenAIGPTLMHeadModel { /* private fields */ }
GPT Language Modeling head
GPT model with a decoding head (linear layer without bias). The weights of the linear layer are tied to the word embeddings (see the sketch below). It is made of the following blocks:

transformer: Base Gpt2Model
lm_head: Linear layer without bias tied to the weights of the token id embeddings
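As an illustration of what the weight tying amounts to, the sketch below computes LM logits by multiplying hidden states with the transposed token embedding matrix. The tied_lm_head helper and the tensor shapes are assumptions for illustration only, not the crate's internal code:

use tch::Tensor;

// Illustrative only: a tied LM head is a bias-free projection whose weight
// matrix is the token embedding table itself.
fn tied_lm_head(hidden_states: &Tensor, token_embedding_weights: &Tensor) -> Tensor {
    // hidden_states: (batch_size, sequence_length, hidden_size)
    // token_embedding_weights: (vocab_size, hidden_size)
    // result: (batch_size, sequence_length, vocab_size)
    hidden_states.matmul(&token_embedding_weights.transpose(0, 1))
}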
Implementations

impl OpenAIGPTLMHeadModel

pub fn new<'p, P>(p: P, config: &Gpt2Config) -> OpenAIGPTLMHeadModel
where
    P: Borrow<Path<'p>>,
Build a new OpenAIGPTLMHeadModel
Arguments
p - Variable store path for the root of the GPT model
config - Gpt2Config object defining the model architecture
Example
use rust_bert::gpt2::Gpt2Config;
use rust_bert::openai_gpt::OpenAIGPTLMHeadModel;
use rust_bert::Config;
use std::path::Path;
use tch::{nn, Device};
let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let p = nn::VarStore::new(device);
let config = Gpt2Config::from_file(config_path);
let gpt2: OpenAIGPTLMHeadModel = OpenAIGPTLMHeadModel::new(&p.root() / "gpt", &config);

Trait Implementations
impl LMHeadModel for OpenAIGPTLMHeadModel
fn forward_t(
    &self,
    input_ids: Option<&Tensor>,
    _layer_past: Cache,
    attention_mask: Option<&Tensor>,
    token_type_ids: Option<&Tensor>,
    position_ids: Option<&Tensor>,
    input_embeds: Option<&Tensor>,
    _encoder_outputs: Option<&Tensor>,
    _decoder_input_ids: Option<&Tensor>,
    train: bool
) -> Result<LMModelOutput, RustBertError>
Forward pass through the model
Arguments
input_ids - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
_layer_past - Unused for GPT
attention_mask - Optional mask of shape (batch size, sequence_length). Masked positions have value 0, non-masked value 1. If None, set to 1
token_type_ids - Optional token type ids used to indicate the portion of the input the token belongs to. If not None, token type embeddings will be added to the token and position embeddings.
position_ids - Optional position ids of shape (batch size, sequence_length). If None, will be incremented starting from the length of the past input.
input_embeds - Optional pre-computed input embeddings of shape (batch size, sequence_length, hidden_size). If None, input ids must be provided (see input_ids)
_encoder_outputs - Unused for GPT
_decoder_input_ids - Unused for GPT
train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.
Returns
LMModelOutput containing:

lm_logits - Tensor of shape (batch size, sequence_length, vocab_size) representing the logits for each vocab item and position
cache - None
encoder_hidden_states - None
all_hidden_states - Option<Vec<Tensor>> of length num_hidden_layers with shape (batch size, sequence_length, hidden_size)
all_attentions - Option<Vec<Tensor>> of length num_hidden_layers with shape (batch size, sequence_length, hidden_size)
Example
use rust_bert::gpt2::Gpt2Config;
use rust_bert::openai_gpt::OpenAIGPTLMHeadModel;
use rust_bert::pipelines::generation_utils::{Cache, LMHeadModel};
use rust_bert::Config;
use std::path::Path;
use tch::Kind::Int64;
use tch::{nn, no_grad, Device, Tensor};

let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let vs = nn::VarStore::new(device);
let config = Gpt2Config::from_file(config_path);
let gpt_model = OpenAIGPTLMHeadModel::new(&vs.root() / "gpt", &config);

let (batch_size, sequence_length, past_sequence_length) = (64, 128, 56);
let input_tensor = Tensor::randint(config.vocab_size, &[batch_size, sequence_length], (Int64, device));
let attention_mask = Tensor::zeros(&[batch_size, sequence_length], (Int64, device));
let token_type_ids = Tensor::ones(&[batch_size, sequence_length], (Int64, device));
let position_ids = Tensor::arange(sequence_length, (Int64, device)).expand(&[batch_size, sequence_length], true);
let model_output = no_grad(|| {
    gpt_model
        .forward_t(
            Some(&input_tensor),
            Cache::None,
            Some(&attention_mask),
            Some(&token_type_ids),
            Some(&position_ids),
            None,
            None,
            None,
            false,
        )
        .unwrap()
});
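To make the returned LMModelOutput concrete, the lines below (an illustrative continuation, assuming the setup above) read the logits of the last position to pick a greedy next token:

// model_output.lm_logits has shape (batch_size, sequence_length, vocab_size).
// Select the last position's logits and take the argmax over the vocabulary.
let next_token_logits = model_output.lm_logits.select(1, sequence_length - 1);
let next_token_ids = next_token_logits.argmax(-1, false);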
impl LanguageGenerator<OpenAIGPTLMHeadModel, OpenAiGptVocab, OpenAiGptTokenizer> for OpenAIGenerator
fn generate<S>(
    &self,
    prompt_texts: Option<&[S]>,
    generate_options: Option<GenerateOptions<'_>>
) -> Vec<GeneratedTextOutput>
where
    S: AsRef<str> + Sync,

Generate text based on a vector of prompt texts.
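A minimal usage sketch, assuming default pretrained resources can be downloaded and that the GenerateConfig fields shown exist in the crate version in use:

use rust_bert::openai_gpt::OpenAIGenerator;
use rust_bert::pipelines::generation_utils::{GenerateConfig, LanguageGenerator};

let generate_config = GenerateConfig {
    do_sample: true,
    num_beams: 1,
    ..Default::default()
};
// `?` assumes an enclosing fn returning Result<_, RustBertError>.
let model = OpenAIGenerator::new(generate_config)?;

let prompts = ["The dog", "The cat was"];
let output = model.generate(Some(&prompts), None);
for sentence in output {
    println!("{}", sentence.text);
}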
fn generate_indices<S>(
    &self,
    prompt_texts: Option<&[S]>,
    generate_options: Option<GenerateOptions<'_>>
) -> Vec<GeneratedIndicesOutput>
where
    S: AsRef<str> + Sync,

Generate token indices without decoding (useful for token-level operations before returning final text or as a validation step during training).
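For instance, the indices can be decoded afterwards with the generator's tokenizer. This continuation assumes the model built in the previous sketch, and that GeneratedIndicesOutput exposes an indices field of token ids:

let indices_output = model.generate_indices(Some(&["The dog"]), None);
for generated in indices_output {
    // Decode the raw token ids, skipping special tokens and cleaning up spaces.
    let text = model
        .get_tokenizer()
        .decode(&generated.indices, true, true);
    println!("{}", text);
}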
fn generate_from_ids_and_past(
    &self,
    input_ids: Tensor,
    attention_mask: Option<Tensor>,
    generate_options: Option<GenerateOptions<'_>>
) -> Vec<GeneratedIndicesOutput>

Generate token indices given a list of indices (useful when the input has been pre-tokenized). Returns a list of output tokens that need to be decoded using a tokenizer.
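A sketch of the pre-tokenized path, continuing from the same model; the encode call and TokenizedInput field names are assumptions about the rust_tokenizers API in use:

use rust_tokenizers::tokenizer::TruncationStrategy;
use tch::Tensor;

// Encode the prompt manually, then feed the raw ids to the generator.
let tokenized = model
    .get_tokenizer()
    .encode("The dog", None, 128, &TruncationStrategy::LongestFirst, 0);
// Add a batch dimension: (sequence_length) -> (1, sequence_length).
let input_ids = Tensor::of_slice(&tokenized.token_ids).unsqueeze(0);
let output = model.generate_from_ids_and_past(input_ids, None, None);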
fn get_tokenizer(&self) -> &TokenizerOption

Returns a reference to the text generator’s tokenizer.
fn half(&mut self)
fn float(&mut self)
fn set_device(&mut self, device: Device)
Auto Trait Implementations
impl RefUnwindSafe for OpenAIGPTLMHeadModel
impl Send for OpenAIGPTLMHeadModel
impl !Sync for OpenAIGPTLMHeadModel
impl Unpin for OpenAIGPTLMHeadModel
impl UnwindSafe for OpenAIGPTLMHeadModel
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.
impl<T> Instrument for T

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper.

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper.
impl<T> Pointable for T

impl<V, T> VZip<V> for T
where
    V: MultiLane<T>,

fn vzip(self) -> V
impl<T> WithSubscriber for T

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where
    S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper.

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper.