[−][src]Struct rust_bert::distilbert::DistilBertForTokenClassification
DistilBERT for token classification (e.g. NER, POS)
Token-level classifier predicting a label for each token provided. Note that because of wordpiece tokenization, the labels predicted are not necessarily aligned with words in the sentence. It is made of the following blocks:
distil_bert_model: Base DistilBertModel
classifier: Linear layer for token classification
Methods
impl DistilBertForTokenClassification
[src]
pub fn new(
p: &Path,
config: &DistilBertConfig
) -> DistilBertForTokenClassification
[src]
p: &Path,
config: &DistilBertConfig
) -> DistilBertForTokenClassification
Build a new DistilBertForTokenClassification for token classification
Arguments
p - Variable store path for the root of the DistilBertForTokenClassification model
config - DistilBertConfig object defining the model architecture
Example
use tch::{nn, Device};
use rust_bert::Config;
use std::path::Path;
use rust_bert::distilbert::{DistilBertConfig, DistilBertForTokenClassification};

let config_path = Path::new("path/to/config.json");
let device = Device::Cpu;
let p = nn::VarStore::new(device);
let config = DistilBertConfig::from_file(config_path);
let distil_bert = DistilBertForTokenClassification::new(&(&p.root() / "distilbert"), &config);
pub fn forward_t(
&self,
input: Option<Tensor>,
mask: Option<Tensor>,
input_embeds: Option<Tensor>,
train: bool
) -> Result<(Tensor, Option<Vec<Tensor>>, Option<Vec<Tensor>>), &'static str>
[src]
&self,
input: Option<Tensor>,
mask: Option<Tensor>,
input_embeds: Option<Tensor>,
train: bool
) -> Result<(Tensor, Option<Vec<Tensor>>, Option<Vec<Tensor>>), &'static str>
Forward pass through the model
Arguments
input - Optional input tensor of shape (batch size, sequence_length). If None, pre-computed embeddings must be provided (see input_embeds)
mask - Optional mask of shape (batch size, sequence_length). Masked positions have value 0, non-masked value 1. If None, set to 1
input_embeds - Optional pre-computed input embeddings of shape (batch size, sequence_length, hidden_size). If None, input ids must be provided (see input)
train - boolean flag to turn on/off the dropout layers in the model. Should be set to false for inference.
Returns
output - Tensor of shape (batch size, sequence_length, num_labels) representing the logits for each position and class
hidden_states - Option<Vec<Tensor>> of length num_hidden_layers with shape (batch size, sequence_length, hidden_size)
attentions - Option<Vec<Tensor>> of length num_hidden_layers with shape (batch size, sequence_length, hidden_size)
Example
use rust_bert::distilbert::{DistilBertConfig, DistilBertForTokenClassification};

let (batch_size, sequence_length) = (64, 128);
let input_tensor = Tensor::rand(&[batch_size, sequence_length], (Int64, device));
let mask = Tensor::zeros(&[batch_size, sequence_length], (Int64, device));

let (output, all_hidden_states, all_attentions) = no_grad(|| {
    distilbert_model
        .forward_t(Some(input_tensor), Some(mask), None, false)
        .unwrap()
});
Auto Trait Implementations
impl !RefUnwindSafe for DistilBertForTokenClassification
impl !Send for DistilBertForTokenClassification
impl !Sync for DistilBertForTokenClassification
impl Unpin for DistilBertForTokenClassification
impl !UnwindSafe for DistilBertForTokenClassification
Blanket Implementations
impl<T> Any for T where
T: 'static + ?Sized,
[src]
T: 'static + ?Sized,
impl<T> Borrow<T> for T where
T: ?Sized,
[src]
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
[src]
T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
[src]
impl<T> From<T> for T
[src]
impl<T, U> Into<U> for T where
U: From<T>,
[src]
U: From<T>,
impl<T, U> TryFrom<U> for T where
U: Into<T>,
[src]
U: Into<T>,
type Error = Infallible
The type returned in the event of a conversion error.
fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>
[src]
impl<T, U> TryInto<U> for T where
U: TryFrom<T>,
[src]
U: TryFrom<T>,