Struct rust_bert::pipelines::translation::TranslationConfig
pub struct TranslationConfig {
    pub model_resource: Resource,
    pub config_resource: Resource,
    pub vocab_resource: Resource,
    pub merges_resource: Resource,
    pub min_length: i64,
    pub max_length: i64,
    pub do_sample: bool,
    pub early_stopping: bool,
    pub num_beams: i64,
    pub temperature: f64,
    pub top_k: i64,
    pub top_p: f64,
    pub repetition_penalty: f64,
    pub length_penalty: f64,
    pub no_repeat_ngram_size: i64,
    pub num_return_sequences: i64,
    pub device: Device,
    pub prefix: Option<String>,
    pub num_beam_groups: Option<i64>,
    pub diversity_penalty: Option<f64>,
    pub model_type: ModelType,
}
Configuration for text translation
Contains information about the model to load. It mirrors GenerateConfig with a different set of default parameters, and sets the device to place the model on. The generation parameters below are public fields and can be set directly; see the example after the field list.
Fields
model_resource: Resource
Model weights resource (default: pretrained translation model for the selected language pair)
config_resource: Resource
Config resource (default: pretrained translation model for the selected language pair)
vocab_resource: Resource
Vocab resource (default: pretrained translation model for the selected language pair)
merges_resource: Resource
Merges resource (default: pretrained translation model for the selected language pair)
min_length: i64
Minimum sequence length (default: 0)
max_length: i64
Maximum sequence length (default: 20)
do_sample: bool
Sampling flag. If true, performs top-k and/or nucleus sampling on generated tokens; otherwise uses greedy (deterministic) decoding (default: true)
early_stopping: bool
Early stopping flag indicating if the beam search should stop as soon as num_beams hypotheses have been generated (default: false)
num_beams: i64
Number of beams for beam search (default: 5)
temperature: f64
Temperature setting. Values higher than 1 will improve originality at the risk of reducing relevance (default: 1.0)
top_k: i64
Top-k value for sampling tokens. Values higher than 0 enable the feature (default: 0)
top_p: f64
Top-p value for nucleus sampling (Holtzman et al.). Keeps the top tokens until their cumulative probability reaches top_p (default: 0.9)
repetition_penalty: f64
Repetition penalty (mostly useful for CTRL decoders). Values higher than 1 penalize tokens that have already been generated (default: 1.0)
length_penalty: f64
Exponential penalty based on the length of the hypotheses generated (default: 1.0)
no_repeat_ngram_size: i64
Size of n-grams that cannot be repeated in the generated output. Values higher than 0 enable this feature (default: 3)
num_return_sequences: i64
Number of sequences to return for each prompt text (default: 1)
device: Device
Device to place the model on (default: CUDA/GPU when available)
prefix: Option<String>
Prefix to prepend to translation inputs (e.g. ">>fr<<")
num_beam_groups: Option<i64>
Number of beam groups for diverse beam generation. If provided and higher than 1, will split the beams into beam subgroups, leading to more diverse generation.
diversity_penalty: Option<f64>
Diversity penalty for diverse beam search. High values will enforce more difference between beam groups (default: 5.5)
model_type: ModelType
Model type used for translation
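Since all fields above are public, a configuration built for an available language pair can be tweaked in place before it is used. A minimal sketch, using only the constructor and fields documented on this page:

use rust_bert::pipelines::translation::{Language, TranslationConfig};
use tch::Device;

// Start from the defaults for a language pair, then override individual generation parameters.
let mut translation_config =
    TranslationConfig::new(Language::EnglishToFrench, Device::cuda_if_available());
translation_config.num_beams = 3; // narrower beam search for faster decoding
translation_config.max_length = 64; // allow up to 64 generated tokens
translation_config.do_sample = false; // keep decoding deterministic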
Implementations
impl TranslationConfig

pub fn new(language: Language, device: Device) -> TranslationConfig
Create a new TranslationConfig from an available language.
Arguments

language - Language enum value (e.g. Language::EnglishToFrench)
device - Device to place the model on (CPU/GPU)
Example
use rust_bert::pipelines::translation::{Language, TranslationConfig};
use tch::Device;

let translation_config =
    TranslationConfig::new(Language::FrenchToEnglish, Device::cuda_if_available());
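In practice the configuration is passed on to the translation pipeline from the same module. The sketch below assumes a TranslationModel type with a fallible new constructor and a translate method taking a slice of inputs; neither signature is documented on this page, so treat them as assumptions.

use rust_bert::pipelines::translation::{Language, TranslationConfig, TranslationModel};
use tch::Device;

let translation_config =
    TranslationConfig::new(Language::FrenchToEnglish, Device::cuda_if_available());
// Assumed API: TranslationModel::new(config) returns a Result with the loaded pipeline.
let model = TranslationModel::new(translation_config).expect("model failed to load");
// Assumed API: translate(&[&str]) returns the translated strings.
let output = model.translate(&["Bonjour le monde."]);
println!("{:?}", output);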
pub fn new_from_resources(
    model_resource: Resource,
    config_resource: Resource,
    vocab_resource: Resource,
    sentence_piece_resource: Resource,
    prefix: Option<String>,
    device: Device,
    model_type: ModelType
) -> TranslationConfig

Create a new TranslationConfig from custom (e.g. local) resources.
Arguments

model_resource - Resource pointing to the model
config_resource - Resource pointing to the configuration
vocab_resource - Resource pointing to the vocabulary
sentence_piece_resource - Resource pointing to the sentence piece model of the source language
prefix - Optional prefix to prepend to translation inputs (e.g. ">>fr<<")
device - Device to place the model on (CPU/GPU)
model_type - ModelType of the model to load (e.g. ModelType::Marian)
Example
use rust_bert::pipelines::common::ModelType;
use rust_bert::pipelines::translation::TranslationConfig;
use rust_bert::resources::{LocalResource, Resource};
use std::path::PathBuf;
use tch::Device;

let config_resource = Resource::Local(LocalResource {
    local_path: PathBuf::from("path/to/config.json"),
});
let model_resource = Resource::Local(LocalResource {
    local_path: PathBuf::from("path/to/model.ot"),
});
let vocab_resource = Resource::Local(LocalResource {
    local_path: PathBuf::from("path/to/vocab.json"),
});
let sentence_piece_resource = Resource::Local(LocalResource {
    local_path: PathBuf::from("path/to/spiece.model"),
});

let translation_config = TranslationConfig::new_from_resources(
    model_resource,
    config_resource,
    vocab_resource,
    sentence_piece_resource,
    Some(">>fr<<".to_string()),
    Device::cuda_if_available(),
    ModelType::Marian,
);
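Remote pretrained resources can be plugged into new_from_resources in the same way as the local resources above. The following is a sketch only: the MarianModelResources / MarianConfigResources / MarianVocabResources / MarianSpmResources constants (here ENGLISH2ROMANCE) and RemoteResource::from_pretrained are assumed to be available in this crate version and are not documented on this page.

use rust_bert::marian::{
    MarianConfigResources, MarianModelResources, MarianSpmResources, MarianVocabResources,
};
use rust_bert::pipelines::common::ModelType;
use rust_bert::pipelines::translation::TranslationConfig;
use rust_bert::resources::{RemoteResource, Resource};
use tch::Device;

// Assumed pretrained resource constants for a multilingual English-to-Romance Marian checkpoint.
let model_resource =
    Resource::Remote(RemoteResource::from_pretrained(MarianModelResources::ENGLISH2ROMANCE));
let config_resource =
    Resource::Remote(RemoteResource::from_pretrained(MarianConfigResources::ENGLISH2ROMANCE));
let vocab_resource =
    Resource::Remote(RemoteResource::from_pretrained(MarianVocabResources::ENGLISH2ROMANCE));
let sentence_piece_resource =
    Resource::Remote(RemoteResource::from_pretrained(MarianSpmResources::ENGLISH2ROMANCE));

let translation_config = TranslationConfig::new_from_resources(
    model_resource,
    config_resource,
    vocab_resource,
    sentence_piece_resource,
    Some(">>fr<<".to_string()), // multilingual target prefix, as in the example above
    Device::cuda_if_available(),
    ModelType::Marian,
);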
Trait Implementations
impl From<TranslationConfig> for GenerateConfig

fn from(config: TranslationConfig) -> GenerateConfig

Performs the conversion.
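This From impl also makes the conversion available through Into, e.g. when forwarding a translation configuration to a generic generation component. A short sketch; the import path of GenerateConfig is an assumption and may differ in this crate version.

use rust_bert::pipelines::generation::GenerateConfig; // path assumed
use rust_bert::pipelines::translation::{Language, TranslationConfig};
use tch::Device;

let translation_config =
    TranslationConfig::new(Language::FrenchToEnglish, Device::cuda_if_available());
// From<TranslationConfig> for GenerateConfig makes .into() available as well.
let generate_config: GenerateConfig = translation_config.into();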
Auto Trait Implementations
impl RefUnwindSafe for TranslationConfig
impl Send for TranslationConfig
impl Sync for TranslationConfig
impl Unpin for TranslationConfig
impl UnwindSafe for TranslationConfig
Blanket Implementations
impl<T> BorrowMut<T> for T where
    T: ?Sized,

pub fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.
impl<T> Instrument for T

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper.

fn in_current_span(self) -> Instrumented<Self>
impl<T> Pointable for T

impl<T> Same<T> for T

type Output = T

Should always be Self

impl<V, T> VZip<V> for T where
    V: MultiLane<T>,