use crate::RobertaVocab;
use crate::preprocessing::vocab::base_vocab::Vocab;
use crate::preprocessing::tokenizer::base_tokenizer::Tokenizer;
use std::collections::HashMap;
use crate::preprocessing::tokenizer::tokenization_utils::{bpe, split_on_special_tokens};
use std::rc::Rc;
use std::cell::RefCell;
use crate::preprocessing::vocab::bpe_vocab::BpePairVocab;
use regex::Regex;
use crate::preprocessing::tokenizer::constants::BYTES_TO_UNICODE;
/// Byte-level BPE tokenizer for RoBERTa models (GPT-2 style tokenization).
pub struct RobertaTokenizer {
/// Token-to-id vocabulary, shared via reference counting.
vocab: Rc<RobertaVocab>,
/// BPE merge ranks used to greedily merge byte-level symbols.
bpe_ranks: Rc<BpePairVocab>,
/// Memoized BPE output per word; `RefCell` because `tokenize` takes `&self`.
cache: RefCell<HashMap<String, Vec<String>>>,
/// `\s+\S`: locates each whitespace run plus the first following non-space char,
/// used to pre-split text so one leading space stays attached to the next word.
pattern_lookahead: Regex,
/// GPT-2 tokenization pattern: contractions, letter runs, digit runs,
/// punctuation runs (each optionally with one leading space), and whitespace.
pattern_tokenization: Regex,
}
impl RobertaTokenizer {
pub fn from_file(vocab_path: &str, merges_path: &str) -> RobertaTokenizer {
let vocab = Rc::new(RobertaVocab::from_file(vocab_path));
let bpe_ranks = Rc::new(BpePairVocab::from_file(merges_path));
let cache = RefCell::new(HashMap::new());
let pattern_lookahead = Regex::new(r"\s+\S").unwrap();
let pattern_tokenization = Regex::new(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+").unwrap();
RobertaTokenizer { vocab, bpe_ranks, cache, pattern_lookahead, pattern_tokenization }
}
pub fn from_existing_vocab_and_merges(vocab: Rc<RobertaVocab>, merges: Rc<BpePairVocab>) -> RobertaTokenizer {
let cache = RefCell::new(HashMap::new());
let pattern_lookahead = Regex::new(r"\s+\S").unwrap();
let pattern_tokenization = Regex::new(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+").unwrap();
RobertaTokenizer { vocab, bpe_ranks: merges, cache, pattern_lookahead, pattern_tokenization }
}
}
impl Tokenizer<RobertaVocab> for RobertaTokenizer {
    /// Returns a reference to the underlying vocabulary.
    fn vocab(&self) -> &RobertaVocab {
        &self.vocab
    }

    /// Tokenizes `text` into byte-level BPE sub-tokens.
    ///
    /// Special tokens (e.g. `<s>`, `<mask>`) are passed through unchanged. All other
    /// text is pre-split on whitespace (keeping a single leading space attached to
    /// the following word, GPT-2 convention), split on the GPT-2 tokenization
    /// pattern, mapped byte-by-byte onto the printable unicode alphabet
    /// (`BYTES_TO_UNICODE`), and merged with BPE. BPE results are memoized in
    /// `self.cache`.
    fn tokenize(&self, text: &str) -> Vec<String> {
        let mut tokenized_text: Vec<String> = Vec::with_capacity(text.len());
        let temp_text = split_on_special_tokens(text, self.vocab.as_ref());
        for text in temp_text {
            if !self.vocab.special_values.contains_key(text) {
                let mut sub_words: Vec<&str> = vec!();
                let mut splits: Vec<&str> = vec!();
                let mut i: usize = 0;
                let mut end: usize;
                for hit in self.pattern_lookahead.find_iter(text) {
                    // Each hit is a whitespace run plus the first following
                    // non-whitespace char. Split just before the *last* whitespace
                    // char of the run so that single space travels with the next
                    // word. Derive the split point from the actual UTF-8 widths of
                    // the two trailing chars: the previous
                    // `hit.end() - 1 - last_char.len_utf8()` hard-coded a 1-byte
                    // whitespace char, which lands off a char boundary (and panics
                    // when slicing) for multi-byte whitespace such as U+00A0.
                    let mut tail = hit.as_str().chars().rev();
                    let non_whitespace = tail.next().unwrap();
                    let last_whitespace = tail.next().unwrap();
                    end = hit.end() - non_whitespace.len_utf8() - last_whitespace.len_utf8();
                    splits.push(&text[i..end]);
                    i = end;
                }
                splits.push(&text[i..]);
                for sub_word in splits {
                    for hit in self.pattern_tokenization.find_iter(sub_word) {
                        sub_words.push(hit.as_str());
                    }
                }
                for word in sub_words {
                    // Map raw bytes onto the byte-level unicode alphabet used by
                    // GPT-2/RoBERTa vocabularies (e.g. space -> 'Ġ').
                    let word: String = word.as_bytes().iter().map(|v| BYTES_TO_UNICODE.get(&v).unwrap()).collect();
                    // Check the cache in a separate scope so the immutable borrow is
                    // released before the mutable insert below.
                    let cached: bool = match self.cache.borrow().get(&word) {
                        Some(value) => {
                            tokenized_text.extend(value.clone());
                            true
                        }
                        None => false
                    };
                    if !cached {
                        let bpe_output = bpe(&word, &self.bpe_ranks);
                        self.cache.borrow_mut().insert(word.to_owned(), bpe_output.clone());
                        tokenized_text.extend(bpe_output);
                    }
                }
            } else {
                // Special tokens are emitted verbatim, never BPE-split.
                tokenized_text.push(text.to_owned());
            }
        }
        tokenized_text
    }

    /// Wraps one or two id sequences with RoBERTa special tokens:
    /// `<s> A </s>` for a single sequence, `<s> A </s></s> B </s>` for a pair.
    ///
    /// Returns `(token ids, segment ids, special-token mask)`; the mask is 1 at
    /// special-token positions and 0 elsewhere, and the second segment (including
    /// its trailing separator) is marked with segment id 1.
    fn build_input_with_special_tokens(&self, tokens_1: Vec<i64>, tokens_2: Option<Vec<i64>>) -> (Vec<i64>, Vec<i8>, Vec<i8>) {
        let mut output: Vec<i64> = vec!();
        let mut token_segment_ids: Vec<i8> = vec!();
        let mut special_tokens_mask: Vec<i8> = vec!();
        special_tokens_mask.push(1);
        special_tokens_mask.extend(vec![0; tokens_1.len()]);
        special_tokens_mask.push(1);
        token_segment_ids.extend(vec![0; tokens_1.len() + 2]);
        output.push(self.vocab.token_to_id(RobertaVocab::cls_value()));
        output.extend(tokens_1);
        output.push(self.vocab.token_to_id(RobertaVocab::sep_value()));
        if let Some(add_tokens) = tokens_2 {
            special_tokens_mask.push(1);
            special_tokens_mask.extend(vec![0; add_tokens.len()]);
            special_tokens_mask.push(1);
            // The extra `</s>` between the pair belongs to segment 0; the second
            // sequence and its closing `</s>` belong to segment 1.
            token_segment_ids.push(0);
            token_segment_ids.extend(vec![1; add_tokens.len() + 1]);
            output.push(self.vocab.token_to_id(RobertaVocab::sep_value()));
            output.extend(add_tokens);
            output.push(self.vocab.token_to_id(RobertaVocab::sep_value()));
        }
        (output, token_segment_ids, special_tokens_mask)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::RobertaVocab;
    use std::collections::HashMap;
    use crate::preprocessing::tokenizer::base_tokenizer::{TruncationStrategy, TokenizedInput};
    use crate::preprocessing::vocab::base_vocab::swap_key_values;

    /// Builds a tiny in-memory vocabulary covering the tokens used by the tests.
    fn generate_test_vocab() -> RobertaVocab {
        let values: HashMap<String, i64> = [
            ("t".to_owned(), 0),
            ("h".to_owned(), 1),
            ("a@@".to_owned(), 2),
            ("n".to_owned(), 3),
            ("the".to_owned(), 4),
            ("Ġ".to_owned(), 5),
            ("<unk>".to_owned(), 6),
            ("o@@".to_owned(), 7),
            ("<s>".to_owned(), 8),
            ("</s>".to_owned(), 9),
            ("<pad>".to_owned(), 10),
            ("<mask>".to_owned(), 11),
        ].iter().cloned().collect();

        let special_values: HashMap<String, i64> = [
            ("<unk>".to_owned(), 6),
            ("<s>".to_owned(), 8),
            ("</s>".to_owned(), 9),
            ("<pad>".to_owned(), 10),
            ("<mask>".to_owned(), 11),
        ].iter().cloned().collect();

        let indices = swap_key_values(&values);
        let special_indices = swap_key_values(&special_values);

        RobertaVocab { values, indices, unknown_value: "<unk>", special_values, special_indices }
    }

    /// Builds a minimal BPE merges table matching the vocabulary above.
    fn generate_test_merges() -> BpePairVocab {
        let values: HashMap<(String, String), i64> = [
            (("Ġ".to_owned(), "t".to_owned()), 0),
            (("Ġ".to_owned(), "n".to_owned()), 1),
            (("e".to_owned(), "e".to_owned()), 2),
            (("Ġt".to_owned(), "he".to_owned()), 3),
            (("h".to_owned(), "e".to_owned()), 4),
            (("t".to_owned(), "h".to_owned()), 5),
            (("t".to_owned(), "he".to_owned()), 6),
        ].iter().cloned().collect();

        BpePairVocab { values }
    }

    #[test]
    fn test_roberta_tokenizer() {
        // Given
        let vocab = Rc::new(generate_test_vocab());
        let merges = Rc::new(generate_test_merges());
        let tokenizer: RobertaTokenizer = RobertaTokenizer::from_existing_vocab_and_merges(vocab, merges);
        let test_tuples = [
            ("the earth", vec!["the", "Ġ", "e", "a", "r", "th"]),
            ("", vec![]),
            ("✿", vec!["â", "ľ", "¿"]),
        ];
        let source_texts: Vec<&str> = test_tuples.iter().map(|v| v.0).collect();
        let expected_results: Vec<Vec<&str>> = test_tuples.iter().map(|v| v.1.clone()).collect();

        // When / Then — single inputs, then the batched variant.
        for (source_text, expected_result) in &test_tuples {
            assert_eq!(tokenizer.tokenize(*source_text), *expected_result);
        }
        assert_eq!(tokenizer.tokenize_list(source_texts.clone()), expected_results);
    }

    #[test]
    fn test_encode() {
        // Given
        let vocab = Rc::new(generate_test_vocab());
        let merges = Rc::new(generate_test_merges());
        let tokenizer: RobertaTokenizer = RobertaTokenizer::from_existing_vocab_and_merges(vocab, merges);
        let truncation_strategy = TruncationStrategy::LongestFirst;
        let test_tuples = [
            (
                "the earth",
                TokenizedInput { token_ids: vec![8, 4, 5, 6, 6, 6, 6, 9], segment_ids: vec![0, 0, 0, 0, 0, 0, 0, 0], special_tokens_mask: vec![1, 0, 0, 0, 0, 0, 0, 1], overflowing_tokens: vec![], num_truncated_tokens: 0 },
            ),
            (
                "✿",
                TokenizedInput { token_ids: vec![8, 6, 6, 6, 9], segment_ids: vec![0, 0, 0, 0, 0], special_tokens_mask: vec![1, 0, 0, 0, 1], overflowing_tokens: vec![], num_truncated_tokens: 0 },
            ),
            (
                "",
                TokenizedInput { token_ids: vec![8, 9], segment_ids: vec![0, 0], special_tokens_mask: vec![1, 1], overflowing_tokens: vec![], num_truncated_tokens: 0 },
            ),
        ];
        let source_texts: Vec<&str> = test_tuples.iter().map(|v| v.0).collect();
        let expected_results: Vec<TokenizedInput> = test_tuples.iter().map(|v| v.1.clone()).collect();

        // When / Then — single inputs, then the batched variant.
        for (source_text, expected_result) in &test_tuples {
            assert_eq!(tokenizer.encode(source_text, None, 128, &truncation_strategy, 0),
                       *expected_result);
        }
        assert_eq!(tokenizer.encode_list(source_texts.clone(), 128, &truncation_strategy, 0), expected_results);
    }
}