use crate::error::TokenizerError;
use crate::tokenizer::base_tokenizer::{
    Mask, Offset, OffsetSize, Token, TokenIdsWithOffsets, TokenIdsWithSpecialTokens, TokenRef,
};
use crate::tokenizer::tokenization_utils::{
    clean_text, decompose_nfkc, is_whitespace, lowercase, split_at_regex,
};
use crate::tokenizer::{MultiThreadedTokenizer, Tokenizer};
use crate::vocab::{MarianVocab, SentencePieceModel, Vocab};
use regex::Regex;
use std::path::Path;
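
/// # Marian tokenizer
/// Tokenizer for Marian translation models. It combines:
/// - a SentencePiece model used for sub-word segmentation,
/// - a Marian vocabulary mapping sub-words to ids,
/// - a regular expression (`>>...<<`) used to split off the target-language code prefix,
/// - an optional lower-casing of the input text.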
pub struct MarianTokenizer {
model: SentencePieceModel,
vocab: MarianVocab,
pattern_language_code: Regex,
lower_case: bool,
}
impl MarianTokenizer {
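    /// Builds a new `MarianTokenizer` from a vocabulary file and a SentencePiece model file.
    ///
    /// # Parameters
    /// - `vocab_path`: path to the vocabulary file
    /// - `model_path`: path to the SentencePiece model file
    /// - `lower_case`: if `true`, the input text is lower-cased before tokenization
    ///
    /// # Errors
    /// Returns a `TokenizerError` if either file cannot be read or parsed.
    ///
    /// # Example
    ///
    /// A minimal usage sketch: the file paths are placeholders, and the
    /// `rust_tokenizers::tokenizer` import path is assumed from this crate's module layout.
    ///
    /// ```no_run
    /// use rust_tokenizers::tokenizer::{MarianTokenizer, Tokenizer};
    ///
    /// let lower_case = false;
    /// let tokenizer =
    ///     MarianTokenizer::from_files("path/to/vocab.json", "path/to/spiece.model", lower_case)
    ///         .unwrap();
    /// let tokens = tokenizer.tokenize(">>fr<< Hello, world!");
    /// ```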
pub fn from_files<V: AsRef<Path>, M: AsRef<Path>>(
vocab_path: V,
model_path: M,
lower_case: bool,
) -> Result<MarianTokenizer, TokenizerError> {
let vocab = MarianVocab::from_file(vocab_path)?;
let model = SentencePieceModel::from_file(model_path)?;
let pattern_language_code = Regex::new(r">>.+<<").unwrap();
Ok(MarianTokenizer {
model,
vocab,
pattern_language_code,
lower_case,
})
}
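
    /// Builds a new `MarianTokenizer` from a vocabulary file, a SentencePiece model file and a
    /// special-token mapping file overriding the default special token values.
    ///
    /// # Parameters
    /// - `vocab_path`: path to the vocabulary file
    /// - `model_path`: path to the SentencePiece model file
    /// - `lower_case`: if `true`, the input text is lower-cased before tokenization
    /// - `special_token_mapping_path`: path to the special-token mapping file
    ///
    /// # Errors
    /// Returns a `TokenizerError` if any of the files cannot be read or parsed.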
pub fn from_files_with_special_token_mapping<V: AsRef<Path>, M: AsRef<Path>, S: AsRef<Path>>(
vocab_path: V,
model_path: M,
lower_case: bool,
special_token_mapping_path: S,
) -> Result<MarianTokenizer, TokenizerError> {
let vocab = MarianVocab::from_file_with_special_token_mapping(
vocab_path,
special_token_mapping_path,
)?;
let model = SentencePieceModel::from_file(model_path)?;
let pattern_language_code = Regex::new(r">>.+<<").unwrap();
Ok(MarianTokenizer {
model,
vocab,
pattern_language_code,
lower_case,
})
}
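
    /// Builds a new `MarianTokenizer` from an already-loaded vocabulary and SentencePiece model,
    /// avoiding a second read from disk when these are shared with other components.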
pub fn from_existing_vocab_and_model(
vocab: MarianVocab,
model: SentencePieceModel,
lower_case: bool,
) -> MarianTokenizer {
let pattern_language_code = Regex::new(r">>.+<<").unwrap();
MarianTokenizer {
model,
vocab,
pattern_language_code,
lower_case,
}
}
}
impl Tokenizer<MarianVocab> for MarianTokenizer {
fn vocab(&self) -> &MarianVocab {
&self.vocab
}
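
    // Tokenization pipeline: split off the leading language-code token (e.g. ">>fr<<") if
    // present, normalize the text, segment it with the SentencePiece model, merge consecutive
    // unknown pieces, and populate the word-boundary masks.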
fn tokenize_to_tokens(&self, text: TokenRef) -> Vec<Token> {
let tokens = split_at_regex(text, &self.pattern_language_code);
let (code_token, mut token) = match tokens.len() {
0 => {
return vec![];
}
1 => (None, tokens[0].to_owned()),
2 => (Some(tokens[0].to_owned()), tokens[1].to_owned()),
            _ => {
                // More than two segments: treat the first as the language code and merge the
                // remaining segments back into a single token.
                let mut token = Token::new(String::new());
for token_ref in tokens[1..].iter() {
token.text.push_str(token_ref.text);
token
.reference_offsets
.extend_from_slice(token_ref.reference_offsets);
token.offset.end = token_ref.offset.end;
}
(Some(tokens[0].to_owned()), token)
}
};
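        // Normalize before sub-word segmentation: clean the text, apply NFKC decomposition,
        // optionally lower-case, and replace whitespace with the ▁ word-boundary marker expected
        // by the SentencePiece model (prepending it if the text does not already start with it).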
clean_text(&mut token, true);
decompose_nfkc(&mut token);
if self.lower_case {
lowercase(&mut token);
}
token.text = token.text.replace(|c: char| is_whitespace(&c), "\u{2581}");
if !token.text.starts_with('\u{2581}') {
token.text.insert(0, '\u{2581}');
token.reference_offsets.insert(0, 0);
};
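        // Segment the normalized text with the SentencePiece model: forward pass over the token,
        // then backward decoding of the best path into sub-word nodes.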
let output = self.model.decode_forward_token_ref(token.as_ref());
let decoded = self.model.decode_backward(&output);
let mut output: Vec<Token> = Vec::with_capacity(decoded.len() + 1);
if let Some(code) = code_token {
output.push(code);
};
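        // Consecutive unknown pieces (vocabulary index 0) are merged into a single token.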
let mut is_prev_unknown = false;
for node in decoded {
            if is_prev_unknown && (node.index == 0) {
let prev_token = output.last().unwrap();
let mut text = prev_token.text.clone();
text.push_str(node.text);
let mut reference_offsets = prev_token.reference_offsets.clone();
reference_offsets.extend_from_slice(node.reference_offsets);
let consolidated_unknown = Token {
text,
offset: Offset { begin: 0, end: 0 },
reference_offsets,
mask: Default::default(),
};
output.pop();
output.push(consolidated_unknown);
} else {
output.push(Token {
text: node.text.to_owned(),
offset: Offset { begin: 0, end: 0 },
reference_offsets: node.reference_offsets.to_vec(),
mask: Default::default(),
});
}
is_prev_unknown = node.index == 0;
}
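        // Flag word-initial vs. continuation sub-tokens based on the ▁ word-boundary marker.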
self.model.populate_masks(output.as_mut_slice(), '\u{2581}');
output
}
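
    // Detokenization: map the ▁ word-boundary markers back to spaces and concatenate the pieces.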
fn convert_tokens_to_string(&self, tokens: Vec<String>) -> String {
tokens
.into_iter()
.map(|v| v.replace('\u{2581}', " "))
.collect::<Vec<String>>()
.join("")
}
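
    // Marian appends a single EOS token at the end of the sequence (no BOS/CLS token and no
    // separator between sequence pairs). Segment ids are 0 for the first sequence and 1 for the
    // second sequence and the trailing EOS.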
fn build_input_with_special_tokens(
&self,
tokens_ids_with_offsets_1: TokenIdsWithOffsets,
tokens_ids_with_offsets_2: Option<TokenIdsWithOffsets>,
) -> TokenIdsWithSpecialTokens {
let mut output: Vec<i64> = vec![];
let mut token_segment_ids: Vec<i8> = vec![];
let mut special_tokens_mask: Vec<i8> = vec![];
let mut offsets: Vec<Option<Offset>> = vec![];
let mut original_offsets: Vec<Vec<OffsetSize>> = vec![];
let mut mask: Vec<Mask> = vec![];
special_tokens_mask.extend(vec![0; tokens_ids_with_offsets_1.ids.len()]);
token_segment_ids.extend(vec![0; tokens_ids_with_offsets_1.ids.len()]);
output.extend(tokens_ids_with_offsets_1.ids);
offsets.extend(tokens_ids_with_offsets_1.offsets);
original_offsets.extend(tokens_ids_with_offsets_1.reference_offsets);
mask.extend(tokens_ids_with_offsets_1.masks);
if let Some(tokens_ids_with_offsets_2_value) = tokens_ids_with_offsets_2 {
let length = tokens_ids_with_offsets_2_value.ids.len();
            special_tokens_mask.extend(vec![0; length]);
            // Segment id 1 for the second sequence; the trailing EOS pushed below also receives
            // segment id 1, so only `length` entries are added here.
            token_segment_ids.extend(vec![1; length]);
output.extend(tokens_ids_with_offsets_2_value.ids);
offsets.extend(tokens_ids_with_offsets_2_value.offsets);
original_offsets.extend(tokens_ids_with_offsets_2_value.reference_offsets);
mask.extend(tokens_ids_with_offsets_2_value.masks);
}
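        // Append the EOS token as the only special token.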
special_tokens_mask.push(1);
token_segment_ids.push(1);
output.push(self.vocab.token_to_id(self.vocab.get_eos_value()));
offsets.push(None);
original_offsets.push(vec![]);
mask.push(Mask::Special);
TokenIdsWithSpecialTokens {
token_ids: output,
segment_ids: token_segment_ids,
special_tokens_mask,
token_offsets: offsets,
reference_offsets: original_offsets,
mask,
}
}
}
impl MultiThreadedTokenizer<MarianVocab> for MarianTokenizer {}