#[cfg(test)]
mod tests;
#[cfg(not(doctest))]
mod tokenizers;
pub use tokenizers::Tokenizer;
use crate::tokenization::tokenizers::pre_tokenizers::whitespace::Whitespace;

/// Tokenize each string into a vector of its letters
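///
/// A minimal usage sketch (illustrative, not run as a doctest); it assumes the
/// empty boundary strings produced by `split("")` are trimmed, as in `tokenize_alphabet`:
/// ```ignore
/// let batches = batch_tokenize_alphabet(vec!["ab".to_string(), "cd".to_string()]);
/// assert_eq!(batches, vec![vec!["a", "b"], vec!["c", "d"]]);
/// ```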
pub fn batch_tokenize_alphabet(strings: Vec<String>) -> Vec<Vec<String>> {
    strings.iter().map(|string| {
        let tokens: Vec<String> = string.split("").map(|f| f.to_string()).collect();
        tokens[1..tokens.len() - 1].to_vec() // split("") yields an empty string at each end; trim them
    }).collect()
}

/// Tokenize the string into a vector of its letters
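///
/// A minimal usage sketch (illustrative, not run as a doctest):
/// ```ignore
/// let tokens = tokenize_alphabet("hi".to_string());
/// assert_eq!(tokens, vec!["h", "i"]);
/// ```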
#[inline]
pub fn tokenize_alphabet(string: String) -> Vec<String> {
    let tokens: Vec<String> = string.split("").map(|f| f.to_string()).collect();
    tokens[1..tokens.len() - 1].to_vec() // split("") yields an empty string at each end; trim them
}

/// Tokenizes each string by splitting at spaces
pub fn batch_tokenize_spaces(strings: Vec<String>) -> Vec<Vec<String>> {
    strings.iter().map(|string| {
        string.split(' ').map(|f| f.to_string()).collect()
    }).collect()
}

/// Tokenizes a string by splitting at spaces
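///
/// A minimal usage sketch (illustrative, not run as a doctest):
/// ```ignore
/// let tokens = tokenize_spaces("hello world".to_string());
/// assert_eq!(tokens, vec!["hello", "world"]);
/// ```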
pub fn tokenize_spaces(string: String) -> Vec<String> {
    string.split(' ').map(|f| {f.to_string()}).collect()
}

/// Tokenizes a string using BPE tokenization
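///
/// A minimal usage sketch (illustrative, not run as a doctest); the exact
/// sub-word pieces depend on the bundled vocabulary and merges:
/// ```ignore
/// let tokenizer = load_bpe_tokenizer();
/// let tokens = tokenize_bpe("Hello world".to_string(), Some(&tokenizer));
/// // `tokens` holds the lowercased BPE sub-word strings.
/// ```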
pub fn tokenize_bpe(mut string: String, tokenizer: Option<&Tokenizer>) -> Vec<String> {
    tokenizers::utils::parallelism::set_parallelism(true);
    // Lowercase
    string = string.to_lowercase();
    // Create tokenizer and tokenize
    let encoding = match tokenizer {
        Some(tokenizer) => tokenizer.encode(string, false).expect("BPE tokenization failed!"),
        None => {
            let tokenizer = load_bpe_tokenizer();
            tokenizer.encode(string, false).expect("BPE tokenization failed!")
        }
    };
    // Convert back to string
    encoding.get_tokens().to_vec()
}

/// Tokenizes strings using BPE tokenization
pub fn batch_tokenize_bpe(mut strings: Vec<String>, tokenizer: Option<&Tokenizer>) -> Vec<Vec<String>> {
    tokenizers::utils::parallelism::set_parallelism(true);
    // Lowercase
    strings = strings.iter().map(|a| {a.to_lowercase()}).collect();
    // Create tokenizer and tokenize
    let encodings = match tokenizer {
        Some(tokenizer) => tokenizer.encode_batch(strings, false).expect("BPE tokenization failed!"),
        None => {
            let tokenizer = load_bpe_tokenizer();
            tokenizer.encode_batch(strings, false).expect("BPE tokenization failed!")
        }
    };
    // Convert back to strings
    let mut tokens: Vec<Vec<String>> = Vec::with_capacity(encodings.len());
    for encoding in encodings {
        tokens.push(encoding.get_tokens().to_vec());
    }
    tokens
}

/// Loads the BPE tokenizer
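///
/// This parses the embedded merges file on every call, so callers typically load
/// once and pass the tokenizer into the `tokenize_bpe`/`batch_tokenize_bpe` functions.
/// A sketch of that reuse pattern (illustrative, not run as a doctest):
/// ```ignore
/// let tokenizer = load_bpe_tokenizer();
/// let batches = batch_tokenize_bpe(
///     vec!["first sentence".to_string(), "second sentence".to_string()],
///     Some(&tokenizer),
/// );
/// assert_eq!(batches.len(), 2);
/// ```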
pub fn load_bpe_tokenizer() -> Tokenizer {
    use crate::tokenization::tokenizers::models::bpe::BPE;
    // Create tokenizer
    let bpe_builder = BPE::builder();
    let bpe_vocab = super::vocab::load_bpe_vocab();
    let mut merges: Vec<(String, String)> = Vec::new();
    let lines: Vec<&str> = include_str!("../resources/bpe_merges.txt").split('\n').collect();
    for line in lines {
        let line = String::from(line).replace("Ġ", "").replace("\n", "").replace("##", "");
        // Filter out junk
        if line.contains(' ') && !line.contains('#') {
            let line: Vec<&str> = line.split(' ').collect();
            // Make sure the vocab contains both tokens and the merged token
            if bpe_vocab.token2index.contains_key(&line[0].to_string())
                && bpe_vocab.token2index.contains_key(&line[1].to_string())
                && bpe_vocab.token2index.contains_key(&format!("{}{}", line[0], line[1]))
            {
                merges.push((line[0].to_string(), line[1].to_string()));
            }
        }
    }
    let bpe_builder = bpe_builder.vocab_and_merges(bpe_vocab.token2index, merges);
    let bpe = bpe_builder
        .unk_token("[UNK]".into())
        .build().expect("BPE Tokenizer failed to build!");

    Tokenizer::new(bpe)
}

/// Tokenizes a string using WordPiece tokenization
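///
/// A minimal usage sketch (illustrative, not run as a doctest); continuation
/// pieces are prefixed with `##`, and the exact split depends on the bundled vocabulary:
/// ```ignore
/// let tokenizer = load_wordpiece_tokenizer();
/// let tokens = tokenize_wordpiece("Unaffable".to_string(), Some(&tokenizer));
/// // e.g. something like ["una", "##ffa", "##ble"], vocabulary permitting.
/// ```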
pub fn tokenize_wordpiece(mut string: String, tokenizer: Option<&Tokenizer>) -> Vec<String> {
    tokenizers::utils::parallelism::set_parallelism(true);
    // Lowercase
    string = string.to_lowercase();
    // Create tokenizer and tokenize
    let encoding = match tokenizer {
        Some(tokenizer) => tokenizer.encode(string, false).expect("WordPiece tokenization failed!"),
        None => {
            let tokenizer = load_wordpiece_tokenizer();
            tokenizer.encode(string, false).expect("WordPiece tokenization failed!")
        }
    };
    // Convert back to string
    encoding.get_tokens().to_vec()
}

/// Tokenizes strings using WordPiece tokenization
pub fn batch_tokenize_wordpiece(mut strings: Vec<String>, tokenizer: Option<&Tokenizer>) -> Vec<Vec<String>> {
    tokenizers::utils::parallelism::set_parallelism(true);
    // Lowercase
    strings = strings.iter().map(|a| {a.to_lowercase()}).collect();
    // Create tokenizer and tokenize
    let encodings = match tokenizer {
        Some(tokenizer) => tokenizer.encode_batch(strings, false).expect("WordPiece tokenization failed!"),
        None => {
            let tokenizer = load_wordpiece_tokenizer();
            tokenizer.encode_batch(strings, false).expect("WordPiece tokenization failed!")
        }
    };
    // Convert back to strings
    let mut tokens: Vec<Vec<String>> = Vec::with_capacity(encodings.len());
    for encoding in encodings.iter() {
        tokens.push(encoding.get_tokens().to_vec());
    }
    tokens
}

/// Loads the WordPiece tokenizer
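///
/// A round-trip sketch (illustrative, not run as a doctest); the recovered text
/// is lowercased and only approximates the input where vocabulary coverage is incomplete:
/// ```ignore
/// let tokenizer = load_wordpiece_tokenizer();
/// let tokens = batch_tokenize_wordpiece(vec!["Hello there!".to_string()], Some(&tokenizer));
/// let strings = batch_untokenize_wordpiece(tokens);
/// // `strings[0]` should read roughly "hello there!".
/// ```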
pub fn load_wordpiece_tokenizer() -> Tokenizer {
    use crate::tokenization::tokenizers::models::wordpiece::WordPiece;
    use std::collections::HashMap;
    // Build tokenizer
    let wordpiece_builder = WordPiece::builder();
    let lines: Vec<&str> = include_str!("../resources/wordpiece_vocab.txt").split('\n').collect();
    let mut hashmap: HashMap<String, u32> = HashMap::new();
    for (i, line) in lines.iter().enumerate() {
        hashmap.insert(line.to_string(), i as u32);
    }
    let wordpiece_builder = wordpiece_builder.vocab(hashmap);
    let wordpiece = wordpiece_builder
        .build().expect("WordPiece Tokenizer failed to build!");

    let mut tokenizer = Tokenizer::new(wordpiece);
    tokenizer.with_pre_tokenizer(Whitespace::default());
    tokenizer
}

// UNTOKENIZATION FUNCTIONS
/// Untokenize alphabet tokens
pub fn batch_untokenize_alphabet(tokens: Vec<Vec<String>>) -> Vec<String> {
    tokens.iter().map(|tokens| {
        tokens.join("")
    }).collect()
}

/// Untokenize alphabet tokens
pub fn untokenize_alphabet(tokens: Vec<String>) -> String {
    tokens.join("")
}

/// Untokenize BPE tokens
pub fn batch_untokenize_bpe(tokens: Vec<Vec<String>>) -> Vec<String> {
    // BPE encodings contain spaces, so untokenizing is the same as alphabet
    batch_untokenize_alphabet(tokens)
}

/// Untokenize BPE tokens
pub fn untokenize_bpe(tokens: Vec<String>) -> String {
    // BPE encodings contain spaces, so untokenizing is the same as alphabet
    untokenize_alphabet(tokens)
}


/// Untokenize wordpiece tokens
pub fn batch_untokenize_wordpiece(tokens: Vec<Vec<String>>) -> Vec<String> {
    let punctuation = [".".to_string(), "?".to_string(), "!".to_string(), ",".to_string(), "'".to_string(), r#"""#.to_string()];
    let mut untokenized_strings = vec![String::new(); tokens.len()];
    for i in 0..tokens.len() {
        for x in 0..tokens[i].len() {
            if tokens[i][x] != *"[PAD]" && tokens[i][x] != *"[EOS]" {
                if tokens[i][x].contains("##") || punctuation.contains(&tokens[i][x]) || x == 0 {
                    untokenized_strings[i] = format!("{}{}", untokenized_strings[i], tokens[i][x].replace("##", ""));
                } else {
                    untokenized_strings[i] = format!("{} {}", untokenized_strings[i], tokens[i][x]);
                }
            }
        }
    }
    untokenized_strings
}

/// Untokenize wordpiece tokens
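///
/// A minimal sketch of how `##` continuations and punctuation are joined
/// (illustrative, not run as a doctest):
/// ```ignore
/// let tokens = vec!["una".to_string(), "##ffa".to_string(), "##ble".to_string(), "!".to_string()];
/// assert_eq!(untokenize_wordpiece(tokens), "unaffable!");
/// ```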
pub fn untokenize_wordpiece(tokens: Vec<String>) -> String {
    let punctuation = [".".to_string(), "?".to_string(), "!".to_string(), ",".to_string(), "'".to_string(), r#"""#.to_string()];
    let mut untokenized_string = String::new();
    for (i, token) in tokens.iter().enumerate() {
        if token != "[PAD]" && token != "[EOS]" {
            if token.contains("##") || punctuation.contains(token) || i == 0 {
                untokenized_string = format!("{}{}", untokenized_string, token.replace("##", ""));
            } else {
                untokenized_string = format!("{} {}", untokenized_string, token);
            }
        }
    }
    untokenized_string
}

/// Untokenize space-separated tokens
pub fn batch_untokenize_spaces(tokens: Vec<Vec<String>>) -> Vec<String> {
    tokens.iter().map(|tokens| {
        tokens.join(" ")
    }).collect()
}

/// Untokenize space-separated tokens
pub fn untokenize_spaces(tokens: Vec<String>) -> String {
    tokens.join(" ")
}