Struct WordTokenizer 

pub struct WordTokenizer { /* private fields */ }

Tokenizer for splitting text into words
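
A minimal usage sketch based on the constructor and the Tokenizer trait documented on this page; the import path assumes both items are re-exported at the crate root (as the demos below suggest), and the exact token output is an expectation rather than a documented result:

use scirs2_text::{Tokenizer, WordTokenizer};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // `true` requests lowercasing, per `new(lowercase: bool)` below.
    let tokenizer = WordTokenizer::new(true);
    let tokens = tokenizer.tokenize("The quick brown Fox")?;
    // Expected shape: one String per word, lowercased, e.g. ["the", "quick", "brown", "fox"].
    println!("{tokens:?}");
    Ok(())
}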

Implementations

impl WordTokenizer

pub fn new(lowercase: bool) -> Self

Create a new word tokenizer

Examples found in repository
examples/text_processing_demo.rs (line 46)
14fn main() -> Result<(), Box<dyn std::error::Error>> {
15    println!("=== SciRS2 Text Processing Demo ===\n");
16
17    let documents = [
18        "The quick brown fox jumps over the lazy dog.",
19        "A fast red fox leaped over the sleeping canine.",
20        "Machine learning algorithms process textual data efficiently.",
21        "Text processing and natural language understanding are important.",
22    ];
23
24    // 1. Text Normalization
25    println!("1. Text Normalization");
26    let normalizer = BasicNormalizer::new(true, true);
27    for (i, doc) in documents.iter().enumerate() {
28        let normalized = normalizer.normalize(doc)?;
29        println!("Doc {}: {}", i + 1, normalized);
30    }
31    println!();
32
33    // 2. Text Cleaning
34    println!("2. Text Cleaning");
35    let cleaner = BasicTextCleaner::new(true, true, true);
36    for (i, doc) in documents.iter().enumerate() {
37        let cleaned = cleaner.clean(doc)?;
38        println!("Doc {}: {}", i + 1, cleaned);
39    }
40    println!();
41
42    // 3. Tokenization Examples
43    println!("3. Tokenization Examples");
44
45    // Word tokenization
46    let word_tokenizer = WordTokenizer::new(true);
47    let tokens = word_tokenizer.tokenize(documents[0])?;
48    println!("Word tokens: {tokens:?}");
49
50    // N-gram tokenization
51    let ngram_tokenizer = NgramTokenizer::new(2)?;
52    let ngrams = ngram_tokenizer.tokenize(documents[0])?;
53    println!("2-grams: {ngrams:?}");
54
55    // Regex tokenization
56    let regex_tokenizer = RegexTokenizer::new(r"\b\w+\b", false)?;
57    let regex_tokens = regex_tokenizer.tokenize(documents[0])?;
58    println!("Regex tokens: {regex_tokens:?}");
59    println!();
60
61    // 4. Stemming and Lemmatization
62    println!("4. Stemming and Lemmatization");
63    let porter_stemmer = PorterStemmer::new();
64    let lemmatizer = SimpleLemmatizer::new();
65
66    let test_words = vec!["running", "jumped", "better", "processing"];
67    for word in test_words {
68        let stemmed = porter_stemmer.stem(word)?;
69        let lemmatized = lemmatizer.stem(word)?;
70        println!("{word}: stemmed={stemmed}, lemmatized={lemmatized}");
71    }
72    println!();
73
74    // 5. Count Vectorization
75    println!("5. Count Vectorization");
76    let mut count_vectorizer = CountVectorizer::new(false);
77
78    let doc_refs = documents.to_vec();
79    count_vectorizer.fit(&doc_refs)?;
80
81    // Transform individual documents
82    let count_matrix = count_vectorizer.transform_batch(&doc_refs)?;
83    println!("Count vector shape: {:?}", count_matrix.shape());
84    println!("Vocabulary size: {}", count_vectorizer.vocabulary().len());
85
86    println!();
87
88    // 6. TF-IDF Vectorization
89    println!("6. TF-IDF Vectorization");
90    let mut tfidf_vectorizer = TfidfVectorizer::new(false, true, Some("l2".to_string()));
91
92    tfidf_vectorizer.fit(&doc_refs)?;
93    let tfidf_matrix = tfidf_vectorizer.transform_batch(&doc_refs)?;
94
95    println!("TF-IDF vector shape: {:?}", tfidf_matrix.shape());
96    println!("Sample TF-IDF values:");
97    for i in 0..3.min(tfidf_matrix.nrows()) {
98        for j in 0..5.min(tfidf_matrix.ncols()) {
99            print!("{:.3} ", tfidf_matrix[[i, j]]);
100        }
101        println!();
102    }
103    println!();
104
105    // 7. Complete Pipeline Example
106    println!("7. Complete Text Processing Pipeline");
107    let testtext = "The cats were running quickly through the gardens.";
108
109    // Normalize
110    let normalized = normalizer.normalize(testtext)?;
111    println!("Normalized: {normalized}");
112
113    // Clean
114    let cleaned = cleaner.clean(&normalized)?;
115    println!("Cleaned: {cleaned}");
116
117    // Tokenize
118    let tokens = word_tokenizer.tokenize(&cleaned)?;
119    println!("Tokens: {tokens:?}");
120
121    // Stem
122    let stemmed_tokens: Result<Vec<_>, _> = tokens
123        .iter()
124        .map(|token| porter_stemmer.stem(token))
125        .collect();
126    let stemmed_tokens = stemmed_tokens?;
127    println!("Stemmed: {stemmed_tokens:?}");
128
129    Ok(())
130}
More examples
examples/parallel_processing_demo.rs (line 61)
10fn main() -> Result<(), Box<dyn std::error::Error>> {
11    println!("Parallel Text Processing Demo");
12    println!("============================\n");
13
14    // Create test data with larger size to demonstrate parallelism
15    println!("Creating test data...");
16    let texts = create_testtexts(1000);
17
18    // Create references to handle &[&str] requirements
19    let text_refs: Vec<&str> = texts.iter().map(|s| s.as_str()).collect();
20
21    println!("Total documents: {}", texts.len());
22    println!("Example document: {}", texts[0]);
23
24    // 1. Simple Parallel Text Processing
25    println!("\n1. Basic Parallel Processing");
26    println!("---------------------------");
27
28    let processor = ParallelTextProcessor::new();
29
30    let start = Instant::now();
31    let word_counts = processor.process(&text_refs, |text| {
32        // Count words in each document
33        text.split_whitespace().count()
34    });
35    let duration = start.elapsed();
36
37    println!("Processed {} documents in {:.2?}", texts.len(), duration);
38    println!(
39        "Average word count: {:.2}",
40        word_counts.iter().sum::<usize>() as f64 / word_counts.len() as f64
41    );
42
43    // Sequential comparison
44    let start = Instant::now();
45    let _seq_word_counts: Vec<_> = texts
46        .iter()
47        .map(|text| text.split_whitespace().count())
48        .collect();
49    let seq_duration = start.elapsed();
50
51    println!("Sequential processing took {seq_duration:.2?}");
52    println!(
53        "Speedup factor: {:.2}x",
54        seq_duration.as_secs_f64() / duration.as_secs_f64()
55    );
56
57    // 2. Parallel Tokenization
58    println!("\n2. Parallel Tokenization");
59    println!("----------------------");
60
61    let tokenizer = ParallelTokenizer::new(WordTokenizer::new(true)); // Pass 'lowercase' parameter
62
63    let start = Instant::now();
64    let tokens = tokenizer.tokenize(&text_refs)?;
65    let duration = start.elapsed();
66
67    println!("Tokenized {} documents in {:.2?}", texts.len(), duration);
68    println!(
69        "Total tokens: {}",
70        tokens.iter().map(|t| t.len()).sum::<usize>()
71    );
72    println!(
73        "Sample tokens from first document: {:?}",
74        tokens[0].iter().take(5).collect::<Vec<_>>()
75    );
76
77    // Custom token processing
78    println!("\nCustom token processing...");
79    let start = Instant::now();
80    let token_stats = tokenizer.tokenize_and_map(&text_refs, |tokens| {
81        // Calculate token statistics
82        let count = tokens.len();
83        let avg_len = if count > 0 {
84            tokens.iter().map(|t| t.len()).sum::<usize>() as f64 / count as f64
85        } else {
86            0.0
87        };
88        (count, avg_len)
89    })?;
90    let duration = start.elapsed();
91
92    println!("Processed token statistics in {duration:.2?}");
93    println!(
94        "Average tokens per document: {:.2}",
95        token_stats.iter().map(|(count_, _)| *count_).sum::<usize>() as f64
96            / token_stats.len() as f64
97    );
98    println!(
99        "Average token length: {:.2}",
100        token_stats.iter().map(|(_, avg_len)| *avg_len).sum::<f64>() / token_stats.len() as f64
101    );
102
103    // 3. Parallel Vectorization
104    println!("\n3. Parallel Vectorization");
105    println!("------------------------");
106
107    // First fit the vectorizer
108    let mut vectorizer = TfidfVectorizer::default();
109    let start = Instant::now();
110
111    // Import the Vectorizer trait to use its methods
112    use scirs2_text::Vectorizer;
113    vectorizer.fit(&text_refs)?;
114    let fit_duration = start.elapsed();
115
116    println!("Fitted vectorizer in {fit_duration:.2?}");
117
118    // Now transform in parallel
119    let parallel_vectorizer = ParallelVectorizer::new(vectorizer).with_chunk_size(100);
120
121    let start = Instant::now();
122    let vectors = parallel_vectorizer.transform(&text_refs)?;
123    let transform_duration = start.elapsed();
124
125    println!(
126        "Transformed {} documents in {:.2?}",
127        texts.len(),
128        transform_duration
129    );
130    println!("Vector shape: {:?}", vectors.shape());
131    println!(
132        "Non-zero elements: {}",
133        vectors.iter().filter(|&&x| x > 0.0).count()
134    );
135
136    // 4. Batch Processing with Progress
137    println!("\n4. Batch Processing with Progress");
138    println!("--------------------------------");
139
140    let processor = ParallelCorpusProcessor::new(100).with_threads(num_cpus::get());
141
142    println!("Processing with {} threads...", num_cpus::get());
143    let start = Instant::now();
144
145    let last_progress = std::sync::Mutex::new(0);
146    let result = processor.process_with_progress(
147        &text_refs,
148        |batch| {
149            // Analyze batch of documents
150            let mut word_counts = Vec::new();
151            let mut char_counts = Vec::new();
152
153            for &text in batch {
154                word_counts.push(text.split_whitespace().count());
155                char_counts.push(text.chars().count());
156            }
157
158            Ok(word_counts.into_iter().zip(char_counts).collect::<Vec<_>>())
159        },
160        |current, total| {
161            // Only print progress updates at 10% intervals
162            let percent = current * 100 / total;
163            let mut last = last_progress.lock().unwrap();
164            if percent / 10 > *last / 10 {
165                println!("  Progress: {current}/{total}  ({percent}%)");
166                *last = percent;
167            }
168        },
169    )?;
170
171    let duration = start.elapsed();
172
173    println!("Processed {} documents in {:.2?}", texts.len(), duration);
174    println!(
175        "Average words per document: {:.2}",
176        result.iter().map(|(words_, _)| *words_).sum::<usize>() as f64 / result.len() as f64
177    );
178    println!(
179        "Average characters per document: {:.2}",
180        result.iter().map(|(_, chars)| chars).sum::<usize>() as f64 / result.len() as f64
181    );
182
183    // 5. Memory-efficient processing
184    println!("\n5. Memory-Efficient Large Corpus Processing");
185    println!("------------------------------------------");
186
187    println!("Simulating processing of a large corpus...");
188    let largetexts: Vec<&str> = text_refs.iter().cycle().take(5000).copied().collect();
189    println!("Large corpus size: {} documents", largetexts.len());
190
191    let processor = ParallelCorpusProcessor::new(250).with_max_memory(1024 * 1024 * 1024); // 1 GB limit
192
193    let start = Instant::now();
194    let summary = processor.process(&largetexts, |batch| {
195        // Compute simple statistics for the batch
196        let batch_size = batch.len();
197        let total_words: usize = batch
198            .iter()
199            .map(|&text| text.split_whitespace().count())
200            .sum();
201        let total_chars: usize = batch.iter().map(|&text| text.chars().count()).sum();
202
203        Ok(vec![(batch_size, total_words, total_chars)])
204    })?;
205    let duration = start.elapsed();
206
207    let total_words: usize = summary.iter().map(|(_, words_, _)| *words_).sum();
208    let total_chars: usize = summary.iter().map(|(_, _, chars)| *chars).sum();
209
210    println!("Processed large corpus in {duration:.2?}");
211    println!("Total words: {total_words}");
212    println!("Total chars: {total_chars}");
213    println!(
214        "Average processing speed: {:.2} documents/second",
215        largetexts.len() as f64 / duration.as_secs_f64()
216    );
217
218    Ok(())
219}

pub fn withpattern(lowercase: bool, pattern: &str) -> Result<Self>

Create a new word tokenizer with a custom pattern
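
A hedged sketch of the custom-pattern constructor: the page gives only the signature, so treating the pattern as a regular expression that selects token text is an assumption, as is the crate-root import path:

use scirs2_text::{Tokenizer, WordTokenizer};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Assumed: the pattern is a regex; this one keeps hyphenated words as single tokens.
    let tokenizer = WordTokenizer::withpattern(true, r"[A-Za-z]+(?:-[A-Za-z]+)*")?;
    let tokens = tokenizer.tokenize("State-of-the-art text processing")?;
    println!("{tokens:?}");
    Ok(())
}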

Trait Implementations

impl Clone for WordTokenizer

fn clone(&self) -> WordTokenizer

Returns a duplicate of the value. Read more

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more

impl Debug for WordTokenizer

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more

impl Default for WordTokenizer

fn default() -> Self

Returns the “default value” for a type. Read more
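
The default configuration (for example, whether input is lowercased) is not stated on this page, so the sketch below relies only on Default being implemented:

use scirs2_text::{Tokenizer, WordTokenizer};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let tokenizer = WordTokenizer::default();
    // What the default configuration does (e.g. lowercasing) is not documented here.
    let tokens = tokenizer.tokenize("Some Input Text")?;
    println!("{tokens:?}");
    Ok(())
}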

impl Tokenizer for WordTokenizer

fn tokenize(&self, text: &str) -> Result<Vec<String>>

Tokenize the input text into tokens
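
Because tokenize returns Result<Vec<String>>, per-document calls compose with the ? operator; a small sketch with assumed crate-root imports:

use scirs2_text::{Tokenizer, WordTokenizer};

fn token_counts(docs: &[&str]) -> Result<Vec<usize>, Box<dyn std::error::Error>> {
    let tokenizer = WordTokenizer::new(true);
    // Tokenize each document and keep only its token count.
    docs.iter()
        .map(|doc| Ok(tokenizer.tokenize(doc)?.len()))
        .collect()
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let counts = token_counts(&["one two three", "four five"])?;
    println!("{counts:?}");
    Ok(())
}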

fn clone_box(&self) -> Box<dyn Tokenizer + Send + Sync>

Clone the tokenizer (for use in parallel processing)
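
clone_box produces an owned trait object, which is handy when a tokenizer has to be stored or shared as dyn Tokenizer (for example by the parallel helpers shown above); a sketch with assumed crate-root imports:

use scirs2_text::{Tokenizer, WordTokenizer};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let tokenizer = WordTokenizer::new(true);
    // Owned, thread-safe trait object; the original tokenizer remains usable.
    let boxed: Box<dyn Tokenizer + Send + Sync> = tokenizer.clone_box();
    let tokens = boxed.tokenize("shared across threads")?;
    println!("{tokens:?}");
    Ok(())
}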

fn tokenize_batch(&self, texts: &[&str]) -> Result<Vec<Vec<String>>>

Tokenize a batch of texts
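
A sketch of batch tokenization over a slice of string slices; one inner Vec<String> per input document is the natural reading of the return type:

use scirs2_text::{Tokenizer, WordTokenizer};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let tokenizer = WordTokenizer::new(true);
    let docs = ["First document.", "Second document."];
    let all_tokens = tokenizer.tokenize_batch(&docs)?;
    // Expect all_tokens.len() == docs.len(), each entry holding that document's tokens.
    for (doc, tokens) in docs.iter().zip(&all_tokens) {
        println!("{doc} -> {tokens:?}");
    }
    Ok(())
}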

Auto Trait Implementations

Blanket Implementations

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more

impl<T> CloneToUninit for T
where T: Clone,

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more

impl<T> From<T> for T

fn from(t: T) -> T

Returns the argument unchanged.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> IntoEither for T

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more

impl<T> Pointable for T

const ALIGN: usize

The alignment of pointer.

type Init = T

The type for initializers.

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a with the given initializer. Read more

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more

impl<T> Same for T

type Output = T

Should always be Self

impl<SS, SP> SupersetOf<SS> for SP
where SS: SubsetOf<SP>,

fn to_subset(&self) -> Option<SS>

The inverse inclusion map: attempts to construct self from the equivalent element of its superset. Read more

fn is_in_subset(&self) -> bool

Checks if self is actually part of its subset T (and can be converted to it).

fn to_subset_unchecked(&self) -> SS

Use with care! Same as self.to_subset but without any property checks. Always succeeds.

fn from_subset(element: &SS) -> SP

The inclusion map: converts self to the equivalent element of its superset.

impl<T> ToOwned for T
where T: Clone,

type Owned = T

The resulting type after obtaining ownership.

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more

impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

fn vzip(self) -> V