text_processing_demo/text_processing_demo.rs
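//! Demonstrates text preprocessing, tokenization, stemming/lemmatization,
//! and count / TF-IDF vectorization with the scirs2-text crate.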
use scirs2_text::{
    preprocess::{BasicNormalizer, BasicTextCleaner, TextCleaner, TextNormalizer},
    stemming::{PorterStemmer, SimpleLemmatizer, Stemmer},
    tokenize::{NgramTokenizer, RegexTokenizer, Tokenizer, WordTokenizer},
    vectorize::{CountVectorizer, TfidfVectorizer, Vectorizer},
};

#[allow(dead_code)]
fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("=== SciRS2 Text Processing Demo ===\n");

    let documents = [
        "The quick brown fox jumps over the lazy dog.",
        "A fast red fox leaped over the sleeping canine.",
        "Machine learning algorithms process textual data efficiently.",
        "Text processing and natural language understanding are important.",
    ];

    println!("1. Text Normalization");
    let normalizer = BasicNormalizer::new(true, true);
    for (i, doc) in documents.iter().enumerate() {
        let normalized = normalizer.normalize(doc)?;
        println!("Doc {}: {}", i + 1, normalized);
    }
    println!();

    println!("2. Text Cleaning");
    let cleaner = BasicTextCleaner::new(true, true, true);
    for (i, doc) in documents.iter().enumerate() {
        let cleaned = cleaner.clean(doc)?;
        println!("Doc {}: {}", i + 1, cleaned);
    }
    println!();

    println!("3. Tokenization Examples");
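    // Split the first document into individual word tokens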
    let word_tokenizer = WordTokenizer::new(true);
    let tokens = word_tokenizer.tokenize(documents[0])?;
    println!("Word tokens: {tokens:?}");
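    // Produce 2-grams (bigrams) from the first document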
    let ngram_tokenizer = NgramTokenizer::new(2)?;
    let ngrams = ngram_tokenizer.tokenize(documents[0])?;
    println!("2-grams: {ngrams:?}");
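    // Tokenize with a custom regular expression that matches whole words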
    let regex_tokenizer = RegexTokenizer::new(r"\b\w+\b", false)?;
    let regex_tokens = regex_tokenizer.tokenize(documents[0])?;
    println!("Regex tokens: {regex_tokens:?}");
    println!();

    println!("4. Stemming and Lemmatization");
    let porter_stemmer = PorterStemmer::new();
    let lemmatizer = SimpleLemmatizer::new();

    let test_words = vec!["running", "jumped", "better", "processing"];
    for word in test_words {
        let stemmed = porter_stemmer.stem(word)?;
        let lemmatized = lemmatizer.stem(word)?;
        println!("{word}: stemmed={stemmed}, lemmatized={lemmatized}");
    }
    println!();

    println!("5. Count Vectorization");
    let mut count_vectorizer = CountVectorizer::new(false);

    let doc_refs = documents.to_vec();
    count_vectorizer.fit(&doc_refs)?;

    let count_matrix = count_vectorizer.transform_batch(&doc_refs)?;
    println!("Count vector shape: {:?}", count_matrix.shape());
    println!("Vocabulary size: {}", count_vectorizer.vocabulary().len());

    println!();

    println!("6. TF-IDF Vectorization");
    let mut tfidf_vectorizer = TfidfVectorizer::new(false, true, Some("l2".to_string()));

    tfidf_vectorizer.fit(&doc_refs)?;
    let tfidf_matrix = tfidf_vectorizer.transform_batch(&doc_refs)?;

    println!("TF-IDF vector shape: {:?}", tfidf_matrix.shape());
    println!("Sample TF-IDF values:");
    for i in 0..3.min(tfidf_matrix.nrows()) {
        for j in 0..5.min(tfidf_matrix.ncols()) {
            print!("{:.3} ", tfidf_matrix[[i, j]]);
        }
        println!();
    }
    println!();

    println!("7. Complete Text Processing Pipeline");
    let test_text = "The cats were running quickly through the gardens.";

    let normalized = normalizer.normalize(test_text)?;
111 println!("Normalized: {normalized}");
112
113 let cleaned = cleaner.clean(&normalized)?;
115 println!("Cleaned: {cleaned}");
116
117 let tokens = word_tokenizer.tokenize(&cleaned)?;
119 println!("Tokens: {tokens:?}");
120
121 let stemmed_tokens: Result<Vec<_>, _> = tokens
123 .iter()
124 .map(|token| porter_stemmer.stem(token))
125 .collect();
126 let stemmed_tokens = stemmed_tokens?;
127 println!("Stemmed: {stemmed_tokens:?}");
128
129 Ok(())
130}