CountVectorizer

Struct CountVectorizer

pub struct CountVectorizer { /* private fields */ }

A count vectorizer that converts text documents into numeric count vectors using a bag-of-words representation.
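A minimal end-to-end sketch; the scirs2_text module paths are assumed from the demo programs on this page rather than confirmed by it:

use scirs2_text::{CountVectorizer, Vectorizer}; // assumed paths; fit_transform comes from the Vectorizer trait

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let docs = ["the cat sat on the mat", "the dog sat on the log"];

    // Build the vocabulary and produce one count row per document.
    let mut vectorizer = CountVectorizer::new(false); // false = raw counts
    let matrix = vectorizer.fit_transform(&docs)?;

    assert_eq!(matrix.nrows(), docs.len());
    println!("vocabulary size: {}", vectorizer.vocabulary_size());
    Ok(())
}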

Implementations

impl CountVectorizer

pub fn new(binary: bool) -> Self

Creates a new count vectorizer. By the usual count-vectorizer convention, binary selects 0/1 presence features instead of raw token counts; the sketch below illustrates the assumed difference.
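A short sketch of the two modes, assuming the standard presence/absence semantics for binary (this page does not spell them out):

use scirs2_text::{CountVectorizer, Vectorizer}; // assumed paths

fn demo() -> Result<(), Box<dyn std::error::Error>> {
    let docs = ["hello hello world"];

    let mut counting = CountVectorizer::new(false);
    let counts = counting.fit_transform(&docs)?; // the "hello" column holds 2.0

    let mut presence = CountVectorizer::new(true);
    let flags = presence.fit_transform(&docs)?; // assumed: the "hello" column holds 1.0

    println!("{:?} vs {:?}", counts.row(0), flags.row(0));
    Ok(())
}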

Examples found in repository: examples/text_processing_demo.rs (line 76)
14 fn main() -> Result<(), Box<dyn std::error::Error>> {
15    println!("=== SciRS2 Text Processing Demo ===\n");
16
17    let documents = [
18        "The quick brown fox jumps over the lazy dog.",
19        "A fast red fox leaped over the sleeping canine.",
20        "Machine learning algorithms process textual data efficiently.",
21        "Text processing and natural language understanding are important.",
22    ];
23
24    // 1. Text Normalization
25    println!("1. Text Normalization");
26    let normalizer = BasicNormalizer::new(true, true);
27    for (i, doc) in documents.iter().enumerate() {
28        let normalized = normalizer.normalize(doc)?;
29        println!("Doc {}: {}", i + 1, normalized);
30    }
31    println!();
32
33    // 2. Text Cleaning
34    println!("2. Text Cleaning");
35    let cleaner = BasicTextCleaner::new(true, true, true);
36    for (i, doc) in documents.iter().enumerate() {
37        let cleaned = cleaner.clean(doc)?;
38        println!("Doc {}: {}", i + 1, cleaned);
39    }
40    println!();
41
42    // 3. Tokenization Examples
43    println!("3. Tokenization Examples");
44
45    // Word tokenization
46    let word_tokenizer = WordTokenizer::new(true);
47    let tokens = word_tokenizer.tokenize(documents[0])?;
48    println!("Word tokens: {tokens:?}");
49
50    // N-gram tokenization
51    let ngram_tokenizer = NgramTokenizer::new(2)?;
52    let ngrams = ngram_tokenizer.tokenize(documents[0])?;
53    println!("2-grams: {ngrams:?}");
54
55    // Regex tokenization
56    let regex_tokenizer = RegexTokenizer::new(r"\b\w+\b", false)?;
57    let regex_tokens = regex_tokenizer.tokenize(documents[0])?;
58    println!("Regex tokens: {regex_tokens:?}");
59    println!();
60
61    // 4. Stemming and Lemmatization
62    println!("4. Stemming and Lemmatization");
63    let porter_stemmer = PorterStemmer::new();
64    let lemmatizer = SimpleLemmatizer::new();
65
66    let test_words = vec!["running", "jumped", "better", "processing"];
67    for word in test_words {
68        let stemmed = porter_stemmer.stem(word)?;
69        let lemmatized = lemmatizer.stem(word)?;
70        println!("{word}: stemmed={stemmed}, lemmatized={lemmatized}");
71    }
72    println!();
73
74    // 5. Count Vectorization
75    println!("5. Count Vectorization");
76    let mut count_vectorizer = CountVectorizer::new(false);
77
78    let doc_refs = documents.to_vec();
79    count_vectorizer.fit(&doc_refs)?;
80
81    // Transform all documents into a count matrix
82    let count_matrix = count_vectorizer.transform_batch(&doc_refs)?;
83    println!("Count vector shape: {:?}", count_matrix.shape());
84    println!("Vocabulary size: {}", count_vectorizer.vocabulary().len());
85
86    println!();
87
88    // 6. TF-IDF Vectorization
89    println!("6. TF-IDF Vectorization");
90    let mut tfidf_vectorizer = TfidfVectorizer::new(false, true, Some("l2".to_string()));
91
92    tfidf_vectorizer.fit(&doc_refs)?;
93    let tfidf_matrix = tfidf_vectorizer.transform_batch(&doc_refs)?;
94
95    println!("TF-IDF vector shape: {:?}", tfidf_matrix.shape());
96    println!("Sample TF-IDF values:");
97    for i in 0..3.min(tfidf_matrix.nrows()) {
98        for j in 0..5.min(tfidf_matrix.ncols()) {
99            print!("{:.3} ", tfidf_matrix[[i, j]]);
100        }
101        println!();
102    }
103    println!();
104
105    // 7. Complete Pipeline Example
106    println!("7. Complete Text Processing Pipeline");
107    let testtext = "The cats were running quickly through the gardens.";
108
109    // Normalize
110    let normalized = normalizer.normalize(testtext)?;
111    println!("Normalized: {normalized}");
112
113    // Clean
114    let cleaned = cleaner.clean(&normalized)?;
115    println!("Cleaned: {cleaned}");
116
117    // Tokenize
118    let tokens = word_tokenizer.tokenize(&cleaned)?;
119    println!("Tokens: {tokens:?}");
120
121    // Stem
122    let stemmed_tokens: Result<Vec<_>, _> = tokens
123        .iter()
124        .map(|token| porter_stemmer.stem(token))
125        .collect();
126    let stemmed_tokens = stemmed_tokens?;
127    println!("Stemmed: {stemmed_tokens:?}");
128
129    Ok(())
130 }
pub fn with_tokenizer(
    tokenizer: Box<dyn Tokenizer + Send + Sync>,
    binary: bool,
) -> Self

Creates a count vectorizer that uses the supplied custom tokenizer in place of the default.
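A sketch that swaps in the bigram tokenizer used elsewhere on this page; NgramTokenizer is assumed to satisfy the Tokenizer + Send + Sync bound:

use scirs2_text::{CountVectorizer, NgramTokenizer, Vectorizer}; // assumed paths

fn demo() -> Result<(), Box<dyn std::error::Error>> {
    // Count bigrams instead of single words.
    let bigrams = NgramTokenizer::new(2)?;
    let mut vectorizer = CountVectorizer::with_tokenizer(Box::new(bigrams), false);

    let matrix = vectorizer.fit_transform(&["a quick brown fox"])?;
    println!("bigram features: {}", matrix.ncols());
    Ok(())
}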

pub fn vocabulary(&self) -> &Vocabulary

Returns a reference to the vocabulary built during fitting.
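A small sketch that inverts the vocabulary into an index-to-token map, mirroring the topic-modeling example below:

use std::collections::HashMap;
use scirs2_text::{CountVectorizer, Vectorizer}; // assumed paths

fn demo() -> Result<(), Box<dyn std::error::Error>> {
    let mut vectorizer = CountVectorizer::default();
    vectorizer.fit(&["one fish", "two fish"])?;

    let vocabulary = vectorizer.vocabulary();
    println!("{} distinct tokens", vocabulary.len());

    // Invert token -> index into index -> token.
    let mut index_to_token: HashMap<usize, String> = HashMap::new();
    for (token, &idx) in vocabulary.token_to_index().iter() {
        index_to_token.insert(idx, token.clone());
    }
    Ok(())
}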

Examples found in repository: examples/text_processing_demo.rs (line 84); the listing is identical to the one shown under new above.
More examples
examples/topic_modeling_demo.rs (line 45)
9 fn main() -> Result<(), Box<dyn std::error::Error>> {
10    println!("Topic Modeling with LDA Demo");
11    println!("===========================\n");
12
13    // Sample documents about different topics
14    let documents = vec![
15        // Technology documents
16        "Artificial intelligence and machine learning are transforming the tech industry",
17        "Deep learning neural networks require powerful GPUs for training",
18        "Computer vision algorithms can now recognize objects in real time",
19        "Natural language processing helps computers understand human language",
20        // Sports documents
21        "The basketball team won the championship after a thrilling final game",
22        "Football players need excellent physical conditioning and teamwork",
23        "Tennis requires both physical fitness and mental concentration",
24        "Swimming is an excellent full-body workout and competitive sport",
25        // Science documents
26        "Climate change is affecting global weather patterns and ecosystems",
27        "Quantum physics explores the behavior of matter at atomic scales",
28        "Genetic research is unlocking the secrets of human DNA",
29        "Space exploration continues to reveal mysteries of the universe",
30    ];
31
32    // Convert documents to document-term matrix
33    let mut vectorizer = CountVectorizer::default();
34    let doc_term_matrix = vectorizer.fit_transform(&documents)?;
35
36    println!("Document-Term Matrix:");
37    println!(
38        "  Shape: ({}, {})",
39        doc_term_matrix.nrows(),
40        doc_term_matrix.ncols()
41    );
42    println!("  Vocabulary size: {}\n", vectorizer.vocabulary_size());
43
44    // Create vocabulary mapping
45    let vocabulary = vectorizer.vocabulary();
46    let mut word_index_map = HashMap::new();
47    for (word, &idx) in vocabulary.token_to_index().iter() {
48        word_index_map.insert(idx, word.clone());
49    }
50
51    // Train LDA model
52    let mut lda = LdaBuilder::new()
53        .ntopics(3)
54        .maxiter(100)
55        .random_seed(42)
56        .doc_topic_prior(0.1)
57        .topic_word_prior(0.01)
58        .learning_method(LdaLearningMethod::Batch)
59        .build();
60
61    println!("Training LDA model with 3 topics...");
62    let doc_topics = lda.fit_transform(&doc_term_matrix)?;
63    println!("Training completed!\n");
64
65    // Display document-topic assignments
66    println!("Document-Topic Assignments:");
67    for (doc_idx, topic_dist) in doc_topics.outer_iter().enumerate() {
68        let max_topic = topic_dist
69            .iter()
70            .enumerate()
71            .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap())
72            .map(|(idx_, _)| idx_)
73            .unwrap();
74
75        println!(
76            "Document {}: Topic {} (probabilities: {:.3}, {:.3}, {:.3})",
77            doc_idx + 1,
78            max_topic,
79            topic_dist[0],
80            topic_dist[1],
81            topic_dist[2]
82        );
83    }
84    println!();
85
86    // Get topics with top words
87    let topics = lda.get_topics(10, &word_index_map)?;
88
89    println!("Discovered Topics:");
90    for topic in &topics {
91        println!("\nTopic {}:", topic.id);
92        println!("Top words:");
93        for (word, weight) in &topic.top_words {
94            println!("  {word} ({weight:.4})");
95        }
96    }
97
98    // Analyze a new document
99    println!("\n\nAnalyzing a new document:");
100    let new_doc = "Machine learning algorithms are revolutionizing artificial intelligence";
101    let new_doc_vec = vectorizer.transform(new_doc)?;
102    let new_doc_topics = lda.transform(&new_doc_vec.insert_axis(scirs2_core::ndarray::Axis(0)))?;
103
104    println!("Document: \"{new_doc}\"");
105    println!("Topic distribution:");
106    for (topic_idx, &prob) in new_doc_topics.row(0).iter().enumerate() {
107        println!("  Topic {topic_idx}: {prob:.3}");
108    }
109
110    // Create another LDA model with different configuration
111    println!("\n\nTrying different LDA configuration:");
112    let mut lda2 = LatentDirichletAllocation::with_ntopics(4);
113    lda2.fit(&doc_term_matrix)?;
114
115    let topics2 = lda2.get_topics(5, &word_index_map)?;
116    println!("Discovered {} topics with top 5 words each:", topics2.len());
117    for topic in &topics2 {
118        let words: Vec<String> = topic
119            .top_words
120            .iter()
121            .map(|(word_, _)| word_.clone())
122            .collect();
123        println!("Topic {}: {}", topic.id, words.join(", "));
124    }
125
126    Ok(())
127 }
pub fn vocabulary_size(&self) -> usize

Returns the number of distinct tokens in the vocabulary.

Examples found in repository: examples/topic_modeling_demo.rs (line 42); the listing is identical to the one shown under vocabulary above.
pub fn get_feature_count(
    &self,
    matrix: &Array2<f64>,
    document_index: usize,
    feature_index: usize,
) -> Option<f64>

Gets the count stored at the given document row and feature column of a previously computed count matrix, returning None when either index is out of bounds.
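A bounds-checked lookup sketch, with paths assumed as above:

use scirs2_text::{CountVectorizer, Vectorizer}; // assumed paths

fn demo() -> Result<(), Box<dyn std::error::Error>> {
    let docs = ["apples and oranges", "apples and apples"];
    let mut vectorizer = CountVectorizer::new(false);
    let matrix = vectorizer.fit_transform(&docs)?;

    // Look up one cell of the count matrix without indexing directly.
    if let Some(count) = vectorizer.get_feature_count(&matrix, 1, 0) {
        println!("document 1, feature 0: {count}");
    }
    Ok(())
}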

pub fn vocabulary_map(&self) -> HashMap<String, usize>

Returns the vocabulary as a HashMap from each token to its column index, for compatibility with visualization code.
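A short usage sketch, with paths assumed as above:

use std::collections::HashMap;
use scirs2_text::{CountVectorizer, Vectorizer}; // assumed paths

fn demo() -> Result<(), Box<dyn std::error::Error>> {
    let mut vectorizer = CountVectorizer::new(false);
    vectorizer.fit(&["red green blue"])?;

    // Owned token -> column-index map, e.g. for labelling plot axes.
    let vocab: HashMap<String, usize> = vectorizer.vocabulary_map();
    for (token, idx) in &vocab {
        println!("{token} -> column {idx}");
    }
    Ok(())
}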

Trait Implementations

impl Clone for CountVectorizer

fn clone(&self) -> Self

Returns a duplicate of the value.

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source.

impl Default for CountVectorizer

fn default() -> Self

Returns the “default value” for a type.

impl Vectorizer for CountVectorizer

fn fit(&mut self, texts: &[&str]) -> Result<()>

Fits the vectorizer on a corpus of texts, building its vocabulary.

fn transform(&self, text: &str) -> Result<Array1<f64>>

Transforms a single text into a count vector over the fitted vocabulary.

fn transform_batch(&self, texts: &[&str]) -> Result<Array2<f64>>

Transforms a batch of texts into a matrix in which each row is a document vector.

fn fit_transform(&mut self, texts: &[&str]) -> Result<Array2<f64>>

Fits on the corpus and then transforms the same texts in a single step.
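A sketch of the trait workflow under the assumptions above: fit builds the vocabulary, and later transforms map any text onto the same feature columns:

use scirs2_text::{CountVectorizer, Vectorizer}; // assumed paths

fn demo() -> Result<(), Box<dyn std::error::Error>> {
    let corpus = ["seen text", "more seen text"];
    let mut vectorizer = CountVectorizer::default();

    // fit builds the vocabulary; transform_batch yields one row per text.
    vectorizer.fit(&corpus)?;
    let matrix = vectorizer.transform_batch(&corpus)?; // Array2<f64>

    // An unseen text is mapped onto the same fixed set of columns.
    let row = vectorizer.transform("some unseen text")?; // Array1<f64>
    assert_eq!(row.len(), matrix.ncols());
    Ok(())
}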

Auto Trait Implementations

Blanket Implementations

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId

Gets the TypeId of self.

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T

Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.

impl<T> CloneToUninit for T
where T: Clone,

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬 This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest.

impl<T> From<T> for T

fn from(t: T) -> T

Returns the argument unchanged.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> IntoEither for T

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true; otherwise converts self into a Right variant.

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true; otherwise converts self into a Right variant.

impl<T> Pointable for T

const ALIGN: usize

The alignment of the pointer.

type Init = T

The type for initializers.

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a value with the given initializer.

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer.

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer.

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer.

impl<T> Same for T

type Output = T

Should always be Self.

impl<SS, SP> SupersetOf<SS> for SP
where SS: SubsetOf<SP>,

fn to_subset(&self) -> Option<SS>

The inverse inclusion map: attempts to construct self from the equivalent element of its superset.

fn is_in_subset(&self) -> bool

Checks if self is actually part of its subset T (and can be converted to it).

fn to_subset_unchecked(&self) -> SS

Use with care! Same as self.to_subset but without any property checks. Always succeeds.

fn from_subset(element: &SS) -> SP

The inclusion map: converts self to the equivalent element of its superset.

impl<T> ToOwned for T
where T: Clone,

type Owned = T

The resulting type after obtaining ownership.

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning.

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning.

impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

fn vzip(self) -> V