#![doc = include_str!("../README.md")]
#![allow(dead_code)]

mod case;
mod char_ext;
mod char_string;
mod currency;
mod dict_word_metadata;
mod dict_word_metadata_orthography;
mod document;
mod edit_distance;
pub mod expr;
mod fat_token;
mod ignored_lints;
mod irregular_nouns;
mod irregular_verbs;
pub mod language_detection;
mod lexing;
pub mod linting;
mod mask;
mod number;
mod offsets;
pub mod parsers;
pub mod patterns;
mod punctuation;
mod render_markdown;
mod span;
pub mod spell;
mod sync;
mod thesaurus_helper;
mod title_case;
mod token;
mod token_kind;
mod token_string_ext;
mod vec_ext;
pub mod weir;
pub mod weirpack;

use render_markdown::render_markdown;
use std::collections::{BTreeMap, VecDeque};

pub use case::{Case, CaseIterExt};
pub use char_string::{CharString, CharStringExt};
pub use currency::Currency;
pub use dict_word_metadata::{
    AdverbData, ConjunctionData, Degree, DeterminerData, Dialect, DialectFlags, DictWordMetadata,
    NounData, PronounData, VerbData, VerbForm, VerbFormFlags,
};
pub use dict_word_metadata_orthography::{OrthFlags, Orthography};
pub use document::Document;
pub use fat_token::{FatStringToken, FatToken};
pub use ignored_lints::{IgnoredLints, LintContext};
pub use irregular_nouns::IrregularNouns;
pub use irregular_verbs::IrregularVerbs;
use linting::Lint;
pub use mask::{Mask, Masker, RegexMasker};
pub use number::{Number, OrdinalSuffix};
pub use punctuation::{Punctuation, Quote};
pub use span::Span;
pub use sync::{LSend, Lrc};
pub use title_case::{make_title_case, make_title_case_str};
pub use token::Token;
pub use token_kind::TokenKind;
pub use token_string_ext::TokenStringExt;
pub use vec_ext::VecExt;

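/// Get the version of this crate, as declared in its `Cargo.toml` manifest.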
pub fn core_version() -> &'static str {
    env!("CARGO_PKG_VERSION")
}

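/// Remove lints whose spans overlap, so that at most one lint survives for any
/// region of the document.
///
/// Lints are ordered by span start (ties broken by longer span, then by the
/// lint's `priority`), and any lint that begins inside a previously kept span
/// is dropped. The surviving lints are left ordered by their position in the
/// document.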
pub fn remove_overlaps(lints: &mut Vec<Lint>) {
    if lints.len() < 2 {
        return;
    }

    let mut remove_indices = VecDeque::new();

    // Stable sorts: order by priority, then by (start ascending, end descending),
    // so earlier-starting and longer spans come first.
    lints.sort_by_key(|l| l.priority);
    lints.sort_by_key(|l| (l.span.start, !0 - l.span.end));

    let mut cur = 0;

    // Sweep left to right, flagging every lint that starts inside the span of a
    // previously kept lint.
    for (i, lint) in lints.iter().enumerate() {
        if lint.span.start < cur {
            remove_indices.push_back(i);
            continue;
        }
        cur = lint.span.end;
    }

    lints.remove_indices(remove_indices);
}

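/// Remove overlapping lints from lints that have been grouped under keys (for
/// example, one entry per rule), without flattening the map.
///
/// The lints that survive are the same ones [`remove_overlaps`] would keep if
/// every group were merged into a single `Vec`; only the grouping is preserved.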
pub fn remove_overlaps_map<K: Ord>(lint_map: &mut BTreeMap<K, Vec<Lint>>) {
    let total: usize = lint_map.values().map(Vec::len).sum();
    if total < 2 {
        return;
    }

    struct IndexedSpan {
        rule_idx: usize,
        lint_idx: usize,
        priority: u8,
        start: usize,
        end: usize,
    }

    // One removal flag per lint, addressed by (rule index, lint index within that rule).
    let mut removal_flags: Vec<Vec<bool>> = lint_map
        .values()
        .map(|lints| vec![false; lints.len()])
        .collect();

    // Flatten every lint's span into one list, remembering where each came from.
    let mut spans = Vec::with_capacity(total);
    for (rule_idx, (_, lints)) in lint_map.iter().enumerate() {
        for (lint_idx, lint) in lints.iter().enumerate() {
            spans.push(IndexedSpan {
                priority: lint.priority,
                rule_idx,
                lint_idx,
                start: lint.span.start,
                end: lint.span.end,
            });
        }
    }

    // Same ordering as `remove_overlaps`: priority first, then (start ascending,
    // end descending).
    spans.sort_by_key(|span| span.priority);
    spans.sort_by_key(|span| (span.start, usize::MAX - span.end));

    // Sweep left to right, flagging every span that starts inside a kept span.
    let mut cur = 0;
    for span in spans {
        if span.start < cur {
            removal_flags[span.rule_idx][span.lint_idx] = true;
        } else {
            cur = span.end;
        }
    }

    // Drop the flagged lints from each group, skipping groups with nothing flagged.
    for (rule_idx, (_, lints)) in lint_map.iter_mut().enumerate() {
        if removal_flags[rule_idx].iter().all(|flag| !*flag) {
            continue;
        }

        let mut idx = 0;
        lints.retain(|_| {
            let remove = removal_flags[rule_idx][idx];
            idx += 1;
            !remove
        });
    }
}

#[cfg(test)]
mod tests {
    use std::hash::DefaultHasher;
    use std::hash::{Hash, Hasher};

    use itertools::Itertools;
    use quickcheck_macros::quickcheck;

    use crate::linting::Lint;
    use crate::remove_overlaps_map;
    use crate::spell::FstDictionary;
    use crate::{
        Dialect, Document,
        linting::{LintGroup, Linter},
        remove_overlaps,
    };

    #[test]
    fn keeps_space_lint() {
        let doc = Document::new_plain_english_curated("Ths tet");

        let mut linter = LintGroup::new_curated(FstDictionary::curated(), Dialect::American);

        let mut lints = linter.lint(&doc);

        dbg!(&lints);
        remove_overlaps(&mut lints);
        dbg!(&lints);

        assert_eq!(lints.len(), 3);
    }

    #[quickcheck]
    fn overlap_removals_have_equivalent_behavior(s: String) {
        let doc = Document::new_plain_english_curated(&s);
        let mut linter = LintGroup::new_curated(FstDictionary::curated(), Dialect::American);

        let mut lint_map = linter.organized_lints(&doc);
        let mut lint_flat: Vec<_> = lint_map.values().flatten().cloned().collect();

        remove_overlaps_map(&mut lint_map);
        remove_overlaps(&mut lint_flat);

        let post_removal_flat: Vec<_> = lint_map.values().flatten().cloned().collect();

        fn hash_lint(lint: &Lint) -> u64 {
            let mut hasher = DefaultHasher::new();
            lint.hash(&mut hasher);
            hasher.finish()
        }

        let lint_flat_hashes: Vec<_> = lint_flat.iter().map(hash_lint).sorted().collect();
        let post_removal_flat_hashes: Vec<_> =
            post_removal_flat.iter().map(hash_lint).sorted().collect();

        assert_eq!(post_removal_flat_hashes, lint_flat_hashes);
    }
}