use std::{borrow::Cow, collections::HashMap, sync::LazyLock};
use regex::{Regex, Replacer};
use unicode_normalization::UnicodeNormalization;
/// A single preprocessing pass over a piece of text. Passes take and return
/// `Cow<str>` so that a pass which changes nothing can hand the input through
/// without allocating.
type PreprocFn = dyn Fn(Cow<'_, str>) -> Cow<'_, str>;

/// Extension trait adding a `Cow`-in/`Cow`-out variant of
/// `Regex::replace_all`, preserving the input's lifetime where possible.
trait CowRegex {
    fn replace_all_cow<'a, R: Replacer>(&self, text: Cow<'a, str>, replace: R) -> Cow<'a, str>;
}
impl CowRegex for Regex {
    /// `replace_all` over a `Cow`: a borrowed haystack can stay borrowed,
    /// but a replacement over an owned haystack borrows from a local and so
    /// must be copied out into an owned result.
    fn replace_all_cow<'a, R: Replacer>(&self, text: Cow<'a, str>, replace: R) -> Cow<'a, str> {
        match text {
            Cow::Borrowed(haystack) => self.replace_all(haystack, replace),
            Cow::Owned(haystack) => {
                let replaced = self.replace_all(&haystack, replace);
                Cow::Owned(replaced.into_owned())
            }
        }
    }
}
/// Conservative normalization passes, applied per line and in this order by
/// `apply_normalizers`: they clean up Unicode form, junk characters, URLs,
/// horizontal whitespace, and punctuation without removing any words.
pub const PREPROC_NORMALIZE: [&PreprocFn; 6] = [
    &normalize_unicode,
    &remove_junk,
    &blackbox_urls,
    &normalize_horizontal_whitespace,
    &normalize_punctuation,
    &trim,
];
/// Aggressive passes, applied to the whole text in this order by
/// `apply_aggressive`. Unlike the normalizers these deliberately discard
/// content (punctuation, case, title lines, copyright statements).
/// Order matters: `remove_title_line` and `remove_copyright_statements`
/// match lowercase patterns, so they must run after `lowercaseify`.
pub const PREPROC_AGGRESSIVE: [&PreprocFn; 8] = [
    &remove_common_tokens,
    &normalize_vertical_whitespace,
    &remove_punctuation,
    &lowercaseify,
    &remove_title_line,
    &remove_copyright_statements,
    &collapse_whitespace,
    &trim,
];
/// Run every pass in `PREPROC_NORMALIZE` over each line of `text`, returning
/// the normalized lines (one `String` per input line).
pub fn apply_normalizers(text: &str) -> Vec<String> {
    text.split('\n')
        .map(|line| {
            PREPROC_NORMALIZE
                .iter()
                .fold(Cow::from(line), |current, preproc| preproc(current))
                .into_owned()
        })
        .collect()
}
pub fn apply_aggressive(text: &str) -> String {
let mut out = text.into();
for preproc in &PREPROC_AGGRESSIVE {
out = preproc(out);
}
out.into()
}
fn normalize_unicode(input: Cow<'_, str>) -> Cow<'_, str> {
input.nfc().collect::<String>().into()
}
/// Delete every character that is not a word character, whitespace, or
/// punctuation.
fn remove_junk(input: Cow<'_, str>) -> Cow<'_, str> {
    static JUNK: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"[^\w\s\pP]+").unwrap());
    JUNK.replace_all_cow(input, "")
}
/// Replace any http(s) URL with a fixed placeholder so that texts differing
/// only in their URLs normalize to the same thing.
fn blackbox_urls(input: Cow<'_, str>) -> Cow<'_, str> {
    static URL: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"https?://\S+").unwrap());
    URL.replace_all_cow(input, "http://blackboxed/url")
}
/// Collapse runs of horizontal whitespace (tabs, Unicode space separators)
/// and separator-like characters (`\`, `/`, `|`, U+2044 FRACTION SLASH)
/// into a single ASCII space.
///
/// BUG FIX: the fraction slash must be written with the braced escape
/// `\x{2044}`. The regex crate parses un-braced `\x` as exactly two hex
/// digits, so the previous `\x2044` meant `\x20` (a space) followed by two
/// literal `4`s — putting the digit '4' into the character class and wrongly
/// rewriting every '4' in the input to a space.
fn normalize_horizontal_whitespace(input: Cow<'_, str>) -> Cow<'_, str> {
    static RX: LazyLock<Regex> =
        LazyLock::new(|| Regex::new(r"(?x)[ \t\p{Zs} \\ / \| \x{2044} ]+").unwrap());
    RX.replace_all_cow(input, " ")
}
/// Collapse each Unicode punctuation family to a single ASCII stand-in:
/// quotes -> `'`, dashes -> `-`, open/close brackets -> `(`/`)`,
/// connectors -> `_`, and copyright signs -> `(c)`.
fn normalize_punctuation(input: Cow<'_, str>) -> Cow<'_, str> {
    // (pattern, replacement) pairs, applied in this order.
    static RULES: LazyLock<[(Regex, &'static str); 6]> = LazyLock::new(|| {
        [
            (Regex::new(r#"["'\p{Pi}\p{Pf}]+"#).unwrap(), "'"),
            (Regex::new(r"\p{Pd}+").unwrap(), "-"),
            (Regex::new(r"\p{Ps}+").unwrap(), "("),
            (Regex::new(r"\p{Pe}+").unwrap(), ")"),
            (Regex::new(r"\p{Pc}+").unwrap(), "_"),
            (Regex::new(r"[©Ⓒⓒ]").unwrap(), "(c)"),
        ]
    });
    let mut text = input;
    for (rx, replacement) in RULES.iter() {
        text = rx.replace_all_cow(text, *replacement);
    }
    text
}
/// Trim leading/trailing whitespace. A borrowed input stays borrowed (the
/// trimmed slice shares the original lifetime); an owned input must be
/// re-allocated, since `str::trim` only returns a subslice.
fn trim(input: Cow<'_, str>) -> Cow<'_, str> {
    match input {
        Cow::Borrowed(s) => Cow::Borrowed(s.trim()),
        Cow::Owned(s) => Cow::Owned(s.trim().to_string()),
    }
}
/// Truncate `s` to at most `idx` bytes without ever splitting a multi-byte
/// UTF-8 sequence: if `idx` lands inside a character, the whole partial
/// character is dropped. An `idx` past the end returns `s` unchanged.
fn trim_byte_adjusted(s: &str, idx: usize) -> &str {
    if idx >= s.len() {
        return s;
    }
    // Walk backwards to the nearest character boundary at or before `idx`.
    // `is_char_boundary(0)` is always true, so this cannot underflow.
    let mut end = idx;
    while !s.is_char_boundary(end) {
        end -= 1;
    }
    &s[..end]
}
/// Return the (trimmed) longest common byte prefix of two lines. Despite the
/// name, this is a common *prefix*, not a general longest-common-substring.
fn lcs_substr<'a>(f_line: &'a str, s_line: &'a str) -> &'a str {
    // Count matching leading bytes of both lines.
    let common = f_line
        .bytes()
        .zip(s_line.bytes())
        .take_while(|(f, s)| f == s)
        .count();
    // Snap back to a character boundary, then drop surrounding whitespace.
    trim_byte_adjusted(f_line, common).trim()
}
/// Detect a prefix shared by most lines (e.g. a comment leader such as
/// `//` or `%%Copyright:`) and strip it from every line that has it.
fn remove_common_tokens(input: Cow<'_, str>) -> Cow<'_, str> {
    let mut l_iter = input.split('\n');
    // How often each common-prefix string was seen across adjacent line pairs.
    let mut prefix_counts = HashMap::<_, u32>::new();
    if let Some(first) = l_iter.next() {
        // Sliding (previous, current) line pairs over the whole text.
        let mut pair = ("", first);
        let line_pairs = std::iter::from_fn(|| {
            pair = (pair.1, l_iter.next()?);
            Some(pair)
        });
        for (a, b) in line_pairs {
            let common = lcs_substr(a, b);
            // Ignore prefixes of 3 bytes or fewer — too likely coincidental.
            if common.len() > 3 {
                // NOTE(review): the first sighting is recorded as 2
                // (`or_insert(1)` then `+= 1`) — presumably crediting both
                // lines of the pair; confirm before changing, the in-file
                // tests depend on the resulting threshold behavior.
                *prefix_counts.entry(common).or_insert(1) += 1;
            }
        }
    }
    // Most frequently seen prefix. Ties are broken by HashMap iteration
    // order, which is not deterministic across runs.
    let most_common = match prefix_counts.iter().max_by_key(|&(_k, v)| v) {
        Some((prefix, _count)) => prefix,
        None => return input,
    };
    // Total weight of every recorded prefix that extends `most_common`.
    let common_count = prefix_counts
        .iter()
        .filter_map(|(s, count)| Some(count).filter(|_| s.starts_with(most_common)))
        .sum::<u32>();
    let line_count = input.split('\n').count();
    // Only strip when the prefix covers roughly 80% of the lines.
    let prefix_threshold = (0.8f32 * line_count as f32) as _;
    if common_count < prefix_threshold {
        return input;
    }
    // Rebuild the text with the prefix removed (and each line re-trimmed).
    let mut rem = String::with_capacity(input.len());
    for line in input.split('\n') {
        rem.push_str(line.strip_prefix(most_common).unwrap_or(line).trim());
        rem.push('\n');
    }
    // Drop the trailing newline added by the loop above.
    rem.pop();
    rem.into()
}
/// Unify all vertical whitespace to `\n`, then squeeze runs of three or more
/// newlines down to a single blank line (`\n\n`).
fn normalize_vertical_whitespace(input: Cow<'_, str>) -> Cow<'_, str> {
    static LINE_BREAKS: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"[\r\n\v\f]").unwrap());
    static EXTRA_BLANKS: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"\n{3,}").unwrap());
    let unified = LINE_BREAKS.replace_all_cow(input, "\n");
    EXTRA_BLANKS.replace_all_cow(unified, "\n\n")
}
/// Delete everything that is neither a word character nor whitespace.
fn remove_punctuation(input: Cow<'_, str>) -> Cow<'_, str> {
    static PUNCT: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"[^\w\s]+").unwrap());
    PUNCT.replace_all_cow(input, "")
}
/// Unicode-aware lowercasing; always produces an owned string.
fn lowercaseify(input: Cow<'_, str>) -> Cow<'_, str> {
    Cow::Owned(input.to_lowercase())
}
/// Strip a leading "... license ..." title (with optional version and
/// copyright tail) when it is followed by a blank line. The pattern is
/// lowercase because this pass runs after `lowercaseify` (see
/// `PREPROC_AGGRESSIVE`).
fn remove_title_line(input: Cow<'_, str>) -> Cow<'_, str> {
    static TITLE: LazyLock<Regex> =
        LazyLock::new(|| Regex::new(r"^.*license( version \S+)?( copyright.*)?\n\n").unwrap());
    TITLE.replace_all_cow(input, "")
}
/// Replace copyright statements (leading paragraphs of "copyright ..." lines,
/// a first line mentioning "copyright", or lines shaped like a copyright
/// notice) with a paragraph break. Matches lowercase text only, so it must
/// run after `lowercaseify` (see `PREPROC_AGGRESSIVE`).
fn remove_copyright_statements(input: Cow<'_, str>) -> Cow<'_, str> {
    static RX: LazyLock<Regex> = LazyLock::new(|| {
        Regex::new(
            r"(?mx)
(
# either a new paragraph, or the beginning of the text + empty lines
(\n\n|\A\n*)
# any number of lines starting with 'copyright' followed by a new paragraph
(^\x20*copyright.*?$)+
\n\n
)
|
(
# or the very first line if it has 'copyright' in it
\A.*copyright.*$
)
|
(
# or any lines that really look like a copyright statement
^copyright (\s+(c|\d+))+ .*?$
)
",
        )
        .unwrap()
    });
    RX.replace_all_cow(input, "\n\n")
}
/// Collapse any run of whitespace (newlines included) into a single space.
fn collapse_whitespace(input: Cow<'_, str>) -> Cow<'_, str> {
    static WS: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"\s+").unwrap());
    WS.replace_all_cow(input, " ")
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Truncation must never split a multi-byte UTF-8 sequence; cutting
    /// mid-character drops the whole partial character.
    #[test]
    fn trim_byte_adjusted_respects_multibyte_characters() {
        let input = "RustКраб橙蟹🦀";
        // expected[i] is `input` truncated at byte index i.
        let expected = [
            "",
            "R",
            "Ru",
            "Rus",
            "Rust",
            "Rust",
            "RustК",
            "RustК",
            "RustКр",
            "RustКр",
            "RustКра",
            "RustКра",
            "RustКраб",
            "RustКраб",
            "RustКраб",
            "RustКраб橙",
            "RustКраб橙",
            "RustКраб橙",
            "RustКраб橙蟹",
            "RustКраб橙蟹",
            "RustКраб橙蟹",
            "RustКраб橙蟹",
            "RustКраб橙蟹🦀",
        ];
        for (i, &outcome) in expected.iter().enumerate() {
            assert_eq!(outcome, trim_byte_adjusted(input, i));
        }
    }

    /// A prefix shared by nearly every line should be stripped wholesale.
    #[test]
    fn greatest_substring_removal() {
        let text = "%%Copyright: Copyright\n\
%%Copyright: All rights reserved.\n\
%%Copyright: Redistribution and use in source and binary forms, with or\n\
%%Copyright: without modification, are permitted provided that the\n\
%%Copyright: following conditions are met:\n\
\n\
abcd";
        let new_text = remove_common_tokens(text.into());
        println!("{}", new_text);
        assert!(
            !new_text.contains("%%Copyright"),
            "new text shouldn't contain the common substring"
        );
    }

    /// A word that happens to be common must only be removed where it is a
    /// line prefix, never from the middle of a line.
    #[test]
    fn greatest_substring_removal_keep_inner() {
        let text = "this string should still have\n\
this word -> this <- in it even though\n\
this is still the most common word";
        let new_text = remove_common_tokens(text.into());
        println!("-- {}", new_text);
        assert!(!new_text.contains("\nthis"));
        assert!(new_text.contains("this"));
        // A token repeated mid-line (below the prefix threshold) survives.
        let text = "aaaa bbbb cccc dddd\n\
eeee ffff aaaa gggg\n\
hhhh iiii jjjj";
        let new_text = remove_common_tokens(text.into());
        println!("-- {}", new_text);
        assert!(new_text.contains("aaaa"));
    }

    /// A long run of prefixed lines followed by a short unprefixed tail:
    /// the prefix goes, the tail lines are preserved.
    #[test]
    fn greatest_substring_removal_42() {
        let text = "AAAAAA line 1\n\
AAAAAA another line here\n\
AAAAAA yet another line here\n\
AAAAAA how long will this go on\n\
AAAAAA another line here\n\
AAAAAA more\n\
AAAAAA one more\n\
AAAAAA two more\n\
AAAAAA three more\n\
AAAAAA four more\n\
AAAAAA five more\n\
AAAAAA six more\n\
\n\
preserve\n\
keep";
        let new_text = remove_common_tokens(text.into());
        println!("{}", new_text);
        assert!(new_text.contains("preserve"));
        assert!(new_text.contains("keep"));
        assert!(!new_text.contains("AAAAAA"));
    }

    /// The per-line normalizers must never merge or drop lines.
    #[test]
    fn normalize_no_line_mangle() {
        let text = "some license
copyright 2012 person
\tlicense\r
text
\t
goes
here";
        let text_lines = text.lines().count();
        let normalized = apply_normalizers(text);
        let normalized_lines = normalized.len();
        assert_eq!(
            text_lines, normalized_lines,
            "normalizers shouldnt change line counts"
        );
    }
}