/// Change classification for one file in a diff, derived from its headers.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FileStatus {
    /// File is new in this diff (a "new file" header was seen).
    Added,
    /// Default status when no other marker is present.
    Modified,
    /// File was removed (a "deleted file" header was seen).
    Deleted,
    /// File was renamed (a "rename to" header was seen).
    Renamed,
}
impl FileStatus {
pub fn badge(&self) -> &'static str {
match self {
FileStatus::Added => "A",
FileStatus::Modified => "M",
FileStatus::Deleted => "D",
FileStatus::Renamed => "R",
}
}
}
/// Classification of a rendered diff row.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LineKind {
    /// Unchanged line present in both old and new versions.
    Context,
    /// Line added in the new version.
    Added,
    /// Line removed from the old version.
    Removed,
    /// Raw `@@ ... @@` hunk header row.
    HunkHeader,
    /// Fold row that expands hidden lines downward.
    FoldDown,
    /// Fold row that expands hidden lines upward.
    FoldUp,
}
impl LineKind {
pub fn is_code(&self) -> bool {
matches!(
self,
LineKind::Context | LineKind::Added | LineKind::Removed
)
}
pub fn is_navigable(&self) -> bool {
matches!(
self,
LineKind::Context
| LineKind::Added
| LineKind::Removed
| LineKind::FoldDown
| LineKind::FoldUp
)
}
pub fn is_fold(&self) -> bool {
matches!(self, LineKind::FoldDown | LineKind::FoldUp)
}
pub fn is_hunk_boundary(&self) -> bool {
matches!(
self,
LineKind::HunkHeader | LineKind::FoldDown | LineKind::FoldUp
)
}
pub fn is_fixed_height(&self) -> bool {
self.is_fold() || matches!(self, LineKind::HunkHeader)
}
}
/// Syntax-highlighting category for a span of line text (used by
/// `DiffLine::segments`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TokenKind {
    /// Plain text with no specific highlighting.
    Default,
    /// Language keyword.
    Keyword,
    /// String literal.
    String,
    /// Comment text.
    Comment,
    /// Numeric literal.
    Number,
    /// Constant identifier.
    Constant,
    /// Type name.
    Type,
    /// Function name.
    Function,
    /// Variable name.
    Variable,
    /// Operator token.
    Operator,
    /// Punctuation and delimiters.
    Punctuation,
}
/// One rendered row of a file's diff.
///
/// (Fix: the `word_highlights` and `paired` fields were fused onto a single
/// source line; they are now declared one per line, order unchanged.)
#[derive(Debug, Clone)]
pub struct DiffLine {
    /// What kind of row this is (code, hunk header, fold).
    pub kind: LineKind,
    /// Row text without the leading `+`/`-`/space marker.
    pub text: String,
    /// Syntax-highlight segments covering `text` (empty until highlighted).
    pub segments: Vec<(TokenKind, String)>,
    /// Pre-formatted two-column line-number gutter (see `format_gutter`).
    pub gutter: String,
    /// 1-based line number in the old file, when applicable.
    pub old_lineno: Option<u32>,
    /// 1-based line number in the new file, when applicable.
    pub new_lineno: Option<u32>,
    /// Number of hidden lines represented by a fold row (0 otherwise).
    pub fold_count: u32,
    /// Hunk header text carried by fold rows for context display.
    pub hunk_header: String,
    /// (start, end) highlight ranges within `text` for word-level changes.
    pub word_highlights: Vec<(usize, usize)>,
    /// True when this removed/added line was paired with a counterpart.
    pub paired: bool,
    /// True when `\ No newline at end of file` applies to this line.
    pub no_newline: bool,
}
impl DiffLine {
    /// Row for a raw `@@` hunk header line; carries no line numbers.
    fn hunk(text: String) -> Self {
        Self {
            kind: LineKind::HunkHeader,
            text,
            segments: Vec::new(),
            gutter: format_gutter(None, None),
            old_lineno: None,
            new_lineno: None,
            fold_count: 0,
            hunk_header: String::new(),
            word_highlights: Vec::new(),
            paired: false,
            no_newline: false,
        }
    }

    /// Row for a context/added/removed source line with its gutter numbers.
    pub fn body(
        kind: LineKind,
        text: String,
        old_lineno: Option<u32>,
        new_lineno: Option<u32>,
    ) -> Self {
        Self {
            kind,
            text,
            segments: Vec::new(),
            gutter: format_gutter(old_lineno, new_lineno),
            old_lineno,
            new_lineno,
            fold_count: 0,
            hunk_header: String::new(),
            word_highlights: Vec::new(),
            paired: false,
            no_newline: false,
        }
    }

    /// Fold row expanding downward over `count` hidden lines.
    pub fn fold_down(old_start: u32, new_start: u32, count: u32, hunk_header: String) -> Self {
        Self::fold(LineKind::FoldDown, '▼', old_start, new_start, count, hunk_header)
    }

    /// Fold row expanding upward over `count` hidden lines.
    pub fn fold_up(old_start: u32, new_start: u32, count: u32, hunk_header: String) -> Self {
        Self::fold(LineKind::FoldUp, '▲', old_start, new_start, count, hunk_header)
    }

    /// Shared constructor for both fold directions. `hunk_header` is moved
    /// rather than cloned (the old `fold_down` cloned it needlessly).
    fn fold(
        kind: LineKind,
        arrow: char,
        old_start: u32,
        new_start: u32,
        count: u32,
        hunk_header: String,
    ) -> Self {
        Self {
            kind,
            text: format!("{} {} lines", arrow, count),
            segments: Vec::new(),
            gutter: format_gutter(None, None),
            old_lineno: Some(old_start),
            new_lineno: Some(new_start),
            fold_count: count,
            hunk_header,
            word_highlights: Vec::new(),
            paired: false,
            no_newline: false,
        }
    }
}
/// Render the two-column line-number gutter: each column right-aligned to
/// width 4, columns separated by a space, with a trailing space.
fn format_gutter(old: Option<u32>, new: Option<u32>) -> String {
    let cell = |n: Option<u32>| n.map_or_else(|| "    ".to_string(), |v| format!("{:>4}", v));
    format!("{} {} ", cell(old), cell(new))
}
/// One parsed file's diff plus its display state.
#[derive(Debug, Clone)]
pub struct FileDiff {
    /// New-side path (or the only known path).
    pub path: String,
    /// Old-side path when known (set for renames and from `---` headers).
    pub old_path: Option<String>,
    /// Added / modified / deleted / renamed classification.
    pub status: FileStatus,
    /// Number of `+` lines.
    pub added: usize,
    /// Number of `-` lines.
    pub removed: usize,
    /// Rendered rows for this file.
    pub lines: Vec<DiffLine>,
    /// True when collapsed automatically (lock files, generated code).
    pub auto_collapsed: bool,
    /// Viewed flag; auto-collapsed files start with this set.
    pub viewed: bool,
}
/// Parse unified-diff text (e.g. `git diff` output) into one `FileDiff` per
/// changed file.
///
/// Recognizes `diff --git` file headers, `new file`/`deleted file`/rename
/// metadata, `---`/`+++` path lines, `@@` hunk headers (emitting fold rows
/// for the hidden gap before each hunk), body lines (`+`/`-`/space), and the
/// `\ No newline at end of file` marker. Lines before the first file header
/// are ignored. After parsing, every file is post-processed for
/// auto-collapse and word-level diff highlights.
pub fn parse_unified(input: &str) -> Vec<FileDiff> {
    let mut files: Vec<FileDiff> = Vec::new();
    // The file currently being accumulated, if any.
    let mut cur: Option<FileDiff> = None;
    // Path captured from "rename from", waiting for its "rename to".
    let mut pending_old: Option<String> = None;
    // Next expected old/new line numbers within the current hunk (0 = no
    // hunk seen yet for this file).
    let mut old_ln: u32 = 0;
    let mut new_ln: u32 = 0;
    for line in input.lines() {
        // New file section: flush the previous file and start a fresh one.
        if let Some(rest) = line.strip_prefix("diff --git ") {
            if let Some(f) = cur.take() {
                files.push(f);
            }
            let (a, b) = parse_diff_git_header(rest);
            // Prefer the new-side path; fall back to the old side.
            let path = b.clone().or_else(|| a.clone()).unwrap_or_default();
            cur = Some(FileDiff {
                path: strip_prefix_ab(&path),
                old_path: a.map(|p| strip_prefix_ab(&p)),
                status: FileStatus::Modified,
                added: 0,
                removed: 0,
                lines: Vec::new(),
                auto_collapsed: false,
                viewed: false,
            });
            pending_old = None;
            old_ln = 0;
            new_ln = 0;
            continue;
        }
        // Everything below belongs to a file section; skip stray preamble.
        let Some(file) = cur.as_mut() else { continue };
        if line.starts_with("new file") {
            file.status = FileStatus::Added;
            continue;
        }
        if line.starts_with("deleted file") {
            file.status = FileStatus::Deleted;
            continue;
        }
        // Rename metadata ("similarity index" precedes the rename pair).
        if line.starts_with("rename ") || line.starts_with("similarity index") {
            if let Some(old_path) = line.strip_prefix("rename from ") {
                pending_old = Some(old_path.to_string());
            }
            if let Some(new_path) = line.strip_prefix("rename to ") {
                file.status = FileStatus::Renamed;
                if let Some(old) = pending_old.take() {
                    file.old_path = Some(old);
                }
                file.path = new_path.to_string();
            }
            continue;
        }
        // "--- " old-side path (None for /dev/null, i.e. added files).
        if let Some(rest) = line.strip_prefix("--- ") {
            file.old_path = parse_diff_path_header(rest);
            continue;
        }
        // "+++ " new-side path; keep the existing path for /dev/null.
        if let Some(rest) = line.strip_prefix("+++ ") {
            if let Some(path) = parse_diff_path_header(rest) {
                file.path = path;
            }
            continue;
        }
        // Metadata lines that produce no rendered row.
        if line.starts_with("index ")
            || line.starts_with("Binary files")
            || line.starts_with("old mode")
            || line.starts_with("new mode")
        {
            continue;
        }
        // "\ No newline at end of file" applies to the preceding code line.
        if line.starts_with('\\') {
            if let Some(last) = file.lines.last_mut() {
                if last.kind.is_code() {
                    last.no_newline = true;
                }
            }
            continue;
        }
        if line.starts_with("@@") {
            let (old_start, new_start) = match parse_hunk_header(line) {
                Some((o, n)) => (o, n),
                None => {
                    // Unparseable header: reset counters, keep the raw row.
                    old_ln = 0;
                    new_ln = 0;
                    file.lines.push(DiffLine::hunk(line.to_string()));
                    continue;
                }
            };
            // Where the hidden gap before this hunk begins (line 1 if no
            // hunk has been seen yet in this file).
            let fold_new_start = if new_ln == 0 { 1 } else { new_ln };
            let fold_old_start = if old_ln == 0 { 1 } else { old_ln };
            let fold_count = new_start.saturating_sub(fold_new_start);
            old_ln = old_start;
            new_ln = new_start;
            if fold_count > 0 {
                let header = line.to_string();
                if fold_count > 20 {
                    // Large gap: emit a down-fold and an up-fold pair; only
                    // the up-fold carries the hunk header text.
                    file.lines.push(DiffLine::fold_down(
                        fold_old_start,
                        fold_new_start,
                        fold_count,
                        String::new(),
                    ));
                    file.lines.push(DiffLine::fold_up(
                        fold_old_start,
                        fold_new_start,
                        fold_count,
                        header,
                    ));
                } else {
                    // Small gap: a single down-fold with the header attached.
                    file.lines.push(DiffLine::fold_down(
                        fold_old_start,
                        fold_new_start,
                        fold_count,
                        header,
                    ));
                }
            } else {
                // No hidden gap: show the raw hunk header row instead.
                file.lines.push(DiffLine::hunk(line.to_string()));
            }
            continue;
        }
        // Body lines: the leading '+', '-', or ' ' selects the line kind;
        // anything else (e.g. a truly empty line) is dropped.
        if let Some(rest) = line.strip_prefix('+') {
            file.added += 1;
            file.lines.push(DiffLine::body(
                LineKind::Added,
                rest.to_string(),
                None,
                Some(new_ln),
            ));
            new_ln += 1;
        } else if let Some(rest) = line.strip_prefix('-') {
            file.removed += 1;
            file.lines.push(DiffLine::body(
                LineKind::Removed,
                rest.to_string(),
                Some(old_ln),
                None,
            ));
            old_ln += 1;
        } else if let Some(rest) = line.strip_prefix(' ') {
            file.lines.push(DiffLine::body(
                LineKind::Context,
                rest.to_string(),
                Some(old_ln),
                Some(new_ln),
            ));
            old_ln += 1;
            new_ln += 1;
        }
    }
    // Flush the final file.
    if let Some(f) = cur.take() {
        files.push(f);
    }
    // Post-processing passes over every parsed file.
    for f in &mut files {
        apply_auto_collapse(f);
        compute_word_diff_highlights(f);
    }
    files
}
/// Pair adjacent removed/added line blocks and attach the word-level
/// highlight ranges produced by `word_diff::score_pair`.
///
/// Pairing is greedy: all (removed, added) combinations within a block are
/// scored, sorted best-first, then assigned one-to-one in score order.
fn compute_word_diff_highlights(file: &mut FileDiff) {
    use crate::word_diff::score_pair;
    let mut i = 0;
    while i < file.lines.len() {
        // Seek the start of a run of removed lines.
        if file.lines[i].kind != LineKind::Removed {
            i += 1;
            continue;
        }
        let removed_start = i;
        while i < file.lines.len() && file.lines[i].kind == LineKind::Removed {
            i += 1;
        }
        let removed_end = i;
        // The run of added lines, if any, immediately follows.
        let added_start = i;
        while i < file.lines.len() && file.lines[i].kind == LineKind::Added {
            i += 1;
        }
        let added_end = i;
        let removed_count = removed_end - removed_start;
        let added_count = added_end - added_start;
        // Scoring is O(removed * added); skip pathologically large blocks.
        // `i` has already advanced past the block, so `continue` is a skip.
        const MAX_BLOCK_PAIRS: usize = 400;
        if removed_count * added_count > MAX_BLOCK_PAIRS {
            continue;
        }
        // One scored pairing of removed line `r` with added line `a`, plus
        // the highlight ranges for each side.
        struct Candidate {
            score: f32,
            r: usize,
            a: usize,
            rhl: Vec<(usize, usize)>,
            ahl: Vec<(usize, usize)>,
        }
        let mut candidates: Vec<Candidate> = Vec::new();
        for r in removed_start..removed_end {
            for a in added_start..added_end {
                // score_pair yields None when the lines are too dissimilar.
                let Some((score, rh, ah)) = score_pair(&file.lines[r].text, &file.lines[a].text)
                else {
                    continue;
                };
                candidates.push(Candidate {
                    score,
                    r,
                    a,
                    rhl: rh.into_iter().map(|h| (h.start, h.end)).collect(),
                    ahl: ah.into_iter().map(|h| (h.start, h.end)).collect(),
                });
            }
        }
        // Best score first. The stable sort preserves insertion order on
        // ties, which favors pairs closer to original line order.
        candidates.sort_by(|x, y| {
            y.score
                .partial_cmp(&x.score)
                .unwrap_or(std::cmp::Ordering::Equal)
        });
        // Greedy one-to-one assignment in descending score order.
        let mut r_used = vec![false; removed_count];
        let mut a_used = vec![false; added_count];
        for c in candidates {
            let ri = c.r - removed_start;
            let ai = c.a - added_start;
            if r_used[ri] || a_used[ai] {
                continue;
            }
            r_used[ri] = true;
            a_used[ai] = true;
            file.lines[c.r].paired = true;
            file.lines[c.r].word_highlights = c.rhl;
            file.lines[c.a].paired = true;
            file.lines[c.a].word_highlights = c.ahl;
        }
    }
}
/// Extract the old/new start line numbers from an `@@ -o[,n] +o[,n] @@`
/// hunk header. Returns `None` for malformed headers.
fn parse_hunk_header(line: &str) -> Option<(u32, u32)> {
    let body = line.strip_prefix("@@")?;
    let mut fields = body.split_whitespace();
    let old_field = fields.next()?.strip_prefix('-')?;
    let new_field = fields.next()?.strip_prefix('+')?;
    // Each field is "start" or "start,count"; only the start matters here.
    let start_of = |field: &str| field.split(',').next()?.parse::<u32>().ok();
    Some((start_of(old_field)?, start_of(new_field)?))
}
/// Recompute `auto_collapsed` for a file and, when collapsed, seed `viewed`.
pub fn apply_auto_collapse(f: &mut FileDiff) {
    let collapsed = should_auto_collapse(&f.path) || has_generated_marker(&f.lines);
    f.auto_collapsed = collapsed;
    // Auto-collapsed files start pre-marked as viewed; an already-set
    // viewed flag is never cleared here.
    f.viewed = f.viewed || collapsed;
}
fn parse_diff_git_header(rest: &str) -> (Option<String>, Option<String>) {
if let Some(a_body) = rest.strip_prefix("\"a/")
&& let Some(idx) = a_body.find("\" \"b/")
&& let Some(b_body) = a_body[idx + "\" \"b/".len()..].strip_suffix('"')
{
return (
Some(unquote_c_path(&a_body[..idx])),
Some(unquote_c_path(b_body)),
);
}
if let Some(a) = rest.strip_prefix("a/") {
if let Some(idx) = a.find(" b/") {
return (
Some(a[..idx].to_string()),
Some(a[idx + " b/".len()..].to_string()),
);
}
}
let mut parts = rest.split_whitespace();
match parts.next() {
Some(first) => {
let last = parts.last().unwrap_or(first);
(Some(strip_prefix_ab(first)), Some(strip_prefix_ab(last)))
}
None => (None, None),
}
}
fn parse_diff_path_header(rest: &str) -> Option<String> {
let raw = rest.split('\t').next().unwrap_or(rest).trim_end();
if raw.is_empty() || raw == "/dev/null" {
return None;
}
let path = if let Some(body) = raw.strip_prefix('"').and_then(|s| s.strip_suffix('"')) {
unquote_c_path(body)
} else {
raw.to_string()
};
Some(strip_prefix_ab(&path))
}
/// Decode a C-style quoted path body as emitted by git with
/// `core.quotepath` on: `\ooo` octal byte escapes plus the usual
/// single-character escapes. Unrecognized escapes and invalid UTF-8 fall
/// back to the input verbatim.
///
/// Fix: the octal value is now accumulated in `u32` — the previous
/// `(digit) << 6` arithmetic on `u8` overflowed (panicking in debug
/// builds) for escapes above `\377`'s first-digit range, e.g. `\700`.
/// Out-of-range escapes are kept as literal text.
fn unquote_c_path(s: &str) -> String {
    let bytes = s.as_bytes();
    let mut out: Vec<u8> = Vec::with_capacity(bytes.len());
    let mut i = 0;
    while i < bytes.len() {
        if bytes[i] == b'\\' && i + 1 < bytes.len() {
            let rest = &bytes[i + 1..];
            // \ooo: exactly three octal digits; must fit in one byte.
            if rest.len() >= 3 && rest[..3].iter().all(|b| (b'0'..=b'7').contains(b)) {
                let v = rest[..3]
                    .iter()
                    .fold(0u32, |acc, b| (acc << 3) | u32::from(b - b'0'));
                if v <= 0xFF {
                    out.push(v as u8);
                    i += 4;
                    continue;
                }
                // Value > 255 (first digit 4-7): not a valid byte escape;
                // fall through and emit the backslash literally.
            }
            let mapped = match rest[0] {
                b'n' => Some(b'\n'),
                b't' => Some(b'\t'),
                b'r' => Some(b'\r'),
                b'"' => Some(b'"'),
                b'\\' => Some(b'\\'),
                b'a' => Some(0x07),
                b'b' => Some(0x08),
                b'f' => Some(0x0c),
                b'v' => Some(0x0b),
                _ => None,
            };
            if let Some(b) = mapped {
                out.push(b);
                i += 2;
                continue;
            }
        }
        // Ordinary byte, or a backslash with no recognized escape.
        out.push(bytes[i]);
        i += 1;
    }
    String::from_utf8(out).unwrap_or_else(|_| s.to_string())
}
/// Drop a single leading `a/` or `b/` diff prefix, if present.
fn strip_prefix_ab(p: &str) -> String {
    ["a/", "b/"]
        .iter()
        .find_map(|prefix| p.strip_prefix(prefix))
        .unwrap_or(p)
        .to_string()
}
#[cfg(test)]
mod tests {
    use super::*;

    // Fixture with two files: a modification and an added file. Context
    // lines inside the literal carry their leading diff space.
    const SAMPLE: &str = "diff --git a/src/foo.rs b/src/foo.rs
index 1111111..2222222 100644
--- a/src/foo.rs
+++ b/src/foo.rs
@@ -1,3 +1,4 @@
 fn a() {}
-fn b() {}
+fn bb() {}
+fn c() {}
diff --git a/new.txt b/new.txt
new file mode 100644
index 0000000..3333333
--- /dev/null
+++ b/new.txt
@@ -0,0 +1,2 @@
+hello
+world
";

    #[test]
    fn parses_two_files() {
        let files = parse_unified(SAMPLE);
        assert_eq!(files.len(), 2);
        assert_eq!(files[0].path, "src/foo.rs");
        assert_eq!(files[0].status, FileStatus::Modified);
        assert_eq!(files[0].added, 2);
        assert_eq!(files[0].removed, 1);
        assert_eq!(files[1].path, "new.txt");
        assert_eq!(files[1].status, FileStatus::Added);
        assert_eq!(files[1].added, 2);
    }

    #[test]
    fn parses_paths_with_spaces() {
        let input = "diff --git a/with space.txt b/with space.txt
index 111..222 100644
--- a/with space.txt\t
+++ b/with space.txt\t
@@ -1 +1 @@
-old
+new
";
        let files = parse_unified(input);
        assert_eq!(files.len(), 1);
        assert_eq!(files[0].path, "with space.txt");
        assert_eq!(files[0].old_path.as_deref(), Some("with space.txt"));
    }

    #[test]
    fn parses_paths_with_multiple_spaces_and_unicode() {
        let input = "diff --git a/emoji 🎉.md b/emoji 🎉.md
index 111..222 100644
--- a/emoji 🎉.md
+++ b/emoji 🎉.md
@@ -1 +1 @@
-old
+new
";
        let files = parse_unified(input);
        assert_eq!(files.len(), 1);
        assert_eq!(files[0].path, "emoji 🎉.md");
    }

    #[test]
    fn parses_added_file_with_space_in_name() {
        let input = "diff --git a/new file.txt b/new file.txt
new file mode 100644
index 0000000..3333333
--- /dev/null
+++ b/new file.txt
@@ -0,0 +1 @@
+hello
";
        let files = parse_unified(input);
        assert_eq!(files.len(), 1);
        assert_eq!(files[0].path, "new file.txt");
        assert_eq!(files[0].status, FileStatus::Added);
        assert_eq!(files[0].old_path, None);
    }

    #[test]
    fn parses_hunk_header_variants() {
        assert_eq!(parse_hunk_header("@@ -1,3 +1,4 @@"), Some((1, 1)));
        assert_eq!(parse_hunk_header("@@ -1 +1 @@"), Some((1, 1)));
        assert_eq!(parse_hunk_header("@@ -0,0 +1,2 @@"), Some((0, 1)));
        assert_eq!(
            parse_hunk_header("@@ -10,5 +20,7 @@ fn foo()"),
            Some((10, 20))
        );
        assert_eq!(parse_hunk_header("@@ broken"), None);
    }

    // Gutter cells are right-aligned to width 4 (see `format_gutter`).
    #[test]
    fn tracks_line_numbers_in_gutter() {
        let files = parse_unified(SAMPLE);
        let f = &files[0];
        let body: Vec<_> = f.lines.iter().filter(|l| l.kind.is_code()).collect();
        assert_eq!(body[0].gutter, "   1    1 ");
        assert_eq!(body[1].gutter, "   2      ");
        assert_eq!(body[2].gutter, "        2 ");
    }

    #[test]
    fn no_fold_for_first_hunk_at_line_1() {
        let files = parse_unified(SAMPLE);
        assert!(!files[0].lines.iter().any(|l| l.kind.is_fold()));
    }

    // Gap of 17 lines between hunks (<= 20) yields a single down-fold.
    #[test]
    fn inserts_fold_between_hunks() {
        let input = "diff --git a/f.rs b/f.rs
index aaa..bbb 100644
--- a/f.rs
+++ b/f.rs
@@ -1,2 +1,3 @@
 line1
+added
 line2
@@ -20,2 +21,2 @@ fn bar()
 line20
-old
+new
";
        let files = parse_unified(input);
        let folds: Vec<_> = files[0].lines.iter().filter(|l| l.kind.is_fold()).collect();
        assert_eq!(folds.len(), 1);
        assert_eq!(folds[0].kind, LineKind::FoldDown);
        assert_eq!(folds[0].new_lineno, Some(4));
        assert_eq!(folds[0].fold_count, 17);
    }

    // The greedy scorer should pair reordered-but-similar lines, not just
    // pair them positionally.
    #[test]
    fn pair_selection_matches_by_similarity_not_order() {
        let input = "diff --git a/f.rs b/f.rs
index a..b 100644
--- a/f.rs
+++ b/f.rs
@@ -1,6 +1,6 @@
-fn alpha_original(x: i32) {}
-fn beta_original(y: i32) {}
-fn gamma_original(z: i32) {}
+fn gamma_original(z: i64) {}
+fn alpha_original(x: i64) {}
+fn beta_original(y: i64) {}
";
        let files = parse_unified(input);
        let f = &files[0];
        let removed: Vec<&DiffLine> = f
            .lines
            .iter()
            .filter(|l| l.kind == LineKind::Removed)
            .collect();
        let added: Vec<&DiffLine> = f
            .lines
            .iter()
            .filter(|l| l.kind == LineKind::Added)
            .collect();
        assert_eq!(removed.len(), 3);
        assert_eq!(added.len(), 3);
        for line in removed.iter().chain(added.iter()) {
            assert!(line.paired, "expected paired for text={:?}", line.text);
            assert!(
                !line.word_highlights.is_empty(),
                "expected word_highlights for text={:?}",
                line.text,
            );
        }
    }

    #[test]
    fn unpaired_lines_stay_unpaired() {
        let input = "diff --git a/f.rs b/f.rs
index a..b 100644
--- a/f.rs
+++ b/f.rs
@@ -1,2 +1,2 @@
-fn delete_me_completely() {}
+struct WhollyNewThing;
";
        let files = parse_unified(input);
        let f = &files[0];
        let removed = f
            .lines
            .iter()
            .find(|l| l.kind == LineKind::Removed)
            .unwrap();
        let added = f.lines.iter().find(|l| l.kind == LineKind::Added).unwrap();
        assert!(!removed.paired);
        assert!(!added.paired);
        assert!(removed.word_highlights.is_empty());
        assert!(added.word_highlights.is_empty());
    }

    #[test]
    fn auto_collapses_lock_files() {
        assert!(should_auto_collapse("Cargo.lock"));
        assert!(should_auto_collapse("sub/package-lock.json"));
        assert!(should_auto_collapse("vendor/foo.go"));
        assert!(!should_auto_collapse("src/main.rs"));
    }

    #[test]
    fn parses_no_newline_at_eof_marker() {
        let input = "diff --git a/f.txt b/f.txt
index 111..222 100644
--- a/f.txt
+++ b/f.txt
@@ -1 +1 @@
-old line
\\ No newline at end of file
+new line
\\ No newline at end of file
";
        let files = parse_unified(input);
        let f = &files[0];
        let removed = f
            .lines
            .iter()
            .find(|l| l.kind == LineKind::Removed)
            .unwrap();
        let added = f.lines.iter().find(|l| l.kind == LineKind::Added).unwrap();
        assert!(removed.no_newline);
        assert!(added.no_newline);
    }

    #[test]
    fn auto_collapsed_files_seed_viewed() {
        let input = "diff --git a/package-lock.json b/package-lock.json
new file mode 100644
index 0000000..3333333
--- /dev/null
+++ b/package-lock.json
@@ -0,0 +1,2 @@
+{
+}
diff --git a/src/main.rs b/src/main.rs
index 111..222 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1 +1 @@
-old
+new
";
        let files = parse_unified(input);
        let lock = files
            .iter()
            .find(|f| f.path == "package-lock.json")
            .unwrap();
        let main = files.iter().find(|f| f.path == "src/main.rs").unwrap();
        assert!(lock.auto_collapsed);
        assert!(lock.viewed);
        assert!(!main.auto_collapsed);
        assert!(!main.viewed);
    }

    // Octal-escaped headers produced by git with core.quotepath enabled.
    #[test]
    fn unquotes_core_quotepath_headers() {
        let input = "diff --git \"a/\\346\\227\\245\\346\\234\\254\\350\\252\\236.md\" \"b/\\346\\227\\245\\346\\234\\254\\350\\252\\236.md\"
index 111..222 100644
--- \"a/\\346\\227\\245\\346\\234\\254\\350\\252\\236.md\"
+++ \"b/\\346\\227\\245\\346\\234\\254\\350\\252\\236.md\"
@@ -1 +1 @@
-old
+new
";
        let files = parse_unified(input);
        assert_eq!(files.len(), 1);
        assert_eq!(files[0].path, "日本語.md");
        assert_eq!(files[0].old_path.as_deref(), Some("日本語.md"));
    }

    #[test]
    fn unquote_c_path_handles_simple_escapes() {
        assert_eq!(unquote_c_path("foo"), "foo");
        assert_eq!(unquote_c_path("a\\tb"), "a\tb");
        assert_eq!(unquote_c_path("a\\\"b"), "a\"b");
        assert_eq!(unquote_c_path("a\\\\b"), "a\\b");
        assert_eq!(unquote_c_path("a\\040b"), "a b");
    }

    #[test]
    fn handles_dev_null_clears_old_path_for_added_file() {
        let input = "diff --git a/new.txt b/new.txt
new file mode 100644
index 0000000..3333333
--- /dev/null
+++ b/new.txt
@@ -0,0 +1 @@
+hello
";
        let files = parse_unified(input);
        assert_eq!(files[0].old_path, None);
    }
}
/// Scan the first 20 code lines of a diff for a "this file is generated"
/// marker (case-insensitive via uppercasing each line).
fn has_generated_marker(lines: &[DiffLine]) -> bool {
    const MARKERS: &[&str] = &[
        "@GENERATED",
        "DO NOT EDIT",
        "AUTO-GENERATED",
        "AUTO GENERATED",
        "CODE GENERATED BY",
        "GENERATED BY ORVAL",
        "PRISMA CLIENT",
        "THIS FILE IS AUTO-GENERATED",
        "THIS FILE WAS AUTOMATICALLY GENERATED",
        "GENERATED CODE",
        "GENERATED SIGNEDSOURCE",
    ];
    lines
        .iter()
        .filter(|line| line.kind.is_code())
        .take(20)
        .map(|line| line.text.to_uppercase())
        .any(|upper| MARKERS.iter().any(|m| upper.contains(m)))
}
/// One row of a side-by-side (split) diff view. Each side holds an index
/// into `FileDiff::lines`, or `None` for an empty cell.
#[derive(Debug, Clone, Copy)]
pub struct SplitRow {
    /// Line index shown in the left (old) column, if any.
    pub left: Option<usize>,
    /// Line index shown in the right (new) column, if any.
    pub right: Option<usize>,
}
/// Lay out a file's rows for a side-by-side view: removed lines pair with
/// the added lines that follow them (index-by-index within the change
/// block); every other row spans both columns.
pub fn build_split_rows(file: &FileDiff) -> Vec<SplitRow> {
    // Pair buffered removed/added indices positionally, then reset both.
    fn drain(left: &mut Vec<usize>, right: &mut Vec<usize>, out: &mut Vec<SplitRow>) {
        let rows = left.len().max(right.len());
        for i in 0..rows {
            out.push(SplitRow {
                left: left.get(i).copied(),
                right: right.get(i).copied(),
            });
        }
        left.clear();
        right.clear();
    }

    let mut rows: Vec<SplitRow> = Vec::new();
    let mut left_pending: Vec<usize> = Vec::new();
    let mut right_pending: Vec<usize> = Vec::new();
    for (idx, line) in file.lines.iter().enumerate() {
        match line.kind {
            LineKind::Removed => {
                // A removal arriving after additions starts a new block.
                if !right_pending.is_empty() {
                    drain(&mut left_pending, &mut right_pending, &mut rows);
                }
                left_pending.push(idx);
            }
            LineKind::Added => right_pending.push(idx),
            _ => {
                // Context/header/fold rows flush the block and occupy both sides.
                drain(&mut left_pending, &mut right_pending, &mut rows);
                rows.push(SplitRow {
                    left: Some(idx),
                    right: Some(idx),
                });
            }
        }
    }
    drain(&mut left_pending, &mut right_pending, &mut rows);
    rows
}
/// Heuristic: should this file start collapsed in the diff view?
///
/// True for dependency lock files, minified/generated artifacts (by
/// file-name suffix), and files under well-known vendored/generated
/// directories.
///
/// Fix: directory patterns are now matched as whole path segments. The
/// previous substring test (`path.contains("vendor/")` etc.) wrongly
/// collapsed unrelated paths such as "extvendor/foo.rs" or "rebuild/x.c".
fn should_auto_collapse(path: &str) -> bool {
    let name = path.rsplit('/').next().unwrap_or(path);
    // Machine-written lock files, matched by exact file name.
    const LOCK_FILES: &[&str] = &[
        "package-lock.json",
        "yarn.lock",
        "pnpm-lock.yaml",
        "Cargo.lock",
        "Gemfile.lock",
        "Pipfile.lock",
        "poetry.lock",
        "composer.lock",
        "go.sum",
    ];
    if LOCK_FILES.contains(&name) {
        return true;
    }
    // Generated/minified artifacts, matched by file-name suffix.
    const GENERATED_SUFFIXES: &[&str] =
        &[".min.js", ".min.css", ".map", ".generated.ts", ".gen.go"];
    if GENERATED_SUFFIXES.iter().any(|sfx| name.ends_with(sfx)) {
        return true;
    }
    // Vendored/generated directories, matched against every directory
    // component of the path (the final component is the file name).
    const COLLAPSED_DIRS: &[&str] = &[
        "vendor",
        "node_modules",
        "dist",
        "build",
        "generated",
        "__generated__",
        "gen",
    ];
    let mut segments = path.split('/');
    segments.next_back(); // drop the file name
    segments.any(|dir| COLLAPSED_DIRS.contains(&dir))
}