1use std::collections::{HashMap, HashSet, VecDeque};
2
3use nucleo_matcher::pattern::{CaseMatching, Normalization, Pattern};
4use nucleo_matcher::{Config, Matcher, Utf32Str};
5
6use crate::graph::{Edge, GraphFile, Node, Note};
7use crate::index::Bm25Index;
8use crate::text_norm;
9
// Okapi BM25 parameters: k1 controls term-frequency saturation, b controls
// document-length normalization.
const BM25_K1: f64 = 1.5;
const BM25_B: f64 = 0.75;
// Character budget for adaptive rendering; resolved via `clamp_target_chars`.
const DEFAULT_TARGET_CHARS: usize = 4200;
const MIN_TARGET_CHARS: usize = 300;
const MAX_TARGET_CHARS: usize = 12_000;
// Fuzzy-mode scoring knobs (per-field weights and context caps).
// NOTE(review): their exact application lives in the matcher code outside this
// view — confirm against the fuzzy scoring path before relying on details.
const FUZZY_NEIGHBOR_CONTEXT_CAP: u32 = 220;
const FUZZY_NO_PRIMARY_CONTEXT_DIVISOR: u32 = 3;
const FUZZY_DESCRIPTION_WEIGHT: u32 = 2;
const FUZZY_FACT_WEIGHT: u32 = 2;
const FUZZY_NOTE_BODY_WEIGHT: u32 = 1;
const FUZZY_NOTE_TAG_WEIGHT: u32 = 2;
// BM25-mode lexical boosts and per-field index weights.
const BM25_PHRASE_MATCH_BOOST: i64 = 120;
const BM25_TOKEN_MATCH_BOOST: i64 = 45;
const BM25_ID_WEIGHT: usize = 5;
const BM25_NAME_WEIGHT: usize = 4;
const BM25_ALIAS_WEIGHT: usize = 4;
const BM25_DESCRIPTION_WEIGHT: usize = 2;
const BM25_FACT_WEIGHT: usize = 2;
const BM25_NOTE_BODY_WEIGHT: usize = 1;
const BM25_NOTE_TAG_WEIGHT: usize = 1;
const BM25_NEIGHBOR_WEIGHT: usize = 1;
// Importance/authority scoring: 0.5 is the neutral midpoint; the metadata
// contribution to a score is bounded by a ratio with min/max caps.
const IMPORTANCE_NEUTRAL: f64 = 0.5;
const IMPORTANCE_MAX_ABS_BOOST: f64 = 66.0;
const SCORE_META_MAX_RATIO: f64 = 0.35;
const SCORE_META_MIN_CAP: i64 = 30;
const SCORE_META_MAX_CAP: i64 = 240;
36
/// Strategy used to match a query against graph nodes.
#[derive(Debug, Clone, Copy)]
pub enum FindMode {
    /// Fuzzy (pattern) matching.
    Fuzzy,
    /// BM25 lexical ranking.
    Bm25,
    /// Blend of both signals; relative weights come from `FindTune`.
    Hybrid,
}
43
/// Relative weights for blending the ranking signals in hybrid search.
#[derive(Debug, Clone, Copy)]
pub struct FindTune {
    pub bm25: f64,
    pub fuzzy: f64,
    pub vector: f64,
}

impl FindTune {
    /// Parses a comma-separated `key=value` list such as `"bm25=0.6,fuzzy=0.3"`.
    ///
    /// Returns `None` for unknown keys, malformed pairs, or unparsable
    /// numbers. Keys that are not mentioned keep their default weight, and
    /// every component of the result is clamped to `[0.0, 1.0]`.
    pub fn parse(raw: &str) -> Option<Self> {
        let mut weights = Self::default();
        for pair in raw.split(',') {
            let (name, number) = pair.split_once('=')?;
            let parsed = number.trim().parse::<f64>().ok()?;
            let slot = match name.trim() {
                "bm25" => &mut weights.bm25,
                "fuzzy" => &mut weights.fuzzy,
                "vector" => &mut weights.vector,
                _ => return None,
            };
            *slot = parsed;
        }
        Some(weights.clamped())
    }

    /// Restricts every weight to the `[0.0, 1.0]` range.
    fn clamped(self) -> Self {
        Self {
            bm25: self.bm25.clamp(0.0, 1.0),
            fuzzy: self.fuzzy.clamp(0.0, 1.0),
            vector: self.vector.clamp(0.0, 1.0),
        }
    }
}

impl Default for FindTune {
    fn default() -> Self {
        Self {
            bm25: 0.55,
            fuzzy: 0.35,
            vector: 0.10,
        }
    }
}
85
/// Internal, borrowed view of a ranked match: the node, its final score, and
/// the per-signal breakdown that produced it.
#[derive(Clone, Copy)]
struct ScoredNode<'a> {
    score: i64,
    node: &'a Node,
    breakdown: ScoreBreakdown,
}
92
/// Per-signal components of a node's final score.
/// Mirrored publicly, field for field, by `ScoreBreakdownResult`.
#[derive(Debug, Clone, Copy)]
struct ScoreBreakdown {
    raw_relevance: f64,
    normalized_relevance: i64,
    lexical_boost: i64,
    feedback_boost: i64,
    importance_boost: i64,
    authority_raw: i64,
    authority_applied: i64,
    authority_cap: i64,
}
104
/// Intermediate match candidate, before normalization and boosts produce the
/// final `ScoredNode`.
struct RawCandidate<'a> {
    node: &'a Node,
    raw_relevance: f64,
    lexical_boost: i64,
}
110
111struct FindQueryContext<'a> {
112 notes_by_node: HashMap<&'a str, Vec<&'a Note>>,
113 neighbors_by_node: HashMap<&'a str, Vec<&'a Node>>,
114}
115
116impl<'a> FindQueryContext<'a> {
117 fn build(graph: &'a GraphFile) -> Self {
118 let node_by_id: HashMap<&'a str, &'a Node> = graph
119 .nodes
120 .iter()
121 .map(|node| (node.id.as_str(), node))
122 .collect();
123
124 let mut notes_by_node: HashMap<&'a str, Vec<&'a Note>> = HashMap::new();
125 for note in &graph.notes {
126 notes_by_node
127 .entry(note.node_id.as_str())
128 .or_default()
129 .push(note);
130 }
131
132 let mut neighbors_by_node: HashMap<&'a str, Vec<&'a Node>> = HashMap::new();
133 for edge in &graph.edges {
134 if let (Some(source), Some(target)) = (
135 node_by_id.get(edge.source_id.as_str()),
136 node_by_id.get(edge.target_id.as_str()),
137 ) {
138 neighbors_by_node
139 .entry(source.id.as_str())
140 .or_default()
141 .push(*target);
142 neighbors_by_node
143 .entry(target.id.as_str())
144 .or_default()
145 .push(*source);
146 }
147 }
148
149 for neighbors in neighbors_by_node.values_mut() {
150 neighbors.sort_by(|left, right| left.id.cmp(&right.id));
151 neighbors.dedup_by(|left, right| left.id == right.id);
152 }
153
154 Self {
155 notes_by_node,
156 neighbors_by_node,
157 }
158 }
159
160 fn notes_for(&self, node_id: &str) -> &[&'a Note] {
161 self.notes_by_node
162 .get(node_id)
163 .map(Vec::as_slice)
164 .unwrap_or(&[])
165 }
166
167 fn neighbors_for(&self, node_id: &str) -> &[&'a Node] {
168 self.neighbors_by_node
169 .get(node_id)
170 .map(Vec::as_slice)
171 .unwrap_or(&[])
172 }
173}
174
/// Public mirror of the internal `ScoreBreakdown`, returned by the scored
/// find APIs.
#[derive(Debug, Clone)]
pub struct ScoreBreakdownResult {
    pub raw_relevance: f64,
    pub normalized_relevance: i64,
    pub lexical_boost: i64,
    pub feedback_boost: i64,
    pub importance_boost: i64,
    pub authority_raw: i64,
    pub authority_applied: i64,
    pub authority_cap: i64,
}
186
/// Public, owned counterpart of the internal `ScoredNode`: a matched node,
/// its final score, and the score breakdown.
#[derive(Debug, Clone)]
pub struct ScoredNodeResult {
    pub score: i64,
    pub node: Node,
    pub breakdown: ScoreBreakdownResult,
}
193
/// Renders find results for `queries` as plain text.
///
/// Convenience wrapper over `render_find_with_index` with score debugging
/// disabled and no prebuilt BM25 index.
pub fn render_find(
    graph: &GraphFile,
    queries: &[String],
    limit: usize,
    include_features: bool,
    include_metadata: bool,
    mode: FindMode,
    full: bool,
) -> String {
    render_find_with_index(
        graph,
        queries,
        limit,
        include_features,
        include_metadata,
        mode,
        full,
        false, // debug_score: off
        None,  // index: none prebuilt
    )
}
215
/// Renders find results, optionally using a prebuilt BM25 index.
///
/// Wrapper over `render_find_with_index_tuned` with the default tuning.
pub fn render_find_with_index(
    graph: &GraphFile,
    queries: &[String],
    limit: usize,
    include_features: bool,
    include_metadata: bool,
    mode: FindMode,
    full: bool,
    debug_score: bool,
    index: Option<&Bm25Index>,
) -> String {
    render_find_with_index_tuned(
        graph,
        queries,
        limit,
        include_features,
        include_metadata,
        mode,
        full,
        debug_score,
        index,
        None, // tune: use defaults
    )
}
240
241pub fn render_find_with_index_tuned(
242 graph: &GraphFile,
243 queries: &[String],
244 limit: usize,
245 include_features: bool,
246 include_metadata: bool,
247 mode: FindMode,
248 full: bool,
249 debug_score: bool,
250 index: Option<&Bm25Index>,
251 tune: Option<&FindTune>,
252) -> String {
253 let mut sections = Vec::new();
254 for query in queries {
255 let matches = find_all_matches_with_index(
256 graph,
257 query,
258 include_features,
259 include_metadata,
260 mode,
261 index,
262 tune,
263 );
264 let total = matches.len();
265 let visible: Vec<_> = matches.into_iter().take(limit).collect();
266 let shown = visible.len();
267 let mut lines = vec![render_result_header(query, shown, total)];
268 for scored in visible {
269 lines.push(render_scored_node_block(graph, &scored, full, debug_score));
270 }
271 push_limit_omission_line(&mut lines, shown, total);
272 sections.push(lines.join("\n"));
273 }
274 format!("{}\n", sections.join("\n\n"))
275}
276
/// Finds up to `limit` matching nodes for `query`, returning owned clones.
///
/// Wrapper over `find_matches_with_index` with no BM25 index and default
/// tuning; the scores are discarded.
pub fn find_nodes(
    graph: &GraphFile,
    query: &str,
    limit: usize,
    include_features: bool,
    include_metadata: bool,
    mode: FindMode,
) -> Vec<Node> {
    find_matches_with_index(
        graph,
        query,
        limit,
        include_features,
        include_metadata,
        mode,
        None, // index: none prebuilt
        None, // tune: use defaults
    )
    .into_iter()
    .map(|item| item.node.clone())
    .collect()
}
299
/// Like `find_nodes`, but may reuse a prebuilt BM25 index.
/// The scores are discarded; only the nodes are returned.
pub fn find_nodes_with_index(
    graph: &GraphFile,
    query: &str,
    limit: usize,
    include_features: bool,
    include_metadata: bool,
    mode: FindMode,
    index: Option<&Bm25Index>,
) -> Vec<Node> {
    find_matches_with_index(
        graph,
        query,
        limit,
        include_features,
        include_metadata,
        mode,
        index,
        None, // tune: use defaults
    )
    .into_iter()
    .map(|item| item.node.clone())
    .collect()
}
323
324pub fn find_nodes_with_index_tuned(
325 graph: &GraphFile,
326 query: &str,
327 limit: usize,
328 include_features: bool,
329 include_metadata: bool,
330 mode: FindMode,
331 index: Option<&Bm25Index>,
332 tune: Option<&FindTune>,
333) -> Vec<Node> {
334 find_matches_with_index(
335 graph,
336 query,
337 limit,
338 include_features,
339 include_metadata,
340 mode,
341 index,
342 tune,
343 )
344 .into_iter()
345 .map(|item| item.node.clone())
346 .collect()
347}
348
349pub fn find_nodes_and_total_with_index(
350 graph: &GraphFile,
351 query: &str,
352 limit: usize,
353 include_features: bool,
354 include_metadata: bool,
355 mode: FindMode,
356 index: Option<&Bm25Index>,
357) -> (usize, Vec<Node>) {
358 let matches = find_all_matches_with_index(
359 graph,
360 query,
361 include_features,
362 include_metadata,
363 mode,
364 index,
365 None,
366 );
367 let total = matches.len();
368 let nodes = matches
369 .into_iter()
370 .take(limit)
371 .map(|item| item.node.clone())
372 .collect();
373 (total, nodes)
374}
375
/// Scored variant of `find_nodes_and_total_with_index`: returns the total
/// match count plus up to `limit` results with scores and breakdowns.
///
/// Wrapper over the `_tuned` variant with the default tuning.
pub fn find_scored_nodes_and_total_with_index(
    graph: &GraphFile,
    query: &str,
    limit: usize,
    include_features: bool,
    include_metadata: bool,
    mode: FindMode,
    index: Option<&Bm25Index>,
) -> (usize, Vec<ScoredNodeResult>) {
    find_scored_nodes_and_total_with_index_tuned(
        graph,
        query,
        limit,
        include_features,
        include_metadata,
        mode,
        index,
        None, // tune: use defaults
    )
}
396
397pub fn find_scored_nodes_and_total_with_index_tuned(
398 graph: &GraphFile,
399 query: &str,
400 limit: usize,
401 include_features: bool,
402 include_metadata: bool,
403 mode: FindMode,
404 index: Option<&Bm25Index>,
405 tune: Option<&FindTune>,
406) -> (usize, Vec<ScoredNodeResult>) {
407 let matches = find_all_matches_with_index(
408 graph,
409 query,
410 include_features,
411 include_metadata,
412 mode,
413 index,
414 tune,
415 );
416 let total = matches.len();
417 let nodes = matches
418 .into_iter()
419 .take(limit)
420 .map(|item| ScoredNodeResult {
421 score: item.score,
422 node: item.node.clone(),
423 breakdown: ScoreBreakdownResult {
424 raw_relevance: item.breakdown.raw_relevance,
425 normalized_relevance: item.breakdown.normalized_relevance,
426 lexical_boost: item.breakdown.lexical_boost,
427 feedback_boost: item.breakdown.feedback_boost,
428 importance_boost: item.breakdown.importance_boost,
429 authority_raw: item.breakdown.authority_raw,
430 authority_applied: item.breakdown.authority_applied,
431 authority_cap: item.breakdown.authority_cap,
432 },
433 })
434 .collect();
435 (total, nodes)
436}
437
/// Counts the total number of matches across all `queries`.
///
/// Wrapper over `count_find_results_with_index` with no prebuilt index.
/// Note: the delegate ignores `limit` — the count is pre-limit.
pub fn count_find_results(
    graph: &GraphFile,
    queries: &[String],
    limit: usize,
    include_features: bool,
    include_metadata: bool,
    mode: FindMode,
) -> usize {
    count_find_results_with_index(
        graph,
        queries,
        limit,
        include_features,
        include_metadata,
        mode,
        None, // index: none prebuilt
    )
}
456
457pub fn count_find_results_with_index(
458 graph: &GraphFile,
459 queries: &[String],
460 _limit: usize,
461 include_features: bool,
462 include_metadata: bool,
463 mode: FindMode,
464 index: Option<&Bm25Index>,
465) -> usize {
466 let mut total = 0;
467 for query in queries {
468 total += find_all_matches_with_index(
469 graph,
470 query,
471 include_features,
472 include_metadata,
473 mode,
474 index,
475 None,
476 )
477 .len();
478 }
479 total
480}
481
/// Renders a single node block followed by a trailing newline.
pub fn render_node(graph: &GraphFile, node: &Node, full: bool) -> String {
    format!("{}\n", render_node_block(graph, node, full))
}
485
486pub fn render_node_adaptive(graph: &GraphFile, node: &Node, target_chars: Option<usize>) -> String {
487 let target = clamp_target_chars(target_chars);
488 let full = format!("{}\n", render_node_block(graph, node, true));
489 if fits_target_chars(&full, target) {
490 return full;
491 }
492 let mut candidates = Vec::new();
493 for (depth, detail, edge_cap) in [
494 (0usize, DetailLevel::Rich, 8usize),
495 (1usize, DetailLevel::Rich, 8usize),
496 (2usize, DetailLevel::Rich, 6usize),
497 (2usize, DetailLevel::Compact, 6usize),
498 (2usize, DetailLevel::Minimal, 2usize),
499 ] {
500 let rendered = render_single_node_candidate(graph, node, depth, detail, edge_cap);
501 candidates.push(Candidate {
502 rendered,
503 depth,
504 detail,
505 shown_nodes: 1 + depth,
506 });
507 }
508 pick_best_candidate(candidates, target)
509}
510
/// Budget-aware find rendering (see the `_tuned` variant).
///
/// Wrapper over `render_find_adaptive_with_index_tuned` with default tuning.
pub fn render_find_adaptive_with_index(
    graph: &GraphFile,
    queries: &[String],
    limit: usize,
    include_features: bool,
    include_metadata: bool,
    mode: FindMode,
    target_chars: Option<usize>,
    debug_score: bool,
    index: Option<&Bm25Index>,
) -> String {
    render_find_adaptive_with_index_tuned(
        graph,
        queries,
        limit,
        include_features,
        include_metadata,
        mode,
        target_chars,
        debug_score,
        index,
        None, // tune: use defaults
    )
}
535
536pub fn render_find_adaptive_with_index_tuned(
537 graph: &GraphFile,
538 queries: &[String],
539 limit: usize,
540 include_features: bool,
541 include_metadata: bool,
542 mode: FindMode,
543 target_chars: Option<usize>,
544 debug_score: bool,
545 index: Option<&Bm25Index>,
546 tune: Option<&FindTune>,
547) -> String {
548 let target = clamp_target_chars(target_chars);
549 let mut sections = Vec::new();
550 for query in queries {
551 let matches = find_all_matches_with_index(
552 graph,
553 query,
554 include_features,
555 include_metadata,
556 mode,
557 index,
558 tune,
559 );
560 let total = matches.len();
561 let visible: Vec<_> = matches.into_iter().take(limit).collect();
562 let section = if visible.len() == 1 {
563 render_single_result_section(graph, query, &visible[0], total, target, debug_score)
564 } else {
565 render_multi_result_section(graph, query, &visible, total, target, debug_score)
566 };
567 sections.push(section);
568 }
569 format!("{}\n", sections.join("\n\n"))
570}
571
/// How much node detail a rendering candidate includes.
#[derive(Clone, Copy)]
enum DetailLevel {
    Rich,
    Compact,
    Minimal,
}

/// One possible rendering of a result section, ranked by
/// `pick_best_candidate` against the character budget.
struct Candidate {
    rendered: String,
    depth: usize,
    detail: DetailLevel,
    shown_nodes: usize,
}

impl DetailLevel {
    /// Tie-break bonus: richer detail wins when size penalties are equal.
    fn utility_bonus(self) -> usize {
        match self {
            Self::Rich => 20,
            Self::Compact => 10,
            Self::Minimal => 0,
        }
    }
}
595
596fn clamp_target_chars(target_chars: Option<usize>) -> usize {
597 target_chars
598 .unwrap_or(DEFAULT_TARGET_CHARS)
599 .clamp(MIN_TARGET_CHARS, MAX_TARGET_CHARS)
600}
601
602fn render_single_result_section(
603 graph: &GraphFile,
604 query: &str,
605 node: &ScoredNode<'_>,
606 total_available: usize,
607 target: usize,
608 debug_score: bool,
609) -> String {
610 let header = render_result_header(query, 1, total_available);
611 let full = render_single_result_candidate(
612 graph,
613 &header,
614 node,
615 total_available,
616 0,
617 DetailLevel::Rich,
618 8,
619 true,
620 debug_score,
621 );
622 if fits_target_chars(&full, target) {
623 return full.trim_end().to_owned();
624 }
625 let mut candidates = Vec::new();
626 for (depth, detail, edge_cap) in [
627 (0usize, DetailLevel::Rich, 8usize),
628 (1usize, DetailLevel::Rich, 8usize),
629 (2usize, DetailLevel::Rich, 6usize),
630 (2usize, DetailLevel::Compact, 6usize),
631 (2usize, DetailLevel::Minimal, 2usize),
632 ] {
633 candidates.push(Candidate {
634 rendered: render_single_result_candidate(
635 graph,
636 &header,
637 node,
638 total_available,
639 depth,
640 detail,
641 edge_cap,
642 false,
643 debug_score,
644 ),
645 depth,
646 detail,
647 shown_nodes: 1 + depth,
648 });
649 }
650 pick_best_candidate(candidates, target)
651 .trim_end()
652 .to_owned()
653}
654
/// Renders a query section holding several matches, degrading detail level,
/// edge caps, result count, and neighbor depth until the output fits `target`.
fn render_multi_result_section(
    graph: &GraphFile,
    query: &str,
    nodes: &[ScoredNode<'_>],
    total_available: usize,
    target: usize,
    debug_score: bool,
) -> String {
    let visible_total = nodes.len();
    // Try the full rendering first and use it verbatim when it fits.
    let full = render_full_result_section(graph, query, nodes, total_available, debug_score);
    if fits_target_chars(&full, target) {
        return full;
    }
    let mut candidates = Vec::new();
    let full_cap = visible_total;
    let mid_cap = full_cap.min(5);
    let low_cap = full_cap.min(3);

    // Degradation ladder: (detail, edge cap, result cap, neighbor depth)
    // combinations from richest to leanest; the best fit is chosen below.
    for (detail, edge_cap, result_cap, depth) in [
        (DetailLevel::Rich, 4usize, full_cap.min(4), 0usize),
        (DetailLevel::Compact, 3usize, full_cap, 0usize),
        (DetailLevel::Rich, 2usize, mid_cap, 1usize),
        (DetailLevel::Compact, 1usize, full_cap, 0usize),
        (DetailLevel::Minimal, 1usize, mid_cap, 0usize),
        (DetailLevel::Minimal, 0usize, low_cap, 0usize),
        (DetailLevel::Minimal, 0usize, low_cap.min(2), 1usize),
    ] {
        let shown = result_cap.min(nodes.len());
        let mut lines = vec![render_result_header(query, shown, total_available)];
        for node in nodes.iter().take(shown) {
            lines.extend(render_scored_node_candidate_lines(
                graph,
                node,
                0,
                detail,
                edge_cap,
                debug_score,
            ));
            if depth > 0 {
                lines.extend(render_neighbor_layers(graph, node.node, depth, detail));
            }
        }
        // Note nodes dropped by this candidate's result cap...
        if visible_total > shown {
            lines.push(format!("... +{} more nodes omitted", visible_total - shown));
        }
        // ...and matches already removed by the caller's result limit.
        push_limit_omission_line(&mut lines, visible_total, total_available);
        candidates.push(Candidate {
            rendered: format!("{}\n", lines.join("\n")),
            depth,
            detail,
            shown_nodes: shown,
        });
    }

    pick_best_candidate(candidates, target)
        .trim_end()
        .to_owned()
}
713
714fn pick_best_candidate(candidates: Vec<Candidate>, target: usize) -> String {
715 let lower = (target as f64 * 0.7) as usize;
716 let mut best: Option<(usize, usize, usize, usize, String)> = None;
717
718 for candidate in candidates {
719 let chars = candidate.rendered.chars().count();
720 let overshoot = chars.saturating_sub(target);
721 let undershoot = lower.saturating_sub(chars);
722 let penalty = overshoot.saturating_mul(10).saturating_add(undershoot);
723 let utility = candidate
724 .depth
725 .saturating_mul(100)
726 .saturating_add(candidate.shown_nodes.saturating_mul(5))
727 .saturating_add(candidate.detail.utility_bonus());
728
729 let entry = (
730 penalty,
731 overshoot,
732 usize::MAX - utility,
733 usize::MAX - chars,
734 candidate.rendered,
735 );
736 if best.as_ref().is_none_or(|current| {
737 entry.0 < current.0
738 || (entry.0 == current.0 && entry.1 < current.1)
739 || (entry.0 == current.0 && entry.1 == current.1 && entry.2 < current.2)
740 || (entry.0 == current.0
741 && entry.1 == current.1
742 && entry.2 == current.2
743 && entry.3 < current.3)
744 }) {
745 best = Some(entry);
746 }
747 }
748
749 best.map(|item| item.4).unwrap_or_else(|| "\n".to_owned())
750}
751
752fn render_full_result_section(
753 graph: &GraphFile,
754 query: &str,
755 nodes: &[ScoredNode<'_>],
756 total_available: usize,
757 debug_score: bool,
758) -> String {
759 let mut lines = vec![render_result_header(query, nodes.len(), total_available)];
760 for node in nodes {
761 lines.push(render_scored_node_block(graph, node, true, debug_score));
762 }
763 push_limit_omission_line(&mut lines, nodes.len(), total_available);
764 lines.join("\n")
765}
766
767fn render_result_header(query: &str, shown: usize, total: usize) -> String {
768 let query = escape_cli_text(query);
769 if shown < total {
770 format!("? {query} ({shown}/{total})")
771 } else {
772 format!("? {query} ({total})")
773 }
774}
775
/// Appends a trailer noting how many matches the result limit hid, if any.
fn push_limit_omission_line(lines: &mut Vec<String>, shown: usize, total: usize) {
    match total.saturating_sub(shown) {
        0 => {}
        omitted => lines.push(format!("... {omitted} more nodes omitted by limit")),
    }
}
782
/// True when the rendered text is within the budget, counted in characters
/// (not bytes), so multi-byte text is measured consistently.
fn fits_target_chars(rendered: &str, target: usize) -> bool {
    let size = rendered.chars().count();
    size <= target
}
786
787fn render_single_node_candidate(
788 graph: &GraphFile,
789 node: &Node,
790 depth: usize,
791 detail: DetailLevel,
792 edge_cap: usize,
793) -> String {
794 let lines = render_single_node_candidate_lines(graph, node, depth, detail, edge_cap);
795 format!("{}\n", lines.join("\n"))
796}
797
798fn render_single_result_candidate(
799 graph: &GraphFile,
800 header: &str,
801 node: &ScoredNode<'_>,
802 total_available: usize,
803 depth: usize,
804 detail: DetailLevel,
805 edge_cap: usize,
806 full: bool,
807 debug_score: bool,
808) -> String {
809 let mut lines = vec![header.to_owned()];
810 if full {
811 lines.push(render_scored_node_block(graph, node, true, debug_score));
812 } else {
813 lines.extend(render_scored_node_candidate_lines(
814 graph,
815 node,
816 depth,
817 detail,
818 edge_cap,
819 debug_score,
820 ));
821 }
822 push_limit_omission_line(&mut lines, 1, total_available);
823 format!("{}\n", lines.join("\n"))
824}
825
826fn render_single_node_candidate_lines(
827 graph: &GraphFile,
828 node: &Node,
829 depth: usize,
830 detail: DetailLevel,
831 edge_cap: usize,
832) -> Vec<String> {
833 let mut lines = render_node_lines_with_edges(graph, node, detail, edge_cap);
834 if depth > 0 {
835 lines.extend(render_neighbor_layers(graph, node, depth, detail));
836 }
837 lines
838}
839
840fn render_scored_node_candidate_lines(
841 graph: &GraphFile,
842 node: &ScoredNode<'_>,
843 depth: usize,
844 detail: DetailLevel,
845 edge_cap: usize,
846 debug_score: bool,
847) -> Vec<String> {
848 let mut lines = vec![format!("score: {}", node.score)];
849 if debug_score {
850 lines.push(render_score_debug_line(node));
851 }
852 lines.extend(render_single_node_candidate_lines(
853 graph, node.node, depth, detail, edge_cap,
854 ));
855 lines
856}
857
858fn render_scored_node_block(
859 graph: &GraphFile,
860 node: &ScoredNode<'_>,
861 full: bool,
862 debug_score: bool,
863) -> String {
864 if debug_score {
865 format!(
866 "score: {}\n{}\n{}",
867 node.score,
868 render_score_debug_line(node),
869 render_node_block(graph, node.node, full)
870 )
871 } else {
872 format!(
873 "score: {}\n{}",
874 node.score,
875 render_node_block(graph, node.node, full)
876 )
877 }
878}
879
/// Formats every component of the score breakdown on one diagnostic line.
fn render_score_debug_line(node: &ScoredNode<'_>) -> String {
    format!(
        "score_debug: raw_relevance={:.3} normalized_relevance={} lexical_boost={} feedback_boost={} importance_boost={} authority_raw={} authority_applied={} authority_cap={}",
        node.breakdown.raw_relevance,
        node.breakdown.normalized_relevance,
        node.breakdown.lexical_boost,
        node.breakdown.feedback_boost,
        node.breakdown.importance_boost,
        node.breakdown.authority_raw,
        node.breakdown.authority_applied,
        node.breakdown.authority_cap,
    )
}
893
/// Breadth-first walk from `root`, rendering identity lines for neighbors
/// grouped by hop distance (1..=max_depth), with a per-layer display cap that
/// shrinks as `detail` decreases.
fn render_neighbor_layers(
    graph: &GraphFile,
    root: &Node,
    max_depth: usize,
    detail: DetailLevel,
) -> Vec<String> {
    let mut out = Vec::new();
    // BFS bookkeeping: `seen` prevents revisits, `queue` holds (id, depth).
    let mut seen: HashSet<String> = HashSet::from([root.id.clone()]);
    let mut queue: VecDeque<(String, usize)> = VecDeque::from([(root.id.clone(), 0usize)]);
    let mut layers: Vec<Vec<&Node>> = vec![Vec::new(); max_depth + 1];

    while let Some((node_id, depth)) = queue.pop_front() {
        if depth >= max_depth {
            continue;
        }
        for incident in incident_edges(graph, &node_id) {
            // `insert` returns false for already-seen ids, so each neighbor
            // lands in exactly one layer: its first (shortest) discovery depth.
            if seen.insert(incident.related.id.clone()) {
                let next_depth = depth + 1;
                if next_depth <= max_depth {
                    layers[next_depth].push(incident.related);
                    queue.push_back((incident.related.id.clone(), next_depth));
                }
            }
        }
    }

    for depth in 1..=max_depth {
        if layers[depth].is_empty() {
            continue;
        }
        // Per-layer display cap, tighter at lower detail levels.
        let cap = match detail {
            DetailLevel::Rich => 6,
            DetailLevel::Compact => 4,
            DetailLevel::Minimal => 3,
        };
        let shown = layers[depth].len().min(cap);
        out.push(format!(
            "depth {depth}: {shown}/{} neighbors",
            layers[depth].len()
        ));
        for node in layers[depth].iter().take(shown) {
            out.extend(render_node_identity_lines(node, detail));
        }
        if layers[depth].len() > shown {
            out.push(format!(
                "... +{} more neighbors omitted",
                layers[depth].len() - shown
            ));
        }
    }

    out
}
947
948fn render_node_lines_with_edges(
949 graph: &GraphFile,
950 node: &Node,
951 detail: DetailLevel,
952 edge_cap: usize,
953) -> Vec<String> {
954 let mut lines = render_node_identity_lines(node, detail);
955 lines.extend(render_node_link_lines(graph, node, edge_cap));
956 lines
957}
958
959fn render_node_identity_lines(node: &Node, detail: DetailLevel) -> Vec<String> {
960 let mut lines = Vec::new();
961 match detail {
962 DetailLevel::Rich => {
963 lines.push(format!(
964 "# {} | {} [{}]",
965 node.id,
966 escape_cli_text(&node.name),
967 node.r#type
968 ));
969 if !node.properties.alias.is_empty() {
970 lines.push(format!(
971 "aka: {}",
972 node.properties
973 .alias
974 .iter()
975 .map(|alias| escape_cli_text(alias))
976 .collect::<Vec<_>>()
977 .join(", ")
978 ));
979 }
980 push_description_line(&mut lines, &node.properties.description, None);
981 let shown_facts = node.properties.key_facts.len().min(3);
982 for fact in node.properties.key_facts.iter().take(shown_facts) {
983 lines.push(format!("- {}", escape_cli_text(fact)));
984 }
985 let omitted = node.properties.key_facts.len().saturating_sub(shown_facts);
986 if omitted > 0 {
987 lines.push(format!("... {omitted} more facts omitted"));
988 }
989 }
990 DetailLevel::Compact => {
991 lines.push(format!(
992 "# {} | {} [{}]",
993 node.id,
994 escape_cli_text(&node.name),
995 node.r#type
996 ));
997 push_description_line(&mut lines, &node.properties.description, Some(140));
998 if let Some(fact) = node.properties.key_facts.first() {
999 lines.push(format!("- {}", escape_cli_text(fact)));
1000 }
1001 }
1002 DetailLevel::Minimal => {
1003 lines.push(format!(
1004 "# {} | {} [{}]",
1005 node.id,
1006 escape_cli_text(&node.name),
1007 node.r#type
1008 ));
1009 }
1010 }
1011 lines
1012}
1013
1014fn render_node_link_lines(graph: &GraphFile, node: &Node, edge_cap: usize) -> Vec<String> {
1015 let incident = incident_edges(graph, &node.id);
1016 if incident.is_empty() {
1017 return Vec::new();
1018 }
1019
1020 let mut lines = Vec::new();
1021 if incident.len() > 12 {
1022 lines.push(format!("links: {} total", incident.len()));
1023 let (out_summary, in_summary) = summarize_relations(&incident);
1024 if !out_summary.is_empty() {
1025 lines.push(format!("out: {out_summary}"));
1026 }
1027 if !in_summary.is_empty() {
1028 lines.push(format!("in: {in_summary}"));
1029 }
1030 }
1031
1032 let shown = incident.len().min(edge_cap);
1033 for edge in incident.into_iter().take(shown) {
1034 let prefix = if edge.incoming { "<-" } else { "->" };
1035 lines.extend(render_edge_lines(prefix, edge.edge, edge.related, false));
1036 }
1037 if edge_cap > 0 && incident_count(graph, &node.id) > shown {
1038 lines.push(format!(
1039 "... {} more links omitted",
1040 incident_count(graph, &node.id) - shown
1041 ));
1042 }
1043 lines
1044}
1045
1046fn incident_count(graph: &GraphFile, node_id: &str) -> usize {
1047 graph
1048 .edges
1049 .iter()
1050 .filter(|edge| edge.source_id == node_id || edge.target_id == node_id)
1051 .count()
1052}
1053
/// An edge touching some focal node, resolved to the node on its other end.
struct IncidentEdge<'a> {
    edge: &'a Edge,
    related: &'a Node,
    // True when the focal node is the edge's target (the edge points inward).
    incoming: bool,
}
1059
1060fn incident_edges<'a>(graph: &'a GraphFile, node_id: &str) -> Vec<IncidentEdge<'a>> {
1061 let mut edges = Vec::new();
1062 for edge in &graph.edges {
1063 if edge.source_id == node_id {
1064 if let Some(related) = graph.node_by_id(&edge.target_id) {
1065 edges.push(IncidentEdge {
1066 edge,
1067 related,
1068 incoming: false,
1069 });
1070 }
1071 } else if edge.target_id == node_id {
1072 if let Some(related) = graph.node_by_id(&edge.source_id) {
1073 edges.push(IncidentEdge {
1074 edge,
1075 related,
1076 incoming: true,
1077 });
1078 }
1079 }
1080 }
1081 edges.sort_by(|left, right| {
1082 right
1083 .related
1084 .properties
1085 .importance
1086 .partial_cmp(&left.related.properties.importance)
1087 .unwrap_or(std::cmp::Ordering::Equal)
1088 .then_with(|| left.edge.relation.cmp(&right.edge.relation))
1089 .then_with(|| left.related.id.cmp(&right.related.id))
1090 });
1091 edges
1092}
1093
1094fn summarize_relations(edges: &[IncidentEdge<'_>]) -> (String, String) {
1095 let mut out: std::collections::BTreeMap<String, usize> = std::collections::BTreeMap::new();
1096 let mut incoming: std::collections::BTreeMap<String, usize> = std::collections::BTreeMap::new();
1097
1098 for edge in edges {
1099 let bucket = if edge.incoming {
1100 &mut incoming
1101 } else {
1102 &mut out
1103 };
1104 *bucket.entry(edge.edge.relation.clone()).or_insert(0) += 1;
1105 }
1106
1107 (join_relation_counts(&out), join_relation_counts(&incoming))
1108}
1109
/// Formats up to three relation tallies as `"relation xN"` joined by commas.
fn join_relation_counts(counts: &std::collections::BTreeMap<String, usize>) -> String {
    let mut parts: Vec<String> = Vec::with_capacity(counts.len().min(3));
    for (relation, count) in counts.iter().take(3) {
        parts.push(format!("{relation} x{count}"));
    }
    parts.join(", ")
}
1118
/// Renders one node as a text block: header, aliases, description, metadata
/// (full only), key facts, sources/feedback/notes (full only), and finally
/// outgoing then incoming edge lines.
fn render_node_block(graph: &GraphFile, node: &Node, full: bool) -> String {
    let mut lines = Vec::new();
    // Header: "# id | name [type]".
    lines.push(format!(
        "# {} | {} [{}]",
        node.id,
        escape_cli_text(&node.name),
        node.r#type
    ));

    if !node.properties.alias.is_empty() {
        lines.push(format!(
            "aka: {}",
            node.properties
                .alias
                .iter()
                .map(|alias| escape_cli_text(alias))
                .collect::<Vec<_>>()
                .join(", ")
        ));
    }
    // Compact view truncates the description to 200 characters.
    push_description_line(
        &mut lines,
        &node.properties.description,
        if full { None } else { Some(200) },
    );
    if full {
        if !node.properties.domain_area.is_empty() {
            lines.push(format!(
                "domain_area: {}",
                escape_cli_text(&node.properties.domain_area)
            ));
        }
        if !node.properties.provenance.is_empty() {
            lines.push(format!(
                "provenance: {}",
                escape_cli_text(&node.properties.provenance)
            ));
        }
        if let Some(confidence) = node.properties.confidence {
            lines.push(format!("confidence: {confidence}"));
        }
        lines.push(format!("importance: {}", node.properties.importance));
        if !node.properties.created_at.is_empty() {
            lines.push(format!("created_at: {}", node.properties.created_at));
        }
    }

    // Key facts: all of them in full view, at most two otherwise.
    let facts_to_show = if full {
        node.properties.key_facts.len()
    } else {
        node.properties.key_facts.len().min(2)
    };
    for fact in node.properties.key_facts.iter().take(facts_to_show) {
        lines.push(format!("- {}", escape_cli_text(fact)));
    }
    let omitted = node
        .properties
        .key_facts
        .len()
        .saturating_sub(facts_to_show);
    if omitted > 0 {
        lines.push(format!("... {omitted} more facts omitted"));
    }

    if full {
        if !node.source_files.is_empty() {
            lines.push(format!(
                "sources: {}",
                node.source_files
                    .iter()
                    .map(|source| escape_cli_text(source))
                    .collect::<Vec<_>>()
                    .join(", ")
            ));
        }
        push_feedback_lines(
            &mut lines,
            node.properties.feedback_score,
            node.properties.feedback_count,
            node.properties.feedback_last_ts_ms,
            None,
        );
    }

    // Notes attached to this node are listed in full view only.
    let attached_notes: Vec<_> = graph
        .notes
        .iter()
        .filter(|note| note.node_id == node.id)
        .collect();
    if full && !attached_notes.is_empty() {
        lines.push(format!("notes: {}", attached_notes.len()));
        for note in attached_notes {
            lines.extend(render_attached_note_lines(note));
        }
    }

    // Edges: outgoing first, then incoming (each capped at 3 unless `full`).
    for edge in outgoing_edges(graph, &node.id, full) {
        if let Some(target) = graph.node_by_id(&edge.target_id) {
            lines.extend(render_edge_lines("->", edge, target, full));
        }
    }
    for edge in incoming_edges(graph, &node.id, full) {
        if let Some(source) = graph.node_by_id(&edge.source_id) {
            lines.extend(render_edge_lines("<-", edge, source, full));
        }
    }

    lines.join("\n")
}
1228
1229fn outgoing_edges<'a>(graph: &'a GraphFile, node_id: &str, full: bool) -> Vec<&'a Edge> {
1230 let mut edges: Vec<&Edge> = graph
1231 .edges
1232 .iter()
1233 .filter(|edge| edge.source_id == node_id)
1234 .collect();
1235 edges.sort_by_key(|edge| (&edge.relation, &edge.target_id));
1236 if !full {
1237 edges.truncate(3);
1238 }
1239 edges
1240}
1241
1242fn incoming_edges<'a>(graph: &'a GraphFile, node_id: &str, full: bool) -> Vec<&'a Edge> {
1243 let mut edges: Vec<&Edge> = graph
1244 .edges
1245 .iter()
1246 .filter(|edge| edge.target_id == node_id)
1247 .collect();
1248 edges.sort_by_key(|edge| (&edge.relation, &edge.source_id));
1249 if !full {
1250 edges.truncate(3);
1251 }
1252 edges
1253}
1254
1255fn render_edge_lines(prefix: &str, edge: &Edge, related: &Node, full: bool) -> Vec<String> {
1256 let (arrow, relation) = if edge.relation.starts_with("NOT_") {
1257 (
1258 format!("{prefix}!"),
1259 edge.relation.trim_start_matches("NOT_"),
1260 )
1261 } else {
1262 (prefix.to_owned(), edge.relation.as_str())
1263 };
1264
1265 let mut line = format!(
1266 "{arrow} {relation} | {} | {}",
1267 related.id,
1268 escape_cli_text(&related.name)
1269 );
1270 if !edge.properties.detail.is_empty() {
1271 line.push_str(" | ");
1272 let detail = escape_cli_text(&edge.properties.detail);
1273 if full {
1274 line.push_str(&detail);
1275 } else {
1276 line.push_str(&truncate(&detail, 80));
1277 }
1278 }
1279 let mut lines = vec![line];
1280 if full {
1281 push_feedback_lines(
1282 &mut lines,
1283 edge.properties.feedback_score,
1284 edge.properties.feedback_count,
1285 edge.properties.feedback_last_ts_ms,
1286 Some("edge_"),
1287 );
1288 if !edge.properties.valid_from.is_empty() {
1289 lines.push(format!("edge_valid_from: {}", edge.properties.valid_from));
1290 }
1291 if !edge.properties.valid_to.is_empty() {
1292 lines.push(format!("edge_valid_to: {}", edge.properties.valid_to));
1293 }
1294 }
1295 lines
1296}
1297
/// Truncates `value` to at most `max_len` characters (not bytes), appending
/// `...` when content was removed.
///
/// Counts `char`s so multi-byte UTF-8 text is never split mid-character.
/// When `max_len <= 3` there is no room for the ellipsis, so the first
/// `max_len` characters are returned verbatim; previously the three-dot
/// suffix was appended unconditionally, which could make the result longer
/// than `max_len`.
fn truncate(value: &str, max_len: usize) -> String {
    if value.chars().count() <= max_len {
        return value.to_owned();
    }
    if max_len <= 3 {
        // No room for "...": hard-cut instead of overshooting the limit.
        return value.chars().take(max_len).collect();
    }
    let kept: String = value.chars().take(max_len - 3).collect();
    format!("{kept}...")
}
1306
/// Escapes backslashes and line-breaking whitespace so a value renders on a
/// single CLI line; every other character passes through unchanged.
fn escape_cli_text(value: &str) -> String {
    let mut escaped = String::with_capacity(value.len());
    for ch in value.chars() {
        match ch {
            '\\' => escaped.push_str("\\\\"),
            '\n' => escaped.push_str("\\n"),
            '\r' => escaped.push_str("\\r"),
            '\t' => escaped.push_str("\\t"),
            other => escaped.push(other),
        }
    }
    escaped
}
1320
1321fn push_description_line(lines: &mut Vec<String>, description: &str, max_len: Option<usize>) {
1322 if description.is_empty() {
1323 return;
1324 }
1325 let escaped = escape_cli_text(description);
1326 let rendered = match max_len {
1327 Some(limit) => truncate(&escaped, limit),
1328 None => escaped,
1329 };
1330 lines.push(format!("desc: {rendered}"));
1331}
1332
/// Appends feedback summary lines, skipping zero or absent values.
///
/// `prefix` (e.g. `"edge_"`) namespaces the keys; `None` means no prefix.
fn push_feedback_lines(
    lines: &mut Vec<String>,
    score: f64,
    count: u64,
    last_ts_ms: Option<u64>,
    prefix: Option<&str>,
) {
    let key_prefix = prefix.unwrap_or_default();
    if score != 0.0 {
        lines.push(format!("{key_prefix}feedback_score: {score}"));
    }
    if count > 0 {
        lines.push(format!("{key_prefix}feedback_count: {count}"));
    }
    if let Some(timestamp) = last_ts_ms {
        lines.push(format!("{key_prefix}feedback_last_ts_ms: {timestamp}"));
    }
}
1351
1352fn render_attached_note_lines(note: &crate::graph::Note) -> Vec<String> {
1353 let mut lines = vec![format!("! {}", note.id)];
1354 if !note.body.is_empty() {
1355 lines.push(format!("note_body: {}", escape_cli_text(¬e.body)));
1356 }
1357 if !note.tags.is_empty() {
1358 lines.push(format!(
1359 "note_tags: {}",
1360 note.tags
1361 .iter()
1362 .map(|tag| escape_cli_text(tag))
1363 .collect::<Vec<_>>()
1364 .join(", ")
1365 ));
1366 }
1367 if !note.author.is_empty() {
1368 lines.push(format!("note_author: {}", escape_cli_text(¬e.author)));
1369 }
1370 if !note.created_at.is_empty() {
1371 lines.push(format!("note_created_at: {}", note.created_at));
1372 }
1373 if !note.provenance.is_empty() {
1374 lines.push(format!(
1375 "note_provenance: {}",
1376 escape_cli_text(¬e.provenance)
1377 ));
1378 }
1379 if !note.source_files.is_empty() {
1380 lines.push(format!(
1381 "note_sources: {}",
1382 note.source_files
1383 .iter()
1384 .map(|source| escape_cli_text(source))
1385 .collect::<Vec<_>>()
1386 .join(", ")
1387 ));
1388 }
1389 lines
1390}
1391
1392fn find_matches_with_index<'a>(
1393 graph: &'a GraphFile,
1394 query: &str,
1395 limit: usize,
1396 include_features: bool,
1397 include_metadata: bool,
1398 mode: FindMode,
1399 index: Option<&Bm25Index>,
1400 tune: Option<&FindTune>,
1401) -> Vec<ScoredNode<'a>> {
1402 let mut matches = find_all_matches_with_index(
1403 graph,
1404 query,
1405 include_features,
1406 include_metadata,
1407 mode,
1408 index,
1409 tune,
1410 );
1411 matches.truncate(limit);
1412 matches
1413}
1414
/// Scores every searchable node in `graph` against `query` and returns the
/// matches sorted by descending score (ties broken by ascending node id),
/// deduplicated by normalized node id.
///
/// The query is expanded via `rewrite_query` first; if expansion strips
/// everything, the raw query text is used instead. `mode` selects the
/// backend: pure fuzzy matching, BM25 (accelerated when `index` is
/// provided), or a tunable hybrid of both.
fn find_all_matches_with_index<'a>(
    graph: &'a GraphFile,
    query: &str,
    include_features: bool,
    include_metadata: bool,
    mode: FindMode,
    index: Option<&Bm25Index>,
    tune: Option<&FindTune>,
) -> Vec<ScoredNode<'a>> {
    let context = FindQueryContext::build(graph);
    let rewritten_query = rewrite_query(query);
    // Fall back to the raw query if normalization produced nothing.
    let fuzzy_query = if rewritten_query.is_empty() {
        query.to_owned()
    } else {
        rewritten_query
    };
    let mut scored: Vec<ScoredNode<'a>> = match mode {
        FindMode::Fuzzy => {
            let pattern = Pattern::parse(&fuzzy_query, CaseMatching::Ignore, Normalization::Smart);
            let mut matcher = Matcher::new(Config::DEFAULT);
            let candidates = graph
                .nodes
                .iter()
                .filter(|node| node_is_searchable(node, include_features, include_metadata))
                .filter_map(|node| {
                    score_node(&context, node, &fuzzy_query, &pattern, &mut matcher).map(|score| {
                        RawCandidate {
                            node,
                            raw_relevance: score as f64,
                            lexical_boost: 0,
                        }
                    })
                })
                .collect();
            compose_scores(candidates)
        }
        FindMode::Bm25 => compose_scores(score_bm25_raw(
            graph,
            &context,
            &fuzzy_query,
            include_features,
            include_metadata,
            index,
        )),
        FindMode::Hybrid => compose_scores(score_hybrid_raw(
            graph,
            &context,
            &fuzzy_query,
            include_features,
            include_metadata,
            index,
            tune.copied().unwrap_or_default(),
        )),
    };

    // Highest score first; stable tie-break on ascending node id.
    scored.sort_by(|left, right| {
        right
            .score
            .cmp(&left.score)
            .then_with(|| left.node.id.cmp(&right.node.id))
    });
    // Deduplicate case-insensitively on the normalized id; since the list is
    // already sorted, the first (best-scoring) occurrence wins.
    let mut seen_ids = HashSet::new();
    scored.retain(|item| {
        let key = crate::validate::normalize_node_id(&item.node.id).to_ascii_lowercase();
        seen_ids.insert(key)
    });
    scored
}
1483
/// Turns raw relevance values into final ranked scores.
///
/// Raw relevance is log-normalized against the best candidate onto a
/// 0..=1000 scale; an "authority" adjustment (feedback + importance boosts)
/// is then added. The adjustment is clamped to a cap proportional to the
/// candidate's own relevance (`SCORE_META_MAX_RATIO`), bounded below by
/// `SCORE_META_MIN_CAP` and above by `SCORE_META_MAX_CAP`, so metadata can
/// nudge ranking but never dominate it. Candidates with non-positive raw
/// relevance are dropped.
fn compose_scores<'a>(candidates: Vec<RawCandidate<'a>>) -> Vec<ScoredNode<'a>> {
    let max_raw = candidates
        .iter()
        .map(|candidate| candidate.raw_relevance)
        .fold(0.0f64, f64::max);
    // ln_1p compresses the dynamic range so one huge score does not flatten
    // every other candidate to near zero.
    let max_raw_log = max_raw.ln_1p();

    candidates
        .into_iter()
        .filter_map(|candidate| {
            if candidate.raw_relevance <= 0.0 {
                return None;
            }
            let normalized_relevance = if max_raw_log > 0.0 {
                ((candidate.raw_relevance.ln_1p() / max_raw_log) * 1000.0).round() as i64
            } else {
                0
            };
            let feedback = feedback_boost(candidate.node);
            let importance = importance_boost(candidate.node);
            let authority_raw = feedback + importance;
            // Cap scales with relevance: weak matches get little headroom.
            let relative_cap =
                ((normalized_relevance as f64) * SCORE_META_MAX_RATIO).round() as i64;
            let authority_cap = relative_cap.max(SCORE_META_MIN_CAP).min(SCORE_META_MAX_CAP);
            let authority_applied = authority_raw.clamp(-authority_cap, authority_cap);
            let final_score = normalized_relevance + authority_applied;

            Some(ScoredNode {
                score: final_score,
                node: candidate.node,
                breakdown: ScoreBreakdown {
                    raw_relevance: candidate.raw_relevance,
                    normalized_relevance,
                    lexical_boost: candidate.lexical_boost,
                    feedback_boost: feedback,
                    importance_boost: importance,
                    authority_raw,
                    authority_applied,
                    authority_cap,
                },
            })
        })
        .collect()
}
1528
1529fn feedback_boost(node: &Node) -> i64 {
1530 let count = node.properties.feedback_count as f64;
1531 if count <= 0.0 {
1532 return 0;
1533 }
1534 let avg = node.properties.feedback_score / count;
1535 let confidence = (count.ln_1p() / 3.0).min(1.0);
1536 let scaled = avg * 200.0 * confidence;
1537 scaled.clamp(-300.0, 300.0).round() as i64
1538}
1539
1540fn importance_boost(node: &Node) -> i64 {
1541 let normalized_importance = if (0.0..=1.0).contains(&node.properties.importance) {
1542 node.properties.importance
1543 } else if (1.0..=6.0).contains(&node.properties.importance) {
1544 (node.properties.importance - 1.0) / 5.0
1545 } else {
1546 node.properties.importance.clamp(0.0, 1.0)
1547 };
1548 let normalized = (normalized_importance - IMPORTANCE_NEUTRAL) * 2.0;
1549 (normalized * IMPORTANCE_MAX_ABS_BOOST).round() as i64
1550}
1551
/// Computes raw BM25 relevance for every searchable node in `graph`.
///
/// The query is expanded into normalized terms first; an empty expansion
/// yields no candidates. With a prebuilt `index` the search is delegated to
/// it and only the lexical boost is recomputed locally; otherwise document
/// frequencies, average document length, and per-node Okapi BM25 scores are
/// computed on the fly over the weighted token bags. In both paths the raw
/// relevance is `bm25_score * 100 + lexical_boost`.
fn score_bm25_raw<'a>(
    graph: &'a GraphFile,
    context: &FindQueryContext<'a>,
    query: &str,
    include_features: bool,
    include_metadata: bool,
    index: Option<&Bm25Index>,
) -> Vec<RawCandidate<'a>> {
    let terms = text_norm::expand_query_terms(query);
    if terms.is_empty() {
        return Vec::new();
    }

    // Fast path: the prebuilt index already knows the BM25 scores.
    if let Some(idx) = index {
        let results = idx.search(&terms, graph);
        return results
            .into_iter()
            .filter_map(|(node_id, score)| {
                let node = graph.node_by_id(&node_id)?;
                if !node_is_searchable(node, include_features, include_metadata) {
                    return None;
                }
                // Phrase/token overlap is not tracked by the index, so the
                // lexical boost is recomputed from the full token bag.
                let document_terms = node_document_terms(context, node);
                let lexical_boost = bm25_lexical_boost(&terms, &document_terms);
                Some(RawCandidate {
                    node,
                    raw_relevance: score as f64 * 100.0 + lexical_boost as f64,
                    lexical_boost,
                })
            })
            .collect();
    }

    // Fallback: in-memory BM25 over weighted per-node token bags.
    let mut docs: Vec<(&'a Node, Vec<String>)> = graph
        .nodes
        .iter()
        .filter(|node| node_is_searchable(node, include_features, include_metadata))
        .map(|node| (node, node_document_terms(context, node)))
        .collect();

    if docs.is_empty() {
        return Vec::new();
    }

    // Document frequency: how many documents contain each query term.
    let mut df: std::collections::HashMap<&str, usize> = std::collections::HashMap::new();
    for term in &terms {
        let mut count = 0usize;
        for (_, tokens) in &docs {
            if tokens.iter().any(|t| t == term) {
                count += 1;
            }
        }
        df.insert(term.as_str(), count);
    }

    let total_docs = docs.len() as f64;
    // Average document length, used by BM25 length normalization.
    let avgdl = docs
        .iter()
        .map(|(_, tokens)| tokens.len() as f64)
        .sum::<f64>()
        / total_docs;

    let mut scored = Vec::new();

    for (node, tokens) in docs.drain(..) {
        let dl = tokens.len() as f64;
        if dl == 0.0 {
            continue;
        }
        let mut score = 0.0f64;
        for term in &terms {
            let tf = tokens.iter().filter(|t| *t == term).count() as f64;
            if tf == 0.0 {
                continue;
            }
            let df_t = *df.get(term.as_str()).unwrap_or(&0) as f64;
            // Smoothed IDF; the +1 inside the log keeps it positive.
            let idf = (1.0 + (total_docs - df_t + 0.5) / (df_t + 0.5)).ln();
            // Okapi BM25 term contribution with k1/b from module constants.
            let denom = tf + BM25_K1 * (1.0 - BM25_B + BM25_B * (dl / avgdl));
            score += idf * (tf * (BM25_K1 + 1.0) / denom);
        }
        if score > 0.0 {
            let lexical_boost = bm25_lexical_boost(&terms, &tokens);
            scored.push(RawCandidate {
                node,
                raw_relevance: score * 100.0 + lexical_boost as f64,
                lexical_boost,
            });
        }
    }

    scored
}
1644
/// Blends fuzzy and BM25 relevance into a single raw score per node.
///
/// Both signals are computed independently, log-normalized to 0..=1, then
/// combined as a weighted average of `tune.fuzzy` and `tune.bm25` (the
/// vector weight plays no role here) and scaled to 0..=1000. Nodes whose
/// combined score is zero are dropped. Lexical boosts from the BM25 pass
/// ride along for the score breakdown only.
fn score_hybrid_raw<'a>(
    graph: &'a GraphFile,
    context: &FindQueryContext<'a>,
    query: &str,
    include_features: bool,
    include_metadata: bool,
    index: Option<&Bm25Index>,
    tune: FindTune,
) -> Vec<RawCandidate<'a>> {
    let pattern = Pattern::parse(query, CaseMatching::Ignore, Normalization::Smart);
    let mut matcher = Matcher::new(Config::DEFAULT);

    // Fuzzy signal keyed by node id.
    let mut fuzzy_raw = HashMap::new();
    for node in graph
        .nodes
        .iter()
        .filter(|node| node_is_searchable(node, include_features, include_metadata))
    {
        if let Some(score) = score_node(context, node, query, &pattern, &mut matcher) {
            fuzzy_raw.insert(node.id.as_str(), score as f64);
        }
    }

    // BM25 signal (plus its lexical boost) keyed by node id.
    let bm25_candidates = score_bm25_raw(
        graph,
        context,
        query,
        include_features,
        include_metadata,
        index,
    );
    let mut bm25_raw = HashMap::new();
    let mut lexical_boost = HashMap::new();
    for candidate in bm25_candidates {
        bm25_raw.insert(candidate.node.id.as_str(), candidate.raw_relevance);
        lexical_boost.insert(candidate.node.id.as_str(), candidate.lexical_boost);
    }

    let fuzzy_norm = normalize_raw_scores(&fuzzy_raw);
    let bm25_norm = normalize_raw_scores(&bm25_raw);
    // Guard against a zero denominator when both weights are zero.
    let total_weight = (tune.bm25 + tune.fuzzy).max(0.0001);

    graph
        .nodes
        .iter()
        .filter(|node| node_is_searchable(node, include_features, include_metadata))
        .filter_map(|node| {
            let f = fuzzy_norm.get(node.id.as_str()).copied().unwrap_or(0.0);
            let b = bm25_norm.get(node.id.as_str()).copied().unwrap_or(0.0);
            let combined = ((tune.fuzzy * f) + (tune.bm25 * b)) / total_weight;
            if combined <= 0.0 {
                return None;
            }
            Some(RawCandidate {
                node,
                raw_relevance: combined * 1000.0,
                lexical_boost: lexical_boost.get(node.id.as_str()).copied().unwrap_or(0),
            })
        })
        .collect()
}
1706
/// Log-normalizes raw scores into the 0..=1 range.
///
/// Each value maps to `ln_1p(value) / ln_1p(max)`, so the best raw score
/// becomes exactly 1.0; when every score is zero (or the map is empty) all
/// outputs are 0.0.
fn normalize_raw_scores<'a>(raw: &'a HashMap<&'a str, f64>) -> HashMap<&'a str, f64> {
    let max_log = raw.values().copied().fold(0.0f64, f64::max).ln_1p();
    let scale = |value: f64| {
        if max_log > 0.0 {
            (value.ln_1p() / max_log).clamp(0.0, 1.0)
        } else {
            0.0
        }
    };
    raw.iter().map(|(id, value)| (*id, scale(*value))).collect()
}
1721
1722fn node_is_searchable(node: &Node, include_features: bool, include_metadata: bool) -> bool {
1723 (include_features || node.r#type != "Feature") && (include_metadata || node.r#type != "^")
1724}
1725
1726fn node_document_terms(context: &FindQueryContext<'_>, node: &Node) -> Vec<String> {
1727 let mut tokens = Vec::new();
1728 push_terms(&mut tokens, &node.id, BM25_ID_WEIGHT);
1729 push_terms(&mut tokens, &node.name, BM25_NAME_WEIGHT);
1730 push_terms(
1731 &mut tokens,
1732 &node.properties.description,
1733 BM25_DESCRIPTION_WEIGHT,
1734 );
1735 for alias in &node.properties.alias {
1736 push_terms(&mut tokens, alias, BM25_ALIAS_WEIGHT);
1737 }
1738 for fact in &node.properties.key_facts {
1739 push_terms(&mut tokens, fact, BM25_FACT_WEIGHT);
1740 }
1741 for note in context.notes_for(&node.id) {
1742 push_terms(&mut tokens, ¬e.body, BM25_NOTE_BODY_WEIGHT);
1743 for tag in ¬e.tags {
1744 push_terms(&mut tokens, tag, BM25_NOTE_TAG_WEIGHT);
1745 }
1746 }
1747 for neighbor in context.neighbors_for(&node.id) {
1748 push_terms(&mut tokens, &neighbor.id, BM25_NEIGHBOR_WEIGHT);
1749 push_terms(&mut tokens, &neighbor.name, BM25_NEIGHBOR_WEIGHT);
1750 push_terms(
1751 &mut tokens,
1752 &neighbor.properties.description,
1753 BM25_NEIGHBOR_WEIGHT,
1754 );
1755 for alias in &neighbor.properties.alias {
1756 push_terms(&mut tokens, alias, BM25_NEIGHBOR_WEIGHT);
1757 }
1758 }
1759 tokens
1760}
1761
1762fn push_terms(target: &mut Vec<String>, value: &str, weight: usize) {
1763 if value.is_empty() {
1764 return;
1765 }
1766 let terms = tokenize(value);
1767 for _ in 0..weight {
1768 target.extend(terms.iter().cloned());
1769 }
1770}
1771
/// Thin wrapper delegating tokenization to the shared text-normalization
/// module so search and indexing split text identically.
fn tokenize(text: &str) -> Vec<String> {
    text_norm::tokenize(text)
}
1775
/// Expands the raw query into normalized terms and re-joins them with
/// single spaces, producing the canonical query string used for matching.
fn rewrite_query(query: &str) -> String {
    text_norm::expand_query_terms(query).join(" ")
}
1779
1780fn bm25_lexical_boost(query_terms: &[String], document_terms: &[String]) -> i64 {
1781 if query_terms.is_empty() || document_terms.is_empty() {
1782 return 0;
1783 }
1784 if query_terms.len() > 1 && contains_token_phrase(document_terms, query_terms) {
1785 return BM25_PHRASE_MATCH_BOOST;
1786 }
1787 let document_vocab: HashSet<&str> = document_terms.iter().map(String::as_str).collect();
1788 let query_vocab: HashSet<&str> = query_terms.iter().map(String::as_str).collect();
1789 let matched_tokens = query_vocab
1790 .iter()
1791 .filter(|token| document_vocab.contains(**token))
1792 .count() as i64;
1793 if matched_tokens == 0 {
1794 return 0;
1795 }
1796 let query_token_count = query_vocab.len() as i64;
1797 (matched_tokens * BM25_TOKEN_MATCH_BOOST + query_token_count - 1) / query_token_count
1798}
1799
/// Returns true when `query_terms` appears as a contiguous, in-order run
/// inside `document_terms`. An empty query never matches.
fn contains_token_phrase(document_terms: &[String], query_terms: &[String]) -> bool {
    let needle_len = query_terms.len();
    if needle_len == 0 || needle_len > document_terms.len() {
        return false;
    }
    (0..=document_terms.len() - needle_len)
        .any(|start| &document_terms[start..start + needle_len] == query_terms)
}
1808
/// Fuzzy-scores one node against `query`; `None` means nothing matched.
///
/// Primary fields — id (weight 5), name (4), aliases (4) — receive the full
/// textual bonus; description, key facts, attached notes, and the best
/// neighbor form a contextual score. Neighbor context is capped at
/// `FUZZY_NEIGHBOR_CONTEXT_CAP` and halved when a primary field already
/// matched. When no primary field matched, the entire contextual score is
/// divided by `FUZZY_NO_PRIMARY_CONTEXT_DIVISOR` to damp context-only hits.
fn score_node(
    context: &FindQueryContext<'_>,
    node: &Node,
    query: &str,
    pattern: &Pattern,
    matcher: &mut Matcher,
) -> Option<u32> {
    let mut primary_score = 0;
    let mut primary_hits = 0;

    let id_score = score_primary_field(query, pattern, matcher, &node.id, 5);
    if id_score > 0 {
        primary_hits += 1;
    }
    primary_score += id_score;

    let name_score = score_primary_field(query, pattern, matcher, &node.name, 4);
    if name_score > 0 {
        primary_hits += 1;
    }
    primary_score += name_score;

    for alias in &node.properties.alias {
        let alias_score = score_primary_field(query, pattern, matcher, alias, 4);
        if alias_score > 0 {
            primary_hits += 1;
        }
        primary_score += alias_score;
    }

    let mut contextual_score = score_secondary_field(
        query,
        pattern,
        matcher,
        &node.properties.description,
        FUZZY_DESCRIPTION_WEIGHT,
    );
    for fact in &node.properties.key_facts {
        contextual_score += score_secondary_field(query, pattern, matcher, fact, FUZZY_FACT_WEIGHT);
    }
    contextual_score += score_notes_context(context, node, query, pattern, matcher);

    // Neighbor evidence is capped, and worth only half once the node itself
    // matched on a primary field.
    let neighbor_context = score_neighbor_context(context, node, query, pattern, matcher)
        .min(FUZZY_NEIGHBOR_CONTEXT_CAP);
    contextual_score += if primary_hits > 0 {
        neighbor_context / 2
    } else {
        neighbor_context
    };

    if primary_hits == 0 {
        contextual_score /= FUZZY_NO_PRIMARY_CONTEXT_DIVISOR;
    }

    let total = primary_score + contextual_score;
    (total > 0).then_some(total)
}
1866
1867fn score_notes_context(
1868 context: &FindQueryContext<'_>,
1869 node: &Node,
1870 query: &str,
1871 pattern: &Pattern,
1872 matcher: &mut Matcher,
1873) -> u32 {
1874 let mut total = 0;
1875 for note in context.notes_for(&node.id) {
1876 total += score_secondary_field(query, pattern, matcher, ¬e.body, FUZZY_NOTE_BODY_WEIGHT);
1877 for tag in ¬e.tags {
1878 total += score_secondary_field(query, pattern, matcher, tag, FUZZY_NOTE_TAG_WEIGHT);
1879 }
1880 }
1881 total
1882}
1883
1884fn score_neighbor_context(
1885 context: &FindQueryContext<'_>,
1886 node: &Node,
1887 query: &str,
1888 pattern: &Pattern,
1889 matcher: &mut Matcher,
1890) -> u32 {
1891 let mut best = 0;
1892
1893 for neighbor in context.neighbors_for(&node.id) {
1894 let mut score = score_secondary_field(query, pattern, matcher, &neighbor.id, 1)
1895 + score_secondary_field(query, pattern, matcher, &neighbor.name, 1)
1896 + score_secondary_field(query, pattern, matcher, &neighbor.properties.description, 1);
1897
1898 for alias in &neighbor.properties.alias {
1899 score += score_secondary_field(query, pattern, matcher, alias, 1);
1900 }
1901
1902 best = best.max(score);
1903 }
1904
1905 best
1906}
1907
1908fn score_field(pattern: &Pattern, matcher: &mut Matcher, value: &str) -> Option<u32> {
1909 if value.is_empty() {
1910 return None;
1911 }
1912 let mut buf = Vec::new();
1913 let haystack = Utf32Str::new(value, &mut buf);
1914 pattern.score(haystack, matcher)
1915}
1916
1917fn score_primary_field(
1918 query: &str,
1919 pattern: &Pattern,
1920 matcher: &mut Matcher,
1921 value: &str,
1922 weight: u32,
1923) -> u32 {
1924 let bonus = textual_bonus(query, value);
1925 let fuzzy = score_field(pattern, matcher, value).unwrap_or(0);
1926 if bonus == 0 && fuzzy == 0 {
1927 return 0;
1928 }
1929 (fuzzy + bonus) * weight
1930}
1931
1932fn score_secondary_field(
1933 query: &str,
1934 pattern: &Pattern,
1935 matcher: &mut Matcher,
1936 value: &str,
1937 weight: u32,
1938) -> u32 {
1939 let bonus = textual_bonus(query, value);
1940 let fuzzy = score_field(pattern, matcher, value).unwrap_or(0);
1941 if bonus == 0 && fuzzy == 0 {
1942 return 0;
1943 }
1944 (fuzzy + bonus / 2) * weight
1945}
1946
1947fn textual_bonus(query: &str, value: &str) -> u32 {
1948 let query = query.trim().to_lowercase();
1949 let value = value.to_lowercase();
1950
1951 if value == query {
1952 return 400;
1953 }
1954 if value.contains(&query) {
1955 return 200;
1956 }
1957
1958 query
1959 .split_whitespace()
1960 .map(|token| {
1961 if value.contains(token) {
1962 80
1963 } else if is_subsequence(token, &value) {
1964 40
1965 } else {
1966 0
1967 }
1968 })
1969 .sum()
1970}
1971
/// Returns true when every character of `needle` appears in `haystack` in
/// order (not necessarily contiguously). An empty needle never matches.
fn is_subsequence(needle: &str, haystack: &str) -> bool {
    if needle.is_empty() {
        return false;
    }

    let mut pending = needle.chars().peekable();
    for ch in haystack.chars() {
        if pending.peek() == Some(&ch) {
            pending.next();
            if pending.peek().is_none() {
                // Every needle character has been consumed.
                return true;
            }
        }
    }

    false
}
1994
// Unit tests for the find/scoring pipeline: lexical bonus tiers, BM25
// boosts, authority capping, deduplication, and visibility filtering.
#[cfg(test)]
mod tests {
    use super::*;

    // Builds a minimal Concept node populated with the scoring-relevant
    // fields the tests exercise.
    fn make_node(
        id: &str,
        name: &str,
        description: &str,
        key_facts: &[&str],
        alias: &[&str],
        importance: f64,
        feedback_score: f64,
        feedback_count: u64,
    ) -> Node {
        let mut properties = crate::graph::NodeProperties::default();
        properties.description = description.to_owned();
        properties.key_facts = key_facts.iter().map(|v| (*v).to_owned()).collect();
        properties.alias = alias.iter().map(|v| (*v).to_owned()).collect();
        properties.importance = importance;
        properties.feedback_score = feedback_score;
        properties.feedback_count = feedback_count;
        Node {
            id: id.to_owned(),
            r#type: "Concept".to_owned(),
            name: name.to_owned(),
            properties,
            source_files: Vec::new(),
        }
    }

    // Looks up the final score for a node id, panicking if it is missing.
    fn score_for(results: &[ScoredNode<'_>], id: &str) -> i64 {
        results
            .iter()
            .find(|item| item.node.id == id)
            .map(|item| item.score)
            .expect("score for node")
    }

    #[test]
    fn textual_bonus_tiers_are_stable() {
        // Exact match, substring, per-token (80 + 80), subsequence, miss.
        assert_eq!(textual_bonus("abc", "abc"), 400);
        assert_eq!(textual_bonus("abc", "xxabcxx"), 200);
        assert_eq!(textual_bonus("abc def", "aa abc and def zz"), 160);
        assert_eq!(textual_bonus("abc", "aXbYc"), 40);
        assert_eq!(textual_bonus("abc", "zzz"), 0);
    }

    #[test]
    fn tokenize_handles_unicode_casefolding() {
        let tokens = tokenize("ŁÓDŹ smart-home");
        assert_eq!(tokens, vec!["łódź", "smart", "home"]);
    }

    #[test]
    fn bm25_lexical_boost_prefers_phrase_then_tokens() {
        let query_terms = tokenize("smart home api");
        // Contiguous in-order phrase earns the full phrase boost.
        assert_eq!(
            bm25_lexical_boost(&query_terms, &tokenize("x smart home api y")),
            120
        );
        // All tokens present but scattered earns the token boost.
        assert_eq!(
            bm25_lexical_boost(&query_terms, &tokenize("smart x api y home")),
            45
        );
        assert_eq!(
            bm25_lexical_boost(&query_terms, &tokenize("nothing here")),
            0
        );
    }

    #[test]
    fn score_node_uses_key_facts_and_notes_without_primary_match() {
        let node = make_node(
            "concept:gateway",
            "Gateway",
            "",
            &["Autentykacja OAuth2 przez konto producenta"],
            &[],
            0.5,
            0.0,
            0,
        );
        let mut graph = GraphFile::new("test");
        graph.nodes.push(node.clone());
        graph.notes.push(crate::graph::Note {
            id: "note:oauth".to_owned(),
            node_id: node.id.clone(),
            body: "Token refresh przez OAuth2".to_owned(),
            tags: vec!["oauth2".to_owned()],
            ..Default::default()
        });

        let pattern = Pattern::parse(
            "oauth2 producenta",
            CaseMatching::Ignore,
            Normalization::Smart,
        );
        let context = FindQueryContext::build(&graph);
        let mut matcher = Matcher::new(Config::DEFAULT);
        // Facts + note context alone should still yield a positive score.
        let score = score_node(&context, &node, "oauth2 producenta", &pattern, &mut matcher);
        assert!(score.is_some_and(|value| value > 0));

        // With no facts/notes there is no signal at all: score is None.
        let empty_graph = GraphFile::new("empty");
        let empty_node = make_node("concept:gateway", "Gateway", "", &[], &[], 0.5, 0.0, 0);
        let empty_context = FindQueryContext::build(&empty_graph);
        let mut matcher = Matcher::new(Config::DEFAULT);
        let empty_score = score_node(
            &empty_context,
            &empty_node,
            "oauth2 producenta",
            &pattern,
            &mut matcher,
        );
        assert!(empty_score.is_none());
    }

    #[test]
    fn score_bm25_respects_importance_boost_for_equal_documents() {
        // Identical documents should be separated only by importance.
        let mut graph = GraphFile::new("test");
        graph.nodes.push(make_node(
            "concept:high",
            "High",
            "smart home api",
            &[],
            &[],
            1.0,
            0.0,
            0,
        ));
        graph.nodes.push(make_node(
            "concept:low",
            "Low",
            "smart home api",
            &[],
            &[],
            0.0,
            0.0,
            0,
        ));

        let results = find_all_matches_with_index(
            &graph,
            "smart home api",
            true,
            false,
            FindMode::Bm25,
            None,
            None,
        );
        let high_score = score_for(&results, "concept:high");
        let low_score = score_for(&results, "concept:low");
        assert!(high_score > low_score);
    }

    #[test]
    fn final_score_caps_authority_boost_for_weak_relevance() {
        // Weak relevance but huge feedback: authority must hit the cap.
        let weak = make_node(
            "concept:weak",
            "Weak",
            "smart home api",
            &[],
            &[],
            1.0,
            300.0,
            1,
        );
        let strong = make_node(
            "concept:strong",
            "Strong",
            "smart home api smart home api smart home api smart home api",
            &[],
            &[],
            0.5,
            0.0,
            0,
        );
        let candidates = vec![
            RawCandidate {
                node: &weak,
                raw_relevance: 12.0,
                lexical_boost: 0,
            },
            RawCandidate {
                node: &strong,
                raw_relevance: 100.0,
                lexical_boost: 0,
            },
        ];
        let scored = compose_scores(candidates);
        let weak_scored = scored
            .iter()
            .find(|item| item.node.id == "concept:weak")
            .expect("weak node");
        assert_eq!(
            weak_scored.breakdown.authority_applied,
            weak_scored.breakdown.authority_cap
        );
        assert!(weak_scored.breakdown.authority_raw > weak_scored.breakdown.authority_cap);
    }

    #[test]
    fn importance_and_feedback_boost_have_expected_ranges() {
        // Importance swings symmetrically to +/-IMPORTANCE_MAX_ABS_BOOST.
        let high_importance = make_node("concept:high", "High", "", &[], &[], 1.0, 0.0, 0);
        let low_importance = make_node("concept:low", "Low", "", &[], &[], 0.0, 0.0, 0);
        assert_eq!(importance_boost(&high_importance), 66);
        assert_eq!(importance_boost(&low_importance), -66);

        // Feedback scales with the average and saturates at +/-300.
        let positive = make_node("concept:pos", "Pos", "", &[], &[], 0.5, 1.0, 1);
        let negative = make_node("concept:neg", "Neg", "", &[], &[], 0.5, -2.0, 1);
        let saturated = make_node("concept:sat", "Sat", "", &[], &[], 0.5, 300.0, 1);
        assert_eq!(feedback_boost(&positive), 46);
        assert_eq!(feedback_boost(&negative), -92);
        assert_eq!(feedback_boost(&saturated), 300);
    }

    #[test]
    fn find_deduplicates_results_by_node_id_for_single_query() {
        // Two records with the same id: only one may survive dedup.
        let mut graph = GraphFile::new("test");
        graph.nodes.push(make_node(
            "concept:rule",
            "Business Rule",
            "Rule for billing decisions",
            &["Business rule validation"],
            &["billing rule"],
            0.5,
            0.0,
            0,
        ));
        graph.nodes.push(make_node(
            "concept:rule",
            "Business Rule Duplicate",
            "Duplicate record with same id",
            &["Business rule duplicate"],
            &[],
            0.5,
            0.0,
            0,
        ));

        let results = find_all_matches_with_index(
            &graph,
            "business rule",
            true,
            false,
            FindMode::Hybrid,
            None,
            None,
        );
        let rule_hits = results
            .iter()
            .filter(|item| item.node.id == "concept:rule")
            .count();
        assert_eq!(rule_hits, 1);
    }

    #[test]
    fn hybrid_score_does_not_change_when_only_vector_weight_changes() {
        // The vector weight is unused by the hybrid path, so varying it
        // must not affect scores.
        let mut graph = GraphFile::new("test");
        graph.nodes.push(make_node(
            "concept:auth",
            "Authentication Rule",
            "Business rule for authentication",
            &["auth rule"],
            &["login policy"],
            0.5,
            0.0,
            0,
        ));

        let with_vector = find_all_matches_with_index(
            &graph,
            "authentication rule",
            true,
            false,
            FindMode::Hybrid,
            None,
            Some(&FindTune {
                bm25: 0.55,
                fuzzy: 0.35,
                vector: 1.0,
            }),
        );
        let no_vector = find_all_matches_with_index(
            &graph,
            "authentication rule",
            true,
            false,
            FindMode::Hybrid,
            None,
            Some(&FindTune {
                bm25: 0.55,
                fuzzy: 0.35,
                vector: 0.0,
            }),
        );

        assert_eq!(with_vector.len(), 1);
        assert_eq!(no_vector.len(), 1);
        assert_eq!(with_vector[0].score, no_vector[0].score);
    }

    #[test]
    fn find_hides_metadata_nodes_unless_enabled() {
        // "^"-typed nodes are metadata and hidden by default.
        let mut graph = GraphFile::new("test");
        graph.nodes.push(make_node(
            "^:graph_info",
            "Graph Metadata",
            "Internal metadata",
            &["graph_uuid=abc123"],
            &[],
            0.5,
            0.0,
            0,
        ));
        if let Some(meta) = graph
            .nodes
            .iter_mut()
            .find(|node| node.id == "^:graph_info")
        {
            meta.r#type = "^".to_owned();
        }

        let hidden = find_all_matches_with_index(
            &graph,
            "graph uuid",
            true,
            false,
            FindMode::Hybrid,
            None,
            None,
        );
        assert!(hidden.is_empty());

        let shown = find_all_matches_with_index(
            &graph,
            "graph uuid",
            true,
            true,
            FindMode::Hybrid,
            None,
            None,
        );
        assert_eq!(shown.len(), 1);
        assert_eq!(shown[0].node.id, "^:graph_info");
    }
}