Skip to main content

oxilean_parse/expr_cache/
types.rs

1//! Auto-generated module
2//!
3//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5use super::functions::*;
6use std::collections::{HashMap, VecDeque};
7
8/// A cache entry with adaptive eviction support.
9#[allow(dead_code)]
10#[allow(missing_docs)]
11pub struct AdaptiveCacheEntry<V> {
12    #[allow(missing_docs)]
13    pub value: V,
14    #[allow(missing_docs)]
15    pub priority: CachePriority,
16    #[allow(missing_docs)]
17    pub access_count: u64,
18    #[allow(missing_docs)]
19    pub last_access: u64,
20    #[allow(missing_docs)]
21    pub insert_time: u64,
22}
23impl<V> AdaptiveCacheEntry<V> {
24    #[allow(dead_code)]
25    #[allow(missing_docs)]
26    pub fn new(value: V, priority: CachePriority, now: u64) -> Self {
27        Self {
28            value,
29            priority,
30            access_count: 0,
31            last_access: now,
32            insert_time: now,
33        }
34    }
35    #[allow(dead_code)]
36    #[allow(missing_docs)]
37    pub fn touch(&mut self, now: u64) {
38        self.access_count += 1;
39        self.last_access = now;
40    }
41    #[allow(dead_code)]
42    #[allow(missing_docs)]
43    pub fn eviction_score(&self, now: u64) -> f64 {
44        let age = (now - self.last_access) as f64;
45        let freq = (self.access_count + 1) as f64;
46        let boost = match self.priority {
47            CachePriority::Pinned => f64::INFINITY,
48            CachePriority::High => 8.0,
49            CachePriority::Normal => 4.0,
50            CachePriority::Low => 1.0,
51        };
52        (freq * boost) / (age + 1.0)
53    }
54}
/// Interned symbol with kind.
#[allow(dead_code)]
pub struct SymbolInterner {
    // Symbol text -> assigned ID.
    symbols: std::collections::HashMap<String, u32>,
    // ID (index) -> symbol text; IDs are dense and issued in order.
    by_id: Vec<String>,
}
impl SymbolInterner {
    /// Create an empty interner.
    #[allow(dead_code)]
    pub fn new() -> Self {
        Self {
            symbols: std::collections::HashMap::new(),
            by_id: Vec::new(),
        }
    }
    /// Return the ID already assigned to `name`, or assign the next one.
    #[allow(dead_code)]
    pub fn intern(&mut self, name: &str) -> u32 {
        match self.symbols.get(name) {
            Some(&existing) => existing,
            None => {
                let assigned = self.by_id.len() as u32;
                self.by_id.push(name.to_string());
                self.symbols.insert(name.to_string(), assigned);
                assigned
            }
        }
    }
    /// Resolve an ID back to its symbol text, if it was ever issued.
    #[allow(dead_code)]
    pub fn lookup(&self, id: u32) -> Option<&str> {
        self.by_id.get(id as usize).map(String::as_str)
    }
    /// Whether `name` has been interned before.
    #[allow(dead_code)]
    pub fn contains(&self, name: &str) -> bool {
        self.symbols.contains_key(name)
    }
    /// Number of distinct symbols interned so far.
    #[allow(dead_code)]
    pub fn size(&self) -> usize {
        self.by_id.len()
    }
}
98/// A multi-level cache: L1 (small, fast), L2 (larger, slower).
99#[allow(dead_code)]
100#[allow(missing_docs)]
101pub struct MultiLevelCache<V: Clone> {
102    l1: WindowCache<u64, V>,
103    l2: std::collections::HashMap<u64, V>,
104    l2_capacity: usize,
105    l1_hits: u64,
106    l2_hits: u64,
107    misses: u64,
108}
109impl<V: Clone> MultiLevelCache<V> {
110    #[allow(dead_code)]
111    #[allow(missing_docs)]
112    pub fn new(l1_cap: usize, l2_cap: usize) -> Self {
113        Self {
114            l1: WindowCache::new(l1_cap),
115            l2: std::collections::HashMap::new(),
116            l2_capacity: l2_cap,
117            l1_hits: 0,
118            l2_hits: 0,
119            misses: 0,
120        }
121    }
122    #[allow(dead_code)]
123    #[allow(missing_docs)]
124    pub fn insert(&mut self, key: u64, value: V) {
125        self.l1.insert(key, value.clone());
126        if self.l2.len() < self.l2_capacity {
127            self.l2.insert(key, value);
128        }
129    }
130    #[allow(dead_code)]
131    #[allow(missing_docs)]
132    pub fn get(&mut self, key: &u64) -> Option<V> {
133        if let Some(v) = self.l1.get(key) {
134            self.l1_hits += 1;
135            return Some(v.clone());
136        }
137        if let Some(v) = self.l2.get(key) {
138            self.l2_hits += 1;
139            return Some(v.clone());
140        }
141        self.misses += 1;
142        None
143    }
144    #[allow(dead_code)]
145    #[allow(missing_docs)]
146    pub fn l1_hit_rate(&self) -> f64 {
147        let total = self.l1_hits + self.l2_hits + self.misses;
148        if total == 0 {
149            0.0
150        } else {
151            self.l1_hits as f64 / total as f64
152        }
153    }
154}
/// Versioned cache.
///
/// Every entry is stamped with the version current at insert time;
/// bumping the version makes all older entries invisible to `get`.
#[allow(dead_code)]
pub struct VersionedCache<K: std::hash::Hash + Eq, V> {
    // key -> (value, version stamp at insertion)
    entries: std::collections::HashMap<K, (V, u64)>,
    version: u64,
}
impl<K: std::hash::Hash + Eq, V> VersionedCache<K, V> {
    /// Create an empty cache at version 0.
    #[allow(dead_code)]
    pub fn new() -> Self {
        Self {
            entries: std::collections::HashMap::new(),
            version: 0,
        }
    }
    /// Store `value` under `key`, stamped with the current version.
    #[allow(dead_code)]
    pub fn insert(&mut self, key: K, value: V) {
        let stamped = (value, self.version);
        self.entries.insert(key, stamped);
    }
    /// Fetch `key` only if its stamp matches the current version.
    #[allow(dead_code)]
    pub fn get(&self, key: &K) -> Option<&V> {
        match self.entries.get(key) {
            Some((value, stamp)) if *stamp == self.version => Some(value),
            _ => None,
        }
    }
    /// Logically invalidate every existing entry by advancing the version.
    #[allow(dead_code)]
    pub fn bump_version(&mut self) {
        self.version += 1;
    }
    /// Physically drop entries whose stamp is no longer current.
    #[allow(dead_code)]
    pub fn purge_stale(&mut self) {
        let current = self.version;
        self.entries.retain(|_, (_, stamp)| *stamp == current);
    }
    /// Current version counter.
    #[allow(dead_code)]
    pub fn version(&self) -> u64 {
        self.version
    }
}
/// Token frequency table.
#[allow(dead_code)]
#[allow(missing_docs)]
#[derive(Default)]
pub struct TokenFrequencyTable {
    counts: std::collections::HashMap<String, u64>,
}
impl TokenFrequencyTable {
    /// Create an empty table.
    #[allow(dead_code)]
    pub fn new() -> Self {
        Self::default()
    }
    /// Count one occurrence of `token`.
    #[allow(dead_code)]
    pub fn record(&mut self, token: &str) {
        *self.counts.entry(token.to_string()).or_insert(0) += 1;
    }
    /// Occurrences recorded for `token` (0 if never seen).
    #[allow(dead_code)]
    pub fn count(&self, token: &str) -> u64 {
        self.counts.get(token).copied().unwrap_or(0)
    }
    /// The `n` most frequent tokens, most frequent first.
    ///
    /// Fix: ties are now broken alphabetically so the result is
    /// deterministic — previously tied tokens appeared in `HashMap`
    /// iteration order, which varies from run to run.
    #[allow(dead_code)]
    pub fn top_n(&self, n: usize) -> Vec<(&str, u64)> {
        let mut pairs: Vec<_> = self.counts.iter().map(|(k, &v)| (k.as_str(), v)).collect();
        pairs.sort_by(|a, b| b.1.cmp(&a.1).then_with(|| a.0.cmp(b.0)));
        pairs.truncate(n);
        pairs
    }
    /// Number of distinct tokens seen.
    #[allow(dead_code)]
    pub fn unique_tokens(&self) -> usize {
        self.counts.len()
    }
    /// Total occurrences across all tokens.
    #[allow(dead_code)]
    pub fn total_tokens(&self) -> u64 {
        self.counts.values().sum()
    }
}
242/// Adaptive LRU cache that self-tunes capacity based on hit rate.
243#[allow(dead_code)]
244#[allow(missing_docs)]
245pub struct AdaptiveLruCache<V> {
246    inner: LruCache<V>,
247    min_capacity: usize,
248    max_capacity: usize,
249    hits: u64,
250    misses: u64,
251    tune_interval: u64,
252    ops: u64,
253}
254impl<V> AdaptiveLruCache<V> {
255    #[allow(dead_code)]
256    #[allow(missing_docs)]
257    pub fn new(initial: usize, min: usize, max: usize) -> Self {
258        Self {
259            inner: LruCache::new(initial),
260            min_capacity: min,
261            max_capacity: max,
262            hits: 0,
263            misses: 0,
264            tune_interval: 1000,
265            ops: 0,
266        }
267    }
268    #[allow(dead_code)]
269    #[allow(missing_docs)]
270    pub fn insert(&mut self, key: u64, value: V) {
271        self.inner.insert(key, value);
272        self.ops += 1;
273    }
274    #[allow(dead_code)]
275    #[allow(missing_docs)]
276    pub fn get(&mut self, key: u64) -> Option<&V> {
277        self.ops += 1;
278        match self.inner.get(key) {
279            Some(v) => {
280                self.hits += 1;
281                Some(v)
282            }
283            None => {
284                self.misses += 1;
285                None
286            }
287        }
288    }
289    #[allow(dead_code)]
290    #[allow(missing_docs)]
291    pub fn hit_rate(&self) -> f64 {
292        let total = self.hits + self.misses;
293        if total == 0 {
294            0.0
295        } else {
296            self.hits as f64 / total as f64
297        }
298    }
299    #[allow(dead_code)]
300    #[allow(missing_docs)]
301    pub fn len(&self) -> usize {
302        self.inner.len()
303    }
304    #[allow(dead_code)]
305    #[allow(missing_docs)]
306    pub fn is_empty(&self) -> bool {
307        self.inner.is_empty()
308    }
309}
/// Policy cache with pluggable eviction.
#[allow(dead_code)]
pub struct PolicyCache<K: std::hash::Hash + Eq, V> {
    // key -> (value, access_count, last-access tick)
    entries: std::collections::HashMap<K, (V, u64, u64)>,
    // Logical clock, bumped on every insert/get.
    clock: u64,
    capacity: usize,
}
impl<K: std::hash::Hash + Eq, V> PolicyCache<K, V> {
    /// Create an empty cache holding at most `capacity` entries.
    #[allow(dead_code)]
    pub fn new(capacity: usize) -> Self {
        Self {
            entries: std::collections::HashMap::new(),
            clock: 0,
            capacity,
        }
    }
    /// Insert `value` under `key`, evicting one arbitrary entry when full.
    ///
    /// Fix: the old implementation duplicated an existing key with
    /// `std::ptr::read` and then removed it from the map, dropping the key
    /// twice — undefined behavior (double free) for any heap-owning key
    /// type such as `String`. Eviction now uses `retain` to drop exactly
    /// one entry safely, and is skipped when `key` is already present
    /// (replacing an entry does not grow the map).
    #[allow(dead_code)]
    pub fn insert(&mut self, key: K, value: V) {
        self.clock += 1;
        if self.entries.len() >= self.capacity && !self.entries.contains_key(&key) {
            let mut dropped = false;
            self.entries.retain(|_, _| {
                if dropped {
                    true
                } else {
                    dropped = true;
                    false
                }
            });
        }
        self.entries.insert(key, (value, 0, self.clock));
    }
    /// Fetch `key`, bumping its access count and last-access tick.
    #[allow(dead_code)]
    pub fn get(&mut self, key: &K) -> Option<&V> {
        self.clock += 1;
        let now = self.clock;
        if let Some((v, ac, la)) = self.entries.get_mut(key) {
            *ac += 1;
            *la = now;
            Some(v)
        } else {
            None
        }
    }
    /// Number of cached entries.
    #[allow(dead_code)]
    pub fn len(&self) -> usize {
        self.entries.len()
    }
    /// True if the cache holds no entries.
    #[allow(dead_code)]
    pub fn is_empty(&self) -> bool {
        self.entries.is_empty()
    }
}
367/// String interner — maps strings to compact IDs
368#[allow(missing_docs)]
369pub struct StringInterner {
370    strings: Vec<String>,
371    map: HashMap<String, u32>,
372}
373impl StringInterner {
374    /// Create an empty interner
375    #[allow(missing_docs)]
376    pub fn new() -> Self {
377        StringInterner {
378            strings: Vec::new(),
379            map: HashMap::new(),
380        }
381    }
382    /// Intern a string, returning a deduplicated ID
383    #[allow(missing_docs)]
384    pub fn intern(&mut self, s: &str) -> InternedStr {
385        if let Some(&id) = self.map.get(s) {
386            return InternedStr(id);
387        }
388        let id = self.strings.len() as u32;
389        self.strings.push(s.to_string());
390        self.map.insert(s.to_string(), id);
391        InternedStr(id)
392    }
393    /// Look up the string for a given ID
394    #[allow(missing_docs)]
395    pub fn get(&self, id: InternedStr) -> Option<&str> {
396        self.strings.get(id.0 as usize).map(String::as_str)
397    }
398    /// Number of unique strings interned
399    #[allow(missing_docs)]
400    pub fn len(&self) -> usize {
401        self.strings.len()
402    }
403    /// True if no strings have been interned
404    #[allow(missing_docs)]
405    pub fn is_empty(&self) -> bool {
406        self.strings.is_empty()
407    }
408    /// True if the given string has already been interned
409    #[allow(missing_docs)]
410    pub fn contains(&self, s: &str) -> bool {
411        self.map.contains_key(s)
412    }
413    /// Estimated memory usage in bytes
414    #[allow(missing_docs)]
415    pub fn memory_bytes(&self) -> usize {
416        let string_bytes: usize = self.strings.iter().map(|s| s.len() + 24).sum();
417        let map_bytes = self.map.len() * 64;
418        string_bytes + map_bytes
419    }
420}
/// LFU eviction policy.
#[allow(dead_code)]
pub struct LfuEviction {
    // Minimum age-discounted frequency an entry needs to be kept.
    min_freq: u64,
    // How strongly idle time discounts the access count.
    age_factor: f64,
}
impl LfuEviction {
    /// Build a policy with a minimum effective frequency and an age penalty.
    #[allow(dead_code)]
    pub fn new(min_freq: u64, age_factor: f64) -> Self {
        Self {
            min_freq,
            age_factor,
        }
    }
    /// Evict when the age-discounted access frequency falls below
    /// `min_freq`: `access_count / (1 + idle_ticks * age_factor)`.
    #[allow(dead_code)]
    pub fn should_evict(&self, access_count: u64, last_access: u64, now: u64) -> bool {
        let idle = now.saturating_sub(last_access) as f64;
        let discounted = access_count as f64 / (1.0 + idle * self.age_factor);
        discounted < self.min_freq as f64
    }
    /// Human-readable policy identifier.
    #[allow(dead_code)]
    pub fn policy_name(&self) -> &'static str {
        "LFU-Age"
    }
}
450/// Bloom filter.
451#[allow(dead_code)]
452#[allow(missing_docs)]
453pub struct BloomFilter {
454    bits: Vec<u8>,
455    size_bits: usize,
456    num_hashes: usize,
457}
458impl BloomFilter {
459    #[allow(dead_code)]
460    #[allow(missing_docs)]
461    pub fn new(size_bits: usize, num_hashes: usize) -> Self {
462        let bytes = (size_bits + 7) / 8;
463        Self {
464            bits: vec![0u8; bytes],
465            size_bits,
466            num_hashes,
467        }
468    }
469    fn bit_indices(&self, key: u64) -> Vec<usize> {
470        (0..self.num_hashes)
471            .map(|i| {
472                let h = fnv1a_hash(&key.to_le_bytes()) ^ (i as u64 * 2654435761);
473                (h as usize) % self.size_bits
474            })
475            .collect()
476    }
477    #[allow(dead_code)]
478    #[allow(missing_docs)]
479    pub fn insert(&mut self, key: u64) {
480        for idx in self.bit_indices(key) {
481            self.bits[idx / 8] |= 1 << (idx % 8);
482        }
483    }
484    #[allow(dead_code)]
485    #[allow(missing_docs)]
486    pub fn may_contain(&self, key: u64) -> bool {
487        self.bit_indices(key)
488            .iter()
489            .all(|&idx| self.bits[idx / 8] & (1 << (idx % 8)) != 0)
490    }
491    #[allow(dead_code)]
492    #[allow(missing_docs)]
493    pub fn clear(&mut self) {
494        for b in &mut self.bits {
495            *b = 0;
496        }
497    }
498}
/// An expression "diff" cache: stores the diff between two versions of an expression.
///
/// Keys are unordered pairs of expression hashes: `(a, b)` and `(b, a)`
/// refer to the same entry.
#[allow(dead_code)]
pub struct ExprDiffCache {
    diffs: std::collections::HashMap<(u64, u64), String>,
    max_size: usize,
}
impl ExprDiffCache {
    /// Create a cache holding at most `max_size` diffs.
    #[allow(dead_code)]
    pub fn new(max_size: usize) -> Self {
        Self {
            diffs: std::collections::HashMap::new(),
            max_size,
        }
    }
    /// Store the diff between expressions `a` and `b` (order-insensitive).
    ///
    /// Fix: eviction now only runs when inserting a genuinely new pair at
    /// capacity — previously overwriting an existing pair also evicted an
    /// arbitrary entry (possibly an unrelated one) even though the map was
    /// not going to grow.
    #[allow(dead_code)]
    pub fn store(&mut self, a: u64, b: u64, diff: impl Into<String>) {
        let key = if a <= b { (a, b) } else { (b, a) };
        if self.diffs.len() >= self.max_size && !self.diffs.contains_key(&key) {
            if let Some(&victim) = self.diffs.keys().next() {
                self.diffs.remove(&victim);
            }
        }
        self.diffs.insert(key, diff.into());
    }
    /// Fetch the stored diff for the pair `(a, b)`, in either order.
    #[allow(dead_code)]
    pub fn lookup(&self, a: u64, b: u64) -> Option<&str> {
        let key = if a <= b { (a, b) } else { (b, a) };
        self.diffs.get(&key).map(|s| s.as_str())
    }
    /// Number of stored diffs.
    #[allow(dead_code)]
    pub fn size(&self) -> usize {
        self.diffs.len()
    }
}
/// A cache that serialises itself to a byte sequence for persistence.
#[allow(dead_code)]
pub struct PersistentCache {
    // Append-only log; the latest entry for a key wins on lookup.
    entries: Vec<(u64, String)>,
}
impl PersistentCache {
    /// Create an empty cache.
    #[allow(dead_code)]
    pub fn new() -> Self {
        Self {
            entries: Vec::new(),
        }
    }
    /// Append an entry; later inserts for the same key shadow earlier ones.
    #[allow(dead_code)]
    pub fn insert(&mut self, key: u64, value: impl Into<String>) {
        let owned = value.into();
        self.entries.push((key, owned));
    }
    /// Return the most recently inserted value for `key`.
    #[allow(dead_code)]
    pub fn lookup(&self, key: u64) -> Option<&str> {
        for (k, v) in self.entries.iter().rev() {
            if *k == key {
                return Some(v.as_str());
            }
        }
        None
    }
    /// Encode as `key:value` records joined by `|`.
    ///
    /// NOTE(review): the format has no escaping, so values containing `|`
    /// will not survive a serialize/deserialize round trip.
    #[allow(dead_code)]
    pub fn serialize(&self) -> String {
        let mut records = Vec::with_capacity(self.entries.len());
        for (k, v) in &self.entries {
            records.push(format!("{}:{}", k, v));
        }
        records.join("|")
    }
    /// Rebuild a cache from `serialize` output, skipping malformed records.
    #[allow(dead_code)]
    pub fn deserialize(s: &str) -> Self {
        let mut cache = Self::new();
        for record in s.split('|') {
            if let Some((k, v)) = record.split_once(':') {
                if let Ok(key) = k.parse::<u64>() {
                    cache.insert(key, v);
                }
            }
        }
        cache
    }
    /// Number of log entries (including shadowed duplicates).
    #[allow(dead_code)]
    pub fn entry_count(&self) -> usize {
        self.entries.len()
    }
}
594/// Parse result cache.
595#[allow(dead_code)]
596#[allow(missing_docs)]
597pub struct ParseResultCache {
598    entries: std::collections::HashMap<u64, ParseCacheEntry>,
599    max_entries: usize,
600    hits: u64,
601    misses: u64,
602}
603impl ParseResultCache {
604    #[allow(dead_code)]
605    #[allow(missing_docs)]
606    pub fn new(max_entries: usize) -> Self {
607        Self {
608            entries: std::collections::HashMap::new(),
609            max_entries,
610            hits: 0,
611            misses: 0,
612        }
613    }
614    #[allow(dead_code)]
615    #[allow(missing_docs)]
616    pub fn lookup(&mut self, source: &str) -> Option<&ParseCacheEntry> {
617        let key = fnv1a_hash(source.as_bytes());
618        if let Some(e) = self.entries.get_mut(&key) {
619            e.use_count += 1;
620            self.hits += 1;
621            Some(e)
622        } else {
623            self.misses += 1;
624            None
625        }
626    }
627    #[allow(dead_code)]
628    #[allow(missing_docs)]
629    pub fn store(&mut self, source: &str, result_repr: String, parse_time_us: u64) {
630        if self.entries.len() >= self.max_entries {
631            if let Some((&k, _)) = self.entries.iter().min_by_key(|(_, v)| v.use_count) {
632                self.entries.remove(&k);
633            }
634        }
635        let key = fnv1a_hash(source.as_bytes());
636        self.entries.insert(
637            key,
638            ParseCacheEntry {
639                source_hash: key,
640                result_repr,
641                parse_time_us,
642                use_count: 1,
643            },
644        );
645    }
646    #[allow(dead_code)]
647    #[allow(missing_docs)]
648    pub fn hit_rate(&self) -> f64 {
649        let total = self.hits + self.misses;
650        if total == 0 {
651            0.0
652        } else {
653            self.hits as f64 / total as f64
654        }
655    }
656    #[allow(dead_code)]
657    #[allow(missing_docs)]
658    pub fn stats(&self) -> (u64, u64, f64) {
659        (self.hits, self.misses, self.hit_rate())
660    }
661}
662/// LRU parse cache for declaration re-use
663#[allow(missing_docs)]
664pub struct ParseCache {
665    entries: HashMap<DeclHash, CacheEntry>,
666    max_entries: usize,
667    hits: u64,
668    misses: u64,
669}
670impl ParseCache {
671    /// Create a new cache with the given maximum number of entries
672    #[allow(missing_docs)]
673    pub fn new(max_entries: usize) -> Self {
674        ParseCache {
675            entries: HashMap::new(),
676            max_entries,
677            hits: 0,
678            misses: 0,
679        }
680    }
681    /// Look up a cached entry by source text
682    #[allow(missing_docs)]
683    pub fn lookup(&mut self, text: &str) -> Option<&CacheEntry> {
684        let hash = DeclHash::compute(text);
685        if let Some(entry) = self.entries.get_mut(&hash) {
686            entry.hit_count += 1;
687            self.hits += 1;
688            return self.entries.get(&hash);
689        }
690        self.misses += 1;
691        None
692    }
693    /// Insert a new entry into the cache
694    #[allow(missing_docs)]
695    pub fn insert(&mut self, text: &str, name: Option<String>) {
696        let hash = DeclHash::compute(text);
697        if self.entries.len() >= self.max_entries {
698            self.evict_lru();
699        }
700        let entry = CacheEntry {
701            hash: hash.clone(),
702            source: text.to_string(),
703            decl_name: name,
704            hit_count: 0,
705        };
706        self.entries.insert(hash, entry);
707    }
708    /// Fraction of lookups that were cache hits (0.0 if no lookups yet)
709    #[allow(missing_docs)]
710    pub fn hit_rate(&self) -> f64 {
711        let total = self.hits + self.misses;
712        if total == 0 {
713            0.0
714        } else {
715            self.hits as f64 / total as f64
716        }
717    }
718    /// Number of entries in the cache
719    #[allow(missing_docs)]
720    pub fn len(&self) -> usize {
721        self.entries.len()
722    }
723    /// True if the cache is empty
724    #[allow(missing_docs)]
725    pub fn is_empty(&self) -> bool {
726        self.entries.is_empty()
727    }
728    /// Evict the entry with the lowest hit_count if over capacity
729    #[allow(missing_docs)]
730    pub fn evict_lru(&mut self) {
731        if self.entries.is_empty() {
732            return;
733        }
734        let min_key = self
735            .entries
736            .iter()
737            .min_by_key(|(_, e)| e.hit_count)
738            .map(|(k, _)| k.clone());
739        if let Some(key) = min_key {
740            self.entries.remove(&key);
741        }
742    }
743    /// Clear all entries and reset statistics
744    #[allow(missing_docs)]
745    pub fn clear(&mut self) {
746        self.entries.clear();
747        self.hits = 0;
748        self.misses = 0;
749    }
750}
751/// Expression segment for incremental re-parsing.
752#[allow(dead_code)]
753#[allow(missing_docs)]
754#[derive(Debug, Clone)]
755pub struct ExprSegment {
756    #[allow(missing_docs)]
757    pub start: usize,
758    #[allow(missing_docs)]
759    pub end: usize,
760    #[allow(missing_docs)]
761    pub hash: u64,
762    #[allow(missing_docs)]
763    pub kind: SegmentKind,
764}
765impl ExprSegment {
766    #[allow(dead_code)]
767    #[allow(missing_docs)]
768    pub fn from_slice(src: &str, start: usize, end: usize, kind: SegmentKind) -> Self {
769        let hash = fnv1a_hash(&src.as_bytes()[start..end]);
770        Self {
771            start,
772            end,
773            hash,
774            kind,
775        }
776    }
777    #[allow(dead_code)]
778    #[allow(missing_docs)]
779    pub fn len(&self) -> usize {
780        self.end - self.start
781    }
782    #[allow(dead_code)]
783    #[allow(missing_docs)]
784    pub fn is_empty(&self) -> bool {
785        self.start == self.end
786    }
787}
788/// Cache warmup configuration.
789#[allow(dead_code)]
790#[allow(missing_docs)]
791#[derive(Clone, Debug)]
792pub struct CacheWarmup {
793    #[allow(missing_docs)]
794    pub sources: Vec<String>,
795    #[allow(missing_docs)]
796    pub priority: CachePriority,
797    #[allow(missing_docs)]
798    pub max_warmup_ms: u64,
799}
800impl CacheWarmup {
801    #[allow(dead_code)]
802    #[allow(missing_docs)]
803    pub fn new(sources: Vec<String>) -> Self {
804        Self {
805            sources,
806            priority: CachePriority::Normal,
807            max_warmup_ms: 100,
808        }
809    }
810    #[allow(dead_code)]
811    #[allow(missing_docs)]
812    pub fn with_priority(mut self, p: CachePriority) -> Self {
813        self.priority = p;
814        self
815    }
816    #[allow(dead_code)]
817    #[allow(missing_docs)]
818    pub fn source_count(&self) -> usize {
819        self.sources.len()
820    }
821}
/// Subexpression frequency map.
///
/// Counts occurrences of subexpressions, identified by their hash.
#[allow(dead_code)]
#[derive(Default)]
pub struct SubexprFrequencyMap {
    counts: std::collections::HashMap<u64, u32>,
}
impl SubexprFrequencyMap {
    /// Create an empty map.
    #[allow(dead_code)]
    pub fn new() -> Self {
        Self::default()
    }
    /// Count one occurrence of the subexpression with hash `hash`.
    #[allow(dead_code)]
    pub fn record(&mut self, hash: u64) {
        *self.counts.entry(hash).or_insert(0) += 1;
    }
    /// Occurrences recorded for `hash` (0 if never seen).
    #[allow(dead_code)]
    pub fn frequency(&self, hash: u64) -> u32 {
        self.counts.get(&hash).copied().unwrap_or(0)
    }
    /// The `k` most frequent subexpressions, most frequent first.
    ///
    /// Fix: ties are now broken by ascending hash so the result is
    /// deterministic — previously tied hashes appeared in `HashMap`
    /// iteration order, which varies from run to run.
    #[allow(dead_code)]
    pub fn top_k(&self, k: usize) -> Vec<(u64, u32)> {
        let mut pairs: Vec<_> = self.counts.iter().map(|(&h, &c)| (h, c)).collect();
        pairs.sort_by(|a, b| b.1.cmp(&a.1).then_with(|| a.0.cmp(&b.0)));
        pairs.truncate(k);
        pairs
    }
    /// Number of distinct subexpressions seen.
    #[allow(dead_code)]
    pub fn total_unique(&self) -> usize {
        self.counts.len()
    }
    /// Total occurrences across all subexpressions.
    #[allow(dead_code)]
    pub fn total_occurrences(&self) -> u64 {
        self.counts.values().map(|&c| c as u64).sum()
    }
}
/// Cache report with statistics.
#[allow(dead_code)]
#[derive(Debug)]
pub struct CacheReport {
    /// Number of entries currently cached.
    pub cache_size: usize,
    /// Total lookup hits.
    pub hit_count: u64,
    /// Total lookup misses.
    pub miss_count: u64,
    /// Total entries evicted.
    pub eviction_count: u64,
    /// Estimated memory footprint in bytes.
    pub memory_bytes: usize,
}
impl CacheReport {
    /// Assemble a report from raw counters.
    #[allow(dead_code)]
    pub fn new(size: usize, hits: u64, misses: u64, evictions: u64, mem: usize) -> Self {
        Self {
            cache_size: size,
            hit_count: hits,
            miss_count: misses,
            eviction_count: evictions,
            memory_bytes: mem,
        }
    }
    /// Fraction of lookups that hit (0.0 when no lookups were recorded).
    #[allow(dead_code)]
    pub fn hit_rate(&self) -> f64 {
        let lookups = self.hit_count + self.miss_count;
        if lookups == 0 {
            return 0.0;
        }
        self.hit_count as f64 / lookups as f64
    }
    /// One-line human-readable summary of all statistics.
    #[allow(dead_code)]
    pub fn summary(&self) -> String {
        format!(
            "size={} hits={} misses={} evictions={} hit_rate={:.1}% mem={}B",
            self.cache_size,
            self.hit_count,
            self.miss_count,
            self.eviction_count,
            self.hit_rate() * 100.0,
            self.memory_bytes
        )
    }
}
/// LRU cache implementation.
///
/// Eviction order is insertion order, refreshed when a key is re-inserted;
/// `get` does not move an entry to the back (it takes `&self`).
#[allow(dead_code)]
pub struct LruCache<V> {
    capacity: usize,
    map: std::collections::HashMap<u64, V>,
    // Keys in eviction order, oldest at the front.
    // Invariant: contains each key of `map` exactly once.
    order: std::collections::VecDeque<u64>,
}
impl<V> LruCache<V> {
    /// Create a cache that holds at most `capacity` entries.
    #[allow(dead_code)]
    pub fn new(capacity: usize) -> Self {
        Self {
            capacity,
            map: std::collections::HashMap::new(),
            order: std::collections::VecDeque::new(),
        }
    }
    /// Insert or replace `key`.
    ///
    /// Fix: the old code pushed `key` onto `order` unconditionally, so
    /// re-inserting an existing key left duplicate queue entries; a later
    /// eviction could pop a stale duplicate as a no-op, letting the map
    /// exceed `capacity` (or evict a just-updated live entry). Replacements
    /// now refresh the key's queue position instead, and eviction only runs
    /// for genuinely new keys.
    #[allow(dead_code)]
    pub fn insert(&mut self, key: u64, value: V) {
        if self.map.insert(key, value).is_some() {
            // Replacement: move the key to the back of the eviction queue.
            if let Some(pos) = self.order.iter().position(|&k| k == key) {
                self.order.remove(pos);
            }
            self.order.push_back(key);
            return;
        }
        if self.map.len() > self.capacity {
            if let Some(oldest) = self.order.pop_front() {
                self.map.remove(&oldest);
            }
        }
        self.order.push_back(key);
    }
    /// Fetch `key` without affecting eviction order.
    #[allow(dead_code)]
    pub fn get(&self, key: u64) -> Option<&V> {
        self.map.get(&key)
    }
    /// Number of cached entries.
    #[allow(dead_code)]
    pub fn len(&self) -> usize {
        self.map.len()
    }
    /// True when the cache holds nothing.
    #[allow(dead_code)]
    pub fn is_empty(&self) -> bool {
        self.map.is_empty()
    }
    /// Whether `key` is currently cached.
    #[allow(dead_code)]
    pub fn contains(&self, key: u64) -> bool {
        self.map.contains_key(&key)
    }
}
966/// Cache prewarmer.
967#[allow(dead_code)]
968#[allow(missing_docs)]
969pub struct CachePrewarmer {
970    sources: Vec<String>,
971    warmup_count: usize,
972}
973impl CachePrewarmer {
974    #[allow(dead_code)]
975    #[allow(missing_docs)]
976    pub fn new(sources: Vec<String>) -> Self {
977        Self {
978            sources,
979            warmup_count: 0,
980        }
981    }
982    #[allow(dead_code)]
983    #[allow(missing_docs)]
984    pub fn prewarm_all(&mut self, cache: &mut ParseResultCache) -> usize {
985        let mut warmed = 0;
986        for src in &self.sources {
987            if cache.lookup(src).is_none() {
988                let cs = compute_checksum(src);
989                cache.store(src, format!("warmed:{}", cs), 0);
990                warmed += 1;
991            }
992        }
993        self.warmup_count += warmed;
994        warmed
995    }
996    #[allow(dead_code)]
997    #[allow(missing_docs)]
998    pub fn total_warmed(&self) -> usize {
999        self.warmup_count
1000    }
1001}
/// Windowed cache metrics.
#[allow(dead_code)]
#[derive(Default, Debug, Clone)]
pub struct WindowedCacheMetrics {
    /// Hits recorded in the current window.
    pub window_hits: u64,
    /// Misses recorded in the current window.
    pub window_misses: u64,
    /// Evictions recorded in the current window.
    pub window_evictions: u64,
    /// Inserts recorded in the current window.
    pub window_inserts: u64,
    /// Configured window size; preserved across `reset`.
    pub window_size: usize,
}
impl WindowedCacheMetrics {
    /// Create zeroed metrics for a window of `window_size`.
    #[allow(dead_code)]
    pub fn new(window_size: usize) -> Self {
        Self {
            window_size,
            ..Default::default()
        }
    }
    /// Count one cache hit.
    #[allow(dead_code)]
    pub fn record_hit(&mut self) {
        self.window_hits += 1;
    }
    /// Count one cache miss.
    #[allow(dead_code)]
    pub fn record_miss(&mut self) {
        self.window_misses += 1;
    }
    /// Count one eviction.
    #[allow(dead_code)]
    pub fn record_eviction(&mut self) {
        self.window_evictions += 1;
    }
    /// Count one insert.
    #[allow(dead_code)]
    pub fn record_insert(&mut self) {
        self.window_inserts += 1;
    }
    /// Hit fraction for the current window (0.0 with no lookups).
    #[allow(dead_code)]
    pub fn hit_rate(&self) -> f64 {
        let lookups = self.window_hits + self.window_misses;
        if lookups == 0 {
            return 0.0;
        }
        self.window_hits as f64 / lookups as f64
    }
    /// Zero all counters, keeping the configured window size.
    #[allow(dead_code)]
    pub fn reset(&mut self) {
        *self = Self {
            window_size: self.window_size,
            ..Default::default()
        };
    }
}
/// TTL eviction policy.
///
/// An entry expires once it has gone unused for more than `ttl_ticks`
/// logical clock ticks.
#[allow(dead_code)]
#[allow(missing_docs)]
pub struct TtlEviction {
    ttl_ticks: u64,
}
impl TtlEviction {
    /// Policy with the given time-to-live, measured in ticks.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn new(ttl_ticks: u64) -> Self {
        Self { ttl_ticks }
    }
    /// True when the idle time since `last_access` exceeds the TTL.
    /// The access-count argument is ignored by this policy; clock skew
    /// (`now < last_access`) saturates to zero idle time.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn should_evict(&self, _ac: u64, last_access: u64, now: u64) -> bool {
        let idle = now.saturating_sub(last_access);
        idle > self.ttl_ticks
    }
    /// Human-readable policy name.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn policy_name(&self) -> &'static str {
        "TTL"
    }
}
/// Classify cache entry tier.
///
/// Usage tiers assigned to cache entries (e.g. for health reporting).
/// Variant meanings below are inferred from the names — confirm against
/// the code that performs the classification.
#[allow(dead_code)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum CacheTier {
    /// Presumably frequently-used entries that should stay resident.
    #[allow(missing_docs)]
    Hot,
    /// Presumably occasionally-used entries.
    #[allow(missing_docs)]
    Warm,
    /// Presumably rarely-used entries; likely eviction candidates.
    #[allow(missing_docs)]
    Cold,
    /// Presumably never re-accessed entries (wasted space).
    #[allow(missing_docs)]
    Dead,
}
/// Cached outcome of type-checking a single expression.
#[allow(dead_code)]
#[allow(missing_docs)]
#[derive(Clone, Debug)]
pub struct TypeCheckResult {
    /// Hash identifying the expression this result belongs to.
    #[allow(missing_docs)]
    pub expr_hash: u64,
    /// Rendered form of the inferred type.
    #[allow(missing_docs)]
    pub inferred_type: String,
    /// Whether the expression type-checked successfully.
    #[allow(missing_docs)]
    pub is_valid: bool,
    /// Duration of the check, in microseconds (per the `_us` suffix).
    #[allow(missing_docs)]
    pub check_time_us: u64,
}
1115/// Macro expansion cache.
1116#[allow(dead_code)]
1117#[allow(missing_docs)]
1118pub struct MacroExpansionCache {
1119    entries: std::collections::HashMap<u64, MacroExpansionEntry>,
1120    max_size: usize,
1121}
1122impl MacroExpansionCache {
1123    #[allow(dead_code)]
1124    #[allow(missing_docs)]
1125    pub fn new(max_size: usize) -> Self {
1126        Self {
1127            entries: std::collections::HashMap::new(),
1128            max_size,
1129        }
1130    }
1131    #[allow(dead_code)]
1132    #[allow(missing_docs)]
1133    pub fn lookup(&mut self, macro_hash: u64, arg_hash: u64) -> Option<&MacroExpansionEntry> {
1134        let key = mix_hashes(macro_hash, arg_hash);
1135        if let Some(e) = self.entries.get_mut(&key) {
1136            e.use_count += 1;
1137            Some(e)
1138        } else {
1139            None
1140        }
1141    }
1142    #[allow(dead_code)]
1143    #[allow(missing_docs)]
1144    pub fn store(&mut self, entry: MacroExpansionEntry) {
1145        if self.entries.len() >= self.max_size {
1146            if let Some((&k, _)) = self.entries.iter().min_by_key(|(_, v)| v.use_count) {
1147                self.entries.remove(&k);
1148            }
1149        }
1150        let key = mix_hashes(entry.macro_hash, entry.arg_hash);
1151        self.entries.insert(key, entry);
1152    }
1153    #[allow(dead_code)]
1154    #[allow(missing_docs)]
1155    pub fn total_stored(&self) -> usize {
1156        self.entries.len()
1157    }
1158}
/// Priority levels for cache entries.
///
/// Ordered from most evictable to least: `Low < Normal < High < Pinned`
/// (the derived `Ord` follows the explicit discriminants). `Pinned`
/// entries receive an infinite eviction-score boost in
/// `AdaptiveCacheEntry::eviction_score`, so they are never chosen.
#[allow(dead_code)]
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum CachePriority {
    /// Most evictable tier.
    #[allow(missing_docs)]
    Low = 0,
    /// Default tier.
    #[allow(missing_docs)]
    Normal = 1,
    /// Preferentially retained tier.
    #[allow(missing_docs)]
    High = 2,
    /// Never evicted (infinite score boost).
    #[allow(missing_docs)]
    Pinned = 3,
}
/// Interned string — lightweight identifier for a deduplicated string
///
/// Wraps the `u32` slot index assigned by an interner. The derived
/// `PartialEq`/`Hash` compare only the index, so two `InternedStr`s are
/// equal exactly when they carry the same slot — which is only
/// meaningful for ids produced by the same interner.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[allow(missing_docs)]
pub struct InternedStr(u32);
impl InternedStr {
    /// Return the raw index of this interned string
    #[allow(missing_docs)]
    pub fn idx(self) -> u32 {
        self.0
    }
}
/// Parse cache entry
///
/// Cached record for one declaration, addressed by the DJB2 hash of its
/// source text (see `DeclHash::compute`).
#[derive(Debug, Clone)]
#[allow(missing_docs)]
pub struct CacheEntry {
    /// Hash of the declaration source text
    pub hash: DeclHash,
    /// Original source text
    pub source: String,
    /// Declaration name if known
    pub decl_name: Option<String>,
    /// Number of cache hits for this entry
    pub hit_count: u32,
}
/// Syntactic category of an expression segment. Variant meanings below
/// follow the usual lambda-calculus reading of the names — confirm
/// against the segmenter that assigns them.
#[allow(dead_code)]
#[allow(missing_docs)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum SegmentKind {
    /// Presumably a leaf expression (constant, variable, literal).
    #[allow(missing_docs)]
    Atom,
    /// Presumably a function application.
    #[allow(missing_docs)]
    App,
    /// Presumably a lambda abstraction.
    #[allow(missing_docs)]
    Lambda,
    /// Presumably a dependent function type (Pi binder).
    #[allow(missing_docs)]
    Pi,
    /// Presumably a let binding.
    #[allow(missing_docs)]
    Let,
    /// Anything not covered by the other variants.
    #[allow(missing_docs)]
    Other,
}
/// Cache pressure monitor.
///
/// Lifetime counters for inserts, evictions and lookups, plus the peak
/// observed cache size. All counters start at zero via `Default`.
#[allow(dead_code)]
#[allow(missing_docs)]
#[derive(Default, Debug)]
pub struct CachePressureMonitor {
    #[allow(missing_docs)]
    pub evictions: u64,
    #[allow(missing_docs)]
    pub inserts: u64,
    #[allow(missing_docs)]
    pub lookups: u64,
    #[allow(missing_docs)]
    pub hits: u64,
    #[allow(missing_docs)]
    pub peak_size: usize,
}
impl CachePressureMonitor {
    /// Fresh monitor with all counters at zero.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn new() -> Self {
        Self::default()
    }
    /// Count one insert and track the high-water mark of the cache size.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn record_insert(&mut self, current_size: usize) {
        self.inserts += 1;
        self.peak_size = self.peak_size.max(current_size);
    }
    /// Count one eviction.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn record_eviction(&mut self) {
        self.evictions += 1;
    }
    /// Count one lookup, and one hit when `hit` is true.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn record_lookup(&mut self, hit: bool) {
        self.lookups += 1;
        self.hits += u64::from(hit);
    }
    /// Hits divided by lookups; 0.0 before any lookup is recorded.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn hit_rate(&self) -> f64 {
        match self.lookups {
            0 => 0.0,
            n => self.hits as f64 / n as f64,
        }
    }
    /// One-line human-readable summary of the counters.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn report(&self) -> String {
        let misses = self.lookups.saturating_sub(self.hits);
        format!(
            "hits={} misses={} hit_rate={:.1}% peak={}",
            self.hits,
            misses,
            self.hit_rate() * 100.0,
            self.peak_size
        )
    }
}
/// Window cache.
///
/// Bounded FIFO cache: holds at most `window` entries and evicts the
/// oldest-inserted key when a new key would exceed the bound.
/// (A `window` of 0 still admits one entry per insert, matching the
/// previous behavior.)
#[allow(dead_code)]
#[allow(missing_docs)]
pub struct WindowCache<K: std::hash::Hash + Eq + Clone, V> {
    map: std::collections::HashMap<K, V>,
    // Insertion order of the keys currently held in `map`; kept free of
    // duplicates so the front is always the true oldest resident key.
    order: VecDeque<K>,
    window: usize,
}
impl<K: std::hash::Hash + Eq + Clone, V> WindowCache<K, V> {
    /// Empty cache bounded to `window` entries.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn new(window: usize) -> Self {
        Self {
            map: std::collections::HashMap::new(),
            order: VecDeque::new(),
            window,
        }
    }
    /// Insert `key -> value`, evicting the oldest entry when full.
    ///
    /// Bug fix: re-inserting an existing key now only replaces its value.
    /// Previously it also pushed a duplicate key onto `order`; when the
    /// stale duplicate reached the front it evicted the live entry early
    /// (or no-op'd), which could make `map` grow past `window`.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn insert(&mut self, key: K, value: V) {
        if self.map.contains_key(&key) {
            self.map.insert(key, value);
            return;
        }
        if self.map.len() >= self.window {
            if let Some(old) = self.order.pop_front() {
                self.map.remove(&old);
            }
        }
        self.order.push_back(key.clone());
        self.map.insert(key, value);
    }
    /// Borrow the cached value for `key`, if present.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn get(&self, key: &K) -> Option<&V> {
        self.map.get(key)
    }
    /// Number of resident entries.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn len(&self) -> usize {
        self.map.len()
    }
    /// True when no entries are resident.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn is_empty(&self) -> bool {
        self.map.is_empty()
    }
}
/// Namespaced cache.
///
/// A two-level map: each namespace owns an independent `K -> V` table,
/// and a whole namespace can be invalidated at once.
#[allow(dead_code)]
#[allow(missing_docs)]
pub struct NamespacedCache<K: std::hash::Hash + Eq, V> {
    namespaces: std::collections::HashMap<String, std::collections::HashMap<K, V>>,
}
impl<K: std::hash::Hash + Eq, V> NamespacedCache<K, V> {
    /// Empty cache with no namespaces.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn new() -> Self {
        Self {
            namespaces: Default::default(),
        }
    }
    /// Insert `key -> value` into namespace `ns`, creating it on demand.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn insert(&mut self, ns: &str, key: K, value: V) {
        let bucket = self.namespaces.entry(ns.to_string()).or_default();
        bucket.insert(key, value);
    }
    /// Borrow the value stored under `key` in namespace `ns`, if any.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn get(&self, ns: &str, key: &K) -> Option<&V> {
        self.namespaces.get(ns).and_then(|bucket| bucket.get(key))
    }
    /// Drop the entire namespace `ns` and everything in it.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn invalidate_namespace(&mut self, ns: &str) {
        self.namespaces.remove(ns);
    }
    /// Number of live namespaces.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn namespace_count(&self) -> usize {
        self.namespaces.len()
    }
    /// Total entries summed across every namespace.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn total_entries(&self) -> usize {
        self.namespaces.values().map(|bucket| bucket.len()).sum()
    }
}
1366/// Expression pool with reference counting.
1367#[allow(dead_code)]
1368#[allow(missing_docs)]
1369pub struct ExprPool {
1370    exprs: std::collections::HashMap<u64, (String, usize)>,
1371}
1372impl ExprPool {
1373    #[allow(dead_code)]
1374    #[allow(missing_docs)]
1375    pub fn new() -> Self {
1376        Self {
1377            exprs: std::collections::HashMap::new(),
1378        }
1379    }
1380    #[allow(dead_code)]
1381    #[allow(missing_docs)]
1382    pub fn intern(&mut self, repr: String) -> u64 {
1383        let hash = fnv1a_hash(repr.as_bytes());
1384        let entry = self.exprs.entry(hash).or_insert_with(|| (repr, 0));
1385        entry.1 += 1;
1386        hash
1387    }
1388    #[allow(dead_code)]
1389    #[allow(missing_docs)]
1390    pub fn release(&mut self, hash: u64) {
1391        if let Some(entry) = self.exprs.get_mut(&hash) {
1392            if entry.1 > 0 {
1393                entry.1 -= 1;
1394            }
1395            if entry.1 == 0 {
1396                self.exprs.remove(&hash);
1397            }
1398        }
1399    }
1400    #[allow(dead_code)]
1401    #[allow(missing_docs)]
1402    pub fn get(&self, hash: u64) -> Option<&str> {
1403        self.exprs.get(&hash).map(|(s, _)| s.as_str())
1404    }
1405    #[allow(dead_code)]
1406    #[allow(missing_docs)]
1407    pub fn total_exprs(&self) -> usize {
1408        self.exprs.len()
1409    }
1410    #[allow(dead_code)]
1411    #[allow(missing_docs)]
1412    pub fn total_refs(&self) -> usize {
1413        self.exprs.values().map(|(_, rc)| rc).sum()
1414    }
1415}
1416/// Segment table for cache invalidation.
1417#[allow(dead_code)]
1418#[allow(missing_docs)]
1419pub struct SegmentTable {
1420    segments: Vec<ExprSegment>,
1421    hashes_by_range: std::collections::BTreeMap<(usize, usize), u64>,
1422}
1423impl SegmentTable {
1424    #[allow(dead_code)]
1425    #[allow(missing_docs)]
1426    pub fn new() -> Self {
1427        Self {
1428            segments: Vec::new(),
1429            hashes_by_range: std::collections::BTreeMap::new(),
1430        }
1431    }
1432    #[allow(dead_code)]
1433    #[allow(missing_docs)]
1434    pub fn add(&mut self, seg: ExprSegment) {
1435        self.hashes_by_range.insert((seg.start, seg.end), seg.hash);
1436        self.segments.push(seg);
1437    }
1438    #[allow(dead_code)]
1439    #[allow(missing_docs)]
1440    pub fn invalidate_range(&mut self, start: usize, end: usize) {
1441        self.segments.retain(|s| s.end <= start || s.start >= end);
1442        let keys: Vec<_> = self
1443            .hashes_by_range
1444            .range((start, 0)..=(end, usize::MAX))
1445            .map(|(k, _)| *k)
1446            .collect();
1447        for k in keys {
1448            self.hashes_by_range.remove(&k);
1449        }
1450    }
1451    #[allow(dead_code)]
1452    #[allow(missing_docs)]
1453    pub fn lookup_hash(&self, start: usize, end: usize) -> Option<u64> {
1454        self.hashes_by_range.get(&(start, end)).copied()
1455    }
1456    #[allow(dead_code)]
1457    #[allow(missing_docs)]
1458    pub fn count(&self) -> usize {
1459        self.segments.len()
1460    }
1461}
1462/// Memo table for parser results.
1463#[allow(dead_code)]
1464#[allow(missing_docs)]
1465pub struct MemoTable {
1466    entries: std::collections::HashMap<(usize, String), MemoEntry>,
1467}
1468impl MemoTable {
1469    #[allow(dead_code)]
1470    #[allow(missing_docs)]
1471    pub fn new() -> Self {
1472        Self {
1473            entries: std::collections::HashMap::new(),
1474        }
1475    }
1476    #[allow(dead_code)]
1477    #[allow(missing_docs)]
1478    pub fn lookup(&self, pos: usize, rule: &str) -> Option<&MemoEntry> {
1479        self.entries.get(&(pos, rule.to_string()))
1480    }
1481    #[allow(dead_code)]
1482    #[allow(missing_docs)]
1483    pub fn store(&mut self, pos: usize, rule: impl Into<String>, entry: MemoEntry) {
1484        self.entries.insert((pos, rule.into()), entry);
1485    }
1486    #[allow(dead_code)]
1487    #[allow(missing_docs)]
1488    pub fn size(&self) -> usize {
1489        self.entries.len()
1490    }
1491}
/// Global expression table (hash-consing).
///
/// Assigns a stable sequential id to every distinct representation
/// string; interning the same string twice yields the same id. Note the
/// `by_hash` field is keyed by the assigned id, not a content hash.
#[allow(dead_code)]
#[allow(missing_docs)]
pub struct GlobalExprTable {
    by_repr: std::collections::HashMap<String, u64>,
    by_hash: std::collections::HashMap<u64, String>,
    next_id: u64,
}
impl GlobalExprTable {
    /// Empty table; ids start at 0.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn new() -> Self {
        Self {
            by_repr: Default::default(),
            by_hash: Default::default(),
            next_id: 0,
        }
    }
    /// Id for `repr`, assigning the next fresh id on first sight.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn intern(&mut self, repr: impl Into<String>) -> u64 {
        let repr = repr.into();
        if let Some(&existing) = self.by_repr.get(&repr) {
            return existing;
        }
        let fresh = self.next_id;
        self.next_id += 1;
        self.by_hash.insert(fresh, repr.clone());
        self.by_repr.insert(repr, fresh);
        fresh
    }
    /// The representation registered under `id`, if any.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn lookup_repr(&self, id: u64) -> Option<&str> {
        self.by_hash.get(&id).map(String::as_str)
    }
    /// Number of distinct interned expressions.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn table_size(&self) -> usize {
        self.by_hash.len()
    }
}
/// String pool for deduplication.
///
/// Remembers every string ever interned and tallies the bytes that a
/// repeat interning *would* have duplicated. Note `intern` still returns
/// a fresh owned `String` each call; `total_saved_bytes` is an estimate
/// of potential savings, not actual ones.
#[allow(dead_code)]
#[allow(missing_docs)]
pub struct StringPool {
    pool: std::collections::HashSet<String>,
    total_saved_bytes: usize,
}
impl StringPool {
    /// Empty pool with a zeroed savings counter.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn new() -> Self {
        Self {
            pool: Default::default(),
            total_saved_bytes: 0,
        }
    }
    /// Record `s` in the pool (counting its length as "saved" on a
    /// repeat) and hand back an owned copy.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn intern(&mut self, s: &str) -> String {
        if self.pool.contains(s) {
            self.total_saved_bytes += s.len();
        } else {
            self.pool.insert(s.to_owned());
        }
        s.to_owned()
    }
    /// Number of distinct strings seen.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn count(&self) -> usize {
        self.pool.len()
    }
    /// Total bytes of repeat internings.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn saved_bytes(&self) -> usize {
        self.total_saved_bytes
    }
}
/// Alpha-equality cache.
///
/// Memoizes pairwise equality verdicts between expression hashes. Pairs
/// are stored under an unordered canonical key, so `(a, b)` and `(b, a)`
/// share one verdict; marking a pair one way retracts the opposite mark.
#[allow(dead_code)]
#[allow(missing_docs)]
pub struct AlphaEqCache {
    known_equal: std::collections::HashSet<(u64, u64)>,
    known_inequal: std::collections::HashSet<(u64, u64)>,
}
impl AlphaEqCache {
    /// Empty cache.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn new() -> Self {
        Self {
            known_equal: Default::default(),
            known_inequal: Default::default(),
        }
    }
    // Canonical unordered key for a pair of hashes.
    fn pair_key(a: u64, b: u64) -> (u64, u64) {
        (a.min(b), a.max(b))
    }
    /// Record that `a` and `b` are alpha-equal.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn mark_equal(&mut self, a: u64, b: u64) {
        let key = Self::pair_key(a, b);
        self.known_equal.insert(key);
        self.known_inequal.remove(&key);
    }
    /// Record that `a` and `b` are NOT alpha-equal.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn mark_inequal(&mut self, a: u64, b: u64) {
        let key = Self::pair_key(a, b);
        self.known_inequal.insert(key);
        self.known_equal.remove(&key);
    }
    /// Known verdict for the pair, or `None` if undetermined.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn query(&self, a: u64, b: u64) -> Option<bool> {
        let key = Self::pair_key(a, b);
        match (
            self.known_equal.contains(&key),
            self.known_inequal.contains(&key),
        ) {
            (true, _) => Some(true),
            (_, true) => Some(false),
            _ => None,
        }
    }
    /// `(equal pairs, inequal pairs)` currently memoized.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn stats(&self) -> (usize, usize) {
        (self.known_equal.len(), self.known_inequal.len())
    }
}
/// Token sequence hash for declaration fingerprinting
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[allow(missing_docs)]
pub struct DeclHash(u64);
impl DeclHash {
    /// Compute a DJB2-style hash of the text bytes
    /// (seed 5381, step `h * 33 + byte`, wrapping on overflow).
    #[allow(missing_docs)]
    pub fn compute(text: &str) -> Self {
        let hash = text
            .bytes()
            .fold(5381u64, |acc, byte| {
                acc.wrapping_mul(33).wrapping_add(u64::from(byte))
            });
        DeclHash(hash)
    }
    /// Raw hash value
    #[allow(missing_docs)]
    pub fn value(&self) -> u64 {
        self.0
    }
}
/// Expression location index: maps hash to source locations.
///
/// Records every `(start, end)` span at which a given expression hash
/// was seen, in recording order.
#[allow(dead_code)]
#[allow(missing_docs)]
pub struct ExprLocationIndex {
    index: std::collections::HashMap<u64, Vec<(usize, usize)>>,
}
impl ExprLocationIndex {
    /// Empty index.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn new() -> Self {
        Self {
            index: Default::default(),
        }
    }
    /// Append the span `[start, end]` to the hash's occurrence list.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn record(&mut self, hash: u64, start: usize, end: usize) {
        self.index.entry(hash).or_default().push((start, end));
    }
    /// All recorded spans for `hash` (empty slice when unseen).
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn locations(&self, hash: u64) -> &[(usize, usize)] {
        match self.index.get(&hash) {
            Some(spans) => spans,
            None => &[],
        }
    }
    /// How many times `hash` has been recorded.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn count_occurrences(&self, hash: u64) -> usize {
        self.locations(hash).len()
    }
    /// Total spans recorded across all hashes.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn total_tracked(&self) -> usize {
        self.index.values().map(Vec::len).sum()
    }
}
/// Cache health report.
///
/// Snapshot of how cache entries break down by usage tier, plus an
/// estimated percentage of wasted capacity.
#[allow(dead_code)]
#[allow(missing_docs)]
#[derive(Debug)]
pub struct CacheHealthReport {
    #[allow(missing_docs)]
    pub total_entries: usize,
    #[allow(missing_docs)]
    pub hot_entries: usize,
    #[allow(missing_docs)]
    pub warm_entries: usize,
    #[allow(missing_docs)]
    pub cold_entries: usize,
    #[allow(missing_docs)]
    pub dead_entries: usize,
    #[allow(missing_docs)]
    pub estimated_waste_pct: f64,
}
impl CacheHealthReport {
    /// True while estimated waste stays strictly below 50%.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn is_healthy(&self) -> bool {
        const WASTE_LIMIT_PCT: f64 = 50.0;
        self.estimated_waste_pct < WASTE_LIMIT_PCT
    }
    /// One-line human-readable summary of the tier counts and waste.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn summary(&self) -> String {
        format!(
            "total={} hot={} warm={} cold={} dead={} waste={:.1}%",
            self.total_entries,
            self.hot_entries,
            self.warm_entries,
            self.cold_entries,
            self.dead_entries,
            self.estimated_waste_pct
        )
    }
}
/// A memoized parser outcome: where the rule stopped, what it produced,
/// and whether it succeeded (the `success` flag allows failures to be
/// memoized as well).
#[allow(dead_code)]
#[allow(missing_docs)]
#[derive(Clone, Debug)]
pub struct MemoEntry {
    /// Input position after the rule ran.
    #[allow(missing_docs)]
    pub end_pos: usize,
    /// Rendered parse result.
    #[allow(missing_docs)]
    pub result: String,
    /// Whether the parse attempt succeeded.
    #[allow(missing_docs)]
    pub success: bool,
}
/// A cached parse outcome plus bookkeeping counters.
#[allow(dead_code)]
#[allow(missing_docs)]
pub struct ParseCacheEntry {
    /// Hash of the source text that produced this result.
    #[allow(missing_docs)]
    pub source_hash: u64,
    /// String rendering of the parse result.
    #[allow(missing_docs)]
    pub result_repr: String,
    /// Duration of the original parse, in microseconds (per the suffix).
    #[allow(missing_docs)]
    pub parse_time_us: u64,
    /// Number of times this entry has been reused.
    #[allow(missing_docs)]
    pub use_count: u64,
}
/// A sliding window for recent token sequences.
///
/// Keeps at most `capacity` most-recent tokens; pushing onto a full
/// window drops the oldest token.
#[allow(dead_code)]
#[allow(missing_docs)]
pub struct TokenWindow {
    tokens: std::collections::VecDeque<String>,
    capacity: usize,
}
impl TokenWindow {
    /// Empty window bounded to `capacity` tokens.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn new(capacity: usize) -> Self {
        Self {
            tokens: Default::default(),
            capacity,
        }
    }
    /// Append a token, discarding from the front to stay within capacity.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn push(&mut self, tok: impl Into<String>) {
        self.tokens.push_back(tok.into());
        while self.tokens.len() > self.capacity {
            self.tokens.pop_front();
        }
    }
    /// Borrowed view of the window contents, oldest first.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn as_slice(&self) -> Vec<&str> {
        self.tokens.iter().map(String::as_str).collect()
    }
    /// True when `tok` is currently somewhere in the window.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn contains(&self, tok: &str) -> bool {
        self.tokens.iter().any(|t| t.as_str() == tok)
    }
    /// Current number of tokens held.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn len(&self) -> usize {
        self.tokens.len()
    }
    /// True when the window holds no tokens.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn is_empty(&self) -> bool {
        self.tokens.is_empty()
    }
    /// True when the window has reached capacity.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn is_full(&self) -> bool {
        self.tokens.len() == self.capacity
    }
}
1785/// Two-queue cache.
1786#[allow(dead_code)]
1787#[allow(missing_docs)]
1788pub struct TwoQueueCache<K: std::hash::Hash + Eq + Clone, V> {
1789    capacity: usize,
1790    clock: u64,
1791    main: std::collections::HashMap<K, AdaptiveCacheEntry<V>>,
1792    probation: std::collections::VecDeque<K>,
1793    protected: std::collections::VecDeque<K>,
1794    probation_cap: usize,
1795}
1796impl<K: std::hash::Hash + Eq + Clone, V> TwoQueueCache<K, V> {
1797    #[allow(dead_code)]
1798    #[allow(missing_docs)]
1799    pub fn new(capacity: usize) -> Self {
1800        Self {
1801            capacity,
1802            clock: 0,
1803            main: std::collections::HashMap::new(),
1804            probation: std::collections::VecDeque::new(),
1805            protected: std::collections::VecDeque::new(),
1806            probation_cap: capacity,
1807        }
1808    }
1809    #[allow(dead_code)]
1810    #[allow(missing_docs)]
1811    pub fn insert(&mut self, key: K, value: V) {
1812        self.clock += 1;
1813        let entry = AdaptiveCacheEntry::new(value, CachePriority::Normal, self.clock);
1814        if self.main.len() >= self.capacity {
1815            if let Some(k) = self.probation.pop_front() {
1816                self.main.remove(&k);
1817            } else if let Some(k) = self.protected.pop_front() {
1818                self.main.remove(&k);
1819            }
1820        }
1821        self.probation.push_back(key.clone());
1822        if self.probation.len() > self.probation_cap {
1823            if let Some(old) = self.probation.pop_front() {
1824                self.main.remove(&old);
1825            }
1826        }
1827        self.main.insert(key, entry);
1828    }
1829    #[allow(dead_code)]
1830    #[allow(missing_docs)]
1831    pub fn get(&mut self, key: &K) -> Option<&V> {
1832        self.clock += 1;
1833        let now = self.clock;
1834        if let Some(entry) = self.main.get_mut(key) {
1835            entry.touch(now);
1836            Some(&entry.value)
1837        } else {
1838            None
1839        }
1840    }
1841    #[allow(dead_code)]
1842    #[allow(missing_docs)]
1843    pub fn len(&self) -> usize {
1844        self.main.len()
1845    }
1846    #[allow(dead_code)]
1847    #[allow(missing_docs)]
1848    pub fn is_empty(&self) -> bool {
1849        self.main.is_empty()
1850    }
1851}
/// Interning statistics.
///
/// Counts interner traffic: total calls, distinct strings, and the bytes
/// avoided by serving repeats from the pool.
#[allow(dead_code)]
#[allow(missing_docs)]
#[derive(Default, Debug)]
pub struct InterningStats {
    #[allow(missing_docs)]
    pub total_intern_calls: u64,
    #[allow(missing_docs)]
    pub unique_strings: u64,
    #[allow(missing_docs)]
    pub bytes_saved: u64,
}
impl InterningStats {
    /// Zeroed statistics.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn new() -> Self {
        Self::default()
    }
    /// Count a repeat interning of a string `str_len` bytes long.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn record_hit(&mut self, str_len: usize) {
        self.total_intern_calls += 1;
        self.bytes_saved += str_len as u64;
    }
    /// Count a first-time interning of a new string.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn record_new(&mut self) {
        self.total_intern_calls += 1;
        self.unique_strings += 1;
    }
    /// Calls per unique string; 0.0 before anything is interned.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn dedup_ratio(&self) -> f64 {
        match self.unique_strings {
            0 => 0.0,
            uniques => self.total_intern_calls as f64 / uniques as f64,
        }
    }
}
1892/// Cache key builder using a chain of hash operations.
1893#[allow(dead_code)]
1894#[allow(missing_docs)]
1895pub struct CacheKeyBuilder {
1896    hash: u64,
1897}
1898impl CacheKeyBuilder {
1899    #[allow(dead_code)]
1900    #[allow(missing_docs)]
1901    pub fn new() -> Self {
1902        Self {
1903            hash: 0xcbf29ce484222325,
1904        }
1905    }
1906    #[allow(dead_code)]
1907    #[allow(missing_docs)]
1908    pub fn with_str(self, s: &str) -> Self {
1909        Self {
1910            hash: mix_hashes(self.hash, fnv1a_hash(s.as_bytes())),
1911        }
1912    }
1913    #[allow(dead_code)]
1914    #[allow(missing_docs)]
1915    pub fn with_u64(self, n: u64) -> Self {
1916        Self {
1917            hash: mix_hashes(self.hash, n),
1918        }
1919    }
1920    #[allow(dead_code)]
1921    #[allow(missing_docs)]
1922    pub fn with_usize(self, n: usize) -> Self {
1923        self.with_u64(n as u64)
1924    }
1925    #[allow(dead_code)]
1926    #[allow(missing_docs)]
1927    pub fn build(self) -> u64 {
1928        self.hash
1929    }
1930}
/// Nesting depth tracker.
///
/// Guards recursive descent against runaway nesting: `enter` fails once
/// `max_depth` levels are active, and the peak depth ever reached is
/// remembered for diagnostics.
#[allow(dead_code)]
#[allow(missing_docs)]
pub struct NestingDepthTracker {
    current_depth: usize,
    max_depth: usize,
    peak_depth: usize,
}
impl NestingDepthTracker {
    /// Tracker allowing at most `max_depth` simultaneous levels.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn new(max_depth: usize) -> Self {
        Self {
            current_depth: 0,
            max_depth,
            peak_depth: 0,
        }
    }
    /// Descend one level, failing when the limit is already reached.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn enter(&mut self) -> Result<(), &'static str> {
        if !self.is_safe() {
            return Err("max nesting exceeded");
        }
        self.current_depth += 1;
        self.peak_depth = self.peak_depth.max(self.current_depth);
        Ok(())
    }
    /// Ascend one level; a no-op at depth zero.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn exit(&mut self) {
        self.current_depth = self.current_depth.saturating_sub(1);
    }
    /// Currently active nesting depth.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn depth(&self) -> usize {
        self.current_depth
    }
    /// Deepest nesting ever reached.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn peak(&self) -> usize {
        self.peak_depth
    }
    /// True while another `enter` would still succeed.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn is_safe(&self) -> bool {
        self.current_depth < self.max_depth
    }
}
/// Rolling hash.
///
/// Polynomial (Rabin-Karp style) rolling hash over a sliding byte
/// window: base 257, modulus 1_000_000_007. `push` appends a byte and,
/// once the window exceeds `window_size`, subtracts the oldest byte's
/// contribution so the hash always covers the last `window_size` bytes.
#[allow(dead_code)]
#[allow(missing_docs)]
pub struct RollingHash {
    base: u64,
    modulus: u64,
    current: u64,
    window_size: usize,
    window: VecDeque<u8>,
    // base^window_size mod modulus: the weight the oldest byte carries
    // AFTER the newest byte has been folded in (see `push`).
    base_pow: u64,
}
impl RollingHash {
    /// Hasher over a window of `window_size` bytes.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn new(window_size: usize) -> Self {
        let base: u64 = 257;
        let modulus: u64 = 1_000_000_007;
        // Bug fix: the removal multiplier must be base^window_size, not
        // base^(window_size - 1). `push` multiplies the whole window by
        // `base` once more *before* popping the oldest byte, so at removal
        // time that byte's coefficient is base^window_size. (With the old
        // exponent, e.g. window_size = 1 gave hash(b0*B + b1 - b0) instead
        // of hash(b1).)
        let mut base_pow = 1u64;
        for _ in 0..window_size {
            base_pow = base_pow.wrapping_mul(base) % modulus;
        }
        Self {
            base,
            modulus,
            current: 0,
            window_size,
            window: VecDeque::new(),
            base_pow,
        }
    }
    /// Append `byte`, dropping the oldest byte once the window is full;
    /// returns the hash of the current window contents.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn push(&mut self, byte: u8) -> u64 {
        self.current = (self.current.wrapping_mul(self.base) + byte as u64) % self.modulus;
        self.window.push_back(byte);
        if self.window.len() > self.window_size {
            let old = self
                .window
                .pop_front()
                .expect("window len > window_size >= 1");
            let rem = self.base_pow.wrapping_mul(old as u64) % self.modulus;
            // Add `modulus` before subtracting to stay non-negative in u64.
            self.current = (self.current + self.modulus - rem) % self.modulus;
        }
        self.current
    }
    /// Hash of the bytes currently in the window.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn current_hash(&self) -> u64 {
        self.current
    }
    /// True once `window_size` bytes are being hashed.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn window_full(&self) -> bool {
        self.window.len() == self.window_size
    }
}
/// Cache coverage report.
///
/// Byte-level accounting of how much source was served from cache
/// versus re-processed; `total_source_bytes` is the running sum of both.
#[allow(dead_code)]
#[allow(missing_docs)]
#[derive(Debug, Default)]
pub struct CacheCoverageReport {
    #[allow(missing_docs)]
    pub total_source_bytes: usize,
    #[allow(missing_docs)]
    pub cached_bytes: usize,
    #[allow(missing_docs)]
    pub uncached_bytes: usize,
}
impl CacheCoverageReport {
    /// Zeroed report.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn new() -> Self {
        Default::default()
    }
    /// Account for `bytes` of source served from cache.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn record_cached(&mut self, bytes: usize) {
        self.total_source_bytes += bytes;
        self.cached_bytes += bytes;
    }
    /// Account for `bytes` of source that missed the cache.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn record_uncached(&mut self, bytes: usize) {
        self.total_source_bytes += bytes;
        self.uncached_bytes += bytes;
    }
    /// Cached fraction as a percentage; 0.0 before any bytes are seen.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn coverage_pct(&self) -> f64 {
        if self.total_source_bytes == 0 {
            return 0.0;
        }
        self.cached_bytes as f64 / self.total_source_bytes as f64 * 100.0
    }
    /// One-line human-readable summary.
    #[allow(dead_code)]
    #[allow(missing_docs)]
    pub fn summary(&self) -> String {
        format!(
            "coverage={:.1}% cached={}B total={}B",
            self.coverage_pct(),
            self.cached_bytes,
            self.total_source_bytes
        )
    }
}
/// Hash set 64.
///
/// Thin wrapper over `std::collections::HashSet<u64>` for tracking
/// 64-bit hash values.
#[allow(dead_code)]
pub struct HashSet64 {
    /// Backing set of hash values.
    inner: std::collections::HashSet<u64>,
}
impl HashSet64 {
    /// Creates an empty set.
    #[allow(dead_code)]
    pub fn new() -> Self {
        let inner = std::collections::HashSet::new();
        Self { inner }
    }
    /// Inserts `h`; returns `true` when it was not already present.
    #[allow(dead_code)]
    pub fn insert(&mut self, h: u64) -> bool {
        self.inner.insert(h)
    }
    /// Returns `true` if `h` has been inserted.
    #[allow(dead_code)]
    pub fn contains(&self, h: u64) -> bool {
        self.inner.contains(&h)
    }
    /// Number of distinct hashes stored.
    #[allow(dead_code)]
    pub fn len(&self) -> usize {
        self.inner.len()
    }
    /// Returns `true` when no hashes are stored.
    #[allow(dead_code)]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Removes every stored hash.
    #[allow(dead_code)]
    pub fn clear(&mut self) {
        self.inner.clear();
    }
}
/// A cached macro expansion, keyed by the macro and its arguments.
///
/// NOTE(review): field meanings below are inferred from the names —
/// confirm against the code that populates this struct.
#[allow(dead_code)]
#[allow(missing_docs)]
#[derive(Clone, Debug)]
pub struct MacroExpansionEntry {
    #[allow(missing_docs)]
    /// Hash identifying the macro definition.
    pub macro_hash: u64,
    #[allow(missing_docs)]
    /// Hash of the arguments the macro was invoked with.
    pub arg_hash: u64,
    #[allow(missing_docs)]
    /// The expanded text produced by the macro.
    pub expansion: String,
    #[allow(missing_docs)]
    /// Nesting depth at which this expansion was produced.
    pub expansion_depth: usize,
    #[allow(missing_docs)]
    /// Number of times this cached expansion has been reused.
    pub use_count: u32,
}
/// Bump allocator for string storage.
///
/// Strings are copied into a fixed-size byte buffer front to back and
/// addressed by `(pos, len)` pairs; `reset` reclaims the whole buffer
/// at once. The buffer never grows after construction.
#[allow(dead_code)]
pub struct BumpAllocator {
    /// Fixed, zero-initialized backing storage.
    buffer: Vec<u8>,
    /// Next free position in `buffer` (== number of bytes in use).
    offset: usize,
}
impl BumpAllocator {
    /// Creates an allocator with `capacity` bytes of zeroed storage.
    #[allow(dead_code)]
    pub fn new(capacity: usize) -> Self {
        Self {
            buffer: vec![0u8; capacity],
            offset: 0,
        }
    }
    /// Copies `s` into the buffer and returns its starting position,
    /// or `None` when there is not enough room left.
    #[allow(dead_code)]
    pub fn alloc_str(&mut self, s: &str) -> Option<usize> {
        let bytes = s.as_bytes();
        // checked_add: the unchecked `offset + len` would panic in debug
        // builds and wrap in release builds on overflow; treat overflow
        // as "does not fit" instead.
        let end = self.offset.checked_add(bytes.len())?;
        if end > self.buffer.len() {
            return None;
        }
        let pos = self.offset;
        self.buffer[pos..end].copy_from_slice(bytes);
        self.offset = end;
        Some(pos)
    }
    /// Returns the `len`-byte string stored at `pos`, or `None` when the
    /// range is out of bounds or the bytes are not valid UTF-8.
    #[allow(dead_code)]
    pub fn get_str(&self, pos: usize, len: usize) -> Option<&str> {
        // checked_add: `pos + len` could wrap in release builds (yielding
        // a bogus in-bounds `end`) or panic in debug builds; overflow is
        // simply out of bounds.
        let end = pos.checked_add(len)?;
        if end > self.buffer.len() {
            return None;
        }
        std::str::from_utf8(&self.buffer[pos..end]).ok()
    }
    /// Number of bytes currently allocated.
    #[allow(dead_code)]
    pub fn used(&self) -> usize {
        self.offset
    }
    /// Number of bytes still available.
    #[allow(dead_code)]
    pub fn remaining(&self) -> usize {
        self.buffer.len() - self.offset
    }
    /// Frees every allocation at once. Old data is not zeroed, so
    /// previously returned positions stay readable until overwritten.
    #[allow(dead_code)]
    pub fn reset(&mut self) {
        self.offset = 0;
    }
}
2198/// Type check cache.
2199#[allow(dead_code)]
2200#[allow(missing_docs)]
2201pub struct TypeCheckCache {
2202    cache: std::collections::HashMap<u64, TypeCheckResult>,
2203    capacity: usize,
2204}
2205impl TypeCheckCache {
2206    #[allow(dead_code)]
2207    #[allow(missing_docs)]
2208    pub fn new(capacity: usize) -> Self {
2209        Self {
2210            cache: std::collections::HashMap::new(),
2211            capacity,
2212        }
2213    }
2214    #[allow(dead_code)]
2215    #[allow(missing_docs)]
2216    pub fn lookup(&self, hash: u64) -> Option<&TypeCheckResult> {
2217        self.cache.get(&hash)
2218    }
2219    #[allow(dead_code)]
2220    #[allow(missing_docs)]
2221    pub fn store(&mut self, result: TypeCheckResult) {
2222        if self.cache.len() >= self.capacity {
2223            if let Some(&k) = self.cache.keys().next() {
2224                self.cache.remove(&k);
2225            }
2226        }
2227        self.cache.insert(result.expr_hash, result);
2228    }
2229    #[allow(dead_code)]
2230    #[allow(missing_docs)]
2231    pub fn invalidate(&mut self, hash: u64) {
2232        self.cache.remove(&hash);
2233    }
2234    #[allow(dead_code)]
2235    #[allow(missing_docs)]
2236    pub fn valid_count(&self) -> usize {
2237        self.cache.values().filter(|r| r.is_valid).count()
2238    }
2239}