Skip to main content

tsz_solver/intern/
mod.rs

//! Type interning for structural deduplication.
//!
//! This module implements the type interning engine that converts
//! `TypeData` structures into lightweight `TypeId` handles.
//!
//! Benefits:
//! - O(1) type equality (just compare `TypeId` values)
//! - Memory efficient (each unique structure stored once)
//! - Cache-friendly (work with u32 arrays instead of heap objects)
//!
//! # Concurrency Strategy
//!
//! The `TypeInterner` uses a sharded DashMap-based architecture for lock-free
//! concurrent access:
//!
//! - **Sharded Type Storage**: 64 shards based on hash of `TypeData` to minimize contention
//! - **`DashMap` for Interning**: Each shard uses `DashMap` for lock-free read/write operations
//! - **Arc for Immutability**: Type data is stored in `Arc<T>` for cheap cloning
//! - **No `RwLock<Vec<T>>`**: Avoids the read-then-write deadlock pattern
//!
//! This design allows true parallel type checking without lock contention.
23use crate::def::DefId;
24#[cfg(test)]
25use crate::types::*;
26use crate::types::{
27    CallSignature, CallableShape, CallableShapeId, ConditionalType, ConditionalTypeId,
28    FunctionShape, FunctionShapeId, IndexSignature, IntrinsicKind, LiteralValue, MappedType,
29    MappedTypeId, ObjectFlags, ObjectShape, ObjectShapeId, OrderedFloat, PropertyInfo,
30    PropertyLookup, SymbolRef, TemplateLiteralId, TemplateSpan, TupleElement, TupleListId,
31    TypeApplication, TypeApplicationId, TypeData, TypeId, TypeListId, TypeParamInfo, Visibility,
32};
33use crate::visitor::is_identity_comparable_type;
34use dashmap::DashMap;
35use dashmap::mapref::entry::Entry;
36use rustc_hash::{FxBuildHasher, FxHashMap, FxHashSet, FxHasher};
37use smallvec::SmallVec;
38use std::hash::{Hash, Hasher};
39use std::sync::{
40    Arc, OnceLock,
41    atomic::{AtomicU32, Ordering},
42};
43use tsz_common::interner::{Atom, ShardedInterner};
44
45mod intersection;
46mod normalize;
47mod template;
48
// Shard configuration: user `TypeId`s embed the shard index in their low
// `SHARD_BITS` bits, so these three values must stay in sync with `make_id`
// and `lookup`.
const SHARD_BITS: u32 = 6;
const SHARD_COUNT: usize = 1 << SHARD_BITS; // 64 shards
const SHARD_MASK: u32 = (SHARD_COUNT as u32) - 1;
/// Object shapes with at least this many properties get a cached
/// name -> index map; smaller shapes use a linear scan.
pub(crate) const PROPERTY_MAP_THRESHOLD: usize = 24;
/// Inline capacity for `TypeListBuffer`; lists up to this length stay on the stack.
const TYPE_LIST_INLINE: usize = 8;

/// Maximum template literal expansion limit.
/// WASM environments have limited linear memory, so we use a much lower limit
/// to prevent OOM. Native CLI can handle more.
#[cfg(target_arch = "wasm32")]
pub(crate) const TEMPLATE_LITERAL_EXPANSION_LIMIT: usize = 2_000;
#[cfg(not(target_arch = "wasm32"))]
pub(crate) const TEMPLATE_LITERAL_EXPANSION_LIMIT: usize = 100_000;

/// Maximum number of interned types before aborting.
/// WASM linear memory cannot grow indefinitely, so we cap at 500k types.
#[cfg(target_arch = "wasm32")]
pub(crate) const MAX_INTERNED_TYPES: usize = 500_000;
#[cfg(not(target_arch = "wasm32"))]
pub(crate) const MAX_INTERNED_TYPES: usize = 5_000_000;

/// Scratch buffer for assembling type lists; avoids heap allocation for
/// lists of up to `TYPE_LIST_INLINE` ids.
pub(crate) type TypeListBuffer = SmallVec<[TypeId; TYPE_LIST_INLINE]>;
/// Cached property-name -> property-index maps, keyed by object shape.
type ObjectPropertyIndex = DashMap<ObjectShapeId, Arc<FxHashMap<Atom, usize>>, FxBuildHasher>;
/// Lazily-initialized holder for `ObjectPropertyIndex` (allocated on first use).
type ObjectPropertyMap = OnceLock<ObjectPropertyIndex>;
73
/// Inner data for a `TypeShard`, lazily initialized.
struct TypeShardInner {
    /// Map from `TypeData` to local index within this shard
    key_to_index: DashMap<TypeData, u32, FxBuildHasher>,
    /// Map from local index to `TypeData` (using Arc for shared access)
    index_to_key: DashMap<u32, Arc<TypeData>, FxBuildHasher>,
}

/// A single shard of the type interned storage.
///
/// Uses `OnceLock` for lazy initialization - `DashMaps` are only allocated
/// when the shard is first accessed, reducing startup overhead.
struct TypeShard {
    /// Lazily initialized inner maps
    inner: OnceLock<TypeShardInner>,
    /// Atomic counter for allocating new indices in this shard.
    /// Kept outside `OnceLock` so `is_empty` can answer without forcing
    /// the `DashMap` allocation.
    next_index: AtomicU32,
}
93
94impl TypeShard {
95    const fn new() -> Self {
96        Self {
97            inner: OnceLock::new(),
98            next_index: AtomicU32::new(0),
99        }
100    }
101
102    /// Get the inner maps, initializing on first access
103    #[inline]
104    fn get_inner(&self) -> &TypeShardInner {
105        self.inner.get_or_init(|| TypeShardInner {
106            key_to_index: DashMap::with_hasher(FxBuildHasher),
107            index_to_key: DashMap::with_hasher(FxBuildHasher),
108        })
109    }
110
111    /// Check if a key exists without initializing the shard
112    #[inline]
113    fn is_empty(&self) -> bool {
114        self.next_index.load(Ordering::Relaxed) == 0
115    }
116}
117
/// Inner data for `ConcurrentSliceInterner`, lazily initialized.
struct SliceInternerInner<T> {
    /// id -> interned slice (id 0 is permanently the empty slice)
    items: DashMap<u32, Arc<[T]>, FxBuildHasher>,
    /// Reverse map: interned slice -> id, giving O(1) dedup on intern
    map: DashMap<Arc<[T]>, u32, FxBuildHasher>,
}

/// Lock-free slice interner using `DashMap` for concurrent access.
/// Uses lazy initialization to defer `DashMap` allocation until first use.
struct ConcurrentSliceInterner<T> {
    inner: OnceLock<SliceInternerInner<T>>,
    /// Next id to hand out; starts at 1 because id 0 is reserved for empty.
    next_id: AtomicU32,
}
130
131impl<T> ConcurrentSliceInterner<T>
132where
133    T: Eq + Hash + Clone + Send + Sync + 'static,
134{
135    const fn new() -> Self {
136        Self {
137            inner: OnceLock::new(),
138            next_id: AtomicU32::new(1), // Reserve 0 for empty
139        }
140    }
141
142    #[inline]
143    fn get_inner(&self) -> &SliceInternerInner<T> {
144        self.inner.get_or_init(|| {
145            let items = DashMap::with_hasher(FxBuildHasher);
146            let map = DashMap::with_hasher(FxBuildHasher);
147            let empty: Arc<[T]> = Arc::from(Vec::new());
148            items.insert(0, std::sync::Arc::clone(&empty));
149            map.insert(empty, 0);
150            SliceInternerInner { items, map }
151        })
152    }
153
154    fn intern(&self, items_slice: &[T]) -> u32 {
155        if items_slice.is_empty() {
156            return 0;
157        }
158
159        let inner = self.get_inner();
160        let temp_arc: Arc<[T]> = Arc::from(items_slice.to_vec());
161
162        // Try to get existing ID via reverse map — O(1)
163        if let Some(ref_entry) = inner.map.get(&temp_arc) {
164            return *ref_entry.value();
165        }
166
167        // Allocate new ID
168        let id = self.next_id.fetch_add(1, Ordering::Relaxed);
169
170        // Double-check: another thread might have inserted while we allocated
171        match inner.map.entry(std::sync::Arc::clone(&temp_arc)) {
172            dashmap::mapref::entry::Entry::Vacant(e) => {
173                e.insert(id);
174                inner.items.insert(id, temp_arc);
175                id
176            }
177            dashmap::mapref::entry::Entry::Occupied(e) => *e.get(),
178        }
179    }
180
181    fn get(&self, id: u32) -> Option<Arc<[T]>> {
182        // For id 0 (empty), we can return without initializing
183        if id == 0 {
184            return Some(Arc::from(Vec::new()));
185        }
186        self.inner
187            .get()?
188            .items
189            .get(&id)
190            .map(|e| std::sync::Arc::clone(e.value()))
191    }
192
193    fn empty(&self) -> Arc<[T]> {
194        Arc::from(Vec::new())
195    }
196}
197
/// Inner data for `ConcurrentValueInterner`, lazily initialized.
struct ValueInternerInner<T> {
    /// id -> interned value
    items: DashMap<u32, Arc<T>, FxBuildHasher>,
    /// Reverse map: interned value -> id, giving O(1) dedup on intern
    map: DashMap<Arc<T>, u32, FxBuildHasher>,
}

/// Lock-free value interner using `DashMap` for concurrent access.
/// Uses lazy initialization to defer `DashMap` allocation until first use.
struct ConcurrentValueInterner<T> {
    inner: OnceLock<ValueInternerInner<T>>,
    /// Next id to hand out; unlike the slice interner, 0 is a normal id here.
    next_id: AtomicU32,
}
210
211impl<T> ConcurrentValueInterner<T>
212where
213    T: Eq + Hash + Clone + Send + Sync + 'static,
214{
215    const fn new() -> Self {
216        Self {
217            inner: OnceLock::new(),
218            next_id: AtomicU32::new(0),
219        }
220    }
221
222    #[inline]
223    fn get_inner(&self) -> &ValueInternerInner<T> {
224        self.inner.get_or_init(|| ValueInternerInner {
225            items: DashMap::with_hasher(FxBuildHasher),
226            map: DashMap::with_hasher(FxBuildHasher),
227        })
228    }
229
230    fn intern(&self, value: T) -> u32 {
231        let inner = self.get_inner();
232        let value_arc = Arc::new(value);
233
234        // Try to get existing ID
235        if let Some(ref_entry) = inner.map.get(&value_arc) {
236            return *ref_entry.value();
237        }
238
239        // Allocate new ID
240        let id = self.next_id.fetch_add(1, Ordering::Relaxed);
241
242        // Double-check: another thread might have inserted while we allocated
243        match inner.map.entry(std::sync::Arc::clone(&value_arc)) {
244            Entry::Vacant(e) => {
245                e.insert(id);
246                inner.items.insert(id, value_arc);
247                id
248            }
249            Entry::Occupied(e) => *e.get(),
250        }
251    }
252
253    fn get(&self, id: u32) -> Option<Arc<T>> {
254        self.inner
255            .get()?
256            .items
257            .get(&id)
258            .map(|e| std::sync::Arc::clone(e.value()))
259    }
260}
261
/// Type interning table with lock-free concurrent access.
///
/// Uses sharded `DashMap` structures for all internal storage, enabling
/// true parallel type checking without lock contention.
///
/// All internal structures use lazy initialization via `OnceLock` to minimize
/// startup overhead - `DashMaps` are only allocated when first accessed.
pub struct TypeInterner {
    /// Sharded storage for user-defined types (lazily initialized);
    /// always `SHARD_COUNT` entries, indexed by the low bits of the key hash
    shards: Vec<TypeShard>,
    /// String interner for property names and string literals (already lock-free)
    pub string_interner: ShardedInterner,
    /// Concurrent interners for type components (lazily initialized)
    type_lists: ConcurrentSliceInterner<TypeId>,
    tuple_lists: ConcurrentSliceInterner<TupleElement>,
    template_lists: ConcurrentSliceInterner<TemplateSpan>,
    object_shapes: ConcurrentValueInterner<ObjectShape>,
    /// Object property maps: lazily initialized `DashMap`
    object_property_maps: ObjectPropertyMap,
    function_shapes: ConcurrentValueInterner<FunctionShape>,
    callable_shapes: ConcurrentValueInterner<CallableShape>,
    conditional_types: ConcurrentValueInterner<ConditionalType>,
    mapped_types: ConcurrentValueInterner<MappedType>,
    applications: ConcurrentValueInterner<TypeApplication>,
    /// Cache for `is_identity_comparable_type` checks (memoized O(1) lookup after first computation)
    identity_comparable_cache: DashMap<TypeId, bool, FxBuildHasher>,
    /// The global Array base type (e.g., Array<T> from lib.d.ts); write-once
    array_base_type: OnceLock<TypeId>,
    /// Type parameters for the Array base type; write-once
    array_base_type_params: OnceLock<Vec<TypeParamInfo>>,
    /// Boxed interface types for primitives (e.g., String interface for `string`).
    /// Registered from lib.d.ts during primordial type setup.
    boxed_types: DashMap<IntrinsicKind, TypeId, FxBuildHasher>,
}
296
297impl std::fmt::Debug for TypeInterner {
298    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
299        f.debug_struct("TypeInterner")
300            .field("shards", &self.shards.len())
301            .finish_non_exhaustive()
302    }
303}
304
305impl TypeInterner {
306    /// Create a new type interner with pre-registered intrinsics.
307    ///
308    /// Uses lazy initialization for all `DashMap` structures to minimize
309    /// startup overhead. `DashMaps` are only allocated when first accessed.
310    pub fn new() -> Self {
311        let shards: Vec<TypeShard> = (0..SHARD_COUNT).map(|_| TypeShard::new()).collect();
312
313        Self {
314            shards,
315            // String interner - common strings are interned on-demand for faster startup
316            string_interner: ShardedInterner::new(),
317            type_lists: ConcurrentSliceInterner::new(),
318            tuple_lists: ConcurrentSliceInterner::new(),
319            template_lists: ConcurrentSliceInterner::new(),
320            object_shapes: ConcurrentValueInterner::new(),
321            object_property_maps: OnceLock::new(),
322            function_shapes: ConcurrentValueInterner::new(),
323            callable_shapes: ConcurrentValueInterner::new(),
324            conditional_types: ConcurrentValueInterner::new(),
325            mapped_types: ConcurrentValueInterner::new(),
326            applications: ConcurrentValueInterner::new(),
327            identity_comparable_cache: DashMap::with_hasher(FxBuildHasher),
328            array_base_type: OnceLock::new(),
329            array_base_type_params: OnceLock::new(),
330            boxed_types: DashMap::with_hasher(FxBuildHasher),
331        }
332    }
333
334    /// Set the global Array base type (e.g., Array<T> from lib.d.ts).
335    ///
336    /// This should be called once during primordial type setup when lib.d.ts is processed.
337    /// Once set, the value cannot be changed (`OnceLock` enforces this).
338    pub fn set_array_base_type(&self, type_id: TypeId, params: Vec<TypeParamInfo>) {
339        let _ = self.array_base_type.set(type_id);
340        let _ = self.array_base_type_params.set(params);
341    }
342
343    /// Get the global Array base type, if it has been set.
344    #[inline]
345    pub fn get_array_base_type(&self) -> Option<TypeId> {
346        self.array_base_type.get().copied()
347    }
348
349    /// Get the type parameters for the global Array base type, if it has been set.
350    #[inline]
351    pub fn get_array_base_type_params(&self) -> &[TypeParamInfo] {
352        self.array_base_type_params
353            .get()
354            .map_or(&[], |v| v.as_slice())
355    }
356
357    /// Set a boxed interface type for a primitive intrinsic kind.
358    ///
359    /// Called during primordial type setup when lib.d.ts is processed.
360    /// For example, `set_boxed_type(IntrinsicKind::String, type_id_of_String_interface)`
361    /// enables property access on `string` values to resolve through the String interface.
362    pub fn set_boxed_type(&self, kind: IntrinsicKind, type_id: TypeId) {
363        self.boxed_types.insert(kind, type_id);
364    }
365
366    /// Get the boxed interface type for a primitive intrinsic kind.
367    #[inline]
368    pub fn get_boxed_type(&self, kind: IntrinsicKind) -> Option<TypeId> {
369        self.boxed_types.get(&kind).map(|r| *r)
370    }
371
372    /// Get the object property maps, initializing on first access
373    #[inline]
374    fn get_object_property_maps(&self) -> &ObjectPropertyIndex {
375        self.object_property_maps
376            .get_or_init(|| DashMap::with_hasher(FxBuildHasher))
377    }
378
379    /// Check if a type can be compared by `TypeId` identity alone (O(1) equality).
380    /// Results are cached for O(1) lookup after first computation.
381    /// This is used for optimization in BCT and subtype checking.
382    #[inline]
383    pub fn is_identity_comparable_type(&self, type_id: TypeId) -> bool {
384        // Fast path: check cache first
385        if let Some(cached) = self.identity_comparable_cache.get(&type_id) {
386            return *cached;
387        }
388        // Compute and cache
389        let result = is_identity_comparable_type(self, type_id);
390        self.identity_comparable_cache.insert(type_id, result);
391        result
392    }
393
394    /// Intern a string into an Atom.
395    /// This is used when constructing types with property names or string literals.
396    pub fn intern_string(&self, s: &str) -> Atom {
397        self.string_interner.intern(s)
398    }
399
400    /// Resolve an Atom back to its string value.
401    /// This is used when formatting types for error messages.
402    pub fn resolve_atom(&self, atom: Atom) -> String {
403        self.string_interner.resolve(atom).to_string()
404    }
405
406    /// Resolve an Atom without allocating a new String.
407    pub fn resolve_atom_ref(&self, atom: Atom) -> Arc<str> {
408        self.string_interner.resolve(atom)
409    }
410
411    pub fn type_list(&self, id: TypeListId) -> Arc<[TypeId]> {
412        self.type_lists
413            .get(id.0)
414            .unwrap_or_else(|| self.type_lists.empty())
415    }
416
417    pub fn tuple_list(&self, id: TupleListId) -> Arc<[TupleElement]> {
418        self.tuple_lists
419            .get(id.0)
420            .unwrap_or_else(|| self.tuple_lists.empty())
421    }
422
423    pub fn template_list(&self, id: TemplateLiteralId) -> Arc<[TemplateSpan]> {
424        self.template_lists
425            .get(id.0)
426            .unwrap_or_else(|| self.template_lists.empty())
427    }
428
429    pub fn object_shape(&self, id: ObjectShapeId) -> Arc<ObjectShape> {
430        self.object_shapes.get(id.0).unwrap_or_else(|| {
431            Arc::new(ObjectShape {
432                flags: ObjectFlags::empty(),
433                properties: Vec::new(),
434                string_index: None,
435                number_index: None,
436                symbol: None,
437            })
438        })
439    }
440
441    pub fn object_property_index(&self, shape_id: ObjectShapeId, name: Atom) -> PropertyLookup {
442        let shape = self.object_shape(shape_id);
443        if shape.properties.len() < PROPERTY_MAP_THRESHOLD {
444            return PropertyLookup::Uncached;
445        }
446
447        match self.object_property_map(shape_id, &shape) {
448            Some(map) => match map.get(&name) {
449                Some(&idx) => PropertyLookup::Found(idx),
450                None => PropertyLookup::NotFound,
451            },
452            None => PropertyLookup::Uncached,
453        }
454    }
455
456    /// Get or create a property map for an object shape.
457    ///
458    /// This uses a lock-free pattern with `DashMap` to avoid the read-then-write
459    /// deadlock that existed in the previous `RwLock`<Vec> implementation.
460    fn object_property_map(
461        &self,
462        shape_id: ObjectShapeId,
463        shape: &ObjectShape,
464    ) -> Option<Arc<FxHashMap<Atom, usize>>> {
465        if shape.properties.len() < PROPERTY_MAP_THRESHOLD {
466            return None;
467        }
468
469        let maps = self.get_object_property_maps();
470
471        // Try to get existing map (lock-free read)
472        if let Some(map) = maps.get(&shape_id) {
473            return Some(std::sync::Arc::clone(&map));
474        }
475
476        // Build the property map
477        let mut map = FxHashMap::default();
478        for (idx, prop) in shape.properties.iter().enumerate() {
479            map.insert(prop.name, idx);
480        }
481        let map = Arc::new(map);
482
483        // Try to insert - if another thread inserted first, use theirs
484        match maps.entry(shape_id) {
485            Entry::Vacant(e) => {
486                e.insert(std::sync::Arc::clone(&map));
487                Some(map)
488            }
489            Entry::Occupied(e) => Some(std::sync::Arc::clone(e.get())),
490        }
491    }
492
    /// Resolve a `FunctionShapeId`; unknown ids yield a placeholder shape
    /// whose return type is `TypeId::ERROR`.
    pub fn function_shape(&self, id: FunctionShapeId) -> Arc<FunctionShape> {
        self.function_shapes.get(id.0).unwrap_or_else(|| {
            Arc::new(FunctionShape {
                type_params: Vec::new(),
                params: Vec::new(),
                this_type: None,
                return_type: TypeId::ERROR,
                type_predicate: None,
                is_constructor: false,
                is_method: false,
            })
        })
    }

    /// Resolve a `CallableShapeId`; unknown ids yield an empty callable shape.
    pub fn callable_shape(&self, id: CallableShapeId) -> Arc<CallableShape> {
        self.callable_shapes.get(id.0).unwrap_or_else(|| {
            Arc::new(CallableShape {
                call_signatures: Vec::new(),
                construct_signatures: Vec::new(),
                properties: Vec::new(),
                ..Default::default()
            })
        })
    }

    /// Resolve a `ConditionalTypeId`; unknown ids yield a placeholder with
    /// every branch set to `TypeId::ERROR`.
    pub fn conditional_type(&self, id: ConditionalTypeId) -> Arc<ConditionalType> {
        self.conditional_types.get(id.0).unwrap_or_else(|| {
            Arc::new(ConditionalType {
                check_type: TypeId::ERROR,
                extends_type: TypeId::ERROR,
                true_type: TypeId::ERROR,
                false_type: TypeId::ERROR,
                is_distributive: false,
            })
        })
    }

    /// Resolve a `MappedTypeId`; unknown ids yield a placeholder whose
    /// type parameter is named `_` and whose constraint/template are errors.
    pub fn mapped_type(&self, id: MappedTypeId) -> Arc<MappedType> {
        self.mapped_types.get(id.0).unwrap_or_else(|| {
            Arc::new(MappedType {
                type_param: TypeParamInfo {
                    is_const: false,
                    name: self.intern_string("_"),
                    constraint: None,
                    default: None,
                },
                constraint: TypeId::ERROR,
                name_type: None,
                template: TypeId::ERROR,
                readonly_modifier: None,
                optional_modifier: None,
            })
        })
    }

    /// Resolve a `TypeApplicationId`; unknown ids yield an error-based
    /// application with no arguments.
    pub fn type_application(&self, id: TypeApplicationId) -> Arc<TypeApplication> {
        self.applications.get(id.0).unwrap_or_else(|| {
            Arc::new(TypeApplication {
                base: TypeId::ERROR,
                args: Vec::new(),
            })
        })
    }
556
    /// Intern a type key and return its `TypeId`.
    /// If the key already exists, returns the existing `TypeId`.
    /// Otherwise, creates a new `TypeId` and stores the key.
    ///
    /// This uses a lock-free pattern with `DashMap` for concurrent access.
    pub fn intern(&self, key: TypeData) -> TypeId {
        // Intrinsic kinds and boolean literals have fixed ids; they are
        // never stored in the shards.
        if let Some(id) = self.get_intrinsic_id(&key) {
            return id;
        }

        // Circuit breaker: prevent OOM by limiting total interned types
        // This is especially important for WASM where linear memory is limited.
        // We do a cheap approximate check (summing shard indices) to avoid
        // iterating all shards on every intern call.
        if self.approximate_count() > MAX_INTERNED_TYPES {
            return TypeId::ERROR;
        }

        // Route the key to a shard via the low bits of its hash; `lookup`
        // recovers the shard from the id itself.
        let mut hasher = FxHasher::default();
        key.hash(&mut hasher);
        let shard_idx = (hasher.finish() as usize) & (SHARD_COUNT - 1);
        let shard = &self.shards[shard_idx];
        let inner = shard.get_inner();

        // Try to get existing ID (lock-free read)
        if let Some(entry) = inner.key_to_index.get(&key) {
            let local_index = *entry.value();
            return self.make_id(local_index, shard_idx as u32);
        }

        // Allocate new index
        let local_index = shard.next_index.fetch_add(1, Ordering::Relaxed);
        if local_index > (u32::MAX >> SHARD_BITS) {
            // Return error type instead of panicking
            return TypeId::ERROR;
        }

        // Double-check: another thread might have inserted while we allocated
        match inner.key_to_index.entry(key.clone()) {
            Entry::Vacant(e) => {
                e.insert(local_index);
                let key_arc = Arc::new(key);
                inner.index_to_key.insert(local_index, key_arc);
                self.make_id(local_index, shard_idx as u32)
            }
            Entry::Occupied(e) => {
                // Another thread inserted first, use their ID.
                // Our allocated `local_index` is abandoned, leaving a
                // harmless gap in this shard's index space.
                let existing_index = *e.get();
                self.make_id(existing_index, shard_idx as u32)
            }
        }
    }
609
610    /// Look up the `TypeData` for a given `TypeId`.
611    ///
612    /// This uses lock-free `DashMap` access with lazy shard initialization.
613    pub fn lookup(&self, id: TypeId) -> Option<TypeData> {
614        if id.is_intrinsic() || id.is_error() {
615            return self.get_intrinsic_key(id);
616        }
617
618        let raw_val = id.0.checked_sub(TypeId::FIRST_USER)?;
619        let shard_idx = (raw_val & SHARD_MASK) as usize;
620        let local_index = raw_val >> SHARD_BITS;
621
622        let shard = self.shards.get(shard_idx)?;
623        // If shard is empty, no types have been interned there yet
624        if shard.is_empty() {
625            return None;
626        }
627        shard
628            .get_inner()
629            .index_to_key
630            .get(&{ local_index })
631            .map(|r| r.value().as_ref().clone())
632    }
633
    /// Intern a list of type ids; equal lists share one `TypeListId`.
    fn intern_type_list(&self, members: Vec<TypeId>) -> TypeListId {
        TypeListId(self.type_lists.intern(&members))
    }

    /// Intern a list of tuple elements; equal lists share one `TupleListId`.
    fn intern_tuple_list(&self, elements: Vec<TupleElement>) -> TupleListId {
        TupleListId(self.tuple_lists.intern(&elements))
    }

    /// Intern a list of template spans; equal lists share one `TemplateLiteralId`.
    pub(crate) fn intern_template_list(&self, spans: Vec<TemplateSpan>) -> TemplateLiteralId {
        TemplateLiteralId(self.template_lists.intern(&spans))
    }

    /// Intern an object shape; equal shapes share one `ObjectShapeId`.
    pub fn intern_object_shape(&self, shape: ObjectShape) -> ObjectShapeId {
        ObjectShapeId(self.object_shapes.intern(shape))
    }

    /// Intern a function shape; equal shapes share one `FunctionShapeId`.
    fn intern_function_shape(&self, shape: FunctionShape) -> FunctionShapeId {
        FunctionShapeId(self.function_shapes.intern(shape))
    }

    /// Intern a callable shape; equal shapes share one `CallableShapeId`.
    fn intern_callable_shape(&self, shape: CallableShape) -> CallableShapeId {
        CallableShapeId(self.callable_shapes.intern(shape))
    }

    /// Intern a conditional type; equal types share one `ConditionalTypeId`.
    fn intern_conditional_type(&self, conditional: ConditionalType) -> ConditionalTypeId {
        ConditionalTypeId(self.conditional_types.intern(conditional))
    }

    /// Intern a mapped type; equal types share one `MappedTypeId`.
    fn intern_mapped_type(&self, mapped: MappedType) -> MappedTypeId {
        MappedTypeId(self.mapped_types.intern(mapped))
    }

    /// Intern a generic type application; equal applications share one id.
    fn intern_application(&self, application: TypeApplication) -> TypeApplicationId {
        TypeApplicationId(self.applications.intern(application))
    }
669
670    /// Get the number of interned types (lock-free read)
671    pub fn len(&self) -> usize {
672        let mut total = TypeId::FIRST_USER as usize;
673        for shard in &self.shards {
674            total += shard.next_index.load(Ordering::Relaxed) as usize;
675        }
676        total
677    }
678
679    /// Check if the interner is empty (only has intrinsics)
680    pub fn is_empty(&self) -> bool {
681        self.len() <= TypeId::FIRST_USER as usize
682    }
683
684    /// Get an approximate count of interned types.
685    /// This is cheaper than `len()` as it samples only a few shards.
686    /// Used for the circuit breaker to avoid OOM.
687    #[inline]
688    fn approximate_count(&self) -> usize {
689        // Sample first 4 shards and extrapolate (assumes uniform distribution)
690        let mut sample_total: usize = 0;
691        for shard in self.shards.iter().take(4) {
692            sample_total += shard.next_index.load(Ordering::Relaxed) as usize;
693        }
694        // Extrapolate to all 64 shards
695        (sample_total * SHARD_COUNT / 4) + TypeId::FIRST_USER as usize
696    }
697
    /// Pack a shard-local index and shard index into a user `TypeId`.
    /// `lookup` performs the inverse decoding; callers must have already
    /// bounds-checked `local_index` against `u32::MAX >> SHARD_BITS`.
    #[inline]
    fn make_id(&self, local_index: u32, shard_idx: u32) -> TypeId {
        let raw_val = (local_index << SHARD_BITS) | (shard_idx & SHARD_MASK);
        let id = TypeId(TypeId::FIRST_USER + raw_val);

        // SAFETY: Assert that we're not overflowing into the local ID space (MSB=1).
        // Global TypeIds must have MSB=0 (0x7FFFFFFF-) to allow ScopedTypeInterner
        // to use the upper half (0x80000000+) for ephemeral types.
        debug_assert!(
            id.is_global(),
            "Global TypeId overflow: {id:?} - would conflict with local ID space"
        );

        id
    }
713
    /// Map a `TypeData` to its fixed intrinsic `TypeId`, if it has one.
    /// Returns `None` for keys that must be stored in the shards.
    const fn get_intrinsic_id(&self, key: &TypeData) -> Option<TypeId> {
        match key {
            TypeData::Intrinsic(kind) => Some(kind.to_type_id()),
            TypeData::Error => Some(TypeId::ERROR),
            // Map boolean literals to their intrinsic IDs to avoid duplicates
            TypeData::Literal(LiteralValue::Boolean(true)) => Some(TypeId::BOOLEAN_TRUE),
            TypeData::Literal(LiteralValue::Boolean(false)) => Some(TypeId::BOOLEAN_FALSE),
            _ => None,
        }
    }

    /// Inverse of `get_intrinsic_id`: reconstruct the `TypeData` for a
    /// fixed intrinsic id. Returns `None` for non-intrinsic ids.
    const fn get_intrinsic_key(&self, id: TypeId) -> Option<TypeData> {
        match id {
            TypeId::NONE | TypeId::ERROR => Some(TypeData::Error),
            TypeId::NEVER => Some(TypeData::Intrinsic(IntrinsicKind::Never)),
            TypeId::UNKNOWN => Some(TypeData::Intrinsic(IntrinsicKind::Unknown)),
            TypeId::ANY => Some(TypeData::Intrinsic(IntrinsicKind::Any)),
            TypeId::VOID => Some(TypeData::Intrinsic(IntrinsicKind::Void)),
            TypeId::UNDEFINED => Some(TypeData::Intrinsic(IntrinsicKind::Undefined)),
            TypeId::NULL => Some(TypeData::Intrinsic(IntrinsicKind::Null)),
            TypeId::BOOLEAN => Some(TypeData::Intrinsic(IntrinsicKind::Boolean)),
            TypeId::NUMBER => Some(TypeData::Intrinsic(IntrinsicKind::Number)),
            TypeId::STRING => Some(TypeData::Intrinsic(IntrinsicKind::String)),
            TypeId::BIGINT => Some(TypeData::Intrinsic(IntrinsicKind::Bigint)),
            TypeId::SYMBOL => Some(TypeData::Intrinsic(IntrinsicKind::Symbol)),
            // PROMISE_BASE shares the plain `object` key.
            TypeId::OBJECT | TypeId::PROMISE_BASE => {
                Some(TypeData::Intrinsic(IntrinsicKind::Object))
            }
            TypeId::BOOLEAN_TRUE => Some(TypeData::Literal(LiteralValue::Boolean(true))),
            TypeId::BOOLEAN_FALSE => Some(TypeData::Literal(LiteralValue::Boolean(false))),
            TypeId::FUNCTION => Some(TypeData::Intrinsic(IntrinsicKind::Function)),
            _ => None,
        }
    }
748
    // =========================================================================
    // Convenience methods for common type constructions
    // =========================================================================

    /// Intern an intrinsic type.
    /// Intrinsics have fixed `TypeId`s, so this never touches the shards.
    pub const fn intrinsic(&self, kind: IntrinsicKind) -> TypeId {
        kind.to_type_id()
    }

    /// Intern a literal string type
    pub fn literal_string(&self, value: &str) -> TypeId {
        let atom = self.intern_string(value);
        self.intern(TypeData::Literal(LiteralValue::String(atom)))
    }

    /// Intern a literal string type from an already-interned Atom
    pub fn literal_string_atom(&self, atom: Atom) -> TypeId {
        self.intern(TypeData::Literal(LiteralValue::String(atom)))
    }

    /// Intern a literal number type.
    /// `OrderedFloat` gives the value `Eq`/`Hash` semantics for interning.
    pub fn literal_number(&self, value: f64) -> TypeId {
        self.intern(TypeData::Literal(LiteralValue::Number(OrderedFloat(value))))
    }

    /// Intern a literal boolean type.
    /// `intern` folds boolean literals into the fixed intrinsic ids.
    pub fn literal_boolean(&self, value: bool) -> TypeId {
        self.intern(TypeData::Literal(LiteralValue::Boolean(value)))
    }

    /// Intern a literal bigint type.
    /// The digit string is normalized (radix prefixes, underscores, leading
    /// zeros) first, so equal values share a single atom.
    pub fn literal_bigint(&self, value: &str) -> TypeId {
        let atom = self.intern_string(&self.normalize_bigint_literal(value));
        self.intern(TypeData::Literal(LiteralValue::BigInt(atom)))
    }
784
785    /// Intern a literal bigint type, allowing a sign prefix without extra clones.
786    pub fn literal_bigint_with_sign(&self, negative: bool, digits: &str) -> TypeId {
787        let normalized = self.normalize_bigint_literal(digits);
788        if normalized == "0" {
789            return self.literal_bigint(&normalized);
790        }
791        if !negative {
792            return self.literal_bigint(&normalized);
793        }
794
795        let mut value = String::with_capacity(normalized.len() + 1);
796        value.push('-');
797        value.push_str(&normalized);
798        let atom = self.string_interner.intern_owned(value);
799        self.intern(TypeData::Literal(LiteralValue::BigInt(atom)))
800    }
801
    /// Canonicalize a bigint literal's digit text so equal values share one
    /// interned atom.
    ///
    /// Handles `_` separators, `0x`/`0o`/`0b` radix prefixes (converted to
    /// decimal), and leading zeros. Empty or malformed digit sequences
    /// collapse to `"0"` — presumably the parser has already rejected such
    /// input, so this is only a defensive fallback (TODO confirm).
    fn normalize_bigint_literal(&self, value: &str) -> String {
        let stripped = value.replace('_', "");
        if stripped.is_empty() {
            return "0".to_string();
        }

        // Detect an optional radix prefix; default is decimal.
        let (base, digits) = if stripped.starts_with("0x") || stripped.starts_with("0X") {
            (16, &stripped[2..])
        } else if stripped.starts_with("0o") || stripped.starts_with("0O") {
            (8, &stripped[2..])
        } else if stripped.starts_with("0b") || stripped.starts_with("0B") {
            (2, &stripped[2..])
        } else {
            (10, stripped.as_str())
        };

        if digits.is_empty() {
            return "0".to_string();
        }

        if base == 10 {
            // Decimal input only needs its leading zeros trimmed.
            let normalized = digits.trim_start_matches('0');
            return if normalized.is_empty() {
                "0".to_string()
            } else {
                normalized.to_string()
            };
        }

        // Non-decimal bases: schoolbook radix conversion. `decimal` holds the
        // result's base-10 digits least-significant first; each input digit
        // multiplies the accumulator by `base` and adds the digit via `carry`.
        let mut decimal: Vec<u8> = vec![0];
        for ch in digits.chars() {
            let Some(digit) = ch.to_digit(base) else {
                // Invalid digit for this base — fall back to zero.
                return "0".to_string();
            };
            let digit = digit as u16;
            let mut carry = digit;
            let base = base as u16;
            for dec in decimal.iter_mut() {
                let value = u16::from(*dec) * base + carry;
                *dec = (value % 10) as u8;
                carry = value / 10;
            }
            while carry > 0 {
                decimal.push((carry % 10) as u8);
                carry /= 10;
            }
        }

        // Trim leading zeros (stored at the tail since digits are
        // least-significant first), keeping at least one digit.
        while decimal.len() > 1 && *decimal.last().unwrap_or(&0) == 0 {
            decimal.pop();
        }

        // Emit most-significant digit first.
        let mut out = String::with_capacity(decimal.len());
        for digit in decimal.iter().rev() {
            out.push(char::from(b'0' + *digit));
        }
        out
    }
860
861    /// Intern a union type, normalizing and deduplicating members
862    pub fn union(&self, members: Vec<TypeId>) -> TypeId {
863        self.union_from_iter(members)
864    }
865
866    /// Intern a union type from a vector that is already sorted and deduped.
867    /// This is an O(N) operation that avoids redundant sorting.
868    pub fn union_from_sorted_vec(&self, flat: Vec<TypeId>) -> TypeId {
869        if flat.is_empty() {
870            return TypeId::NEVER;
871        }
872        if flat.len() == 1 {
873            return flat[0];
874        }
875
876        let list_id = self.intern_type_list(flat);
877        self.intern(TypeData::Union(list_id))
878    }
879
880    /// Intern a union type while preserving member structure.
881    ///
882    /// This keeps unknown/literal members intact for property access checks.
883    pub fn union_preserve_members(&self, members: Vec<TypeId>) -> TypeId {
884        if members.is_empty() {
885            return TypeId::NEVER;
886        }
887
888        let mut flat: TypeListBuffer = SmallVec::new();
889        for member in members {
890            if let Some(TypeData::Union(inner)) = self.lookup(member) {
891                let members = self.type_list(inner);
892                flat.extend(members.iter().copied());
893            } else {
894                flat.push(member);
895            }
896        }
897
898        flat.sort_by_key(|id| id.0);
899        flat.dedup();
900        flat.retain(|id| *id != TypeId::NEVER);
901
902        if flat.is_empty() {
903            return TypeId::NEVER;
904        }
905        if flat.len() == 1 {
906            return flat[0];
907        }
908
909        let list_id = self.intern_type_list(flat.into_vec());
910        self.intern(TypeData::Union(list_id))
911    }
912
913    /// Fast path for unions that already fit in registers.
914    pub fn union2(&self, left: TypeId, right: TypeId) -> TypeId {
915        // Fast paths to avoid expensive normalize_union for trivial cases
916        if left == right {
917            return left;
918        }
919        if left == TypeId::NEVER {
920            return right;
921        }
922        if right == TypeId::NEVER {
923            return left;
924        }
925        self.union_from_iter([left, right])
926    }
927
928    /// Fast path for three-member unions without heap allocations.
929    pub fn union3(&self, first: TypeId, second: TypeId, third: TypeId) -> TypeId {
930        self.union_from_iter([first, second, third])
931    }
932
933    fn union_from_iter<I>(&self, members: I) -> TypeId
934    where
935        I: IntoIterator<Item = TypeId>,
936    {
937        let mut iter = members.into_iter();
938        let Some(first) = iter.next() else {
939            return TypeId::NEVER;
940        };
941        let Some(second) = iter.next() else {
942            return first;
943        };
944
945        let mut flat: TypeListBuffer = SmallVec::new();
946        self.push_union_member(&mut flat, first);
947        self.push_union_member(&mut flat, second);
948        for member in iter {
949            self.push_union_member(&mut flat, member);
950        }
951
952        self.normalize_union(flat)
953    }
954
955    fn push_union_member(&self, flat: &mut TypeListBuffer, member: TypeId) {
956        if let Some(TypeData::Union(inner)) = self.lookup(member) {
957            let members = self.type_list(inner);
958            flat.extend(members.iter().copied());
959        } else {
960            flat.push(member);
961        }
962    }
963
    /// Normalize a flattened union member list into a canonical `TypeId`.
    ///
    /// The steps are order-sensitive: absorbing members (`error`/`any`/
    /// `unknown`) happens before `never` removal, literal absorption before
    /// subtype reduction. Each stage re-checks for the empty/singleton fast
    /// paths, so do not reorder without revisiting every early return.
    fn normalize_union(&self, mut flat: TypeListBuffer) -> TypeId {
        // Deduplicate and sort for consistent hashing
        flat.sort_by_key(|id| id.0);
        flat.dedup();

        // Handle special cases
        // An `error` member poisons the whole union.
        if flat.contains(&TypeId::ERROR) {
            return TypeId::ERROR;
        }
        if flat.is_empty() {
            return TypeId::NEVER;
        }
        if flat.len() == 1 {
            return flat[0];
        }
        // If any member is `any`, the union is `any`
        if flat.contains(&TypeId::ANY) {
            return TypeId::ANY;
        }
        // If any member is `unknown`, the union is `unknown`
        if flat.contains(&TypeId::UNKNOWN) {
            return TypeId::UNKNOWN;
        }
        // Remove `never` from unions
        flat.retain(|id| *id != TypeId::NEVER);
        if flat.is_empty() {
            return TypeId::NEVER;
        }
        if flat.len() == 1 {
            return flat[0];
        }

        // Absorb literal types into their corresponding primitive types
        // e.g., "a" | string | number => string | number
        // e.g., 1 | 2 | number => number
        // e.g., true | boolean => boolean
        self.absorb_literals_into_primitives(&mut flat);

        if flat.is_empty() {
            return TypeId::NEVER;
        }
        if flat.len() == 1 {
            return flat[0];
        }

        // Reduce union using subtype checks (e.g., {a: 1} | {a: 1 | number} => {a: 1 | number})
        // Skip reduction if union contains complex types (TypeParameters, Lazy, etc.)
        // — the interner cannot resolve those, so subtype answers would be wrong.
        let has_complex = flat.iter().any(|&id| {
            matches!(
                self.lookup(id),
                Some(TypeData::TypeParameter(_) | TypeData::Lazy(_))
            )
        });
        if !has_complex {
            self.reduce_union_subtypes(&mut flat);
        }

        if flat.is_empty() {
            return TypeId::NEVER;
        }
        if flat.len() == 1 {
            return flat[0];
        }

        let list_id = self.intern_type_list(flat.into_vec());
        self.intern(TypeData::Union(list_id))
    }
1031
1032    /// Intern an intersection type, normalizing and deduplicating members
1033    pub fn intersection(&self, members: Vec<TypeId>) -> TypeId {
1034        self.intersection_from_iter(members)
1035    }
1036
1037    /// Fast path for two-member intersections.
1038    pub fn intersection2(&self, left: TypeId, right: TypeId) -> TypeId {
1039        self.intersection_from_iter([left, right])
1040    }
1041
1042    /// Create an intersection type WITHOUT triggering `normalize_intersection`
1043    ///
1044    /// This is a low-level operation used by the `SubtypeChecker` to merge
1045    /// properties from intersection members without causing infinite recursion.
1046    ///
1047    /// # Safety
1048    /// Only use this when you need to synthesize a type for intermediate checking.
1049    /// Do NOT use for final compiler output (like .d.ts generation) as the
1050    /// resulting type will be "unsimplified".
1051    pub fn intersect_types_raw(&self, members: Vec<TypeId>) -> TypeId {
1052        // Use SmallVec to keep stack allocation benefits
1053        let mut flat: TypeListBuffer = SmallVec::new();
1054
1055        for member in members {
1056            // Structural flattening is safe and cheap
1057            if let Some(TypeData::Intersection(inner)) = self.lookup(member) {
1058                let inner_members = self.type_list(inner);
1059                flat.extend(inner_members.iter().copied());
1060            } else {
1061                flat.push(member);
1062            }
1063        }
1064
1065        // Abort reduction if any member is a Lazy type.
1066        // The interner (Judge) cannot resolve symbols, so if we have unresolved types,
1067        // we must preserve the intersection as-is without attempting to merge or reduce.
1068        let has_unresolved = flat
1069            .iter()
1070            .any(|&id| matches!(self.lookup(id), Some(TypeData::Lazy(_))));
1071        if has_unresolved {
1072            // Basic dedup without any simplification
1073            flat.sort_by_key(|id| id.0);
1074            flat.dedup();
1075            let list_id = self.intern_type_list(flat.into_vec());
1076            return self.intern(TypeData::Intersection(list_id));
1077        }
1078
1079        // =========================================================
1080        // Canonicalization: Handle callable order preservation
1081        // =========================================================
1082        // In TypeScript, intersections of functions represent overloads, and
1083        // order matters. We need to separate callables from non-callables.
1084
1085        let has_callables = flat.iter().any(|&id| self.is_callable_type(id));
1086
1087        if !has_callables {
1088            // Fast path: No callables, sort everything for canonicalization
1089            flat.sort_by_key(|id| id.0);
1090            flat.dedup();
1091        } else {
1092            // Slow path: Separate callables and others to preserve order
1093            let mut callables = SmallVec::<[TypeId; 4]>::new();
1094
1095            // Retain only non-callables in 'flat', move callables to 'callables'
1096            // This preserves the order of callables as they are extracted
1097            let mut i = 0;
1098            while i < flat.len() {
1099                if self.is_callable_type(flat[i]) {
1100                    callables.push(flat.remove(i));
1101                } else {
1102                    i += 1;
1103                }
1104            }
1105
1106            // Sort and dedup non-callables
1107            flat.sort_by_key(|id| id.0);
1108            flat.dedup();
1109
1110            // Deduplicate callables (preserving order)
1111            let mut seen = FxHashSet::default();
1112            callables.retain(|id| seen.insert(*id));
1113
1114            // Merge: Put non-callables first (canonical), then callables (ordered)
1115            flat.extend(callables);
1116        }
1117
1118        // =========================================================
1119        // O(1) Fast Paths (Safe to do without recursion)
1120        // =========================================================
1121
1122        // 1. If any member is Never, the result is Never
1123        if flat.contains(&TypeId::NEVER) {
1124            return TypeId::NEVER;
1125        }
1126
1127        // 2. If any member is Any, the result is Any (unless Never is present)
1128        if flat.contains(&TypeId::ANY) {
1129            return TypeId::ANY;
1130        }
1131
1132        // 3. Remove Unknown (Identity element for intersection)
1133        flat.retain(|id| *id != TypeId::UNKNOWN);
1134
1135        // 4. Check for disjoint primitives (e.g., string & number = never)
1136        // If we have multiple intrinsic primitive types that are disjoint, return never
1137        if self.has_disjoint_primitives(&flat) {
1138            return TypeId::NEVER;
1139        }
1140
1141        // =========================================================
1142        // Final Construction
1143        // =========================================================
1144
1145        if flat.is_empty() {
1146            return TypeId::UNKNOWN;
1147        }
1148        if flat.len() == 1 {
1149            return flat[0];
1150        }
1151
1152        // Create the intersection directly without calling normalize_intersection
1153        let list_id = self.intern_type_list(flat.into_vec());
1154        self.intern(TypeData::Intersection(list_id))
1155    }
1156
1157    /// Convenience wrapper for raw intersection of two types
1158    pub fn intersect_types_raw2(&self, a: TypeId, b: TypeId) -> TypeId {
1159        self.intersect_types_raw(vec![a, b])
1160    }
1161
1162    fn intersection_from_iter<I>(&self, members: I) -> TypeId
1163    where
1164        I: IntoIterator<Item = TypeId>,
1165    {
1166        let mut iter = members.into_iter();
1167        let Some(first) = iter.next() else {
1168            return TypeId::UNKNOWN;
1169        };
1170        let Some(second) = iter.next() else {
1171            return first;
1172        };
1173
1174        let mut flat: TypeListBuffer = SmallVec::new();
1175        self.push_intersection_member(&mut flat, first);
1176        self.push_intersection_member(&mut flat, second);
1177        for member in iter {
1178            self.push_intersection_member(&mut flat, member);
1179        }
1180
1181        self.normalize_intersection(flat)
1182    }
1183
1184    fn push_intersection_member(&self, flat: &mut TypeListBuffer, member: TypeId) {
1185        if let Some(TypeData::Intersection(inner)) = self.lookup(member) {
1186            let members = self.type_list(inner);
1187            flat.extend(members.iter().copied());
1188        } else {
1189            flat.push(member);
1190        }
1191    }
1192
1193    /// Check if a type is a function or callable (order-sensitive in intersections).
1194    ///
1195    /// In TypeScript, intersection types are commutative for structural types
1196    /// (objects, primitives) but non-commutative for call signatures.
1197    /// For example: `((a: string) => void) & ((a: number) => void)` has a different
1198    /// overload order than the reverse.
1199    fn is_callable_type(&self, id: TypeId) -> bool {
1200        matches!(
1201            self.lookup(id),
1202            Some(TypeData::Function(_) | TypeData::Callable(_))
1203        )
1204    }
1205
1206    // Intersection normalization, empty object elimination, callable/object
1207    // merging, and distribution are in `intersection.rs`.
1208
1209    /// Intern an array type
1210    pub fn array(&self, element: TypeId) -> TypeId {
1211        self.intern(TypeData::Array(element))
1212    }
1213
1214    /// Canonical `this` type.
1215    pub fn this_type(&self) -> TypeId {
1216        self.intern(TypeData::ThisType)
1217    }
1218
1219    /// Intern a readonly array type
1220    /// Returns a distinct type from mutable arrays to enforce readonly semantics
1221    pub fn readonly_array(&self, element: TypeId) -> TypeId {
1222        let array_type = self.array(element);
1223        self.intern(TypeData::ReadonlyType(array_type))
1224    }
1225
1226    /// Intern a tuple type
1227    pub fn tuple(&self, elements: Vec<TupleElement>) -> TypeId {
1228        let list_id = self.intern_tuple_list(elements);
1229        self.intern(TypeData::Tuple(list_id))
1230    }
1231
1232    /// Intern a readonly tuple type
1233    /// Returns a distinct type from mutable tuples to enforce readonly semantics
1234    pub fn readonly_tuple(&self, elements: Vec<TupleElement>) -> TypeId {
1235        let tuple_type = self.tuple(elements);
1236        self.intern(TypeData::ReadonlyType(tuple_type))
1237    }
1238
1239    /// Wrap any type in a `ReadonlyType` marker
1240    /// This is used for the `readonly` type operator
1241    pub fn readonly_type(&self, inner: TypeId) -> TypeId {
1242        self.intern(TypeData::ReadonlyType(inner))
1243    }
1244
1245    /// Wrap a type in a `NoInfer` marker.
1246    pub fn no_infer(&self, inner: TypeId) -> TypeId {
1247        self.intern(TypeData::NoInfer(inner))
1248    }
1249
1250    /// Create a `unique symbol` type for a symbol declaration.
1251    pub fn unique_symbol(&self, symbol: SymbolRef) -> TypeId {
1252        self.intern(TypeData::UniqueSymbol(symbol))
1253    }
1254
1255    /// Create an `infer` binder with the provided info.
1256    pub fn infer(&self, info: TypeParamInfo) -> TypeId {
1257        self.intern(TypeData::Infer(info))
1258    }
1259
1260    pub fn bound_parameter(&self, index: u32) -> TypeId {
1261        self.intern(TypeData::BoundParameter(index))
1262    }
1263
1264    pub fn recursive(&self, depth: u32) -> TypeId {
1265        self.intern(TypeData::Recursive(depth))
1266    }
1267
1268    /// Wrap a type in a `KeyOf` marker.
1269    pub fn keyof(&self, inner: TypeId) -> TypeId {
1270        self.intern(TypeData::KeyOf(inner))
1271    }
1272
1273    /// Build an indexed access type (`T[K]`).
1274    pub fn index_access(&self, object_type: TypeId, index_type: TypeId) -> TypeId {
1275        self.intern(TypeData::IndexAccess(object_type, index_type))
1276    }
1277
1278    /// Build a nominal enum type that preserves `DefId` identity and carries
1279    /// structural member information for compatibility with primitive relations.
1280    pub fn enum_type(&self, def_id: DefId, structural_type: TypeId) -> TypeId {
1281        self.intern(TypeData::Enum(def_id, structural_type))
1282    }
1283
1284    /// Intern an object type with properties.
1285    pub fn object(&self, properties: Vec<PropertyInfo>) -> TypeId {
1286        self.object_with_flags(properties, ObjectFlags::empty())
1287    }
1288
1289    /// Intern a fresh object type with properties.
1290    pub fn object_fresh(&self, properties: Vec<PropertyInfo>) -> TypeId {
1291        self.object_with_flags(properties, ObjectFlags::FRESH_LITERAL)
1292    }
1293
1294    /// Intern an object type with properties and custom flags.
1295    pub fn object_with_flags(
1296        &self,
1297        mut properties: Vec<PropertyInfo>,
1298        flags: ObjectFlags,
1299    ) -> TypeId {
1300        // Sort by property name for consistent hashing
1301        properties.sort_by_key(|a| a.name);
1302        let shape_id = self.intern_object_shape(ObjectShape {
1303            flags,
1304            properties,
1305            string_index: None,
1306            number_index: None,
1307            symbol: None,
1308        });
1309        self.intern(TypeData::Object(shape_id))
1310    }
1311
1312    /// Intern an object type with properties, custom flags, and optional symbol.
1313    /// This is used for interfaces that need symbol tracking but no index signatures.
1314    pub fn object_with_flags_and_symbol(
1315        &self,
1316        mut properties: Vec<PropertyInfo>,
1317        flags: ObjectFlags,
1318        symbol: Option<tsz_binder::SymbolId>,
1319    ) -> TypeId {
1320        // Sort by property name for consistent hashing
1321        properties.sort_by_key(|a| a.name);
1322        let shape_id = self.intern_object_shape(ObjectShape {
1323            flags,
1324            properties,
1325            string_index: None,
1326            number_index: None,
1327            symbol,
1328        });
1329        self.intern(TypeData::Object(shape_id))
1330    }
1331
1332    /// Intern an object type with index signatures.
1333    pub fn object_with_index(&self, mut shape: ObjectShape) -> TypeId {
1334        // Sort properties by name for consistent hashing
1335        shape.properties.sort_by_key(|a| a.name);
1336        let shape_id = self.intern_object_shape(shape);
1337        self.intern(TypeData::ObjectWithIndex(shape_id))
1338    }
1339
1340    /// Intern a function type
1341    pub fn function(&self, shape: FunctionShape) -> TypeId {
1342        let shape_id = self.intern_function_shape(shape);
1343        self.intern(TypeData::Function(shape_id))
1344    }
1345
1346    /// Intern a callable type with overloaded signatures
1347    pub fn callable(&self, shape: CallableShape) -> TypeId {
1348        let shape_id = self.intern_callable_shape(shape);
1349        self.intern(TypeData::Callable(shape_id))
1350    }
1351
1352    /// Intern a conditional type
1353    pub fn conditional(&self, conditional: ConditionalType) -> TypeId {
1354        let conditional_id = self.intern_conditional_type(conditional);
1355        self.intern(TypeData::Conditional(conditional_id))
1356    }
1357
1358    /// Intern a mapped type
1359    pub fn mapped(&self, mapped: MappedType) -> TypeId {
1360        let mapped_id = self.intern_mapped_type(mapped);
1361        self.intern(TypeData::Mapped(mapped_id))
1362    }
1363
1364    /// Build a string intrinsic (`Uppercase`, `Lowercase`, etc.) marker.
1365    pub fn string_intrinsic(
1366        &self,
1367        kind: crate::types::StringIntrinsicKind,
1368        type_arg: TypeId,
1369    ) -> TypeId {
1370        self.intern(TypeData::StringIntrinsic { kind, type_arg })
1371    }
1372
1373    /// Intern a type reference (deprecated - use `lazy()` with `DefId` instead).
1374    ///
1375    /// This method is kept for backward compatibility with tests and legacy code.
1376    /// It converts `SymbolRef` to `DefId` and creates `TypeData::Lazy`.
1377    ///
1378    /// Deprecated: new code should use `lazy(def_id)` instead.
1379    pub fn reference(&self, symbol: SymbolRef) -> TypeId {
1380        // Convert SymbolRef to DefId by wrapping the raw u32 value
1381        // This maintains the same identity while using the new TypeData::Lazy variant
1382        let def_id = DefId(symbol.0);
1383        self.intern(TypeData::Lazy(def_id))
1384    }
1385
1386    /// Intern a lazy type reference (DefId-based).
1387    ///
1388    /// This is the replacement for `reference()` that uses Solver-owned
1389    /// `DefIds` instead of Binder-owned `SymbolRefs`.
1390    ///
1391    /// Use this method for all new type references
1392    /// to enable O(1) type equality across Binder and Solver boundaries.
1393    pub fn lazy(&self, def_id: DefId) -> TypeId {
1394        self.intern(TypeData::Lazy(def_id))
1395    }
1396
1397    /// Intern a type parameter.
1398    pub fn type_param(&self, info: TypeParamInfo) -> TypeId {
1399        self.intern(TypeData::TypeParameter(info))
1400    }
1401
1402    /// Intern a type query (`typeof value`) marker.
1403    pub fn type_query(&self, symbol: SymbolRef) -> TypeId {
1404        self.intern(TypeData::TypeQuery(symbol))
1405    }
1406
1407    /// Intern a generic type application
1408    pub fn application(&self, base: TypeId, args: Vec<TypeId>) -> TypeId {
1409        let app_id = self.intern_application(TypeApplication { base, args });
1410        self.intern(TypeData::Application(app_id))
1411    }
1412}
1413
impl Default for TypeInterner {
    /// Equivalent to [`TypeInterner::new`].
    fn default() -> Self {
        Self::new()
    }
}
1419
1420#[cfg(test)]
1421#[path = "../../tests/intern_tests.rs"]
1422mod tests;
1423
1424#[cfg(test)]
1425#[path = "../../tests/concurrent_tests.rs"]
1426mod concurrent_tests;