// tsz_solver/intern.rs

1//! Type interning for structural deduplication.
2//!
3//! This module implements the type interning engine that converts
4//! `TypeData` structures into lightweight `TypeId` handles.
5//!
6//! Benefits:
7//! - O(1) type equality (just compare `TypeId` values)
8//! - Memory efficient (each unique structure stored once)
9//! - Cache-friendly (work with u32 arrays instead of heap objects)
10//!
11//! # Concurrency Strategy
12//!
13//! The `TypeInterner` uses a sharded DashMap-based architecture for lock-free
14//! concurrent access:
15//!
16//! - **Sharded Type Storage**: 64 shards based on hash of `TypeData` to minimize contention
17//! - **`DashMap` for Interning**: Each shard uses `DashMap` for lock-free read/write operations
18//! - **Arc for Immutability**: Type data is stored in Arc<T> for cheap cloning
19//! - **No `RwLock`<Vec<T>>**: Avoids the read-then-write deadlock pattern
20//!
21//! This design allows true parallel type checking without lock contention.
22
23use crate::def::DefId;
24#[cfg(test)]
25use crate::types::*;
26use crate::types::{
27    CallSignature, CallableShape, CallableShapeId, ConditionalType, ConditionalTypeId,
28    FunctionShape, FunctionShapeId, IndexSignature, IntrinsicKind, LiteralValue, MappedType,
29    MappedTypeId, ObjectFlags, ObjectShape, ObjectShapeId, OrderedFloat, PropertyInfo,
30    PropertyLookup, SymbolRef, TemplateLiteralId, TemplateSpan, TupleElement, TupleListId,
31    TypeApplication, TypeApplicationId, TypeData, TypeId, TypeListId, TypeParamInfo, Visibility,
32};
33use crate::visitor::is_unit_type;
34use dashmap::DashMap;
35use dashmap::mapref::entry::Entry;
36use rustc_hash::{FxBuildHasher, FxHashMap, FxHashSet, FxHasher};
37use smallvec::SmallVec;
38use std::hash::{Hash, Hasher};
39use std::sync::{
40    Arc, OnceLock,
41    atomic::{AtomicU32, Ordering},
42};
43use tsz_common::interner::{Atom, ShardedInterner};
44
45#[path = "intern_intersection.rs"]
46mod intern_intersection;
47
48// Re-export for test access
49
const SHARD_BITS: u32 = 6;
const SHARD_COUNT: usize = 1 << SHARD_BITS; // 64 shards
/// Low-bit mask extracting the shard index from a packed `TypeId` payload.
const SHARD_MASK: u32 = (SHARD_COUNT as u32) - 1;
/// Object shapes with at least this many properties get a hash-map index
/// (see `object_property_map`); smaller shapes are scanned linearly.
pub(crate) const PROPERTY_MAP_THRESHOLD: usize = 24;
/// Inline capacity of `TypeListBuffer`; lists up to this length avoid the heap.
const TYPE_LIST_INLINE: usize = 8;

/// Maximum template literal expansion limit.
/// WASM environments have limited linear memory, so we use a much lower limit
/// to prevent OOM. Native CLI can handle more.
#[cfg(target_arch = "wasm32")]
pub(crate) const TEMPLATE_LITERAL_EXPANSION_LIMIT: usize = 2_000;
#[cfg(not(target_arch = "wasm32"))]
pub(crate) const TEMPLATE_LITERAL_EXPANSION_LIMIT: usize = 100_000;

/// Maximum number of interned types before aborting.
/// WASM linear memory cannot grow indefinitely, so we cap at 500k types.
/// Checked approximately in `TypeInterner::intern` (see `approximate_count`).
#[cfg(target_arch = "wasm32")]
pub(crate) const MAX_INTERNED_TYPES: usize = 500_000;
#[cfg(not(target_arch = "wasm32"))]
pub(crate) const MAX_INTERNED_TYPES: usize = 5_000_000;

/// Scratch buffer for building type lists; short lists stay on the stack.
pub(crate) type TypeListBuffer = SmallVec<[TypeId; TYPE_LIST_INLINE]>;
/// Per-shape property-name -> property-index lookup tables.
type ObjectPropertyIndex = DashMap<ObjectShapeId, Arc<FxHashMap<Atom, usize>>, FxBuildHasher>;
/// Lazily created holder for the `ObjectPropertyIndex`.
type ObjectPropertyMap = OnceLock<ObjectPropertyIndex>;
74
/// Inner data for a `TypeShard`, lazily initialized.
///
/// Both directions of the mapping are `DashMap`s, so lookups and inserts
/// never take an exclusive lock over the whole shard.
struct TypeShardInner {
    /// Map from `TypeData` to local index within this shard
    key_to_index: DashMap<TypeData, u32, FxBuildHasher>,
    /// Map from local index to `TypeData` (using Arc for shared access)
    index_to_key: DashMap<u32, Arc<TypeData>, FxBuildHasher>,
}
82
/// A single shard of the type interned storage.
///
/// Uses `OnceLock` for lazy initialization - `DashMaps` are only allocated
/// when the shard is first accessed, reducing startup overhead.
struct TypeShard {
    /// Lazily initialized inner maps
    inner: OnceLock<TypeShardInner>,
    /// Atomic counter for allocating new indices in this shard.
    /// Kept outside `OnceLock` for fast checks without initialization
    /// (see `is_empty`); it is only ever incremented.
    next_index: AtomicU32,
}
94
95impl TypeShard {
96    const fn new() -> Self {
97        Self {
98            inner: OnceLock::new(),
99            next_index: AtomicU32::new(0),
100        }
101    }
102
103    /// Get the inner maps, initializing on first access
104    #[inline]
105    fn get_inner(&self) -> &TypeShardInner {
106        self.inner.get_or_init(|| TypeShardInner {
107            key_to_index: DashMap::with_hasher(FxBuildHasher),
108            index_to_key: DashMap::with_hasher(FxBuildHasher),
109        })
110    }
111
112    /// Check if a key exists without initializing the shard
113    #[inline]
114    fn is_empty(&self) -> bool {
115        self.next_index.load(Ordering::Relaxed) == 0
116    }
117}
118
/// Inner data for `ConcurrentSliceInterner`, lazily initialized.
struct SliceInternerInner<T> {
    /// Forward map: slice ID -> interned slice contents.
    items: DashMap<u32, Arc<[T]>, FxBuildHasher>,
    /// Reverse map: slice contents -> slice ID, used for deduplication.
    map: DashMap<Arc<[T]>, u32, FxBuildHasher>,
}
124
/// Lock-free slice interner using `DashMap` for concurrent access.
/// Uses lazy initialization to defer `DashMap` allocation until first use.
struct ConcurrentSliceInterner<T> {
    /// Lazily created maps; ID 0 is seeded with the empty slice.
    inner: OnceLock<SliceInternerInner<T>>,
    /// Next ID to hand out; starts at 1 because 0 means "empty slice".
    next_id: AtomicU32,
}
131
132impl<T> ConcurrentSliceInterner<T>
133where
134    T: Eq + Hash + Clone + Send + Sync + 'static,
135{
136    const fn new() -> Self {
137        Self {
138            inner: OnceLock::new(),
139            next_id: AtomicU32::new(1), // Reserve 0 for empty
140        }
141    }
142
143    #[inline]
144    fn get_inner(&self) -> &SliceInternerInner<T> {
145        self.inner.get_or_init(|| {
146            let items = DashMap::with_hasher(FxBuildHasher);
147            let map = DashMap::with_hasher(FxBuildHasher);
148            let empty: Arc<[T]> = Arc::from(Vec::new());
149            items.insert(0, std::sync::Arc::clone(&empty));
150            map.insert(empty, 0);
151            SliceInternerInner { items, map }
152        })
153    }
154
155    fn intern(&self, items_slice: &[T]) -> u32 {
156        if items_slice.is_empty() {
157            return 0;
158        }
159
160        let inner = self.get_inner();
161        let temp_arc: Arc<[T]> = Arc::from(items_slice.to_vec());
162
163        // Try to get existing ID via reverse map — O(1)
164        if let Some(ref_entry) = inner.map.get(&temp_arc) {
165            return *ref_entry.value();
166        }
167
168        // Allocate new ID
169        let id = self.next_id.fetch_add(1, Ordering::Relaxed);
170
171        // Double-check: another thread might have inserted while we allocated
172        match inner.map.entry(std::sync::Arc::clone(&temp_arc)) {
173            dashmap::mapref::entry::Entry::Vacant(e) => {
174                e.insert(id);
175                inner.items.insert(id, temp_arc);
176                id
177            }
178            dashmap::mapref::entry::Entry::Occupied(e) => *e.get(),
179        }
180    }
181
182    fn get(&self, id: u32) -> Option<Arc<[T]>> {
183        // For id 0 (empty), we can return without initializing
184        if id == 0 {
185            return Some(Arc::from(Vec::new()));
186        }
187        self.inner
188            .get()?
189            .items
190            .get(&id)
191            .map(|e| std::sync::Arc::clone(e.value()))
192    }
193
194    fn empty(&self) -> Arc<[T]> {
195        Arc::from(Vec::new())
196    }
197}
198
/// Inner data for `ConcurrentValueInterner`, lazily initialized.
struct ValueInternerInner<T> {
    /// Forward map: value ID -> interned value.
    items: DashMap<u32, Arc<T>, FxBuildHasher>,
    /// Reverse map: value -> value ID, used for deduplication.
    map: DashMap<Arc<T>, u32, FxBuildHasher>,
}
204
/// Lock-free value interner using `DashMap` for concurrent access.
/// Uses lazy initialization to defer `DashMap` allocation until first use.
struct ConcurrentValueInterner<T> {
    /// Lazily created maps.
    inner: OnceLock<ValueInternerInner<T>>,
    /// Next ID to hand out; unlike the slice interner, IDs start at 0
    /// (there is no reserved "empty" sentinel here).
    next_id: AtomicU32,
}
211
212impl<T> ConcurrentValueInterner<T>
213where
214    T: Eq + Hash + Clone + Send + Sync + 'static,
215{
216    const fn new() -> Self {
217        Self {
218            inner: OnceLock::new(),
219            next_id: AtomicU32::new(0),
220        }
221    }
222
223    #[inline]
224    fn get_inner(&self) -> &ValueInternerInner<T> {
225        self.inner.get_or_init(|| ValueInternerInner {
226            items: DashMap::with_hasher(FxBuildHasher),
227            map: DashMap::with_hasher(FxBuildHasher),
228        })
229    }
230
231    fn intern(&self, value: T) -> u32 {
232        let inner = self.get_inner();
233        let value_arc = Arc::new(value);
234
235        // Try to get existing ID
236        if let Some(ref_entry) = inner.map.get(&value_arc) {
237            return *ref_entry.value();
238        }
239
240        // Allocate new ID
241        let id = self.next_id.fetch_add(1, Ordering::Relaxed);
242
243        // Double-check: another thread might have inserted while we allocated
244        match inner.map.entry(std::sync::Arc::clone(&value_arc)) {
245            Entry::Vacant(e) => {
246                e.insert(id);
247                inner.items.insert(id, value_arc);
248                id
249            }
250            Entry::Occupied(e) => *e.get(),
251        }
252    }
253
254    fn get(&self, id: u32) -> Option<Arc<T>> {
255        self.inner
256            .get()?
257            .items
258            .get(&id)
259            .map(|e| std::sync::Arc::clone(e.value()))
260    }
261}
262
/// Type interning table with lock-free concurrent access.
///
/// Uses sharded `DashMap` structures for all internal storage, enabling
/// true parallel type checking without lock contention.
///
/// All internal structures use lazy initialization via `OnceLock` to minimize
/// startup overhead - `DashMaps` are only allocated when first accessed.
pub struct TypeInterner {
    /// Sharded storage for user-defined types (lazily initialized);
    /// always exactly `SHARD_COUNT` entries.
    shards: Vec<TypeShard>,
    /// String interner for property names and string literals (already lock-free)
    pub string_interner: ShardedInterner,
    /// Concurrent interners for type components (lazily initialized)
    type_lists: ConcurrentSliceInterner<TypeId>,
    tuple_lists: ConcurrentSliceInterner<TupleElement>,
    template_lists: ConcurrentSliceInterner<TemplateSpan>,
    object_shapes: ConcurrentValueInterner<ObjectShape>,
    /// Object property maps: lazily initialized `DashMap` keyed by shape,
    /// built only for shapes at or above `PROPERTY_MAP_THRESHOLD`.
    object_property_maps: ObjectPropertyMap,
    function_shapes: ConcurrentValueInterner<FunctionShape>,
    callable_shapes: ConcurrentValueInterner<CallableShape>,
    conditional_types: ConcurrentValueInterner<ConditionalType>,
    mapped_types: ConcurrentValueInterner<MappedType>,
    applications: ConcurrentValueInterner<TypeApplication>,
    /// Cache for `is_unit_type` checks (memoized O(1) lookup after first computation)
    unit_type_cache: DashMap<TypeId, bool, FxBuildHasher>,
    /// The global Array base type (e.g., Array<T> from lib.d.ts)
    array_base_type: OnceLock<TypeId>,
    /// Type parameters for the Array base type
    array_base_type_params: OnceLock<Vec<TypeParamInfo>>,
    /// Boxed interface types for primitives (e.g., String interface for `string`).
    /// Registered from lib.d.ts during primordial type setup.
    boxed_types: DashMap<IntrinsicKind, TypeId, FxBuildHasher>,
}
297
298impl std::fmt::Debug for TypeInterner {
299    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
300        f.debug_struct("TypeInterner")
301            .field("shards", &self.shards.len())
302            .finish_non_exhaustive()
303    }
304}
305
306impl TypeInterner {
307    /// Create a new type interner with pre-registered intrinsics.
308    ///
309    /// Uses lazy initialization for all `DashMap` structures to minimize
310    /// startup overhead. `DashMaps` are only allocated when first accessed.
311    pub fn new() -> Self {
312        let shards: Vec<TypeShard> = (0..SHARD_COUNT).map(|_| TypeShard::new()).collect();
313
314        Self {
315            shards,
316            // String interner - common strings are interned on-demand for faster startup
317            string_interner: ShardedInterner::new(),
318            type_lists: ConcurrentSliceInterner::new(),
319            tuple_lists: ConcurrentSliceInterner::new(),
320            template_lists: ConcurrentSliceInterner::new(),
321            object_shapes: ConcurrentValueInterner::new(),
322            object_property_maps: OnceLock::new(),
323            function_shapes: ConcurrentValueInterner::new(),
324            callable_shapes: ConcurrentValueInterner::new(),
325            conditional_types: ConcurrentValueInterner::new(),
326            mapped_types: ConcurrentValueInterner::new(),
327            applications: ConcurrentValueInterner::new(),
328            unit_type_cache: DashMap::with_hasher(FxBuildHasher),
329            array_base_type: OnceLock::new(),
330            array_base_type_params: OnceLock::new(),
331            boxed_types: DashMap::with_hasher(FxBuildHasher),
332        }
333    }
334
335    /// Set the global Array base type (e.g., Array<T> from lib.d.ts).
336    ///
337    /// This should be called once during primordial type setup when lib.d.ts is processed.
338    /// Once set, the value cannot be changed (`OnceLock` enforces this).
339    pub fn set_array_base_type(&self, type_id: TypeId, params: Vec<TypeParamInfo>) {
340        let _ = self.array_base_type.set(type_id);
341        let _ = self.array_base_type_params.set(params);
342    }
343
344    /// Get the global Array base type, if it has been set.
345    #[inline]
346    pub fn get_array_base_type(&self) -> Option<TypeId> {
347        self.array_base_type.get().copied()
348    }
349
350    /// Get the type parameters for the global Array base type, if it has been set.
351    #[inline]
352    pub fn get_array_base_type_params(&self) -> &[TypeParamInfo] {
353        self.array_base_type_params
354            .get()
355            .map_or(&[], |v| v.as_slice())
356    }
357
358    /// Set a boxed interface type for a primitive intrinsic kind.
359    ///
360    /// Called during primordial type setup when lib.d.ts is processed.
361    /// For example, `set_boxed_type(IntrinsicKind::String, type_id_of_String_interface)`
362    /// enables property access on `string` values to resolve through the String interface.
363    pub fn set_boxed_type(&self, kind: IntrinsicKind, type_id: TypeId) {
364        self.boxed_types.insert(kind, type_id);
365    }
366
367    /// Get the boxed interface type for a primitive intrinsic kind.
368    #[inline]
369    pub fn get_boxed_type(&self, kind: IntrinsicKind) -> Option<TypeId> {
370        self.boxed_types.get(&kind).map(|r| *r)
371    }
372
373    /// Get the object property maps, initializing on first access
374    #[inline]
375    fn get_object_property_maps(&self) -> &ObjectPropertyIndex {
376        self.object_property_maps
377            .get_or_init(|| DashMap::with_hasher(FxBuildHasher))
378    }
379
    /// Check if a type is a "unit type" (represents exactly one value).
    /// Results are cached for O(1) lookup after first computation.
    /// This is used for optimization in BCT and subtype checking.
    ///
    /// Deliberately uses get-then-compute-then-insert rather than the
    /// `entry` API: the visitor call runs with no map lock held.
    /// NOTE(review): if the visitor re-enters this method for sub-types,
    /// holding an entry guard here could self-deadlock — confirm before
    /// changing this to `entry`. Two threads may race and compute the same
    /// result twice; the later insert writes an identical value (benign).
    #[inline]
    pub fn is_unit_type(&self, type_id: TypeId) -> bool {
        // Fast path: check cache first
        if let Some(cached) = self.unit_type_cache.get(&type_id) {
            return *cached;
        }
        // Compute and cache (free function from crate::visitor, shadowed
        // by this method's name).
        let result = is_unit_type(self, type_id);
        self.unit_type_cache.insert(type_id, result);
        result
    }
394
395    /// Intern a string into an Atom.
396    /// This is used when constructing types with property names or string literals.
397    pub fn intern_string(&self, s: &str) -> Atom {
398        self.string_interner.intern(s)
399    }
400
401    /// Resolve an Atom back to its string value.
402    /// This is used when formatting types for error messages.
403    pub fn resolve_atom(&self, atom: Atom) -> String {
404        self.string_interner.resolve(atom).to_string()
405    }
406
407    /// Resolve an Atom without allocating a new String.
408    pub fn resolve_atom_ref(&self, atom: Atom) -> Arc<str> {
409        self.string_interner.resolve(atom)
410    }
411
412    pub fn type_list(&self, id: TypeListId) -> Arc<[TypeId]> {
413        self.type_lists
414            .get(id.0)
415            .unwrap_or_else(|| self.type_lists.empty())
416    }
417
418    pub fn tuple_list(&self, id: TupleListId) -> Arc<[TupleElement]> {
419        self.tuple_lists
420            .get(id.0)
421            .unwrap_or_else(|| self.tuple_lists.empty())
422    }
423
424    pub fn template_list(&self, id: TemplateLiteralId) -> Arc<[TemplateSpan]> {
425        self.template_lists
426            .get(id.0)
427            .unwrap_or_else(|| self.template_lists.empty())
428    }
429
430    pub fn object_shape(&self, id: ObjectShapeId) -> Arc<ObjectShape> {
431        self.object_shapes.get(id.0).unwrap_or_else(|| {
432            Arc::new(ObjectShape {
433                flags: ObjectFlags::empty(),
434                properties: Vec::new(),
435                string_index: None,
436                number_index: None,
437                symbol: None,
438            })
439        })
440    }
441
442    pub fn object_property_index(&self, shape_id: ObjectShapeId, name: Atom) -> PropertyLookup {
443        let shape = self.object_shape(shape_id);
444        if shape.properties.len() < PROPERTY_MAP_THRESHOLD {
445            return PropertyLookup::Uncached;
446        }
447
448        match self.object_property_map(shape_id, &shape) {
449            Some(map) => match map.get(&name) {
450                Some(&idx) => PropertyLookup::Found(idx),
451                None => PropertyLookup::NotFound,
452            },
453            None => PropertyLookup::Uncached,
454        }
455    }
456
    /// Get or create a property map for an object shape.
    ///
    /// This uses a lock-free pattern with `DashMap` to avoid the read-then-write
    /// deadlock that existed in the previous `RwLock`<Vec> implementation.
    ///
    /// Returns `None` below `PROPERTY_MAP_THRESHOLD`, where a linear scan
    /// of the property list is cheaper than building a hash map.
    fn object_property_map(
        &self,
        shape_id: ObjectShapeId,
        shape: &ObjectShape,
    ) -> Option<Arc<FxHashMap<Atom, usize>>> {
        // Defensive re-check: callers also test the threshold, but this
        // keeps the invariant local to the function.
        if shape.properties.len() < PROPERTY_MAP_THRESHOLD {
            return None;
        }

        let maps = self.get_object_property_maps();

        // Try to get existing map (lock-free read)
        if let Some(map) = maps.get(&shape_id) {
            return Some(std::sync::Arc::clone(&map));
        }

        // Build the property map outside any map lock. If two threads race,
        // both build identical maps and one copy is simply discarded below.
        let mut map = FxHashMap::default();
        for (idx, prop) in shape.properties.iter().enumerate() {
            map.insert(prop.name, idx);
        }
        let map = Arc::new(map);

        // Try to insert - if another thread inserted first, use theirs
        match maps.entry(shape_id) {
            Entry::Vacant(e) => {
                e.insert(std::sync::Arc::clone(&map));
                Some(map)
            }
            Entry::Occupied(e) => Some(std::sync::Arc::clone(e.get())),
        }
    }
493
494    pub fn function_shape(&self, id: FunctionShapeId) -> Arc<FunctionShape> {
495        self.function_shapes.get(id.0).unwrap_or_else(|| {
496            Arc::new(FunctionShape {
497                type_params: Vec::new(),
498                params: Vec::new(),
499                this_type: None,
500                return_type: TypeId::ERROR,
501                type_predicate: None,
502                is_constructor: false,
503                is_method: false,
504            })
505        })
506    }
507
508    pub fn callable_shape(&self, id: CallableShapeId) -> Arc<CallableShape> {
509        self.callable_shapes.get(id.0).unwrap_or_else(|| {
510            Arc::new(CallableShape {
511                call_signatures: Vec::new(),
512                construct_signatures: Vec::new(),
513                properties: Vec::new(),
514                ..Default::default()
515            })
516        })
517    }
518
519    pub fn conditional_type(&self, id: ConditionalTypeId) -> Arc<ConditionalType> {
520        self.conditional_types.get(id.0).unwrap_or_else(|| {
521            Arc::new(ConditionalType {
522                check_type: TypeId::ERROR,
523                extends_type: TypeId::ERROR,
524                true_type: TypeId::ERROR,
525                false_type: TypeId::ERROR,
526                is_distributive: false,
527            })
528        })
529    }
530
531    pub fn mapped_type(&self, id: MappedTypeId) -> Arc<MappedType> {
532        self.mapped_types.get(id.0).unwrap_or_else(|| {
533            Arc::new(MappedType {
534                type_param: TypeParamInfo {
535                    is_const: false,
536                    name: self.intern_string("_"),
537                    constraint: None,
538                    default: None,
539                },
540                constraint: TypeId::ERROR,
541                name_type: None,
542                template: TypeId::ERROR,
543                readonly_modifier: None,
544                optional_modifier: None,
545            })
546        })
547    }
548
549    pub fn type_application(&self, id: TypeApplicationId) -> Arc<TypeApplication> {
550        self.applications.get(id.0).unwrap_or_else(|| {
551            Arc::new(TypeApplication {
552                base: TypeId::ERROR,
553                args: Vec::new(),
554            })
555        })
556    }
557
    /// Intern a type key and return its `TypeId`.
    /// If the key already exists, returns the existing `TypeId`.
    /// Otherwise, creates a new `TypeId` and stores the key.
    ///
    /// This uses a lock-free pattern with `DashMap` for concurrent access.
    /// Intrinsics and boolean literals never reach the shard tables; they
    /// map straight to reserved IDs via `get_intrinsic_id`.
    pub fn intern(&self, key: TypeData) -> TypeId {
        if let Some(id) = self.get_intrinsic_id(&key) {
            return id;
        }

        // Circuit breaker: prevent OOM by limiting total interned types
        // This is especially important for WASM where linear memory is limited.
        // We do a cheap approximate check (summing shard indices) to avoid
        // iterating all shards on every intern call.
        if self.approximate_count() > MAX_INTERNED_TYPES {
            return TypeId::ERROR;
        }

        // Shard selection uses the low bits of the Fx hash of the key.
        let mut hasher = FxHasher::default();
        key.hash(&mut hasher);
        let shard_idx = (hasher.finish() as usize) & (SHARD_COUNT - 1);
        let shard = &self.shards[shard_idx];
        let inner = shard.get_inner();

        // Try to get existing ID (lock-free read)
        if let Some(entry) = inner.key_to_index.get(&key) {
            let local_index = *entry.value();
            return self.make_id(local_index, shard_idx as u32);
        }

        // Allocate new index. Packing leaves only (32 - SHARD_BITS) bits
        // for the local index, hence the overflow guard below.
        let local_index = shard.next_index.fetch_add(1, Ordering::Relaxed);
        if local_index > (u32::MAX >> SHARD_BITS) {
            // Return error type instead of panicking
            return TypeId::ERROR;
        }

        // Double-check: another thread might have inserted while we allocated.
        // The clone is needed because `entry` consumes the key while the
        // Arc stored in `index_to_key` also needs ownership of it.
        match inner.key_to_index.entry(key.clone()) {
            Entry::Vacant(e) => {
                e.insert(local_index);
                let key_arc = Arc::new(key);
                inner.index_to_key.insert(local_index, key_arc);
                self.make_id(local_index, shard_idx as u32)
            }
            Entry::Occupied(e) => {
                // Another thread inserted first, use their ID; our
                // freshly allocated `local_index` is simply never used.
                let existing_index = *e.get();
                self.make_id(existing_index, shard_idx as u32)
            }
        }
    }
610
611    /// Look up the `TypeData` for a given `TypeId`.
612    ///
613    /// This uses lock-free `DashMap` access with lazy shard initialization.
614    pub fn lookup(&self, id: TypeId) -> Option<TypeData> {
615        if id.is_intrinsic() || id.is_error() {
616            return self.get_intrinsic_key(id);
617        }
618
619        let raw_val = id.0.checked_sub(TypeId::FIRST_USER)?;
620        let shard_idx = (raw_val & SHARD_MASK) as usize;
621        let local_index = raw_val >> SHARD_BITS;
622
623        let shard = self.shards.get(shard_idx)?;
624        // If shard is empty, no types have been interned there yet
625        if shard.is_empty() {
626            return None;
627        }
628        shard
629            .get_inner()
630            .index_to_key
631            .get(&{ local_index })
632            .map(|r| r.value().as_ref().clone())
633    }
634
635    fn intern_type_list(&self, members: Vec<TypeId>) -> TypeListId {
636        TypeListId(self.type_lists.intern(&members))
637    }
638
639    fn intern_tuple_list(&self, elements: Vec<TupleElement>) -> TupleListId {
640        TupleListId(self.tuple_lists.intern(&elements))
641    }
642
643    pub(crate) fn intern_template_list(&self, spans: Vec<TemplateSpan>) -> TemplateLiteralId {
644        TemplateLiteralId(self.template_lists.intern(&spans))
645    }
646
647    pub fn intern_object_shape(&self, shape: ObjectShape) -> ObjectShapeId {
648        ObjectShapeId(self.object_shapes.intern(shape))
649    }
650
651    fn intern_function_shape(&self, shape: FunctionShape) -> FunctionShapeId {
652        FunctionShapeId(self.function_shapes.intern(shape))
653    }
654
655    fn intern_callable_shape(&self, shape: CallableShape) -> CallableShapeId {
656        CallableShapeId(self.callable_shapes.intern(shape))
657    }
658
659    fn intern_conditional_type(&self, conditional: ConditionalType) -> ConditionalTypeId {
660        ConditionalTypeId(self.conditional_types.intern(conditional))
661    }
662
663    fn intern_mapped_type(&self, mapped: MappedType) -> MappedTypeId {
664        MappedTypeId(self.mapped_types.intern(mapped))
665    }
666
667    fn intern_application(&self, application: TypeApplication) -> TypeApplicationId {
668        TypeApplicationId(self.applications.intern(application))
669    }
670
671    /// Get the number of interned types (lock-free read)
672    pub fn len(&self) -> usize {
673        let mut total = TypeId::FIRST_USER as usize;
674        for shard in &self.shards {
675            total += shard.next_index.load(Ordering::Relaxed) as usize;
676        }
677        total
678    }
679
680    /// Check if the interner is empty (only has intrinsics)
681    pub fn is_empty(&self) -> bool {
682        self.len() <= TypeId::FIRST_USER as usize
683    }
684
685    /// Get an approximate count of interned types.
686    /// This is cheaper than `len()` as it samples only a few shards.
687    /// Used for the circuit breaker to avoid OOM.
688    #[inline]
689    fn approximate_count(&self) -> usize {
690        // Sample first 4 shards and extrapolate (assumes uniform distribution)
691        let mut sample_total: usize = 0;
692        for shard in self.shards.iter().take(4) {
693            sample_total += shard.next_index.load(Ordering::Relaxed) as usize;
694        }
695        // Extrapolate to all 64 shards
696        (sample_total * SHARD_COUNT / 4) + TypeId::FIRST_USER as usize
697    }
698
    /// Pack a shard-local index and shard number into a global `TypeId`.
    ///
    /// Layout: `FIRST_USER + (local_index << SHARD_BITS | shard_idx)` —
    /// the exact inverse of the decoding performed in `lookup`.
    #[inline]
    fn make_id(&self, local_index: u32, shard_idx: u32) -> TypeId {
        let raw_val = (local_index << SHARD_BITS) | (shard_idx & SHARD_MASK);
        let id = TypeId(TypeId::FIRST_USER + raw_val);

        // SAFETY: Assert that we're not overflowing into the local ID space (MSB=1).
        // Global TypeIds must have MSB=0 (0x7FFFFFFF-) to allow ScopedTypeInterner
        // to use the upper half (0x80000000+) for ephemeral types.
        debug_assert!(
            id.is_global(),
            "Global TypeId overflow: {id:?} - would conflict with local ID space"
        );

        id
    }
714
    /// Map a `TypeData` denoting a well-known type directly to its reserved
    /// `TypeId`, bypassing the shard tables entirely. Returns `None` for
    /// keys that must be interned normally.
    const fn get_intrinsic_id(&self, key: &TypeData) -> Option<TypeId> {
        match key {
            TypeData::Intrinsic(kind) => Some(kind.to_type_id()),
            TypeData::Error => Some(TypeId::ERROR),
            // Map boolean literals to their intrinsic IDs to avoid duplicates
            TypeData::Literal(LiteralValue::Boolean(true)) => Some(TypeId::BOOLEAN_TRUE),
            TypeData::Literal(LiteralValue::Boolean(false)) => Some(TypeId::BOOLEAN_FALSE),
            _ => None,
        }
    }
725
    /// Inverse of `get_intrinsic_id`: map a reserved `TypeId` back to its
    /// `TypeData`. Note the aliasing: both `NONE` and `ERROR` yield
    /// `TypeData::Error`, and `PROMISE_BASE` shares the `Object` intrinsic.
    const fn get_intrinsic_key(&self, id: TypeId) -> Option<TypeData> {
        match id {
            TypeId::NONE | TypeId::ERROR => Some(TypeData::Error),
            TypeId::NEVER => Some(TypeData::Intrinsic(IntrinsicKind::Never)),
            TypeId::UNKNOWN => Some(TypeData::Intrinsic(IntrinsicKind::Unknown)),
            TypeId::ANY => Some(TypeData::Intrinsic(IntrinsicKind::Any)),
            TypeId::VOID => Some(TypeData::Intrinsic(IntrinsicKind::Void)),
            TypeId::UNDEFINED => Some(TypeData::Intrinsic(IntrinsicKind::Undefined)),
            TypeId::NULL => Some(TypeData::Intrinsic(IntrinsicKind::Null)),
            TypeId::BOOLEAN => Some(TypeData::Intrinsic(IntrinsicKind::Boolean)),
            TypeId::NUMBER => Some(TypeData::Intrinsic(IntrinsicKind::Number)),
            TypeId::STRING => Some(TypeData::Intrinsic(IntrinsicKind::String)),
            TypeId::BIGINT => Some(TypeData::Intrinsic(IntrinsicKind::Bigint)),
            TypeId::SYMBOL => Some(TypeData::Intrinsic(IntrinsicKind::Symbol)),
            TypeId::OBJECT | TypeId::PROMISE_BASE => {
                Some(TypeData::Intrinsic(IntrinsicKind::Object))
            }
            TypeId::BOOLEAN_TRUE => Some(TypeData::Literal(LiteralValue::Boolean(true))),
            TypeId::BOOLEAN_FALSE => Some(TypeData::Literal(LiteralValue::Boolean(false))),
            TypeId::FUNCTION => Some(TypeData::Intrinsic(IntrinsicKind::Function)),
            _ => None,
        }
    }
749
750    // =========================================================================
751    // Convenience methods for common type constructions
752    // =========================================================================
753
754    /// Intern an intrinsic type
755    pub const fn intrinsic(&self, kind: IntrinsicKind) -> TypeId {
756        kind.to_type_id()
757    }
758
759    /// Intern a literal string type
760    pub fn literal_string(&self, value: &str) -> TypeId {
761        let atom = self.intern_string(value);
762        self.intern(TypeData::Literal(LiteralValue::String(atom)))
763    }
764
765    /// Intern a literal string type from an already-interned Atom
766    pub fn literal_string_atom(&self, atom: Atom) -> TypeId {
767        self.intern(TypeData::Literal(LiteralValue::String(atom)))
768    }
769
770    /// Intern a literal number type
771    pub fn literal_number(&self, value: f64) -> TypeId {
772        self.intern(TypeData::Literal(LiteralValue::Number(OrderedFloat(value))))
773    }
774
775    /// Intern a literal boolean type
776    pub fn literal_boolean(&self, value: bool) -> TypeId {
777        self.intern(TypeData::Literal(LiteralValue::Boolean(value)))
778    }
779
780    /// Intern a literal bigint type
781    pub fn literal_bigint(&self, value: &str) -> TypeId {
782        let atom = self.intern_string(&self.normalize_bigint_literal(value));
783        self.intern(TypeData::Literal(LiteralValue::BigInt(atom)))
784    }
785
786    /// Intern a literal bigint type, allowing a sign prefix without extra clones.
787    pub fn literal_bigint_with_sign(&self, negative: bool, digits: &str) -> TypeId {
788        let normalized = self.normalize_bigint_literal(digits);
789        if normalized == "0" {
790            return self.literal_bigint(&normalized);
791        }
792        if !negative {
793            return self.literal_bigint(&normalized);
794        }
795
796        let mut value = String::with_capacity(normalized.len() + 1);
797        value.push('-');
798        value.push_str(&normalized);
799        let atom = self.string_interner.intern_owned(value);
800        self.intern(TypeData::Literal(LiteralValue::BigInt(atom)))
801    }
802
    /// Normalize a bigint literal's text to canonical decimal digits.
    ///
    /// Strips `_` separators, decodes `0x`/`0o`/`0b` prefixes, removes leading
    /// zeros, and converts non-decimal radixes to base 10 with arbitrary
    /// precision. Returns `"0"` for empty input or any digit that is invalid
    /// for the detected radix. Sign handling is the caller's responsibility.
    fn normalize_bigint_literal(&self, value: &str) -> String {
        // Numeric separators are insignificant; drop them up front.
        let stripped = value.replace('_', "");
        if stripped.is_empty() {
            return "0".to_string();
        }

        // Detect the radix from the standard JS/TS literal prefixes.
        let (base, digits) = if stripped.starts_with("0x") || stripped.starts_with("0X") {
            (16, &stripped[2..])
        } else if stripped.starts_with("0o") || stripped.starts_with("0O") {
            (8, &stripped[2..])
        } else if stripped.starts_with("0b") || stripped.starts_with("0B") {
            (2, &stripped[2..])
        } else {
            (10, stripped.as_str())
        };

        // A bare prefix like "0x" has no digits; treat it as zero.
        if digits.is_empty() {
            return "0".to_string();
        }

        if base == 10 {
            // Decimal input only needs leading-zero removal.
            let normalized = digits.trim_start_matches('0');
            return if normalized.is_empty() {
                "0".to_string()
            } else {
                normalized.to_string()
            };
        }

        // Arbitrary-precision radix conversion: `decimal` holds base-10 digits
        // in little-endian order (least significant digit first).
        let mut decimal: Vec<u8> = vec![0];
        for ch in digits.chars() {
            let Some(digit) = ch.to_digit(base) else {
                // Digit invalid for this radix; give up and return zero.
                return "0".to_string();
            };
            let digit = digit as u16;
            // Multiply the accumulated number by `base` and add the new digit,
            // propagating carries through the digit vector. u16 is wide enough:
            // max intermediate is 9 * 16 + carry.
            let mut carry = digit;
            let base = base as u16;
            for dec in decimal.iter_mut() {
                let value = u16::from(*dec) * base + carry;
                *dec = (value % 10) as u8;
                carry = value / 10;
            }
            while carry > 0 {
                decimal.push((carry % 10) as u8);
                carry /= 10;
            }
        }

        // Trim leading zeros (stored at the tail in little-endian order),
        // always keeping at least one digit.
        while decimal.len() > 1 && *decimal.last().unwrap_or(&0) == 0 {
            decimal.pop();
        }

        // Emit most-significant digit first.
        let mut out = String::with_capacity(decimal.len());
        for digit in decimal.iter().rev() {
            out.push(char::from(b'0' + *digit));
        }
        out
    }
861
862    /// Intern a union type, normalizing and deduplicating members
863    pub fn union(&self, members: Vec<TypeId>) -> TypeId {
864        self.union_from_iter(members)
865    }
866
867    /// Intern a union type from a vector that is already sorted and deduped.
868    /// This is an O(N) operation that avoids redundant sorting.
869    pub fn union_from_sorted_vec(&self, flat: Vec<TypeId>) -> TypeId {
870        if flat.is_empty() {
871            return TypeId::NEVER;
872        }
873        if flat.len() == 1 {
874            return flat[0];
875        }
876
877        let list_id = self.intern_type_list(flat);
878        self.intern(TypeData::Union(list_id))
879    }
880
881    /// Intern a union type while preserving member structure.
882    ///
883    /// This keeps unknown/literal members intact for property access checks.
884    pub fn union_preserve_members(&self, members: Vec<TypeId>) -> TypeId {
885        if members.is_empty() {
886            return TypeId::NEVER;
887        }
888
889        let mut flat: TypeListBuffer = SmallVec::new();
890        for member in members {
891            if let Some(TypeData::Union(inner)) = self.lookup(member) {
892                let members = self.type_list(inner);
893                flat.extend(members.iter().copied());
894            } else {
895                flat.push(member);
896            }
897        }
898
899        flat.sort_by_key(|id| id.0);
900        flat.dedup();
901        flat.retain(|id| *id != TypeId::NEVER);
902
903        if flat.is_empty() {
904            return TypeId::NEVER;
905        }
906        if flat.len() == 1 {
907            return flat[0];
908        }
909
910        let list_id = self.intern_type_list(flat.into_vec());
911        self.intern(TypeData::Union(list_id))
912    }
913
914    /// Fast path for unions that already fit in registers.
915    pub fn union2(&self, left: TypeId, right: TypeId) -> TypeId {
916        // Fast paths to avoid expensive normalize_union for trivial cases
917        if left == right {
918            return left;
919        }
920        if left == TypeId::NEVER {
921            return right;
922        }
923        if right == TypeId::NEVER {
924            return left;
925        }
926        self.union_from_iter([left, right])
927    }
928
929    /// Fast path for three-member unions without heap allocations.
930    pub fn union3(&self, first: TypeId, second: TypeId, third: TypeId) -> TypeId {
931        self.union_from_iter([first, second, third])
932    }
933
934    fn union_from_iter<I>(&self, members: I) -> TypeId
935    where
936        I: IntoIterator<Item = TypeId>,
937    {
938        let mut iter = members.into_iter();
939        let Some(first) = iter.next() else {
940            return TypeId::NEVER;
941        };
942        let Some(second) = iter.next() else {
943            return first;
944        };
945
946        let mut flat: TypeListBuffer = SmallVec::new();
947        self.push_union_member(&mut flat, first);
948        self.push_union_member(&mut flat, second);
949        for member in iter {
950            self.push_union_member(&mut flat, member);
951        }
952
953        self.normalize_union(flat)
954    }
955
956    fn push_union_member(&self, flat: &mut TypeListBuffer, member: TypeId) {
957        if let Some(TypeData::Union(inner)) = self.lookup(member) {
958            let members = self.type_list(inner);
959            flat.extend(members.iter().copied());
960        } else {
961            flat.push(member);
962        }
963    }
964
    /// Collapse a flattened member buffer into a canonical union `TypeId`.
    ///
    /// Pipeline (order matters): sort/dedup for consistent hashing; absorb
    /// `error`/`any`/`unknown` (each swallows the whole union); drop `never`;
    /// fold literals into their covering primitives; then reduce members that
    /// are subtypes of other members. Each stage can shrink the buffer, so the
    /// 0/1-member fast paths are re-checked after every stage.
    fn normalize_union(&self, mut flat: TypeListBuffer) -> TypeId {
        // Deduplicate and sort for consistent hashing
        flat.sort_by_key(|id| id.0);
        flat.dedup();

        // Handle special cases
        if flat.contains(&TypeId::ERROR) {
            return TypeId::ERROR;
        }
        if flat.is_empty() {
            return TypeId::NEVER;
        }
        if flat.len() == 1 {
            return flat[0];
        }
        // If any member is `any`, the union is `any`
        if flat.contains(&TypeId::ANY) {
            return TypeId::ANY;
        }
        // If any member is `unknown`, the union is `unknown`
        if flat.contains(&TypeId::UNKNOWN) {
            return TypeId::UNKNOWN;
        }
        // Remove `never` from unions
        flat.retain(|id| *id != TypeId::NEVER);
        if flat.is_empty() {
            return TypeId::NEVER;
        }
        if flat.len() == 1 {
            return flat[0];
        }

        // Absorb literal types into their corresponding primitive types
        // e.g., "a" | string | number => string | number
        // e.g., 1 | 2 | number => number
        // e.g., true | boolean => boolean
        self.absorb_literals_into_primitives(&mut flat);

        if flat.is_empty() {
            return TypeId::NEVER;
        }
        if flat.len() == 1 {
            return flat[0];
        }

        // Reduce union using subtype checks (e.g., {a: 1} | {a: 1 | number} => {a: 1 | number})
        // Skip reduction if union contains complex types (TypeParameters, Lazy, etc.)
        // since subtype relations involving unresolved/parametric members are
        // not decidable here.
        let has_complex = flat.iter().any(|&id| {
            matches!(
                self.lookup(id),
                Some(TypeData::TypeParameter(_) | TypeData::Lazy(_))
            )
        });
        if !has_complex {
            self.reduce_union_subtypes(&mut flat);
        }

        if flat.is_empty() {
            return TypeId::NEVER;
        }
        if flat.len() == 1 {
            return flat[0];
        }

        let list_id = self.intern_type_list(flat.into_vec());
        self.intern(TypeData::Union(list_id))
    }
1032
1033    /// Intern an intersection type, normalizing and deduplicating members
1034    pub fn intersection(&self, members: Vec<TypeId>) -> TypeId {
1035        self.intersection_from_iter(members)
1036    }
1037
1038    /// Fast path for two-member intersections.
1039    pub fn intersection2(&self, left: TypeId, right: TypeId) -> TypeId {
1040        self.intersection_from_iter([left, right])
1041    }
1042
1043    /// Create an intersection type WITHOUT triggering `normalize_intersection`
1044    ///
1045    /// This is a low-level operation used by the `SubtypeChecker` to merge
1046    /// properties from intersection members without causing infinite recursion.
1047    ///
1048    /// # Safety
1049    /// Only use this when you need to synthesize a type for intermediate checking.
1050    /// Do NOT use for final compiler output (like .d.ts generation) as the
1051    /// resulting type will be "unsimplified".
1052    pub fn intersect_types_raw(&self, members: Vec<TypeId>) -> TypeId {
1053        // Use SmallVec to keep stack allocation benefits
1054        let mut flat: TypeListBuffer = SmallVec::new();
1055
1056        for member in members {
1057            // Structural flattening is safe and cheap
1058            if let Some(TypeData::Intersection(inner)) = self.lookup(member) {
1059                let inner_members = self.type_list(inner);
1060                flat.extend(inner_members.iter().copied());
1061            } else {
1062                flat.push(member);
1063            }
1064        }
1065
1066        // Abort reduction if any member is a Lazy type.
1067        // The interner (Judge) cannot resolve symbols, so if we have unresolved types,
1068        // we must preserve the intersection as-is without attempting to merge or reduce.
1069        let has_unresolved = flat
1070            .iter()
1071            .any(|&id| matches!(self.lookup(id), Some(TypeData::Lazy(_))));
1072        if has_unresolved {
1073            // Basic dedup without any simplification
1074            flat.sort_by_key(|id| id.0);
1075            flat.dedup();
1076            let list_id = self.intern_type_list(flat.into_vec());
1077            return self.intern(TypeData::Intersection(list_id));
1078        }
1079
1080        // =========================================================
1081        // Canonicalization: Handle callable order preservation
1082        // =========================================================
1083        // In TypeScript, intersections of functions represent overloads, and
1084        // order matters. We need to separate callables from non-callables.
1085
1086        let has_callables = flat.iter().any(|&id| self.is_callable_type(id));
1087
1088        if !has_callables {
1089            // Fast path: No callables, sort everything for canonicalization
1090            flat.sort_by_key(|id| id.0);
1091            flat.dedup();
1092        } else {
1093            // Slow path: Separate callables and others to preserve order
1094            let mut callables = SmallVec::<[TypeId; 4]>::new();
1095
1096            // Retain only non-callables in 'flat', move callables to 'callables'
1097            // This preserves the order of callables as they are extracted
1098            let mut i = 0;
1099            while i < flat.len() {
1100                if self.is_callable_type(flat[i]) {
1101                    callables.push(flat.remove(i));
1102                } else {
1103                    i += 1;
1104                }
1105            }
1106
1107            // Sort and dedup non-callables
1108            flat.sort_by_key(|id| id.0);
1109            flat.dedup();
1110
1111            // Deduplicate callables (preserving order)
1112            let mut seen = FxHashSet::default();
1113            callables.retain(|id| seen.insert(*id));
1114
1115            // Merge: Put non-callables first (canonical), then callables (ordered)
1116            flat.extend(callables);
1117        }
1118
1119        // =========================================================
1120        // O(1) Fast Paths (Safe to do without recursion)
1121        // =========================================================
1122
1123        // 1. If any member is Never, the result is Never
1124        if flat.contains(&TypeId::NEVER) {
1125            return TypeId::NEVER;
1126        }
1127
1128        // 2. If any member is Any, the result is Any (unless Never is present)
1129        if flat.contains(&TypeId::ANY) {
1130            return TypeId::ANY;
1131        }
1132
1133        // 3. Remove Unknown (Identity element for intersection)
1134        flat.retain(|id| *id != TypeId::UNKNOWN);
1135
1136        // 4. Check for disjoint primitives (e.g., string & number = never)
1137        // If we have multiple intrinsic primitive types that are disjoint, return never
1138        if self.has_disjoint_primitives(&flat) {
1139            return TypeId::NEVER;
1140        }
1141
1142        // =========================================================
1143        // Final Construction
1144        // =========================================================
1145
1146        if flat.is_empty() {
1147            return TypeId::UNKNOWN;
1148        }
1149        if flat.len() == 1 {
1150            return flat[0];
1151        }
1152
1153        // Create the intersection directly without calling normalize_intersection
1154        let list_id = self.intern_type_list(flat.into_vec());
1155        self.intern(TypeData::Intersection(list_id))
1156    }
1157
1158    /// Convenience wrapper for raw intersection of two types
1159    pub fn intersect_types_raw2(&self, a: TypeId, b: TypeId) -> TypeId {
1160        self.intersect_types_raw(vec![a, b])
1161    }
1162
1163    fn intersection_from_iter<I>(&self, members: I) -> TypeId
1164    where
1165        I: IntoIterator<Item = TypeId>,
1166    {
1167        let mut iter = members.into_iter();
1168        let Some(first) = iter.next() else {
1169            return TypeId::UNKNOWN;
1170        };
1171        let Some(second) = iter.next() else {
1172            return first;
1173        };
1174
1175        let mut flat: TypeListBuffer = SmallVec::new();
1176        self.push_intersection_member(&mut flat, first);
1177        self.push_intersection_member(&mut flat, second);
1178        for member in iter {
1179            self.push_intersection_member(&mut flat, member);
1180        }
1181
1182        self.normalize_intersection(flat)
1183    }
1184
1185    fn push_intersection_member(&self, flat: &mut TypeListBuffer, member: TypeId) {
1186        if let Some(TypeData::Intersection(inner)) = self.lookup(member) {
1187            let members = self.type_list(inner);
1188            flat.extend(members.iter().copied());
1189        } else {
1190            flat.push(member);
1191        }
1192    }
1193
1194    /// Check if a type is a function or callable (order-sensitive in intersections).
1195    ///
1196    /// In TypeScript, intersection types are commutative for structural types
1197    /// (objects, primitives) but non-commutative for call signatures.
1198    /// For example: `((a: string) => void) & ((a: number) => void)` has a different
1199    /// overload order than the reverse.
1200    fn is_callable_type(&self, id: TypeId) -> bool {
1201        matches!(
1202            self.lookup(id),
1203            Some(TypeData::Function(_) | TypeData::Callable(_))
1204        )
1205    }
1206
1207    // Intersection normalization, empty object elimination, callable/object
1208    // merging, and distribution are in `intern_intersection.rs`.
1209
1210    /// Intern an array type
1211    pub fn array(&self, element: TypeId) -> TypeId {
1212        self.intern(TypeData::Array(element))
1213    }
1214
1215    /// Canonical `this` type.
1216    pub fn this_type(&self) -> TypeId {
1217        self.intern(TypeData::ThisType)
1218    }
1219
1220    /// Intern a readonly array type
1221    /// Returns a distinct type from mutable arrays to enforce readonly semantics
1222    pub fn readonly_array(&self, element: TypeId) -> TypeId {
1223        let array_type = self.array(element);
1224        self.intern(TypeData::ReadonlyType(array_type))
1225    }
1226
1227    /// Intern a tuple type
1228    pub fn tuple(&self, elements: Vec<TupleElement>) -> TypeId {
1229        let list_id = self.intern_tuple_list(elements);
1230        self.intern(TypeData::Tuple(list_id))
1231    }
1232
1233    /// Intern a readonly tuple type
1234    /// Returns a distinct type from mutable tuples to enforce readonly semantics
1235    pub fn readonly_tuple(&self, elements: Vec<TupleElement>) -> TypeId {
1236        let tuple_type = self.tuple(elements);
1237        self.intern(TypeData::ReadonlyType(tuple_type))
1238    }
1239
1240    /// Wrap any type in a `ReadonlyType` marker
1241    /// This is used for the `readonly` type operator
1242    pub fn readonly_type(&self, inner: TypeId) -> TypeId {
1243        self.intern(TypeData::ReadonlyType(inner))
1244    }
1245
1246    /// Wrap a type in a `NoInfer` marker.
1247    pub fn no_infer(&self, inner: TypeId) -> TypeId {
1248        self.intern(TypeData::NoInfer(inner))
1249    }
1250
1251    /// Create a `unique symbol` type for a symbol declaration.
1252    pub fn unique_symbol(&self, symbol: SymbolRef) -> TypeId {
1253        self.intern(TypeData::UniqueSymbol(symbol))
1254    }
1255
1256    /// Create an `infer` binder with the provided info.
1257    pub fn infer(&self, info: TypeParamInfo) -> TypeId {
1258        self.intern(TypeData::Infer(info))
1259    }
1260
1261    pub fn bound_parameter(&self, index: u32) -> TypeId {
1262        self.intern(TypeData::BoundParameter(index))
1263    }
1264
1265    pub fn recursive(&self, depth: u32) -> TypeId {
1266        self.intern(TypeData::Recursive(depth))
1267    }
1268
1269    /// Wrap a type in a `KeyOf` marker.
1270    pub fn keyof(&self, inner: TypeId) -> TypeId {
1271        self.intern(TypeData::KeyOf(inner))
1272    }
1273
1274    /// Build an indexed access type (`T[K]`).
1275    pub fn index_access(&self, object_type: TypeId, index_type: TypeId) -> TypeId {
1276        self.intern(TypeData::IndexAccess(object_type, index_type))
1277    }
1278
1279    /// Build a nominal enum type that preserves `DefId` identity and carries
1280    /// structural member information for compatibility with primitive relations.
1281    pub fn enum_type(&self, def_id: DefId, structural_type: TypeId) -> TypeId {
1282        self.intern(TypeData::Enum(def_id, structural_type))
1283    }
1284
1285    /// Intern an object type with properties.
1286    pub fn object(&self, properties: Vec<PropertyInfo>) -> TypeId {
1287        self.object_with_flags(properties, ObjectFlags::empty())
1288    }
1289
1290    /// Intern a fresh object type with properties.
1291    pub fn object_fresh(&self, properties: Vec<PropertyInfo>) -> TypeId {
1292        self.object_with_flags(properties, ObjectFlags::FRESH_LITERAL)
1293    }
1294
1295    /// Intern an object type with properties and custom flags.
1296    pub fn object_with_flags(
1297        &self,
1298        mut properties: Vec<PropertyInfo>,
1299        flags: ObjectFlags,
1300    ) -> TypeId {
1301        // Sort by property name for consistent hashing
1302        properties.sort_by_key(|a| a.name);
1303        let shape_id = self.intern_object_shape(ObjectShape {
1304            flags,
1305            properties,
1306            string_index: None,
1307            number_index: None,
1308            symbol: None,
1309        });
1310        self.intern(TypeData::Object(shape_id))
1311    }
1312
1313    /// Intern an object type with properties, custom flags, and optional symbol.
1314    /// This is used for interfaces that need symbol tracking but no index signatures.
1315    pub fn object_with_flags_and_symbol(
1316        &self,
1317        mut properties: Vec<PropertyInfo>,
1318        flags: ObjectFlags,
1319        symbol: Option<tsz_binder::SymbolId>,
1320    ) -> TypeId {
1321        // Sort by property name for consistent hashing
1322        properties.sort_by_key(|a| a.name);
1323        let shape_id = self.intern_object_shape(ObjectShape {
1324            flags,
1325            properties,
1326            string_index: None,
1327            number_index: None,
1328            symbol,
1329        });
1330        self.intern(TypeData::Object(shape_id))
1331    }
1332
1333    /// Intern an object type with index signatures.
1334    pub fn object_with_index(&self, mut shape: ObjectShape) -> TypeId {
1335        // Sort properties by name for consistent hashing
1336        shape.properties.sort_by_key(|a| a.name);
1337        let shape_id = self.intern_object_shape(shape);
1338        self.intern(TypeData::ObjectWithIndex(shape_id))
1339    }
1340
1341    /// Intern a function type
1342    pub fn function(&self, shape: FunctionShape) -> TypeId {
1343        let shape_id = self.intern_function_shape(shape);
1344        self.intern(TypeData::Function(shape_id))
1345    }
1346
1347    /// Intern a callable type with overloaded signatures
1348    pub fn callable(&self, shape: CallableShape) -> TypeId {
1349        let shape_id = self.intern_callable_shape(shape);
1350        self.intern(TypeData::Callable(shape_id))
1351    }
1352
1353    /// Intern a conditional type
1354    pub fn conditional(&self, conditional: ConditionalType) -> TypeId {
1355        let conditional_id = self.intern_conditional_type(conditional);
1356        self.intern(TypeData::Conditional(conditional_id))
1357    }
1358
1359    /// Intern a mapped type
1360    pub fn mapped(&self, mapped: MappedType) -> TypeId {
1361        let mapped_id = self.intern_mapped_type(mapped);
1362        self.intern(TypeData::Mapped(mapped_id))
1363    }
1364
1365    /// Build a string intrinsic (`Uppercase`, `Lowercase`, etc.) marker.
1366    pub fn string_intrinsic(
1367        &self,
1368        kind: crate::types::StringIntrinsicKind,
1369        type_arg: TypeId,
1370    ) -> TypeId {
1371        self.intern(TypeData::StringIntrinsic { kind, type_arg })
1372    }
1373
1374    /// Intern a type reference (deprecated - use `lazy()` with `DefId` instead).
1375    ///
1376    /// This method is kept for backward compatibility with tests and legacy code.
1377    /// It converts `SymbolRef` to `DefId` and creates `TypeData::Lazy`.
1378    ///
1379    /// Deprecated: new code should use `lazy(def_id)` instead.
1380    pub fn reference(&self, symbol: SymbolRef) -> TypeId {
1381        // Convert SymbolRef to DefId by wrapping the raw u32 value
1382        // This maintains the same identity while using the new TypeData::Lazy variant
1383        let def_id = DefId(symbol.0);
1384        self.intern(TypeData::Lazy(def_id))
1385    }
1386
1387    /// Intern a lazy type reference (DefId-based).
1388    ///
1389    /// This is the replacement for `reference()` that uses Solver-owned
1390    /// `DefIds` instead of Binder-owned `SymbolRefs`.
1391    ///
1392    /// Use this method for all new type references
1393    /// to enable O(1) type equality across Binder and Solver boundaries.
1394    pub fn lazy(&self, def_id: DefId) -> TypeId {
1395        self.intern(TypeData::Lazy(def_id))
1396    }
1397
1398    /// Intern a type parameter.
1399    pub fn type_param(&self, info: TypeParamInfo) -> TypeId {
1400        self.intern(TypeData::TypeParameter(info))
1401    }
1402
1403    /// Intern a type query (`typeof value`) marker.
1404    pub fn type_query(&self, symbol: SymbolRef) -> TypeId {
1405        self.intern(TypeData::TypeQuery(symbol))
1406    }
1407
1408    /// Intern a generic type application
1409    pub fn application(&self, base: TypeId, args: Vec<TypeId>) -> TypeId {
1410        let app_id = self.intern_application(TypeApplication { base, args });
1411        self.intern(TypeData::Application(app_id))
1412    }
1413}
1414
1415impl Default for TypeInterner {
1416    fn default() -> Self {
1417        Self::new()
1418    }
1419}
1420
// Unit and concurrency tests live out-of-tree; `#[path]` wires them in as
// child modules so they can exercise this module's private items.
#[cfg(test)]
#[path = "../tests/intern_tests.rs"]
mod tests;

#[cfg(test)]
#[path = "../tests/concurrent_tests.rs"]
mod concurrent_tests;