dependency_injector/container.rs

1//! High-performance dependency injection container
2//!
3//! The `Container` is the core of the DI system. It stores services and
4//! resolves dependencies with minimal overhead.
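//!
//! A quick-start sketch (any `Clone + Send + Sync + 'static` type acts as a
//! service here, mirroring the examples on [`Container`]):
//!
//! ```rust
//! use dependency_injector::Container;
//!
//! #[derive(Clone)]
//! struct Greeter { greeting: String }
//!
//! let container = Container::new();
//! container.singleton(Greeter { greeting: "hello".into() });
//!
//! let greeter = container.get::<Greeter>().unwrap();
//! assert_eq!(greeter.greeting, "hello");
//! ```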
5
6use crate::factory::AnyFactory;
7use crate::storage::{downcast_arc_unchecked, ServiceStorage};
8use crate::{DiError, Injectable, Result};
9use std::any::{Any, TypeId};
10use std::cell::UnsafeCell;
11use std::sync::atomic::{AtomicBool, Ordering};
12use std::sync::Arc;
13
14#[cfg(feature = "logging")]
15use tracing::{debug, trace};
16
17// =============================================================================
18// Thread-Local Hot Cache (Phase 5 optimization)
19// =============================================================================
20
21/// Number of slots in the thread-local hot cache (power of 2 for fast indexing)
/// Four slots keep the cache compact (about two 64-byte cache lines) and provide good hit rates for typical apps.
23const HOT_CACHE_SLOTS: usize = 4;
24
25/// A cached service entry
26///
27/// Phase 13 optimization: Stores pre-computed u64 hash instead of TypeId
28/// to avoid transmute on every comparison.
29struct CacheEntry {
30    /// Pre-computed hash of TypeId (avoids transmute on lookup)
31    type_hash: u64,
32    /// Pointer to the storage this was resolved from (for scope identity)
33    storage_ptr: usize,
34    /// The cached service
35    service: Arc<dyn Any + Send + Sync>,
36}
37
38/// Thread-local cache for frequently accessed services.
39///
40/// This provides ~8-10ns speedup for hot services by avoiding DashMap lookups.
41/// Uses a simple direct-mapped cache with TypeId + storage pointer as key.
42struct HotCache {
43    entries: [Option<CacheEntry>; HOT_CACHE_SLOTS],
44}
45
46impl HotCache {
47    const fn new() -> Self {
48        Self {
49            entries: [const { None }; HOT_CACHE_SLOTS],
50        }
51    }
52
53    /// Get a cached service if present for a specific container
54    ///
55    /// Phase 12+13 optimization: Uses UnsafeCell (no RefCell borrow check)
56    /// and pre-computed type_hash (no transmute on lookup).
57    #[inline(always)]
58    fn get<T: Send + Sync + 'static>(&self, storage_ptr: usize) -> Option<Arc<T>> {
59        let type_hash = Self::type_hash::<T>();
60        let slot = Self::slot_for_hash(type_hash, storage_ptr);
61
62        if let Some(entry) = &self.entries[slot] {
63            // Phase 13: Compare u64 hash directly (faster than TypeId comparison)
64            if entry.type_hash == type_hash && entry.storage_ptr == storage_ptr {
                // Cache hit - clone and downcast without a checked cast.
                // SAFETY: the entry was inserted under this (type_hash, storage_ptr)
                // key for type T; this relies on distinct types never colliding in
                // the 64-bit hash derived from their TypeIds.
67                let arc = entry.service.clone();
68                return Some(unsafe { downcast_arc_unchecked(arc) });
69            }
70        }
71        None
72    }
73
74    /// Insert a service into the cache for a specific container
75    #[inline]
76    fn insert<T: Injectable>(&mut self, storage_ptr: usize, service: Arc<T>) {
77        let type_hash = Self::type_hash::<T>();
78        let slot = Self::slot_for_hash(type_hash, storage_ptr);
79
80        self.entries[slot] = Some(CacheEntry {
81            type_hash,
82            storage_ptr,
83            service: service as Arc<dyn Any + Send + Sync>,
84        });
85    }
86
87    /// Clear the cache (call when container is modified)
88    #[inline]
89    fn clear(&mut self) {
90        self.entries = [const { None }; HOT_CACHE_SLOTS];
91    }
92
    /// Extract a u64 hash from a `TypeId` (monomorphized per type, so it reduces to
    /// a cheap per-type constant in optimized builds)
94    #[inline(always)]
95    fn type_hash<T: 'static>() -> u64 {
96        let type_id = TypeId::of::<T>();
        // SAFETY: `transmute_copy` reads only the first 8 bytes of the TypeId's
        // internal representation (currently a 128-bit hash; its layout is not
        // specified and it is not #[repr(transparent)]). The resulting u64 is used
        // as the hot-cache key; see `HotCache::get` for the collision assumption.
98        unsafe { std::mem::transmute_copy(&type_id) }
99    }
100
101    /// Calculate slot index from pre-computed type hash and storage pointer
102    #[inline(always)]
103    fn slot_for_hash(type_hash: u64, storage_ptr: usize) -> usize {
104        // Fast bit mixing: XOR with rotated storage_ptr for good distribution
105        let mixed = type_hash ^ (storage_ptr as u64).rotate_left(32);
106
107        // Use golden ratio multiplication for final mixing (fast & good distribution)
108        let slot = mixed.wrapping_mul(0x9e3779b97f4a7c15);
109
110        (slot as usize) & (HOT_CACHE_SLOTS - 1)
111    }
112}
113
114thread_local! {
115    /// Thread-local hot cache for frequently accessed services
116    ///
117    /// Phase 12 optimization: Uses UnsafeCell instead of RefCell to eliminate
118    /// borrow checking overhead. This is safe because thread_local! guarantees
119    /// single-threaded access.
120    static HOT_CACHE: UnsafeCell<HotCache> = const { UnsafeCell::new(HotCache::new()) };
121}
122
123/// Helper to access the hot cache without RefCell overhead
124///
/// SAFETY: thread_local! guarantees single-threaded access, so we can use
/// UnsafeCell without data races. Aliasing is avoided because each borrow lives
/// only for the duration of the closure and the closures do not re-enter the
/// cache accessors, so shared and mutable borrows never overlap.
128#[inline(always)]
129fn with_hot_cache<F, R>(f: F) -> R
130where
131    F: FnOnce(&HotCache) -> R,
132{
133    HOT_CACHE.with(|cell| {
134        // SAFETY: thread_local guarantees single-threaded access
135        let cache = unsafe { &*cell.get() };
136        f(cache)
137    })
138}
139
140/// Helper to mutably access the hot cache
141#[inline(always)]
142fn with_hot_cache_mut<F, R>(f: F) -> R
143where
144    F: FnOnce(&mut HotCache) -> R,
145{
146    HOT_CACHE.with(|cell| {
147        // SAFETY: thread_local guarantees single-threaded access
148        let cache = unsafe { &mut *cell.get() };
149        f(cache)
150    })
151}
152
153/// High-performance dependency injection container.
154///
155/// Uses lock-free data structures for maximum concurrent throughput.
156/// Supports hierarchical scopes with full parent chain resolution.
157///
158/// # Examples
159///
160/// ```rust
161/// use dependency_injector::Container;
162///
163/// #[derive(Clone)]
164/// struct MyService { name: String }
165///
166/// let container = Container::new();
167/// container.singleton(MyService { name: "test".into() });
168///
169/// let service = container.get::<MyService>().unwrap();
170/// assert_eq!(service.name, "test");
171/// ```
172#[derive(Clone)]
173pub struct Container {
174    /// Service storage (lock-free)
175    storage: Arc<ServiceStorage>,
176    /// Parent storage - strong reference for fast resolution (Phase 2 optimization)
177    /// This avoids Weak::upgrade() cost on every parent resolution
178    parent_storage: Option<Arc<ServiceStorage>>,
179    /// Lock state - uses AtomicBool for fast lock checking (no contention)
180    locked: Arc<AtomicBool>,
181    /// Scope depth for debugging
182    depth: u32,
183}
184
185impl Container {
186    /// Create a new root container.
187    ///
188    /// # Examples
189    ///
190    /// ```rust
191    /// use dependency_injector::Container;
192    /// let container = Container::new();
193    /// ```
194    #[inline]
195    pub fn new() -> Self {
196        #[cfg(feature = "logging")]
197        debug!(
198            target: "dependency_injector",
199            depth = 0,
200            "Creating new root DI container"
201        );
202
203        Self {
204            storage: Arc::new(ServiceStorage::new()),
205            parent_storage: None,
206            locked: Arc::new(AtomicBool::new(false)),
207            depth: 0,
208        }
209    }
210
211    /// Create a container with pre-allocated capacity.
212    ///
213    /// Use this when you know approximately how many services will be registered.
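    ///
    /// # Examples
    ///
    /// A minimal sketch with a couple of placeholder service types:
    ///
    /// ```rust
    /// use dependency_injector::Container;
    ///
    /// #[derive(Clone)]
    /// struct Config;
    /// #[derive(Clone)]
    /// struct Database;
    ///
    /// // Roughly two services will be registered, so reserve space up front.
    /// let container = Container::with_capacity(2);
    /// container.singleton(Config);
    /// container.singleton(Database);
    /// assert_eq!(container.len(), 2);
    /// ```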
214    #[inline]
215    pub fn with_capacity(capacity: usize) -> Self {
216        Self {
217            storage: Arc::new(ServiceStorage::with_capacity(capacity)),
218            parent_storage: None,
219            locked: Arc::new(AtomicBool::new(false)),
220            depth: 0,
221        }
222    }
223
224    /// Create a child scope that inherits from this container.
225    ///
226    /// Child scopes can:
227    /// - Access all services from parent scopes
228    /// - Override parent services with local registrations
229    /// - Have their own transient/scoped services
230    ///
231    /// # Examples
232    ///
233    /// ```rust
234    /// use dependency_injector::Container;
235    ///
236    /// #[derive(Clone)]
237    /// struct AppConfig { debug: bool }
238    ///
239    /// #[derive(Clone)]
240    /// struct RequestId(String);
241    ///
242    /// let root = Container::new();
243    /// root.singleton(AppConfig { debug: true });
244    ///
245    /// let request = root.scope();
246    /// request.singleton(RequestId("req-123".into()));
247    ///
248    /// // Request scope can access root config
249    /// assert!(request.contains::<AppConfig>());
250    /// ```
251    #[inline]
252    pub fn scope(&self) -> Self {
253        let child_depth = self.depth + 1;
254
255        #[cfg(feature = "logging")]
256        debug!(
257            target: "dependency_injector",
258            parent_depth = self.depth,
259            child_depth = child_depth,
260            parent_services = self.storage.len(),
261            "Creating child scope from parent container"
262        );
263
264        Self {
265            // Phase 9: Storage now holds parent reference for deep chain resolution
266            storage: Arc::new(ServiceStorage::with_parent(Arc::clone(&self.storage))),
267            parent_storage: Some(Arc::clone(&self.storage)), // Keep for quick parent access
268            locked: Arc::new(AtomicBool::new(false)),
269            depth: child_depth,
270        }
271    }
272
273    /// Alias for `scope()` - creates a child container.
274    #[inline]
275    pub fn create_scope(&self) -> Self {
276        self.scope()
277    }
278
279    // =========================================================================
280    // Registration Methods
281    // =========================================================================
282
283    /// Register a singleton service (eager).
284    ///
285    /// The instance is stored immediately and shared across all resolves.
286    ///
287    /// # Examples
288    ///
289    /// ```rust
290    /// use dependency_injector::Container;
291    ///
292    /// #[derive(Clone)]
293    /// struct Database { url: String }
294    ///
295    /// let container = Container::new();
296    /// container.singleton(Database { url: "postgres://localhost".into() });
297    /// ```
298    #[inline]
299    pub fn singleton<T: Injectable>(&self, instance: T) {
300        self.check_not_locked();
301
302        let type_id = TypeId::of::<T>();
        #[cfg(feature = "logging")]
        let type_name = std::any::type_name::<T>();
304
305        #[cfg(feature = "logging")]
306        debug!(
307            target: "dependency_injector",
308            service = type_name,
309            lifetime = "singleton",
310            depth = self.depth,
311            service_count = self.storage.len() + 1,
312            "Registering singleton service"
313        );
314
315        // Phase 2: Use enum-based AnyFactory directly
316        self.storage.insert(type_id, AnyFactory::singleton(instance));
317    }
318
319    /// Register a lazy singleton service.
320    ///
321    /// The factory is called once on first access, then the instance is cached.
322    ///
323    /// # Examples
324    ///
325    /// ```rust
326    /// use dependency_injector::Container;
327    ///
328    /// #[derive(Clone)]
329    /// struct ExpensiveService { data: Vec<u8> }
330    ///
331    /// let container = Container::new();
332    /// container.lazy(|| ExpensiveService {
333    ///     data: vec![0; 1024 * 1024], // Only allocated on first use
334    /// });
335    /// ```
336    #[inline]
337    pub fn lazy<T: Injectable, F>(&self, factory: F)
338    where
339        F: Fn() -> T + Send + Sync + 'static,
340    {
341        self.check_not_locked();
342
343        let type_id = TypeId::of::<T>();
        #[cfg(feature = "logging")]
        let type_name = std::any::type_name::<T>();
345
346        #[cfg(feature = "logging")]
347        debug!(
348            target: "dependency_injector",
349            service = type_name,
350            lifetime = "lazy_singleton",
351            depth = self.depth,
352            service_count = self.storage.len() + 1,
353            "Registering lazy singleton service (will be created on first access)"
354        );
355
356        // Phase 2: Use enum-based AnyFactory directly
357        self.storage.insert(type_id, AnyFactory::lazy(factory));
358    }
359
360    /// Register a transient service.
361    ///
362    /// A new instance is created on every resolve.
363    ///
364    /// # Examples
365    ///
366    /// ```rust
367    /// use dependency_injector::Container;
368    /// use std::sync::atomic::{AtomicU64, Ordering};
369    ///
370    /// static COUNTER: AtomicU64 = AtomicU64::new(0);
371    ///
372    /// #[derive(Clone)]
373    /// struct RequestId(u64);
374    ///
375    /// let container = Container::new();
376    /// container.transient(|| RequestId(COUNTER.fetch_add(1, Ordering::SeqCst)));
377    ///
378    /// let id1 = container.get::<RequestId>().unwrap();
379    /// let id2 = container.get::<RequestId>().unwrap();
380    /// assert_ne!(id1.0, id2.0); // Different instances
381    /// ```
382    #[inline]
383    pub fn transient<T: Injectable, F>(&self, factory: F)
384    where
385        F: Fn() -> T + Send + Sync + 'static,
386    {
387        self.check_not_locked();
388
389        let type_id = TypeId::of::<T>();
        #[cfg(feature = "logging")]
        let type_name = std::any::type_name::<T>();
391
392        #[cfg(feature = "logging")]
393        debug!(
394            target: "dependency_injector",
395            service = type_name,
396            lifetime = "transient",
397            depth = self.depth,
398            service_count = self.storage.len() + 1,
399            "Registering transient service (new instance on every resolve)"
400        );
401
402        // Phase 2: Use enum-based AnyFactory directly
403        self.storage.insert(type_id, AnyFactory::transient(factory));
404    }
405
406    /// Register using a factory (alias for `lazy`).
407    #[inline]
408    pub fn register_factory<T: Injectable, F>(&self, factory: F)
409    where
410        F: Fn() -> T + Send + Sync + 'static,
411    {
412        self.lazy(factory);
413    }
414
415    /// Register an instance (alias for `singleton`).
416    #[inline]
417    pub fn register<T: Injectable>(&self, instance: T) {
418        self.singleton(instance);
419    }
420
421    /// Register a boxed instance.
422    #[inline]
423    #[allow(clippy::boxed_local)]
424    pub fn register_boxed<T: Injectable>(&self, instance: Box<T>) {
425        self.singleton(*instance);
426    }
427
428    /// Register by TypeId directly (advanced use).
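    ///
    /// # Examples
    ///
    /// A minimal sketch; the caller is responsible for making sure the erased `Arc`
    /// really holds the type named by the `TypeId` (here a placeholder `Metrics`):
    ///
    /// ```rust
    /// use dependency_injector::Container;
    /// use std::any::{Any, TypeId};
    /// use std::sync::Arc;
    ///
    /// #[derive(Clone)]
    /// struct Metrics { hits: u64 }
    ///
    /// let container = Container::new();
    /// let erased: Arc<dyn Any + Send + Sync> = Arc::new(Metrics { hits: 0 });
    /// container.register_by_id(TypeId::of::<Metrics>(), erased);
    ///
    /// assert!(container.contains::<Metrics>());
    /// assert_eq!(container.get::<Metrics>().unwrap().hits, 0);
    /// ```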
429    #[inline]
430    pub fn register_by_id(&self, type_id: TypeId, instance: Arc<dyn Any + Send + Sync>) {
431        self.check_not_locked();
432
433        // Phase 2: Use the singleton factory with pre-erased Arc directly
434        self.storage.insert(
435            type_id,
436            AnyFactory::Singleton(crate::factory::SingletonFactory { instance }),
437        );
438    }
439
440    // =========================================================================
441    // Resolution Methods
442    // =========================================================================
443
444    /// Resolve a service by type.
445    ///
446    /// Returns `Arc<T>` for zero-copy sharing. Walks the parent chain if
447    /// not found in the current scope.
448    ///
449    /// # Performance
450    ///
451    /// Uses thread-local caching for frequently accessed services (~8ns vs ~19ns).
452    /// The cache is automatically populated on first access.
453    ///
454    /// # Examples
455    ///
456    /// ```rust
457    /// use dependency_injector::Container;
458    ///
459    /// #[derive(Clone)]
460    /// struct MyService;
461    ///
462    /// let container = Container::new();
463    /// container.singleton(MyService);
464    ///
465    /// let service = container.get::<MyService>().unwrap();
466    /// ```
467    #[inline]
468    pub fn get<T: Injectable>(&self) -> Result<Arc<T>> {
469        // Get storage pointer for cache key (unique per container scope)
470        let storage_ptr = Arc::as_ptr(&self.storage) as usize;
471
472        // Phase 5+12: Check thread-local hot cache first (UnsafeCell, no RefCell overhead)
473        // Note: Transients won't be in cache, so they'll fall through to get_and_cache
474        if let Some(cached) = with_hot_cache(|cache| cache.get::<T>(storage_ptr)) {
475            #[cfg(feature = "logging")]
476            trace!(
477                target: "dependency_injector",
478                service = std::any::type_name::<T>(),
479                depth = self.depth,
480                location = "hot_cache",
481                "Service resolved from thread-local cache"
482            );
483            return Ok(cached);
484        }
485
486        // Cache miss - resolve normally and cache the result (unless transient)
487        self.get_and_cache::<T>(storage_ptr)
488    }
489
490    /// Internal: Resolve and cache a service
491    ///
492    /// Phase 15 optimization: Fast path for root containers (depth == 0) avoids
493    /// function call overhead to resolve_from_parents when there are no parents.
494    #[inline]
495    fn get_and_cache<T: Injectable>(&self, storage_ptr: usize) -> Result<Arc<T>> {
496        let type_id = TypeId::of::<T>();
497
498        #[cfg(feature = "logging")]
499        let type_name = std::any::type_name::<T>();
500
501        #[cfg(feature = "logging")]
502        trace!(
503            target: "dependency_injector",
504            service = type_name,
505            depth = self.depth,
506            "Resolving service (cache miss)"
507        );
508
509        // Try local storage first (most common case)
510        // Use get_with_transient_flag to avoid second DashMap lookup for is_transient
511        if let Some((service, is_transient)) = self.storage.get_with_transient_flag::<T>() {
512            #[cfg(feature = "logging")]
513            trace!(
514                target: "dependency_injector",
515                service = type_name,
516                depth = self.depth,
517                location = "local",
518                "Service resolved from current scope"
519            );
520
521            // Cache non-transient services (transients create new instances each time)
522            if !is_transient {
523                with_hot_cache_mut(|cache| cache.insert(storage_ptr, Arc::clone(&service)));
524            }
525
526            return Ok(service);
527        }
528
529        // Phase 15: Fast path for root containers - no parents to walk
530        if self.depth == 0 {
531            #[cfg(feature = "logging")]
532            debug!(
533                target: "dependency_injector",
534                service = std::any::type_name::<T>(),
535                "Service not found in root container"
536            );
537            return Err(DiError::not_found::<T>());
538        }
539
540        // Walk parent chain (cold path)
541        self.resolve_from_parents::<T>(&type_id, storage_ptr)
542    }
543
544    /// Resolve from parent chain (internal)
545    ///
546    /// Phase 9 optimization: Walks the full parent chain via ServiceStorage.parent.
547    /// This allows services to be resolved from any ancestor scope.
548    ///
549    /// Phase 14 optimization: Marked as cold to improve branch prediction in the
550    /// hot path - most resolutions hit the cache and don't need parent traversal.
551    #[cold]
552    fn resolve_from_parents<T: Injectable>(&self, type_id: &TypeId, storage_ptr: usize) -> Result<Arc<T>> {
        #[cfg(feature = "logging")]
        let type_name = std::any::type_name::<T>();
554
555        #[cfg(feature = "logging")]
556        trace!(
557            target: "dependency_injector",
558            service = type_name,
559            depth = self.depth,
560            "Service not in local scope, walking parent chain"
561        );
562
563        // Walk the full parent chain via storage's parent references
564        let mut current = self.storage.parent();
        #[cfg(feature = "logging")]
        let mut ancestor_depth = self.depth.saturating_sub(1);
566
567        while let Some(storage) = current {
568            if let Some(arc) = storage.resolve(type_id) {
569                // SAFETY: We resolved by TypeId::of::<T>(), so the factory
570                // was registered with the same TypeId and stores type T.
571                let typed: Arc<T> = unsafe { downcast_arc_unchecked(arc) };
572
573                #[cfg(feature = "logging")]
574                trace!(
575                    target: "dependency_injector",
576                    service = type_name,
577                    depth = self.depth,
578                    ancestor_depth = ancestor_depth,
579                    location = "ancestor",
580                    "Service resolved from ancestor scope"
581                );
582
583                // Cache non-transient services from parent (using child's storage ptr as key)
584                if !storage.is_transient(type_id) {
585                    with_hot_cache_mut(|cache| cache.insert(storage_ptr, Arc::clone(&typed)));
586                }
587
588                return Ok(typed);
589            }
590            current = storage.parent();
            #[cfg(feature = "logging")]
            {
                ancestor_depth = ancestor_depth.saturating_sub(1);
            }
592        }
593
594        #[cfg(feature = "logging")]
595        debug!(
596            target: "dependency_injector",
597            service = type_name,
598            depth = self.depth,
599            "Service not found in container or parent chain"
600        );
601
602        Err(DiError::not_found::<T>())
603    }
604
605    /// Clear the thread-local hot cache.
606    ///
    /// Call this after modifying the container (registering or removing services)
    /// if you want subsequent resolutions on this thread to see the changes.
    ///
    /// Note: registration does not touch the thread-local cache, so a service that
    /// has already been resolved on this thread may keep resolving to the stale
    /// cached instance until `clear_cache` is called on that thread.
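    ///
    /// # Examples
    ///
    /// A minimal sketch of explicit invalidation after re-registering a service
    /// (assuming re-registering a type replaces the previous entry and that both
    /// resolutions happen on the same thread):
    ///
    /// ```rust
    /// use dependency_injector::Container;
    ///
    /// #[derive(Clone)]
    /// struct Flag(bool);
    ///
    /// let container = Container::new();
    /// container.singleton(Flag(false));
    /// let _ = container.get::<Flag>(); // populates the thread-local cache
    ///
    /// container.singleton(Flag(true)); // re-register with a new value
    /// container.clear_cache(); // drop the stale thread-local entry
    /// assert!(container.get::<Flag>().unwrap().0);
    /// ```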
612    #[inline]
613    pub fn clear_cache(&self) {
614        with_hot_cache_mut(|cache| cache.clear());
615    }
616
617    /// Pre-warm the thread-local cache with a specific service type.
618    ///
619    /// This can be useful at the start of request handling to ensure
620    /// hot services are already in the cache.
621    ///
622    /// # Example
623    ///
624    /// ```rust
625    /// use dependency_injector::Container;
626    ///
627    /// #[derive(Clone)]
628    /// struct Database;
629    ///
630    /// let container = Container::new();
631    /// container.singleton(Database);
632    ///
633    /// // Pre-warm cache for hot services
634    /// container.warm_cache::<Database>();
635    /// ```
636    #[inline]
637    pub fn warm_cache<T: Injectable>(&self) {
638        // Simply resolve the service to populate the cache
639        let _ = self.get::<T>();
640    }
641
642    /// Alias for `get` - resolve a service.
643    #[inline]
644    pub fn resolve<T: Injectable>(&self) -> Result<Arc<T>> {
645        self.get::<T>()
646    }
647
648    /// Try to resolve, returning None if not found.
649    ///
650    /// # Examples
651    ///
652    /// ```rust
653    /// use dependency_injector::Container;
654    ///
655    /// #[derive(Clone)]
656    /// struct OptionalService;
657    ///
658    /// let container = Container::new();
659    /// assert!(container.try_get::<OptionalService>().is_none());
660    /// ```
661    #[inline]
662    pub fn try_get<T: Injectable>(&self) -> Option<Arc<T>> {
663        self.get::<T>().ok()
664    }
665
666    /// Alias for `try_get`.
667    #[inline]
668    pub fn try_resolve<T: Injectable>(&self) -> Option<Arc<T>> {
669        self.try_get::<T>()
670    }
671
672    // =========================================================================
673    // Query Methods
674    // =========================================================================
675
676    /// Check if a service is registered.
677    ///
678    /// Checks both current scope and parent scopes.
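    ///
    /// # Examples
    ///
    /// A minimal sketch with a placeholder `Config` service:
    ///
    /// ```rust
    /// use dependency_injector::Container;
    ///
    /// #[derive(Clone)]
    /// struct Config;
    ///
    /// let root = Container::new();
    /// root.singleton(Config);
    ///
    /// let child = root.scope();
    /// // The child scope also sees services registered on its parent.
    /// assert!(child.contains::<Config>());
    /// assert!(root.contains::<Config>());
    /// ```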
679    #[inline]
680    pub fn contains<T: Injectable>(&self) -> bool {
681        let type_id = TypeId::of::<T>();
682        self.contains_type_id(&type_id)
683    }
684
685    /// Alias for `contains`.
686    #[inline]
687    pub fn has<T: Injectable>(&self) -> bool {
688        self.contains::<T>()
689    }
690
691    /// Check by TypeId
692    /// Phase 9 optimization: Uses storage's parent chain for deep hierarchy support
693    fn contains_type_id(&self, type_id: &TypeId) -> bool {
694        // Check local storage and full parent chain
695        self.storage.contains_in_chain(type_id)
696    }
697
698    /// Get the number of services in this scope (not including parents).
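    ///
    /// # Examples
    ///
    /// A minimal sketch showing that parent services are not counted:
    ///
    /// ```rust
    /// use dependency_injector::Container;
    ///
    /// #[derive(Clone)]
    /// struct Config;
    ///
    /// let root = Container::new();
    /// root.singleton(Config);
    ///
    /// let child = root.scope();
    /// assert_eq!(root.len(), 1);
    /// assert_eq!(child.len(), 0);
    /// ```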
699    #[inline]
700    pub fn len(&self) -> usize {
701        self.storage.len()
702    }
703
704    /// Check if this scope is empty.
705    #[inline]
706    pub fn is_empty(&self) -> bool {
707        self.storage.is_empty()
708    }
709
710    /// Get all registered TypeIds in this scope.
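    ///
    /// # Examples
    ///
    /// A minimal sketch with a placeholder `Config` service:
    ///
    /// ```rust
    /// use dependency_injector::Container;
    /// use std::any::TypeId;
    ///
    /// #[derive(Clone)]
    /// struct Config;
    ///
    /// let container = Container::new();
    /// container.singleton(Config);
    ///
    /// assert!(container.registered_types().contains(&TypeId::of::<Config>()));
    /// ```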
711    pub fn registered_types(&self) -> Vec<TypeId> {
712        self.storage.type_ids()
713    }
714
715    /// Get the scope depth (0 = root).
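    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```rust
    /// use dependency_injector::Container;
    ///
    /// let root = Container::new();
    /// let child = root.scope();
    /// let grandchild = child.scope();
    ///
    /// assert_eq!(root.depth(), 0);
    /// assert_eq!(child.depth(), 1);
    /// assert_eq!(grandchild.depth(), 2);
    /// ```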
716    #[inline]
717    pub fn depth(&self) -> u32 {
718        self.depth
719    }
720
721    // =========================================================================
722    // Lifecycle Methods
723    // =========================================================================
724
725    /// Lock the container to prevent further registrations.
726    ///
727    /// Useful for ensuring no services are registered after app initialization.
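    ///
    /// # Examples
    ///
    /// A minimal sketch with a placeholder `Config` service; resolution keeps
    /// working after the lock, but further registration would panic:
    ///
    /// ```rust
    /// use dependency_injector::Container;
    ///
    /// #[derive(Clone)]
    /// struct Config;
    ///
    /// let container = Container::new();
    /// container.singleton(Config);
    ///
    /// // Freeze registrations once startup is complete.
    /// container.lock();
    /// assert!(container.is_locked());
    /// assert!(container.get::<Config>().is_ok());
    /// ```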
728    #[inline]
729    pub fn lock(&self) {
730        self.locked.store(true, Ordering::Release);
731
732        #[cfg(feature = "logging")]
733        debug!(
734            target: "dependency_injector",
735            depth = self.depth,
736            service_count = self.storage.len(),
737            "Container locked - no further registrations allowed"
738        );
739    }
740
741    /// Check if the container is locked.
742    #[inline]
743    pub fn is_locked(&self) -> bool {
744        self.locked.load(Ordering::Acquire)
745    }
746
747    /// Freeze the container into an immutable, perfectly-hashed storage.
748    ///
749    /// This creates a `FrozenStorage` that uses minimal perfect hashing for
750    /// O(1) lookups without hash collisions, providing ~5ns faster resolution.
751    ///
752    /// Note: This also locks the container to prevent further registrations.
753    ///
754    /// # Example
755    ///
756    /// ```rust,ignore
757    /// use dependency_injector::Container;
758    ///
759    /// let container = Container::new();
760    /// container.singleton(MyService { ... });
761    ///
762    /// let frozen = container.freeze();
763    /// // Use frozen.resolve(&type_id) for faster lookups
764    /// ```
765    #[cfg(feature = "perfect-hash")]
766    #[inline]
767    pub fn freeze(&self) -> crate::storage::FrozenStorage {
768        self.lock();
769        crate::storage::FrozenStorage::from_storage(&self.storage)
770    }
771
772    /// Clear all services from this scope.
773    ///
774    /// Does not affect parent scopes.
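    ///
    /// # Examples
    ///
    /// A minimal sketch showing that only the current scope is affected:
    ///
    /// ```rust
    /// use dependency_injector::Container;
    ///
    /// #[derive(Clone)]
    /// struct Config;
    /// #[derive(Clone)]
    /// struct RequestId(u64);
    ///
    /// let root = Container::new();
    /// root.singleton(Config);
    ///
    /// let scope = root.scope();
    /// scope.singleton(RequestId(1));
    /// scope.clear();
    ///
    /// assert!(!scope.contains::<RequestId>()); // local registration removed
    /// assert!(scope.contains::<Config>());     // parent services still visible
    /// ```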
775    #[inline]
776    pub fn clear(&self) {
777        let count = self.storage.len();
778        self.storage.clear();
779
780        #[cfg(feature = "logging")]
781        debug!(
782            target: "dependency_injector",
783            depth = self.depth,
784            services_removed = count,
785            "Container cleared - all services removed from this scope"
786        );
787    }
788
789    /// Panic if locked (internal helper).
    /// Uses Relaxed ordering for the load: we only need to eventually observe the
    /// flag, since locking is rare and registration is not a hot path.
792    #[inline]
793    fn check_not_locked(&self) {
794        if self.locked.load(Ordering::Relaxed) {
795            panic!("Cannot register services: container is locked");
796        }
797    }
798
799    // =========================================================================
800    // Batch Registration (Phase 3)
801    // =========================================================================
802
803    /// Register multiple services in a single batch operation.
804    ///
805    /// This is more efficient than individual registrations when registering
806    /// many services at once, as it:
807    /// - Performs a single lock check at the start
808    /// - Minimizes per-call overhead
809    ///
810    /// # Examples
811    ///
812    /// ```rust
813    /// use dependency_injector::Container;
814    ///
815    /// #[derive(Clone)]
816    /// struct Database { url: String }
817    /// #[derive(Clone)]
818    /// struct Cache { size: usize }
819    /// #[derive(Clone)]
820    /// struct Logger { level: String }
821    ///
822    /// let container = Container::new();
823    /// container.batch(|batch| {
824    ///     batch.singleton(Database { url: "postgres://localhost".into() });
825    ///     batch.singleton(Cache { size: 1024 });
826    ///     batch.singleton(Logger { level: "info".into() });
827    /// });
828    ///
829    /// assert!(container.contains::<Database>());
830    /// assert!(container.contains::<Cache>());
831    /// assert!(container.contains::<Logger>());
832    /// ```
833    ///
834    /// Note: For maximum performance with many services, prefer the builder API:
835    /// ```rust
836    /// use dependency_injector::Container;
837    ///
838    /// #[derive(Clone)]
839    /// struct A;
840    /// #[derive(Clone)]
841    /// struct B;
842    ///
843    /// let container = Container::new();
844    /// container.register_batch()
845    ///     .singleton(A)
846    ///     .singleton(B)
847    ///     .done();
848    /// ```
849    #[inline]
850    pub fn batch<F>(&self, f: F)
851    where
852        F: FnOnce(BatchRegistrar<'_>),
853    {
854        self.check_not_locked();
855
856        #[cfg(feature = "logging")]
857        let start_count = self.storage.len();
858
859        // Create a zero-cost batch registrar that wraps the storage
860        f(BatchRegistrar { storage: &self.storage });
861
862        #[cfg(feature = "logging")]
863        {
864            let end_count = self.storage.len();
865            debug!(
866                target: "dependency_injector",
867                depth = self.depth,
868                services_registered = end_count - start_count,
869                "Batch registration completed"
870            );
871        }
872    }
873
874    /// Start a fluent batch registration.
875    ///
    /// This is a fluent alternative to the closure-based `batch()`; for long
    /// registration chains it avoids constructing and invoking a closure.
878    ///
879    /// # Example
880    ///
881    /// ```rust
882    /// use dependency_injector::Container;
883    ///
884    /// #[derive(Clone)]
885    /// struct Database { url: String }
886    /// #[derive(Clone)]
887    /// struct Cache { size: usize }
888    ///
889    /// let container = Container::new();
890    /// container.register_batch()
891    ///     .singleton(Database { url: "postgres://localhost".into() })
892    ///     .singleton(Cache { size: 1024 })
893    ///     .done();
894    ///
895    /// assert!(container.contains::<Database>());
896    /// assert!(container.contains::<Cache>());
897    /// ```
898    #[inline]
899    pub fn register_batch(&self) -> BatchBuilder<'_> {
900        self.check_not_locked();
901        BatchBuilder {
902            storage: &self.storage,
903            #[cfg(feature = "logging")]
904            count: 0,
905        }
906    }
907}
908
909/// Fluent batch registration builder.
910///
911/// Provides a chainable API for registering multiple services without closure overhead.
912pub struct BatchBuilder<'a> {
913    storage: &'a ServiceStorage,
914    #[cfg(feature = "logging")]
915    count: usize,
916}
917
918impl<'a> BatchBuilder<'a> {
919    /// Register a singleton and continue the chain
920    #[inline]
921    pub fn singleton<T: Injectable>(self, instance: T) -> Self {
922        self.storage.insert(TypeId::of::<T>(), AnyFactory::singleton(instance));
923        Self {
924            storage: self.storage,
925            #[cfg(feature = "logging")]
926            count: self.count + 1,
927        }
928    }
929
930    /// Register a lazy singleton and continue the chain
931    #[inline]
932    pub fn lazy<T: Injectable, F>(self, factory: F) -> Self
933    where
934        F: Fn() -> T + Send + Sync + 'static,
935    {
936        self.storage.insert(TypeId::of::<T>(), AnyFactory::lazy(factory));
937        Self {
938            storage: self.storage,
939            #[cfg(feature = "logging")]
940            count: self.count + 1,
941        }
942    }
943
944    /// Register a transient and continue the chain
945    #[inline]
946    pub fn transient<T: Injectable, F>(self, factory: F) -> Self
947    where
948        F: Fn() -> T + Send + Sync + 'static,
949    {
950        self.storage.insert(TypeId::of::<T>(), AnyFactory::transient(factory));
951        Self {
952            storage: self.storage,
953            #[cfg(feature = "logging")]
954            count: self.count + 1,
955        }
956    }
957
958    /// Finish the batch registration
959    #[inline]
960    pub fn done(self) {
961        #[cfg(feature = "logging")]
962        debug!(
963            target: "dependency_injector",
964            services_registered = self.count,
965            "Batch registration completed"
966        );
967    }
968}
969
970/// Batch registrar for closure-based bulk registration.
971///
972/// A zero-cost wrapper that provides direct storage access.
973/// The lock check is done once in `Container::batch()`.
974#[repr(transparent)]
975pub struct BatchRegistrar<'a> {
976    storage: &'a ServiceStorage,
977}
978
979impl<'a> BatchRegistrar<'a> {
980    /// Register a singleton service (inserted immediately)
981    #[inline]
982    pub fn singleton<T: Injectable>(&self, instance: T) {
983        self.storage.insert(TypeId::of::<T>(), AnyFactory::singleton(instance));
984    }
985
986    /// Register a lazy singleton service (inserted immediately)
987    #[inline]
988    pub fn lazy<T: Injectable, F>(&self, factory: F)
989    where
990        F: Fn() -> T + Send + Sync + 'static,
991    {
992        self.storage.insert(TypeId::of::<T>(), AnyFactory::lazy(factory));
993    }
994
995    /// Register a transient service (inserted immediately)
996    #[inline]
997    pub fn transient<T: Injectable, F>(&self, factory: F)
998    where
999        F: Fn() -> T + Send + Sync + 'static,
1000    {
1001        self.storage.insert(TypeId::of::<T>(), AnyFactory::transient(factory));
1002    }
1003}
1004
1005// =============================================================================
1006// Scope Pooling (Phase 6 optimization)
1007// =============================================================================
1008
1009use std::sync::Mutex;
1010
1011/// A pool of pre-allocated scopes for high-throughput scenarios.
1012///
1013/// Creating a scope involves allocating a DashMap (~134ns). For web servers
1014/// handling thousands of requests per second, this adds up. ScopePool pre-allocates
1015/// scopes and reuses them, reducing per-request overhead to near-zero.
1016///
1017/// # Example
1018///
1019/// ```rust
1020/// use dependency_injector::{Container, ScopePool};
1021///
1022/// #[derive(Clone)]
1023/// struct AppConfig { name: String }
1024///
1025/// #[derive(Clone)]
1026/// struct RequestId(String);
1027///
1028/// // Create root container with app-wide services
1029/// let root = Container::new();
1030/// root.singleton(AppConfig { name: "MyApp".into() });
1031///
1032/// // Create a pool of reusable scopes (pre-allocates 4 scopes)
1033/// let pool = ScopePool::new(&root, 4);
1034///
1035/// // In request handler: acquire a pooled scope
1036/// {
1037///     let scope = pool.acquire();
1038///     scope.singleton(RequestId("req-123".into()));
1039///
1040///     // Can access parent services
1041///     assert!(scope.contains::<AppConfig>());
1042///     assert!(scope.contains::<RequestId>());
1043///
1044///     // Scope automatically released when dropped
1045/// }
1046///
1047/// // Next request reuses the same scope allocation
1048/// {
1049///     let scope = pool.acquire();
1050///     // Previous RequestId is cleared, fresh scope
1051///     assert!(!scope.contains::<RequestId>());
1052/// }
1053/// ```
1054///
1055/// # Performance
1056///
1057/// - First acquisition: ~134ns (creates new scope if pool is empty)
1058/// - Subsequent acquisitions: ~20ns (reuses pooled scope)
1059/// - Release: ~10ns (clears and returns to pool)
1060pub struct ScopePool {
1061    /// Parent storage to create scopes from
1062    parent_storage: Arc<ServiceStorage>,
1063    /// Pool of available scopes (storage + lock state pairs)
1064    available: Mutex<Vec<ScopeSlot>>,
1065    /// Parent depth for child scope depth calculation
1066    parent_depth: u32,
1067}
1068
1069/// A reusable scope slot containing pre-allocated storage and lock state
1070struct ScopeSlot {
1071    /// Pre-allocated storage with parent reference
1072    storage: Arc<ServiceStorage>,
1073    locked: Arc<AtomicBool>,
1074}
1075
1076impl ScopePool {
1077    /// Create a new scope pool with pre-allocated capacity.
1078    ///
1079    /// # Arguments
1080    ///
1081    /// * `parent` - The parent container that scopes will inherit from
1082    /// * `capacity` - Number of scopes to pre-allocate
1083    ///
1084    /// # Example
1085    ///
1086    /// ```rust
1087    /// use dependency_injector::{Container, ScopePool};
1088    ///
1089    /// let root = Container::new();
1090    /// // Pre-allocate 8 scopes for concurrent request handling
1091    /// let pool = ScopePool::new(&root, 8);
1092    /// ```
1093    pub fn new(parent: &Container, capacity: usize) -> Self {
1094        let mut available = Vec::with_capacity(capacity);
1095
1096        // Pre-allocate storage with parent reference and lock states
1097        for _ in 0..capacity {
1098            available.push(ScopeSlot {
1099                storage: Arc::new(ServiceStorage::with_parent(Arc::clone(&parent.storage))),
1100                locked: Arc::new(AtomicBool::new(false)),
1101            });
1102        }
1103
1104        #[cfg(feature = "logging")]
1105        debug!(
1106            target: "dependency_injector",
1107            capacity = capacity,
1108            parent_depth = parent.depth,
1109            "Created scope pool with pre-allocated scopes"
1110        );
1111
1112        Self {
1113            parent_storage: Arc::clone(&parent.storage),
1114            available: Mutex::new(available),
1115            parent_depth: parent.depth,
1116        }
1117    }
1118
1119    /// Acquire a scope from the pool.
1120    ///
1121    /// Returns a `PooledScope` that automatically returns to the pool when dropped.
1122    /// If the pool is empty, creates a new scope.
1123    ///
1124    /// # Example
1125    ///
1126    /// ```rust
1127    /// use dependency_injector::{Container, ScopePool};
1128    ///
1129    /// #[derive(Clone)]
1130    /// struct RequestData { id: u64 }
1131    ///
1132    /// let root = Container::new();
1133    /// let pool = ScopePool::new(&root, 4);
1134    ///
1135    /// let scope = pool.acquire();
1136    /// scope.singleton(RequestData { id: 123 });
1137    /// let data = scope.get::<RequestData>().unwrap();
1138    /// assert_eq!(data.id, 123);
1139    /// ```
1140    #[inline]
1141    pub fn acquire(&self) -> PooledScope<'_> {
1142        let slot = self.available.lock().unwrap().pop();
1143
1144        let (storage, locked) = match slot {
1145            Some(slot) => {
1146                #[cfg(feature = "logging")]
1147                trace!(
1148                    target: "dependency_injector",
1149                    "Acquired scope from pool (reusing storage)"
1150                );
1151                (slot.storage, slot.locked)
1152            }
1153            None => {
1154                #[cfg(feature = "logging")]
1155                trace!(
1156                    target: "dependency_injector",
1157                    "Pool empty, creating new scope"
1158                );
1159                (
1160                    Arc::new(ServiceStorage::with_parent(Arc::clone(&self.parent_storage))),
1161                    Arc::new(AtomicBool::new(false)),
1162                )
1163            }
1164        };
1165
1166        let container = Container {
1167            storage,
1168            parent_storage: Some(Arc::clone(&self.parent_storage)),
1169            locked,
1170            depth: self.parent_depth + 1,
1171        };
1172
1173        PooledScope {
1174            container: Some(container),
1175            pool: self,
1176        }
1177    }
1178
1179    /// Return a scope to the pool (internal use).
1180    #[inline]
1181    fn release(&self, container: Container) {
1182        // Clear storage for reuse (parent reference is preserved)
1183        container.storage.clear();
1184        // Reset lock state
1185        container.locked.store(false, Ordering::Relaxed);
1186
1187        // Return to pool
1188        self.available.lock().unwrap().push(ScopeSlot {
1189            storage: container.storage,
1190            locked: container.locked,
1191        });
1192
1193        #[cfg(feature = "logging")]
1194        trace!(
1195            target: "dependency_injector",
1196            "Released scope back to pool"
1197        );
1198    }
1199
1200    /// Get the current number of available scopes in the pool.
1201    #[inline]
1202    pub fn available_count(&self) -> usize {
1203        self.available.lock().unwrap().len()
1204    }
1205}
1206
1207/// A scope acquired from a pool that automatically returns when dropped.
1208///
1209/// This provides RAII-style management of pooled scopes, ensuring they're
1210/// always returned to the pool even if the code panics.
1211pub struct PooledScope<'a> {
1212    container: Option<Container>,
1213    pool: &'a ScopePool,
1214}
1215
1216impl<'a> PooledScope<'a> {
1217    /// Get a reference to the underlying container.
1218    #[inline]
1219    pub fn container(&self) -> &Container {
1220        self.container.as_ref().unwrap()
1221    }
1222}
1223
1224impl<'a> std::ops::Deref for PooledScope<'a> {
1225    type Target = Container;
1226
1227    #[inline]
1228    fn deref(&self) -> &Self::Target {
1229        self.container.as_ref().unwrap()
1230    }
1231}
1232
1233impl<'a> Drop for PooledScope<'a> {
1234    fn drop(&mut self) {
1235        if let Some(container) = self.container.take() {
1236            self.pool.release(container);
1237        }
1238    }
1239}
1240
1241impl Default for Container {
1242    fn default() -> Self {
1243        Self::new()
1244    }
1245}
1246
1247impl std::fmt::Debug for Container {
1248    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1249        f.debug_struct("Container")
1250            .field("service_count", &self.len())
1251            .field("depth", &self.depth)
1252            .field("has_parent", &self.parent_storage.is_some())
1253            .field("locked", &self.is_locked())
1254            .finish()
1255    }
1256}
1257
1258// =========================================================================
1259// Thread Safety
1260// =========================================================================
1261
// Container is Send + Sync because every field is thread-safe:
// - `storage` and `parent_storage` hold `Arc<ServiceStorage>` (DashMap-backed)
// - `locked` is an `Arc<AtomicBool>`
// - `depth` is a plain `u32`
1266unsafe impl Send for Container {}
1267unsafe impl Sync for Container {}
1268
1269#[cfg(test)]
1270mod tests {
1271    use super::*;
1272
1273    #[derive(Clone)]
1274    struct TestService {
1275        value: String,
1276    }
1277
1278    #[allow(dead_code)]
1279    #[derive(Clone)]
1280    struct AnotherService {
1281        name: String,
1282    }
1283
1284    #[test]
1285    fn test_singleton() {
1286        let container = Container::new();
1287        container.singleton(TestService {
1288            value: "test".into(),
1289        });
1290
1291        let s1 = container.get::<TestService>().unwrap();
1292        let s2 = container.get::<TestService>().unwrap();
1293
1294        assert_eq!(s1.value, "test");
1295        assert!(Arc::ptr_eq(&s1, &s2));
1296    }
1297
1298    #[test]
1299    fn test_lazy() {
1300        use std::sync::atomic::{AtomicBool, Ordering};
1301
1302        static CREATED: AtomicBool = AtomicBool::new(false);
1303
1304        let container = Container::new();
1305        container.lazy(|| {
1306            CREATED.store(true, Ordering::SeqCst);
1307            TestService {
1308                value: "lazy".into(),
1309            }
1310        });
1311
1312        assert!(!CREATED.load(Ordering::SeqCst));
1313
1314        let s = container.get::<TestService>().unwrap();
1315        assert!(CREATED.load(Ordering::SeqCst));
1316        assert_eq!(s.value, "lazy");
1317    }
1318
1319    #[test]
1320    fn test_transient() {
1321        use std::sync::atomic::{AtomicU32, Ordering};
1322
1323        static COUNTER: AtomicU32 = AtomicU32::new(0);
1324
1325        #[derive(Clone)]
1326        struct Counter(u32);
1327
1328        let container = Container::new();
1329        container.transient(|| Counter(COUNTER.fetch_add(1, Ordering::SeqCst)));
1330
1331        let c1 = container.get::<Counter>().unwrap();
1332        let c2 = container.get::<Counter>().unwrap();
1333
1334        assert_ne!(c1.0, c2.0);
1335    }
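
    // A small sanity check (added as a sketch) for the direct-mapped hot cache:
    // `slot_for_hash` masks with `HOT_CACHE_SLOTS - 1`, which only produces valid
    // indices because HOT_CACHE_SLOTS is a power of two.
    #[test]
    fn test_hot_cache_slot_in_range() {
        for ptr in [0usize, 0xdead_beef, usize::MAX] {
            let slot = HotCache::slot_for_hash(HotCache::type_hash::<TestService>(), ptr);
            assert!(slot < HOT_CACHE_SLOTS);
        }
    }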
1336
1337    #[test]
1338    fn test_scope_inheritance() {
1339        let root = Container::new();
1340        root.singleton(TestService {
1341            value: "root".into(),
1342        });
1343
1344        let child = root.scope();
1345        child.singleton(AnotherService {
1346            name: "child".into(),
1347        });
1348
1349        // Child sees both
1350        assert!(child.contains::<TestService>());
1351        assert!(child.contains::<AnotherService>());
1352
1353        // Root only sees its own
1354        assert!(root.contains::<TestService>());
1355        assert!(!root.contains::<AnotherService>());
1356    }
1357
1358    #[test]
1359    fn test_scope_override() {
1360        let root = Container::new();
1361        root.singleton(TestService {
1362            value: "root".into(),
1363        });
1364
1365        let child = root.scope();
1366        child.singleton(TestService {
1367            value: "child".into(),
1368        });
1369
1370        let root_service = root.get::<TestService>().unwrap();
1371        let child_service = child.get::<TestService>().unwrap();
1372
1373        assert_eq!(root_service.value, "root");
1374        assert_eq!(child_service.value, "child");
1375    }
1376
1377    #[test]
1378    fn test_not_found() {
1379        let container = Container::new();
1380        let result = container.get::<TestService>();
1381        assert!(result.is_err());
1382    }
1383
1384    #[test]
1385    fn test_lock() {
1386        let container = Container::new();
1387        assert!(!container.is_locked());
1388
1389        container.lock();
1390        assert!(container.is_locked());
1391    }
1392
1393    #[test]
1394    #[should_panic(expected = "Cannot register services: container is locked")]
1395    fn test_register_after_lock() {
1396        let container = Container::new();
1397        container.lock();
1398        container.singleton(TestService {
1399            value: "fail".into(),
1400        });
1401    }
1402
1403    #[test]
1404    fn test_batch_registration() {
1405        #[derive(Clone)]
1406        struct ServiceA(i32);
1407        #[allow(dead_code)]
1408        #[derive(Clone)]
1409        struct ServiceB(String);
1410
1411        let container = Container::new();
1412        container.batch(|batch| {
1413            batch.singleton(ServiceA(42));
1414            batch.singleton(ServiceB("test".into()));
1415            batch.lazy(|| TestService {
1416                value: "lazy".into(),
1417            });
1418        });
1419
1420        assert!(container.contains::<ServiceA>());
1421        assert!(container.contains::<ServiceB>());
1422        assert!(container.contains::<TestService>());
1423
1424        let a = container.get::<ServiceA>().unwrap();
1425        assert_eq!(a.0, 42);
1426    }
1427
1428    #[test]
1429    fn test_scope_pool_basic() {
1430        #[derive(Clone)]
1431        struct RequestId(u64);
1432
1433        let root = Container::new();
1434        root.singleton(TestService {
1435            value: "root".into(),
1436        });
1437
1438        // Create pool with 2 pre-allocated scopes
1439        let pool = ScopePool::new(&root, 2);
1440        assert_eq!(pool.available_count(), 2);
1441
1442        // Acquire a scope
1443        {
1444            let scope = pool.acquire();
1445            assert_eq!(pool.available_count(), 1);
1446
1447            // Can access parent services
1448            assert!(scope.contains::<TestService>());
1449
1450            // Register request-specific service
1451            scope.singleton(RequestId(123));
1452            assert!(scope.contains::<RequestId>());
1453
1454            let id = scope.get::<RequestId>().unwrap();
1455            assert_eq!(id.0, 123);
1456        }
1457        // Scope released back to pool
1458        assert_eq!(pool.available_count(), 2);
1459    }
1460
1461    #[test]
1462    fn test_scope_pool_reuse() {
1463        #[derive(Clone)]
1464        struct RequestId(u64);
1465
1466        let root = Container::new();
1467        let pool = ScopePool::new(&root, 1);
1468
1469        // First request
1470        {
1471            let scope = pool.acquire();
1472            scope.singleton(RequestId(1));
1473            assert!(scope.contains::<RequestId>());
1474        }
1475
1476        // Second request - should reuse the same scope (cleared)
1477        {
1478            let scope = pool.acquire();
1479            // Previous RequestId should be cleared
1480            assert!(!scope.contains::<RequestId>());
1481
1482            scope.singleton(RequestId(2));
1483            let id = scope.get::<RequestId>().unwrap();
1484            assert_eq!(id.0, 2);
1485        }
1486    }
1487
1488    #[test]
1489    fn test_scope_pool_expansion() {
1490        let root = Container::new();
1491        let pool = ScopePool::new(&root, 1);
1492
1493        // Acquire more scopes than pre-allocated
1494        let _s1 = pool.acquire();
1495        let _s2 = pool.acquire(); // Creates new scope
1496
1497        assert_eq!(pool.available_count(), 0);
1498
1499        // Both should work
1500        drop(_s1);
1501        drop(_s2);
1502
1503        // Both return to pool
1504        assert_eq!(pool.available_count(), 2);
1505    }
1506
1507    #[test]
1508    fn test_deep_parent_chain() {
1509        // Test that services can be resolved from grandparent and beyond
1510        #[derive(Clone)]
1511        struct RootService(i32);
1512        #[derive(Clone)]
1513        struct MiddleService(i32);
1514        #[derive(Clone)]
1515        struct LeafService(i32);
1516
1517        // Create 4-level hierarchy: root -> middle1 -> middle2 -> leaf
1518        let root = Container::new();
1519        root.singleton(RootService(1));
1520
1521        let middle1 = root.scope();
1522        middle1.singleton(MiddleService(2));
1523
1524        let middle2 = middle1.scope();
1525        // No service in middle2
1526
1527        let leaf = middle2.scope();
1528        leaf.singleton(LeafService(4));
1529
1530        // Leaf should be able to access all ancestor services
1531        assert!(leaf.contains::<RootService>(), "Should find root service in leaf");
1532        assert!(leaf.contains::<MiddleService>(), "Should find middle service in leaf");
1533        assert!(leaf.contains::<LeafService>(), "Should find leaf service in leaf");
1534
1535        // Verify resolution works
1536        let root_svc = leaf.get::<RootService>().unwrap();
1537        assert_eq!(root_svc.0, 1);
1538
1539        let middle_svc = leaf.get::<MiddleService>().unwrap();
1540        assert_eq!(middle_svc.0, 2);
1541
1542        let leaf_svc = leaf.get::<LeafService>().unwrap();
1543        assert_eq!(leaf_svc.0, 4);
1544
1545        // Middle2 should also access ancestor services
1546        assert!(middle2.contains::<RootService>());
1547        assert!(middle2.contains::<MiddleService>());
1548        assert!(!middle2.contains::<LeafService>()); // Leaf service not in parent
1549    }
1550}