linked 1.0.9

Create families of linked objects that can collaborate across threads while being internally single-threaded
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
use std::collections::{HashMap, hash_map};
use std::ops::Deref;
use std::rc::Rc;
use std::sync::{Arc, RwLock};
use std::thread::{self, ThreadId};

use simple_mermaid::mermaid;

use crate::{BuildThreadIdHasher, ERR_POISONED_LOCK};

/// A wrapper that manages linked instances of `T`, ensuring that only one
/// instance of `T` is created per thread.
///
/// This is similar to the [`linked::thread_local_rc!` macro][1], with the main difference
/// being that this type operates entirely at runtime using dynamic storage and does
/// not require a static variable to be defined.
///
/// # Usage
///
/// Create an instance of `InstancePerThread` and provide it the initial instance of a linked
/// object `T`. Any instance of `T` accessed through the same `InstancePerThread` or a clone of it
/// will be linked to the same family.
#[ doc=mermaid!( "../doc/instance_per_thread.mermaid") ]
///
/// To access the current thread's instance of `T`, you must first obtain a
/// [`Ref<T>`][Ref] by calling [`.acquire()`][Self::acquire]. Then you can
/// access the `T` within by simply dereferencing via the `Deref<Target = T>` trait.
///
/// `Ref<T>` is a thread-isolated type, meaning you cannot move it to a different
/// thread nor access it from a different thread. To access linked instances on other threads,
/// you must transfer the `InstancePerThread<T>` instance across threads and obtain a new `Ref<T>`
/// on the destination thread.
///
/// # Resource management
///
/// A thread-specific instance of `T` is dropped when the last `Ref` on that thread
/// is dropped, similar to how `Rc<T>` would behave. If a new `Ref` is later obtained,
/// it is initialized with a new instance of the linked object.
///
/// It is important to emphasize that this means if you only acquire temporary `Ref`
/// objects then you will get a new instance of `T` every time. The performance impact of
/// this depends on how `T` works internally but you are recommended to keep `Ref`
/// instances around for reuse when possible.
///
/// # `Ref` storage
///
/// `Ref` is a thread-isolated type, which means you cannot store it in places that require types
/// to be thread-mobile. For example, in web framework request handlers the compiler might not
/// permit you to let a `Ref` live across an `await`, depending on the web framework, the async
/// task runtime used and its specific configuration.
///
/// Consider using `InstancePerThreadSync<T>` if you need a thread-mobile variant of `Ref`.
///
/// [1]: crate::thread_local_rc
#[derive(Debug)]
pub struct InstancePerThread<T>
where
    T: linked::Object,
{
    // Shared handle to the family's per-thread instance map. Cloning this wrapper
    // clones only this reference, so every clone observes the same family state.
    family: FamilyStateReference<T>,
}

impl<T> InstancePerThread<T>
where
    T: linked::Object,
{
    /// Wraps an existing instance of `T`, establishing the family that all
    /// per-thread instances vended through this wrapper (and its clones) belong to.
    ///
    /// The provided instance is consumed; all further access to instances of `T`
    /// from this family goes via [`.acquire()`][Self::acquire].
    #[expect(
        clippy::needless_pass_by_value,
        reason = "intentional needless consume to encourage all access to go via InstancePerThread<T>"
    )]
    #[must_use]
    pub fn new(inner: T) -> Self {
        Self {
            family: FamilyStateReference::new(inner.family()),
        }
    }

    /// Acquires the current thread's instance of `T`, wrapped in a [`Ref<T>`][Ref]
    /// that implements `Deref<Target = T>`.
    ///
    /// It is fine to hold several concurrent `Ref<T>` values on one thread - they
    /// all point at the same thread-specific instance of `T`.
    ///
    /// The returned `Ref<T>` carries no lifetime constraints, but it is a
    /// thread-isolated type: it cannot be moved to, or accessed from, another
    /// thread, which may limit where you can store it.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::cell::Cell;
    /// #
    /// # #[linked::object]
    /// # struct Thing {
    /// #     local_value: Cell<usize>,
    /// # }
    /// #
    /// # impl Thing {
    /// #     pub fn new() -> Self {
    /// #         linked::new!(Self { local_value: Cell::new(0) })
    /// #     }
    /// #
    /// #     pub fn increment(&self) {
    /// #        self.local_value.set(self.local_value.get() + 1);
    /// #     }
    /// #
    /// #     pub fn local_value(&self) -> usize {
    /// #         self.local_value.get()
    /// #     }
    /// # }
    /// #
    /// use linked::InstancePerThread;
    ///
    /// let linked_thing = InstancePerThread::new(Thing::new());
    ///
    /// let thing = linked_thing.acquire();
    /// thing.increment();
    /// assert_eq!(thing.local_value(), 1);
    /// ```
    ///
    /// # Efficiency
    ///
    /// Reuse the returned `Ref<T>` where you can. Each call carries some overhead,
    /// and the cost is highest when no other `Ref<T>` from the same family is
    /// currently alive on this thread (a fresh instance of `T` must be created).
    ///
    /// # Instance lifecycle
    ///
    /// The thread-specific instance of `T` lives only as long as at least one
    /// `Ref` on that thread. When the last `Ref` is dropped, the instance is
    /// dropped too; a later `acquire()` creates a brand-new instance of `T`,
    /// linked to the same family as the originating `InstancePerThread<T>`.
    ///
    /// ```
    /// # use std::cell::Cell;
    /// #
    /// # #[linked::object]
    /// # struct Thing {
    /// #     local_value: Cell<usize>,
    /// # }
    /// #
    /// # impl Thing {
    /// #     pub fn new() -> Self {
    /// #         linked::new!(Self { local_value: Cell::new(0) })
    /// #     }
    /// #
    /// #     pub fn increment(&self) {
    /// #        self.local_value.set(self.local_value.get() + 1);
    /// #     }
    /// #
    /// #     pub fn local_value(&self) -> usize {
    /// #         self.local_value.get()
    /// #     }
    /// # }
    /// #
    /// use linked::InstancePerThread;
    ///
    /// let linked_thing = InstancePerThread::new(Thing::new());
    ///
    /// let thing = linked_thing.acquire();
    /// thing.increment();
    /// assert_eq!(thing.local_value(), 1);
    ///
    /// drop(thing);
    ///
    /// // Dropping the only acquired instance above will have reset the thread-local state.
    /// let thing = linked_thing.acquire();
    /// assert_eq!(thing.local_value(), 0);
    /// ```
    ///
    /// # Thread safety
    ///
    /// The returned value is single-threaded. To extend the family to another
    /// thread, move or clone the `InstancePerThread<T>` itself - every clone is
    /// equivalent - and call `acquire()` on the destination thread.
    #[must_use]
    pub fn acquire(&self) -> Ref<T> {
        // Cloning the family reference first is equivalent to cloning it after
        // fetching the instance; both just bump shared reference counts.
        let family = self.family.clone();
        let inner = family.current_thread_instance();

        Ref { inner, family }
    }
}

impl<T> Clone for InstancePerThread<T>
where
    T: linked::Object,
{
    /// Produces another equivalent handle to the same family of linked objects.
    #[inline]
    fn clone(&self) -> Self {
        let family = self.family.clone();

        Self { family }
    }
}

/// An acquired thread-local instance of a linked object of type `T`,
/// implementing `Deref<Target = T>`.
///
/// For details, see [`InstancePerThread<T>`][InstancePerThread] which is the type used
/// to create instances of `Ref<T>`.
#[derive(Debug)]
pub struct Ref<T>
where
    T: linked::Object,
{
    // We really are just a wrapper around an Rc<T>. The only other duty we have
    // is to clean up the thread-local instance when the last `Ref` is dropped.
    inner: Rc<T>,
    // Used in drop() to remove the current thread's map entry once `inner` is
    // the last `Ref`-held reference on this thread.
    family: FamilyStateReference<T>,
}

impl<T> Deref for Ref<T>
where
    T: linked::Object,
{
    type Target = T;

    /// Dereferences to the current thread's instance of `T`.
    #[inline]
    fn deref(&self) -> &Self::Target {
        &*self.inner
    }
}

impl<T> Clone for Ref<T>
where
    T: linked::Object,
{
    /// Creates another `Ref` to the same thread-specific instance of `T`.
    #[inline]
    fn clone(&self) -> Self {
        let inner = Rc::clone(&self.inner);
        let family = self.family.clone();

        Self { inner, family }
    }
}

impl<T> Drop for Ref<T>
where
    T: linked::Object,
{
    fn drop(&mut self) {
        // Exactly two strong references remain when this is the last `Ref` on the
        // current thread: our own `inner` plus the copy stored in the family's
        // per-thread map. Any other count means another `Ref` is still alive.
        let is_last_ref_on_thread = Rc::strong_count(&self.inner) == 2;

        if is_last_ref_on_thread {
            // Remove the map's copy. After this function returns and drops
            // `self.inner`, the last `Rc<T>` goes away and the thread's
            // instance of T is destroyed with it.
            self.family.clear_current_thread_instance();
        }
    }
}

/// One reference to the state of a specific family of per-thread linked objects.
/// This can be used to retrieve and/or initialize the current thread's instance.
#[derive(Debug)]
struct FamilyStateReference<T>
where
    T: linked::Object,
{
    // If a thread needs a new instance, we create it via the family.
    family: linked::Family<T>,

    // We store the state of each thread here. See safety comments on ThreadSpecificState!
    // NB! While it is legal to manipulate the HashMap from any thread, including to move
    // the values, calling actual functions on a value is only valid from the thread in the key.
    //
    // To ensure safety, we must also ensure that all values are removed from here before the map
    // is dropped, because each value must be dropped on the thread that created it and dropping is
    // logic executed on that thread-specific value!
    //
    // This is done in the `Ref` destructor. By the time this map is dropped,
    // it must be empty, which we assert in our own drop().
    //
    // The write lock here is only held when initializing the thread-specific state for a thread
    // for the first time, which should generally be rare, especially as user code will also be
    // motivated to reduce those instances because it also means initializing the actual `T` inside.
    // Most access will therefore only need to take a read lock.
    //
    // The Arc is shared by every clone of this reference and by every `Ref<T>`,
    // so the map (and its sanity check in drop()) outlives all of them.
    thread_specific: Arc<RwLock<HashMap<ThreadId, ThreadSpecificState<T>, BuildThreadIdHasher>>>,
}

impl<T> FamilyStateReference<T>
where
    T: linked::Object,
{
    /// Creates the family state with an empty per-thread instance map.
    #[must_use]
    fn new(family: linked::Family<T>) -> Self {
        Self {
            family,
            thread_specific: Arc::new(RwLock::new(HashMap::with_hasher(BuildThreadIdHasher))),
        }
    }

    /// Returns the `Rc<T>` for the current thread, creating it if necessary.
    #[must_use]
    fn current_thread_instance(&self) -> Rc<T> {
        let thread_id = thread::current().id();

        // First, an optimistic pass - let us assume it is already initialized for our thread.
        {
            let map = self.thread_specific.read().expect(ERR_POISONED_LOCK);

            if let Some(state) = map.get(&thread_id) {
                // SAFETY: We must guarantee that we are on the thread that owns
                // the thread-specific state. We are - thread ID lookup led us here.
                return unsafe { state.clone_instance() };
            }
        }

        // The state for the current thread is not yet initialized. Let us initialize!
        // Note that we create this instance outside any locks, both to reduce the
        // lock durations but also because cloning a linked object may execute arbitrary code,
        // including potentially code that tries to grab the same lock.
        let instance: Rc<T> = Rc::new(self.family.clone().into());

        // Let us add the new instance to the map.
        let mut map = self.thread_specific.write().expect(ERR_POISONED_LOCK);

        // In some wild corner cases, it is perhaps possible that the arbitrary code in the
        // linked object clone logic may already have filled the map with our value? It is
        // a bit of a stretch of imagination but let us accept the possibility to be thorough.
        match map.entry(thread_id) {
            hash_map::Entry::Occupied(occupied_entry) => {
                // There already is something in the entry. That is fine, we just ignore the
                // new instance we created and pretend we are on the optimistic path.
                // Our freshly created `instance` is simply dropped at end of scope.
                let state = occupied_entry.get();

                // SAFETY: We must guarantee that we are on the thread that owns
                // the thread-specific state. We are - thread ID lookup led us here.
                unsafe { state.clone_instance() }
            }
            hash_map::Entry::Vacant(vacant_entry) => {
                // We are the first thread to create an instance. Let us insert it.
                // SAFETY: We must guarantee that any further access (taking the Rc or dropping)
                // takes place on the same thread as was used to call this function. We ensure this
                // by the thread ID lookup in the map key - we can only ever directly access map
                // entries owned by the current thread (though we may resize the map from any
                // thread, as it simply moves data in memory).
                let state = unsafe { ThreadSpecificState::new(Rc::clone(&instance)) };
                vacant_entry.insert(state);

                instance
            }
        }
    }

    /// Removes the current thread's entry from the instance map.
    ///
    /// Called from `Ref::drop` when the last `Ref` on this thread goes away. The
    /// removed `ThreadSpecificState` is dropped here, on its owning thread, which
    /// is required for soundness (see safety comments on `ThreadSpecificState`).
    fn clear_current_thread_instance(&self) {
        // We need to clear the thread-specific state for this thread.
        let thread_id = thread::current().id();

        let mut map = self.thread_specific.write().expect(ERR_POISONED_LOCK);
        map.remove(&thread_id);
    }
}

impl<T> Clone for FamilyStateReference<T>
where
    T: linked::Object,
{
    /// Produces another reference to the same family state; the per-thread
    /// instance map is shared, not copied.
    fn clone(&self) -> Self {
        Self {
            thread_specific: Arc::clone(&self.thread_specific),
            family: self.family.clone(),
        }
    }
}

impl<T> Drop for FamilyStateReference<T>
where
    T: linked::Object,
{
    #[cfg_attr(test, mutants::skip)] // This is just a sanity check, no functional behavior.
    fn drop(&mut self) {
        // When the last reference goes away, the per-thread map is dropped with it.
        // Every entry must already have been removed by then (each entry must be
        // dropped on its owning thread - see `ThreadSpecificState`), so a non-empty
        // map at this point indicates a defect somewhere in our code.
        //
        // Skip the check when other references still exist (no map drop will occur,
        // so entries in the map are fine) or when a panic is already in flight
        // (a second panic would disrupt the handling of the original one).
        if Arc::strong_count(&self.thread_specific) > 1 || thread::panicking() {
            return;
        }

        let map = self.thread_specific.read().expect(ERR_POISONED_LOCK);
        assert!(
            map.is_empty(),
            "thread-specific state map was not empty on drop - internal logic error"
        );
    }
}

/// Holds the thread-specific state for a specific family of per-thread linked objects.
///
/// # Safety
///
/// This contains an `Rc`, which is `!Send` and only meant to be accessed from the thread it was
/// created on. Yet the instance of this type itself is visible from multiple threads and
/// potentially even touched (moved) from another thread when resizing the `HashMap` of all
/// instances! How can this be?!
///
/// We take advantage of the fact that an `Rc` is merely a reference to a control block.
/// As long as we never touch the control block from the wrong thread, nobody will ever
/// know we touched the `Rc` from another thread. This allows us to move the Rc around
/// in memory as long as the move itself is synchronized.
///
/// Touching the control block means cloning or dropping the `Rc` - both are restricted
/// to the owning thread via the `unsafe` constructors/accessors on this type.
///
/// Obviously, this relies on `Rc` implementation details, so we are somewhat at risk of
/// breakage if a future Rust std implementation changes the way `Rc` works but this seems
/// unlikely as this is fairly fundamental to the nature of how smart pointers are created.
///
/// NB! We must not drop the Rc (and by extension this type) from a foreign thread!
#[derive(Debug)]
struct ThreadSpecificState<T>
where
    T: linked::Object,
{
    instance: Rc<T>,
}

impl<T> ThreadSpecificState<T>
where
    T: linked::Object,
{
    /// Wraps an `Rc<T>` as one thread's state for a family of linked objects.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that any further access (including dropping) takes place on the
    /// same thread as was used to call this function.
    ///
    /// See type-level safety comments for details.
    #[must_use]
    unsafe fn new(instance: Rc<T>) -> Self {
        Self { instance }
    }

    /// Returns a clone of this thread's `Rc<T>`.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that the current thread is the thread for which this
    /// `ThreadSpecificState` was created. This is not enforced by the type system.
    ///
    /// See type-level safety comments for details.
    #[must_use]
    unsafe fn clone_instance(&self) -> Rc<T> {
        self.instance.clone()
    }
}

// SAFETY: The inner `Rc<T>` is only ever cloned or dropped on its owning thread,
// enforced by the unsafe contracts on this type's methods and the thread-ID-keyed
// map lookup in FamilyStateReference. See comments on type.
unsafe impl<T> Sync for ThreadSpecificState<T> where T: linked::Object {}
// SAFETY: Sending the value between threads only relocates the `Rc` pointer in
// memory; it never touches the reference-count control block. See comments on type.
unsafe impl<T> Send for ThreadSpecificState<T> where T: linked::Object {}

#[cfg(test)]
#[cfg_attr(coverage_nightly, coverage(off))]
mod tests {
    use std::cell::{Cell, RefCell};
    use std::panic::{RefUnwindSafe, UnwindSafe};
    use std::rc::Rc;
    use std::sync::atomic::{self, AtomicBool};
    use std::sync::{Arc, Mutex};
    use std::thread;

    use static_assertions::assert_impl_all;

    use super::*;

    assert_impl_all!(InstancePerThread<TokenCache>: UnwindSafe, RefUnwindSafe);

    // Ref<T> wraps Rc<T>, so T must be RefUnwindSafe for Rc<T> to be UnwindSafe.
    // TokenCache contains Cell<usize> which is !RefUnwindSafe (Cell wraps
    // UnsafeCell), so we use a separate linked type here.
    #[linked::object]
    struct SimpleValue {
        #[expect(
            dead_code,
            reason = "field exists to give the type a String \
                      component for static assertions"
        )]
        data: String,
    }

    assert_impl_all!(Ref<SimpleValue>: UnwindSafe, RefUnwindSafe);

    #[linked::object]
    struct TokenCache {
        shared_value: Arc<Mutex<usize>>,
        local_value: Cell<usize>,
    }

    impl TokenCache {
        fn new() -> Self {
            let shared_value = Arc::new(Mutex::new(0));

            linked::new!(Self {
                shared_value: Arc::clone(&shared_value),
                local_value: Cell::new(0),
            })
        }

        fn increment(&self) {
            self.local_value.set(self.local_value.get().wrapping_add(1));

            let mut shared_value = self.shared_value.lock().unwrap();
            *shared_value = shared_value.wrapping_add(1);
        }

        fn local_value(&self) -> usize {
            self.local_value.get()
        }

        fn shared_value(&self) -> usize {
            *self.shared_value.lock().unwrap()
        }
    }

    #[test]
    fn per_thread_smoke_test() {
        let linked_cache = InstancePerThread::new(TokenCache::new());

        let cache1 = linked_cache.acquire();
        cache1.increment();

        assert_eq!(cache1.local_value(), 1);
        assert_eq!(cache1.shared_value(), 1);

        // This must refer to the same instance.
        let cache2 = linked_cache.acquire();

        assert_eq!(cache2.local_value(), 1);
        assert_eq!(cache2.shared_value(), 1);

        cache2.increment();

        assert_eq!(cache1.local_value(), 2);
        assert_eq!(cache1.shared_value(), 2);

        thread::spawn(move || {
            // You can move InstancePerThread across threads.
            let cache3 = linked_cache.acquire();

            // This is a different thread's instance, so the local value is fresh.
            assert_eq!(cache3.local_value(), 0);
            assert_eq!(cache3.shared_value(), 2);

            cache3.increment();

            assert_eq!(cache3.local_value(), 1);
            assert_eq!(cache3.shared_value(), 3);

            // You can clone this and every clone works the same.
            let thread_local_clone = linked_cache.clone();

            let cache4 = thread_local_clone.acquire();

            assert_eq!(cache4.local_value(), 1);
            assert_eq!(cache4.shared_value(), 3);

            // Every InstancePerThread instance from the same family is equivalent.
            let cache5 = linked_cache.acquire();

            assert_eq!(cache5.local_value(), 1);
            assert_eq!(cache5.shared_value(), 3);

            thread::spawn(move || {
                let cache6 = thread_local_clone.acquire();

                // This is a different thread's instance, so the local value is fresh.
                assert_eq!(cache6.local_value(), 0);
                assert_eq!(cache6.shared_value(), 3);

                cache6.increment();

                assert_eq!(cache6.local_value(), 1);
                assert_eq!(cache6.shared_value(), 4);
            })
            .join()
            .unwrap();
        })
        .join()
        .unwrap();

        assert_eq!(cache1.local_value(), 2);
        assert_eq!(cache1.shared_value(), 4);
    }

    #[test]
    fn thread_state_dropped_on_last_thread_local_drop() {
        let linked_cache = InstancePerThread::new(TokenCache::new());

        let cache = linked_cache.acquire();
        cache.increment();

        assert_eq!(cache.local_value(), 1);

        // This will drop the local state.
        drop(cache);

        // We get a fresh instance now, initialized from scratch for this thread.
        let cache = linked_cache.acquire();
        assert_eq!(cache.local_value(), 0);
    }

    #[test]
    fn thread_state_dropped_on_thread_exit() {
        // At the start, no thread-specific state has been created. The link embedded into the
        // InstancePerThread holds one reference to the inner shared value of the TokenCache.
        let linked_cache = InstancePerThread::new(TokenCache::new());

        let cache = linked_cache.acquire();

        // We now have two references to the inner shared value - the link + this fn.
        assert_eq!(Arc::strong_count(&cache.shared_value), 2);

        thread::spawn(move || {
            let cache = linked_cache.acquire();

            assert_eq!(Arc::strong_count(&cache.shared_value), 3);
        })
        .join()
        .unwrap();

        // Should be back to 2 here - the thread-local state was dropped when the thread exited.
        assert_eq!(Arc::strong_count(&cache.shared_value), 2);
    }

    // The type used to trigger the Occupied branch in current_thread_instance().
    // During instance creation, the factory re-entrantly calls acquire() on the same
    // InstancePerThread, which populates the map entry before the outer call checks it.
    //
    // The factory closure must be Fn + Send + Sync, so we cannot capture an
    // InstancePerThread (which contains Rc-based state) directly. Instead we use
    // a Mutex<Option<InstancePerThread>> that the factory clones out of, releasing
    // the lock before calling acquire() to avoid deadlock on re-entry.
    //
    // The Ref produced by the re-entrant acquire() must outlive the factory call.
    // If it were dropped inside the factory, it would be the last Ref on this
    // thread (strong count == 2: map entry + the Ref itself), so Ref::drop would
    // clear the map entry again and the outer call would take the Vacant branch
    // instead of the Occupied branch we want to exercise. We therefore park the
    // inner Ref in a thread-local slot that the test drains afterwards.
    thread_local! {
        static REENTRANT_REF: RefCell<Option<Ref<ReentrantType>>> = RefCell::new(None);
    }

    #[linked::object]
    struct ReentrantType {}

    impl ReentrantType {
        fn new(
            shared_ipt: Arc<Mutex<Option<InstancePerThread<Self>>>>,
            reentry_flag: Arc<AtomicBool>,
        ) -> Self {
            linked::__private::new(move |link| {
                // Clone the IPT out (if armed) so we release the lock before
                // the re-entrant acquire() tries to lock the same Mutex.
                let maybe_ipt = shared_ipt.lock().unwrap().clone();

                if let Some(ipt) = maybe_ipt
                    && reentry_flag
                        .compare_exchange(
                            false,
                            true,
                            atomic::Ordering::Relaxed,
                            atomic::Ordering::Relaxed,
                        )
                        .is_ok()
                {
                    // Re-entrantly acquire - inserts into the map via the Vacant
                    // branch. Keep the Ref alive in the thread-local slot so the
                    // map entry survives until the outer call finds it Occupied.
                    REENTRANT_REF.with(|slot| {
                        *slot.borrow_mut() = Some(ipt.acquire());
                    });
                }

                Self {
                    __private_linked_link: link,
                }
            })
        }
    }

    #[test]
    fn reentrant_acquire_hits_occupied_branch() {
        let shared_ipt: Arc<Mutex<Option<InstancePerThread<ReentrantType>>>> =
            Arc::new(Mutex::new(None));
        let reentry_flag = Arc::new(AtomicBool::new(false));

        // Create the first instance without re-entry (shared_ipt is None).
        let first = ReentrantType::new(Arc::clone(&shared_ipt), Arc::clone(&reentry_flag));

        let ipt = InstancePerThread::new(first);

        // Arm the trigger: factory can now see the InstancePerThread.
        *shared_ipt.lock().unwrap() = Some(ipt.clone());

        // This acquire() will:
        // 1. Fail the read-lock optimistic check (no entry yet).
        // 2. Create instance via factory -> factory re-entrantly calls acquire().
        // 3. Inner acquire() takes the Vacant path, inserting into the map and
        //    stashing the resulting Ref in REENTRANT_REF so the entry stays alive.
        // 4. Outer acquire() finds Entry::Occupied and returns the map's instance.
        let outer_ref = ipt.acquire();
        assert!(reentry_flag.load(atomic::Ordering::Relaxed));

        // Drain the thread-local slot and verify both Refs point at the same
        // instance - this proves the outer call went through the Occupied branch
        // (on the Vacant path it would have inserted its own, different instance).
        let inner_ref = REENTRANT_REF
            .with(|slot| slot.borrow_mut().take())
            .expect("factory should have stashed a Ref in the thread-local slot");
        assert!(Rc::ptr_eq(&outer_ref.inner, &inner_ref.inner));
        drop(inner_ref);

        // Break the reference cycle: the factory closure (inside ipt) captures shared_ipt,
        // and shared_ipt holds a clone of ipt. Clear the mutex to allow both to be freed.
        *shared_ipt.lock().unwrap() = None;
    }
}