// happylock/collection/boxed.rs

use std::cell::UnsafeCell;
use std::fmt::Debug;

use crate::lockable::{Lockable, LockableIntoInner, OwnedLockable, RawLock, Sharable};
use crate::{Keyable, ThreadKey};

use super::utils::{
	ordered_contains_duplicates, scoped_read, scoped_try_read, scoped_try_write, scoped_write,
};
use super::{utils, BoxedLockCollection, LockGuard};

unsafe impl<L: Lockable> RawLock for BoxedLockCollection<L> {
	#[mutants::skip] // this should never be called
	#[cfg(not(tarpaulin_include))]
	fn poison(&self) {
		for lock in &self.locks {
			lock.poison();
		}
	}

	unsafe fn raw_write(&self) {
		utils::ordered_write(self.locks())
	}

	unsafe fn raw_try_write(&self) -> bool {
		utils::ordered_try_write(self.locks())
	}

	unsafe fn raw_unlock_write(&self) {
		for lock in self.locks() {
			lock.raw_unlock_write();
		}
	}

	unsafe fn raw_read(&self) {
		utils::ordered_read(self.locks());
	}

	unsafe fn raw_try_read(&self) -> bool {
		utils::ordered_try_read(self.locks())
	}

	unsafe fn raw_unlock_read(&self) {
		for lock in self.locks() {
			lock.raw_unlock_read();
		}
	}
}

unsafe impl<L: Lockable> Lockable for BoxedLockCollection<L> {
	type Guard<'g>
		= L::Guard<'g>
	where
		Self: 'g;

	type DataMut<'a>
		= L::DataMut<'a>
	where
		Self: 'a;

	fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) {
		// Doing it this way means that if a boxed collection is put inside a
		// different collection, it will use the other method of locking. However,
		// this prevents duplicate locks in a collection.
		ptrs.extend_from_slice(&self.locks);
	}

	unsafe fn guard(&self) -> Self::Guard<'_> {
		self.child().guard()
	}

	unsafe fn data_mut(&self) -> Self::DataMut<'_> {
		self.child().data_mut()
	}
}

unsafe impl<L: Sharable> Sharable for BoxedLockCollection<L> {
	type ReadGuard<'g>
		= L::ReadGuard<'g>
	where
		Self: 'g;

	type DataRef<'a>
		= L::DataRef<'a>
	where
		Self: 'a;

	unsafe fn read_guard(&self) -> Self::ReadGuard<'_> {
		self.child().read_guard()
	}

	unsafe fn data_ref(&self) -> Self::DataRef<'_> {
		self.child().data_ref()
	}
}

unsafe impl<L: OwnedLockable> OwnedLockable for BoxedLockCollection<L> {}

// LockableGetMut can't be implemented because that would create mutable and
// immutable references to the same value at the same time.

impl<L: LockableIntoInner> LockableIntoInner for BoxedLockCollection<L> {
	type Inner = L::Inner;

	fn into_inner(self) -> Self::Inner {
		LockableIntoInner::into_inner(self.into_child())
	}
}

impl<L> IntoIterator for BoxedLockCollection<L>
where
	L: IntoIterator,
{
	type Item = <L as IntoIterator>::Item;
	type IntoIter = <L as IntoIterator>::IntoIter;

	fn into_iter(self) -> Self::IntoIter {
		self.into_child().into_iter()
	}
}

impl<'a, L> IntoIterator for &'a BoxedLockCollection<L>
where
	&'a L: IntoIterator,
{
	type Item = <&'a L as IntoIterator>::Item;
	type IntoIter = <&'a L as IntoIterator>::IntoIter;

	fn into_iter(self) -> Self::IntoIter {
		self.child().into_iter()
	}
}

impl<L: OwnedLockable, I: FromIterator<L> + OwnedLockable> FromIterator<L>
	for BoxedLockCollection<I>
{
	fn from_iter<T: IntoIterator<Item = L>>(iter: T) -> Self {
		let iter: I = iter.into_iter().collect();
		Self::new(iter)
	}
}

// safety: the RawLocks must be Send because they come from the Send Lockable
#[allow(clippy::non_send_fields_in_send_ty)]
unsafe impl<L: Send> Send for BoxedLockCollection<L> {}
unsafe impl<L: Sync> Sync for BoxedLockCollection<L> {}

impl<L> Drop for BoxedLockCollection<L> {
	#[mutants::skip] // I can't test for a memory leak
	#[cfg(not(tarpaulin_include))]
	fn drop(&mut self) {
		unsafe {
			// safety: this collection will never be locked again
			self.locks.clear();
			// safety: this was allocated using a box, and is now unique
			let boxed: Box<UnsafeCell<L>> = Box::from_raw(self.data.cast_mut());

			drop(boxed)
		}
	}
}

impl<T: ?Sized, L: AsRef<T>> AsRef<T> for BoxedLockCollection<L> {
	fn as_ref(&self) -> &T {
		self.child().as_ref()
	}
}

#[mutants::skip]
#[cfg(not(tarpaulin_include))]
impl<L: Debug> Debug for BoxedLockCollection<L> {
	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
		f.debug_struct(stringify!(BoxedLockCollection))
			.field("data", &self.data)
			// there's not much reason to show the sorted locks
			.finish_non_exhaustive()
	}
}

impl<L: OwnedLockable + Default> Default for BoxedLockCollection<L> {
	fn default() -> Self {
		Self::new(L::default())
	}
}

impl<L: OwnedLockable> From<L> for BoxedLockCollection<L> {
	fn from(value: L) -> Self {
		Self::new(value)
	}
}

impl<L> BoxedLockCollection<L> {
	/// Gets the underlying collection, consuming this collection.
	///
	/// # Examples
	///
	/// ```
	/// use happylock::{Mutex, ThreadKey, LockCollection};
	///
	/// let data1 = Mutex::new(42);
	/// let data2 = Mutex::new("");
	///
	/// // data1 and data2 refer to distinct mutexes, so this won't panic
	/// let data = (&data1, &data2);
	/// let lock = LockCollection::try_new(&data).unwrap();
	///
	/// let key = ThreadKey::get().unwrap();
	/// let guard = lock.into_child().0.lock(key);
	/// assert_eq!(*guard, 42);
	/// ```
	#[must_use]
	pub fn into_child(mut self) -> L {
		unsafe {
			// safety: this collection will never be used again
			std::ptr::drop_in_place(&mut self.locks);
			// safety: this was allocated using a box, and is now unique
			let boxed: Box<UnsafeCell<L>> = Box::from_raw(self.data.cast_mut());
			// to prevent a double free
			std::mem::forget(self);

			boxed.into_inner()
		}
	}

	// child_mut would be immediate UB because it would create mutable and
	// immutable references to the same value at the same time

	/// Gets an immutable reference to the underlying data
	///
	/// # Examples
	///
	/// ```
	/// use happylock::{Mutex, ThreadKey, LockCollection};
	///
	/// let data1 = Mutex::new(42);
	/// let data2 = Mutex::new("");
	///
	/// // data1 and data2 refer to distinct mutexes, so this won't panic
	/// let data = (&data1, &data2);
	/// let lock = LockCollection::try_new(&data).unwrap();
	///
	/// let key = ThreadKey::get().unwrap();
	/// let guard = lock.child().0.lock(key);
	/// assert_eq!(*guard, 42);
	/// ```
	#[must_use]
	pub fn child(&self) -> &L {
		unsafe {
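			// safety: `data` comes from the `Box::leak` in `new_unchecked`, so
			// it is non-null and stays valid for as long as `self` is borrowed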
			self.data
				.as_ref()
				.unwrap_unchecked()
				.get()
				.as_ref()
				.unwrap_unchecked()
		}
	}

	/// Gets the raw locks, sorted by memory address
	fn locks(&self) -> &[&dyn RawLock] {
		&self.locks
	}
}

impl<L: OwnedLockable> BoxedLockCollection<L> {
	/// Creates a new collection of owned locks.
	///
	/// Because the locks are owned, there's no need to do any checks for
	/// duplicate values.
	///
	/// # Examples
	///
	/// ```
	/// use happylock::{Mutex, LockCollection};
	///
	/// let data = (Mutex::new(0), Mutex::new(""));
	/// let lock = LockCollection::new(data);
	/// ```
	#[must_use]
	pub fn new(data: L) -> Self {
		// safety: owned lockable types cannot contain duplicates
		unsafe { Self::new_unchecked(data) }
	}
}

impl<'a, L: OwnedLockable> BoxedLockCollection<&'a L> {
	/// Creates a new collection that references a set of owned locks.
	///
	/// Because the locks are owned, there's no need to do any checks for
	/// duplicate values.
	///
	/// # Examples
	///
	/// ```
	/// use happylock::{Mutex, LockCollection};
	///
	/// let data = (Mutex::new(0), Mutex::new(""));
	/// let lock = LockCollection::new_ref(&data);
	/// ```
	#[must_use]
	pub fn new_ref(data: &'a L) -> Self {
		// safety: owned lockable types cannot contain duplicates
		unsafe { Self::new_unchecked(data) }
	}
}

impl<L: Lockable> BoxedLockCollection<L> {
	/// Creates a new collection of locks.
	///
	/// # Safety
	///
	/// This results in undefined behavior if any lock appears twice within
	/// this collection.
	///
	/// # Examples
	///
	/// ```
	/// use happylock::{Mutex, LockCollection};
	///
	/// let data1 = Mutex::new(0);
	/// let data2 = Mutex::new("");
	///
	/// // safety: data1 and data2 refer to distinct mutexes
	/// let data = (&data1, &data2);
	/// let lock = unsafe { LockCollection::new_unchecked(&data) };
	/// ```
	#[must_use]
	pub unsafe fn new_unchecked(data: L) -> Self {
		let data = Box::leak(Box::new(UnsafeCell::new(data)));
		let data_ref = data.get().cast_const().as_ref().unwrap_unchecked();

		let mut locks = Vec::new();
		data_ref.get_ptrs(&mut locks);

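		// note: sorting by address gives every collection a single, consistent
		// acquisition order, which is what lets the ordered_* utilities lock
		// without deadlocking and detect duplicates in one pass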
		// cast to *const () because fat pointers can't be converted to usize
		locks.sort_by_key(|lock| (&raw const **lock).cast::<()>() as usize);

		// safety: we're just changing the lifetimes
		let locks: Vec<&'static dyn RawLock> = std::mem::transmute(locks);
		let data = &raw const *data;
		Self { data, locks }
	}

	/// Creates a new collection of locks.
	///
	/// This returns `None` if any locks are found twice in the given
	/// collection.
	///
	/// # Examples
	///
	/// ```
	/// use happylock::{Mutex, LockCollection};
	///
	/// let data1 = Mutex::new(0);
	/// let data2 = Mutex::new("");
	///
	/// // data1 and data2 refer to distinct mutexes, so this won't panic
	/// let data = (&data1, &data2);
	/// let lock = LockCollection::try_new(&data).unwrap();
	/// ```
	#[must_use]
	pub fn try_new(data: L) -> Option<Self> {
		// safety: we are checking for duplicates before returning
		unsafe {
			let this = Self::new_unchecked(data);
			if ordered_contains_duplicates(this.locks()) {
				return None;
			}
			Some(this)
		}
	}

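	/// Locks the collection and passes mutable references to the underlying
	/// data into the given function, releasing the locks once it returns.
	///
	/// # Examples
	///
	/// A minimal sketch, modeled on this module's tests:
	///
	/// ```
	/// use happylock::{Mutex, ThreadKey, LockCollection};
	///
	/// let mut key = ThreadKey::get().unwrap();
	/// let data = (Mutex::new(0), Mutex::new(""));
	/// let lock = LockCollection::new(data);
	///
	/// lock.scoped_lock(&mut key, |guard| {
	///     *guard.0 += 1;
	///     *guard.1 = "1";
	/// });
	/// ```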
	pub fn scoped_lock<'a, R>(&'a self, key: impl Keyable, f: impl Fn(L::DataMut<'a>) -> R) -> R {
		scoped_write(self, key, f)
	}

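	/// Attempts to lock the collection without blocking, passing mutable
	/// references to the underlying data into the given function.
	///
	/// # Errors
	///
	/// If any lock in the collection is already locked, then an error
	/// containing the given key is returned.
	///
	/// # Examples
	///
	/// A minimal sketch, modeled on this module's tests:
	///
	/// ```
	/// use happylock::{Mutex, ThreadKey, LockCollection};
	///
	/// let key = ThreadKey::get().unwrap();
	/// let data = (Mutex::new(0), Mutex::new(""));
	/// let lock = LockCollection::new(data);
	///
	/// let r = lock.scoped_try_lock(key, |guard| {
	///     *guard.0 += 1;
	/// });
	/// assert!(r.is_ok());
	/// ```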
	pub fn scoped_try_lock<'a, Key: Keyable, R>(
		&'a self,
		key: Key,
		f: impl Fn(L::DataMut<'a>) -> R,
	) -> Result<R, Key> {
		scoped_try_write(self, key, f)
	}

	/// Locks the collection
	///
	/// This function returns a guard that can be used to access the underlying
	/// data. When the guard is dropped, the locks in the collection are
	/// released.
	///
	/// # Examples
	///
	/// ```
	/// use happylock::{Mutex, ThreadKey, LockCollection};
	///
	/// let key = ThreadKey::get().unwrap();
	/// let data = (Mutex::new(0), Mutex::new(""));
	/// let lock = LockCollection::new(data);
	///
	/// let mut guard = lock.lock(key);
	/// *guard.0 += 1;
	/// *guard.1 = "1";
	/// ```
	#[must_use]
	pub fn lock(&self, key: ThreadKey) -> LockGuard<L::Guard<'_>> {
		unsafe {
			// safety: we have the thread key
			self.raw_write();

			LockGuard {
				// safety: we've already acquired the lock
				guard: self.child().guard(),
				key,
			}
		}
	}

	/// Attempts to lock the collection without blocking.
	///
	/// If the access could not be granted at this time, then `Err` is
	/// returned. Otherwise, an RAII guard is returned which will release the
	/// locks when it is dropped.
	///
	/// # Errors
	///
	/// If any locks in the collection are already locked, then an error
	/// containing the given key is returned.
	///
	/// # Examples
	///
	/// ```
	/// use happylock::{Mutex, ThreadKey, LockCollection};
	///
	/// let key = ThreadKey::get().unwrap();
	/// let data = (Mutex::new(0), Mutex::new(""));
	/// let lock = LockCollection::new(data);
	///
	/// match lock.try_lock(key) {
	///     Ok(mut guard) => {
	///         *guard.0 += 1;
	///         *guard.1 = "1";
	///     },
	///     Err(_) => unreachable!(),
	/// };
	/// ```
	pub fn try_lock(&self, key: ThreadKey) -> Result<LockGuard<L::Guard<'_>>, ThreadKey> {
		let guard = unsafe {
			// safety: we have the thread key
			if !self.raw_try_write() {
				return Err(key);
			}

			// safety: we've acquired the locks
			self.child().guard()
		};

		Ok(LockGuard { guard, key })
	}

	/// Unlocks the underlying lockable data type, returning the key that's
	/// associated with it.
	///
	/// # Examples
	///
	/// ```
	/// use happylock::{Mutex, ThreadKey, LockCollection};
	///
	/// let key = ThreadKey::get().unwrap();
	/// let data = (Mutex::new(0), Mutex::new(""));
	/// let lock = LockCollection::new(data);
	///
	/// let mut guard = lock.lock(key);
	/// *guard.0 += 1;
	/// *guard.1 = "1";
	/// let key = LockCollection::<(Mutex<i32>, Mutex<&str>)>::unlock(guard);
	/// ```
	pub fn unlock(guard: LockGuard<L::Guard<'_>>) -> ThreadKey {
		drop(guard.guard);
		guard.key
	}
}

impl<L: Sharable> BoxedLockCollection<L> {
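	/// Locks the collection for shared access and passes references to the
	/// underlying data into the given function, releasing the locks once it
	/// returns.
	///
	/// # Examples
	///
	/// A minimal sketch, modeled on this module's tests:
	///
	/// ```
	/// use happylock::{RwLock, ThreadKey, LockCollection};
	///
	/// let mut key = ThreadKey::get().unwrap();
	/// let data = (RwLock::new(0), RwLock::new(""));
	/// let lock = LockCollection::new(data);
	///
	/// lock.scoped_read(&mut key, |guard| {
	///     assert_eq!(*guard.0, 0);
	///     assert_eq!(*guard.1, "");
	/// });
	/// ```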
	pub fn scoped_read<'a, R>(&'a self, key: impl Keyable, f: impl Fn(L::DataRef<'a>) -> R) -> R {
		scoped_read(self, key, f)
	}

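	/// Attempts to lock the collection for shared access without blocking,
	/// passing references to the underlying data into the given function.
	///
	/// # Errors
	///
	/// If any lock in the collection is already exclusively locked, then an
	/// error containing the given key is returned.
	///
	/// # Examples
	///
	/// A minimal sketch, modeled on this module's tests:
	///
	/// ```
	/// use happylock::{RwLock, ThreadKey, LockCollection};
	///
	/// let key = ThreadKey::get().unwrap();
	/// let data = (RwLock::new(5), RwLock::new("6"));
	/// let lock = LockCollection::new(data);
	///
	/// let r = lock.scoped_try_read(key, |guard| {
	///     assert_eq!(*guard.0, 5);
	/// });
	/// assert!(r.is_ok());
	/// ```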
	pub fn scoped_try_read<'a, Key: Keyable, R>(
		&'a self,
		key: Key,
		f: impl Fn(L::DataRef<'a>) -> R,
	) -> Result<R, Key> {
		scoped_try_read(self, key, f)
	}

	/// Locks the collection, so that other threads can still read from it
	///
	/// This function returns a guard that can be used to access the underlying
	/// data immutably. When the guard is dropped, the locks in the collection
	/// are released.
	///
	/// # Examples
	///
	/// ```
	/// use happylock::{RwLock, ThreadKey, LockCollection};
	///
	/// let key = ThreadKey::get().unwrap();
	/// let data = (RwLock::new(0), RwLock::new(""));
	/// let lock = LockCollection::new(data);
	///
	/// let guard = lock.read(key);
	/// assert_eq!(*guard.0, 0);
	/// assert_eq!(*guard.1, "");
	/// ```
	#[must_use]
	pub fn read(&self, key: ThreadKey) -> LockGuard<L::ReadGuard<'_>> {
		unsafe {
			// safety: we have the thread key
			self.raw_read();

			LockGuard {
				// safety: we've already acquired the lock
				guard: self.child().read_guard(),
				key,
			}
		}
	}

	/// Attempts to lock the collection without blocking, in such a way that
	/// other threads can still read from it.
	///
	/// If the access could not be granted at this time, then `Err` is
	/// returned. Otherwise, an RAII guard is returned which will release the
	/// shared access when it is dropped.
	///
	/// # Errors
	///
	/// If any of the locks in the collection are already exclusively locked,
	/// then an error is returned containing the given key.
	///
	/// # Examples
	///
	/// ```
	/// use happylock::{RwLock, ThreadKey, LockCollection};
	///
	/// let key = ThreadKey::get().unwrap();
	/// let data = (RwLock::new(5), RwLock::new("6"));
	/// let lock = LockCollection::new(data);
	///
	/// match lock.try_read(key) {
	///     Ok(guard) => {
	///         assert_eq!(*guard.0, 5);
	///         assert_eq!(*guard.1, "6");
	///     },
	///     Err(_) => unreachable!(),
	/// };
	/// ```
	pub fn try_read(&self, key: ThreadKey) -> Result<LockGuard<L::ReadGuard<'_>>, ThreadKey> {
		let guard = unsafe {
			// safety: we have the thread key
			if !self.raw_try_read() {
				return Err(key);
			}

			// safety: we've acquired the locks
			self.child().read_guard()
		};

		Ok(LockGuard { guard, key })
	}

	/// Unlocks the underlying lockable data type, returning the key that's
	/// associated with it.
	///
	/// # Examples
	///
	/// ```
	/// use happylock::{RwLock, ThreadKey, LockCollection};
	///
	/// let key = ThreadKey::get().unwrap();
	/// let data = (RwLock::new(0), RwLock::new(""));
	/// let lock = LockCollection::new(data);
	///
	/// let guard = lock.read(key);
	/// let key = LockCollection::<(RwLock<i32>, RwLock<&str>)>::unlock_read(guard);
	/// ```
	pub fn unlock_read(guard: LockGuard<L::ReadGuard<'_>>) -> ThreadKey {
		drop(guard.guard);
		guard.key
	}
}

impl<L: LockableIntoInner> BoxedLockCollection<L> {
	/// Consumes this `BoxedLockCollection`, returning the underlying data.
	///
	/// # Examples
	///
	/// ```
	/// use happylock::{Mutex, LockCollection};
	///
	/// let mutex = LockCollection::new([Mutex::new(0), Mutex::new(0)]);
	/// assert_eq!(mutex.into_inner(), [0, 0]);
	/// ```
	#[must_use]
	pub fn into_inner(self) -> <Self as LockableIntoInner>::Inner {
		LockableIntoInner::into_inner(self)
	}
}

impl<'a, L: 'a> BoxedLockCollection<L>
where
	&'a L: IntoIterator,
{
	/// Returns an iterator over references to each value in the collection.
	///
	/// # Examples
	///
	/// ```
	/// use happylock::{Mutex, ThreadKey, LockCollection};
	///
	/// let key = ThreadKey::get().unwrap();
	/// let data = [Mutex::new(26), Mutex::new(1)];
	/// let lock = LockCollection::new(data);
	///
	/// let mut iter = lock.iter();
	/// let mutex = iter.next().unwrap();
	/// let guard = mutex.lock(key);
	///
	/// assert_eq!(*guard, 26);
	/// ```
	#[must_use]
	pub fn iter(&'a self) -> <&'a L as IntoIterator>::IntoIter {
		self.into_iter()
	}
}

#[cfg(test)]
mod tests {
	use super::*;
	use crate::{Mutex, RwLock, ThreadKey};

	#[test]
	fn from_iterator() {
		let key = ThreadKey::get().unwrap();
		let collection: BoxedLockCollection<Vec<Mutex<&str>>> =
			[Mutex::new("foo"), Mutex::new("bar"), Mutex::new("baz")]
				.into_iter()
				.collect();
		let guard = collection.lock(key);
		assert_eq!(*guard[0], "foo");
		assert_eq!(*guard[1], "bar");
		assert_eq!(*guard[2], "baz");
	}

	#[test]
	fn from() {
		let key = ThreadKey::get().unwrap();
		let collection =
			BoxedLockCollection::from([Mutex::new("foo"), Mutex::new("bar"), Mutex::new("baz")]);
		let guard = collection.lock(key);
		assert_eq!(*guard[0], "foo");
		assert_eq!(*guard[1], "bar");
		assert_eq!(*guard[2], "baz");
	}

	#[test]
	fn into_owned_iterator() {
		let collection = BoxedLockCollection::new([Mutex::new(0), Mutex::new(1), Mutex::new(2)]);
		for (i, mutex) in collection.into_iter().enumerate() {
			assert_eq!(mutex.into_inner(), i);
		}
	}

	#[test]
	fn into_ref_iterator() {
		let mut key = ThreadKey::get().unwrap();
		let collection = BoxedLockCollection::new([Mutex::new(0), Mutex::new(1), Mutex::new(2)]);
		for (i, mutex) in (&collection).into_iter().enumerate() {
			mutex.scoped_lock(&mut key, |val| assert_eq!(*val, i))
		}
	}

	#[test]
	fn ref_iterator() {
		let mut key = ThreadKey::get().unwrap();
		let collection = BoxedLockCollection::new([Mutex::new(0), Mutex::new(1), Mutex::new(2)]);
		for (i, mutex) in collection.iter().enumerate() {
			mutex.scoped_lock(&mut key, |val| assert_eq!(*val, i))
		}
	}

	#[test]
	#[allow(clippy::float_cmp)]
	fn uses_correct_default() {
		let collection =
			BoxedLockCollection::<(Mutex<f64>, Mutex<Option<i32>>, Mutex<usize>)>::default();
		let tuple = collection.into_inner();
		assert_eq!(tuple.0, 0.0);
		assert!(tuple.1.is_none());
		assert_eq!(tuple.2, 0)
	}

	#[test]
	fn non_duplicates_allowed() {
		let mutex1 = Mutex::new(0);
		let mutex2 = Mutex::new(1);
		assert!(BoxedLockCollection::try_new([&mutex1, &mutex2]).is_some())
	}

	#[test]
	fn duplicates_not_allowed() {
		let mutex1 = Mutex::new(0);
		assert!(BoxedLockCollection::try_new([&mutex1, &mutex1]).is_none())
	}

	#[test]
	fn scoped_read_sees_changes() {
		let mut key = ThreadKey::get().unwrap();
		let mutexes = [RwLock::new(24), RwLock::new(42)];
		let collection = BoxedLockCollection::new(mutexes);
		collection.scoped_lock(&mut key, |guard| *guard[0] = 128);

		let sum = collection.scoped_read(&mut key, |guard| {
			assert_eq!(*guard[0], 128);
			assert_eq!(*guard[1], 42);
			*guard[0] + *guard[1]
		});

		assert_eq!(sum, 128 + 42);
	}

	#[test]
	fn scoped_try_lock_can_fail() {
		let key = ThreadKey::get().unwrap();
		let collection = BoxedLockCollection::new([Mutex::new(1), Mutex::new(2)]);
		let guard = collection.lock(key);

		std::thread::scope(|s| {
			s.spawn(|| {
				let key = ThreadKey::get().unwrap();
				let r = collection.scoped_try_lock(key, |_| {});
				assert!(r.is_err());
			});
		});

		drop(guard);
	}

	#[test]
	fn scoped_try_read_can_fail() {
		let key = ThreadKey::get().unwrap();
		let collection = BoxedLockCollection::new([RwLock::new(1), RwLock::new(2)]);
		let guard = collection.lock(key);

		std::thread::scope(|s| {
			s.spawn(|| {
				let key = ThreadKey::get().unwrap();
				let r = collection.scoped_try_read(key, |_| {});
				assert!(r.is_err());
			});
		});

		drop(guard);
	}

	#[test]
	fn try_lock_works() {
		let key = ThreadKey::get().unwrap();
		let collection = BoxedLockCollection::new([Mutex::new(1), Mutex::new(2)]);
		let guard = collection.try_lock(key);

		std::thread::scope(|s| {
			s.spawn(|| {
				let key = ThreadKey::get().unwrap();
				let guard = collection.try_lock(key);
				assert!(guard.is_err());
			});
		});

		assert!(guard.is_ok());
	}

	#[test]
	fn try_read_works() {
		let key = ThreadKey::get().unwrap();
		let collection = BoxedLockCollection::new([RwLock::new(1), RwLock::new(2)]);
		let guard = collection.try_read(key);

		std::thread::scope(|s| {
			s.spawn(|| {
				let key = ThreadKey::get().unwrap();
				let guard = collection.try_read(key);
				assert!(guard.is_ok());
			});
		});

		assert!(guard.is_ok());
	}

	#[test]
	fn try_lock_fails_with_one_exclusive_lock() {
		let key = ThreadKey::get().unwrap();
		let locks = [Mutex::new(1), Mutex::new(2)];
		let collection = BoxedLockCollection::new_ref(&locks);
		let guard = locks[1].try_lock(key);

		std::thread::scope(|s| {
			s.spawn(|| {
				let key = ThreadKey::get().unwrap();
				let guard = collection.try_lock(key);
				assert!(guard.is_err());
			});
		});

		assert!(guard.is_ok());
	}

	#[test]
	fn try_read_fails_during_exclusive_lock() {
		let key = ThreadKey::get().unwrap();
		let collection = BoxedLockCollection::new([RwLock::new(1), RwLock::new(2)]);
		let guard = collection.try_lock(key);

		std::thread::scope(|s| {
			s.spawn(|| {
				let key = ThreadKey::get().unwrap();
				let guard = collection.try_read(key);
				assert!(guard.is_err());
			});
		});

		assert!(guard.is_ok());
	}

	#[test]
	fn try_read_fails_with_one_exclusive_lock() {
		let key = ThreadKey::get().unwrap();
		let locks = [RwLock::new(1), RwLock::new(2)];
		let collection = BoxedLockCollection::new_ref(&locks);
		let guard = locks[1].try_write(key);

		std::thread::scope(|s| {
			s.spawn(|| {
				let key = ThreadKey::get().unwrap();
				let guard = collection.try_read(key);
				assert!(guard.is_err());
			});
		});

		assert!(guard.is_ok());
	}

	#[test]
	fn unlock_collection_works() {
		let key = ThreadKey::get().unwrap();
		let mutex1 = Mutex::new("foo");
		let mutex2 = Mutex::new("bar");
		let collection = BoxedLockCollection::try_new((&mutex1, &mutex2)).unwrap();
		let guard = collection.lock(key);
		let key = BoxedLockCollection::<(&Mutex<_>, &Mutex<_>)>::unlock(guard);

		assert!(mutex1.try_lock(key).is_ok())
	}

	#[test]
	fn read_unlock_collection_works() {
		let key = ThreadKey::get().unwrap();
		let lock1 = RwLock::new("foo");
		let lock2 = RwLock::new("bar");
		let collection = BoxedLockCollection::try_new((&lock1, &lock2)).unwrap();
		let guard = collection.read(key);
		let key = BoxedLockCollection::<(&RwLock<_>, &RwLock<_>)>::unlock_read(guard);

		assert!(lock1.try_write(key).is_ok())
	}

	#[test]
	fn into_inner_works() {
		let collection = BoxedLockCollection::new((Mutex::new("Hello"), Mutex::new(47)));
		assert_eq!(collection.into_inner(), ("Hello", 47))
	}

	#[test]
	fn works_in_collection() {
		let key = ThreadKey::get().unwrap();
		let mutex1 = RwLock::new(0);
		let mutex2 = RwLock::new(1);
		let collection =
			BoxedLockCollection::try_new(BoxedLockCollection::try_new([&mutex1, &mutex2]).unwrap())
				.unwrap();

		let mut guard = collection.lock(key);
		assert!(mutex1.is_locked());
		assert!(mutex2.is_locked());
		assert_eq!(*guard[0], 0);
		assert_eq!(*guard[1], 1);
		*guard[0] = 2;
		let key = BoxedLockCollection::<BoxedLockCollection<[&RwLock<_>; 2]>>::unlock(guard);

		let guard = collection.read(key);
		assert!(mutex1.is_locked());
		assert!(mutex2.is_locked());
		assert_eq!(*guard[0], 2);
		assert_eq!(*guard[1], 1);
		drop(guard);
	}

	#[test]
	fn as_ref_works() {
		let mutexes = [Mutex::new(0), Mutex::new(1)];
		let collection = BoxedLockCollection::new_ref(&mutexes);

		assert!(std::ptr::addr_eq(&mutexes, collection.as_ref()))
	}

	#[test]
	fn child() {
		let mutexes = [Mutex::new(0), Mutex::new(1)];
		let collection = BoxedLockCollection::new_ref(&mutexes);

		assert!(std::ptr::addr_eq(&mutexes, *collection.child()))
	}
}
923}