pub unsafe auto trait Send { }
Types that can be transferred across thread boundaries.
This trait is automatically implemented when the compiler determines it’s appropriate.
An example of a non-Send type is the reference-counting pointer
rc::Rc. If two threads attempt to clone Rcs that point to the same
reference-counted value, they might try to update the reference count at the
same time, which is undefined behavior because Rc doesn’t use atomic
operations. Its cousin sync::Arc does use atomic operations (incurring
some overhead) and thus is Send.
See the Nomicon and the Sync trait for more details.
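A minimal sketch of the difference: an Arc clone can move into a spawned thread, while the equivalent Rc code is rejected at compile time because Rc is not Send.

use std::sync::Arc;
use std::thread;

fn main() {
    // `Arc<i32>` is `Send`, so a clone can move into another thread.
    let shared = Arc::new(42);
    let clone = Arc::clone(&shared);
    let handle = thread::spawn(move || println!("from thread: {clone}"));
    handle.join().unwrap();

    // The `Rc` equivalent does not compile, because `std::rc::Rc<i32>`
    // is `!Send`:
    //
    //     let rc = std::rc::Rc::new(42);
    //     thread::spawn(move || println!("{rc}"));
}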
Implementors
impl !Send for Args
impl !Send for ArgsOs
impl Send for cairo_vm::with_std::string::Drain<'_>
impl Send for Waker
impl<'a> Send for IoSlice<'a>
impl<'a> Send for IoSliceMut<'a>
impl<'a, 'b, K, Q, V, S, A> Send for OccupiedEntryRef<'a, 'b, K, Q, V, S, A> where K: Send, Q: Sync + ?Sized, V: Send, S: Send, A: Send + Allocator + Clone,
impl<'a, K, V> Send for Iter<'a, K, V> where K: Send, V: Send,
impl<'a, K, V> Send for IterMut<'a, K, V> where K: Send, V: Send,
impl<'a, T, O> Send for bitvec::slice::iter::Iter<'a, T, O> where T: BitStore, O: BitOrder, &'a mut BitSlice<T, O>: Send,
impl<'a, T, O> Send for bitvec::slice::iter::IterMut<'a, T, O> where T: BitStore, O: BitOrder, &'a mut BitSlice<T, O>: Send,
impl<Dyn> Send for DynMetadata<Dyn> where Dyn: ?Sized,
impl<K, V> Send for hashbrown::map::IterMut<'_, K, V> where K: Send, V: Send,
impl<K, V, S> Send for LruCache<K, V, S> where K: Send, V: Send, S: Send,
impl<K, V, S, A> Send for OccupiedEntry<'_, K, V, S, A> where K: Send, V: Send, S: Send, A: Send + Allocator + Clone,
impl<K, V, S, A> Send for RawOccupiedEntryMut<'_, K, V, S, A> where K: Send, V: Send, S: Send, A: Send + Allocator + Clone,
impl<M, T, O> Send for BitRef<'_, M, T, O> where M: Mutability, T: BitStore + Sync, O: BitOrder,
impl<T> !Send for *const T where T: ?Sized,
impl<T> !Send for *mut T where T: ?Sized,
impl<T> !Send for NonNull<T> where T: ?Sized,
NonNull pointers are not Send because the data they reference may be aliased.
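A type that uniquely owns the allocation behind such a pointer can still opt in to Send, but only through an unsafe impl whose aliasing guarantee the author, not the compiler, must uphold. OwnedPtr below is a hypothetical wrapper sketched for illustration, not an API of this crate.

use std::ptr::NonNull;

// Hypothetical wrapper: assumed to be the *only* handle to its allocation.
struct OwnedPtr<T>(NonNull<T>);

// SAFETY: asserted by the author, not checked by the compiler. Because
// `OwnedPtr` is the sole handle to the value, no aliased access can occur
// when it crosses threads; `T: Send` because the value moves with it.
unsafe impl<T: Send> Send for OwnedPtr<T> {}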
impl<T> !Send for MutexGuard<'_, T> where T: ?Sized,
impl<T> !Send for RwLockReadGuard<'_, T> where T: ?Sized,
impl<T> !Send for RwLockWriteGuard<'_, T> where T: ?Sized,
impl<T> Send for BitSpanError<T> where T: BitStore,
impl<T> Send for &T where T: Sync + ?Sized,
impl<T> Send for ThinBox<T> where T: Send + ?Sized,
ThinBox<T> is Send if T is Send because the data is owned.
impl<T> Send for Cell<T> where T: Send + ?Sized,
impl<T> Send for RefCell<T> where T: Send + ?Sized,
impl<T> Send for ChunksExactMut<'_, T> where T: Send,
impl<T> Send for ChunksMut<'_, T> where T: Send,
impl<T> Send for cairo_vm::with_std::slice::Iter<'_, T> where T: Sync,
impl<T> Send for cairo_vm::with_std::slice::IterMut<'_, T> where T: Send,
impl<T> Send for RChunksExactMut<'_, T> where T: Send,
impl<T> Send for RChunksMut<'_, T> where T: Send,
impl<T> Send for AtomicPtr<T>
impl<T> Send for Receiver<T> where T: Send,
impl<T> Send for Sender<T> where T: Send,
impl<T> Send for SyncSender<T> where T: Send,
impl<T> Send for cairo_vm::with_std::sync::Mutex<T> where T: Send + ?Sized,
impl<T> Send for OnceLock<T> where T: Send,
impl<T> Send for cairo_vm::with_std::sync::RwLock<T> where T: Send + ?Sized,
impl<T> Send for alloc::collections::linked_list::Iter<'_, T> where T: Sync,
impl<T> Send for alloc::collections::linked_list::IterMut<'_, T> where T: Send,
impl<T> Send for JoinHandle<T>
impl<T> Send for MisalignError<T>
impl<T> Send for Mutex<T> where T: Send + ?Sized,
impl<T> Send for Once<T> where T: Send,
impl<T> Send for RwLock<T> where T: Send + ?Sized,
impl<T, A> !Send for Rc<T, A> where A: Allocator, T: ?Sized,
impl<T, A> !Send for cairo_vm::with_std::rc::Weak<T, A> where A: Allocator, T: ?Sized,
impl<T, A> Send for Arc<T, A> where T: Sync + Send + ?Sized, A: Allocator + Send,
impl<T, A> Send for cairo_vm::with_std::sync::Weak<T, A> where T: Sync + Send + ?Sized, A: Allocator + Send,
impl<T, A> Send for cairo_vm::with_std::vec::Drain<'_, T, A> where T: Send, A: Send + Allocator,
impl<T, A> Send for cairo_vm::with_std::vec::IntoIter<T, A> where T: Send, A: Allocator + Send,
impl<T, A> Send for Cursor<'_, T, A> where T: Sync, A: Allocator + Sync,
impl<T, A> Send for CursorMut<'_, T, A> where T: Send, A: Allocator + Send,
impl<T, A> Send for LinkedList<T, A> where T: Send, A: Allocator + Send,
impl<T, A> Send for alloc::collections::vec_deque::drain::Drain<'_, T, A> where T: Send, A: Allocator + Send,
impl<T, N> Send for GenericArray<T, N> where T: Send, N: ArrayLength<T>,
impl<T, O> Send for bitvec::boxed::iter::IntoIter<T, O> where T: BitStore + Sync, O: BitOrder,
impl<T, O> Send for BitBox<T, O> where T: BitStore, O: BitOrder,
impl<T, O> Send for BitSlice<T, O> where T: BitStore + Sync, O: BitOrder,
Bit-Slice Thread Safety
This allows bit-slice references to be moved across thread boundaries only when
the underlying T element can tolerate concurrency.
All BitSlice references, shared or exclusive, are only thread-safe if the T
element type is Send, because any given bit-slice reference may have only
partial control of a memory element that is also shared by a bit-slice
reference on another thread. As such, this is never implemented for Cell<U>,
but always implemented for AtomicU and U for a given unsigned integer type
U.
Atomic integers safely handle concurrent writes, while cells do not allow
concurrency at all, so the only missing piece is &mut BitSlice<_, U: Unsigned>. This is
handled by the aliasing system that the mutable splitters employ: a mutable
reference to an unsynchronized bit-slice can only cross threads when no other
handle is able to exist to the elements it governs. Splitting a mutable
bit-slice causes the split halves to change over to either atomics or cells, so
concurrency is either safe or impossible.
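A minimal sketch of that splitting behaviour, assuming the bitvec crate with its default (atomic) features: split_at_mut re-types both halves through the aliasing system, so each exclusive half is Send and may move into its own scoped thread.

use bitvec::prelude::*;

fn main() {
    let mut data = [0u8; 2];
    let bits = data.view_bits_mut::<Lsb0>();
    // The split halves use the aliased store type (an atomic under
    // default features), so each `&mut BitSlice` half is `Send`.
    let (left, right) = bits.split_at_mut(8);
    std::thread::scope(|s| {
        s.spawn(move || left.fill(true));
        s.spawn(move || right.fill(false));
    });
    assert_eq!(data, [0xFF, 0x00]);
}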