Trait `signature_core::lib::marker::Send` (stable since Rust 1.0.0)
pub unsafe auto trait Send { }
Expand description
Types that can be transferred across thread boundaries.
This trait is automatically implemented when the compiler determines it’s appropriate.
An example of a non-`Send` type is the reference-counting pointer `rc::Rc`. If two threads attempt to clone `Rc`s that point to the same reference-counted value, they might try to update the reference count at the same time, which is undefined behavior because `Rc` doesn't use atomic operations. Its cousin `sync::Arc` does use atomic operations (incurring some overhead) and thus is `Send`.
See the Nomicon for more details.
Implementations on Foreign Types
impl Send for Argument
impl Send for FormatSpec
impl Send for Alignment
impl Send for Count
impl Send for Big32x40
impl Send for Big8x3
impl Send for Sign
impl<'a> Send for Decimal<'a>
impl<'a> Send for ParseResult<'a>
impl Send for Unpacked
impl Send for Decoded
impl Send for FullDecoded
impl Send for Sign
impl<'a> Send for Part<'a>
impl<'a> Send for Formatted<'a>
`NonNull` pointers are not `Send` because the data they reference may be aliased.
impl<T> Send for BitSpanError<T> where
T: BitStore,
impl<T> Send for MisalignError<T>
impl<'_, O, T> Send for IterMut<'_, O, T> where
T: BitStore,
O: BitOrder,
Conditionally mark `BitSlice` as `Send` based on its `T` type argument.

In order for `BitSlice` to be `Send` (that is, `&mut BitSlice` can be moved across thread boundaries), it must be capable of writing to memory without invalidating any other `&BitSlice` handles that alias the same memory address.

This is true when `T` is one of the fundamental integers, because no other `&BitSlice` handle is able to observe mutations, or when `T` is a `BitSafe` type that implements atomic read-modify-write instructions, because other `&BitSlice` types will be protected from data races by the hardware.

When `T` is a non-atomic `BitSafe` type, `BitSlice` cannot be `Send`, because one `&mut BitSlice` moved across a thread boundary may cause mutation that another `&BitSlice` may observe, but the instructions used to access memory do not guard against data races.

A `&mut BitSlice` over aliased memory addresses is equivalent to either a `&Cell` or `&AtomicT`, depending on what the `radium` crate makes available for the register width.
impl<'_, O, T> Send for Iter<'_, O, T> where
T: BitStore,
O: BitOrder,
impl<'_, K, V, S, A> Send for OccupiedEntry<'_, K, V, S, A> where
S: Send,
A: Send + Allocator + Clone,
K: Send,
V: Send,
impl<'_, K, V, S, A> Send for RawOccupiedEntryMut<'_, K, V, S, A> where
A: Send + Allocator + Clone,
K: Send,
V: Send,
Implementors
Auto implementors
impl Send for HiddenMessage
impl Send for ProofMessage
impl Send for Infallible
impl Send for FpCategory
impl Send for IntErrorKind
impl Send for SearchStep
impl Send for Commitment
impl Send for SignatureBlinding
impl Send for NonZeroI16
impl Send for NonZeroI32
impl Send for NonZeroI64
impl Send for NonZeroI128
impl Send for NonZeroIsize
impl Send for NonZeroU16
impl Send for NonZeroU32
impl Send for NonZeroU64
impl Send for NonZeroU128
impl Send for NonZeroUsize
impl Send for ParseFloatError
impl Send for ParseIntError
impl Send for TryFromIntError
impl Send for ParseBoolError
impl Send for PhantomPinned
impl<'a> Send for EscapeAscii<'a>
impl<'a> Send for Utf8LossyChunk<'a>
impl<'a> Send for Utf8LossyChunksIter<'a>
impl<'a> Send for CharSearcher<'a>
impl<'a> Send for CharIndices<'a>
impl<'a> Send for EncodeUtf16<'a>
impl<'a> Send for EscapeDebug<'a>
impl<'a> Send for EscapeDefault<'a>
impl<'a> Send for EscapeUnicode<'a>
impl<'a> Send for SplitAsciiWhitespace<'a>
impl<'a> Send for SplitWhitespace<'a>
impl<'a, 'b> !Send for DebugStruct<'a, 'b>
impl<'a, 'b> !Send for DebugTuple<'a, 'b>
impl<'a, 'b> Send for CharSliceSearcher<'a, 'b>
impl<'a, 'b> Send for StrSearcher<'a, 'b>
impl<'a, F> Send for CharPredicateSearcher<'a, F> where
F: Send,
impl<'a, P> Send for MatchIndices<'a, P> where
<P as Pattern<'a>>::Searcher: Send,
impl<'a, P> Send for RMatchIndices<'a, P> where
<P as Pattern<'a>>::Searcher: Send,
impl<'a, P> Send for signature_core::lib::str::RSplit<'a, P> where
<P as Pattern<'a>>::Searcher: Send,
impl<'a, P> Send for signature_core::lib::str::RSplitN<'a, P> where
<P as Pattern<'a>>::Searcher: Send,
impl<'a, P> Send for RSplitTerminator<'a, P> where
<P as Pattern<'a>>::Searcher: Send,
impl<'a, P> Send for signature_core::lib::str::Split<'a, P> where
<P as Pattern<'a>>::Searcher: Send,
impl<'a, P> Send for signature_core::lib::str::SplitInclusive<'a, P> where
<P as Pattern<'a>>::Searcher: Send,
impl<'a, P> Send for signature_core::lib::str::SplitN<'a, P> where
<P as Pattern<'a>>::Searcher: Send,
impl<'a, P> Send for SplitTerminator<'a, P> where
<P as Pattern<'a>>::Searcher: Send,
impl<'a, T> Send for ChunksExact<'a, T> where
T: Sync,
impl<'a, T> Send for ChunksExactMut<'a, T> where
T: Send,
impl<'a, T> Send for RChunksExact<'a, T> where
T: Sync,
impl<'a, T> Send for RChunksExactMut<'a, T> where
T: Send,
impl<'a, T> Send for RChunksMut<'a, T> where
T: Send,
impl<'a, T, P> Send for GroupByMut<'a, T, P> where
P: Send,
T: Send,
impl<'a, T, P> Send for RSplitNMut<'a, T, P> where
P: Send,
T: Send,
impl<'a, T, P> Send for signature_core::lib::slice::SplitInclusive<'a, T, P> where
P: Send,
T: Sync,
impl<'a, T, P> Send for SplitInclusiveMut<'a, T, P> where
P: Send,
T: Send,
impl<'a, T, const N: usize> !Send for ArrayWindows<'a, T, N>
impl<'a, T, const N: usize> Send for ArrayChunks<'a, T, N> where
T: Sync,
impl<'a, T, const N: usize> Send for ArrayChunksMut<'a, T, N> where
T: Send,
impl<B, C, const P: usize, const S: usize> Send for ProofCommittedBuilder<B, C, P, S>
impl<F> Send for RepeatWith<F> where
F: Send,
impl<I, U, F> Send for FlatMap<I, U, F> where
F: Send,
I: Send,
<U as IntoIterator>::IntoIter: Send,
impl<T> Send for Discriminant<T>
impl<T> Send for MaybeUninit<T> where
T: Send,
impl<T, F> Send for Successors<T, F> where
F: Send,
T: Send,
impl<T: ?Sized> Send for ManuallyDrop<T> where
T: Send,
impl<T: ?Sized> Send for PhantomData<T> where
T: Send,