pub struct OgreUnique<DataType, OgreAllocatorType>
where
    DataType: Debug + Send + Sync + 'static,
    OgreAllocatorType: BoundedOgreAllocator<DataType> + Send + Sync + 'static,
{ /* private fields */ }
Wrapper type for data that requires a custom Drop to be called (through a BoundedOgreAllocator).
Similar to C++’s unique_ptr.
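As an illustration of that ownership model, here is a minimal sketch: a slot is taken from a bounded allocator, filled in place and handed out with single-owner semantics; dropping the wrapper returns the slot to the allocator. The import path and the u64 payload type are assumptions for the example, not details taken from this page.

```rust
use reactive_mutiny::prelude::advanced::{BoundedOgreAllocator, OgreUnique}; // import path is an assumption

/// Takes one slot from the (bounded) allocator, fills it in place and returns it
/// with single-owner semantics: dropping the `OgreUnique` gives the slot back.
fn checkout<A>(allocator: &A) -> Option<OgreUnique<u64, A>>
where
    A: BoundedOgreAllocator<u64> + Send + Sync + 'static,
{
    // `new()` returns `None` when the allocator has no free slots left
    OgreUnique::new(|slot| *slot = 42, allocator)
}
```

Dereferencing the wrapper yields the payload directly (see the Deref and AsRef impls below).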
Implementations§
impl<DataType, OgreAllocatorType> OgreUnique<DataType, OgreAllocatorType>
pub fn new<F>(setter: F, allocator: &OgreAllocatorType) -> Option<OgreUnique<DataType, OgreAllocatorType>>
where F: FnOnce(&mut DataType)
pub fn from_allocated_id(data_id: u32, allocator: &OgreAllocatorType) -> OgreUnique<DataType, OgreAllocatorType>
pub fn from_allocated_ref(data_ref: &DataType, allocator: &OgreAllocatorType) -> OgreUnique<DataType, OgreAllocatorType>
pub fn into_ogre_arc(self) -> OgreArc<DataType, OgreAllocatorType>
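The conversion method pairs with the From impl listed under Trait Implementations. A small sketch of promoting exclusive ownership to the shared OgreArc counterpart (import paths are assumptions):

```rust
use reactive_mutiny::prelude::advanced::{BoundedOgreAllocator, OgreArc, OgreUnique}; // import paths are assumptions

/// Gives up exclusive ownership, returning the shared, reference-counted wrapper.
fn share<A>(unique: OgreUnique<String, A>) -> OgreArc<String, A>
where
    A: BoundedOgreAllocator<String> + Send + Sync + 'static,
{
    unique.into_ogre_arc() // same effect as `OgreArc::from(unique)` through the `From` impl below
}
```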
Trait Implementations§
impl<DataType, OgreAllocatorType> AsRef<DataType> for OgreUnique<DataType, OgreAllocatorType>
impl<DataType, OgreAllocatorType> Borrow<DataType> for OgreUnique<DataType, OgreAllocatorType>
impl<'a, ItemType, OgreAllocatorType, const BUFFER_SIZE: usize, const MAX_STREAMS: usize> ChannelCommon<ItemType, OgreUnique<ItemType, OgreAllocatorType>> for Atomic<'a, ItemType, OgreAllocatorType, BUFFER_SIZE, MAX_STREAMS>
fn new<IntoString>(name: IntoString) -> Arc<Atomic<'a, ItemType, OgreAllocatorType, BUFFER_SIZE, MAX_STREAMS>>
Creates a new instance of this channel, to be referred to (in logs) as name.
async fn flush(&self, timeout: Duration) -> u32
Waits until all pending items are taken from this channel, up until timeout elapses. Returns the number of still unconsumed items – which is 0 if it was not interrupted by the timeout.
fn is_channel_open(&self) -> bool
Tells whether this channel is still enabled to process elements (true before calling the “end stream” / “cancel stream” functions).
async fn gracefully_end_stream(&self, stream_id: u32, timeout: Duration) -> bool
Flushes & signals that the given stream_id should cease its activities when there are no more elements left to process, waiting for the operation to complete for up to timeout. Returns true if the stream ended within the given timeout or false if it is still processing elements.
async fn gracefully_end_all_streams(&self, timeout: Duration) -> u32
Flushes & signals that all streams should cease their activities when there are no more elements left to process, waiting for the operation to complete for up to timeout. Returns the number of un-ended streams – which is 0 if it was not interrupted by the timeout.
fn cancel_all_streams(&self)
Sends a signal to all streams, urging them to cease their operations. In opposition to [end_all_streams()], this method does not wait for any confirmation, nor does it care whether there are remaining elements to be processed.
fn running_streams_count(&self) -> u32
Informs the caller how many active streams are currently managed by this channel.
IMPLEMENTORS: #[inline(always)]
fn pending_items_count(&self) -> u32
Tells how many events are waiting to be taken out of this channel.
IMPLEMENTORS: #[inline(always)]
fn buffer_size(&self) -> u32
Tells how many events may be produced ahead of the consumers.
IMPLEMENTORS: #[inline(always)]
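The flush / end-streams methods above compose into a graceful-shutdown routine. Below is a minimal sketch, generic over any ChannelCommon implementor such as the Atomic and FullSync channels on this page; the import path and the extra bounds on the type parameters are assumptions:

```rust
use std::fmt::Debug;
use std::time::Duration;
use reactive_mutiny::prelude::advanced::ChannelCommon; // import path is an assumption

/// Graceful-shutdown sketch for any `ChannelCommon` implementor.
/// Returns `true` if everything was consumed and every stream ended within `timeout`.
async fn shutdown<C, Item, Derived>(channel: &C, timeout: Duration) -> bool
where
    C: ChannelCommon<Item, Derived>,
    Item: Debug + Send + Sync + 'static,    // bounds approximated from this page's impls
    Derived: Debug + Send + Sync + 'static,
{
    // wait (up to `timeout`) for consumers to drain whatever is still buffered
    let unconsumed = channel.flush(timeout).await;
    // then ask every stream to finish once it runs out of elements
    let still_running = channel.gracefully_end_all_streams(timeout).await;
    unconsumed == 0 && still_running == 0
}
```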
impl<'a, ItemType, OgreAllocatorType, const BUFFER_SIZE: usize, const MAX_STREAMS: usize> ChannelCommon<ItemType, OgreUnique<ItemType, OgreAllocatorType>> for FullSync<'a, ItemType, OgreAllocatorType, BUFFER_SIZE, MAX_STREAMS>
fn new<IntoString>(streams_manager_name: IntoString) -> Arc<FullSync<'a, ItemType, OgreAllocatorType, BUFFER_SIZE, MAX_STREAMS>>
Creates a new instance of this channel, to be referred to (in logs) as name.
async fn flush(&self, timeout: Duration) -> u32
Waits until all pending items are taken from this channel, up until timeout elapses. Returns the number of still unconsumed items – which is 0 if it was not interrupted by the timeout.
fn is_channel_open(&self) -> bool
Tells whether this channel is still enabled to process elements (true before calling the “end stream” / “cancel stream” functions).
async fn gracefully_end_stream(&self, stream_id: u32, timeout: Duration) -> bool
Flushes & signals that the given stream_id should cease its activities when there are no more elements left to process, waiting for the operation to complete for up to timeout. Returns true if the stream ended within the given timeout or false if it is still processing elements.
async fn gracefully_end_all_streams(&self, timeout: Duration) -> u32
Flushes & signals that all streams should cease their activities when there are no more elements left to process, waiting for the operation to complete for up to timeout. Returns the number of un-ended streams – which is 0 if it was not interrupted by the timeout.
fn cancel_all_streams(&self)
Sends a signal to all streams, urging them to cease their operations. In opposition to [end_all_streams()], this method does not wait for any confirmation, nor does it care whether there are remaining elements to be processed.
fn running_streams_count(&self) -> u32
Informs the caller how many active streams are currently managed by this channel.
IMPLEMENTORS: #[inline(always)]
fn pending_items_count(&self) -> u32
Tells how many events are waiting to be taken out of this channel.
IMPLEMENTORS: #[inline(always)]
fn buffer_size(&self) -> u32
Tells how many events may be produced ahead of the consumers.
IMPLEMENTORS: #[inline(always)]
impl<'a, ItemType, OgreAllocatorType, const BUFFER_SIZE: usize, const MAX_STREAMS: usize> ChannelConsumer<'a, OgreUnique<ItemType, OgreAllocatorType>> for Atomic<'a, ItemType, OgreAllocatorType, BUFFER_SIZE, MAX_STREAMS>
fn consume(&self, _stream_id: u32) -> Option<OgreUnique<ItemType, OgreAllocatorType>>
Delivers the next event, whenever the Stream wants it.
IMPLEMENTORS: use #[inline(always)]
fn keep_stream_running(&self, stream_id: u32) -> bool
Returns false if the Stream has been signaled to end its operations, causing it to report “out-of-elements” as soon as possible.
IMPLEMENTORS: use #[inline(always)]
fn register_stream_waker(&self, stream_id: u32, waker: &Waker)
Shares, for implementors’ concern, how stream_id may be awakened.
IMPLEMENTORS: use #[inline(always)]
fn drop_resources(&self, stream_id: u32)
Reports no more elements will be required through [provide()].
IMPLEMENTORS: use #[inline(always)]
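These four methods are the contract that backs the crate's Streams. The sketch below shows how a poll function might wire them together; it is illustrative only (not the actual MutinyStream code), and the import path and extra bounds are assumptions:

```rust
use std::fmt::Debug;
use std::task::{Context, Poll};
use reactive_mutiny::prelude::advanced::ChannelConsumer; // import path is an assumption

/// Roughly what a Stream implementation does with the `ChannelConsumer` API on
/// every poll: try to take an event, otherwise either end or park until woken.
fn poll_next_event<'a, C, Derived>(channel: &C, stream_id: u32, cx: &mut Context<'_>) -> Poll<Option<Derived>>
where
    C: ChannelConsumer<'a, Derived>,
    Derived: Debug + Send + Sync + 'a, // bounds approximated
{
    if let Some(event) = channel.consume(stream_id) {
        return Poll::Ready(Some(event));                  // an event was available
    }
    if !channel.keep_stream_running(stream_id) {
        return Poll::Ready(None);                         // the channel signaled the end
    }
    channel.register_stream_waker(stream_id, cx.waker()); // park until an event (or the end) arrives
    Poll::Pending
}
```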
impl<'a, ItemType, OgreAllocatorType, const BUFFER_SIZE: usize, const MAX_STREAMS: usize> ChannelConsumer<'a, OgreUnique<ItemType, OgreAllocatorType>> for FullSync<'a, ItemType, OgreAllocatorType, BUFFER_SIZE, MAX_STREAMS>
fn consume(&self, _stream_id: u32) -> Option<OgreUnique<ItemType, OgreAllocatorType>>
Delivers the next event, whenever the Stream wants it.
IMPLEMENTORS: use #[inline(always)]
fn keep_stream_running(&self, stream_id: u32) -> bool
Returns false if the Stream has been signaled to end its operations, causing it to report “out-of-elements” as soon as possible.
IMPLEMENTORS: use #[inline(always)]
fn register_stream_waker(&self, stream_id: u32, waker: &Waker)
Shares, for implementors’ concern, how stream_id may be awakened.
IMPLEMENTORS: use #[inline(always)]
fn drop_resources(&self, stream_id: u32)
Reports no more elements will be required through [provide()].
IMPLEMENTORS: use #[inline(always)]
impl<'a, ItemType, OgreAllocatorType, const BUFFER_SIZE: usize, const MAX_STREAMS: usize> ChannelProducer<'a, ItemType, OgreUnique<ItemType, OgreAllocatorType>> for Atomic<'a, ItemType, OgreAllocatorType, BUFFER_SIZE, MAX_STREAMS>
fn send(&self, item: ItemType) -> RetryResult<(), ItemType, (), ()>
Similar to Self::send_with(), but for sending the already-built item. See there for how to deal with the returned type.
IMPLEMENTORS: #[inline(always)]
fn send_with<F>(&self, setter: F) -> RetryResult<(), F, (), ()>
where F: FnOnce(&mut ItemType)
Calls setter, passing a slot so the payload may be filled there, then sends the event through this channel asynchronously. The returned type is convertible to Result<(), F> by calling .into() on it, returning Err<setter> when the buffer is full, to allow the caller to try again; otherwise you may add any retrying logic using the keen-retry crate’s API.
async fn send_with_async<F, Fut>(&'a self, setter: F) -> RetryResult<(), F, (), ()>
Similar to Self::send_with(), but accepts an async setter. This method is useful for sending operations that depend on data acquired by async blocks, allowing select loops to be built.
fn reserve_slot(&self) -> Option<&mut ItemType>
Proxy to crate::prelude::advanced::BoundedOgreAllocator::alloc_ref() from the underlying allocator, allowing the caller to fill in the data as they wish – in a non-blocking API. See also Self::try_send_reserved() and Self::try_cancel_slot_reserve().
fn try_send_reserved(&self, reserved_slot: &mut ItemType) -> bool
Attempts to send an item previously reserved by Self::reserve_slot(). Failure to do so (when false is returned) might be part of the normal channel operation, so retrying is advised. Moreover, some channel implementations are optimized for (or may even only accept) sending the slots in the same order they were reserved.
fn try_cancel_slot_reserve(&self, reserved_slot: &mut ItemType) -> bool
Attempts to give up sending an item previously reserved by Self::reserve_slot(), freeing it / setting its resources for reuse.
fn send_derived(&self, _derived_item: &DerivedItemType) -> bool
For channels that store the DerivedItemType instead of the ItemType, this method may be useful – for instance: if the Stream consumes OgreArc<Type> (the derived item type) and the channel is for Type, with this method one may send an OgreArc directly.
IMPLEMENTORS: #[inline(always)]
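Putting the producer-side methods together, here is a hedged sketch of the two publishing paths described above: the closure-based send_with() (converting the keen-retry result into a plain Result, as its docs suggest) and the reserve_slot() / try_send_reserved() zero-copy path. Import paths and the exact trait bounds are assumptions:

```rust
use std::fmt::Debug;
use reactive_mutiny::prelude::advanced::{BoundedOgreAllocator, ChannelProducer, OgreUnique}; // import paths are assumptions

/// Publishes `value` through a channel whose derived item is an `OgreUnique`,
/// first via the closure-based path, then via the reservation-based path.
/// Returns `true` if the value was accepted.
fn produce<'a, C, A>(channel: &'a C, value: u64) -> bool
where
    C: ChannelProducer<'a, u64, OgreUnique<u64, A>>,
    A: BoundedOgreAllocator<u64> + Send + Sync + 'static,
{
    // 1) closure-based publishing: per `send_with()`'s docs, the keen-retry result
    //    converts into a plain `Result`, where `Err(setter)` means "buffer full"
    let attempt: Result<(), _> = channel.send_with(|slot| *slot = value).into();
    if attempt.is_ok() {
        return true;
    }
    // 2) reservation-based publishing: grab a slot, fill it, then commit it
    if let Some(slot) = channel.reserve_slot() {
        *slot = value;
        if channel.try_send_reserved(slot) {
            return true;
        }
        channel.try_cancel_slot_reserve(slot); // commit refused: hand the slot back
    }
    false
}
```

For an already-built value, send(value) follows the same retry conventions as send_with().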
impl<'a, ItemType, OgreAllocatorType, const BUFFER_SIZE: usize, const MAX_STREAMS: usize> ChannelProducer<'a, ItemType, OgreUnique<ItemType, OgreAllocatorType>> for FullSync<'a, ItemType, OgreAllocatorType, BUFFER_SIZE, MAX_STREAMS>
fn send(&self, item: ItemType) -> RetryResult<(), ItemType, (), ()>
Similar to Self::send_with(), but for sending the already-built item. See there for how to deal with the returned type.
IMPLEMENTORS: #[inline(always)]
fn send_with<F>(&self, setter: F) -> RetryResult<(), F, (), ()>
where F: FnOnce(&mut ItemType)
Calls setter, passing a slot so the payload may be filled there, then sends the event through this channel asynchronously. The returned type is convertible to Result<(), F> by calling .into() on it, returning Err<setter> when the buffer is full, to allow the caller to try again; otherwise you may add any retrying logic using the keen-retry crate’s API.
async fn send_with_async<F, Fut>(&'a self, setter: F) -> RetryResult<(), F, (), ()>
Similar to Self::send_with(), but accepts an async setter. This method is useful for sending operations that depend on data acquired by async blocks, allowing select loops to be built.
fn reserve_slot(&self) -> Option<&mut ItemType>
Proxy to crate::prelude::advanced::BoundedOgreAllocator::alloc_ref() from the underlying allocator, allowing the caller to fill in the data as they wish – in a non-blocking API. See also Self::try_send_reserved() and Self::try_cancel_slot_reserve().
fn try_send_reserved(&self, reserved_slot: &mut ItemType) -> bool
Attempts to send an item previously reserved by Self::reserve_slot(). Failure to do so (when false is returned) might be part of the normal channel operation, so retrying is advised. Moreover, some channel implementations are optimized for (or may even only accept) sending the slots in the same order they were reserved.
fn try_cancel_slot_reserve(&self, reserved_slot: &mut ItemType) -> bool
Attempts to give up sending an item previously reserved by Self::reserve_slot(), freeing it / setting its resources for reuse.
fn send_derived(&self, _derived_item: &DerivedItemType) -> bool
For channels that store the DerivedItemType instead of the ItemType, this method may be useful – for instance: if the Stream consumes OgreArc<Type> (the derived item type) and the channel is for Type, with this method one may send an OgreArc directly.
IMPLEMENTORS: #[inline(always)]
impl<'a, ItemType, OgreAllocatorType, const BUFFER_SIZE: usize, const MAX_STREAMS: usize> ChannelUni<'a, ItemType, OgreUnique<ItemType, OgreAllocatorType>> for Atomic<'a, ItemType, OgreAllocatorType, BUFFER_SIZE, MAX_STREAMS>
fn create_stream(self: &Arc<Atomic<'a, ItemType, OgreAllocatorType, BUFFER_SIZE, MAX_STREAMS>>) -> (MutinyStream<'a, ItemType, Atomic<'a, ItemType, OgreAllocatorType, BUFFER_SIZE, MAX_STREAMS>, OgreUnique<ItemType, OgreAllocatorType>>, u32)
where Atomic<'a, ItemType, OgreAllocatorType, BUFFER_SIZE, MAX_STREAMS>: ChannelConsumer<'a, OgreUnique<ItemType, OgreAllocatorType>>
Returns a Stream (and its stream_id) able to receive elements sent through this channel. If called more than once, each Stream will receive a different element – “consumer pattern”. Currently panics if called more times than allowed by [Uni]’s MAX_STREAMS.
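A sketch of the consumer pattern described above: each call to create_stream() yields an independent Stream that receives its own share of the events. It assumes the futures crate for Stream/StreamExt and expresses "the returned MutinyStream can be polled" as a where-bound; import paths, bounds and the generic return type are assumptions:

```rust
use std::fmt::Debug;
use std::sync::Arc;
use futures::{Stream, StreamExt};
use reactive_mutiny::prelude::advanced::{ChannelConsumer, ChannelUni, MutinyStream}; // import paths are assumptions

/// Attaches one consumer to the channel and drains it. Running this from several
/// tasks would spread the events among them (the "consumer pattern" above).
async fn consume_all<'a, C, Item, Derived>(channel: &Arc<C>)
where
    C: ChannelUni<'a, Item, Derived> + ChannelConsumer<'a, Derived> + 'a,
    Item: Debug + Send + Sync + 'a,
    Derived: Debug + Send + Sync + 'a,
    MutinyStream<'a, Item, C, Derived>: Stream<Item = Derived> + Unpin, // lets us call `.next()`
{
    let (mut stream, _stream_id) = channel.create_stream();
    while let Some(event) = stream.next().await {
        println!("consumed: {event:?}"); // `Derived` is `OgreUnique<Item, _>` for the impls above
    }
}
```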
impl<'a, ItemType, OgreAllocatorType, const BUFFER_SIZE: usize, const MAX_STREAMS: usize> ChannelUni<'a, ItemType, OgreUnique<ItemType, OgreAllocatorType>> for FullSync<'a, ItemType, OgreAllocatorType, BUFFER_SIZE, MAX_STREAMS>
fn create_stream(self: &Arc<FullSync<'a, ItemType, OgreAllocatorType, BUFFER_SIZE, MAX_STREAMS>>) -> (MutinyStream<'a, ItemType, FullSync<'a, ItemType, OgreAllocatorType, BUFFER_SIZE, MAX_STREAMS>, OgreUnique<ItemType, OgreAllocatorType>>, u32)
where FullSync<'a, ItemType, OgreAllocatorType, BUFFER_SIZE, MAX_STREAMS>: ChannelConsumer<'a, OgreUnique<ItemType, OgreAllocatorType>>
Returns a Stream (and its stream_id) able to receive elements sent through this channel. If called more than once, each Stream will receive a different element – “consumer pattern”. Currently panics if called more times than allowed by [Uni]’s MAX_STREAMS.
impl<DataType, OgreAllocatorType> Debug for OgreUnique<DataType, OgreAllocatorType>
impl<DataType, OgreAllocatorType> Deref for OgreUnique<DataType, OgreAllocatorType>
impl<DataType, OgreAllocatorType> Display for OgreUnique<DataType, OgreAllocatorType>
impl<DataType, OgreAllocatorType> Drop for OgreUnique<DataType, OgreAllocatorType>
impl<DataType, OgreAllocatorType> From<OgreUnique<DataType, OgreAllocatorType>> for OgreArc<DataType, OgreAllocatorType>
fn from(ogre_unique: OgreUnique<DataType, OgreAllocatorType>) -> OgreArc<DataType, OgreAllocatorType>
Converts to this type from the input type.
impl<DataType, OgreAllocatorType> PartialEq<DataType> for OgreUnique<DataType, OgreAllocatorType>
impl<DataType, OgreAllocatorType> Send for OgreUnique<DataType, OgreAllocatorType>
impl<DataType, OgreAllocatorType> Sync for OgreUnique<DataType, OgreAllocatorType>
Auto Trait Implementations§
impl<DataType, OgreAllocatorType> Freeze for OgreUnique<DataType, OgreAllocatorType>
impl<DataType, OgreAllocatorType> RefUnwindSafe for OgreUnique<DataType, OgreAllocatorType>
where
    OgreAllocatorType: RefUnwindSafe,
    DataType: RefUnwindSafe,
impl<DataType, OgreAllocatorType> Unpin for OgreUnique<DataType, OgreAllocatorType>
impl<DataType, OgreAllocatorType> UnwindSafe for OgreUnique<DataType, OgreAllocatorType>
where
    OgreAllocatorType: RefUnwindSafe,
    DataType: RefUnwindSafe,
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.