mod layout;
pub mod suballocator;
use self::{array_vec::ArrayVec, suballocator::Region};
pub use self::{
layout::DeviceLayout,
suballocator::{
AllocationType, BuddyAllocator, BumpAllocator, FreeListAllocator, Suballocation,
Suballocator, SuballocatorError,
},
};
use super::{
DedicatedAllocation, DeviceAlignment, DeviceMemory, ExternalMemoryHandleTypes,
MemoryAllocateFlags, MemoryAllocateInfo, MemoryMapInfo, MemoryProperties, MemoryPropertyFlags,
MemoryRequirements, MemoryType,
};
use crate::{
device::{Device, DeviceOwned},
instance::InstanceOwnedDebugWrapper,
DeviceSize, Validated, Version, VulkanError,
};
use ash::vk::MAX_MEMORY_TYPES;
use parking_lot::Mutex;
use std::{
error::Error,
fmt::{Debug, Display, Error as FmtError, Formatter},
mem,
ops::BitOr,
ptr,
sync::Arc,
};
/// General-purpose device-memory allocator.
///
/// Implementations hand out [`MemoryAlloc`]s, which are either dedicated
/// `DeviceMemory` allocations or suballocations of larger blocks.
///
/// # Safety
///
/// An implementation must hand out allocations that stay valid until they are
/// passed back to [`deallocate`](Self::deallocate).
pub unsafe trait MemoryAllocator: DeviceOwned + Send + Sync + 'static {
    /// Finds the index of the most suitable memory type, restricted to the
    /// types allowed by `memory_type_bits` and matching `filter`.
    /// Returns `None` if no memory type satisfies the required flags.
    fn find_memory_type_index(
        &self,
        memory_type_bits: u32,
        filter: MemoryTypeFilter,
    ) -> Option<u32>;

    /// Allocates from the pool of the given memory type.
    ///
    /// If `never_allocate` is `true`, no new `DeviceMemory` block may be
    /// created; only existing free space may be used.
    fn allocate_from_type(
        &self,
        memory_type_index: u32,
        layout: DeviceLayout,
        allocation_type: AllocationType,
        never_allocate: bool,
    ) -> Result<MemoryAlloc, MemoryAllocatorError>;

    /// Allocates memory satisfying `requirements`, choosing the memory type
    /// itself and deciding between pooled and dedicated allocation based on
    /// `create_info.allocate_preference`.
    fn allocate(
        &self,
        requirements: MemoryRequirements,
        allocation_type: AllocationType,
        create_info: AllocationCreateInfo,
        dedicated_allocation: Option<DedicatedAllocation<'_>>,
    ) -> Result<MemoryAlloc, MemoryAllocatorError>;

    /// Allocates a dedicated `DeviceMemory` object of exactly
    /// `allocation_size` bytes from the given memory type.
    fn allocate_dedicated(
        &self,
        memory_type_index: u32,
        allocation_size: DeviceSize,
        dedicated_allocation: Option<DedicatedAllocation<'_>>,
        export_handle_types: ExternalMemoryHandleTypes,
    ) -> Result<MemoryAlloc, MemoryAllocatorError>;

    /// Deallocates the given `allocation`.
    ///
    /// # Safety
    ///
    /// - `allocation` must have been created by `self` and must not have been
    ///   deallocated before; the handle inside it is interpreted by the
    ///   implementation (for [`GenericMemoryAllocator`] it is a block pointer).
    unsafe fn deallocate(&self, allocation: MemoryAlloc);
}
impl Debug for dyn MemoryAllocator {
    /// Opaque debug output: trait objects cannot show their concrete fields,
    /// so only the trait name is printed, marked non-exhaustive.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
        let mut output = f.debug_struct("MemoryAllocator");
        output.finish_non_exhaustive()
    }
}
/// Describes what memory property flags a memory type must, should, and
/// should not have in order to be chosen by
/// [`MemoryAllocator::find_memory_type_index`].
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub struct MemoryTypeFilter {
    // Flags a memory type must have to be considered at all.
    pub required_flags: MemoryPropertyFlags,
    // Flags that lower a memory type's cost when present.
    pub preferred_flags: MemoryPropertyFlags,
    // Flags that raise a memory type's cost when present.
    pub not_preferred_flags: MemoryPropertyFlags,
}
impl MemoryTypeFilter {
    /// Prefers (but doesn't require) device-local memory.
    pub const PREFER_DEVICE: Self = Self {
        required_flags: MemoryPropertyFlags::empty(),
        preferred_flags: MemoryPropertyFlags::DEVICE_LOCAL,
        not_preferred_flags: MemoryPropertyFlags::empty(),
    };

    /// Prefers memory that is *not* device-local, i.e. host memory.
    pub const PREFER_HOST: Self = Self {
        required_flags: MemoryPropertyFlags::empty(),
        preferred_flags: MemoryPropertyFlags::empty(),
        not_preferred_flags: MemoryPropertyFlags::DEVICE_LOCAL,
    };

    /// Requires host-visible memory for sequential (write-combined) writes;
    /// cached memory is penalized because caching is unnecessary for
    /// write-only access.
    pub const HOST_SEQUENTIAL_WRITE: Self = Self {
        required_flags: MemoryPropertyFlags::HOST_VISIBLE,
        preferred_flags: MemoryPropertyFlags::empty(),
        not_preferred_flags: MemoryPropertyFlags::HOST_CACHED,
    };

    /// Requires host-visible *and* cached memory, suitable for random-access
    /// reads and writes from the host.
    pub const HOST_RANDOM_ACCESS: Self = Self {
        required_flags: MemoryPropertyFlags::HOST_VISIBLE.union(MemoryPropertyFlags::HOST_CACHED),
        preferred_flags: MemoryPropertyFlags::empty(),
        not_preferred_flags: MemoryPropertyFlags::empty(),
    };

    /// Returns a filter with all flag sets empty (matches every memory type
    /// with zero cost).
    #[inline]
    pub const fn empty() -> Self {
        Self {
            required_flags: MemoryPropertyFlags::empty(),
            preferred_flags: MemoryPropertyFlags::empty(),
            not_preferred_flags: MemoryPropertyFlags::empty(),
        }
    }

    /// Returns the per-field union of `self` and `other`.
    #[inline]
    pub const fn union(self, other: Self) -> Self {
        Self {
            required_flags: self.required_flags.union(other.required_flags),
            preferred_flags: self.preferred_flags.union(other.preferred_flags),
            not_preferred_flags: self.not_preferred_flags.union(other.not_preferred_flags),
        }
    }
}
impl BitOr for MemoryTypeFilter {
    type Output = Self;

    /// `a | b` is defined as the per-field union of the two filters.
    #[inline]
    fn bitor(self, rhs: Self) -> Self::Output {
        Self::union(self, rhs)
    }
}
/// Parameters for [`MemoryAllocator::allocate`].
#[derive(Clone, Debug)]
pub struct AllocationCreateInfo {
    // Constraints on which memory type is chosen.
    pub memory_type_filter: MemoryTypeFilter,
    // Additional bitmask of allowed memory types; ANDed with the
    // requirements' own mask during allocation.
    pub memory_type_bits: u32,
    // Whether to suballocate, always dedicate, or never create new blocks.
    pub allocate_preference: MemoryAllocatePreference,
    pub _ne: crate::NonExhaustive,
}
impl Default for AllocationCreateInfo {
    /// Defaults: prefer device-local memory, allow every memory type
    /// (`u32::MAX` mask), and let the allocator pick its own strategy.
    #[inline]
    fn default() -> Self {
        Self {
            memory_type_filter: MemoryTypeFilter::PREFER_DEVICE,
            memory_type_bits: u32::MAX,
            allocate_preference: MemoryAllocatePreference::Unknown,
            _ne: crate::NonExhaustive(()),
        }
    }
}
/// How eagerly the allocator should create dedicated `DeviceMemory` objects
/// instead of suballocating from pooled blocks.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum MemoryAllocatePreference {
    /// Let the allocator decide based on size and allocation count.
    Unknown,
    /// Only suballocate; fail with `OutOfPoolMemory` rather than allocate
    /// new device memory.
    NeverAllocate,
    /// Always create a dedicated allocation, never suballocate.
    AlwaysAllocate,
}
/// An allocation made with a [`MemoryAllocator`].
#[derive(Clone, Debug)]
pub struct MemoryAlloc {
    // The underlying block of device memory.
    pub device_memory: Arc<DeviceMemory>,
    // `Some` if this is a suballocation of `device_memory`, `None` for a
    // dedicated allocation that owns the whole block.
    pub suballocation: Option<Suballocation>,
    // Opaque, allocator-defined data needed to deallocate (for
    // `GenericMemoryAllocator`, a pointer to the owning `Block`).
    pub allocation_handle: AllocationHandle,
}
/// An opaque pointer-sized handle that an allocator attaches to each
/// allocation; only the allocator that produced it may interpret it.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(not(doc), repr(transparent))]
pub struct AllocationHandle(*mut ());

// SAFETY: the handle is a plain bit-pattern; it is never dereferenced except
// by the allocator that created it, which is itself `Send + Sync`.
unsafe impl Send for AllocationHandle {}
// SAFETY: see above — shared access never dereferences the pointer here.
unsafe impl Sync for AllocationHandle {}
impl AllocationHandle {
    /// A null handle, used when the allocator needs no per-allocation data
    /// (e.g. dedicated allocations).
    #[inline]
    pub const fn null() -> Self {
        AllocationHandle(ptr::null_mut())
    }

    /// Wraps a raw pointer as a handle.
    #[inline]
    pub const fn from_ptr(ptr: *mut ()) -> Self {
        AllocationHandle(ptr)
    }

    /// Stores a plain index in the handle's pointer bits.
    ///
    /// `transmute` is used instead of an `as` cast because integer-to-pointer
    /// casts are restricted in `const fn` — presumably why the original
    /// carries `allow(clippy::useless_transmute)`; TODO confirm against the
    /// current toolchain before changing.
    #[allow(clippy::useless_transmute)]
    #[inline]
    pub const fn from_index(index: usize) -> Self {
        AllocationHandle(unsafe { mem::transmute::<usize, *mut ()>(index) })
    }

    /// Returns the handle's raw pointer value.
    #[inline]
    pub const fn as_ptr(self) -> *mut () {
        self.0
    }

    /// Recovers an index previously stored with [`from_index`](Self::from_index).
    ///
    /// Same const-context rationale as `from_index` for using `transmute`
    /// rather than a pointer cast.
    #[allow(clippy::transmutes_expressible_as_ptr_casts)]
    #[inline]
    pub const fn as_index(self) -> usize {
        unsafe { mem::transmute::<*mut (), usize>(self.0) }
    }
}
/// Error that can happen when allocating memory with a [`MemoryAllocator`].
#[derive(Clone, Debug)]
pub enum MemoryAllocatorError {
    /// The underlying `DeviceMemory::allocate` call failed.
    AllocateDeviceMemory(Validated<VulkanError>),
    /// No memory type satisfied the filter and type mask.
    FindMemoryType,
    /// `NeverAllocate` was requested and the existing blocks had no room.
    OutOfPoolMemory,
    /// The resource requires a dedicated allocation but `NeverAllocate`
    /// forbids it.
    DedicatedAllocationRequired,
    /// The requested size exceeds the pool's block size and dedicated
    /// allocation was not possible.
    BlockSizeExceeded,
}
impl Error for MemoryAllocatorError {
    /// Only `AllocateDeviceMemory` wraps an underlying error; every other
    /// variant is a leaf.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        if let Self::AllocateDeviceMemory(err) = self {
            Some(err)
        } else {
            None
        }
    }
}
impl Display for MemoryAllocatorError {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
        // Each variant maps to a fixed message; feed it straight to the
        // formatter without an intermediate binding.
        f.write_str(match self {
            Self::AllocateDeviceMemory(_) => "allocating device memory failed",
            Self::FindMemoryType => "finding a suitable memory type failed",
            Self::OutOfPoolMemory => "the pool doesn't have enough free space",
            Self::DedicatedAllocationRequired => {
                "a dedicated allocation is required but was explicitly forbidden"
            }
            Self::BlockSizeExceeded => {
                "the allocation size was greater than the block size for all heaps of suitable \
                memory types and dedicated allocations were explicitly forbidden"
            }
        })
    }
}
pub type StandardMemoryAllocator = GenericMemoryAllocator<FreeListAllocator>;
impl StandardMemoryAllocator {
    /// Creates a `StandardMemoryAllocator` with reasonable defaults:
    /// 256 MiB blocks for memory types whose heap is at least 1 GiB, 64 MiB
    /// blocks otherwise, and with special-purpose memory types (lazily
    /// allocated, protected, device-coherent, RDMA-capable) excluded from
    /// the default type mask.
    pub fn new_default(device: Arc<Device>) -> Self {
        const LARGE_HEAP_THRESHOLD: DeviceSize = 1024 * 1024 * 1024;
        const LARGE_HEAP_BLOCK_SIZE: DeviceSize = 256 * 1024 * 1024;
        const SMALL_HEAP_BLOCK_SIZE: DeviceSize = 64 * 1024 * 1024;

        let MemoryProperties {
            memory_types,
            memory_heaps,
        } = device.physical_device().memory_properties();

        // Pick a block size per memory type based on the size of its heap.
        let block_sizes: Vec<DeviceSize> = memory_types
            .iter()
            .map(|memory_type| {
                if memory_heaps[memory_type.heap_index as usize].size >= LARGE_HEAP_THRESHOLD {
                    LARGE_HEAP_BLOCK_SIZE
                } else {
                    SMALL_HEAP_BLOCK_SIZE
                }
            })
            .collect();

        // Mask out memory types with special-purpose property flags so they
        // are never chosen by default.
        let special_purpose_flags = MemoryPropertyFlags::LAZILY_ALLOCATED
            | MemoryPropertyFlags::PROTECTED
            | MemoryPropertyFlags::DEVICE_COHERENT
            | MemoryPropertyFlags::RDMA_CAPABLE;
        let memory_type_bits = memory_types
            .iter()
            .enumerate()
            .fold(u32::MAX, |bits, (index, memory_type)| {
                if memory_type.property_flags.intersects(special_purpose_flags) {
                    bits & !(1 << index)
                } else {
                    bits
                }
            });

        Self::new(
            device,
            GenericMemoryAllocatorCreateInfo {
                block_sizes: &block_sizes,
                memory_type_bits,
                ..Default::default()
            },
        )
    }
}
/// A generic pooling memory allocator, parameterized over the suballocator
/// `S` used within each `DeviceMemory` block.
#[derive(Debug)]
pub struct GenericMemoryAllocator<S> {
    device: InstanceOwnedDebugWrapper<Arc<Device>>,
    // Device's `bufferImageGranularity` limit, forwarded to suballocators.
    buffer_image_granularity: DeviceAlignment,
    // One pool of blocks per memory type of the physical device.
    pools: ArrayVec<Pool<S>, MAX_MEMORY_TYPES>,
    // Default mask of memory types this allocator will consider.
    memory_type_bits: u32,
    // Whether dedicated allocations are honored at all.
    dedicated_allocation: bool,
    // Per-memory-type export handle types (empty when unused).
    export_handle_types: ArrayVec<ExternalMemoryHandleTypes, MAX_MEMORY_TYPES>,
    // Flags passed to every `DeviceMemory` allocation (e.g. DEVICE_ADDRESS).
    flags: MemoryAllocateFlags,
    // Soft cap on the number of device-memory allocations (3/4 of the
    // device's `maxMemoryAllocationCount`).
    max_allocations: u32,
}
/// Per-memory-type pool of `DeviceMemory` blocks.
#[derive(Debug)]
struct Pool<S> {
    // Blocks are boxed so that pointers to them stay stable while the vector
    // reallocates; the pointer is stored in each allocation's handle.
    blocks: Mutex<Vec<Box<Block<S>>>>,
    property_flags: MemoryPropertyFlags,
    // `nonCoherentAtomSize` for host-visible non-coherent types, otherwise
    // the minimum alignment.
    atom_size: DeviceAlignment,
    // Preferred size of newly allocated blocks in this pool.
    block_size: DeviceSize,
}
impl<S> GenericMemoryAllocator<S> {
#[allow(clippy::declare_interior_mutable_const)]
const EMPTY_POOL: Pool<S> = Pool {
blocks: Mutex::new(Vec::new()),
property_flags: MemoryPropertyFlags::empty(),
atom_size: DeviceAlignment::MIN,
block_size: 0,
};
pub fn new(device: Arc<Device>, create_info: GenericMemoryAllocatorCreateInfo<'_>) -> Self {
let GenericMemoryAllocatorCreateInfo {
block_sizes,
memory_type_bits,
dedicated_allocation,
export_handle_types,
mut device_address,
_ne: _,
} = create_info;
let memory_types = &device.physical_device().memory_properties().memory_types;
assert_eq!(
block_sizes.len(),
memory_types.len(),
"`create_info.block_sizes` must contain as many elements as the number of memory types",
);
if !export_handle_types.is_empty() {
assert_eq!(
export_handle_types.len(),
memory_types.len(),
"`create_info.export_handle_types` must contain as many elements as the number of \
memory types if not empty",
);
}
let buffer_image_granularity = device
.physical_device()
.properties()
.buffer_image_granularity;
let memory_types = &device.physical_device().memory_properties().memory_types;
let mut pools = ArrayVec::new(memory_types.len(), [Self::EMPTY_POOL; MAX_MEMORY_TYPES]);
for (index, &MemoryType { property_flags, .. }) in memory_types.iter().enumerate() {
pools[index].property_flags = property_flags;
if property_flags.intersects(MemoryPropertyFlags::HOST_VISIBLE)
&& !property_flags.intersects(MemoryPropertyFlags::HOST_COHERENT)
{
pools[index].atom_size =
device.physical_device().properties().non_coherent_atom_size;
}
pools[index].block_size = block_sizes[index];
}
let export_handle_types = {
let mut types = ArrayVec::new(
export_handle_types.len(),
[ExternalMemoryHandleTypes::empty(); MAX_MEMORY_TYPES],
);
types.copy_from_slice(export_handle_types);
types
};
device_address &= device.enabled_features().buffer_device_address
&& !device.enabled_extensions().ext_buffer_device_address;
device_address &=
device.api_version() >= Version::V1_1 || device.enabled_extensions().khr_device_group;
let flags = if device_address {
MemoryAllocateFlags::DEVICE_ADDRESS
} else {
MemoryAllocateFlags::empty()
};
let max_memory_allocation_count = device
.physical_device()
.properties()
.max_memory_allocation_count;
let max_allocations = max_memory_allocation_count / 4 * 3;
GenericMemoryAllocator {
device: InstanceOwnedDebugWrapper(device),
buffer_image_granularity,
pools,
dedicated_allocation,
export_handle_types,
flags,
memory_type_bits,
max_allocations,
}
}
#[cold]
fn allocate_device_memory(
&self,
memory_type_index: u32,
allocation_size: DeviceSize,
dedicated_allocation: Option<DedicatedAllocation<'_>>,
export_handle_types: ExternalMemoryHandleTypes,
) -> Result<Arc<DeviceMemory>, Validated<VulkanError>> {
let mut memory = DeviceMemory::allocate(
self.device.clone(),
MemoryAllocateInfo {
allocation_size,
memory_type_index,
dedicated_allocation,
export_handle_types,
flags: self.flags,
..Default::default()
},
)?;
if self.pools[memory_type_index as usize]
.property_flags
.intersects(MemoryPropertyFlags::HOST_VISIBLE)
{
unsafe {
memory.map_unchecked(MemoryMapInfo {
offset: 0,
size: memory.allocation_size(),
_ne: crate::NonExhaustive(()),
})?;
}
}
Ok(Arc::new(memory))
}
}
unsafe impl<S: Suballocator + Send + 'static> MemoryAllocator for GenericMemoryAllocator<S> {
    /// Picks the cheapest memory type that is allowed by `memory_type_bits`
    /// and has all of `filter.required_flags`; cost is the number of missing
    /// preferred flags plus the number of present not-preferred flags.
    fn find_memory_type_index(
        &self,
        memory_type_bits: u32,
        filter: MemoryTypeFilter,
    ) -> Option<u32> {
        let required_flags = filter.required_flags.into();
        let preferred_flags = filter.preferred_flags.into();
        let not_preferred_flags = filter.not_preferred_flags.into();

        self.pools
            .iter()
            .map(|pool| ash::vk::MemoryPropertyFlags::from(pool.property_flags))
            .enumerate()
            // Hard constraints: type allowed by the mask, all required flags present.
            .filter(|&(index, flags)| {
                memory_type_bits & (1 << index) != 0 && flags & required_flags == required_flags
            })
            // Soft constraints: one penalty per missing preferred flag and per
            // present not-preferred flag; pick the lowest total.
            .min_by_key(|&(_, flags)| {
                (!flags & preferred_flags).as_raw().count_ones()
                    + (flags & not_preferred_flags).as_raw().count_ones()
            })
            .map(|(index, _)| index as u32)
    }

    /// Suballocates from the pool of `memory_type_index`, growing the pool
    /// with a new `DeviceMemory` block if needed (unless `never_allocate`).
    fn allocate_from_type(
        &self,
        memory_type_index: u32,
        mut layout: DeviceLayout,
        allocation_type: AllocationType,
        never_allocate: bool,
    ) -> Result<MemoryAlloc, MemoryAllocatorError> {
        let size = layout.size();
        let pool = &self.pools[memory_type_index as usize];

        // A request larger than the block size can never be pooled.
        if size > pool.block_size {
            return Err(MemoryAllocatorError::BlockSizeExceeded);
        }

        // Align to the pool's atom size (nonCoherentAtomSize for host-visible
        // non-coherent types).
        layout = layout.align_to(pool.atom_size).unwrap();

        let mut blocks = pool.blocks.lock();

        // Keep blocks ordered by free size so we can skip blocks that
        // definitely can't fit the request.
        blocks.sort_by_key(|block| block.free_size());
        let (Ok(idx) | Err(idx)) = blocks.binary_search_by_key(&size, |block| block.free_size());

        // Try every block with at least `size` bytes free; a block may still
        // fail due to fragmentation or granularity, so keep going.
        for block in &mut blocks[idx..] {
            if let Ok(allocation) =
                block.allocate(layout, allocation_type, self.buffer_image_granularity)
            {
                return Ok(allocation);
            }
        }

        if never_allocate {
            return Err(MemoryAllocatorError::OutOfPoolMemory);
        }

        // No existing block had room: allocate a new one. On out-of-memory,
        // retry with halved sizes (up to 3 halvings) as long as the halved
        // block would still fit the request.
        let block = {
            let export_handle_types = if !self.export_handle_types.is_empty() {
                self.export_handle_types[memory_type_index as usize]
            } else {
                ExternalMemoryHandleTypes::empty()
            };
            let mut i = 0;

            loop {
                let allocation_size = pool.block_size >> i;

                match self.allocate_device_memory(
                    memory_type_index,
                    allocation_size,
                    None,
                    export_handle_types,
                ) {
                    Ok(device_memory) => {
                        break Block::new(device_memory);
                    }
                    Err(Validated::Error(
                        VulkanError::OutOfHostMemory | VulkanError::OutOfDeviceMemory,
                    )) if i < 3 && pool.block_size >> (i + 1) >= size => {
                        i += 1;
                    }
                    Err(err) => return Err(MemoryAllocatorError::AllocateDeviceMemory(err)),
                }
            }
        };

        blocks.push(block);
        let block = blocks.last_mut().unwrap();

        // The fresh block is empty and at least `size` large, so the only
        // suballocator errors (out-of-region, fragmentation) are impossible.
        match block.allocate(layout, allocation_type, self.buffer_image_granularity) {
            Ok(allocation) => Ok(allocation),
            Err(SuballocatorError::OutOfRegionMemory) => unreachable!(),
            Err(SuballocatorError::FragmentedRegion) => unreachable!(),
        }
    }

    /// General-purpose allocation: chooses a memory type, decides between
    /// pooled and dedicated allocation per `allocate_preference`, and falls
    /// back to the next-best memory type on failure.
    fn allocate(
        &self,
        requirements: MemoryRequirements,
        allocation_type: AllocationType,
        create_info: AllocationCreateInfo,
        mut dedicated_allocation: Option<DedicatedAllocation<'_>>,
    ) -> Result<MemoryAlloc, MemoryAllocatorError> {
        let MemoryRequirements {
            layout,
            mut memory_type_bits,
            mut prefers_dedicated_allocation,
            requires_dedicated_allocation,
        } = requirements;

        // Intersect the resource's type mask with the allocator's and the
        // caller's masks.
        memory_type_bits &= self.memory_type_bits;
        memory_type_bits &= create_info.memory_type_bits;

        let AllocationCreateInfo {
            memory_type_filter,
            memory_type_bits: _,
            allocate_preference,
            _ne: _,
        } = create_info;

        let size = layout.size();

        let mut memory_type_index = self
            .find_memory_type_index(memory_type_bits, memory_type_filter)
            .ok_or(MemoryAllocatorError::FindMemoryType)?;

        // Drop the dedicated-allocation info when the allocator has dedicated
        // allocations disabled, unless the resource strictly requires one.
        if !self.dedicated_allocation && !requires_dedicated_allocation {
            dedicated_allocation = None;
        }

        let export_handle_types = if self.export_handle_types.is_empty() {
            ExternalMemoryHandleTypes::empty()
        } else {
            self.export_handle_types[memory_type_index as usize]
        };

        loop {
            let pool = &self.pools[memory_type_index as usize];

            let res = match allocate_preference {
                MemoryAllocatePreference::Unknown => {
                    if requires_dedicated_allocation {
                        self.allocate_dedicated(
                            memory_type_index,
                            size,
                            dedicated_allocation,
                            export_handle_types,
                        )
                    } else {
                        // Heuristics: dedicate large requests (> half a block),
                        // but stop dedicating when nearing the allocation-count
                        // cap, as long as the request still fits in a block.
                        if size > pool.block_size / 2 {
                            prefers_dedicated_allocation = true;
                        }
                        if self.device.allocation_count() > self.max_allocations
                            && size <= pool.block_size
                        {
                            prefers_dedicated_allocation = false;
                        }

                        if prefers_dedicated_allocation {
                            // Dedicated first, pool (existing blocks only) as
                            // fallback; keep the original error if both fail.
                            self.allocate_dedicated(
                                memory_type_index,
                                size,
                                dedicated_allocation,
                                export_handle_types,
                            )
                            .or_else(|err| {
                                self.allocate_from_type(
                                    memory_type_index,
                                    layout,
                                    allocation_type,
                                    true,
                                )
                                .map_err(|_| err)
                            })
                        } else {
                            // Pool first (allowed to create blocks), dedicated
                            // as fallback.
                            self.allocate_from_type(
                                memory_type_index,
                                layout,
                                allocation_type,
                                false,
                            )
                            .or_else(|_| {
                                self.allocate_dedicated(
                                    memory_type_index,
                                    size,
                                    dedicated_allocation,
                                    export_handle_types,
                                )
                            })
                        }
                    }
                }
                MemoryAllocatePreference::NeverAllocate => {
                    if requires_dedicated_allocation {
                        return Err(MemoryAllocatorError::DedicatedAllocationRequired);
                    }

                    self.allocate_from_type(memory_type_index, layout, allocation_type, true)
                }
                MemoryAllocatePreference::AlwaysAllocate => self.allocate_dedicated(
                    memory_type_index,
                    size,
                    dedicated_allocation,
                    export_handle_types,
                ),
            };

            match res {
                Ok(allocation) => return Ok(allocation),
                // This memory type failed: mask it out and retry with the
                // next-best type; give up (returning the last error) when no
                // type remains.
                Err(err) => {
                    memory_type_bits &= !(1 << memory_type_index);
                    memory_type_index = self
                        .find_memory_type_index(memory_type_bits, memory_type_filter)
                        .ok_or(err)?;
                }
            }
        }
    }

    /// Creates a dedicated `DeviceMemory` allocation (no suballocation, null
    /// handle). `#[cold]`: dedicated allocations are the uncommon path.
    #[cold]
    fn allocate_dedicated(
        &self,
        memory_type_index: u32,
        allocation_size: DeviceSize,
        dedicated_allocation: Option<DedicatedAllocation<'_>>,
        export_handle_types: ExternalMemoryHandleTypes,
    ) -> Result<MemoryAlloc, MemoryAllocatorError> {
        let device_memory = self
            .allocate_device_memory(
                memory_type_index,
                allocation_size,
                dedicated_allocation,
                export_handle_types,
            )
            .map_err(MemoryAllocatorError::AllocateDeviceMemory)?;

        Ok(MemoryAlloc {
            device_memory,
            suballocation: None,
            allocation_handle: AllocationHandle::null(),
        })
    }

    /// Returns a suballocation to its owning block; dedicated allocations
    /// (no suballocation) are freed by dropping their `DeviceMemory`.
    ///
    /// The handle stored at allocation time is the raw pointer to the boxed
    /// `Block`, which stays valid because blocks are boxed and never removed
    /// from the pool.
    unsafe fn deallocate(&self, allocation: MemoryAlloc) {
        if let Some(suballocation) = allocation.suballocation {
            let memory_type_index = allocation.device_memory.memory_type_index();
            // `pool` is the lock guard over this memory type's block list; it
            // is held for the whole deallocation (see explicit `drop` below).
            let pool = self.pools[memory_type_index as usize].blocks.lock();
            let block_ptr = allocation.allocation_handle.0 as *mut Block<S>;

            debug_assert!(
                pool.iter()
                    .any(|block| &**block as *const Block<S> == block_ptr),
                "attempted to deallocate a memory block that does not belong to this allocator",
            );

            // SAFETY (caller contract): the allocation came from `self` and
            // was not deallocated before, so `block_ptr` points at a live
            // block owned by the locked pool.
            let block = &mut *block_ptr;
            block.deallocate(suballocation);

            drop(pool);
        }
    }
}
// SAFETY: every method forwards verbatim to the inner `T`, which upholds the
// trait's contract.
unsafe impl<T: MemoryAllocator> MemoryAllocator for Arc<T> {
    fn find_memory_type_index(
        &self,
        memory_type_bits: u32,
        filter: MemoryTypeFilter,
    ) -> Option<u32> {
        // `&Arc<T>` deref-coerces to `&T` at the call site.
        T::find_memory_type_index(self, memory_type_bits, filter)
    }

    fn allocate_from_type(
        &self,
        memory_type_index: u32,
        layout: DeviceLayout,
        allocation_type: AllocationType,
        never_allocate: bool,
    ) -> Result<MemoryAlloc, MemoryAllocatorError> {
        T::allocate_from_type(self, memory_type_index, layout, allocation_type, never_allocate)
    }

    fn allocate(
        &self,
        requirements: MemoryRequirements,
        allocation_type: AllocationType,
        create_info: AllocationCreateInfo,
        dedicated_allocation: Option<DedicatedAllocation<'_>>,
    ) -> Result<MemoryAlloc, MemoryAllocatorError> {
        T::allocate(self, requirements, allocation_type, create_info, dedicated_allocation)
    }

    fn allocate_dedicated(
        &self,
        memory_type_index: u32,
        allocation_size: DeviceSize,
        dedicated_allocation: Option<DedicatedAllocation<'_>>,
        export_handle_types: ExternalMemoryHandleTypes,
    ) -> Result<MemoryAlloc, MemoryAllocatorError> {
        T::allocate_dedicated(
            self,
            memory_type_index,
            allocation_size,
            dedicated_allocation,
            export_handle_types,
        )
    }

    unsafe fn deallocate(&self, allocation: MemoryAlloc) {
        T::deallocate(self, allocation)
    }
}
// SAFETY: the allocator stores the device it was created with and always
// returns that same device.
unsafe impl<S> DeviceOwned for GenericMemoryAllocator<S> {
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}
/// A single `DeviceMemory` block managed by a suballocator.
#[derive(Debug)]
struct Block<S> {
    device_memory: Arc<DeviceMemory>,
    suballocator: S,
    // Number of live suballocations; used to trigger `cleanup` when it
    // drops back to zero.
    allocation_count: usize,
}
impl<S: Suballocator> Block<S> {
    /// Wraps `device_memory` in a new boxed block whose suballocator covers
    /// the whole allocation. Boxing keeps the block's address stable so it
    /// can be stored in allocation handles.
    fn new(device_memory: Arc<DeviceMemory>) -> Box<Self> {
        let suballocator = S::new(
            Region::new(0, device_memory.allocation_size())
                .expect("we somehow managed to allocate more than `DeviceLayout::MAX_SIZE` bytes"),
        );

        Box::new(Block {
            device_memory,
            suballocator,
            allocation_count: 0,
        })
    }

    /// Suballocates `layout` from this block, tagging the result with a
    /// pointer back to `self` so `deallocate` can find the owning block.
    fn allocate(
        &mut self,
        layout: DeviceLayout,
        allocation_type: AllocationType,
        buffer_image_granularity: DeviceAlignment,
    ) -> Result<MemoryAlloc, SuballocatorError> {
        let suballocation =
            self.suballocator
                .allocate(layout, allocation_type, buffer_image_granularity)?;
        self.allocation_count += 1;

        Ok(MemoryAlloc {
            device_memory: self.device_memory.clone(),
            suballocation: Some(suballocation),
            // NOTE: this pointer is only stable because blocks live in a
            // `Box` and are never moved out of the pool.
            allocation_handle: AllocationHandle::from_ptr(self as *mut Block<S> as _),
        })
    }

    /// Returns `suballocation` to the suballocator.
    ///
    /// # Safety
    ///
    /// - `suballocation` must have been produced by this block's `allocate`
    ///   and not deallocated before (the suballocator's own contract).
    unsafe fn deallocate(&mut self, suballocation: Suballocation) {
        self.suballocator.deallocate(suballocation);
        self.allocation_count -= 1;

        // Once the block is empty, let the suballocator reset its state.
        if self.allocation_count == 0 {
            self.suballocator.cleanup();
        }
    }

    /// Total free bytes remaining in this block (may be fragmented).
    fn free_size(&self) -> DeviceSize {
        self.suballocator.free_size()
    }
}
/// Parameters for constructing a [`GenericMemoryAllocator`].
#[derive(Clone, Debug)]
pub struct GenericMemoryAllocatorCreateInfo<'a> {
    // One block size per memory type of the device (asserted in `new`).
    pub block_sizes: &'a [DeviceSize],
    // Default mask of memory types the allocator may use.
    pub memory_type_bits: u32,
    // Whether to honor dedicated-allocation requests.
    pub dedicated_allocation: bool,
    // Either empty, or one entry per memory type (asserted in `new`).
    pub export_handle_types: &'a [ExternalMemoryHandleTypes],
    // Request the DEVICE_ADDRESS allocate flag when the device supports it.
    pub device_address: bool,
    pub _ne: crate::NonExhaustive,
}
impl Default for GenericMemoryAllocatorCreateInfo<'_> {
    /// Defaults: no block sizes (caller must supply them), all memory types
    /// allowed, dedicated allocations honored, no export handle types, and
    /// device addresses enabled where supported.
    #[inline]
    fn default() -> Self {
        Self {
            block_sizes: &[],
            memory_type_bits: u32::MAX,
            dedicated_allocation: true,
            export_handle_types: &[],
            device_address: true,
            _ne: crate::NonExhaustive(()),
        }
    }
}
/// Rounds `val` up to the next multiple of `alignment` (wrapping on
/// overflow, matching the original's `wrapping_add`).
#[inline(always)]
pub(crate) const fn align_up(val: DeviceSize, alignment: DeviceAlignment) -> DeviceSize {
    let low_bits = alignment.as_devicesize() - 1;
    align_down(val.wrapping_add(low_bits), alignment)
}
/// Rounds `val` down to the previous multiple of `alignment` by masking off
/// the low bits.
#[inline(always)]
pub(crate) const fn align_down(val: DeviceSize, alignment: DeviceAlignment) -> DeviceSize {
    let mask = !(alignment.as_devicesize() - 1);
    val & mask
}
mod array_vec {
    use std::ops::{Deref, DerefMut};

    /// A fixed-capacity vector backed by an inline array of `N` elements,
    /// of which the first `len` are considered initialized/live.
    #[derive(Clone, Copy, Debug)]
    pub(super) struct ArrayVec<T, const N: usize> {
        len: usize,
        data: [T; N],
    }

    impl<T, const N: usize> ArrayVec<T, N> {
        /// Creates an `ArrayVec` exposing the first `len` elements of `data`.
        ///
        /// # Panics
        ///
        /// Panics if `len > N`.
        pub fn new(len: usize, data: [T; N]) -> Self {
            assert!(len <= N);

            ArrayVec { len, data }
        }
    }

    impl<T, const N: usize> Deref for ArrayVec<T, N> {
        type Target = [T];

        fn deref(&self) -> &Self::Target {
            // `len <= N` is asserted in `new` and `len` is never mutated, so
            // this slice is always in bounds; safe indexing replaces the
            // original's unnecessary `get_unchecked`.
            &self.data[..self.len]
        }
    }

    impl<T, const N: usize> DerefMut for ArrayVec<T, N> {
        fn deref_mut(&mut self) -> &mut Self::Target {
            // Same invariant as `deref`: `len <= N` always holds.
            &mut self.data[..self.len]
        }
    }
}