/// A huge (large) page size supported by Linux.
///
/// The discriminant is the page size in bytes (hence `#[repr(u64)]`),
/// so `self as u64` yields the size in bytes directly.
/// `Ord` therefore orders variants by increasing size, and `EnumIter`
/// iterates them smallest-first.
#[repr(u64)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[derive(Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
#[derive(EnumIter)]
pub enum HugePageSize
{
/// 64 KB (65,536 bytes).
_64KB = 64 * 1_024,
/// 512 KB (524,288 bytes).
_512KB = 512 * 1_024,
/// 1 MB (1,048,576 bytes).
_1MB = 1_024 * 1_024,
/// 2 MB (2,097,152 bytes).
_2MB = 2_048 * 1_024,
/// 4 MB (4,194,304 bytes).
_4MB = 4_096 * 1_024,
/// 8 MB (8,388,608 bytes).
_8MB = 8_192 * 1_024,
/// 16 MB (16,777,216 bytes).
_16MB = 16_384 * 1_024,
/// 32 MB (33,554,432 bytes).
_32MB = 32_768 * 1_024,
/// 256 MB (268,435,456 bytes).
_256MB = 262_144 * 1_024,
/// 512 MB (536,870,912 bytes).
_512MB = 524_288 * 1_024,
/// 1 GB (1,073,741,824 bytes).
_1GB = 1_048_576 * 1_024,
/// 2 GB (2,147,483,648 bytes).
_2GB = 2_097_152 * 1_024,
/// 16 GB (17,179,869,184 bytes).
_16GB = 16_777_216 * 1_024,
}
impl Into<NonZeroU64> for HugePageSize
{
#[inline(always)]
fn into(self) -> NonZeroU64
{
self.into_non_zero_u64()
}
}
impl Into<u64> for HugePageSize
{
#[inline(always)]
fn into(self) -> u64
{
self.into_u64()
}
}
impl Into<NonZeroUsize> for HugePageSize
{
#[inline(always)]
fn into(self) -> NonZeroUsize
{
self.into_non_zero_usize()
}
}
impl Into<usize> for HugePageSize
{
#[inline(always)]
fn into(self) -> usize
{
self.into_usize()
}
}
/// Fallible conversion from a non-zero size in bytes.
impl TryFrom<NonZeroU64> for HugePageSize
{
    type Error = ParseNumberError;

    /// Succeeds only when `value` is exactly one of the known huge page sizes.
    #[inline(always)]
    fn try_from(value: NonZeroU64) -> Result<Self, Self::Error>
    {
        match Self::from_non_zero_bytes(value)
        {
            Some(huge_page_size) => Ok(huge_page_size),

            None => Err(ParseNumberError::OutOfRange),
        }
    }
}
/// Fallible conversion from a size in bytes.
impl TryFrom<u64> for HugePageSize
{
    type Error = ParseNumberError;

    /// Succeeds only when `value` is exactly one of the known huge page sizes.
    #[inline(always)]
    fn try_from(value: u64) -> Result<Self, Self::Error>
    {
        match Self::from_bytes(value)
        {
            Some(huge_page_size) => Ok(huge_page_size),

            None => Err(ParseNumberError::OutOfRange),
        }
    }
}
/// Fallible conversion from a non-zero size in bytes.
impl TryFrom<NonZeroUsize> for HugePageSize
{
    type Error = ParseNumberError;

    /// Delegates to the `TryFrom<usize>` implementation.
    #[inline(always)]
    fn try_from(value: NonZeroUsize) -> Result<Self, Self::Error>
    {
        let bytes = value.get();
        Self::try_from(bytes)
    }
}
/// Fallible conversion from a size in bytes.
impl TryFrom<usize> for HugePageSize
{
    type Error = ParseNumberError;

    /// Widens to `u64` (lossless on all supported targets) and delegates
    /// to the `TryFrom<u64>` implementation.
    #[inline(always)]
    fn try_from(value: usize) -> Result<Self, Self::Error>
    {
        let bytes = value as u64;
        Self::try_from(bytes)
    }
}
impl HugePageSize
{
    /// Fragment used in the file names of the cgroup `hugetlb` controller,
    /// eg the `2MB` in `hugetlb.2MB.max`.
    #[inline(always)]
    pub(crate) fn cgroup_file_name_fragment(self) -> &'static str
    {
        use self::HugePageSize::*;

        match self
        {
            _64KB => "64KB",
            _512KB => "512KB",
            _1MB => "1MB",
            _2MB => "2MB",
            _4MB => "4MB",
            _8MB => "8MB",
            _16MB => "16MB",
            _32MB => "32MB",
            _256MB => "256MB",
            _512MB => "512MB",
            _1GB => "1GB",
            _2GB => "2GB",
            _16GB => "16GB",
        }
    }

    /// Size in kilobytes.
    ///
    /// The discriminant is the size in bytes, so this is `bytes / 1,024`;
    /// always non-zero because the smallest variant is 64KB.
    #[inline(always)]
    pub const fn size_in_kilobytes(self) -> NonZeroKilobyte
    {
        new_non_zero_u64((self as u64) / 1_024)
    }

    /// Size in bytes (the enum discriminant itself).
    #[inline(always)]
    pub const fn size_in_bytes(self) -> NonZeroU64
    {
        new_non_zero_u64(self as u64)
    }

    /// Number of pages of this size needed to hold `number_of_bytes`, rounded up.
    ///
    /// The result is never zero because `number_of_bytes` is non-zero.
    pub fn non_zero_number_of_pages_from_non_zero_number_of_bytes_rounded_up(self, number_of_bytes: NonZeroU64) -> NonZeroNumberOfPages
    {
        new_non_zero_u64(self.number_of_pages_from_number_of_bytes_rounded_up(number_of_bytes.get()))
    }

    /// Number of pages of this size needed to hold `number_of_bytes`, rounded up.
    ///
    /// NOTE(review): `number_of_bytes + size_in_bytes - 1` can wrap in release builds
    /// for values within one page of `u64::MAX`; callers are assumed never to pass
    /// such values.
    pub const fn number_of_pages_from_number_of_bytes_rounded_up(self, number_of_bytes: u64) -> NumberOfPages
    {
        let size_in_bytes = self.size_in_bytes().get();
        (number_of_bytes + size_in_bytes - 1) / size_in_bytes
    }

    /// `number_of_bytes` rounded up to the next multiple of this page size.
    pub const fn non_zero_number_of_bytes_rounded_up_to_multiple_of_page_size(self, number_of_bytes: NonZeroU64) -> NonZeroU64
    {
        new_non_zero_u64(self.number_of_bytes_rounded_up_to_multiple_of_page_size(number_of_bytes.get()))
    }

    /// `number_of_bytes` rounded up to the next multiple of this page size.
    ///
    /// NOTE(review): subject to the same near-`u64::MAX` wrap caveat as
    /// `number_of_pages_from_number_of_bytes_rounded_up()`.
    pub const fn number_of_bytes_rounded_up_to_multiple_of_page_size(self, number_of_bytes: u64) -> u64
    {
        let size_in_bytes = self.size_in_bytes().get();
        ((number_of_bytes + size_in_bytes - 1) / size_in_bytes) * size_in_bytes
    }

    /// Can a pool of pages of this size be grown and shrunk dynamically?
    ///
    /// `true` for every size that is not a gigantic huge page.
    #[cfg(any(target_arch = "powerpc64", target_arch = "riscv64", target_arch = "sparc64", target_arch = "x86_64"))]
    pub const fn can_have_a_dynamic_huge_page_pool(self) -> bool
    {
        !self.is_a_gigantic_huge_page()
    }

    /// Can a pool of pages of this size be grown and shrunk dynamically?
    ///
    /// `true` for every size that is not a gigantic huge page.
    /// Non-`const` twin — presumably because `PageSize::default()` is not `const`
    /// on these architectures; confirm against `PageSize`'s definition.
    #[cfg(not(any(target_arch = "powerpc64", target_arch = "riscv64", target_arch = "sparc64", target_arch = "x86_64")))]
    #[inline(always)]
    pub fn can_have_a_dynamic_huge_page_pool(self) -> bool
    {
        !self.is_a_gigantic_huge_page()
    }

    /// Is this a 'gigantic' huge page, ie at least 2,048 default-sized pages?
    #[cfg(any(target_arch = "powerpc64", target_arch = "riscv64", target_arch = "sparc64", target_arch = "x86_64"))]
    pub const fn is_a_gigantic_huge_page(self) -> bool
    {
        self.is_a_gigantic_huge_page_inner(PageSize::default())
    }

    /// Is this a 'gigantic' huge page, ie at least 2,048 default-sized pages?
    ///
    /// Non-`const` twin — see `can_have_a_dynamic_huge_page_pool()`.
    #[cfg(not(any(target_arch = "powerpc64", target_arch = "riscv64", target_arch = "sparc64", target_arch = "x86_64")))]
    #[inline(always)]
    pub fn is_a_gigantic_huge_page(self) -> bool
    {
        self.is_a_gigantic_huge_page_inner(PageSize::default())
    }

    /// A huge page is treated as gigantic when it is at least `SCALAR` (2,048)
    /// times the supplied base page size.
    const fn is_a_gigantic_huge_page_inner(self, page_size: PageSize) -> bool
    {
        // Renamed from `Scalar`: consts are conventionally SCREAMING_SNAKE_CASE.
        const SCALAR: u64 = 2048;

        let minimum_gigantic_huge_page = (page_size as u64) * SCALAR;
        self.size_in_bytes().get() >= minimum_gigantic_huge_page
    }

    /// Global huge page pool size for this page size, if available.
    #[inline(always)]
    pub fn global_huge_page_pool_size(self, sys_path: &SysPath) -> Option<GlobalHugePagePoolSize>
    {
        GlobalHugePagePoolSize::new(sys_path, self, SysPath::global_hugepages_folder_path)
    }

    /// Global huge page pool statistics for this page size, if available.
    #[inline(always)]
    pub fn global_huge_page_pool_statistics(self, sys_path: &SysPath) -> Option<HugePagePoolStatistics>
    {
        HugePagePoolStatistics::new(sys_path, self, SysPath::global_hugepages_folder_path)
    }

    /// Reads `nr_hugepages_mempolicy` for this page size, or `None` if the file
    /// does not exist.
    ///
    /// NOTE(review): panics via `unwrap()` if the file exists but cannot be parsed,
    /// matching the error-handling style of the sibling readers here.
    #[inline(always)]
    pub fn memory_policy_global_huge_pages(self, sys_path: &SysPath) -> Option<u64>
    {
        let file_path = sys_path.global_hugepages_folder_path(self).append("nr_hugepages_mempolicy");
        if file_path.exists()
        {
            Some(file_path.read_value().unwrap())
        }
        else
        {
            // Was `return None`; a plain expression is idiomatic and consistent
            // with the other methods in this impl.
            None
        }
    }

    /// Default huge page size.
    ///
    /// The `SizeOfDefaultHugePage` statistic is treated as a value in kilobytes
    /// (hence `from_kilobytes`); the original binding was misleadingly named
    /// `size_in_bytes`.
    #[inline(always)]
    fn default_huge_page_size(memory_information: &MemoryInformation) -> Option<Self>
    {
        memory_information.get_statistic(&MemoryInformationName::SizeOfDefaultHugePage).and_then(Self::from_kilobytes)
    }

    /// Transparent huge page size read from `hpage_pmd_size`, or `None` if the
    /// file does not exist.
    ///
    /// FIX: `hpage_pmd_size` reports the size in *bytes* (eg `2097152` for a 2MB
    /// transparent huge page), not kilobytes. The original passed the value to
    /// `from_non_zero_kilobytes()`, which would have mapped a 2MB transparent
    /// huge page to `_2GB`.
    #[inline(always)]
    fn transparent_huge_page_size(sys_path: &SysPath) -> Option<Self>
    {
        let file_path = sys_path.transparent_huge_memory_file_path("hpage_pmd_size");
        if file_path.exists()
        {
            let value: NonZeroU64 = file_path.read_value().unwrap();
            Self::from_non_zero_bytes(value)
        }
        else
        {
            None
        }
    }

    /// The set of huge page sizes the running kernel supports, detected by
    /// probing for each size's sysfs folder.
    #[inline(always)]
    fn supported_huge_page_sizes(sys_path: &SysPath) -> BTreeSet<Self>
    {
        Self::iter().filter(|&huge_page_size| sys_path.global_hugepages_folder_path(huge_page_size).exists()).collect()
    }

    /// Parses a non-zero size in bytes into a huge page size.
    #[inline(always)]
    pub(crate) fn from_non_zero_bytes(bytes: NonZeroU64) -> Option<Self>
    {
        Self::from_bytes(bytes.get())
    }

    /// Parses a size in bytes into a huge page size.
    ///
    /// Returns `None` unless `bytes` is exactly one of the known sizes.
    #[inline(always)]
    pub(crate) fn from_bytes(bytes: u64) -> Option<Self>
    {
        use self::HugePageSize::*;

        match bytes
        {
            65_536 => Some(_64KB),
            524_288 => Some(_512KB),
            1_048_576 => Some(_1MB),
            // Was written `2_097152`; same value, digit grouping made consistent.
            2_097_152 => Some(_2MB),
            4_194_304 => Some(_4MB),
            8_388_608 => Some(_8MB),
            16_777_216 => Some(_16MB),
            33_554_432 => Some(_32MB),
            268_435_456 => Some(_256MB),
            536_870_912 => Some(_512MB),
            1_073_741_824 => Some(_1GB),
            2_147_483_648 => Some(_2GB),
            17_179_869_184 => Some(_16GB),
            _ => None,
        }
    }

    /// Parses a non-zero size in kilobytes into a huge page size.
    #[inline(always)]
    pub(crate) fn from_non_zero_kilobytes(kilobytes: NonZeroU64) -> Option<Self>
    {
        Self::from_kilobytes(kilobytes.get())
    }

    /// Parses a size in kilobytes into a huge page size.
    ///
    /// Returns `None` unless `kilobytes` is exactly one of the known sizes.
    #[inline(always)]
    pub(crate) fn from_kilobytes(kilobytes: u64) -> Option<Self>
    {
        use self::HugePageSize::*;

        match kilobytes
        {
            64 => Some(_64KB),
            512 => Some(_512KB),
            1_024 => Some(_1MB),
            2_048 => Some(_2MB),
            4_096 => Some(_4MB),
            8_192 => Some(_8MB),
            16_384 => Some(_16MB),
            32_768 => Some(_32MB),
            262_144 => Some(_256MB),
            524_288 => Some(_512MB),
            1_048_576 => Some(_1GB),
            2_097_152 => Some(_2GB),
            16_777_216 => Some(_16GB),
            _ => None,
        }
    }

    /// Encodes this size as the `MAP_HUGE_*` / `MFD_HUGE_*` flag bits:
    /// `log2(size in bytes) << MAP_HUGE_SHIFT`.
    ///
    /// NOTE(review): for every current variant the shifted value fits a `u32`
    /// (the largest is `34 << 26` for `_16GB`), so the `expect()` can never fire;
    /// the final `as i32` cast wraps for `_2GB` and `_16GB`, which matches the
    /// bit pattern the kernel's C headers produce — confirm against
    /// `linux/mman.h` before relying on the panic path.
    #[inline(always)]
    fn mmap_and_memfd_flags_bits(self) -> i32
    {
        const MAP_HUGE_SHIFT: u64 = 26;

        let value: u64 = self.log_base_2_of_bytes() << MAP_HUGE_SHIFT;
        let value: u32 = value.try_into().expect("Gigantic huge pages more than 2Gb are not supported by mmap or memfd");
        value as i32
    }

    /// log2 of the size in bytes.
    ///
    /// Exact because every variant's discriminant is a power of two, so
    /// `trailing_zeros()` is the logarithm.
    const fn log_base_2_of_bytes(self) -> u64
    {
        (self as u64).trailing_zeros() as u64
    }

    /// Size in bytes as a `NonZeroU64`.
    pub const fn into_non_zero_u64(self) -> NonZeroU64
    {
        new_non_zero_u64(self as u64)
    }

    /// Size in bytes.
    pub const fn into_u64(self) -> u64
    {
        self as u64
    }

    /// Size in bytes as a `NonZeroUsize`.
    pub const fn into_non_zero_usize(self) -> NonZeroUsize
    {
        new_non_zero_usize(self.into_usize())
    }

    /// Size in bytes as a `usize` (lossless on all supported 64-bit targets).
    pub const fn into_usize(self) -> usize
    {
        self.into_u64() as usize
    }
}