/// A huge (large) virtual memory page size.
///
/// The discriminant of each variant is the page size expressed in kibibytes
/// (KiB); this matches the units used by `/proc/meminfo` (see
/// `from_proc_mem_info_value`) and is what `size_in_kilo_bytes` returns
/// directly via `self as u64`.
///
/// `Ord` is derived from the discriminant, so ordering is by increasing size.
#[repr(u64)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum HugePageSize
{
// 1 MiB = 1,024 KiB.
_1MB = 1024,
// 2 MiB = 2,048 KiB (common x86_64 huge page size).
_2MB = 2048,
// 4 MiB = 4,096 KiB.
_4MB = 4096,
// 16 MiB = 16,384 KiB.
_16MB = 16_384,
// 256 MiB = 262,144 KiB.
_256MB = 262_144,
// 512 MiB = 524,288 KiB.
_512MB = 524_288,
// 1 GiB = 1,048,576 KiB (common x86_64 gigantic page size).
_1GB = 1_048_576,
// 2 GiB = 2,097,152 KiB.
_2GB = 2_097_152,
// 16 GiB = 16,777,216 KiB.
_16GB = 16_777_216,
}
impl HugePageSize
{
    /// Every huge page size any supported architecture could potentially offer,
    /// ordered from largest to smallest.
    ///
    /// Which of these a running kernel actually supports is discovered at
    /// runtime by `supported_huge_page_sizes()`.
    pub const PotentiallySupportedHugePageSizesLargestFirst: [HugePageSize; 9] =
    [
        HugePageSize::_16GB,
        HugePageSize::_2GB,
        HugePageSize::_1GB,
        HugePageSize::_512MB,
        HugePageSize::_256MB,
        HugePageSize::_16MB,
        HugePageSize::_4MB,
        HugePageSize::_2MB,
        HugePageSize::_1MB,
    ];

    /// Size of this huge page in mebibytes.
    #[inline(always)]
    pub fn size_in_mega_bytes(self) -> u64
    {
        self.size_in_kilo_bytes() / 1024
    }

    /// Size of this huge page in kibibytes.
    ///
    /// This is the enum's discriminant value.
    #[inline(always)]
    pub fn size_in_kilo_bytes(self) -> u64
    {
        self as u64
    }

    /// Size of this huge page in bytes.
    #[inline(always)]
    pub fn size_in_bytes(self) -> u64
    {
        self.size_in_kilo_bytes() * 1024
    }

    /// Number of huge pages of this size needed to cover
    /// `desired_number_of_kilo_bytes` of memory.
    ///
    /// Rounds up, so the result always covers at least the desired amount;
    /// a desired amount of zero (or anything not exceeding one page) yields 1.
    ///
    /// NOTE(fix): the previous implementation had the comparison and the
    /// division inverted — it returned 1 when a single page was too *small*
    /// and computed `size / desired` otherwise (panicking on a desired amount
    /// of zero). The correct count is `ceil(desired / size)`.
    #[inline(always)]
    pub fn calculate_number_of_huge_pages(&self, desired_number_of_kilo_bytes: u64) -> u64
    {
        let size_in_kilo_bytes = self.size_in_kilo_bytes();
        if desired_number_of_kilo_bytes <= size_in_kilo_bytes
        {
            1
        }
        else
        {
            // Round up: (a + b - 1) / b == ceil(a / b) for positive b.
            // Overflow is not a practical concern; it would require a desired
            // amount within one page size of u64::MAX kibibytes.
            (desired_number_of_kilo_bytes + size_in_kilo_bytes - 1) / size_in_kilo_bytes
        }
    }

    /// Parses a huge page size from a `/proc/meminfo` value expressed in
    /// kibibytes, e.g. `2048` for `Hugepagesize: 2048 kB`.
    ///
    /// Returns `None` for any value that is not a known huge page size.
    #[inline(always)]
    pub fn from_proc_mem_info_value(value: u64) -> Option<Self>
    {
        use self::HugePageSize::*;
        match value
        {
            1024 => Some(_1MB),
            2048 => Some(_2MB),
            4096 => Some(_4MB),
            16384 => Some(_16MB),
            262144 => Some(_256MB),
            524288 => Some(_512MB),
            1048576 => Some(_1GB),
            2097152 => Some(_2GB),
            16777216 => Some(_16GB),
            _ => None,
        }
    }

    /// Human-readable name of this size, e.g. `"2MB"`.
    #[inline(always)]
    pub fn to_str(&self) -> &'static str
    {
        use self::HugePageSize::*;
        match *self
        {
            _1MB => "1MB",
            _2MB => "2MB",
            _4MB => "4MB",
            _16MB => "16MB",
            _256MB => "256MB",
            _512MB => "512MB",
            _1GB => "1GB",
            _2GB => "2GB",
            _16GB => "16GB",
        }
    }

    /// Human-readable name of this size as a byte string, e.g. `b"2MB"`.
    #[inline(always)]
    pub fn to_bytes(&self) -> &'static [u8]
    {
        use self::HugePageSize::*;
        match *self
        {
            _1MB => b"1MB",
            _2MB => b"2MB",
            _4MB => b"4MB",
            _16MB => b"16MB",
            _256MB => b"256MB",
            _512MB => b"512MB",
            _1GB => b"1GB",
            _2GB => b"2GB",
            _16GB => b"16GB",
        }
    }

    /// The largest huge page size supported by the running kernel.
    ///
    /// # Panics
    ///
    /// Panics if no huge page size at all is supported.
    #[inline(always)]
    pub fn largest_supported_huge_page_size(sys_path: &SysPath) -> Self
    {
        // The BTreeSet iterates in ascending order, so the back is the largest.
        *Self::supported_huge_page_sizes(sys_path).iter().next_back().expect("Huge pages are not supported")
    }

    /// Discovers which huge page sizes the running kernel supports.
    ///
    /// A size is considered supported when its `nr_hugepages` file under
    /// `/sys` can be read.
    #[inline(always)]
    pub fn supported_huge_page_sizes(sys_path: &SysPath) -> BTreeSet<Self>
    {
        let mut supported = BTreeSet::new();
        for huge_page_size in Self::PotentiallySupportedHugePageSizesLargestFirst.iter()
        {
            if huge_page_size.number_of_global_huge_pages(sys_path).is_ok()
            {
                supported.insert(*huge_page_size);
            }
        }
        supported
    }

    /// Releases all globally reserved huge pages of this size.
    ///
    /// Equivalent to `reserve_global_huge_pages(sys_path, 0)`.
    ///
    /// # Panics
    ///
    /// Panics if the effective user is not root.
    #[inline(always)]
    pub fn unreserve_global_huge_pages(self, sys_path: &SysPath) -> io::Result<()>
    {
        assert_effective_user_id_is_root(&format!("Clear all global huge pages of size '{:?}'", self));
        self.reserve_global_huge_pages(sys_path, 0)
    }

    /// Tries to reserve `number_to_try_to_reserve` global huge pages of this
    /// size by writing to `nr_hugepages` (and, on PowerPC64, to
    /// `nr_overcommit_hugepages` as well).
    ///
    /// The kernel may reserve fewer pages than requested; check
    /// `number_of_global_huge_pages()` afterwards if the exact count matters.
    ///
    /// # Panics
    ///
    /// Panics if the effective user is not root.
    #[inline(always)]
    pub fn reserve_global_huge_pages(self, sys_path: &SysPath, number_to_try_to_reserve: u64) -> io::Result<()>
    {
        assert_effective_user_id_is_root(&format!("Reserve '{}' global huge pages of size '{:?}'", number_to_try_to_reserve, self));
        sys_path.global_hugepages_file_path(self, "nr_hugepages").write_value(number_to_try_to_reserve)?;
        #[cfg(target_arch = "powerpc64")]
        {
            sys_path.global_hugepages_file_path(self, "nr_overcommit_hugepages").write_value(number_to_try_to_reserve)?;
        }
        Ok(())
    }

    /// Reads the global `nr_hugepages` value for this size.
    #[inline(always)]
    pub fn number_of_global_huge_pages(self, sys_path: &SysPath) -> io::Result<u64>
    {
        sys_path.read_global_hugepages_value(self, "nr_hugepages")
    }

    /// Reads the global `free_hugepages` value for this size.
    #[inline(always)]
    pub fn number_of_free_global_huge_pages(self, sys_path: &SysPath) -> io::Result<u64>
    {
        sys_path.read_global_hugepages_value(self, "free_hugepages")
    }

    /// Reads the global `surplus_hugepages` value for this size.
    #[inline(always)]
    pub fn number_of_surplus_global_huge_pages(self, sys_path: &SysPath) -> io::Result<u64>
    {
        sys_path.read_global_hugepages_value(self, "surplus_hugepages")
    }

    /// Reads the global `resv_hugepages` (reserved) value for this size.
    #[inline(always)]
    pub fn number_of_reserved_global_huge_pages(self, sys_path: &SysPath) -> io::Result<u64>
    {
        sys_path.read_global_hugepages_value(self, "resv_hugepages")
    }

    /// Reads the global `nr_hugepages_mempolicy` value for this size.
    #[inline(always)]
    pub fn number_of_memory_policy_global_huge_pages(self, sys_path: &SysPath) -> io::Result<u64>
    {
        sys_path.read_global_hugepages_value(self, "nr_hugepages_mempolicy")
    }

    /// Reads the global `nr_overcommit_hugepages` value for this size.
    #[inline(always)]
    pub fn number_of_overcommit_global_huge_pages(self, sys_path: &SysPath) -> io::Result<u64>
    {
        sys_path.read_global_hugepages_value(self, "nr_overcommit_hugepages")
    }

    /// Releases all huge pages of this size reserved on `numa_node`.
    ///
    /// Equivalent to `reserve_numa_huge_pages(sys_path, numa_node, 0)`.
    ///
    /// # Panics
    ///
    /// Panics if the effective user is not root.
    #[inline(always)]
    pub fn unreserve_numa_huge_pages(self, sys_path: &SysPath, numa_node: u8) -> io::Result<()>
    {
        assert_effective_user_id_is_root(&format!("Clear all NUMA huge pages of size '{:?}'", self));
        self.reserve_numa_huge_pages(sys_path, numa_node, 0)
    }

    /// Tries to reserve `number_to_try_to_reserve` huge pages of this size on
    /// `numa_node` by writing to that node's `nr_hugepages`.
    ///
    /// The kernel may reserve fewer pages than requested.
    ///
    /// # Panics
    ///
    /// Panics if the effective user is not root.
    #[inline(always)]
    pub fn reserve_numa_huge_pages(self, sys_path: &SysPath, numa_node: u8, number_to_try_to_reserve: u64) -> io::Result<()>
    {
        assert_effective_user_id_is_root(&format!("Reserve '{}' NUMA huge pages of size '{:?}'", number_to_try_to_reserve, self));
        sys_path.numa_hugepages_file_path(self, numa_node, "nr_hugepages").write_value(number_to_try_to_reserve)
    }

    /// Reads `nr_hugepages` for this size on `numa_node`.
    #[inline(always)]
    pub fn number_of_numa_huge_pages(self, sys_path: &SysPath, numa_node: u8) -> io::Result<u64>
    {
        sys_path.read_numa_hugepages_value(self, numa_node, "nr_hugepages")
    }

    /// Reads `free_hugepages` for this size on `numa_node`.
    #[inline(always)]
    pub fn number_of_free_numa_huge_pages(self, sys_path: &SysPath, numa_node: u8) -> io::Result<u64>
    {
        sys_path.read_numa_hugepages_value(self, numa_node, "free_hugepages")
    }

    /// Reads `surplus_hugepages` for this size on `numa_node`.
    #[inline(always)]
    pub fn number_of_surplus_numa_huge_pages(self, sys_path: &SysPath, numa_node: u8) -> io::Result<u64>
    {
        sys_path.read_numa_hugepages_value(self, numa_node, "surplus_hugepages")
    }
}