// This file is part of context-allocator. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/context-allocator/master/COPYRIGHT. No part of context-allocator, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2019 The developers of context-allocator. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/context-allocator/master/COPYRIGHT.


/// A NUMA memory allocation policy; each variant corresponds to one of Linux's `MPOL_*` memory policy modes.
///
/// Defaults to `Default`.
///
/// Ignored on operating systems other than Android and Linux.
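///
/// # Example
///
/// A minimal sketch of choosing a policy (illustrative only; the crate-root import path is an assumption, so the doc test is marked `ignore`):
///
/// ```ignore
/// use context_allocator::NumaAllocationPolicy;
///
/// // The default policy is `Default` (`MPOL_DEFAULT`).
/// assert_eq!(NumaAllocationPolicy::default(), NumaAllocationPolicy::Default);
///
/// // `Local` (`MPOL_LOCAL`) allocates on the node of the CPU that triggers the allocation.
/// let policy = NumaAllocationPolicy::Local;
/// ```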
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
pub enum NumaAllocationPolicy
{
	/// `MPOL_DEFAULT`.
	///
	/// This mode requests that any non-default policy be removed, restoring default behavior.
	///
	/// When applied to a range of memory this means to use the thread memory policy, which may have been set with the syscall `set_mempolicy`.
	///
	/// If the mode of the thread memory policy is also `NumaAllocationPolicy::Default`, then the system-wide default policy will be used.
	/// The system-wide default policy allocates pages on the node of the CPU that triggers the allocation.
	Default,

	/// `MPOL_PREFERRED`.
	///
	/// This mode sets the preferred node for allocation.
	///
	/// The kernel will try to allocate pages from this node first and fall back to other nodes if the preferred node is low on free memory.
	///
	/// The first node in the bit set will be selected as the preferred node.
	/// If the NUMA node bit set is empty, then the memory is allocated on the node of the CPU that triggered the allocation.
	Preferred(NumaNodeBitSet),

	/// `MPOL_BIND`.
	///
	/// This mode specifies a strict policy that restricts memory allocation to the nodes specified in the NUMA node bit set.
	///
	/// Page allocations will come from the node in the NUMA node bit set with sufficient free memory that is closest to the node where the allocation takes place.
	/// Pages will not be allocated from any node not specified in the NUMA node bit set.
	Bind(NumaNodeBitSet),

	/// `MPOL_INTERLEAVE`.
	///
	/// This mode specifies that page allocations be interleaved across the set of nodes specified in the NUMA node bit set.
	///
	/// This optimizes for bandwidth instead of latency by spreading out pages and memory accesses to those pages across multiple nodes.
	/// To be effective, the memory area should be fairly large (at least 1 MB) and have a fairly uniform access pattern.
	/// Accesses to a single page of the area will still be limited to the memory bandwidth of a single node.
	Interleave(NumaNodeBitSet),

	/// `MPOL_LOCAL`.
	///
	/// This mode specifies "local allocation"; the memory is allocated on the node of the CPU that triggered the allocation (the "local node").
	///
	/// If the "local node" is low on free memory, or is not allowed by the thread's current cpuset context, the kernel will try to allocate memory from other nodes.
	/// The kernel will allocate memory from the "local node" whenever memory for that node is available and the node is allowed by the thread's current cpuset context.
	///
	/// Since Linux 3.8.
	Local,
}

impl Default for NumaAllocationPolicy
{
	#[inline(always)]
	fn default() -> Self
	{
		NumaAllocationPolicy::Default
	}
}

impl NumaAllocationPolicy
{
	/// Maps this policy to the Linux `MPOL_*` memory policy mode number, plus the node mask arguments derived from the associated `NumaNodeBitSet` (if any).
	#[cfg(any(target_os = "android", target_os = "linux"))]
	#[inline(always)]
	fn values(&self) -> (i32, (i32, Option<usize>, usize))
	{
		use self::NumaAllocationPolicy::*;

		match *self
		{
			// `MPOL_DEFAULT`.
			Default => (0, NumaNodeBitSet::no_mode_flags_nodemask_maxnode),

			// `MPOL_PREFERRED`.
			Preferred(ref numa_node_bit_set) => (1, numa_node_bit_set.mask_and_size()),

			// `MPOL_BIND`.
			Bind(ref numa_node_bit_set) => (2, numa_node_bit_set.mask_and_size()),

			// `MPOL_INTERLEAVE`.
			Interleave(ref numa_node_bit_set) => (3, numa_node_bit_set.mask_and_size()),

			// `MPOL_LOCAL`.
			Local => (4, NumaNodeBitSet::no_mode_flags_nodemask_maxnode),
		}
	}
}
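
#[cfg(all(test, any(target_os = "android", target_os = "linux")))]
mod tests
{
	use super::*;

	// A minimal sanity check (a sketch, not exhaustive): the mode numbers
	// returned by `values()` match the Linux kernel's `MPOL_*` constants
	// (`MPOL_DEFAULT` = 0, `MPOL_PREFERRED` = 1, `MPOL_BIND` = 2,
	// `MPOL_INTERLEAVE` = 3, `MPOL_LOCAL` = 4).
	#[test]
	fn mode_numbers_match_linux_mpol_constants()
	{
		assert_eq!(NumaAllocationPolicy::Default.values().0, 0);
		assert_eq!(NumaAllocationPolicy::Local.values().0, 4);
	}
}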