core_allocator/allocators.rs

use crate::{CoreAllocator, CoreGroup, ManagedCore, Resource};
#[cfg(feature = "hwloc2")]
use hwloc2::ObjectType;
use std::fmt::{Debug, Formatter};
use std::mem::replace;
use std::ops::Range;
use std::sync::Arc;
#[cfg(feature = "hwloc2")]
lazy_static::lazy_static! {
    // Every logical CPU reported by the hwloc topology, wrapped for shared ownership.
    static ref ALL_CORES: Arc<Vec<Arc<ManagedCore>>> = {
        let topo = hwloc2::Topology::new().unwrap();
        let cpuset = topo.object_at_root().cpuset().unwrap();
        let cores = cpuset.into_iter().map(|x| x as _).map(ManagedCore::new).map(Arc::new).collect();
        Arc::new(cores)
    };
}
#[cfg(not(feature = "hwloc2"))]
lazy_static::lazy_static! {
    // Fallback without hwloc: assume up to 256 logical CPUs and index them directly.
    static ref ALL_CORES: Arc<Vec<Arc<ManagedCore>>> = {
        let cpuset = 0..256;
        let cores = cpuset.into_iter().map(|x| x as _).map(ManagedCore::new).map(Arc::new).collect();
        Arc::new(cores)
    };
}
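/// Allocator that never pins work to a specific core: every request succeeds
/// with an unconstrained group.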
pub struct NoAllocator;
impl CoreAllocator for NoAllocator {
    fn allocate_core(&self) -> Option<CoreGroup> {
        Some(CoreGroup::any_core())
    }
}
struct ManagedGroup {
    resource: Resource,
    group: Vec<Arc<ManagedCore>>,
}

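/// Allocator backed by an explicit list of core groups; each allocation hands
/// out an entire group, and a group is handed out only while it is free.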
pub struct GroupedAllocator {
    groups: Vec<ManagedGroup>,
}
impl GroupedAllocator {
    pub fn new() -> Self {
        Self { groups: vec![] }
    }
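    /// Registers a group of cores guarded by a fresh `Resource` lock.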
    pub fn add_group(&mut self, group: Vec<Arc<ManagedCore>>) {
        self.groups.push(ManagedGroup {
            resource: Resource::new(),
            group,
        });
    }
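    /// Retains only the groups in which every core satisfies `filter`;
    /// groups containing any non-matching core are dropped.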
    pub fn filter_group(&mut self, filter: impl Fn(&ManagedCore) -> bool) {
        let groups = replace(&mut self.groups, vec![]);
        'outer: for group in groups {
            for core in &group.group {
                if !filter(core) {
                    continue 'outer;
                }
            }
            self.groups.push(group);
        }
    }
}
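// Hands out the first group whose `Resource` lock can be acquired and whose
// cores are all currently free.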
impl CoreAllocator for GroupedAllocator {
    fn allocate_core(&self) -> Option<CoreGroup> {
        for group in self.groups.iter() {
            if let Ok(taken) = group.resource.try_lock() {
                let mut released = true;
                for core in &group.group {
                    if core.taken.is_taken() {
                        released = false;
                    }
                }
                if released {
                    return Some(CoreGroup::cores(taken, group.group.clone()));
                }
            }
        }

        None
    }
}
impl Debug for GroupedAllocator {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let groups = self
            .groups
            .iter()
            .map(|x| x.group.iter().map(|x| x.index).collect::<Vec<_>>())
            .collect::<Vec<_>>();
        groups.fmt(f)
    }
}
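/// Builds a `GroupedAllocator` from a contiguous range of core indices.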
pub struct SequentialAllocator;

impl SequentialAllocator {
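    /// Splits `range` into consecutive groups of `width` cores.
    /// A trailing group shorter than `width` is discarded.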
    pub fn new_range(range: Range<usize>, width: usize) -> GroupedAllocator {
        let mut groups = GroupedAllocator::new();
        let mut group = vec![];
        for i in range {
            group.push(Arc::clone(&ALL_CORES.get(i).unwrap()));
            if group.len() == width {
                groups.add_group(replace(&mut group, vec![]));
            }
        }
        groups
    }
}

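/// Builds core groups from the hwloc topology, one group per object of the
/// chosen type (for example one group per L3 cache or per package).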
#[cfg(feature = "hwloc2")]
pub struct HierarchicalAllocator {
    object_type: hwloc2::ObjectType,
    on_cpus: Option<Vec<usize>>,
}
#[cfg(feature = "hwloc2")]
impl HierarchicalAllocator {
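    /// Groups cores by the given hwloc topology object type.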
    pub fn new_at_depth(object_type: hwloc2::ObjectType) -> Self {
        Self {
            object_type,
            on_cpus: None,
        }
    }

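    /// Restricts the generated groups to cores that live on the packages
    /// (sockets) with the given indices.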
    pub fn on_cpu(mut self, on_cpus: Vec<usize>) -> Self {
        self.on_cpus = Some(on_cpus);
        self
    }

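    /// Materializes the groups. For `L3Cache`, the CPUs sharing each cache are
    /// split into two groups, one per SMT sibling position, so a group never
    /// mixes hyperthreads of the same physical core. For any other object type,
    /// one group is created per topology object.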
    pub fn finish(self) -> GroupedAllocator {
        let obj_type = self.object_type;
        let topo = hwloc2::Topology::new().unwrap();
        let mut groups = GroupedAllocator::new();
        let mut allow = hwloc2::CpuSet::new();
        if let Some(allow_cpu) = self.on_cpus {
            for (i, cpu) in topo
                .objects_with_type(&hwloc2::ObjectType::Package)
                .unwrap()
                .iter()
                .enumerate()
            {
                if allow_cpu.contains(&i) {
                    for bit in cpu.cpuset().unwrap() {
                        allow.set(bit);
                    }
                }
            }
        } else {
            allow = hwloc2::CpuSet::full();
        }
        if obj_type == ObjectType::L3Cache {
            for object in topo.objects_with_type(&obj_type).unwrap().iter() {
                let mut phys = hwloc2::CpuSet::new();
                let mut hypers = hwloc2::CpuSet::new();
                for l2 in object.children() {
                    let mut cpu = l2.cpuset().unwrap().into_iter();
                    phys.set(cpu.next().unwrap());
                    hypers.set(cpu.next().unwrap());
                    assert_eq!(cpu.next(), None);
                }
                for cpu_set in [phys, hypers] {
                    let group = cpu_set
                        .into_iter()
                        .filter(|x| allow.is_set(*x))
                        .flat_map(|x| ALL_CORES.get(x as usize))
                        .map(Arc::clone)
                        .collect::<Vec<_>>();
                    if !group.is_empty() {
                        groups.add_group(group)
                    }
                }
            }
        } else {
            for object in topo.objects_with_type(&obj_type).unwrap().iter() {
                if let Some(cpu_set) = object.cpuset() {
                    let group = cpu_set
                        .into_iter()
                        .filter(|x| allow.is_set(*x))
                        .flat_map(|x| ALL_CORES.get(x as usize))
                        .map(Arc::clone)
                        .collect::<Vec<_>>();
                    if !group.is_empty() {
                        groups.add_group(group)
                    }
                }
            }
        }
        groups
    }
}
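
// A minimal usage sketch, assuming the host exposes at least eight logical CPUs
// and that none of them have been claimed elsewhere.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn sequential_groups_of_two() {
        // Carve cores 0..8 into four disjoint groups of two.
        let allocator = SequentialAllocator::new_range(0..8, 2);
        // At least one group is free, so the first allocation succeeds.
        assert!(allocator.allocate_core().is_some());
    }
}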