1#![cfg(feature = "std")]
2
3use crate::{Context, SizeOf};
4use core::{
5 cell::{Cell, UnsafeCell},
6 mem::size_of,
7 ptr::NonNull,
8 sync::atomic::AtomicBool,
9};
10use std::{
11 collections::hash_map::RandomState,
12 ffi::{OsStr, OsString},
13 net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6},
14 path::{Path, PathBuf},
15 sync::{Barrier, Condvar, Mutex, Once, RwLock},
16 thread::{Thread, ThreadId},
17 time::{Instant, SystemTime},
18};
19
/// Assumed byte width of one `OsString`/`PathBuf` element, used when
/// estimating the heap buffer behind those types.
///
/// NOTE(review): the Windows branch assumes `OsString::len()`/`capacity()`
/// are counted in 16-bit code units. If std reports them in bytes (its
/// internal WTF-8 representation), this doubles the estimate on Windows —
/// confirm against `std::ffi::OsString` before relying on exact numbers.
const PATH_ELEM_SIZE: usize = if cfg!(windows) {
    size_of::<u16>()
} else {
    size_of::<u8>()
};
28
// Std types that own no heap allocations requiring traversal: their whole
// footprint is covered by `size_of::<T>()`.
// NOTE(review): the macro is defined elsewhere in this crate; presumably it
// generates a no-op `size_of_children` for each listed type — confirm there.
impl_total_size_childless! {
    Path,
    OsStr,
    Barrier,
    Condvar,
    Instant,
    ThreadId,
    SystemTime,
    RandomState,
    IpAddr,
    Ipv4Addr,
    Ipv6Addr,
    SocketAddr,
    SocketAddrV4,
    SocketAddrV6,
}
45
46impl SizeOf for OsString {
47 fn size_of_children(&self, context: &mut Context) {
48 if self.capacity() != 0 {
49 context
50 .add_vectorlike(self.len(), self.capacity(), PATH_ELEM_SIZE)
51 .add_distinct_allocation();
52 }
53 }
54}
55
56impl SizeOf for PathBuf {
57 fn size_of_children(&self, context: &mut Context) {
58 if self.capacity() != 0 {
59 context
60 .add_vectorlike(self.as_os_str().len(), self.capacity(), PATH_ELEM_SIZE)
61 .add_distinct_allocation();
62 }
63 }
64}
65
66impl<T> SizeOf for Mutex<T>
67where
68 T: SizeOf,
69{
70 fn size_of_children(&self, context: &mut Context) {
72 if cfg!(target_env = "sgx") {
73 context
74 .add(estimate_mutex_size_sgx::<T>())
75 .add_distinct_allocation();
76 }
77
78 if let Ok(contents) = self.lock() {
83 contents.size_of_children(context);
84 }
85 }
86}
87
/// Estimates the size of the out-of-line state behind an SGX
/// `std::sync::Mutex<T>`.
///
/// The `Fake*` structs below mirror the private types used by std's SGX
/// mutex implementation purely so `size_of` can measure the aggregate;
/// none of them are ever constructed.
///
/// NOTE(review): these mirrors track unstable std internals and can drift
/// out of sync with the toolchain's actual SGX layout — treat the result
/// as an estimate, and re-check against std sources on toolchain bumps.
const fn estimate_mutex_size_sgx<T>() -> usize {
    // Top-level mirror: a spin-lock-protected wait variable plus the
    // user's value.
    #[allow(dead_code)]
    struct FakeSgxMutex<T> {
        inner: FakeSpinMutex<FakeWaitVariable<bool>>,
        value: UnsafeCell<T>,
    }

    // Spin lock: guarded value plus an atomic lock flag.
    #[allow(dead_code)]
    struct FakeSpinMutex<T> {
        value: UnsafeCell<T>,
        lock: AtomicBool,
    }

    // A queue of waiting threads paired with the guarded flag.
    #[allow(dead_code)]
    struct FakeWaitVariable<T> {
        queue: FakeWaitQueue,
        lock: T,
    }

    #[allow(dead_code)]
    struct FakeWaitQueue {
        inner: FakeUnsafeList<FakeSpinMutex<FakeWaitEntry>>,
    }

    // One waiting thread: its TCS pointer and a wake flag.
    #[allow(dead_code)]
    struct FakeWaitEntry {
        tcs: NonNull<u8>,
        wake: bool,
    }

    // Intrusive doubly-linked list head, as used by the wait queue.
    #[allow(dead_code)]
    struct FakeUnsafeList<T> {
        head_tail: NonNull<FakeUnsafeListEntry<T>>,
        head_tail_entry: Option<FakeUnsafeListEntry<T>>,
    }

    #[allow(dead_code)]
    struct FakeUnsafeListEntry<T> {
        next: NonNull<FakeUnsafeListEntry<T>>,
        prev: NonNull<FakeUnsafeListEntry<T>>,
        value: Option<T>,
    }

    size_of::<FakeSgxMutex<T>>()
}
147
148impl<T> SizeOf for RwLock<T>
150where
151 T: SizeOf,
152{
153 fn size_of_children(&self, context: &mut Context) {
154 if let Ok(contents) = self.read() {
156 contents.size_of_children(context);
157 }
158 }
159}
160
161impl SizeOf for Once {
171 fn size_of_children(&self, context: &mut Context) {
172 #[allow(dead_code)]
173 struct FakeWaiter {
174 thread: Cell<Option<Thread>>,
175 signaled: AtomicBool,
176 next: *const FakeWaiter,
177 }
178
179 context
181 .add(size_of::<FakeWaiter>())
182 .add_distinct_allocation();
183 }
184}
185
186pub(crate) mod hashmap {
187 use crate::{Context, SizeOf};
188 use core::mem::{align_of, size_of};
189 use std::collections::{HashMap, HashSet};
190
191 #[inline]
194 const fn capacity_to_buckets(capacity: usize) -> usize {
195 if capacity == 0 {
196 0
197 } else if capacity < 4 {
198 4
199 } else if capacity < 8 {
200 8
201 } else {
202 (capacity * 8 / 7).next_power_of_two()
203 }
204 }
205
206 const GROUP_WIDTH: usize = if cfg!(any(
208 target_pointer_width = "64",
209 target_arch = "aarch64",
210 target_arch = "x86_64",
211 target_arch = "wasm32",
212 )) {
213 size_of::<u64>()
214 } else {
215 size_of::<u32>()
216 };
217
218 #[inline]
221 pub(crate) const fn calculate_layout_for<T>(buckets: usize) -> usize {
222 let align = if align_of::<T>() > GROUP_WIDTH {
224 align_of::<T>()
225 } else {
226 GROUP_WIDTH
227 };
228 let ctrl_offset = ((size_of::<T>() * buckets) + (align - 1)) & !(align - 1);
229 ctrl_offset + buckets + GROUP_WIDTH
230 }
231
232 #[inline]
236 pub(crate) const fn estimate_hashmap_size<K, V>(
237 length: usize,
238 capacity: usize,
239 ) -> (usize, usize) {
240 if capacity == 0 {
241 (0, 0)
242 } else {
243 let buckets = capacity_to_buckets(capacity);
245
246 let table_layout = calculate_layout_for::<(K, V)>(buckets);
248
249 let used_layout = calculate_layout_for::<(K, V)>(length);
251
252 (table_layout, used_layout)
253 }
254 }
255
256 impl<K, S> SizeOf for HashSet<K, S>
257 where
258 K: SizeOf,
259 S: SizeOf,
260 {
261 fn size_of_children(&self, context: &mut Context) {
262 if self.capacity() != 0 {
263 let (total_bytes, used_bytes) =
264 estimate_hashmap_size::<K, ()>(self.len(), self.capacity());
265
266 context
267 .add(used_bytes)
268 .add_excess(total_bytes - used_bytes)
269 .add_distinct_allocation();
270
271 self.iter().for_each(|key| key.size_of_children(context));
272 }
273
274 self.hasher().size_of_children(context);
275 }
276 }
277
278 impl<K, V, S> SizeOf for HashMap<K, V, S>
279 where
280 K: SizeOf,
281 V: SizeOf,
282 S: SizeOf,
283 {
284 fn size_of_children(&self, context: &mut Context) {
285 if self.capacity() != 0 {
286 let (total_bytes, used_bytes) =
287 estimate_hashmap_size::<K, V>(self.len(), self.capacity());
288
289 context
290 .add(used_bytes)
291 .add_excess(total_bytes - used_bytes)
292 .add_distinct_allocation();
293
294 self.iter().for_each(|(key, value)| {
295 key.size_of_children(context);
296 value.size_of_children(context);
297 });
298 }
299
300 self.hasher().size_of_children(context);
301 }
302 }
303}