1use fxhash::FxHasher64;
2use std::cmp::{max, min};
3use std::hash::Hasher;
4use std::mem::size_of;
5use std::ops::Not;
6use std::{ptr, slice};
7
/// Occupancy marker stored as the first byte of every bucket.
///
/// `Empty` terminates a probe chain; `Tombstone` marks a removed entry while
/// keeping chains that pass through it intact; `Occupied` holds a live entry.
#[repr(u8)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum BucketStatus {
    Empty = 0, Tombstone = 1,
    Occupied = 2,
}
15
/// Fixed-size header laid out at the start of the map's backing memory.
///
/// `#[repr(C)]` keeps the byte layout stable; the bucket array begins
/// immediately after this header (`MAP_BUCKETS_OFFSET`).
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct MapHeader {
    // Bucket count (always a power of two) and number of Occupied buckets.
    pub capacity: u16, pub element_count: u16, pub key_size: u32,
    pub value_size: u32,

    // Byte offset of the value within a bucket, and the stride between buckets.
    pub value_offset: u32,
    pub bucket_size: u32,

    // Caller-requested maximum element count; capacity is this rounded up to a
    // power of two. NOTE(review): `get_or_reserve_entry` does not enforce this
    // limit — only `overwrite` consults it. Confirm intended.
    pub logical_limit: u16,
    // Byte offset of the key within a bucket (small: status byte + alignment pad).
    pub key_offset: u8,
    pub padding2: u8,
}
32
/// Parameters produced by `layout` and consumed by `init` to set up a map in
/// caller-provided memory of at least `total_size` bytes.
pub struct MapInit {
    pub key_size: u32,
    // Alignment in bytes; assumed to be a nonzero power of two.
    pub key_alignment: u8,
    pub value_size: u32,
    // Alignment in bytes; assumed to be a nonzero power of two.
    pub value_alignment: u8,
    // Bucket count; always a power of two.
    pub capacity: u16,
    pub logical_limit: u16,
    // Total backing-memory bytes: header plus `capacity * bucket_size`.
    pub total_size: u32,
}
42
/// Byte layout of a single bucket: `[status u8][pad][key][pad][value][pad]`.
#[derive(Clone, Copy, Debug)]
pub struct BucketLayout {
    // Stride between consecutive buckets, padded to max(key, value) alignment.
    pub bucket_size: u32,
    // Offset of the key from the start of the bucket.
    pub key_offset: u8,
    // Offset of the value from the start of the bucket.
    pub value_offset: u32,
}
49
// The bucket array is stored immediately after the header.
const MAP_BUCKETS_OFFSET: usize = size_of::<MapHeader>();
// Linear probing gives up after this many slots: lookups report "absent" and
// insertions fail rather than scanning the whole table.
const MAX_PROBE_DISTANCE: usize = 32;
52
53#[inline]
54fn calculate_hash_bytes(key_bytes: &[u8]) -> u64 {
55 let mut hasher = FxHasher64::default();
56 hasher.write(key_bytes);
57 hasher.finish()
58}
59
60#[inline]
62#[must_use]
63pub fn calculate_bucket_layout(
64 key_size: u32,
65 key_alignment: u8,
66 value_size: u32,
67 value_alignment: u8,
68) -> BucketLayout {
69 let status_size: u32 = 1;
70 let mut current_offset = status_size;
71
72 let key_align = u32::from(key_alignment);
74 let key_offset = (current_offset + key_align - 1) & !(key_align - 1);
75 current_offset = key_offset + key_size;
76
77 let value_align = u32::from(value_alignment);
79 let value_offset = (current_offset + value_align - 1) & !(value_align - 1);
80 current_offset = value_offset + value_size;
81
82 let bucket_content_alignment = max(key_align, value_align);
84 let bucket_size =
85 (current_offset + bucket_content_alignment - 1) & !(bucket_content_alignment - 1);
86
87 BucketLayout {
88 bucket_size,
89 key_offset: key_offset as u8,
90 value_offset,
91 }
92}
93
/// Total backing-memory bytes needed: header plus `capacity` buckets.
///
/// NOTE(review): the sum is computed in usize and truncated with `as u32`;
/// extreme capacity/bucket_size combinations would silently wrap — confirm
/// callers keep inputs bounded.
#[must_use]
pub const fn total_size(capacity: u16, bucket_size: u32) -> u32 {
    (MAP_BUCKETS_OFFSET + capacity as usize * bucket_size as usize) as u32
}
98
99#[must_use]
100pub fn layout(
101 key_size: u32,
102 key_alignment: u8,
103 value_size: u32,
104 value_alignment: u8,
105 logical_limit: u16,
106) -> (BucketLayout, MapInit) {
107 let capacity = logical_limit.next_power_of_two();
108 let bucket_layout =
109 calculate_bucket_layout(key_size, key_alignment, value_size, value_alignment);
110 (
111 bucket_layout,
112 MapInit {
113 key_size,
114 key_alignment,
115 value_size,
116 value_alignment,
117 capacity,
118 logical_limit,
119 total_size: total_size(capacity, bucket_layout.bucket_size),
120 },
121 )
122}
123
124pub unsafe fn init(map_base: *mut u8, config: &MapInit) {
131 unsafe {
132 debug_assert!(
133 config.capacity.is_power_of_two(),
134 "Capacity must be a power of two"
135 );
136
137 let map_header = map_base.cast::<MapHeader>();
138 let layout = calculate_bucket_layout(
139 config.key_size,
140 config.key_alignment,
141 config.value_size,
142 config.value_alignment,
143 );
144
145 unsafe {
147 ptr::write(
148 map_header,
149 MapHeader {
150 capacity: config.capacity,
151 logical_limit: config.logical_limit,
152 key_size: config.key_size,
153 value_size: config.value_size,
154 bucket_size: layout.bucket_size,
155 key_offset: layout.key_offset,
156 value_offset: layout.value_offset,
157 element_count: 0,
158 padding2: 0,
159 },
160 );
161 }
162
163 let buckets_start_ptr = map_base.add(MAP_BUCKETS_OFFSET);
165 let capacity = usize::from(config.capacity);
166 let bucket_size = layout.bucket_size as usize;
167
168 for i in 0..capacity {
170 unsafe {
171 ptr::write(
172 buckets_start_ptr.add(i * bucket_size),
173 BucketStatus::Empty as u8,
174 );
175 }
176 }
177 }
178}
179
/// Compares `len` bytes at `a` and `b` for equality.
///
/// Small power-of-two sizes (the common fixed-size-key case) use a single
/// integer load per side; every other length falls back to bytewise slice
/// comparison (a memcmp).
///
/// # Safety
/// Both pointers must be valid for reads of `len` bytes. No alignment is
/// required.
#[inline]
unsafe fn matches_key(a: *const u8, b: *const u8, len: usize) -> bool {
    unsafe {
        match len {
            0 => true,
            1 => *a == *b,
            // read_unaligned: key pointers are not guaranteed to be aligned
            // for the wider integer type, and a plain deref of the cast
            // pointer would be UB on misaligned input.
            2 => a.cast::<u16>().read_unaligned() == b.cast::<u16>().read_unaligned(),
            4 => a.cast::<u32>().read_unaligned() == b.cast::<u32>().read_unaligned(),
            8 => a.cast::<u64>().read_unaligned() == b.cast::<u64>().read_unaligned(),
            _ => slice::from_raw_parts(a, len) == slice::from_raw_parts(b, len),
        }
    }
}
206
207#[inline]
218pub unsafe fn get_or_reserve_entry(base_ptr: *mut u8, key_ptr: *const u8) -> *mut u8 {
219 unsafe {
220 let header = &*base_ptr.cast::<MapHeader>();
221
222 let capacity = header.capacity as usize;
224 let key_size = header.key_size as usize;
225 let bucket_size = header.bucket_size as usize;
226 let key_offset = header.key_offset as usize;
227 let value_offset = header.value_offset as usize;
228
229 debug_assert_ne!(key_size, 0, "Key size cannot be zero");
230 debug_assert_ne!(capacity, 0, "Capacity cannot be zero");
231 debug_assert!(
232 capacity.is_power_of_two(),
233 "Capacity must be a power of two"
234 );
235
236 let buckets_ptr = base_ptr.add(MAP_BUCKETS_OFFSET);
237 let key_slice = slice::from_raw_parts(key_ptr, key_size);
238 let hash = calculate_hash_bytes(key_slice);
239
240 let mut index = hash as usize & (capacity - 1);
242
243 let mut first_tombstone = None;
245 let probe_limit = min(capacity, MAX_PROBE_DISTANCE);
246
247 for _ in 0..probe_limit {
248 let bucket_ptr = buckets_ptr.add(index * bucket_size);
249 let status = *bucket_ptr;
250
251 match status {
252 status if status == BucketStatus::Empty as u8 => {
253 let insert_index = first_tombstone.unwrap_or(index);
256 let target_bucket = buckets_ptr.add(insert_index * bucket_size);
257
258 *target_bucket = BucketStatus::Occupied as u8;
260 let target_key_ptr = target_bucket.add(key_offset);
261 ptr::copy_nonoverlapping(key_ptr, target_key_ptr, key_size);
262
263 let header_mut = &mut *base_ptr.cast::<MapHeader>();
265 header_mut.element_count += 1;
266
267 return target_bucket.add(value_offset);
268 }
269 status if status == BucketStatus::Occupied as u8 => {
270 let existing_key_ptr = bucket_ptr.add(key_offset);
272 if matches_key(existing_key_ptr, key_ptr, key_size) {
273 return bucket_ptr.add(value_offset);
274 }
275 }
276 status if status == BucketStatus::Tombstone as u8 => {
277 if first_tombstone.is_none() {
279 first_tombstone = Some(index);
280 }
281 }
282 _ => unreachable!(),
283 }
284
285 index = (index + 1) & (capacity - 1);
287 }
288
289 if let Some(tombstone_index) = first_tombstone {
291 let target_bucket = buckets_ptr.add(tombstone_index * bucket_size);
292
293 *target_bucket = BucketStatus::Occupied as u8;
295 let target_key_ptr = target_bucket.add(key_offset);
296 ptr::copy_nonoverlapping(key_ptr, target_key_ptr, key_size);
297
298 let header_mut = &mut *base_ptr.cast::<MapHeader>();
300 header_mut.element_count += 1;
301
302 return target_bucket.add(value_offset);
303 }
304
305 ptr::null_mut()
307 }
308}
309
310#[inline]
317#[must_use]
318pub unsafe fn has(base_ptr: *const u8, key_ptr: *const u8) -> bool {
319 unsafe { lookup(base_ptr.cast_mut(), key_ptr).is_null().not() }
320}
321
322#[inline]
333pub unsafe fn lookup(base_ptr: *mut u8, key_ptr: *const u8) -> *mut u8 {
334 unsafe {
335 let header = &*base_ptr.cast::<MapHeader>();
336
337 let capacity = header.capacity as usize;
338 let key_size = header.key_size as usize;
339 let bucket_size = header.bucket_size as usize;
340 let key_offset = header.key_offset as usize;
341 let value_offset = header.value_offset as usize;
342
343 debug_assert_ne!(key_size, 0, "Key size cannot be zero");
344 debug_assert_ne!(capacity, 0, "Capacity cannot be zero");
345 debug_assert!(
346 capacity.is_power_of_two(),
347 "Capacity must be a power of two"
348 );
349
350 let buckets_ptr = base_ptr.add(MAP_BUCKETS_OFFSET);
351 let key_slice = slice::from_raw_parts(key_ptr, key_size);
352 let hash = calculate_hash_bytes(key_slice);
353
354 let mut index = hash as usize & (capacity - 1);
356 let probe_limit = min(capacity, MAX_PROBE_DISTANCE);
357
358 for _ in 0..probe_limit {
359 let bucket_ptr = buckets_ptr.add(index * bucket_size);
360 let status = *bucket_ptr;
361
362 match status {
363 status if status == BucketStatus::Empty as u8 => {
364 return ptr::null_mut();
367 }
368 status if status == BucketStatus::Occupied as u8 => {
369 let existing_key_ptr = bucket_ptr.add(key_offset);
371 if matches_key(existing_key_ptr, key_ptr, key_size) {
372 return bucket_ptr.add(value_offset);
373 }
374 }
375 _ => {} }
377
378 index = (index + 1) & (capacity - 1);
379 }
380
381 ptr::null_mut()
383 }
384}
385
386#[inline]
397pub unsafe fn remove(base_ptr: *mut u8, key_ptr: *const u8) -> bool {
398 unsafe {
399 let header = &*base_ptr.cast::<MapHeader>();
400
401 let capacity = header.capacity as usize;
402 let key_size = header.key_size as usize;
403 let bucket_size = header.bucket_size as usize;
404 let key_offset = header.key_offset as usize;
405
406 debug_assert_ne!(key_size, 0, "Key size cannot be zero");
407 debug_assert_ne!(capacity, 0, "Capacity cannot be zero");
408 debug_assert!(
409 capacity.is_power_of_two(),
410 "Capacity must be a power of two"
411 );
412
413 let buckets_ptr = base_ptr.add(MAP_BUCKETS_OFFSET);
414 let key_slice = slice::from_raw_parts(key_ptr, key_size);
415 let hash = calculate_hash_bytes(key_slice);
416
417 let mut index = hash as usize & (capacity - 1);
419 let probe_limit = min(capacity, MAX_PROBE_DISTANCE);
420
421 for _ in 0..probe_limit {
422 let bucket_ptr = buckets_ptr.add(index * bucket_size);
423 let status = *bucket_ptr;
424
425 match status {
426 status if status == BucketStatus::Empty as u8 => {
427 return false;
429 }
430 status if status == BucketStatus::Occupied as u8 => {
431 let existing_key_ptr = bucket_ptr.add(key_offset);
433 if matches_key(existing_key_ptr, key_ptr, key_size) {
434 *bucket_ptr = BucketStatus::Tombstone as u8;
436
437 let header_mut = &mut *base_ptr.cast::<MapHeader>();
439 header_mut.element_count -= 1;
440
441 return true;
442 }
443 }
444 _ => {} }
446
447 index = (index + 1) & (capacity - 1);
448 }
449
450 false
452 }
453}
454
/// Copies every occupied entry of `source` into the map at `target_base`,
/// inserting new keys and overwriting values of existing ones. Target entries
/// whose keys do not appear in `source` are left untouched — this merges
/// rather than clears.
///
/// Returns false without copying anything if `source` holds more elements
/// than the target's `logical_limit`, or mid-way if an insertion fails.
///
/// NOTE(review): the up-front check ignores elements already present in the
/// target, and a mid-loop insertion failure leaves the target partially
/// updated — confirm callers tolerate both.
///
/// # Safety
/// Both pointers must reference maps initialized by `init` with identical
/// bucket/key/value layouts (checked only in debug builds).
#[inline]
pub unsafe fn overwrite(target_base: *mut u8, source: *const u8) -> bool {
    unsafe {
        let target_header = &mut *target_base.cast::<MapHeader>();
        let source_header = &*source.cast::<MapHeader>();

        // Reject when the source alone exceeds the target's logical limit.
        if target_header.logical_limit < source_header.element_count {
            return false;
        }

        debug_assert_eq!(
            target_header.bucket_size, source_header.bucket_size,
            "Incompatible bucket sizes"
        );
        debug_assert_eq!(
            target_header.key_size, source_header.key_size,
            "Incompatible key sizes"
        );
        debug_assert_eq!(
            target_header.value_size, source_header.value_size,
            "Incompatible value sizes"
        );

        let source_buckets_ptr = source.add(MAP_BUCKETS_OFFSET);
        let bucket_size = source_header.bucket_size as usize;
        let key_offset = source_header.key_offset as usize;
        let value_offset = source_header.value_offset as usize;
        let value_size = source_header.value_size as usize;

        // Walk every source bucket and re-insert the occupied ones by key.
        for i in 0..source_header.capacity as usize {
            let source_bucket = source_buckets_ptr.add(i * bucket_size);

            if *source_bucket == BucketStatus::Occupied as u8 {
                let source_key_ptr = source_bucket.add(key_offset);
                let source_value_ptr = source_bucket.add(value_offset);

                // Reuses the target's own probe/insert logic; the returned
                // slot is valid whether the key was new or already present.
                let target_value_ptr = get_or_reserve_entry(target_base, source_key_ptr);

                if target_value_ptr.is_null() {
                    return false;
                }

                ptr::copy_nonoverlapping(source_value_ptr, target_value_ptr, value_size);
            }
        }

        true
    }
}
517
518#[inline]
529pub unsafe fn find_next_valid_entry(base: *mut u8, start_index: u16) -> (*const u8, *mut u8, u16) {
530 unsafe {
531 let map_header = &*base.cast::<MapHeader>();
532 let bucket_size = map_header.bucket_size as usize;
533 let buckets_start = base.add(MAP_BUCKETS_OFFSET);
534 let key_offset = map_header.key_offset as usize;
535 let value_offset = map_header.value_offset as usize;
536
537 let mut index = start_index as usize;
538
539 while index < map_header.capacity as usize {
540 let entry_ptr = buckets_start.add(index * bucket_size);
541
542 if *entry_ptr == BucketStatus::Occupied as u8 {
544 let key_addr = entry_ptr.add(key_offset);
545 let value_addr = entry_ptr.add(value_offset);
546
547 return (key_addr, value_addr, index as u16);
548 }
549
550 index += 1;
551 }
552
553 (ptr::null(), ptr::null_mut(), 0xFFFF)
554 }
555}