1use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
9use std::ptr::{self, null_mut};
10use std::marker::PhantomData;
11use std::alloc::{alloc_zeroed, dealloc, Layout};
12use std::hash::{Hash, Hasher};
13use std::collections::hash_map::DefaultHasher;
14
/// One link in the append-only singly linked list.
struct Node<T> {
    // Payload; never moved or mutated after the node is published.
    data: T,
    // Successor pointer; null marks the end of the list. `append` only ever
    // CASes this from null to non-null, hence the atomic.
    next: AtomicPtr<Node<T>>,
}
20
/// A lock-free, append-only singly linked list.
///
/// Nodes are only added at the tail and are freed solely in `Drop`, so a
/// traversal never observes a dangling `next` while the list is alive.
pub struct AppendOnlyList<T> {
    // First node, or null while the list is empty.
    head: AtomicPtr<Node<T>>,
    // Last known node; may briefly lag behind the true tail (subsequent
    // `append` calls swing it forward).
    tail: AtomicPtr<Node<T>>,
    // Element count, maintained with relaxed ops (approximate under races).
    size: AtomicUsize,
    // Marks logical ownership of `T` for drop-check/variance purposes,
    // since the raw pointers alone do not convey it.
    _phantom: PhantomData<T>,
}
31
32impl<T> AppendOnlyList<T> {
33 pub fn new() -> Self {
34 Self {
35 head: AtomicPtr::new(null_mut()),
36 tail: AtomicPtr::new(null_mut()),
37 size: AtomicUsize::new(0),
38 _phantom: PhantomData,
39 }
40 }
41
42 pub fn append(&self, data: T) {
44 let new_node = Box::into_raw(Box::new(Node {
45 data,
46 next: AtomicPtr::new(null_mut()),
47 }));
48
49 loop {
50 let tail = self.tail.load(Ordering::Acquire);
51
52 if tail.is_null() {
53 match self.head.compare_exchange(
55 null_mut(),
56 new_node,
57 Ordering::Release,
58 Ordering::Acquire
59 ) {
60 Ok(_) => {
61 self.tail.store(new_node, Ordering::Release);
62 self.size.fetch_add(1, Ordering::Relaxed);
63 return;
64 }
65 Err(_) => continue,
66 }
67 } else {
68 let tail_next = unsafe { &(*tail).next };
70
71 match tail_next.compare_exchange(
72 null_mut(),
73 new_node,
74 Ordering::Release,
75 Ordering::Acquire
76 ) {
77 Ok(_) => {
78 let _ = self.tail.compare_exchange(
80 tail,
81 new_node,
82 Ordering::Release,
83 Ordering::Acquire
84 );
85 self.size.fetch_add(1, Ordering::Relaxed);
86 return;
87 }
88 Err(_) => {
89 let actual_tail = tail_next.load(Ordering::Acquire);
91 if !actual_tail.is_null() {
92 let _ = self.tail.compare_exchange(
93 tail,
94 actual_tail,
95 Ordering::Release,
96 Ordering::Acquire
97 );
98 }
99 }
100 }
101 }
102 }
103 }
104
105 pub fn len(&self) -> usize {
107 self.size.load(Ordering::Relaxed)
108 }
109
110 pub fn iter(&self) -> AppendOnlyIterator<T> {
112 AppendOnlyIterator {
113 current: self.head.load(Ordering::Acquire),
114 _phantom: PhantomData,
115 }
116 }
117}
118
/// Iterator over an `AppendOnlyList`, yielding raw `*const T` pointers.
///
/// NOTE(review): no lifetime links this to the list it came from — the
/// caller must not outlive the list while holding yielded pointers.
pub struct AppendOnlyIterator<T> {
    // Next node to visit; null once exhausted.
    current: *mut Node<T>,
    _phantom: PhantomData<T>,
}
123
124impl<T> Iterator for AppendOnlyIterator<T> {
125 type Item = *const T;
126
127 fn next(&mut self) -> Option<Self::Item> {
128 if self.current.is_null() {
129 None
130 } else {
131 unsafe {
132 let node = &*self.current;
133 let data_ptr = &node.data as *const T;
134 self.current = node.next.load(Ordering::Acquire);
135 Some(data_ptr)
136 }
137 }
138 }
139}
140
/// A fixed-capacity array of atomically swappable slots.
///
/// Each slot is either null (empty) or a `Box`-allocated `T`; `set` and
/// `remove` exchange the slot pointer atomically.
pub struct IndexedList<T> {
    // Heap array of `capacity` atomic slot pointers.
    data: *mut AtomicPtr<T>,
    // Fixed slot count; never changes after construction.
    capacity: usize,
    // Number of occupied slots (relaxed; approximate under races).
    size: AtomicUsize,
    // Conveys logical ownership of `T` to the drop checker.
    _phantom: PhantomData<T>,
}
151
152impl<T> IndexedList<T> {
153 pub fn new(capacity: usize) -> Self {
154 let layout = Layout::array::<AtomicPtr<T>>(capacity).unwrap();
155 let data = unsafe {
156 let ptr = alloc_zeroed(layout) as *mut AtomicPtr<T>;
157 for i in 0..capacity {
158 ptr::write(ptr.add(i), AtomicPtr::new(null_mut()));
159 }
160 ptr
161 };
162
163 Self {
164 data,
165 capacity,
166 size: AtomicUsize::new(0),
167 _phantom: PhantomData,
168 }
169 }
170
171 pub fn set(&self, index: usize, value: T) -> Result<Option<T>, T> {
173 if index >= self.capacity {
174 return Err(value);
175 }
176
177 let ptr = Box::into_raw(Box::new(value));
178 let slot = unsafe { &*self.data.add(index) };
179 let old = slot.swap(ptr, Ordering::AcqRel);
180
181 if old.is_null() {
182 self.size.fetch_add(1, Ordering::Relaxed);
183 Ok(None)
184 } else {
185 Ok(Some(unsafe { *Box::from_raw(old) }))
186 }
187 }
188
189 pub fn get(&self, index: usize) -> Option<*const T> {
191 if index >= self.capacity {
192 return None;
193 }
194
195 let slot = unsafe { &*self.data.add(index) };
196 let ptr = slot.load(Ordering::Acquire);
197
198 if ptr.is_null() {
199 None
200 } else {
201 Some(ptr as *const T)
202 }
203 }
204
205 pub fn remove(&self, index: usize) -> Option<T> {
207 if index >= self.capacity {
208 return None;
209 }
210
211 let slot = unsafe { &*self.data.add(index) };
212 let ptr = slot.swap(null_mut(), Ordering::AcqRel);
213
214 if ptr.is_null() {
215 None
216 } else {
217 self.size.fetch_sub(1, Ordering::Relaxed);
218 Some(unsafe { *Box::from_raw(ptr) })
219 }
220 }
221
222 pub fn capacity(&self) -> usize {
223 self.capacity
224 }
225
226 pub fn len(&self) -> usize {
227 self.size.load(Ordering::Relaxed)
228 }
229}
230
/// A lock-free hash set: fixed array of buckets, each a lock-free
/// singly linked chain of `SetNode`s (insert pushes at the bucket head).
pub struct UnorderedSet<T: Hash + Eq> {
    // Heap array of `bucket_count` bucket heads.
    buckets: *mut Bucket<T>,
    // Always a power of two (see `with_capacity`), and at least 1.
    bucket_count: usize,
    // `bucket_count - 1`; ANDed with the hash to pick a bucket.
    mask: usize,
    // Element count (relaxed; approximate under races).
    size: AtomicUsize,
    // Conveys logical ownership of `T` to the drop checker.
    _phantom: PhantomData<T>,
}
242
// 64-byte aligned — presumably to give each bucket head its own cache
// line and avoid false sharing between adjacent buckets (TODO confirm the
// target's cache-line size).
#[repr(align(64))]
struct Bucket<T> {
    // Head of this bucket's chain; null when the bucket is empty.
    head: AtomicPtr<SetNode<T>>,
}
248
/// One element in a bucket chain.
struct SetNode<T> {
    data: T,
    // Cached full hash; lets lookups skip `Eq` comparisons on mismatches.
    hash: u64,
    // Next node in the same bucket, or null at the end of the chain.
    next: AtomicPtr<SetNode<T>>,
}
254
255impl<T: Hash + Eq> UnorderedSet<T> {
256 pub fn new() -> Self {
257 Self::with_capacity(1024)
258 }
259
260 pub fn with_capacity(capacity: usize) -> Self {
261 let bucket_count = capacity.next_power_of_two();
262 let layout = Layout::array::<Bucket<T>>(bucket_count).unwrap();
263
264 let buckets = unsafe {
265 let ptr = alloc_zeroed(layout) as *mut Bucket<T>;
266 for i in 0..bucket_count {
267 ptr::write(ptr.add(i), Bucket {
268 head: AtomicPtr::new(null_mut()),
269 });
270 }
271 ptr
272 };
273
274 Self {
275 buckets,
276 bucket_count,
277 mask: bucket_count - 1,
278 size: AtomicUsize::new(0),
279 _phantom: PhantomData,
280 }
281 }
282
283 fn hash(item: &T) -> u64 {
284 let mut hasher = DefaultHasher::new();
285 item.hash(&mut hasher);
286 hasher.finish()
287 }
288
289 pub fn insert(&self, item: T) -> bool {
291 let hash = Self::hash(&item);
292 let bucket_idx = (hash as usize) & self.mask;
293 let bucket = unsafe { &*self.buckets.add(bucket_idx) };
294
295 let mut current = bucket.head.load(Ordering::Acquire);
297 while !current.is_null() {
298 let node = unsafe { &*current };
299 if node.hash == hash && node.data == item {
300 return false; }
302 current = node.next.load(Ordering::Acquire);
303 }
304
305 let new_node = Box::into_raw(Box::new(SetNode {
307 data: item,
308 hash,
309 next: AtomicPtr::new(null_mut()),
310 }));
311
312 loop {
313 let head = bucket.head.load(Ordering::Acquire);
314 unsafe {
315 (*new_node).next.store(head, Ordering::Relaxed);
316 }
317
318 match bucket.head.compare_exchange(
319 head,
320 new_node,
321 Ordering::Release,
322 Ordering::Acquire
323 ) {
324 Ok(_) => {
325 self.size.fetch_add(1, Ordering::Relaxed);
326 return true;
327 }
328 Err(_) => continue,
329 }
330 }
331 }
332
333 pub fn contains(&self, item: &T) -> bool {
335 let hash = Self::hash(item);
336 let bucket_idx = (hash as usize) & self.mask;
337 let bucket = unsafe { &*self.buckets.add(bucket_idx) };
338
339 let mut current = bucket.head.load(Ordering::Acquire);
340 while !current.is_null() {
341 let node = unsafe { &*current };
342 if node.hash == hash && node.data == *item {
343 return true;
344 }
345 current = node.next.load(Ordering::Acquire);
346 }
347 false
348 }
349
350 pub fn remove(&self, item: &T) -> bool {
352 let hash = Self::hash(item);
353 let bucket_idx = (hash as usize) & self.mask;
354 let bucket = unsafe { &*self.buckets.add(bucket_idx) };
355
356 let mut prev = &bucket.head as *const AtomicPtr<SetNode<T>>;
358 let mut current = bucket.head.load(Ordering::Acquire);
359
360 while !current.is_null() {
361 let node = unsafe { &*current };
362 if node.hash == hash && node.data == *item {
363 let next = node.next.load(Ordering::Acquire);
364
365 let prev_atomic = unsafe { &*prev };
366 match prev_atomic.compare_exchange(
367 current,
368 next,
369 Ordering::Release,
370 Ordering::Acquire
371 ) {
372 Ok(_) => {
373 self.size.fetch_sub(1, Ordering::Relaxed);
374 unsafe { Box::from_raw(current); }
375 return true;
376 }
377 Err(_) => return false, }
379 }
380
381 prev = &node.next as *const AtomicPtr<SetNode<T>>;
382 current = node.next.load(Ordering::Acquire);
383 }
384 false
385 }
386
387 pub fn len(&self) -> usize {
388 self.size.load(Ordering::Relaxed)
389 }
390}
391
392impl<T> Drop for AppendOnlyList<T> {
394 fn drop(&mut self) {
395 let mut current = self.head.load(Ordering::Acquire);
396 while !current.is_null() {
397 let next = unsafe {
398 let node = Box::from_raw(current);
399 node.next.load(Ordering::Acquire)
400 };
401 current = next;
402 }
403 }
404}
405
406impl<T> Drop for IndexedList<T> {
407 fn drop(&mut self) {
408 for i in 0..self.capacity {
409 let slot = unsafe { &*self.data.add(i) };
410 let ptr = slot.load(Ordering::Acquire);
411 if !ptr.is_null() {
412 unsafe { Box::from_raw(ptr); }
413 }
414 }
415
416 let layout = Layout::array::<AtomicPtr<T>>(self.capacity).unwrap();
417 unsafe {
418 dealloc(self.data as *mut u8, layout);
419 }
420 }
421}
422
423impl<T: Hash + Eq> Drop for UnorderedSet<T> {
424 fn drop(&mut self) {
425 for i in 0..self.bucket_count {
426 let bucket = unsafe { &*self.buckets.add(i) };
427 let mut current = bucket.head.load(Ordering::Acquire);
428
429 while !current.is_null() {
430 let next = unsafe {
431 let node = Box::from_raw(current);
432 node.next.load(Ordering::Acquire)
433 };
434 current = next;
435 }
436 }
437
438 let layout = Layout::array::<Bucket<T>>(self.bucket_count).unwrap();
439 unsafe {
440 dealloc(self.buckets as *mut u8, layout);
441 }
442 }
443}
444
// SAFETY: the list owns its `T` values (freed in Drop), so moving the whole
// structure between threads is sound when `T: Send`.
unsafe impl<T: Send> Send for AppendOnlyList<T> {}
// SAFETY: shared use (`append` from one thread, `iter` from another) exposes
// `&T`-like access across threads, so `T: Sync` is required on top of Send.
unsafe impl<T: Send + Sync> Sync for AppendOnlyList<T> {}

// SAFETY: same reasoning as AppendOnlyList — owned `T` values, so Send
// needs `T: Send`.
unsafe impl<T: Send> Send for IndexedList<T> {}
// SAFETY: `set`/`remove` move `T` values between threads and `get` exposes
// shared pointers to them, hence `T: Send + Sync`.
unsafe impl<T: Send + Sync> Sync for IndexedList<T> {}

// SAFETY: the set owns its elements; moving it moves the `T`s, so `T: Send`
// suffices for Send.
unsafe impl<T: Send + Hash + Eq> Send for UnorderedSet<T> {}
// SAFETY: concurrent `insert`/`contains`/`remove` compare `&T` across
// threads and move `T` in, so both `Send` and `Sync` are required.
unsafe impl<T: Send + Sync + Hash + Eq> Sync for UnorderedSet<T> {}