// lockless/containers/atomic_cell_array.rs
use std::sync::atomic::{AtomicUsize, Ordering};
2use std::cell::UnsafeCell;
3use std::marker::PhantomData;
4
5use handle::{HandleInner, Handle, IdHandle, ResizingHandle, BoundedHandle, Tag0, HandleInnerBase, ContainerInner, HandleInner1, Id};
6use primitives::index_allocator::IndexAllocator;
7use primitives::invariant::Invariant;
8
/// Shared storage backing a lock-free array of swappable cells.
///
/// Layout invariant (see `swap`): `values` holds one slot per cell plus one
/// spare slot per accessor ID; `current[i]` is the index of the slot that
/// currently holds cell `i`'s value; `indices[id]` is the index of the spare
/// slot privately owned by accessor `id`.
#[derive(Debug)]
pub struct AtomicCellArrayInner<T, Tag> {
    values: Vec<UnsafeCell<Option<T>>>, // slot storage: cells + per-ID spares
    indices: Vec<UnsafeCell<usize>>,    // per-accessor: index of its spare slot
    current: Vec<AtomicUsize>,          // per-cell: index of the slot holding its value
    phantom: Invariant<Tag>,            // ties the Tag type parameter invariantly
}
16
// SAFETY: shared access to the `UnsafeCell`s is mediated by the atomic
// `current` indices together with per-ID ownership of `indices[id]` and the
// spare slots (see `swap`). Values of type `T` move between threads through
// the cells, hence the `T: Send` bound.
// NOTE(review): soundness also relies on callers upholding `swap`'s contract
// that each accessor ID is used by at most one thread at a time — confirm
// against the IdHandle implementation.
unsafe impl<T: Send, Tag> Sync for AtomicCellArrayInner<T, Tag> {}
18
19impl<T, Tag> ContainerInner<Tag> for AtomicCellArrayInner<T, Tag> {
20 fn raise_id_limit(&mut self, new_limit: usize) {
21 assert!(new_limit > self.id_limit());
22
23 let extra_len = new_limit - self.id_limit();
24 self.values.reserve_exact(extra_len);
25 self.indices.reserve_exact(extra_len);
26 for _ in 0..extra_len {
27 self.indices.push(UnsafeCell::new(self.values.len()));
28 self.values.push(UnsafeCell::new(None));
29 }
30 }
31 fn id_limit(&self) -> usize {
32 self.indices.len()
33 }
34}
35
impl<T, Tag> AtomicCellArrayInner<T, Tag> {
    /// Reserves capacity for `additional_cells` more cells and
    /// `additional_ids` more accessor IDs without changing the contents.
    ///
    /// `values` grows by both counts because every cell *and* every ID
    /// owns one slot in it.
    pub fn reserve_exact(&mut self, additional_cells: usize, additional_ids: usize) {
        self.values.reserve_exact(additional_cells + additional_ids);
        self.indices.reserve_exact(additional_ids);
        self.current.reserve_exact(additional_cells);
    }

    /// Stores `value` in a fresh slot appended to `values` and returns an
    /// atomic index referring to that slot.
    fn place(&mut self, value: T) -> AtomicUsize {
        let id = self.values.len();
        self.values.push(UnsafeCell::new(Some(value)));
        AtomicUsize::new(id)
    }

    /// Appends a new cell holding `value` at the end of the array.
    pub fn push(&mut self, value: T) {
        let idx = self.place(value);
        self.current.push(idx);
    }

    /// Inserts a new cell holding `value` at position `index`, shifting
    /// later cells right. Panics if `index > len` (Vec::insert semantics).
    pub fn insert(&mut self, index: usize, value: T) {
        let idx = self.place(value);
        self.current.insert(index, idx)
    }

    /// Creates an empty array with room reserved for `capacity` cells,
    /// supporting up to `id_limit` accessor IDs.
    pub fn with_capacity(capacity: usize, id_limit: usize) -> Self {
        let mut result = AtomicCellArrayInner {
            values: Vec::new(),
            indices: Vec::new(),
            current: Vec::new(),
            phantom: PhantomData
        };
        result.reserve_exact(capacity, id_limit);
        result.raise_id_limit(id_limit);
        result
    }

    /// Creates an empty array supporting up to `id_limit` accessor IDs.
    pub fn new(id_limit: usize) -> Self {
        Self::with_capacity(0, id_limit)
    }

    /// Atomically swaps `value` into the cell at `index`, returning the
    /// previous value.
    ///
    /// Algorithm: the accessor writes the new value into its private spare
    /// slot, atomically exchanges that slot's index with `current[index]`,
    /// and then takes the value out of the slot it received — which is now
    /// privately owned, since no other accessor can reach it anymore.
    ///
    /// # Safety
    /// `id` must be a valid accessor ID (`< id_limit`) not used concurrently
    /// by any other thread, and `index` must be `< current.len()`; otherwise
    /// this races on the `UnsafeCell`s or indexes out of bounds.
    pub unsafe fn swap(&self, index: usize, id: Id<Tag>, value: T) -> T {
        let id: usize = id.into();
        // Only the owner of `id` touches `indices[id]`, so this exclusive
        // dereference of the UnsafeCell contents is race-free.
        let ref mut idx = *(&self.indices[id]).get();
        // Stage the new value in our private spare slot before publishing it.
        *(&self.values[*idx]).get() = Some(value);
        // Publish our slot and acquire the one previously held by the cell.
        // AcqRel: Release makes our staged write visible to the next swapper;
        // Acquire makes the previous owner's write visible to us.
        *idx = self.current[index].swap(*idx, Ordering::AcqRel);
        // The acquired slot is now ours alone; take its value, leaving the
        // slot empty for our next swap.
        (*self.values[*idx].get()).take().expect("Cell should contain a value!")
    }
}
85
/// Handle to a shared array of cells supporting lock-free value swaps.
///
/// Wraps an `IdHandle` so each accessor gets a unique ID, which `swap`
/// relies on for its safety contract. Cloning produces another handle to
/// the same underlying array.
#[derive(Debug)]
pub struct AtomicCellArray<H: Handle, Tag>(IdHandle<Tag, H>) where H::HandleInner: HandleInner<Tag>;
88
89impl<T, H: Handle, Tag> AtomicCellArray<H, Tag> where H::HandleInner: HandleInnerBase<ContainerInner=AtomicCellArrayInner<T, Tag>> + HandleInner<Tag> {
90 pub fn new(max_accessors: usize) -> Self {
91 AtomicCellArray(IdHandle::new(&HandleInnerBase::new(AtomicCellArrayInner::new(max_accessors))))
92 }
93
94 pub fn swap(&mut self, index: usize, value: T) -> T {
95 self.0.with_mut(move |inner, id| unsafe { inner.swap(index, id, value) })
96 }
97
98 pub fn len(&self) -> usize {
99 self.0.with(|inner| inner.current.len())
100 }
101}
102
103impl<H: Handle, Tag> Clone for AtomicCellArray<H, Tag> where H::HandleInner: HandleInner<Tag> {
104 fn clone(&self) -> Self {
105 AtomicCellArray(self.0.clone())
106 }
107}
108
// Handle-inner layout: an IndexAllocator (for accessor IDs, keyed by Tag0)
// paired with the cell-array container itself.
type Inner<T> = HandleInner1<Tag0, IndexAllocator, AtomicCellArrayInner<T, Tag0>>;
/// Cell array whose accessor-ID limit grows as needed.
/// NOTE(review): growth semantics presumed from `ResizingHandle` — confirm.
pub type ResizingAtomicCellArray<T> = AtomicCellArray<ResizingHandle<Inner<T>>, Tag0>;
/// Cell array with a fixed upper bound on concurrent accessors.
pub type BoundedAtomicCellArray<T> = AtomicCellArray<BoundedHandle<Inner<T>>, Tag0>;