use crate::array::*;
#[doc(alias("One-sided", "onesided"))]
/// The interface for remotely writing elements
///
/// These operations can be performed using any [LamellarWriteArray] type
///
/// Both single element operations and batched element operations are provided
///
/// Generally if you are performing a large number of operations it will be better to
/// use a batched version instead of multiple single element operations. While the
/// runtime internally performs message aggregation for both single element and batched
/// operations, single element operations have to be treated as individual requests, resulting
/// in allocation and bookkeeping overheads. A single batched call on the other hand is treated
/// as a single request by the runtime. (See [ReadOnlyOps] for an example comparing single vs batched load operations of a list of indices)
///
/// The results of a batched operation are returned to the user in the same order as the input indices.
///
/// # One-sided Operation
/// Both single and batched operations are one-sided, with the calling PE performing any necessary work to
/// initiate and execute active messages that are sent to remote PEs.
/// For ops that return results, the result will only be available on the calling PE.
///
/// # Note
/// For both single index and batched operations there are no guarantees to the order in which individual operations occur (an individual operation is guaranteed to be atomic though).
///
/// # Batched Types
/// Three types of batched operations can be performed
/// ## One Value - Many Indices
/// In this type, the same value will be applied to the provided indices
///```
/// use lamellar::array::prelude::*;
///
/// let world = LamellarWorldBuilder::new().build();
/// let array = AtomicArray::<usize>::new(&world,100,Distribution::Block);
///
/// let indices = vec![3,54,12,88,29,68];
/// let val = 10;
/// array.block_on(array.batch_store(indices,val));
///```
/// ## Many Values - One Index
/// In this type, multiple values will be applied to the given index
///```
/// use lamellar::array::prelude::*;
///
/// let world = LamellarWorldBuilder::new().build();
/// let array = AtomicArray::<usize>::new(&world,100,Distribution::Block);
///
/// let vals = vec![3,54,12,88,29,68];
/// let index = 10;
/// array.block_on(array.batch_store(index,vals));
///```
/// ## Many Values - Many Indices
/// In this type, values and indices have a one-to-one correspondence.
///
/// If the two lists are unequal in length, the longer of the two will be truncated so that it matches the length of the shorter
///```
/// use lamellar::array::prelude::*;
///
/// let world = LamellarWorldBuilder::new().build();
/// let array = AtomicArray::<usize>::new(&world,100,Distribution::Block);
///
/// let indices = vec![3,54,12,88,29,68];
/// let vals = vec![12,2,1,10000,12,13];
/// array.block_on(array.batch_store(indices,vals));
///```
pub trait AccessOps<T: ElementOps>: private::LamellarArrayPrivate<T> {
    /// This call stores the supplied `val` into the element specified by `index`
    ///
    /// A future is returned as the result of this call, which is used to detect when the operation has completed
    ///
    /// # Note
    /// This future is only lazy with respect to checking for completion, not
    /// with respect to launching the operation. That is, the operation will
    /// occur regardless of if the future is ever polled or not, enabling
    /// a "fire and forget" programming model.
    ///
    /// # Examples
    ///
    ///```
    /// use lamellar::array::prelude::*;
    ///
    /// let world = LamellarWorldBuilder::new().build();
    /// let array = AtomicArray::<usize>::new(&world,100,Distribution::Block);
    ///
    /// let idx = 53;
    /// let val = 10;
    /// let req = array.store(idx,val);
    /// array.block_on(req);
    ///```
    #[tracing::instrument(skip_all)]
    fn store<'a>(&self, index: usize, val: T) -> Pin<Box<dyn Future<Output = ()> + Send>> {
        // A single-element store is funneled through the same batched-op
        // machinery as `batch_store`; the runtime handles aggregation.
        self.inner_array().initiate_batch_op(
            val,
            index,
            ArrayOpCmd::Store,
            self.as_lamellar_byte_array(),
        )
    }
    /// This call performs a batched version of the [store][AccessOps::store] function,
    ///
    /// Instead of a single value and index this function expects a list of `vals`, or a list of `indices` or both.
    /// Please see the general [AccessOps] documentation for more information on batch operation input
    ///
    /// A future is returned as the result of this call, which is used to detect when the operation has completed
    ///
    /// # Note
    /// This future is only lazy with respect to checking for completion, not
    /// with respect to launching the operation. That is, the operation will
    /// occur regardless of if the future is ever polled or not, enabling
    /// a "fire and forget" programming model.
    ///
    /// # Examples
    ///
    ///```
    /// use lamellar::array::prelude::*;
    ///
    /// let world = LamellarWorldBuilder::new().build();
    /// let array = AtomicArray::<usize>::new(&world,100,Distribution::Block);
    ///
    /// let indices = vec![3,54,12,88,29,68];
    /// let req = array.batch_store(indices,10);
    /// array.block_on(req);
    ///```
    #[tracing::instrument(skip_all)]
    fn batch_store<'a>(
        &self,
        index: impl OpInput<'a, usize>,
        val: impl OpInput<'a, T>,
    ) -> Pin<Box<dyn Future<Output = ()> + Send>> {
        self.inner_array().initiate_batch_op(
            val,
            index,
            ArrayOpCmd::Store,
            self.as_lamellar_byte_array(),
        )
    }
    /// This call swaps the supplied `val` into the element specified by `index`, returning the old value
    ///
    /// A future is returned as the result of this call, which is used to retrieve
    /// the results after the (possibly remote) operations have finished.
    ///
    /// # Note
    /// This future is only lazy with respect to retrieving the result, not
    /// with respect to launching the operation. That is, the operation will
    /// occur regardless of if the future is ever polled or not, enabling
    /// a "fire and forget" programming model.
    ///
    /// # Examples
    ///
    ///```
    /// use lamellar::array::prelude::*;
    ///
    /// let world = LamellarWorldBuilder::new().build();
    /// let array = AtomicArray::<usize>::new(&world,100,Distribution::Block);
    ///
    /// let idx = 53;
    /// let new = 10;
    /// let req = array.swap(idx,new);
    /// let old = array.block_on(req);
    ///```
    #[tracing::instrument(skip_all)]
    fn swap<'a>(&self, index: usize, val: T) -> Pin<Box<dyn Future<Output = T> + Send>> {
        let result = self.inner_array().initiate_batch_fetch_op_2(
            val,
            index,
            ArrayOpCmd::Swap,
            self.as_lamellar_byte_array(),
        );
        // The batched fetch-op resolves to a collection of results; for a
        // single-index swap it holds exactly one entry, which we extract here.
        Box::pin(async move { result.await[0] })
    }
    /// This call performs a batched version of the [swap][AccessOps::swap] function,
    ///
    /// Instead of a single value and index this function expects a list of `vals`, or a list of `indices` or both.
    /// Please see the general [AccessOps] documentation for more information on batch operation input
    ///
    /// A future is returned as the result of this call, which is used to retrieve
    /// the results after the (possibly remote) operations have finished.
    ///
    /// # Note
    /// This future is only lazy with respect to checking for completion, not
    /// with respect to launching the operation. That is, the operation will
    /// occur regardless of if the future is ever polled or not, enabling
    /// a "fire and forget" programming model.
    ///
    /// # Examples
    ///
    ///```
    /// use lamellar::array::prelude::*;
    ///
    /// let world = LamellarWorldBuilder::new().build();
    /// let array = AtomicArray::<usize>::new(&world,100,Distribution::Block);
    ///
    /// let indices = vec![3,54,12,88,29,68];
    /// let req = array.batch_swap(indices,10);
    /// let old_vals = array.block_on(req);
    ///```
    #[tracing::instrument(skip_all)]
    fn batch_swap<'a>(
        &self,
        index: impl OpInput<'a, usize>,
        val: impl OpInput<'a, T>,
    ) -> Pin<Box<dyn Future<Output = Vec<T>> + Send>> {
        self.inner_array().initiate_batch_fetch_op_2(
            val,
            index,
            ArrayOpCmd::Swap,
            self.as_lamellar_byte_array(),
        )
    }
}
#[doc(hidden)]
/// Internal interface for element-wise load/store/swap operations applied
/// directly to an array's local data (no signatures here involve futures,
/// so these appear to operate synchronously on the calling PE's portion).
///
/// The trait name suggests implementations are expected to perform these
/// operations atomically, consistent with the per-operation atomicity
/// guarantee described on [AccessOps].
pub trait LocalAtomicOps<T: Dist + ElementOps> {
    /// Returns the element at `index` from local data.
    // NOTE(review): a load conceptually needs no input value, yet `val` is
    // part of the signature — presumably to keep a uniform shape with the
    // other ops; confirm whether implementors actually use it.
    fn local_load(&self, index: usize, val: T) -> T;
    /// Stores `val` into the element at `index` in local data.
    fn local_store(&self, index: usize, val: T);
    /// Stores `val` into the element at `index` in local data, returning the
    /// previous value.
    fn local_swap(&self, index: usize, val: T) -> T;
}