use std::{
    cell::UnsafeCell,
    hint::unreachable_unchecked,
    marker::PhantomData,
    ops::{Index, IndexMut},
    rc::Rc,
    sync::{
        atomic::{AtomicU32, Ordering},
        TryLockError, TryLockResult,
    },
};

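/// A lock over a `Vec<T>` that locks slices in a repeating (cyclic) pattern.
///
/// The vector is viewed as consecutive slices of `slice_len` elements,
/// grouped into repeat cycles of `cycle_len` slices each. Locking a
/// `cycle_offset` grants exclusive access to the slice at that offset in
/// every repeat cycle, so distinct offsets can be locked concurrently.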
#[derive(Debug)]
pub struct RepVecRangeLock<T> {
    /// Length of one slice, in number of elements.
    slice_len: usize,
    /// Length of one repeat cycle, in number of slices.
    cycle_len: usize,
    /// Length of one repeat cycle, in number of elements (`slice_len * cycle_len`).
    cycle_num_elems: usize,
    /// Bitmap of locked cycle offsets; one bit per offset.
    locked_offsets: Vec<AtomicU32>,
    /// The protected data.
    data: UnsafeCell<Vec<T>>,
}

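// SAFETY: All concurrent access to the inner `UnsafeCell` is guarded by the
// `locked_offsets` atomic bitmap, so sharing the lock across threads is sound
// whenever `T` itself can be sent between threads.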
unsafe impl<T> Sync for RepVecRangeLock<T> where T: Send {}

impl<'a, T> RepVecRangeLock<T> {
    pub fn new(data: Vec<T>, slice_len: usize, cycle_len: usize) -> RepVecRangeLock<T> {
        if slice_len == 0 {
            panic!("slice_len must not be 0.");
        }
        if cycle_len == 0 || cycle_len > usize::MAX - 31 {
            panic!("cycle_len out of range.");
        }
        let Some(cycle_num_elems) = cycle_len.checked_mul(slice_len) else {
            panic!("Repeat cycle overflow.");
        };

        // One lock bit per cycle offset, packed into 32-bit words.
        let num = (cycle_len + 31) / 32;
        let mut locked_offsets = Vec::with_capacity(num);
        locked_offsets.resize_with(num, || AtomicU32::new(0));

        let data = UnsafeCell::new(data);

        RepVecRangeLock {
            slice_len,
            cycle_len,
            cycle_num_elems,
            locked_offsets,
            data,
        }
    }

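    /// Returns the length of the embedded `Vec`.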
    #[inline]
    pub fn data_len(&self) -> usize {
        unsafe { (*self.data.get()).len() }
    }

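    /// Consumes the lock and returns the embedded `Vec`.
    ///
    /// Debug builds assert that no offsets are still locked.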
    #[inline]
    pub fn into_inner(self) -> Vec<T> {
        debug_assert!(self
            .locked_offsets
            .iter()
            .all(|x| x.load(Ordering::Acquire) == 0));
        self.data.into_inner()
    }

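    /// Tries to lock the given `cycle_offset` without blocking.
    ///
    /// On success, returns a guard that grants access to the slice at this
    /// offset in every repeat cycle. Fails with `TryLockError::WouldBlock`
    /// if the offset is already locked.
    ///
    /// Panics if `cycle_offset >= cycle_len`.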
    #[inline]
    pub fn try_lock(&'a self, cycle_offset: usize) -> TryLockResult<RepVecRangeLockGuard<'a, T>> {
        if cycle_offset >= self.cycle_len {
            panic!("Invalid cycle_offset. It must be 0 <= cycle_offset < cycle_len.");
        }
        // Word index and bit mask of this offset's lock bit.
        let idx = cycle_offset / 32;
        let mask = 1 << (cycle_offset % 32);
        // Atomically set the bit; if it was already set, somebody else holds the lock.
        let prev =
            unsafe { self.locked_offsets.get_unchecked(idx) }.fetch_or(mask, Ordering::AcqRel);
        if prev & mask == 0 {
            let cycle_offset_slices = self.slice_len * cycle_offset;
            TryLockResult::Ok(RepVecRangeLockGuard::new(
                self,
                cycle_offset,
                cycle_offset_slices,
            ))
        } else {
            TryLockResult::Err(TryLockError::WouldBlock)
        }
    }

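    /// Clears the lock bit for `cycle_offset`. Called from the guard's `Drop`.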
    #[inline]
    fn unlock(&self, cycle_offset: usize) {
        let idx = cycle_offset / 32;
        let mask = 1 << (cycle_offset % 32);
        let prev =
            unsafe { self.locked_offsets.get_unchecked(idx) }.fetch_xor(mask, Ordering::Release);
        debug_assert!(prev & mask != 0);
    }

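    /// Returns the locked slice within the given repeat `cycle`.
    ///
    /// SAFETY: The caller must hold the lock for the cycle offset that
    /// `cycle_offset_slices` was computed from.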
    #[inline]
    unsafe fn get_slice(&self, cycle_offset_slices: usize, cycle: usize) -> &[T] {
        if let Some(cycle_elemidx) = self.cycle_num_elems.checked_mul(cycle) {
            if let Some(begin) = cycle_elemidx.checked_add(cycle_offset_slices) {
                if let Some(end) = begin.checked_add(self.slice_len) {
                    let dataptr = self.data.get();
                    if end <= (*dataptr).len() {
                        return &(*dataptr)[begin..end];
                    }
                }
            }
        }
        panic!("RepVecRangeLock cycle index out of range.");
    }

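    /// Returns the locked slice within the given repeat `cycle`, mutably.
    ///
    /// SAFETY: Same requirements as `get_slice`; the returned reference must
    /// not coexist with any other reference into the same slice.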
    #[inline]
    #[allow(clippy::mut_from_ref)]
    unsafe fn get_mut_slice(&self, cycle_offset_slices: usize, cycle: usize) -> &mut [T] {
        // Cast the shared slice to a mutable one. This is sound because the
        // guard holds the exclusive lock for this cycle offset.
        let cptr = self.get_slice(cycle_offset_slices, cycle) as *const [T];
        let mut_slice = (cptr as *mut [T]).as_mut();
        mut_slice.unwrap_or_else(|| unreachable_unchecked())
    }
}

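/// Guard returned by [`RepVecRangeLock::try_lock`].
///
/// Indexing the guard with a cycle number yields the locked slice within
/// that cycle. The locked offset is released when the guard is dropped.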
#[derive(Debug)]
pub struct RepVecRangeLockGuard<'a, T> {
    /// The lock this guard belongs to.
    lock: &'a RepVecRangeLock<T>,
    /// The locked cycle offset (slice index within each cycle).
    cycle_offset: usize,
    /// The cycle offset converted to elements (`slice_len * cycle_offset`).
    cycle_offset_slices: usize,
    /// Marker: `Rc` makes the guard `!Send` and `!Sync`.
    #[allow(clippy::redundant_allocation)]
    _p: PhantomData<Rc<&'a mut T>>,
}

impl<'a, T> RepVecRangeLockGuard<'a, T> {
    #[inline]
    fn new(
        lock: &'a RepVecRangeLock<T>,
        cycle_offset: usize,
        cycle_offset_slices: usize,
    ) -> RepVecRangeLockGuard<'a, T> {
        RepVecRangeLockGuard {
            lock,
            cycle_offset,
            cycle_offset_slices,
            _p: PhantomData,
        }
    }
}

impl<'a, T> Drop for RepVecRangeLockGuard<'a, T> {
    #[inline]
    fn drop(&mut self) {
        self.lock.unlock(self.cycle_offset);
    }
}

impl<'a, T> Index<usize> for RepVecRangeLockGuard<'a, T> {
    type Output = [T];

    #[inline]
    fn index(&self, cycle: usize) -> &Self::Output {
        unsafe { self.lock.get_slice(self.cycle_offset_slices, cycle) }
    }
}

impl<'a, T> IndexMut<usize> for RepVecRangeLockGuard<'a, T> {
    #[inline]
    fn index_mut(&mut self, cycle: usize) -> &mut Self::Output {
        unsafe { self.lock.get_mut_slice(self.cycle_offset_slices, cycle) }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::cell::RefCell;
    use std::sync::{Arc, Barrier};
    use std::thread;

    #[test]
    #[should_panic(expected = "cycle_len out of range")]
    fn test_oob_cycle_len0() {
        let _ = RepVecRangeLock::new(vec![0; 100], 1, 0);
    }

    #[test]
    #[should_panic(expected = "cycle_len out of range")]
    fn test_oob_cycle_len1() {
        let _ = RepVecRangeLock::new(vec![0; 100], 1, usize::MAX - 30);
    }

    #[test]
    #[should_panic(expected = "slice_len must not be 0")]
    fn test_oob_slice_len() {
        let _ = RepVecRangeLock::new(vec![0; 100], 0, 1);
    }

    #[test]
    #[should_panic(expected = "cycle overflow")]
    fn test_oob_cycle_len2() {
        let _ = RepVecRangeLock::new(vec![0; 100], usize::MAX, 2);
    }

    #[test]
    #[should_panic(expected = "must be 0 <= cycle_offset < cycle_len")]
    fn test_oob_lock_offset() {
        let a = RepVecRangeLock::new(vec![0; 100], 2, 10);
        let _ = a.try_lock(10);
    }

    #[test]
    #[should_panic(expected = "index out of bounds")]
    fn test_base_oob_read() {
        let a = RepVecRangeLock::new(vec![0; 100], 1, 2);
        let g = a.try_lock(0).unwrap();
        let _ = g[0][1];
    }

    #[test]
    #[should_panic(expected = "guard 1 panicked")]
    fn test_overlap0() {
        let a = RepVecRangeLock::new(vec![1_i32, 2, 3, 4, 5, 6], 1, 3);
        let _g0 = a.try_lock(0).expect("guard 0 panicked");
        let _g1 = a.try_lock(0).expect("guard 1 panicked");
    }

    #[test]
    #[should_panic(expected = "guard 1 panicked")]
    fn test_overlap1() {
        let a = RepVecRangeLock::new(vec![1_i32, 2, 3, 4, 5, 6], 1, 3);
        let _g0 = a.try_lock(1).expect("guard 0 panicked");
        let _g1 = a.try_lock(1).expect("guard 1 panicked");
    }

    #[test]
    fn test_big_cycle() {
        let a = Arc::new(RepVecRangeLock::new(vec![1_i32; 256], 2, 128));
        assert!(a.locked_offsets.len() == 4);
        {
            let _g = a.try_lock(0);
            assert!(a.locked_offsets[0].load(Ordering::Acquire) == 1);
            assert!(a.locked_offsets[1].load(Ordering::Acquire) == 0);
            assert!(a.locked_offsets[2].load(Ordering::Acquire) == 0);
            assert!(a.locked_offsets[3].load(Ordering::Acquire) == 0);
        }
        {
            let _g = a.try_lock(1);
            assert!(a.locked_offsets[0].load(Ordering::Acquire) == 2);
            assert!(a.locked_offsets[1].load(Ordering::Acquire) == 0);
            assert!(a.locked_offsets[2].load(Ordering::Acquire) == 0);
            assert!(a.locked_offsets[3].load(Ordering::Acquire) == 0);
        }
        {
            let _g = a.try_lock(32);
            assert!(a.locked_offsets[0].load(Ordering::Acquire) == 0);
            assert!(a.locked_offsets[1].load(Ordering::Acquire) == 1);
            assert!(a.locked_offsets[2].load(Ordering::Acquire) == 0);
            assert!(a.locked_offsets[3].load(Ordering::Acquire) == 0);
        }
        {
            let _g = a.try_lock(33);
            assert!(a.locked_offsets[0].load(Ordering::Acquire) == 0);
            assert!(a.locked_offsets[1].load(Ordering::Acquire) == 2);
            assert!(a.locked_offsets[2].load(Ordering::Acquire) == 0);
            assert!(a.locked_offsets[3].load(Ordering::Acquire) == 0);
        }
        {
            let _g = a.try_lock(69);
            assert!(a.locked_offsets[0].load(Ordering::Acquire) == 0);
            assert!(a.locked_offsets[1].load(Ordering::Acquire) == 0);
            assert!(a.locked_offsets[2].load(Ordering::Acquire) == 32);
            assert!(a.locked_offsets[3].load(Ordering::Acquire) == 0);
        }
        {
            let _g = a.try_lock(127);
            assert!(a.locked_offsets[0].load(Ordering::Acquire) == 0);
            assert!(a.locked_offsets[1].load(Ordering::Acquire) == 0);
            assert!(a.locked_offsets[2].load(Ordering::Acquire) == 0);
            assert!(a.locked_offsets[3].load(Ordering::Acquire) == 0x80000000);
        }
    }

    #[test]
    #[should_panic(expected = "Invalid cycle_offset")]
    fn test_cycle_offset_out_of_range() {
        let a = Arc::new(RepVecRangeLock::new(vec![1_i32; 256], 2, 128));
        let _g = a.try_lock(128);
    }

    #[test]
    fn test_thread_no_overlap() {
        let a = Arc::new(RepVecRangeLock::new(vec![1_i32, 2, 3, 4], 1, 2));
        let b = Arc::clone(&a);
        let c = Arc::clone(&a);
        let ba0 = Arc::new(Barrier::new(2));
        let ba1 = Arc::clone(&ba0);
        let j0 = thread::spawn(move || {
            {
                let mut g = b.try_lock(0).unwrap();
                assert!(b.locked_offsets[0].load(Ordering::Acquire) & 1 != 0);
                assert_eq!(g[0][0], 1);
                assert_eq!(g[1][0], 3);
                g[0][0] = 10;
                g[1][0] = 30;
            }
            ba0.wait();
        });
        let j1 = thread::spawn(move || {
            {
                let g = c.try_lock(1).unwrap();
                assert!(c.locked_offsets[0].load(Ordering::Acquire) & 2 != 0);
                assert_eq!(g[0][0], 2);
                assert_eq!(g[1][0], 4);
            }
            ba1.wait();
            let g = c.try_lock(0).unwrap();
            assert_eq!(g[0][0], 10);
            assert_eq!(g[1][0], 30);
        });
        j1.join().expect("Thread 1 panicked.");
        j0.join().expect("Thread 0 panicked.");
        assert!(a
            .locked_offsets
            .iter()
            .all(|x| x.load(Ordering::Acquire) == 0));
    }

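    // RefCell is !Sync, so this type checks that the lock is usable across
    // threads with T: Send + !Sync payloads.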
    struct NoSyncStruct(RefCell<u32>);

    #[test]
    fn test_nosync() {
        let a = Arc::new(RepVecRangeLock::new(
            vec![
                NoSyncStruct(RefCell::new(1)),
                NoSyncStruct(RefCell::new(2)),
                NoSyncStruct(RefCell::new(3)),
                NoSyncStruct(RefCell::new(4)),
            ],
            1,
            2,
        ));
        let b = Arc::clone(&a);
        let c = Arc::clone(&a);
        let ba0 = Arc::new(Barrier::new(2));
        let ba1 = Arc::clone(&ba0);
        let j0 = thread::spawn(move || {
            let _g = b.try_lock(0).unwrap();
            assert!(b.locked_offsets[0].load(Ordering::Acquire) & 1 != 0);
            ba0.wait();
        });
        let j1 = thread::spawn(move || {
            let _g = c.try_lock(1).unwrap();
            assert!(c.locked_offsets[0].load(Ordering::Acquire) & 2 != 0);
            ba1.wait();
        });
        j1.join().expect("Thread 1 panicked.");
        j0.join().expect("Thread 0 panicked.");
        assert!(a
            .locked_offsets
            .iter()
            .all(|x| x.load(Ordering::Acquire) == 0));
    }
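
    // Minimal single-threaded usage sketch (illustrative; not from the
    // original test suite): lock one cycle offset, write its slice in each
    // repeat cycle, then recover the data with into_inner().
    #[test]
    fn test_basic_usage_sketch() {
        // 6 elements, slice_len = 1, cycle_len = 3 => two repeat cycles.
        let a = RepVecRangeLock::new(vec![0_i32; 6], 1, 3);
        {
            let mut g = a.try_lock(2).unwrap();
            g[0][0] = 5; // cycle 0, offset 2 => element index 2
            g[1][0] = 7; // cycle 1, offset 2 => element index 5
        }
        assert_eq!(a.into_inner(), vec![0, 0, 5, 0, 0, 7]);
    }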
}