use crate::vecparts::VecParts;

use std::{
    cell::UnsafeCell,
    marker::PhantomData,
    mem::size_of,
    ops::{Index, IndexMut, Range},
    rc::Rc,
    slice,
    sync::{
        atomic::{AtomicU32, Ordering},
        TryLockError, TryLockResult,
    },
};

/// A vector range lock with a repeating slice pattern.
///
/// The data is divided into consecutive slices of `slice_len` elements.
/// Every `cycle_len` consecutive slices form one repeat cycle, and the
/// cycle pattern repeats over the whole vector. Locking a `cycle_offset`
/// locks the slice at that offset in *every* cycle at once.
#[derive(Debug)]
pub struct RepVecRangeLock<T> {
    /// Number of elements per slice.
    slice_len: usize,
    /// Number of slices per repeat cycle.
    cycle_len: usize,
    /// Number of elements per repeat cycle (`slice_len * cycle_len`).
    cycle_num_elems: usize,
    /// Lock bitmap: one bit per cycle offset, 32 offsets per word.
    locked_offsets: Vec<AtomicU32>,
    /// The protected data.
    data: UnsafeCell<VecParts<T>>,
}

// SAFETY: The lock bitmap guarantees that two guards never alias the same
// slices, so the lock can be shared between threads if `T` can be sent.
unsafe impl<T> Sync for RepVecRangeLock<T> where T: Send {}

impl<'a, T> RepVecRangeLock<T> {
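    /// Creates a new `RepVecRangeLock` over `data`.
    ///
    /// The data is split into slices of `slice_len` elements; every
    /// `cycle_len` consecutive slices form one repeat cycle.
    ///
    /// A minimal usage sketch (marked `ignore`, because the crate path
    /// `range_lock` is an assumption, not taken from this file):
    ///
    /// ```ignore
    /// use range_lock::RepVecRangeLock;
    ///
    /// // slice_len = 3, cycle_len = 2:
    /// // cycle 0: [0 1 2 | 3 4 5]   cycle 1: [6 7 8 | 9 10 11]
    /// let lock = RepVecRangeLock::new((0..12).collect::<Vec<u32>>(), 3, 2);
    /// let guard = lock.try_lock(1).expect("offset 1 is free");
    /// assert_eq!(guard[0], [3, 4, 5]);   // cycle 0, offset 1
    /// assert_eq!(guard[1], [9, 10, 11]); // cycle 1, offset 1
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `slice_len` is 0, if `cycle_len` is 0 or too large,
    /// or if `cycle_len * slice_len` overflows.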
    pub fn new(data: Vec<T>, slice_len: usize, cycle_len: usize) -> RepVecRangeLock<T> {
        if slice_len == 0 {
            panic!("slice_len must not be 0.");
        }
        if cycle_len == 0 || cycle_len > usize::MAX - 31 {
            panic!("cycle_len out of range.");
        }
        let Some(cycle_num_elems) = cycle_len.checked_mul(slice_len) else {
            panic!("Repeat cycle overflow.");
        };

        // One lock bit per cycle offset, packed into 32-bit words.
        let num = cycle_len.div_ceil(32);
        let mut locked_offsets = Vec::with_capacity(num);
        locked_offsets.resize_with(num, || AtomicU32::new(0));

        let data = UnsafeCell::new(data.into());

        RepVecRangeLock {
            slice_len,
            cycle_len,
            cycle_num_elems,
            locked_offsets,
            data,
        }
    }

    /// Returns the total number of elements in the protected vector.
    #[inline]
    pub fn data_len(&self) -> usize {
        // SAFETY: Only the length is read; no element data is accessed.
        unsafe { (*self.data.get()).len() }
    }

    /// Consumes the lock and returns the protected vector.
    ///
    /// Taking `self` by value guarantees that no guards are alive.
    #[inline]
    pub fn into_inner(self) -> Vec<T> {
        debug_assert!(self
            .locked_offsets
            .iter()
            .all(|x| x.load(Ordering::Acquire) == 0));
        self.data.into_inner().into()
    }

    /// Tries to lock the slices at `cycle_offset` in all cycles.
    ///
    /// Returns a guard on success, or `TryLockError::WouldBlock` if the
    /// offset is already locked. The lock is released when the guard is
    /// dropped.
    ///
    /// # Panics
    ///
    /// Panics if `cycle_offset >= cycle_len`.
    #[inline]
    pub fn try_lock(&'a self, cycle_offset: usize) -> TryLockResult<RepVecRangeLockGuard<'a, T>> {
        if cycle_offset >= self.cycle_len {
            panic!("Invalid cycle_offset. It must be 0 <= cycle_offset < cycle_len.");
        }
        let idx = cycle_offset / 32;
        let mask = 1 << (cycle_offset % 32);
        // SAFETY: idx < locked_offsets.len(), because cycle_offset < cycle_len
        // and locked_offsets holds cycle_len.div_ceil(32) words.
        let locked_offsets = unsafe { self.locked_offsets.get_unchecked(idx) };
        // Atomically set the lock bit. If it was already set, somebody
        // else holds this offset.
        let prev = locked_offsets.fetch_or(mask, Ordering::AcqRel);
        if prev & mask == 0 {
            let cycle_offset_slices = self.slice_len * cycle_offset;
            TryLockResult::Ok(RepVecRangeLockGuard::new(
                self,
                cycle_offset,
                cycle_offset_slices,
            ))
        } else {
            TryLockResult::Err(TryLockError::WouldBlock)
        }
    }

    /// Releases the lock bit for `cycle_offset`.
    #[inline]
    fn unlock(&self, cycle_offset: usize) {
        let idx = cycle_offset / 32;
        let mask = 1 << (cycle_offset % 32);
        // SAFETY: idx is in range; see try_lock().
        let locked_offsets = unsafe { self.locked_offsets.get_unchecked(idx) };
        // Atomically clear the lock bit. It must have been set.
        let prev = locked_offsets.fetch_xor(mask, Ordering::Release);
        debug_assert!(prev & mask != 0);
    }

    /// Computes the element range of the slice at `cycle_offset_slices`
    /// within the given `cycle`, with overflow checking.
    #[inline]
    fn calc_range(&self, cycle_offset_slices: usize, cycle: usize) -> Range<usize> {
        if let Some(cycle_elemidx) = self.cycle_num_elems.checked_mul(cycle) {
            if let Some(start) = cycle_elemidx.checked_add(cycle_offset_slices) {
                if let Some(end) = start.checked_add(self.slice_len) {
                    return Range { start, end };
                }
            }
        }
        panic!("RepVecRangeLock cycle index out of range.");
    }

    /// Returns the slice at `cycle_offset_slices` in the given `cycle`.
    ///
    /// # SAFETY
    ///
    /// The caller must hold the lock for the corresponding cycle offset.
    #[inline]
    unsafe fn get_slice(&self, cycle_offset_slices: usize, cycle: usize) -> &[T] {
        let data = unsafe { (*self.data.get()).ptr() };
        let range = self.calc_range(cycle_offset_slices, cycle);
        // Keep the slice inside the vector and the pointer offset within
        // isize::MAX bytes. (max(1) avoids division by zero for zero sized T.)
        assert!(range.end <= self.data_len());
        assert!(range.start <= isize::MAX as usize / size_of::<T>().max(1));
        unsafe { slice::from_raw_parts(data.add(range.start) as _, range.end - range.start) }
    }

    /// Returns the mutable slice at `cycle_offset_slices` in the given `cycle`.
    ///
    /// # SAFETY
    ///
    /// The caller must hold the lock for the corresponding cycle offset.
    #[inline]
    #[allow(clippy::mut_from_ref)]
    unsafe fn get_mut_slice(&self, cycle_offset_slices: usize, cycle: usize) -> &mut [T] {
        let data = unsafe { (*self.data.get()).ptr() };
        let range = self.calc_range(cycle_offset_slices, cycle);
        // Same bounds checks as in get_slice().
        assert!(range.end <= self.data_len());
        assert!(range.start <= isize::MAX as usize / size_of::<T>().max(1));
        unsafe { slice::from_raw_parts_mut(data.add(range.start) as _, range.end - range.start) }
    }
}

/// The guard returned by [`RepVecRangeLock::try_lock`].
///
/// Indexing the guard with a cycle number yields the locked slice of that
/// cycle. Dropping the guard releases the lock.
#[derive(Debug)]
pub struct RepVecRangeLockGuard<'a, T> {
    /// The lock this guard belongs to.
    lock: &'a RepVecRangeLock<T>,
    /// The locked cycle offset (slice index within a cycle).
    cycle_offset: usize,
    /// The locked cycle offset in elements (`slice_len * cycle_offset`).
    cycle_offset_slices: usize,
    /// `Rc` makes the guard neither `Send` nor `Sync`.
    #[allow(clippy::redundant_allocation)]
    _p: PhantomData<Rc<&'a mut T>>,
}

impl<'a, T> RepVecRangeLockGuard<'a, T> {
    #[inline]
    fn new(
        lock: &'a RepVecRangeLock<T>,
        cycle_offset: usize,
        cycle_offset_slices: usize,
    ) -> RepVecRangeLockGuard<'a, T> {
        RepVecRangeLockGuard {
            lock,
            cycle_offset,
            cycle_offset_slices,
            _p: PhantomData,
        }
    }
}

impl<T> Drop for RepVecRangeLockGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // Release the lock bit when the guard goes out of scope.
        self.lock.unlock(self.cycle_offset);
    }
}

impl<T> Index<usize> for RepVecRangeLockGuard<'_, T> {
    type Output = [T];

    #[inline]
    fn index(&self, cycle: usize) -> &Self::Output {
        // SAFETY: This guard holds the lock for its cycle offset.
        unsafe { self.lock.get_slice(self.cycle_offset_slices, cycle) }
    }
}

impl<T> IndexMut<usize> for RepVecRangeLockGuard<'_, T> {
    #[inline]
    fn index_mut(&mut self, cycle: usize) -> &mut Self::Output {
        // SAFETY: This guard holds the lock for its cycle offset.
        unsafe { self.lock.get_mut_slice(self.cycle_offset_slices, cycle) }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::cell::RefCell;
    use std::sync::{Arc, Barrier};
    use std::thread;

    #[test]
    #[should_panic(expected = "cycle_len out of range")]
    fn test_oob_cycle_len0() {
        let _ = RepVecRangeLock::new(vec![0; 100], 1, 0);
    }

    #[test]
    #[should_panic(expected = "cycle_len out of range")]
    fn test_oob_cycle_len1() {
        let _ = RepVecRangeLock::new(vec![0; 100], 1, usize::MAX - 30);
    }

    #[test]
    #[should_panic(expected = "slice_len must not be 0")]
    fn test_oob_slice_len() {
        let _ = RepVecRangeLock::new(vec![0; 100], 0, 1);
    }

    #[test]
    #[should_panic(expected = "cycle overflow")]
    fn test_oob_cycle_overflow() {
        let _ = RepVecRangeLock::new(vec![0; 100], usize::MAX, 2);
    }

    #[test]
    #[should_panic(expected = "must be 0 <= cycle_offset < cycle_len")]
    fn test_oob_lock_offset() {
        let a = RepVecRangeLock::new(vec![0; 100], 2, 10);
        let _ = a.try_lock(10);
    }

    #[test]
    #[should_panic(expected = "index out of bounds")]
    fn test_base_oob_read() {
        let a = RepVecRangeLock::new(vec![0; 100], 1, 2);
        let g = a.try_lock(0).unwrap();
        let _ = g[0][1]; // The slice has length 1; element index 1 is OOB.
    }
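
    // Illustrative test (an addition, not from the original suite):
    // a basic read/write round trip through guards.
    // Element layout with slice_len = 2, cycle_len = 2 over 8 elements:
    // cycle 0: [e0 e1 | e2 e3]   cycle 1: [e4 e5 | e6 e7]
    // Offset 0 owns {e0, e1, e4, e5}; offset 1 owns {e2, e3, e6, e7}.
    #[test]
    fn test_guard_read_write() {
        let a = RepVecRangeLock::new((0..8).collect::<Vec<i32>>(), 2, 2);
        {
            let mut g = a.try_lock(0).unwrap();
            assert_eq!(g[0], [0, 1]); // cycle 0, offset 0
            assert_eq!(g[1], [4, 5]); // cycle 1, offset 0
            g[0][0] = 100;
            g[1][1] = 500;
        }
        {
            let mut g = a.try_lock(1).unwrap();
            assert_eq!(g[0], [2, 3]); // cycle 0, offset 1
            assert_eq!(g[1], [6, 7]); // cycle 1, offset 1
            g[0][1] = 300;
        }
        // All guards are dropped; take the data back out.
        assert_eq!(a.into_inner(), vec![100, 1, 2, 300, 4, 500, 6, 7]);
    }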

    #[test]
    #[should_panic(expected = "guard 1 failed")]
    fn test_overlap0() {
        let a = RepVecRangeLock::new(vec![1_i32, 2, 3, 4, 5, 6], 1, 3);
        let _g0 = a.try_lock(0).expect("guard 0 failed");
        // Offset 0 is already locked, so this try_lock() returns WouldBlock.
        let _g1 = a.try_lock(0).expect("guard 1 failed");
    }

    #[test]
    #[should_panic(expected = "guard 1 failed")]
    fn test_overlap1() {
        let a = RepVecRangeLock::new(vec![1_i32, 2, 3, 4, 5, 6], 1, 3);
        let _g0 = a.try_lock(1).expect("guard 0 failed");
        // Same as above, for a non-zero offset.
        let _g1 = a.try_lock(1).expect("guard 1 failed");
    }
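
    // Illustrative test (an addition): dropping a guard releases its
    // offset, so a subsequent try_lock() on the same offset succeeds.
    #[test]
    fn test_relock_after_drop() {
        let a = RepVecRangeLock::new(vec![0_i32; 6], 1, 3);
        let g = a.try_lock(2).unwrap();
        assert!(matches!(a.try_lock(2), Err(TryLockError::WouldBlock)));
        drop(g);
        assert!(a.try_lock(2).is_ok());
    }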

    #[test]
    fn test_big_cycle() {
        let a = Arc::new(RepVecRangeLock::new(vec![1_i32; 256], 2, 128));
        // 128 cycle offsets need four 32-bit lock words.
        assert_eq!(a.locked_offsets.len(), 4);
        {
            let _g = a.try_lock(0); // word 0, bit 0
            assert_eq!(a.locked_offsets[0].load(Ordering::Acquire), 1);
            assert_eq!(a.locked_offsets[1].load(Ordering::Acquire), 0);
            assert_eq!(a.locked_offsets[2].load(Ordering::Acquire), 0);
            assert_eq!(a.locked_offsets[3].load(Ordering::Acquire), 0);
        }
        {
            let _g = a.try_lock(1); // word 0, bit 1
            assert_eq!(a.locked_offsets[0].load(Ordering::Acquire), 2);
            assert_eq!(a.locked_offsets[1].load(Ordering::Acquire), 0);
            assert_eq!(a.locked_offsets[2].load(Ordering::Acquire), 0);
            assert_eq!(a.locked_offsets[3].load(Ordering::Acquire), 0);
        }
        {
            let _g = a.try_lock(32); // word 1, bit 0
            assert_eq!(a.locked_offsets[0].load(Ordering::Acquire), 0);
            assert_eq!(a.locked_offsets[1].load(Ordering::Acquire), 1);
            assert_eq!(a.locked_offsets[2].load(Ordering::Acquire), 0);
            assert_eq!(a.locked_offsets[3].load(Ordering::Acquire), 0);
        }
        {
            let _g = a.try_lock(33); // word 1, bit 1
            assert_eq!(a.locked_offsets[0].load(Ordering::Acquire), 0);
            assert_eq!(a.locked_offsets[1].load(Ordering::Acquire), 2);
            assert_eq!(a.locked_offsets[2].load(Ordering::Acquire), 0);
            assert_eq!(a.locked_offsets[3].load(Ordering::Acquire), 0);
        }
        {
            let _g = a.try_lock(69); // word 2, bit 5
            assert_eq!(a.locked_offsets[0].load(Ordering::Acquire), 0);
            assert_eq!(a.locked_offsets[1].load(Ordering::Acquire), 0);
            assert_eq!(a.locked_offsets[2].load(Ordering::Acquire), 32);
            assert_eq!(a.locked_offsets[3].load(Ordering::Acquire), 0);
        }
        {
            let _g = a.try_lock(127); // word 3, bit 31
            assert_eq!(a.locked_offsets[0].load(Ordering::Acquire), 0);
            assert_eq!(a.locked_offsets[1].load(Ordering::Acquire), 0);
            assert_eq!(a.locked_offsets[2].load(Ordering::Acquire), 0);
            assert_eq!(a.locked_offsets[3].load(Ordering::Acquire), 0x80000000);
        }
    }
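
    // Illustrative test (an addition): offsets in different bitmap words,
    // and different bits of the same word, lock independently.
    #[test]
    fn test_independent_offsets() {
        let a = RepVecRangeLock::new(vec![0_u8; 128], 1, 64);
        let _g0 = a.try_lock(0).unwrap(); // word 0, bit 0
        let _g1 = a.try_lock(31).unwrap(); // word 0, bit 31
        let _g2 = a.try_lock(32).unwrap(); // word 1, bit 0
        assert_eq!(a.locked_offsets[0].load(Ordering::Acquire), 0x80000001);
        assert_eq!(a.locked_offsets[1].load(Ordering::Acquire), 1);
    }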

    #[test]
    #[should_panic(expected = "Invalid cycle_offset")]
    fn test_cycle_offset_out_of_range() {
        let a = Arc::new(RepVecRangeLock::new(vec![1_i32; 256], 2, 128));
        let _g = a.try_lock(128);
    }

    #[test]
    fn test_thread_no_overlap() {
        let a = Arc::new(RepVecRangeLock::new(vec![1_i32, 2, 3, 4], 1, 2));
        let b = Arc::clone(&a);
        let c = Arc::clone(&a);
        let ba0 = Arc::new(Barrier::new(2));
        let ba1 = Arc::clone(&ba0);
        let j0 = thread::spawn(move || {
            {
                let mut g = b.try_lock(0).unwrap();
                assert!(b.locked_offsets[0].load(Ordering::Acquire) & 1 != 0);
                assert_eq!(g[0][0], 1);
                assert_eq!(g[1][0], 3);
                g[0][0] = 10;
                g[1][0] = 30;
            }
            ba0.wait();
        });
        let j1 = thread::spawn(move || {
            {
                let g = c.try_lock(1).unwrap();
                assert!(c.locked_offsets[0].load(Ordering::Acquire) & 2 != 0);
                assert_eq!(g[0][0], 2);
                assert_eq!(g[1][0], 4);
            }
            // Wait until thread 0 has written and released offset 0.
            ba1.wait();
            let g = c.try_lock(0).unwrap();
            assert_eq!(g[0][0], 10);
            assert_eq!(g[1][0], 30);
        });
        j1.join().expect("Thread 1 panicked.");
        j0.join().expect("Thread 0 panicked.");
        assert!(a
            .locked_offsets
            .iter()
            .all(|x| x.load(Ordering::Acquire) == 0));
    }
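
    // Illustrative test (an addition): with std::thread::scope() the lock
    // can be shared by plain reference, without Arc, and the data can be
    // recovered afterwards with into_inner().
    #[test]
    fn test_scoped_threads() {
        let a = RepVecRangeLock::new(vec![0_u32; 8], 2, 2);
        thread::scope(|s| {
            for offset in 0..2 {
                let a = &a;
                s.spawn(move || {
                    let mut g = a.try_lock(offset).unwrap();
                    // Write to this offset's slice in every cycle.
                    for cycle in 0..2 {
                        g[cycle].fill(offset as u32 + 1);
                    }
                });
            }
        });
        // Layout: [off0 off0 off1 off1 | off0 off0 off1 off1]
        assert_eq!(a.into_inner(), vec![1, 1, 2, 2, 1, 1, 2, 2]);
    }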

    // RefCell is Send but not Sync, so this exercises the
    // `Sync for RepVecRangeLock<T> where T: Send` impl.
    #[allow(dead_code)]
    struct NoSyncStruct(RefCell<u32>);

    #[test]
    fn test_nosync() {
        let a = Arc::new(RepVecRangeLock::new(
            vec![
                NoSyncStruct(RefCell::new(1)),
                NoSyncStruct(RefCell::new(2)),
                NoSyncStruct(RefCell::new(3)),
                NoSyncStruct(RefCell::new(4)),
            ],
            1,
            2,
        ));
        let b = Arc::clone(&a);
        let c = Arc::clone(&a);
        let ba0 = Arc::new(Barrier::new(2));
        let ba1 = Arc::clone(&ba0);
        let j0 = thread::spawn(move || {
            let _g = b.try_lock(0).unwrap();
            assert!(b.locked_offsets[0].load(Ordering::Acquire) & 1 != 0);
            ba0.wait();
        });
        let j1 = thread::spawn(move || {
            let _g = c.try_lock(1).unwrap();
            assert!(c.locked_offsets[0].load(Ordering::Acquire) & 2 != 0);
            ba1.wait();
        });
        j1.join().expect("Thread 1 panicked.");
        j0.join().expect("Thread 0 panicked.");
        assert!(a
            .locked_offsets
            .iter()
            .all(|x| x.load(Ordering::Acquire) == 0));
    }
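
    // Illustrative test (an addition): data_len() reports the full vector
    // length, independent of the slice/cycle configuration.
    #[test]
    fn test_data_len() {
        let a = RepVecRangeLock::new(vec![0_u8; 100], 3, 4);
        assert_eq!(a.data_len(), 100);
        let _g = a.try_lock(1);
        assert_eq!(a.data_len(), 100); // Holding a guard does not change it.
    }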
}