atomic_shim/shim.rs
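//! Lock-backed shims for `std::sync::atomic::AtomicU64` and `AtomicI64`.
//! Every operation acquires a `crossbeam_utils::sync::ShardedLock`, so these
//! types mirror the `std` API but are not lock-free; they are presumably
//! meant as a drop-in stand-in where native 64-bit atomics are unavailable.
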
use crossbeam_utils::sync::ShardedLock;
use std::sync::atomic::Ordering;

/// An integer type which can be safely shared between threads.
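///
/// Unlike `std::sync::atomic::AtomicU64`, this type is backed by a
/// `ShardedLock` and is therefore not lock-free.
///
/// # Examples
///
/// A small usage sketch sharing a counter across threads:
///
/// ```
/// use std::sync::Arc;
/// use std::sync::atomic::Ordering;
/// use std::thread;
/// use atomic_shim::AtomicU64;
///
/// let counter = Arc::new(AtomicU64::new(0));
/// let handles: Vec<_> = (0..4)
///     .map(|_| {
///         let counter = Arc::clone(&counter);
///         // Each thread bumps the shared counter once.
///         thread::spawn(move || {
///             counter.fetch_add(1, Ordering::SeqCst);
///         })
///     })
///     .collect();
/// for handle in handles {
///     handle.join().unwrap();
/// }
/// assert_eq!(counter.load(Ordering::SeqCst), 4);
/// ```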
#[derive(Debug, Default)]
pub struct AtomicU64 {
    value: ShardedLock<u64>,
}

impl AtomicU64 {
    /// Creates a new atomic integer.
    ///
    /// # Examples
    ///
    /// ```
    /// use atomic_shim::AtomicU64;
    /// let atomic_forty_two = AtomicU64::new(42);
    /// ```
    pub fn new(v: u64) -> Self {
        Self {
            value: ShardedLock::new(v),
        }
    }

    /// Returns a mutable reference to the underlying integer.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicU64;
    ///
    /// let mut some_var = AtomicU64::new(10);
    /// assert_eq!(*some_var.get_mut(), 10);
    /// *some_var.get_mut() = 5;
    /// assert_eq!(some_var.load(Ordering::SeqCst), 5);
    /// ```
    pub fn get_mut(&mut self) -> &mut u64 {
        self.value.get_mut().unwrap()
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use atomic_shim::AtomicU64;
    /// let some_var = AtomicU64::new(5);
    /// assert_eq!(some_var.into_inner(), 5);
    /// ```
    pub fn into_inner(self) -> u64 {
        self.value.into_inner().unwrap()
    }

    /// Loads a value from the atomic integer.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicU64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicU64;
    /// let some_var = AtomicU64::new(5);
    /// assert_eq!(some_var.load(Ordering::Relaxed), 5);
    /// ```
    pub fn load(&self, _: Ordering) -> u64 {
        *self.value.read().unwrap()
    }

    /// Stores a value into the atomic integer.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicU64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicU64;
    ///
    /// let some_var = AtomicU64::new(5);
    /// some_var.store(10, Ordering::Relaxed);
    /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
    /// ```
    pub fn store(&self, value: u64, _: Ordering) {
        let mut lock = self.value.write().unwrap();
        *lock = value;
    }

    /// Stores a value into the atomic integer, returning the previous value.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicU64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicU64;
    ///
    /// let some_var = AtomicU64::new(5);
    /// assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
    /// ```
    pub fn swap(&self, value: u64, _: Ordering) -> u64 {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        *lock = value;
        prev
    }

    /// Stores a value into the atomic integer if the current value is the
    /// same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to
    /// `current`, then the value was updated.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicU64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicU64;
    ///
    /// let some_var = AtomicU64::new(5);
    /// assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
    /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
    /// assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
    /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
    /// ```
    pub fn compare_and_swap(&self, current: u64, new: u64, _: Ordering) -> u64 {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        if prev == current {
            *lock = new;
        }
        prev
    }

    /// Stores a value into the atomic integer if the current value is the
    /// same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was
    /// written and containing the previous value. On success this value is
    /// guaranteed to be equal to `current`.
    ///
    /// Both `Ordering` arguments are ignored; they are accepted only for
    /// compatibility with `std::sync::atomic::AtomicU64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicU64;
    ///
    /// let some_var = AtomicU64::new(5);
    /// assert_eq!(some_var.compare_exchange(5, 10,
    ///                                      Ordering::Acquire,
    ///                                      Ordering::Relaxed),
    ///            Ok(5));
    /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
    /// assert_eq!(some_var.compare_exchange(6, 12,
    ///                                      Ordering::SeqCst,
    ///                                      Ordering::Acquire),
    ///            Err(10));
    /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
    /// ```
    pub fn compare_exchange(
        &self,
        current: u64,
        new: u64,
        _: Ordering,
        _: Ordering,
    ) -> Result<u64, u64> {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        if prev == current {
            *lock = new;
            Ok(current)
        } else {
            Err(prev)
        }
    }

    /// Stores a value into the atomic integer if the current value is the
    /// same as the `current` value.
    ///
    /// Unlike `std::sync::atomic::AtomicU64::compare_exchange_weak`, this
    /// implementation never fails spuriously; it simply delegates to
    /// `compare_exchange`.
    ///
    /// Both `Ordering` arguments are ignored; they are accepted only for
    /// compatibility with `std::sync::atomic::AtomicU64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicU64;
    ///
    /// let val = AtomicU64::new(4);
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     let new = old * 2;
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    pub fn compare_exchange_weak(
        &self,
        current: u64,
        new: u64,
        success: Ordering,
        failure: Ordering,
    ) -> Result<u64, u64> {
        self.compare_exchange(current, new, success, failure)
    }

    /// Adds to the current value, returning the previous value.
    ///
    /// This operation wraps around on overflow.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicU64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicU64;
    ///
    /// let foo = AtomicU64::new(0);
    /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), 10);
    /// ```
    pub fn fetch_add(&self, val: u64, _: Ordering) -> u64 {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        *lock = prev.wrapping_add(val);
        prev
    }

    /// Subtracts from the current value, returning the previous value.
    ///
    /// This operation wraps around on overflow.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicU64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicU64;
    ///
    /// let foo = AtomicU64::new(20);
    /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
    /// assert_eq!(foo.load(Ordering::SeqCst), 10);
    /// ```
    pub fn fetch_sub(&self, val: u64, _: Ordering) -> u64 {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        *lock = prev.wrapping_sub(val);
        prev
    }

    /// Bitwise "and" with the current value.
    ///
    /// Performs a bitwise "and" operation on the current value and the
    /// argument `val`, and sets the new value to the result.
    /// Returns the previous value.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicU64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicU64;
    ///
    /// let foo = AtomicU64::new(0b101101);
    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
    /// ```
    pub fn fetch_and(&self, val: u64, _: Ordering) -> u64 {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        *lock = prev & val;
        prev
    }

    /// Bitwise "nand" with the current value.
    ///
    /// Performs a bitwise "nand" operation on the current value and the
    /// argument `val`, and sets the new value to the result.
    /// Returns the previous value.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicU64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicU64;
    ///
    /// let foo = AtomicU64::new(0x13);
    /// assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
    /// assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
    /// ```
    pub fn fetch_nand(&self, val: u64, _: Ordering) -> u64 {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        *lock = !(prev & val);
        prev
    }

    /// Bitwise "or" with the current value.
    ///
    /// Performs a bitwise "or" operation on the current value and the
    /// argument `val`, and sets the new value to the result.
    /// Returns the previous value.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicU64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicU64;
    ///
    /// let foo = AtomicU64::new(0b101101);
    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
    /// ```
    pub fn fetch_or(&self, val: u64, _: Ordering) -> u64 {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        *lock = prev | val;
        prev
    }

    /// Bitwise "xor" with the current value.
    ///
    /// Performs a bitwise "xor" operation on the current value and the
    /// argument `val`, and sets the new value to the result.
    /// Returns the previous value.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicU64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicU64;
    /// let foo = AtomicU64::new(0b101101);
    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
    /// ```
    pub fn fetch_xor(&self, val: u64, _: Ordering) -> u64 {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        *lock = prev ^ val;
        prev
    }
}

impl From<u64> for AtomicU64 {
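    /// Converts a `u64` into an `AtomicU64`; equivalent to `AtomicU64::new`.
    ///
    /// # Examples
    ///
    /// ```
    /// use atomic_shim::AtomicU64;
    /// let atomic = AtomicU64::from(23);
    /// assert_eq!(atomic.into_inner(), 23);
    /// ```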
    fn from(value: u64) -> Self {
        AtomicU64::new(value)
    }
}

/// An integer type which can be safely shared between threads.
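///
/// The signed counterpart of `AtomicU64`, backed by the same `ShardedLock`
/// strategy.
///
/// # Examples
///
/// A small sketch of signed arithmetic that `AtomicU64` cannot express:
///
/// ```
/// use std::sync::atomic::Ordering;
/// use atomic_shim::AtomicI64;
///
/// let balance = AtomicI64::new(10);
/// // A signed atomic may go negative.
/// balance.fetch_sub(25, Ordering::SeqCst);
/// assert_eq!(balance.load(Ordering::SeqCst), -15);
/// ```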
#[derive(Debug, Default)]
pub struct AtomicI64 {
    value: ShardedLock<i64>,
}

impl AtomicI64 {
    /// Creates a new atomic integer.
    ///
    /// # Examples
    ///
    /// ```
    /// use atomic_shim::AtomicI64;
    /// let atomic_forty_two = AtomicI64::new(42);
    /// ```
    pub fn new(v: i64) -> Self {
        Self {
            value: ShardedLock::new(v),
        }
    }

    /// Returns a mutable reference to the underlying integer.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicI64;
    ///
    /// let mut some_var = AtomicI64::new(10);
    /// assert_eq!(*some_var.get_mut(), 10);
    /// *some_var.get_mut() = 5;
    /// assert_eq!(some_var.load(Ordering::SeqCst), 5);
    /// ```
    pub fn get_mut(&mut self) -> &mut i64 {
        self.value.get_mut().unwrap()
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use atomic_shim::AtomicI64;
    /// let some_var = AtomicI64::new(5);
    /// assert_eq!(some_var.into_inner(), 5);
    /// ```
    pub fn into_inner(self) -> i64 {
        self.value.into_inner().unwrap()
    }

    /// Loads a value from the atomic integer.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicI64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicI64;
    /// let some_var = AtomicI64::new(5);
    /// assert_eq!(some_var.load(Ordering::Relaxed), 5);
    /// ```
    pub fn load(&self, _: Ordering) -> i64 {
        *self.value.read().unwrap()
    }

    /// Stores a value into the atomic integer.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicI64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicI64;
    ///
    /// let some_var = AtomicI64::new(5);
    /// some_var.store(10, Ordering::Relaxed);
    /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
    /// ```
    pub fn store(&self, value: i64, _: Ordering) {
        let mut lock = self.value.write().unwrap();
        *lock = value;
    }

    /// Stores a value into the atomic integer, returning the previous value.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicI64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicI64;
    ///
    /// let some_var = AtomicI64::new(5);
    /// assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
    /// ```
    pub fn swap(&self, value: i64, _: Ordering) -> i64 {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        *lock = value;
        prev
    }

    /// Stores a value into the atomic integer if the current value is the
    /// same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to
    /// `current`, then the value was updated.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicI64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicI64;
    ///
    /// let some_var = AtomicI64::new(5);
    /// assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
    /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
    /// assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
    /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
    /// ```
    pub fn compare_and_swap(&self, current: i64, new: i64, _: Ordering) -> i64 {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        if prev == current {
            *lock = new;
        }
        prev
    }

    /// Stores a value into the atomic integer if the current value is the
    /// same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was
    /// written and containing the previous value. On success this value is
    /// guaranteed to be equal to `current`.
    ///
    /// Both `Ordering` arguments are ignored; they are accepted only for
    /// compatibility with `std::sync::atomic::AtomicI64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicI64;
    ///
    /// let some_var = AtomicI64::new(5);
    /// assert_eq!(some_var.compare_exchange(5, 10,
    ///                                      Ordering::Acquire,
    ///                                      Ordering::Relaxed),
    ///            Ok(5));
    /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
    /// assert_eq!(some_var.compare_exchange(6, 12,
    ///                                      Ordering::SeqCst,
    ///                                      Ordering::Acquire),
    ///            Err(10));
    /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
    /// ```
    pub fn compare_exchange(
        &self,
        current: i64,
        new: i64,
        _: Ordering,
        _: Ordering,
    ) -> Result<i64, i64> {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        if prev == current {
            *lock = new;
            Ok(current)
        } else {
            Err(prev)
        }
    }

    /// Stores a value into the atomic integer if the current value is the
    /// same as the `current` value.
    ///
    /// Unlike `std::sync::atomic::AtomicI64::compare_exchange_weak`, this
    /// implementation never fails spuriously; it simply delegates to
    /// `compare_exchange`.
    ///
    /// Both `Ordering` arguments are ignored; they are accepted only for
    /// compatibility with `std::sync::atomic::AtomicI64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicI64;
    ///
    /// let val = AtomicI64::new(4);
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     let new = old * 2;
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    pub fn compare_exchange_weak(
        &self,
        current: i64,
        new: i64,
        success: Ordering,
        failure: Ordering,
    ) -> Result<i64, i64> {
        self.compare_exchange(current, new, success, failure)
    }

    /// Adds to the current value, returning the previous value.
    ///
    /// This operation wraps around on overflow.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicI64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicI64;
    ///
    /// let foo = AtomicI64::new(0);
    /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), 10);
    /// ```
    pub fn fetch_add(&self, val: i64, _: Ordering) -> i64 {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        *lock = prev.wrapping_add(val);
        prev
    }

    /// Subtracts from the current value, returning the previous value.
    ///
    /// This operation wraps around on overflow.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicI64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicI64;
    ///
    /// let foo = AtomicI64::new(20);
    /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
    /// assert_eq!(foo.load(Ordering::SeqCst), 10);
    /// ```
    pub fn fetch_sub(&self, val: i64, _: Ordering) -> i64 {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        *lock = prev.wrapping_sub(val);
        prev
    }

    /// Bitwise "and" with the current value.
    ///
    /// Performs a bitwise "and" operation on the current value and the
    /// argument `val`, and sets the new value to the result.
    /// Returns the previous value.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicI64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicI64;
    ///
    /// let foo = AtomicI64::new(0b101101);
    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
    /// ```
    pub fn fetch_and(&self, val: i64, _: Ordering) -> i64 {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        *lock = prev & val;
        prev
    }

    /// Bitwise "nand" with the current value.
    ///
    /// Performs a bitwise "nand" operation on the current value and the
    /// argument `val`, and sets the new value to the result.
    /// Returns the previous value.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicI64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicI64;
    ///
    /// let foo = AtomicI64::new(0x13);
    /// assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
    /// assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
    /// ```
    pub fn fetch_nand(&self, val: i64, _: Ordering) -> i64 {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        *lock = !(prev & val);
        prev
    }

    /// Bitwise "or" with the current value.
    ///
    /// Performs a bitwise "or" operation on the current value and the
    /// argument `val`, and sets the new value to the result.
    /// Returns the previous value.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicI64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicI64;
    ///
    /// let foo = AtomicI64::new(0b101101);
    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
    /// ```
    pub fn fetch_or(&self, val: i64, _: Ordering) -> i64 {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        *lock = prev | val;
        prev
    }

    /// Bitwise "xor" with the current value.
    ///
    /// Performs a bitwise "xor" operation on the current value and the
    /// argument `val`, and sets the new value to the result.
    /// Returns the previous value.
    ///
    /// The `Ordering` argument is ignored; it is accepted only for
    /// compatibility with `std::sync::atomic::AtomicI64`.
    ///
    /// # Panics
    ///
    /// Panics if the underlying `ShardedLock` is poisoned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_shim::AtomicI64;
    /// let foo = AtomicI64::new(0b101101);
    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
    /// ```
    pub fn fetch_xor(&self, val: i64, _: Ordering) -> i64 {
        let mut lock = self.value.write().unwrap();
        let prev = *lock;
        *lock = prev ^ val;
        prev
    }
}

impl From<i64> for AtomicI64 {
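    /// Converts an `i64` into an `AtomicI64`; equivalent to `AtomicI64::new`.
    ///
    /// # Examples
    ///
    /// ```
    /// use atomic_shim::AtomicI64;
    /// let atomic = AtomicI64::from(-23);
    /// assert_eq!(atomic.into_inner(), -23);
    /// ```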
    fn from(value: i64) -> Self {
        AtomicI64::new(value)
    }
}