// File: msp430_atomic/lib.rs

1// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2// file at http://rust-lang.org/COPYRIGHT.
3//
4// Copyright 2017 Vadzim Dambrouski, initial port to MSP430.
5//
6// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
7// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
8// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
9// option. This file may not be copied, modified, or distributed
10// except according to those terms.
11
12//! Atomic types
13//!
14//! Atomic types provide primitive shared-memory communication between
15//! threads, and are the building blocks of other concurrent
16//! types.
17//!
18//! This module defines atomic versions of a select number of primitive
19//! types, including [`AtomicBool`], [`AtomicIsize`], and [`AtomicUsize`].
20//! Atomic types present operations that, when used correctly, synchronize
21//! updates between threads.
22//!
23//! [`AtomicBool`]: struct.AtomicBool.html
24//! [`AtomicIsize`]: struct.AtomicIsize.html
25//! [`AtomicUsize`]: struct.AtomicUsize.html
26//!
27//! MSP430 note: All atomic operations in this crate have `SeqCst`
28//! memory [ordering].
29//!
30//! [ordering]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
31//!
32//! Atomic variables are safe to share between threads (they implement [`Sync`])
33//! but they do not themselves provide the mechanism for sharing and follow the
34//! [threading model](https://doc.rust-lang.org/std/thread/#the-threading-model)
35//! of rust.
36//!
37//! [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html
38//!
39//! Most atomic types may be stored in static variables, initialized using
40//! the provided static initializers like [`ATOMIC_BOOL_INIT`]. Atomic statics
41//! are often used for lazy global initialization.
42//!
43//! [`ATOMIC_BOOL_INIT`]: constant.ATOMIC_BOOL_INIT.html
44//!
45//! # Examples
46//!
47//! A simple spinlock:
48//!
49//! ```
50//! use msp430_atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
51//! use std::thread;
52//!
53//! // Initialize SPINLOCK to 0
54//! static SPINLOCK: AtomicUsize = ATOMIC_USIZE_INIT;
55//!
56//! fn main() {
57//!     let thread = thread::spawn(move|| {
58//!         SPINLOCK.store(1);
59//!     });
60//!
61//!     // Wait for the other thread to release the lock
62//!     while SPINLOCK.load() == 0 {}
63//!
64//!     if let Err(panic) = thread.join() {
65//!         println!("Thread had an error: {:?}", panic);
66//!     }
67//! }
68//! ```
69
70#![no_std]
71#![cfg_attr(target_arch = "msp430", feature(asm_experimental_arch))]
72
73#[cfg(target_arch = "msp430")]
74use core::arch::asm;
75use core::cell::UnsafeCell;
76use core::fmt;
77
/// A boolean type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `bool`.
#[repr(C, align(1))]
pub struct AtomicBool {
    // Stored as a `u8` (0 = false, non-zero = true) so the value can be read
    // and written with single byte-sized instructions. `UnsafeCell` provides
    // the interior mutability needed for atomic operations through `&self`.
    v: UnsafeCell<u8>,
}
85
86impl Default for AtomicBool {
87    /// Creates an `AtomicBool` initialized to `false`.
88    fn default() -> Self {
89        Self::new(false)
90    }
91}
92
// `Send` is implicitly implemented for AtomicBool (it owns only a `u8`).
// SAFETY: every mutation of the inner byte goes through the `AtomicOperations`
// primitives, so sharing references across threads cannot produce torn values.
unsafe impl Sync for AtomicBool {}
95
/// A raw pointer type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `*mut T`.
// Align to the pointer size of the target so the pointer can be loaded and
// stored in a single aligned machine access.
#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
pub struct AtomicPtr<T> {
    // `UnsafeCell` provides the interior mutability needed for atomic
    // operations through `&self`; accessed as a `usize` in load/store.
    p: UnsafeCell<*mut T>,
}
105
106impl<T> Default for AtomicPtr<T> {
107    /// Creates a null `AtomicPtr<T>`.
108    fn default() -> AtomicPtr<T> {
109        AtomicPtr::new(core::ptr::null_mut())
110    }
111}
112
// SAFETY: `AtomicPtr` stores only the raw pointer value (an address); it never
// dereferences it, so moving the container between threads is sound.
unsafe impl<T> Send for AtomicPtr<T> {}
// SAFETY: the pointer value is only read/written through the atomic
// `AtomicOperations` primitives, so shared access cannot tear.
unsafe impl<T> Sync for AtomicPtr<T> {}
115
/// An [`AtomicBool`] initialized to `false`.
///
/// Useful as a `static` initializer, mirroring the (now deprecated)
/// `ATOMIC_*_INIT` constants of `core::sync::atomic`.
///
/// [`AtomicBool`]: struct.AtomicBool.html
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
120
121impl AtomicBool {
122    /// Creates a new `AtomicBool`.
123    ///
124    /// # Examples
125    ///
126    /// ```
127    /// use msp430_atomic::AtomicBool;
128    ///
129    /// let atomic_true  = AtomicBool::new(true);
130    /// let atomic_false = AtomicBool::new(false);
131    /// ```
132    #[inline]
133    pub const fn new(v: bool) -> AtomicBool {
134        AtomicBool {
135            v: UnsafeCell::new(v as u8),
136        }
137    }
138
139    /// Returns a mutable reference to the underlying `bool`.
140    ///
141    /// This is safe because the mutable reference guarantees that no other threads are
142    /// concurrently accessing the atomic data.
143    ///
144    /// # Examples
145    ///
146    /// ```
147    /// use msp430_atomic::AtomicBool;
148    ///
149    /// let mut some_bool = AtomicBool::new(true);
150    /// assert_eq!(*some_bool.get_mut(), true);
151    /// *some_bool.get_mut() = false;
152    /// assert_eq!(some_bool.load(), false);
153    /// ```
154    #[inline]
155    pub fn get_mut(&mut self) -> &mut bool {
156        unsafe { &mut *(self.v.get() as *mut bool) }
157    }
158
159    /// Consumes the atomic and returns the contained value.
160    ///
161    /// This is safe because passing `self` by value guarantees that no other threads are
162    /// concurrently accessing the atomic data.
163    ///
164    /// # Examples
165    ///
166    /// ```
167    /// use msp430_atomic::AtomicBool;
168    ///
169    /// let some_bool = AtomicBool::new(true);
170    /// assert_eq!(some_bool.into_inner(), true);
171    /// ```
172    #[inline]
173    pub fn into_inner(self) -> bool {
174        self.v.into_inner() != 0
175    }
176
177    /// Loads a value from the bool.
178    ///
179    /// # Examples
180    ///
181    /// ```
182    /// use msp430_atomic::AtomicBool;
183    ///
184    /// let some_bool = AtomicBool::new(true);
185    ///
186    /// assert_eq!(some_bool.load(), true);
187    /// ```
188    #[inline]
189    pub fn load(&self) -> bool {
190        unsafe { u8::atomic_load(self.v.get()) != 0 }
191    }
192
193    /// Stores a value into the bool.
194    ///
195    /// # Examples
196    ///
197    /// ```
198    /// use msp430_atomic::AtomicBool;
199    ///
200    /// let some_bool = AtomicBool::new(true);
201    ///
202    /// some_bool.store(false);
203    /// assert_eq!(some_bool.load(), false);
204    /// ```
205    #[inline]
206    pub fn store(&self, val: bool) {
207        unsafe {
208            u8::atomic_store(self.v.get(), val as u8);
209        }
210    }
211
212    /// Logical "and" with a boolean value.
213    ///
214    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
215    /// the new value to the result.
216    ///
217    /// # Examples
218    ///
219    /// ```
220    /// use msp430_atomic::AtomicBool;
221    ///
222    /// let foo = AtomicBool::new(true);
223    /// foo.and(false);
224    /// assert_eq!(foo.load(), false);
225    ///
226    /// let foo = AtomicBool::new(true);
227    /// foo.and(true);
228    /// assert_eq!(foo.load(), true);
229    ///
230    /// let foo = AtomicBool::new(false);
231    /// foo.and(false);
232    /// assert_eq!(foo.load(), false);
233    /// ```
234    #[inline]
235    pub fn and(&self, val: bool) {
236        unsafe { u8::atomic_and(self.v.get(), val as u8) }
237    }
238
239    /// Logical "nand" with a boolean value.
240    ///
241    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
242    /// the new value to the result.
243    ///
244    /// # Examples
245    ///
246    /// ```
247    /// use msp430_atomic::AtomicBool;
248    ///
249    /// let foo = AtomicBool::new(true);
250    /// foo.nand(false);
251    /// assert_eq!(foo.load(), true);
252    ///
253    /// let foo = AtomicBool::new(true);
254    /// foo.nand(true);
255    /// assert_eq!(foo.load() as usize, 0);
256    /// assert_eq!(foo.load(), false);
257    ///
258    /// let foo = AtomicBool::new(false);
259    /// foo.nand(false);
260    /// assert_eq!(foo.load(), true);
261    /// ```
262    #[inline]
263    pub fn nand(&self, val: bool) {
264        // We can't use atomic_nand here because it can result in a bool with
265        // an invalid value. This happens because the atomic operation is done
266        // with an 8-bit integer internally, which would set the upper 7 bits.
267        // So we just use xor or swap instead.
268        if val {
269            // !(x & true) == !x
270            // We must invert the bool.
271            self.xor(true)
272        } else {
273            // !(x & false) == true
274            // We must set the bool to true.
275            self.store(true)
276        }
277    }
278
279    /// Logical "or" with a boolean value.
280    ///
281    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
282    /// new value to the result.
283    ///
284    /// # Examples
285    ///
286    /// ```
287    /// use msp430_atomic::AtomicBool;
288    ///
289    /// let foo = AtomicBool::new(true);
290    /// foo.or(false);
291    /// assert_eq!(foo.load(), true);
292    ///
293    /// let foo = AtomicBool::new(true);
294    /// foo.or(true);
295    /// assert_eq!(foo.load(), true);
296    ///
297    /// let foo = AtomicBool::new(false);
298    /// foo.or(false);
299    /// assert_eq!(foo.load(), false);
300    /// ```
301    #[inline]
302    pub fn or(&self, val: bool) {
303        unsafe { u8::atomic_or(self.v.get(), val as u8) }
304    }
305
306    /// Logical "xor" with a boolean value.
307    ///
308    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
309    /// the new value to the result.
310    ///
311    /// # Examples
312    ///
313    /// ```
314    /// use msp430_atomic::AtomicBool;
315    ///
316    /// let foo = AtomicBool::new(true);
317    /// foo.xor(false);
318    /// assert_eq!(foo.load(), true);
319    ///
320    /// let foo = AtomicBool::new(true);
321    /// foo.xor(true);
322    /// assert_eq!(foo.load(), false);
323    ///
324    /// let foo = AtomicBool::new(false);
325    /// foo.xor(false);
326    /// assert_eq!(foo.load(), false);
327    /// ```
328    #[inline]
329    pub fn xor(&self, val: bool) {
330        unsafe { u8::atomic_xor(self.v.get(), val as u8) }
331    }
332}
333
334impl<T> AtomicPtr<T> {
335    /// Creates a new `AtomicPtr`.
336    ///
337    /// # Examples
338    ///
339    /// ```
340    /// use msp430_atomic::AtomicPtr;
341    ///
342    /// let ptr = &mut 5;
343    /// let atomic_ptr  = AtomicPtr::new(ptr);
344    /// ```
345    #[inline]
346    pub const fn new(p: *mut T) -> AtomicPtr<T> {
347        AtomicPtr {
348            p: UnsafeCell::new(p),
349        }
350    }
351
352    /// Returns a mutable reference to the underlying pointer.
353    ///
354    /// This is safe because the mutable reference guarantees that no other threads are
355    /// concurrently accessing the atomic data.
356    ///
357    /// # Examples
358    ///
359    /// ```
360    /// use msp430_atomic::AtomicPtr;
361    ///
362    /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
363    /// *atomic_ptr.get_mut() = &mut 5;
364    /// assert_eq!(unsafe { *atomic_ptr.load() }, 5);
365    /// ```
366    #[inline]
367    pub fn get_mut(&mut self) -> &mut *mut T {
368        unsafe { &mut *self.p.get() }
369    }
370
371    /// Consumes the atomic and returns the contained value.
372    ///
373    /// This is safe because passing `self` by value guarantees that no other threads are
374    /// concurrently accessing the atomic data.
375    ///
376    /// # Examples
377    ///
378    /// ```
379    /// use msp430_atomic::AtomicPtr;
380    ///
381    /// let atomic_ptr = AtomicPtr::new(&mut 5);
382    /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
383    /// ```
384    #[inline]
385    pub fn into_inner(self) -> *mut T {
386        self.p.into_inner()
387    }
388
389    /// Loads a value from the pointer.
390    ///
391    /// # Examples
392    ///
393    /// ```
394    /// use msp430_atomic::AtomicPtr;
395    ///
396    /// let ptr = &mut 5;
397    /// let some_ptr  = AtomicPtr::new(ptr);
398    ///
399    /// let value = some_ptr.load();
400    /// ```
401    #[inline]
402    pub fn load(&self) -> *mut T {
403        unsafe { usize::atomic_load(self.p.get() as *mut usize) as *mut T }
404    }
405
406    /// Stores a value into the pointer.
407    ///
408    /// # Examples
409    ///
410    /// ```
411    /// use msp430_atomic::AtomicPtr;
412    ///
413    /// let ptr = &mut 5;
414    /// let some_ptr  = AtomicPtr::new(ptr);
415    ///
416    /// let other_ptr = &mut 10;
417    ///
418    /// some_ptr.store(other_ptr);
419    /// ```
420    #[inline]
421    pub fn store(&self, ptr: *mut T) {
422        unsafe {
423            usize::atomic_store(self.p.get() as *mut usize, ptr as usize);
424        }
425    }
426}
427
// Generates one atomic integer type: the public wrapper struct, its `INIT`
// constant, trait impls, the method surface, and the per-target
// `AtomicOperations` backend ($asm_suffix selects ".b"/".w" MSP430
// instructions; $align fixes the in-memory alignment).
macro_rules! atomic_int {
    ($int_type:ident $atomic_type:ident $atomic_init:ident $asm_suffix:literal $align:literal) => {
        /// An integer type which can be safely shared between threads.
        ///
        /// This type has the same in-memory representation as the underlying integer type.
        #[repr(C, align($align))]
        pub struct $atomic_type {
            // `UnsafeCell` provides the interior mutability needed for
            // atomic operations through `&self`.
            v: UnsafeCell<$int_type>,
        }

        /// An atomic integer initialized to `0`.
        pub const $atomic_init: $atomic_type = $atomic_type::new(0);

        impl Default for $atomic_type {
            // Defaults to the integer's default, i.e. `0`.
            fn default() -> Self {
                Self::new(Default::default())
            }
        }

        impl fmt::Debug for $atomic_type {
            // Formats as e.g. `AtomicU8(42)`, reading the value atomically.
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                f.debug_tuple(stringify!($atomic_type))
                 .field(&self.load())
                 .finish()
            }
        }

        // Send is implicitly implemented.
        // SAFETY: all mutation goes through the atomic `AtomicOperations`
        // primitives, so shared access cannot tear.
        unsafe impl Sync for $atomic_type {}

        impl $atomic_type {
            /// Creates a new atomic integer.
            ///
            /// # Examples
            ///
            /// ```
            /// use msp430_atomic::AtomicIsize;
            ///
            /// let atomic_forty_two  = AtomicIsize::new(42);
            /// ```
            #[inline]
            pub const fn new(v: $int_type) -> Self {
                $atomic_type {v: UnsafeCell::new(v)}
            }

            /// Returns a mutable reference to the underlying integer.
            ///
            /// This is safe because the mutable reference guarantees that no other threads are
            /// concurrently accessing the atomic data.
            ///
            /// # Examples
            ///
            /// ```
            /// use msp430_atomic::AtomicIsize;
            ///
            /// let mut some_isize = AtomicIsize::new(10);
            /// assert_eq!(*some_isize.get_mut(), 10);
            /// *some_isize.get_mut() = 5;
            /// assert_eq!(some_isize.load(), 5);
            /// ```
            #[inline]
            pub fn get_mut(&mut self) -> &mut $int_type {
                unsafe { &mut *self.v.get() }
            }

            /// Consumes the atomic and returns the contained value.
            ///
            /// This is safe because passing `self` by value guarantees that no other threads are
            /// concurrently accessing the atomic data.
            ///
            /// # Examples
            ///
            /// ```
            /// use msp430_atomic::AtomicIsize;
            ///
            /// let some_isize = AtomicIsize::new(5);
            /// assert_eq!(some_isize.into_inner(), 5);
            /// ```
            #[inline]
            pub fn into_inner(self) -> $int_type {
                 self.v.into_inner()
            }

            /// Loads a value from the atomic integer.
            ///
            /// # Examples
            ///
            /// ```
            /// use msp430_atomic::AtomicIsize;
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.load(), 5);
            /// ```
            #[inline]
            pub fn load(&self) -> $int_type {
                unsafe { $int_type::atomic_load(self.v.get()) }
            }

            /// Stores a value into the atomic integer.
            ///
            /// # Examples
            ///
            /// ```
            /// use msp430_atomic::AtomicIsize;
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// some_isize.store(10);
            /// assert_eq!(some_isize.load(), 10);
            /// ```
            #[inline]
            pub fn store(&self, val: $int_type) {
                unsafe { $int_type::atomic_store(self.v.get(), val); }
            }

            /// Adds to the current value.
            ///
            /// This operation wraps around on overflow. Unlike
            /// `core::sync::atomic`'s `fetch_add`, it does not return the
            /// previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use msp430_atomic::AtomicIsize;
            ///
            /// let foo = AtomicIsize::new(0);
            /// foo.add(10);
            /// assert_eq!(foo.load(), 10);
            /// ```
            #[inline]
            pub fn add(&self, val: $int_type) {
                unsafe { $int_type::atomic_add(self.v.get(), val) }
            }

            /// Subtracts from the current value.
            ///
            /// This operation wraps around on overflow. Unlike
            /// `core::sync::atomic`'s `fetch_sub`, it does not return the
            /// previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use msp430_atomic::AtomicIsize;
            ///
            /// let foo = AtomicIsize::new(0);
            /// foo.sub(10);
            /// assert_eq!(foo.load(), -10);
            /// ```
            #[inline]
            pub fn sub(&self, val: $int_type) {
                unsafe { $int_type::atomic_sub(self.v.get(), val) }
            }

            /// Bitwise "and" with the current value.
            ///
            /// Performs a bitwise "and" operation on the current value and the argument `val`, and
            /// sets the new value to the result.
            ///
            /// # Examples
            ///
            /// ```
            /// use msp430_atomic::AtomicIsize;
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// foo.and(0b110011);
            /// assert_eq!(foo.load(), 0b100001);
            /// ```
            #[inline]
            pub fn and(&self, val: $int_type) {
                unsafe { $int_type::atomic_and(self.v.get(), val) }
            }

            /// Bitwise "or" with the current value.
            ///
            /// Performs a bitwise "or" operation on the current value and the argument `val`, and
            /// sets the new value to the result.
            ///
            /// # Examples
            ///
            /// ```
            /// use msp430_atomic::AtomicIsize;
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// foo.or(0b110011);
            /// assert_eq!(foo.load(), 0b111111);
            /// ```
            #[inline]
            pub fn or(&self, val: $int_type) {
                unsafe { $int_type::atomic_or(self.v.get(), val) }
            }

            /// Bitwise "xor" with the current value.
            ///
            /// Performs a bitwise "xor" operation on the current value and the argument `val`, and
            /// sets the new value to the result.
            ///
            /// # Examples
            ///
            /// ```
            /// use msp430_atomic::AtomicIsize;
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// foo.xor(0b110011);
            /// assert_eq!(foo.load(), 0b011110);
            /// ```
            #[inline]
            pub fn xor(&self, val: $int_type) {
                unsafe { $int_type::atomic_xor(self.v.get(), val) }
            }
        }

        // MSP430 backend: each operation is one read-modify-write instruction
        // operating directly on memory. MSP430 is single-core, so a single
        // instruction cannot be interleaved with another thread of execution
        // (only interrupts), which is what makes these operations atomic here.
        #[cfg(target_arch = "msp430")]
        impl AtomicOperations for $int_type {
            #[inline(always)]
            unsafe fn atomic_store(dst: *mut Self, val: Self) {
                asm!(concat!("mov", $asm_suffix, " {1}, 0({0})"), in(reg) dst, in(reg) val);
            }

            #[inline(always)]
            unsafe fn atomic_load(dst: *const Self) -> Self {
                let out;
                asm!(concat!("mov", $asm_suffix, " @{0}, {1}"), in(reg) dst, lateout(reg) out);
                out
            }

            #[inline(always)]
            unsafe fn atomic_add(dst: *mut Self, val: Self) {
                asm!(concat!("add", $asm_suffix, " {1}, 0({0})"), in(reg) dst, in(reg) val);
            }

            #[inline(always)]
            unsafe fn atomic_sub(dst: *mut Self, val: Self) {
                asm!(concat!("sub", $asm_suffix, " {1}, 0({0})"), in(reg) dst, in(reg) val);
            }

            #[inline(always)]
            unsafe fn atomic_and(dst: *mut Self, val: Self) {
                asm!(concat!("and", $asm_suffix, " {1}, 0({0})"), in(reg) dst, in(reg) val);
            }

            // `bic` (bit clear): *dst &= !val.
            #[inline(always)]
            unsafe fn atomic_clear(dst: *mut Self, val: Self) {
                asm!(concat!("bic", $asm_suffix, " {1}, 0({0})"), in(reg) dst, in(reg) val);
            }

            // `bis` (bit set): *dst |= val.
            #[inline(always)]
            unsafe fn atomic_or(dst: *mut Self, val: Self) {
                asm!(concat!("bis", $asm_suffix, " {1}, 0({0})"), in(reg) dst, in(reg) val);
            }

            #[inline(always)]
            unsafe fn atomic_xor(dst: *mut Self, val: Self) {
                asm!(concat!("xor", $asm_suffix, " {1}, 0({0})"), in(reg) dst, in(reg) val);
            }
        }

        // Host/fallback backend (e.g. for doc tests): delegate to core's
        // atomics with `SeqCst` ordering, discarding the fetched values.
        #[cfg(not(target_arch = "msp430"))]
        impl AtomicOperations for $int_type {
            #[inline(always)]
            unsafe fn atomic_store(dst: *mut Self, val: Self) {
                (*(dst as *const ::core::sync::atomic::$atomic_type))
                    .store(val, ::core::sync::atomic::Ordering::SeqCst);
            }

            #[inline(always)]
            unsafe fn atomic_load(dst: *const Self) -> Self {
                (*(dst as *const ::core::sync::atomic::$atomic_type))
                    .load(::core::sync::atomic::Ordering::SeqCst)
            }

            #[inline(always)]
            unsafe fn atomic_add(dst: *mut Self, val: Self) {
                (*(dst as *const ::core::sync::atomic::$atomic_type))
                    .fetch_add(val, ::core::sync::atomic::Ordering::SeqCst);
            }

            #[inline(always)]
            unsafe fn atomic_sub(dst: *mut Self, val: Self) {
                (*(dst as *const ::core::sync::atomic::$atomic_type))
                    .fetch_sub(val, ::core::sync::atomic::Ordering::SeqCst);
            }

            #[inline(always)]
            unsafe fn atomic_and(dst: *mut Self, val: Self) {
                (*(dst as *const ::core::sync::atomic::$atomic_type))
                    .fetch_and(val, ::core::sync::atomic::Ordering::SeqCst);
            }

            // Matches MSP430 `bic` semantics: *dst &= !val.
            #[inline(always)]
            unsafe fn atomic_clear(dst: *mut Self, val: Self) {
                (*(dst as *const ::core::sync::atomic::$atomic_type))
                    .fetch_and(!val, ::core::sync::atomic::Ordering::SeqCst);
            }

            #[inline(always)]
            unsafe fn atomic_or(dst: *mut Self, val: Self) {
                (*(dst as *const ::core::sync::atomic::$atomic_type))
                    .fetch_or(val, ::core::sync::atomic::Ordering::SeqCst);
            }

            #[inline(always)]
            unsafe fn atomic_xor(dst: *mut Self, val: Self) {
                (*(dst as *const ::core::sync::atomic::$atomic_type))
                    .fetch_xor(val, ::core::sync::atomic::Ordering::SeqCst);
            }
        }
    }
}
732
// 8-bit atomics use byte-sized (".b") MSP430 instructions.
atomic_int! {
    i8 AtomicI8 ATOMIC_I8_INIT ".b" 1
}

atomic_int! {
    u8 AtomicU8 ATOMIC_U8_INIT ".b" 1
}

// 16-bit atomics use word-sized (".w") MSP430 instructions.
atomic_int! {
    i16 AtomicI16 ATOMIC_I16_INIT ".w" 2
}

atomic_int! {
    u16 AtomicU16 ATOMIC_U16_INIT ".w" 2
}

// `isize`/`usize` width depends on the target. On MSP430 itself pointers are
// 16 bits; the wider instantiations only exist so the crate also builds on
// 32-/64-bit hosts (the asm backend is cfg'd to msp430 only, so the ".w"
// suffix never reaches a non-16-bit target's codegen).
#[cfg(target_pointer_width = "16")]
atomic_int! {
    isize AtomicIsize ATOMIC_ISIZE_INIT ".w" 2
}
#[cfg(target_pointer_width = "32")]
atomic_int! {
    isize AtomicIsize ATOMIC_ISIZE_INIT ".w" 4
}
#[cfg(target_pointer_width = "64")]
atomic_int! {
    isize AtomicIsize ATOMIC_ISIZE_INIT ".w" 8
}

#[cfg(target_pointer_width = "16")]
atomic_int! {
    usize AtomicUsize ATOMIC_USIZE_INIT ".w" 2
}
#[cfg(target_pointer_width = "32")]
atomic_int! {
    usize AtomicUsize ATOMIC_USIZE_INIT ".w" 4
}
#[cfg(target_pointer_width = "64")]
atomic_int! {
    usize AtomicUsize ATOMIC_USIZE_INIT ".w" 8
}
774
/// Atomic arithmetic and bitwise operations implemented for numerical types. Each operation is
/// implemented with a single assembly instruction.
///
/// # Safety
///
/// Callers must pass a valid, properly aligned pointer; aliasing rules for
/// raw-pointer access apply.
pub trait AtomicOperations {
    /// Store value into destination pointee.
    unsafe fn atomic_store(dst: *mut Self, val: Self);
    /// Read value from destination pointee.
    unsafe fn atomic_load(dst: *const Self) -> Self;
    /// Add value to destination pointee. Result may wrap around.
    unsafe fn atomic_add(dst: *mut Self, val: Self);
    /// Subtract value from destination pointee. Result may wrap around.
    unsafe fn atomic_sub(dst: *mut Self, val: Self);
    /// Bitwise "and": clear all bits in destination pointee that are zeroed
    /// in value.
    unsafe fn atomic_and(dst: *mut Self, val: Self);
    /// Bit-clear (`*dst &= !val`): clear all bits in destination pointee that
    /// are set in value.
    unsafe fn atomic_clear(dst: *mut Self, val: Self);
    /// Bitwise "or": set all bits in destination pointee that are set in value.
    unsafe fn atomic_or(dst: *mut Self, val: Self);
    /// Bitwise "xor": toggle all bits in destination pointee that are set in value.
    unsafe fn atomic_xor(dst: *mut Self, val: Self);
}
795
796impl fmt::Debug for AtomicBool {
797    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
798        f.debug_tuple("AtomicBool").field(&self.load()).finish()
799    }
800}
801
802impl<T> fmt::Debug for AtomicPtr<T> {
803    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
804        f.debug_tuple("AtomicPtr").field(&self.load()).finish()
805    }
806}