// type_census/counter.rs

//! Shared counters, suitable for quickly tabulating extant types.
//!
//! The default, [`RelaxedCounter`], is suitable in most circumstances.
5use crossbeam_utils::CachePadded;
6use num_traits::Num;
7use std::sync::atomic::{AtomicIsize, Ordering};
8
9/// A type suitable as a shared census counter.
10pub trait Counter: 'static {
11    /// The primitive type underlying this counter.
12    type Primitive: Num;
13
14    /// A fresh instance of this counter holding the value of `0`.
15    const ZERO: Self;
16
17    /// Eventually increase the value of this counter by `n`.
18    fn add_assign(&self, n: Self::Primitive);
19
20    /// Eventually decrease the value of this counter by `n`.
21    fn sub_assign(&self, n: Self::Primitive);
22
23    /// Eventually retrieve the value of this counter.
24    fn fetch(&self) -> Self::Primitive;
25}
26
27/// An [`AtomicIsize`] padded and aligned to the cache line size to combat
28/// [false sharing].
29///
30/// As a [`Counter`], this type uses [`Ordering::Relaxed`] for
31/// [`Counter::add_assign`], [`Counter::sub_assign`] and [`Counter::fetch`].
32///
33/// [false sharing]: https://en.wikipedia.org/wiki/False_sharing
34#[repr(transparent)]
35pub struct RelaxedCounter {
36    counter: CachePadded<AtomicIsize>,
37}
38
39impl Counter for RelaxedCounter {
40    type Primitive = isize;
41    const ZERO: Self = Self {
42        counter: CachePadded::new(AtomicIsize::new(0)),
43    };
44
45    #[inline(always)]
46    fn add_assign(&self, n: isize) {
47        let _ = self.counter.fetch_add(n, Ordering::Relaxed);
48    }
49
50    #[inline(always)]
51    fn sub_assign(&self, n: isize) {
52        let _ = self.counter.fetch_sub(n, Ordering::Relaxed);
53    }
54
55    #[inline(always)]
56    fn fetch(&self) -> isize {
57        self.counter.load(Ordering::Relaxed)
58    }
59}
60
61/// A counter that minimizes slowdowns from contenation at the cost of increased
62/// memory usage.
63///
64/// Modeled on the ["adaptive multi-counter" described by Travis Downs][multi].
65/// Use this counter type only if [`RelaxedCounter`] performs poorly. Then,
66/// benchmark the performance of your code with [`DistributedCounter`] with
67/// a bucket count of `1`. Increase the number of buckets (up to your
68/// available parallelism) until performance is satisfactory. 
69///
70/// [multi]: https://travisdowns.github.io/blog/2020/07/06/concurrency-costs.html#adaptive-multi-counter
71pub struct DistributedCounter<const BUCKETS: usize> {
72    counters: [CachePadded<AtomicIsize>; BUCKETS],
73}
74
75impl<const BUCKETS: usize> DistributedCounter<BUCKETS> {
76    const fn new() -> Self {
77        const BUCKET: CachePadded<AtomicIsize> = CachePadded::new(AtomicIsize::new(0));
78        Self {
79            counters: [BUCKET; BUCKETS],
80        }
81    }
82
83    fn thread_id() -> usize {
84        use std::sync::atomic::AtomicUsize;
85        static THREADS: AtomicUsize = AtomicUsize::new(0);
86        thread_local! {
87            pub static ID: usize = THREADS.fetch_add(1, Ordering::SeqCst);
88        }
89        ID.try_with(|id| *id).unwrap_or(0)
90    }
91
92    #[inline(always)]
93    fn try_add_assign(bucket: &AtomicIsize, n: isize) -> Result<isize, isize> {
94        let count = bucket.load(Ordering::SeqCst);
95        bucket.compare_exchange(
96            count,
97            count.wrapping_add(n),
98            Ordering::SeqCst,
99            Ordering::SeqCst,
100        )
101    }
102
103    #[inline(always)]
104    fn add_assign(&self, n: isize) {
105        let id = Self::thread_id();
106        let mut bucket = id % BUCKETS;
107        loop {
108            if Self::try_add_assign(&self.counters[bucket], n).is_ok() {
109                return;
110            } else {
111                bucket = bucket.wrapping_add(1) % BUCKETS;
112            }
113        }
114    }
115}
116
117impl<const BUCKETS: usize> Counter for DistributedCounter<BUCKETS> {
118    type Primitive = isize;
119    const ZERO: Self = Self::new();
120
121    fn add_assign(&self, n: isize) {
122        self.add_assign(n)
123    }
124
125    fn sub_assign(&self, n: isize) {
126        self.add_assign(-n)
127    }
128
129    fn fetch(&self) -> isize {
130        let mut sum = 0isize;
131        for counter in &self.counters {
132            sum = sum.wrapping_add(counter.load(Ordering::SeqCst));
133        }
134        sum
135    }
136}