1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
use std::{
num::NonZeroUsize,
thread::{self, JoinHandle},
};
use crossbeam::channel::{self as mpmc, Receiver, Sender};
use once_cell::sync::Lazy;
use crate::{
error::Error,
sink::{OverflowPolicy, Task},
sync::*,
Result,
};
/// A thread pool for processing operations asynchronously.
///
/// Currently only used in [`AsyncPoolSink`].
///
/// # Examples
///
/// ```
/// # use std::sync::Arc;
/// use spdlog::{sink::AsyncPoolSink, ThreadPool};
///
/// # fn main() -> Result<(), spdlog::Error> {
/// # let underlying_sink = spdlog::default_logger().sinks().first().unwrap().clone();
/// let thread_pool = Arc::new(ThreadPool::new()?);
/// let async_pool_sink = AsyncPoolSink::builder()
///     .sink(underlying_sink)
///     .thread_pool(thread_pool)
///     .build()?;
/// # Ok(()) }
/// ```
///
/// [`AsyncPoolSink`]: crate::sink::AsyncPoolSink
// The inner state lives behind an `ArcSwapOption` so that `destroy` can
// atomically swap it to `None` while concurrent `assign_task` calls are
// loading it; a `None` means the pool has already been torn down.
pub struct ThreadPool(ArcSwapOption<ThreadPoolInner>);
// Shared state of a `ThreadPool`: the worker thread handles plus the sending
// half of the task channel.
struct ThreadPoolInner {
    // Each handle is wrapped in `Option` so `destroy` can `take()` it out
    // and `join` by value.
    threads: Vec<Option<JoinHandle<()>>>,
    // `Some` for the whole lifetime of the pool; dropped (via `take()`) by
    // `destroy` to disconnect the channel and let workers exit their loop.
    sender: Option<Sender<Task>>,
}
// Shared, thread-safe user hook invoked on worker threads.
type Callback = Arc<dyn Fn() + Send + Sync + 'static>;
#[allow(missing_docs)]
pub struct ThreadPoolBuilder {
    // Bound of the task channel; see `ThreadPoolBuilder::capacity`.
    capacity: NonZeroUsize,
    // Number of worker threads; currently fixed at 1 (see `threads` method).
    threads: NonZeroUsize,
    on_thread_spawn: Option<Callback>,
    on_thread_finish: Option<Callback>,
}
// Per-thread task consumer; holds a clone of the receiving half of the
// (multi-consumer) task channel.
struct Worker {
    receiver: Receiver<Task>,
}
impl ThreadPool {
    /// Gets a builder of `ThreadPool` with default parameters:
    ///
    /// | Parameter          | Default Value                     |
    /// |--------------------|-----------------------------------|
    /// | [capacity]         | `8192` (may change in the future) |
    /// | [on_thread_spawn]  | `None`                            |
    /// | [on_thread_finish] | `None`                            |
    ///
    /// [capacity]: ThreadPoolBuilder::capacity
    /// [on_thread_spawn]: ThreadPoolBuilder::on_thread_spawn
    /// [on_thread_finish]: ThreadPoolBuilder::on_thread_finish
    #[must_use]
    pub fn builder() -> ThreadPoolBuilder {
        ThreadPoolBuilder {
            capacity: NonZeroUsize::new(8192).unwrap(),
            threads: NonZeroUsize::new(1).unwrap(),
            on_thread_spawn: None,
            on_thread_finish: None,
        }
    }
    /// Constructs a `ThreadPool` with default parameters (see documentation of
    /// [`ThreadPool::builder`]).
    pub fn new() -> Result<Self> {
        Self::builder().build()
    }
    // Queues `task` for execution on a pool thread.
    //
    // With `OverflowPolicy::Block`, the call blocks while the channel is
    // full; with `DropIncoming`, a full channel surfaces as an error from
    // `try_send` instead of blocking.
    pub(super) fn assign_task(&self, task: Task, overflow_policy: OverflowPolicy) -> Result<()> {
        // `load` returns a guard that keeps the inner `Arc` alive for the
        // duration of this call (relevant to the `destroy` race below).
        let inner = self.0.load();
        if let Some(inner) = inner.as_ref() {
            // `sender` is only taken out in `destroy`, and that happens after
            // the inner has been swapped to `None` and uniquely owned, so it
            // is always `Some` here.
            let sender = inner.sender.as_ref().unwrap();
            match overflow_policy {
                OverflowPolicy::Block => sender.send(task).map_err(Error::from_crossbeam_send),
                OverflowPolicy::DropIncoming => sender
                    .try_send(task)
                    .map_err(Error::from_crossbeam_try_send),
            }
        } else {
            // https://github.com/SpriteOvO/spdlog-rs/issues/120
            //
            // The thread pool has been destroyed
            //
            // TODO: Return an error and perform the task directly on the current thread.
            Ok(())
        }
    }
    // Shuts the pool down: disconnects the task channel and joins all worker
    // threads, which first drain any remaining queued tasks.
    pub(super) fn destroy(&self) {
        // Swap the inner out first so concurrent `assign_task` calls observe
        // `None` and become no-ops.
        if let Some(inner) = self.0.swap(None) {
            // https://github.com/SpriteOvO/spdlog-rs/issues/120
            //
            // If a task is being assigned, there will be more than one strong reference,
            // causing `into_inner` to return `None`.
            //
            // TODO: Skip it if it's None. This avoids panic, but might introduce a memory
            // leak? However, it's not a big deal since this isn't a frequent operation.
            // Anyway, we should eventually fix it.
            if let Some(mut inner) = Arc::into_inner(inner) {
                // drop our sender, threads will break the loop after receiving and processing
                // the remaining tasks
                inner.sender.take();
                for thread in &mut inner.threads {
                    if let Some(thread) = thread.take() {
                        thread.join().expect("failed to join a thread from pool");
                    }
                }
            }
        }
    }
}
impl Drop for ThreadPool {
    fn drop(&mut self) {
        // Graceful shutdown: lets workers drain remaining tasks, then joins
        // them. Safe to call even if `destroy` already ran (it's idempotent —
        // the inner has been swapped to `None` by then).
        self.destroy();
    }
}
impl ThreadPoolBuilder {
    /// Specifies the capacity of the operation channel.
    ///
    /// This parameter is **optional**, and defaults to `8192` (may change in
    /// the future).
    ///
    /// When a new operation is incoming, but the channel is full, it will be
    /// handled by sink according to the [`OverflowPolicy`] that has been set.
    #[must_use]
    pub fn capacity(&mut self, capacity: NonZeroUsize) -> &mut Self {
        self.capacity = capacity;
        self
    }
    // The current Sinks are not beneficial with more than one thread, so the method
    // is not public.
    #[must_use]
    #[allow(dead_code)]
    fn threads(&mut self, threads: NonZeroUsize) -> &mut Self {
        self.threads = threads;
        self
    }
    /// Provide a function that will be called on each thread of the thread pool
    /// immediately after it is spawned. This can, for example, be used to set
    /// core affinity for each thread.
    #[must_use]
    pub fn on_thread_spawn<F>(&mut self, f: F) -> &mut Self
    where
        F: Fn() + Send + Sync + 'static,
    {
        self.on_thread_spawn = Some(Arc::new(f));
        self
    }
    /// Provide a function that will be called on each thread of the thread pool
    /// just before the thread finishes.
    #[must_use]
    pub fn on_thread_finish<F>(&mut self, f: F) -> &mut Self
    where
        F: Fn() + Send + Sync + 'static,
    {
        self.on_thread_finish = Some(Arc::new(f));
        self
    }
    /// Builds a [`ThreadPool`].
    pub fn build(&self) -> Result<ThreadPool> {
        // Bounded MPMC channel: the bound is what makes `OverflowPolicy`
        // meaningful (a full channel blocks or rejects).
        let (sender, receiver) = mpmc::bounded(self.capacity.get());
        // Spawn one worker per configured thread; each gets its own clone of
        // the receiver and of the user hooks.
        let threads = (0..self.threads.get())
            .map(|_| {
                let receiver = receiver.clone();
                let spawn_hook = self.on_thread_spawn.clone();
                let finish_hook = self.on_thread_finish.clone();
                Some(thread::spawn(move || {
                    if let Some(hook) = spawn_hook {
                        hook();
                    }
                    // Runs until the channel is disconnected and drained.
                    Worker { receiver }.run();
                    if let Some(hook) = finish_hook {
                        hook();
                    }
                }))
            })
            .collect();
        Ok(ThreadPool(ArcSwapOption::new(Some(Arc::new(
            ThreadPoolInner {
                threads,
                sender: Some(sender),
            },
        )))))
    }
    /// Builds a `Arc<ThreadPool>`.
    ///
    /// This is a shorthand method for `.build().map(Arc::new)`.
    pub fn build_arc(&self) -> Result<Arc<ThreadPool>> {
        Ok(Arc::new(self.build()?))
    }
}
impl Worker {
    // Worker loop: executes tasks from the shared channel one by one.
    fn run(&self) {
        // `Receiver::iter()` blocks on each `recv()` and finishes once the
        // channel is disconnected (all senders dropped) and emptied — the
        // same termination condition as a `while let Ok(..) = recv()` loop.
        for task in self.receiver.iter() {
            task.exec();
        }
    }
}
#[must_use]
pub(crate) fn default_thread_pool() -> Arc<ThreadPool> {
    // Keep only a `Weak` reference globally so the shared pool is dropped
    // (and its threads joined) once the last user releases it, instead of
    // living for the whole program.
    static POOL_WEAK: Lazy<Mutex<Weak<ThreadPool>>> = Lazy::new(|| Mutex::new(Weak::new()));
    let mut pool_weak = POOL_WEAK.lock_expect();
    // Fast path: an earlier caller's pool is still alive — share it.
    if let Some(existing) = pool_weak.upgrade() {
        return existing;
    }
    // Otherwise build a fresh default pool and remember it weakly.
    let pool = ThreadPool::builder().build_arc().unwrap();
    *pool_weak = Arc::downgrade(&pool);
    pool
}
#[cfg(test)]
mod tests {
    use std::{thread::sleep, time::Duration};
    use super::*;
    // https://github.com/SpriteOvO/spdlog-rs/issues/120
    //
    // Regression test: `destroy` must not panic when it races with
    // concurrent `assign_task` calls, which temporarily hold extra strong
    // references to the inner `Arc` and make `Arc::into_inner` return `None`.
    #[test]
    fn inner_arc_multiple_strong_refs() {
        // Capacity 1 makes the channel trivially easy to fill, so later
        // `Block` assigns reliably park inside `send`.
        let thread_pool = ThreadPool::builder()
            .capacity(1.try_into().unwrap())
            .build_arc()
            .unwrap();
        // Each task sleeps 1s on the worker, keeping the channel full long
        // enough for the racing threads below to overlap with `destroy`.
        let task = || Task::__ForTestUse {
            sleep: Some(Duration::from_secs(1)),
        };
        // Prime the pool: occupies the worker / the single channel slot.
        thread_pool
            .assign_task(task(), OverflowPolicy::Block)
            .unwrap();
        let (first_blocked_assign, second_blocked_assign, destroy, third_assign) =
            std::thread::scope(|s| {
                // These two assigns block on the full channel while their
                // `load` guards keep extra strong refs on the inner `Arc`.
                let first_blocked_assign = s.spawn({
                    let thread_pool = thread_pool.clone();
                    move || {
                        thread_pool
                            .assign_task(task(), OverflowPolicy::Block)
                            .unwrap();
                    }
                });
                let second_blocked_assign = s.spawn({
                    let thread_pool = thread_pool.clone();
                    move || {
                        thread_pool
                            .assign_task(task(), OverflowPolicy::Block)
                            .unwrap();
                    }
                });
                // Give the assigns above time to actually start blocking.
                sleep(Duration::from_millis(200));
                // Destroy while the blocked assigns still hold references —
                // this is the exact scenario from issue #120.
                let destroy = s.spawn({
                    let thread_pool = thread_pool.clone();
                    move || {
                        thread_pool.destroy();
                    }
                });
                // An assign racing with (or arriving after) destruction must
                // complete with `Ok`, never panic.
                let third_assign = s.spawn({
                    let thread_pool = thread_pool.clone();
                    move || {
                        thread_pool
                            .assign_task(task(), OverflowPolicy::Block)
                            .unwrap();
                    }
                });
                (
                    first_blocked_assign.join(),
                    second_blocked_assign.join(),
                    destroy.join(),
                    third_assign.join(),
                )
            });
        // All four threads must have finished without panicking.
        first_blocked_assign.unwrap();
        second_blocked_assign.unwrap();
        destroy.unwrap();
        third_assign.unwrap();
    }
}