rayon_core/lib.rs
//! Rayon-core houses the core stable APIs of Rayon.
//!
//! These APIs have been mirrored in the Rayon crate, and it is recommended to use them from there.
//!
//! [`join`] is used to take two closures and potentially run them in parallel (see the sketch below).
//!   - It will run them in parallel if task B is stolen by another thread before task A finishes.
//!   - It will run them sequentially if task A finishes before task B is stolen; the calling thread then continues with task B.
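//!
//! For example, a minimal sketch of [`join`] on the implicit global pool:
//!
//! ```
//! # use rayon_core as rayon;
//! let (a, b) = rayon::join(|| 1 + 1, || 2 + 2);
//! assert_eq!((a, b), (2, 4));
//! ```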
//!
//! [`scope`] creates a scope in which you can run any number of parallel tasks.
//! These tasks can spawn nested tasks and scopes, but given the nature of work stealing, the order of execution cannot be guaranteed.
//! The scope will exist until all tasks spawned within the scope have completed.
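//!
//! A minimal sketch of [`scope`] with two spawned tasks that borrow local data:
//!
//! ```
//! # use rayon_core as rayon;
//! let mut left = 0;
//! let mut right = 0;
//! rayon::scope(|s| {
//!     s.spawn(|_| left = 1);
//!     s.spawn(|_| right = 2);
//! });
//! assert_eq!((left, right), (1, 2));
//! ```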
//!
//! [`spawn`] adds a task into the 'static' or 'global' scope, or a local scope created by the [`scope()`] function.
//!
//! [`ThreadPool`] can be used to create your own thread pools (using [`ThreadPoolBuilder`]) or to customize the global one.
//! Tasks spawned within the pool (using [`install()`], [`join()`], etc.) will be added to a deque,
//! where they become available for work stealing from other threads in the local threadpool.
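//!
//! As a sketch, running work on a custom pool instead of the global one (the two-thread
//! size here is arbitrary):
//!
//! ```
//! # use rayon_core as rayon;
//! let pool = rayon::ThreadPoolBuilder::new().num_threads(2).build().unwrap();
//! let pair = pool.install(|| rayon::join(|| 1, || 2));
//! assert_eq!(pair, (1, 2));
//! ```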
//!
//! [`join`]: fn.join.html
//! [`scope`]: fn.scope.html
//! [`scope()`]: fn.scope.html
//! [`spawn`]: fn.spawn.html
//! [`ThreadPool`]: struct.ThreadPool.html
//! [`install()`]: struct.ThreadPool.html#method.install
//! [`spawn()`]: struct.ThreadPool.html#method.spawn
//! [`join()`]: struct.ThreadPool.html#method.join
//! [`ThreadPoolBuilder`]: struct.ThreadPoolBuilder.html
//!
//! # Global fallback when threading is unsupported
//!
//! Rayon uses `std` APIs for threading, but some targets have incomplete implementations that
//! always return `Unsupported` errors. The WebAssembly `wasm32-unknown-unknown` and `wasm32-wasi`
//! targets are notable examples of this. Rather than panicking on the unsupported error when
//! creating the implicit global threadpool, Rayon configures a fallback mode instead.
//!
//! This fallback mode mostly functions as if it were using a single-threaded "pool", like setting
//! `RAYON_NUM_THREADS=1`. For example, `join` will execute its two closures sequentially, since
//! there is no other thread to share the work. However, since the pool is not running independently
//! of the main thread, non-blocking calls like `spawn` may not execute at all, unless a lower-
//! priority call like `broadcast` gives them an opening. The fallback mode does not try to emulate
//! anything like thread preemption or `async` task switching, but `yield_now` or `yield_local`
//! can also volunteer execution time.
//!
//! Explicit `ThreadPoolBuilder` methods always report their error without any fallback.
//!
//! # Restricting multiple versions
//!
//! In order to ensure proper coordination between threadpools, and especially
//! to make sure there's only one global threadpool, `rayon-core` is actively
//! restricted from building multiple versions of itself into a single target.
//! You may see a build error like this in violation:
//!
//! ```text
//! error: native library `rayon-core` is being linked to by more
//! than one package, and can only be linked to by one package
//! ```
//!
//! While we strive to keep `rayon-core` semver-compatible, it's still
//! possible to arrive at this situation if different crates have overly
//! restrictive tilde or inequality requirements for `rayon-core`. The
//! conflicting requirements will need to be resolved before the build will
//! succeed.

#![warn(rust_2018_idioms)]

use std::any::Any;
use std::env;
use std::error::Error;
use std::fmt;
use std::io;
use std::marker::PhantomData;
use std::str::FromStr;
use std::thread;

#[macro_use]
mod private;

mod broadcast;
mod job;
mod join;
mod latch;
mod registry;
mod scope;
mod sleep;
mod spawn;
mod thread_pool;
mod unwind;
mod worker_local;

mod compile_fail;
mod test;

pub mod tlv;

pub use self::broadcast::{broadcast, spawn_broadcast, BroadcastContext};
pub use self::join::{join, join_context};
pub use self::registry::ThreadBuilder;
pub use self::registry::{mark_blocked, mark_unblocked, Registry};
pub use self::scope::{in_place_scope, scope, Scope};
pub use self::scope::{in_place_scope_fifo, scope_fifo, ScopeFifo};
pub use self::spawn::{spawn, spawn_fifo};
pub use self::thread_pool::current_thread_has_pending_tasks;
pub use self::thread_pool::current_thread_index;
pub use self::thread_pool::ThreadPool;
pub use self::thread_pool::{yield_local, yield_now, Yield};
pub use worker_local::WorkerLocal;

use self::registry::{CustomSpawn, DefaultSpawn, ThreadSpawn};

/// Returns the maximum number of threads that Rayon supports in a single thread-pool.
///
/// If a higher thread count is requested by calling `ThreadPoolBuilder::num_threads` or by setting
/// the `RAYON_NUM_THREADS` environment variable, then it will be reduced to this maximum.
///
/// The value may vary between different targets, and is subject to change in new Rayon versions.
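///
/// # Examples
///
/// A minimal sketch: a requested thread count can be compared against this limit.
///
/// ```
/// # use rayon_core as rayon;
/// let requested: usize = 1_000_000;
/// let effective = requested.min(rayon::max_num_threads());
/// assert!(effective <= rayon::max_num_threads());
/// ```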
pub fn max_num_threads() -> usize {
    // We are limited by the bits available in the sleep counter's `AtomicUsize`.
    crate::sleep::THREADS_MAX
}

/// Returns the number of threads in the current registry. If this
/// code is executing within a Rayon thread-pool, then this will be
/// the number of threads for the thread-pool of the current
/// thread. Otherwise, it will be the number of threads for the global
/// thread-pool.
///
/// This can be useful when trying to judge how many times to split
/// parallel work (the parallel iterator traits use this value
/// internally for this purpose).
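///
/// For example, a sketch using an explicitly built two-thread pool:
///
/// ```
/// # use rayon_core as rayon;
/// let pool = rayon::ThreadPoolBuilder::new().num_threads(2).build().unwrap();
/// pool.install(|| assert_eq!(rayon::current_num_threads(), 2));
/// ```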
///
/// # Future compatibility note
///
/// Note that unless this thread-pool was created with a
/// builder that specifies the number of threads, then this
/// number may vary over time in future versions (see [the
/// `num_threads()` method for details][snt]).
///
/// [snt]: struct.ThreadPoolBuilder.html#method.num_threads
pub fn current_num_threads() -> usize {
    crate::registry::Registry::current_num_threads()
}

/// Error when initializing a thread pool.
#[derive(Debug)]
pub struct ThreadPoolBuildError {
    kind: ErrorKind,
}

#[derive(Debug)]
enum ErrorKind {
    GlobalPoolAlreadyInitialized,
    IOError(io::Error),
}

/// Used to create a new [`ThreadPool`] or to configure the global rayon thread pool.
/// ## Creating a ThreadPool
/// The following creates a thread pool with 22 threads.
///
/// ```rust
/// # use rayon_core as rayon;
/// let pool = rayon::ThreadPoolBuilder::new().num_threads(22).build().unwrap();
/// ```
///
/// To instead configure the global thread pool, use [`build_global()`]:
///
/// ```rust
/// # use rayon_core as rayon;
/// rayon::ThreadPoolBuilder::new().num_threads(22).build_global().unwrap();
/// ```
///
/// [`ThreadPool`]: struct.ThreadPool.html
/// [`build_global()`]: struct.ThreadPoolBuilder.html#method.build_global
pub struct ThreadPoolBuilder<S = DefaultSpawn> {
    /// The number of threads in the rayon thread pool.
    /// If zero, the `RAYON_NUM_THREADS` environment variable is consulted.
    /// If that is invalid or zero, the default is used.
    num_threads: usize,

    /// Custom closure, if any, to handle a panic that we cannot propagate
    /// anywhere else.
    panic_handler: Option<Box<PanicHandler>>,

    /// Closure to compute the name of a thread.
    get_thread_name: Option<Box<dyn FnMut(usize) -> String>>,

    /// The stack size for the created worker threads
    stack_size: Option<usize>,

    /// Closure invoked on deadlock.
    deadlock_handler: Option<Box<DeadlockHandler>>,

    /// Closure invoked on worker thread start.
    start_handler: Option<Box<StartHandler>>,

    /// Closure invoked on worker thread exit.
    exit_handler: Option<Box<ExitHandler>>,

    /// Closure invoked to spawn threads.
    spawn_handler: S,

    /// Closure invoked when starting computations in a thread.
    acquire_thread_handler: Option<Box<AcquireThreadHandler>>,

    /// Closure invoked when blocking in a thread.
    release_thread_handler: Option<Box<ReleaseThreadHandler>>,

    /// If false, worker threads will execute spawned jobs in a
    /// "depth-first" fashion. If true, they will execute them in a
    /// "breadth-first" fashion. Depth-first is the default.
    breadth_first: bool,
}

/// Contains the rayon thread pool configuration. Use [`ThreadPoolBuilder`] instead.
///
/// [`ThreadPoolBuilder`]: struct.ThreadPoolBuilder.html
#[deprecated(note = "Use `ThreadPoolBuilder`")]
#[derive(Default)]
pub struct Configuration {
    builder: ThreadPoolBuilder,
}

/// The type for a panic handling closure. Note that this same closure
/// may be invoked multiple times in parallel.
type PanicHandler = dyn Fn(Box<dyn Any + Send>) + Send + Sync;

/// The type for a closure that gets invoked when the Rayon thread pool deadlocks.
type DeadlockHandler = dyn Fn() + Send + Sync;

/// The type for a closure that gets invoked when a thread starts. The
/// closure is passed the index of the thread on which it is invoked.
/// Note that this same closure may be invoked multiple times in parallel.
type StartHandler = dyn Fn(usize) + Send + Sync;

/// The type for a closure that gets invoked when a thread exits. The
/// closure is passed the index of the thread on which it is invoked.
/// Note that this same closure may be invoked multiple times in parallel.
type ExitHandler = dyn Fn(usize) + Send + Sync;

// NB: We can't `#[derive(Default)]` because `S` is left ambiguous.
impl Default for ThreadPoolBuilder {
    fn default() -> Self {
        ThreadPoolBuilder {
            num_threads: 0,
            panic_handler: None,
            get_thread_name: None,
            stack_size: None,
            start_handler: None,
            exit_handler: None,
            deadlock_handler: None,
            acquire_thread_handler: None,
            release_thread_handler: None,
            spawn_handler: DefaultSpawn,
            breadth_first: false,
        }
    }
}

/// The type for a closure that gets invoked before starting computations in a thread.
/// Note that this same closure may be invoked multiple times in parallel.
type AcquireThreadHandler = dyn Fn() + Send + Sync;

/// The type for a closure that gets invoked before blocking in a thread.
/// Note that this same closure may be invoked multiple times in parallel.
type ReleaseThreadHandler = dyn Fn() + Send + Sync;

impl ThreadPoolBuilder {
    /// Creates and returns a valid rayon thread pool builder, but does not initialize it.
    pub fn new() -> Self {
        Self::default()
    }
}

/// Note: the `S: ThreadSpawn` constraint is an internal implementation detail for the
/// default spawn and those set by [`spawn_handler`](#method.spawn_handler).
impl<S> ThreadPoolBuilder<S>
where
    S: ThreadSpawn,
{
    /// Creates a new `ThreadPool` initialized using this configuration.
    pub fn build(self) -> Result<ThreadPool, ThreadPoolBuildError> {
        ThreadPool::build(self)
    }

    /// Initializes the global thread pool. This initialization is
    /// **optional**. If you do not call this function, the thread pool
    /// will be automatically initialized with the default
    /// configuration. Calling `build_global` is not recommended, except
    /// in two scenarios:
    ///
    /// - You wish to change the default configuration.
    /// - You are running a benchmark, in which case initializing may
    ///   yield slightly more consistent results, since the worker threads
    ///   will already be ready to go even in the first iteration. But
    ///   this cost is minimal.
    ///
    /// Initialization of the global thread pool happens exactly
    /// once. Once started, the configuration cannot be
    /// changed. Therefore, if you call `build_global` a second time, it
    /// will return an error. An `Ok` result indicates that this
    /// is the first initialization of the thread pool.
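    ///
    /// # Examples
    ///
    /// A sketch of the "second call fails" behaviour described above:
    ///
    /// ```
    /// # use rayon_core as rayon;
    /// rayon::ThreadPoolBuilder::new().num_threads(2).build_global().unwrap();
    /// // The global pool is now initialized, so a second attempt returns an error.
    /// assert!(rayon::ThreadPoolBuilder::new().build_global().is_err());
    /// ```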
    pub fn build_global(self) -> Result<(), ThreadPoolBuildError> {
        let registry = registry::init_global_registry(self)?;
        registry.wait_until_primed();
        Ok(())
    }
}

impl ThreadPoolBuilder {
    /// Creates a scoped `ThreadPool` initialized using this configuration.
    ///
    /// This is a convenience function for building a pool using [`std::thread::scope`]
    /// to spawn threads in a [`spawn_handler`](#method.spawn_handler).
    /// The threads in this pool will start by calling `wrapper`, which should
    /// do initialization and continue by calling `ThreadBuilder::run()`.
    ///
    /// [`std::thread::scope`]: https://doc.rust-lang.org/std/thread/fn.scope.html
    ///
    /// # Examples
    ///
    /// A scoped pool may be useful in combination with scoped thread-local variables.
    ///
    /// ```
    /// # use rayon_core as rayon;
    ///
    /// scoped_tls::scoped_thread_local!(static POOL_DATA: Vec<i32>);
    ///
    /// fn main() -> Result<(), rayon::ThreadPoolBuildError> {
    ///     let pool_data = vec![1, 2, 3];
    ///
    ///     // We haven't assigned any TLS data yet.
    ///     assert!(!POOL_DATA.is_set());
    ///
    ///     rayon::ThreadPoolBuilder::new()
    ///         .build_scoped(
    ///             // Borrow `pool_data` in TLS for each thread.
    ///             |thread| POOL_DATA.set(&pool_data, || thread.run()),
    ///             // Do some work that needs the TLS data.
    ///             |pool| pool.install(|| assert!(POOL_DATA.is_set())),
    ///         )?;
    ///
    ///     // Once we've returned, `pool_data` is no longer borrowed.
    ///     drop(pool_data);
    ///     Ok(())
    /// }
    /// ```
    pub fn build_scoped<W, F, R>(self, wrapper: W, with_pool: F) -> Result<R, ThreadPoolBuildError>
    where
        W: Fn(ThreadBuilder) + Sync, // expected to call `run()`
        F: FnOnce(&ThreadPool) -> R,
    {
        std::thread::scope(|scope| {
            let pool = self
                .spawn_handler(|thread| {
                    let mut builder = std::thread::Builder::new();
                    if let Some(name) = thread.name() {
                        builder = builder.name(name.to_string());
                    }
                    if let Some(size) = thread.stack_size() {
                        builder = builder.stack_size(size);
                    }
                    builder.spawn_scoped(scope, || wrapper(thread))?;
                    Ok(())
                })
                .build()?;
            let result = unwind::halt_unwinding(|| with_pool(&pool));
            pool.wait_until_stopped();
            match result {
                Ok(result) => Ok(result),
                Err(err) => unwind::resume_unwinding(err),
            }
        })
    }
}

impl<S> ThreadPoolBuilder<S> {
    /// Sets a custom function for spawning threads.
    ///
    /// Note that the threads will not exit until after the pool is dropped. It
    /// is up to the caller to wait for thread termination if that is important
    /// for any invariants. For instance, threads created in [`std::thread::scope`]
    /// will be joined before that scope returns, and this will block indefinitely
    /// if the pool is leaked. Furthermore, the global thread pool doesn't terminate
    /// until the entire process exits!
    ///
    /// # Examples
    ///
    /// A minimal spawn handler just needs to call `run()` from an independent thread.
    ///
    /// ```
    /// # use rayon_core as rayon;
    /// fn main() -> Result<(), rayon::ThreadPoolBuildError> {
    ///     let pool = rayon::ThreadPoolBuilder::new()
    ///         .spawn_handler(|thread| {
    ///             std::thread::spawn(|| thread.run());
    ///             Ok(())
    ///         })
    ///         .build()?;
    ///
    ///     pool.install(|| println!("Hello from my custom thread!"));
    ///     Ok(())
    /// }
    /// ```
    ///
    /// The default spawn handler sets the name and stack size if given, and propagates
    /// any errors from the thread builder.
    ///
    /// ```
    /// # use rayon_core as rayon;
    /// fn main() -> Result<(), rayon::ThreadPoolBuildError> {
    ///     let pool = rayon::ThreadPoolBuilder::new()
    ///         .spawn_handler(|thread| {
    ///             let mut b = std::thread::Builder::new();
    ///             if let Some(name) = thread.name() {
    ///                 b = b.name(name.to_owned());
    ///             }
    ///             if let Some(stack_size) = thread.stack_size() {
    ///                 b = b.stack_size(stack_size);
    ///             }
    ///             b.spawn(|| thread.run())?;
    ///             Ok(())
    ///         })
    ///         .build()?;
    ///
    ///     pool.install(|| println!("Hello from my fully custom thread!"));
    ///     Ok(())
    /// }
    /// ```
    ///
    /// This can also be used for a pool of scoped threads like [`crossbeam::scope`],
    /// or [`std::thread::scope`] introduced in Rust 1.63, which is encapsulated in
    /// [`build_scoped`](#method.build_scoped).
    ///
    /// [`crossbeam::scope`]: https://docs.rs/crossbeam/0.8/crossbeam/fn.scope.html
    /// [`std::thread::scope`]: https://doc.rust-lang.org/std/thread/fn.scope.html
    ///
    /// ```
    /// # use rayon_core as rayon;
    /// fn main() -> Result<(), rayon::ThreadPoolBuildError> {
    ///     std::thread::scope(|scope| {
    ///         let pool = rayon::ThreadPoolBuilder::new()
    ///             .spawn_handler(|thread| {
    ///                 let mut builder = std::thread::Builder::new();
    ///                 if let Some(name) = thread.name() {
    ///                     builder = builder.name(name.to_string());
    ///                 }
    ///                 if let Some(size) = thread.stack_size() {
    ///                     builder = builder.stack_size(size);
    ///                 }
    ///                 builder.spawn_scoped(scope, || {
    ///                     // Add any scoped initialization here, then run!
    ///                     thread.run()
    ///                 })?;
    ///                 Ok(())
    ///             })
    ///             .build()?;
    ///
    ///         pool.install(|| println!("Hello from my custom scoped thread!"));
    ///         Ok(())
    ///     })
    /// }
    /// ```
    pub fn spawn_handler<F>(self, spawn: F) -> ThreadPoolBuilder<CustomSpawn<F>>
    where
        F: FnMut(ThreadBuilder) -> io::Result<()>,
    {
        ThreadPoolBuilder {
            spawn_handler: CustomSpawn::new(spawn),
            // ..self
            num_threads: self.num_threads,
            panic_handler: self.panic_handler,
            get_thread_name: self.get_thread_name,
            stack_size: self.stack_size,
            start_handler: self.start_handler,
            exit_handler: self.exit_handler,
            deadlock_handler: self.deadlock_handler,
            acquire_thread_handler: self.acquire_thread_handler,
            release_thread_handler: self.release_thread_handler,
            breadth_first: self.breadth_first,
        }
    }

    /// Returns a mutable reference to the current spawn handler.
    fn get_spawn_handler(&mut self) -> &mut S {
        &mut self.spawn_handler
    }

    /// Get the number of threads that will be used for the thread
    /// pool. See `num_threads()` for more information.
    fn get_num_threads(&self) -> usize {
        if self.num_threads > 0 {
            self.num_threads
        } else {
            let default = || {
                thread::available_parallelism()
                    .map(|n| n.get())
                    .unwrap_or(1)
            };

            match env::var("RAYON_NUM_THREADS")
                .ok()
                .and_then(|s| usize::from_str(&s).ok())
            {
                Some(x @ 1..) => return x,
                Some(0) => return default(),
                _ => {}
            }

            // Support for deprecated `RAYON_RS_NUM_CPUS`.
            match env::var("RAYON_RS_NUM_CPUS")
                .ok()
                .and_then(|s| usize::from_str(&s).ok())
            {
                Some(x @ 1..) => x,
                _ => default(),
            }
        }
    }

    /// Get the thread name for the thread with the given index.
    fn get_thread_name(&mut self, index: usize) -> Option<String> {
        let f = self.get_thread_name.as_mut()?;
        Some(f(index))
    }

    /// Sets a closure which takes a thread index and returns
    /// the thread's name.
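    ///
    /// # Examples
    ///
    /// A minimal sketch; the name format here is just an illustration.
    ///
    /// ```
    /// # use rayon_core as rayon;
    /// let pool = rayon::ThreadPoolBuilder::new()
    ///     .num_threads(2)
    ///     .thread_name(|index| format!("rayon-worker-{}", index))
    ///     .build()
    ///     .unwrap();
    /// pool.install(|| {
    ///     let name = std::thread::current().name().unwrap().to_owned();
    ///     assert!(name.starts_with("rayon-worker-"));
    /// });
    /// ```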
    pub fn thread_name<F>(mut self, closure: F) -> Self
    where
        F: FnMut(usize) -> String + 'static,
    {
        self.get_thread_name = Some(Box::new(closure));
        self
    }

    /// Sets the number of threads to be used in the rayon threadpool.
    ///
    /// If you specify a non-zero number of threads using this
    /// function, then the resulting thread-pools are guaranteed to
    /// start at most this number of threads.
    ///
    /// If `num_threads` is 0, or you do not call this function, then
    /// the Rayon runtime will select the number of threads
    /// automatically. At present, this is based on the
    /// `RAYON_NUM_THREADS` environment variable (if set),
    /// or the number of logical CPUs (otherwise).
    /// In the future, however, the default behavior may
    /// change to dynamically add or remove threads as needed.
    ///
    /// **Future compatibility warning:** Given the default behavior
    /// may change in the future, if you wish to rely on a fixed
    /// number of threads, you should use this function to specify
    /// that number. To reproduce the current default behavior, you
    /// may wish to use [`std::thread::available_parallelism`]
    /// to query the number of CPUs dynamically.
    ///
    /// **Old environment variable:** `RAYON_NUM_THREADS` is a one-to-one
    /// replacement of the now deprecated `RAYON_RS_NUM_CPUS` environment
    /// variable. If both variables are specified, `RAYON_NUM_THREADS` will
    /// be preferred.
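    ///
    /// # Examples
    ///
    /// A minimal sketch of pinning a pool to a fixed size:
    ///
    /// ```
    /// # use rayon_core as rayon;
    /// let pool = rayon::ThreadPoolBuilder::new().num_threads(4).build().unwrap();
    /// assert_eq!(pool.current_num_threads(), 4);
    /// ```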
    pub fn num_threads(mut self, num_threads: usize) -> Self {
        self.num_threads = num_threads;
        self
    }

    /// Takes the current panic handler, leaving `None`.
    fn take_panic_handler(&mut self) -> Option<Box<PanicHandler>> {
        self.panic_handler.take()
    }

    /// Normally, whenever Rayon catches a panic, it tries to
    /// propagate it to someplace sensible, to try and reflect the
    /// semantics of sequential execution. But in some cases,
    /// particularly with the `spawn()` APIs, there is no
    /// obvious place where we should propagate the panic to.
    /// In that case, this panic handler is invoked.
    ///
    /// If no panic handler is set, the default is to abort the
    /// process, under the principle that panics should not go
    /// unobserved.
    ///
    /// If the panic handler itself panics, this will abort the
    /// process. To prevent this, wrap the body of your panic handler
    /// in a call to `std::panic::catch_unwind()`.
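    ///
    /// # Examples
    ///
    /// A sketch of observing the handler; the channel is just one way to be notified
    /// that it ran.
    ///
    /// ```
    /// # use rayon_core as rayon;
    /// use std::sync::mpsc::sync_channel;
    ///
    /// let (tx, rx) = sync_channel(1);
    /// let pool = rayon::ThreadPoolBuilder::new()
    ///     .panic_handler(move |_payload| {
    ///         let _ = tx.send(());
    ///     })
    ///     .build()
    ///     .unwrap();
    /// // A panicking `spawn` has nowhere to propagate, so the handler is invoked.
    /// pool.spawn(|| panic!("boom"));
    /// rx.recv().unwrap();
    /// ```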
    pub fn panic_handler<H>(mut self, panic_handler: H) -> Self
    where
        H: Fn(Box<dyn Any + Send>) + Send + Sync + 'static,
    {
        self.panic_handler = Some(Box::new(panic_handler));
        self
    }

    /// Get the stack size of the worker threads
    fn get_stack_size(&self) -> Option<usize> {
        self.stack_size
    }

    /// Sets the stack size of the worker threads
    pub fn stack_size(mut self, stack_size: usize) -> Self {
        self.stack_size = Some(stack_size);
        self
    }

    /// **(DEPRECATED)** Suggest to worker threads that they execute
    /// spawned jobs in a "breadth-first" fashion.
    ///
    /// Typically, when a worker thread is idle or blocked, it will
    /// attempt to execute the job from the *top* of its local deque of
    /// work (i.e., the job most recently spawned). If this flag is set
    /// to true, however, workers will prefer to execute in a
    /// *breadth-first* fashion -- that is, they will search for jobs at
    /// the *bottom* of their local deque. (At present, workers *always*
    /// steal from the bottom of other workers' deques, regardless of
    /// the setting of this flag.)
    ///
    /// If you think of the tasks as a tree, where a parent task
    /// spawns its children in the tree, then this flag loosely
    /// corresponds to doing a breadth-first traversal of the tree,
    /// whereas the default would be to do a depth-first traversal.
    ///
    /// **Note that this is an "execution hint".** Rayon's task
    /// execution is highly dynamic and the precise order in which
    /// independent tasks are executed is not intended to be
    /// guaranteed.
    ///
    /// This `breadth_first()` method is now deprecated per [RFC #1],
    /// and in the future its effect may be removed. Consider using
    /// [`scope_fifo()`] for a similar effect.
    ///
    /// [RFC #1]: https://github.com/rayon-rs/rfcs/blob/master/accepted/rfc0001-scope-scheduling.md
    /// [`scope_fifo()`]: fn.scope_fifo.html
    #[deprecated(note = "use `scope_fifo` and `spawn_fifo` for similar effect")]
    pub fn breadth_first(mut self) -> Self {
        self.breadth_first = true;
        self
    }

    fn get_breadth_first(&self) -> bool {
        self.breadth_first
    }

    /// Takes the current acquire thread callback, leaving `None`.
    fn take_acquire_thread_handler(&mut self) -> Option<Box<AcquireThreadHandler>> {
        self.acquire_thread_handler.take()
    }

    /// Sets a callback to be invoked when starting computations in a thread.
    pub fn acquire_thread_handler<H>(mut self, acquire_thread_handler: H) -> Self
    where
        H: Fn() + Send + Sync + 'static,
    {
        self.acquire_thread_handler = Some(Box::new(acquire_thread_handler));
        self
    }

    /// Takes the current release thread callback, leaving `None`.
    fn take_release_thread_handler(&mut self) -> Option<Box<ReleaseThreadHandler>> {
        self.release_thread_handler.take()
    }

    /// Sets a callback to be invoked when blocking in a thread.
    pub fn release_thread_handler<H>(mut self, release_thread_handler: H) -> Self
    where
        H: Fn() + Send + Sync + 'static,
    {
        self.release_thread_handler = Some(Box::new(release_thread_handler));
        self
    }

    /// Takes the current deadlock callback, leaving `None`.
    fn take_deadlock_handler(&mut self) -> Option<Box<DeadlockHandler>> {
        self.deadlock_handler.take()
    }

    /// Sets a callback to be invoked when the thread pool deadlocks.
    pub fn deadlock_handler<H>(mut self, deadlock_handler: H) -> Self
    where
        H: Fn() + Send + Sync + 'static,
    {
        self.deadlock_handler = Some(Box::new(deadlock_handler));
        self
    }

    /// Takes the current thread start callback, leaving `None`.
    fn take_start_handler(&mut self) -> Option<Box<StartHandler>> {
        self.start_handler.take()
    }

    /// Sets a callback to be invoked on thread start.
    ///
    /// The closure is passed the index of the thread on which it is invoked.
    /// Note that this same closure may be invoked multiple times in parallel.
    /// If this closure panics, the panic will be passed to the panic handler.
    /// If that handler returns, then startup will continue normally.
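    ///
    /// # Examples
    ///
    /// A sketch that counts how many workers have started:
    ///
    /// ```
    /// # use rayon_core as rayon;
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    /// use std::sync::Arc;
    ///
    /// let started = Arc::new(AtomicUsize::new(0));
    /// let counter = Arc::clone(&started);
    /// let pool = rayon::ThreadPoolBuilder::new()
    ///     .num_threads(2)
    ///     .start_handler(move |_index| {
    ///         counter.fetch_add(1, Ordering::SeqCst);
    ///     })
    ///     .build()
    ///     .unwrap();
    /// // Running a job guarantees that at least the executing worker has started.
    /// pool.install(|| ());
    /// assert!(started.load(Ordering::SeqCst) >= 1);
    /// ```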
    pub fn start_handler<H>(mut self, start_handler: H) -> Self
    where
        H: Fn(usize) + Send + Sync + 'static,
    {
        self.start_handler = Some(Box::new(start_handler));
        self
    }

    /// Takes the current thread exit callback, leaving `None`.
    fn take_exit_handler(&mut self) -> Option<Box<ExitHandler>> {
        self.exit_handler.take()
    }

    /// Sets a callback to be invoked on thread exit.
    ///
    /// The closure is passed the index of the thread on which it is invoked.
    /// Note that this same closure may be invoked multiple times in parallel.
    /// If this closure panics, the panic will be passed to the panic handler.
    /// If that handler returns, then the thread will exit normally.
    pub fn exit_handler<H>(mut self, exit_handler: H) -> Self
    where
        H: Fn(usize) + Send + Sync + 'static,
    {
        self.exit_handler = Some(Box::new(exit_handler));
        self
    }
}

#[allow(deprecated)]
impl Configuration {
    /// Creates and returns a valid rayon thread pool configuration, but does not initialize it.
    pub fn new() -> Configuration {
        Configuration {
            builder: ThreadPoolBuilder::new(),
        }
    }

    /// Deprecated in favor of `ThreadPoolBuilder::build`.
    pub fn build(self) -> Result<ThreadPool, Box<dyn Error + 'static>> {
        self.builder.build().map_err(Box::from)
    }

    /// Deprecated in favor of `ThreadPoolBuilder::thread_name`.
    pub fn thread_name<F>(mut self, closure: F) -> Self
    where
        F: FnMut(usize) -> String + 'static,
    {
        self.builder = self.builder.thread_name(closure);
        self
    }

    /// Deprecated in favor of `ThreadPoolBuilder::num_threads`.
    pub fn num_threads(mut self, num_threads: usize) -> Configuration {
        self.builder = self.builder.num_threads(num_threads);
        self
    }

    /// Deprecated in favor of `ThreadPoolBuilder::panic_handler`.
    pub fn panic_handler<H>(mut self, panic_handler: H) -> Configuration
    where
        H: Fn(Box<dyn Any + Send>) + Send + Sync + 'static,
    {
        self.builder = self.builder.panic_handler(panic_handler);
        self
    }

    /// Deprecated in favor of `ThreadPoolBuilder::stack_size`.
    pub fn stack_size(mut self, stack_size: usize) -> Self {
        self.builder = self.builder.stack_size(stack_size);
        self
    }

    /// Deprecated in favor of `ThreadPoolBuilder::breadth_first`.
    pub fn breadth_first(mut self) -> Self {
        self.builder = self.builder.breadth_first();
        self
    }

    /// Deprecated in favor of `ThreadPoolBuilder::start_handler`.
    pub fn start_handler<H>(mut self, start_handler: H) -> Configuration
    where
        H: Fn(usize) + Send + Sync + 'static,
    {
        self.builder = self.builder.start_handler(start_handler);
        self
    }

    /// Deprecated in favor of `ThreadPoolBuilder::exit_handler`.
    pub fn exit_handler<H>(mut self, exit_handler: H) -> Configuration
    where
        H: Fn(usize) + Send + Sync + 'static,
    {
        self.builder = self.builder.exit_handler(exit_handler);
        self
    }

    /// Returns a ThreadPoolBuilder with identical parameters.
    fn into_builder(self) -> ThreadPoolBuilder {
        self.builder
    }
}

impl ThreadPoolBuildError {
    fn new(kind: ErrorKind) -> ThreadPoolBuildError {
        ThreadPoolBuildError { kind }
    }

    fn is_unsupported(&self) -> bool {
        matches!(&self.kind, ErrorKind::IOError(e) if e.kind() == io::ErrorKind::Unsupported)
    }
}

const GLOBAL_POOL_ALREADY_INITIALIZED: &str =
    "The global thread pool has already been initialized.";

impl Error for ThreadPoolBuildError {
    #[allow(deprecated)]
    fn description(&self) -> &str {
        match self.kind {
            ErrorKind::GlobalPoolAlreadyInitialized => GLOBAL_POOL_ALREADY_INITIALIZED,
            ErrorKind::IOError(ref e) => e.description(),
        }
    }

    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match &self.kind {
            ErrorKind::GlobalPoolAlreadyInitialized => None,
            ErrorKind::IOError(e) => Some(e),
        }
    }
}

impl fmt::Display for ThreadPoolBuildError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match &self.kind {
            ErrorKind::GlobalPoolAlreadyInitialized => GLOBAL_POOL_ALREADY_INITIALIZED.fmt(f),
            ErrorKind::IOError(e) => e.fmt(f),
        }
    }
}

/// Deprecated in favor of `ThreadPoolBuilder::build_global`.
#[deprecated(note = "use `ThreadPoolBuilder::build_global`")]
#[allow(deprecated)]
pub fn initialize(config: Configuration) -> Result<(), Box<dyn Error>> {
    config.into_builder().build_global().map_err(Box::from)
}
impl<S> fmt::Debug for ThreadPoolBuilder<S> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let ThreadPoolBuilder {
            ref num_threads,
            ref get_thread_name,
            ref panic_handler,
            ref stack_size,
            ref deadlock_handler,
            ref start_handler,
            ref exit_handler,
            ref acquire_thread_handler,
            ref release_thread_handler,
            spawn_handler: _,
            ref breadth_first,
        } = *self;

        // Just print `Some(<closure>)` or `None` to the debug
        // output.
        struct ClosurePlaceholder;
        impl fmt::Debug for ClosurePlaceholder {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.write_str("<closure>")
            }
        }
        let get_thread_name = get_thread_name.as_ref().map(|_| ClosurePlaceholder);
        let panic_handler = panic_handler.as_ref().map(|_| ClosurePlaceholder);
        let deadlock_handler = deadlock_handler.as_ref().map(|_| ClosurePlaceholder);
        let start_handler = start_handler.as_ref().map(|_| ClosurePlaceholder);
        let exit_handler = exit_handler.as_ref().map(|_| ClosurePlaceholder);
        let acquire_thread_handler = acquire_thread_handler.as_ref().map(|_| ClosurePlaceholder);
        let release_thread_handler = release_thread_handler.as_ref().map(|_| ClosurePlaceholder);

        f.debug_struct("ThreadPoolBuilder")
            .field("num_threads", num_threads)
            .field("get_thread_name", &get_thread_name)
            .field("panic_handler", &panic_handler)
            .field("stack_size", &stack_size)
            .field("deadlock_handler", &deadlock_handler)
            .field("start_handler", &start_handler)
            .field("exit_handler", &exit_handler)
            .field("acquire_thread_handler", &acquire_thread_handler)
            .field("release_thread_handler", &release_thread_handler)
            .field("breadth_first", &breadth_first)
            .finish()
    }
}

#[allow(deprecated)]
impl fmt::Debug for Configuration {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.builder.fmt(f)
    }
}

/// Provides the calling context to a closure called by `join_context`.
#[derive(Debug)]
pub struct FnContext {
    migrated: bool,

    /// disable `Send` and `Sync`, just for a little future-proofing.
    _marker: PhantomData<*mut ()>,
}

impl FnContext {
    #[inline]
    fn new(migrated: bool) -> Self {
        FnContext {
            migrated,
            _marker: PhantomData,
        }
    }
}

impl FnContext {
    /// Returns `true` if the closure was called from a different thread
    /// than it was provided from.
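    ///
    /// # Examples
    ///
    /// A sketch with [`join_context`](fn.join_context.html); whether either closure
    /// observes `true` depends on runtime work stealing (and on whether the call was
    /// injected into the pool from outside), so nothing is asserted about it here.
    ///
    /// ```
    /// # use rayon_core as rayon;
    /// let (a, b) = rayon::join_context(
    ///     |ctx| if ctx.migrated() { "migrated" } else { "not migrated" },
    ///     |ctx| if ctx.migrated() { "migrated" } else { "not migrated" },
    /// );
    /// println!("first: {}, second: {}", a, b);
    /// ```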
    #[inline]
    pub fn migrated(&self) -> bool {
        self.migrated
    }
}