aarch64_std/thread.rs

use aarch64_cpu::registers;
use core::time::Duration;
use tock_registers::interfaces::Readable;

/// Puts the current thread to sleep for at least the specified amount of time.
///
/// The thread may sleep longer than the duration specified due to scheduling specifics or
/// platform-dependent functionality. It will never sleep less.
///
/// This function is blocking, and should not be used in async functions.
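///
/// # Examples
///
/// A minimal sketch; the `aarch64_std` crate path is assumed from this file's location:
///
/// ```no_run
/// use core::time::Duration;
///
/// // Busy-wait on the virtual counter for at least one millisecond.
/// aarch64_std::thread::sleep(Duration::from_millis(1));
/// ```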
pub fn sleep(d: Duration) {
    let freq = registers::CNTFRQ_EL0.get();
    let start = registers::CNTVCT_EL0.get();
    let end = start + (d.as_secs_f64() * freq as f64) as u64;
    // TODO: use a timer or event stream for lower power usage?
    loop {
        #[cfg(feature = "alloc")]
        yield_now();
        if registers::CNTVCT_EL0.get() >= end {
            return;
        }
    }
}

#[cfg(feature = "alloc")]
mod runtime {
    use crate::sync::Mutex;
    use alloc::{boxed::Box, collections::LinkedList, string::String, sync::Arc, vec::Vec};
    use core::{any::Any, arch::asm, convert::Infallible, time::Duration};
    use tock_registers::interfaces::Readable;

    pub type Result<T> = core::result::Result<T, Box<dyn Any + Send + 'static>>;

    pub struct JoinHandle<T> {
        result: Arc<Mutex<Option<Result<T>>>>,
        thread: Thread,
    }

    impl<T> JoinHandle<T> {
        /// Extracts a handle to the underlying thread.
        #[must_use]
        pub fn thread(&self) -> &Thread {
            &self.thread
        }

        /// Waits for the associated thread to finish.
        ///
        /// This function will return immediately if the associated thread has already finished.
        ///
        /// In terms of [atomic memory orderings], the completion of the associated
        /// thread synchronizes with this function returning. In other words, all
        /// operations performed by that thread [happen
        /// before](https://doc.rust-lang.org/nomicon/atomics.html#data-accesses) all
        /// operations that happen after `join` returns.
        ///
        /// If the associated thread panics, [`Err`] is returned with the parameter given
        /// to [`panic!`].
        ///
        /// [`Err`]: core::result::Result::Err
        /// [atomic memory orderings]: crate::sync::atomic
        ///
        /// # Panics
        ///
        /// This function may panic on some platforms if a thread attempts to join
        /// itself or otherwise may create a deadlock with joining threads.
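        ///
        /// # Examples
        ///
        /// A minimal sketch, assuming the crate is available as `aarch64_std` and that a
        /// native thread is driving the runtime via [`contribute`]:
        ///
        /// ```no_run
        /// let handle = aarch64_std::thread::spawn(|| 1 + 1);
        /// // `join` spins (yielding) until the spawned thread has stored its result.
        /// assert_eq!(handle.join().unwrap(), 2);
        /// ```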
        pub fn join(self) -> Result<T> {
            loop {
                if let Some(result) = self.result.lock().unwrap().take() {
                    return result;
                }
                yield_now();
            }
        }

        /// Checks if the associated thread has finished running its main function.
        ///
        /// `is_finished` supports implementing a non-blocking join operation, by checking
        /// `is_finished`, and calling `join` if it returns `true`. This function does not block. To
        /// block while waiting on the thread to finish, use [`join`][Self::join].
        ///
        /// This might return `true` for a brief moment after the thread's main
        /// function has returned, but before the thread itself has stopped running.
        /// However, once this returns `true`, [`join`][Self::join] can be expected
        /// to return quickly, without blocking for any significant amount of time.
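        ///
        /// # Examples
        ///
        /// A sketch of a non-blocking poll loop (crate path assumed to be `aarch64_std`):
        ///
        /// ```no_run
        /// use aarch64_std::thread;
        ///
        /// let handle = thread::spawn(|| "done");
        /// while !handle.is_finished() {
        ///     // Do other work here, then give the runtime a chance to run the spawned thread.
        ///     thread::yield_now();
        /// }
        /// assert_eq!(handle.join().unwrap(), "done");
        /// ```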
        pub fn is_finished(&self) -> bool {
            Arc::strong_count(&self.result) == 1
        }
    }

    #[repr(C)]
    #[derive(Default)]
    struct Registers {
        // These are the registers we have to keep track of ourselves. The rest are handled by the
        // compiler based on the inline assembly directives.
        x18: u64,
        x19: u64,
        x29: u64,
        x30: u64,
        sp: u64,
    }

    #[repr(C, align(16))]
    #[derive(Default)]
    struct SixteenBytes([u8; 16]);

    struct RuntimeThread {
        args: Option<RuntimeThreadArgs>,
        stack: Vec<SixteenBytes>,
        registers: Registers,
        handle: Thread,
    }

    enum RunStatus {
        Yielded,
        Ended,
    }

    struct RuntimeThreadArgs {
        f: Box<dyn FnOnce() + Send>,
    }

    impl RuntimeThread {
        fn entry_point(args: *mut RuntimeThreadArgs) {
            let args = unsafe { Box::from_raw(args) };
            (args.f)();
        }

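        // Runs or resumes this green thread on the calling native thread.
        //
        // The inline assembly implements a small context switch: x0 carries the boxed
        // `RuntimeThreadArgs` pointer on the first run, or null on a resume. On the first run we
        // switch onto the green thread's stack, save the caller's x18/x19/x29/sp/lr at the top of
        // that stack, and call `entry_point`. When the green thread later calls `yield_now`, it
        // branches to `aarch64_std_yield`, which stashes the green thread's x18/x19/x29/lr/sp in
        // x8-x12 (flowing back into `self.registers`) and restores the caller's saved state. A
        // later `run` with a null x0 reloads x8-x12 and jumps to `aarch64_std_unyield`, resuming
        // the green thread where it left off.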
        #[allow(named_asm_labels)]
        #[inline(never)]
        fn run(&mut self) -> RunStatus {
            unsafe {
                let stack = self.stack.as_mut_ptr().offset(self.stack.len() as _);
                let args = self
                    .args
                    .take()
                    .map(|args| Box::into_raw(Box::new(args)))
                    .unwrap_or_else(core::ptr::null_mut);
                let did_end: u64;
                asm!(
                    // see if we're starting a new thread or resuming one
                    "cmp x0, #0",
                    "beq 1f",

                    // we're starting a new thread
                    "mov x1, sp",
                    "mov sp, {stack}",
                    "str x18, [sp, #-16]!",
                    "stp x19, x29, [sp, #-16]!",
                    "stp x1, lr, [sp, #-16]!",
                    "blr {entry}",
                    // if we make it back here, that means the thread's ended
                    "ldp x1, lr, [sp], #16",
                    "ldp x19, x29, [sp], #16",
                    "ldr x18, [sp], #16",
                    "mov sp, x1",
                    // set did_end = 1
                    "mov x1, #1",
                    "b 2f",

                    "1:",
                    // we're resuming a thread
                    // restore x18, x19, x29, lr, and sp from x8-x12
                    "mov x18, x8",
                    "mov x19, x9",
                    "mov x29, x10",
                    "mov lr, x11",
                    "mov sp, x12",
                    // then jump back to that location
                    "b aarch64_std_unyield",

                    "aarch64_std_yield:",
                    // we're yielding to another thread.
                    // yield_now put our original stack pointer in x0
                    // save x18, x19, x29, lr, and sp to x8-x12
                    "mov x8, x18",
                    "mov x9, x19",
                    "mov x10, x29",
                    "mov x11, lr",
                    "mov x12, sp",
                    // then restore the originals
                    "ldr x18, [x0, #-16]",
                    "ldp x19, x29, [x0, #-32]",
                    "ldp x1, lr, [x0, #-48]",
                    "mov sp, x1",
                    // set did_end = 0
                    "mov x1, #0",

                    "2:",
                    entry = in(reg) Self::entry_point,
                    stack = in(reg) stack,
                    inout("x0") args => _,
                    out("x1") did_end,
                    inout("x8") self.registers.x18,
                    inout("x9") self.registers.x19,
                    inout("x10") self.registers.x29,
                    inout("x11") self.registers.x30,
                    inout("x12") self.registers.sp,
                    // mark everything possible as clobbered so the compiler can take care of
                    // saving and restoring most of the registers
                    out("x20") _, out("x21") _, out("x22") _, out("x23") _,
                    out("x24") _, out("x25") _, out("x26") _, out("x27") _,
                    out("x28") _,
                    clobber_abi("C")
                );
                if did_end == 1 {
                    RunStatus::Ended
                } else {
                    RunStatus::Yielded
                }
            }
        }
    }

    struct Runtime {
        state: Mutex<RuntimeState>,
    }

    struct RuntimeState {
        next_thread_id: u64,
        queue: LinkedList<RuntimeThread>,
        active_threads: Vec<Thread>,
    }

    impl Runtime {
        const fn new() -> Self {
            Self {
                state: Mutex::new(RuntimeState {
                    next_thread_id: 1,
                    queue: LinkedList::new(),
                    active_threads: Vec::new(),
                }),
            }
        }

        unsafe fn contribute(&self) {
            loop {
                match {
                    let mut state = self.state.lock().unwrap();
                    let thread = state.queue.pop_front();
                    if let Some(thread) = &thread {
                        state.active_threads.push(thread.handle.clone());
                    }
                    thread
                } {
                    Some(mut thread) => match thread.run() {
                        RunStatus::Yielded => {
                            let mut state = self.state.lock().unwrap();
                            state.queue.push_back(thread);
                        }
                        RunStatus::Ended => {
                            let mut state = self.state.lock().unwrap();
                            let idx = state
                                .active_threads
                                .iter()
                                .position(|t| t.id() == thread.handle.id())
                                .unwrap();
                            state.active_threads.swap_remove(idx);
                        }
                    },
                    None => return,
                }
            }
        }

        fn current(&self) -> Thread {
            let stack: u64;
            unsafe {
                asm!(
                    "mov {stack}, sp",
                    stack = out(reg) stack,
                );
            }
            let state = self.state.lock().unwrap();
            for t in &state.active_threads {
                if stack >= t.inner.stack_addr
                    && stack < t.inner.stack_addr + t.inner.stack_size as u64
                {
                    return t.clone();
                }
            }
            Thread {
                inner: Arc::new(ThreadInner {
                    id: ThreadId {
                        id: aarch64_cpu::registers::TPIDRRO_EL0.get(),
                        is_external: true,
                    },
                    name: None,
                    stack_addr: 0,
                    stack_size: 0,
                }),
            }
        }

        #[allow(named_asm_labels)]
        #[inline(never)]
        fn yield_now(&self) {
            let t = {
                let state = self.state.lock_impl(false).unwrap();
                if state.queue.is_empty() {
                    // There are no pending threads.
                    unsafe { asm!("yield") };
                    return;
                }

                let stack: u64;
                unsafe {
                    asm!(
                        "mov {stack}, sp",
                        stack = out(reg) stack,
                    );
                }

                match state.active_threads.iter().find(|t| {
                    stack >= t.inner.stack_addr
                        && stack < t.inner.stack_addr + t.inner.stack_size as u64
                }) {
                    Some(t) => t.clone(),
                    None => {
                        // This isn't one of our threads.
                        unsafe { asm!("yield") };
                        return;
                    }
                }
            };

            unsafe {
                asm!(
                    "b aarch64_std_yield",
                    "aarch64_std_unyield:",
                    inout("x0") t.inner.stack_addr + t.inner.stack_size as u64 => _,
                    // mark everything possible as clobbered so the compiler can take care of
                    // saving and restoring most of the registers
                    out("x20") _, out("x21") _, out("x22") _, out("x23") _,
                    out("x24") _, out("x25") _, out("x26") _, out("x27") _,
                    out("x28") _,
                    clobber_abi("C")
                );
            }
        }

        /// Spawns a thread.
        fn spawn<F, T>(&self, f: F, name: Option<String>, stack_size: usize) -> JoinHandle<T>
        where
            F: FnOnce() -> T,
            F: Send + 'static,
            T: Send + 'static,
        {
            let stack_size_div_16 = (stack_size + 15) / 16;
            let mut stack = Vec::with_capacity(stack_size_div_16);
            stack.resize_with(stack_size_div_16, Default::default);

            let result = Arc::new(Mutex::new(None));

            let args = RuntimeThreadArgs {
                f: {
                    let result = result.clone();
                    Box::new(move || {
                        // TODO: is there a way we can catch panics?
                        let ret = f();
                        *result.lock().unwrap() = Some(Ok(ret));
                    })
                },
            };

            let mut state = self.state.lock().unwrap();

            let id = ThreadId {
                id: state.next_thread_id,
                is_external: false,
            };
            state.next_thread_id += 1;

            let handle = Thread {
                inner: Arc::new(ThreadInner {
                    id,
                    name,
                    stack_addr: stack.as_ptr() as u64,
                    stack_size: stack.len() * 16,
                }),
            };

            state.queue.push_back(RuntimeThread {
                args: Some(args),
                registers: Default::default(),
                handle: handle.clone(),
                stack,
            });

            JoinHandle {
                thread: handle,
                result,
            }
        }
    }

    static GLOBAL_RUNTIME: Runtime = Runtime::new();

    /// A unique identifier for a running thread.
    ///
    /// A `ThreadId` is an opaque object that uniquely identifies each thread
    /// created during the lifetime of a process. `ThreadId`s are guaranteed not to
    /// be reused, even when a thread terminates. `ThreadId`s are under the control
    /// of this library and there may not be any relationship between
    /// `ThreadId` and the underlying platform's notion of a thread identifier --
    /// the two concepts cannot, therefore, be used interchangeably. A `ThreadId`
    /// can be retrieved from the [`id`] method on a [`Thread`].
    #[derive(Eq, PartialEq, Clone, Copy, Hash, Debug)]
    pub struct ThreadId {
        id: u64,
        is_external: bool,
    }

    /// A handle to a thread.
    ///
    /// Threads are represented via the `Thread` type, which you can get in one of
    /// two ways:
    ///
    /// * By spawning a new thread, e.g., using the [`thread::spawn`][`spawn`]
    ///   function, and calling [`thread`][`JoinHandle::thread`] on the
    ///   [`JoinHandle`].
    /// * By requesting the current thread, using the [`thread::current`] function.
    ///
    /// The [`thread::current`] function is available even for threads not spawned
    /// by the APIs of this module.
    ///
    /// There is usually no need to create a `Thread` struct yourself, one
    /// should instead use a function like `spawn` to create new threads, see the
    /// docs of [`Builder`] and [`spawn`] for more details.
    ///
    /// [`thread::current`]: current
    #[derive(Clone, Debug)]
    pub struct Thread {
        inner: Arc<ThreadInner>,
    }

    #[derive(Clone, Debug)]
    struct ThreadInner {
        id: ThreadId,
        name: Option<String>,
        stack_addr: u64,
        stack_size: usize,
    }

    impl Thread {
        /// Gets the thread’s unique identifier.
        pub fn id(&self) -> ThreadId {
            self.inner.id
        }

        /// Gets the thread’s name.
        pub fn name(&self) -> Option<&str> {
            self.inner.name.as_ref().map(|s| s.as_str())
        }

        /// Atomically makes the handle's token available if it is not already.
        ///
        /// Every thread is equipped with some basic low-level blocking support, via
        /// the [`park`][park] function and the `unpark()` method. These can be
        /// used as a more CPU-efficient implementation of a spinlock.
        ///
        /// See the [park documentation][park] for more details.
        #[inline]
        pub fn unpark(&self) {
            // TODO: a more efficient implementation?
        }
    }

    /// Thread factory, which can be used in order to configure the properties of
    /// a new thread.
    ///
    /// Methods can be chained on it in order to configure it.
    ///
    /// The two configurations available are:
    ///
    /// - [`name`]: specifies an [associated name for the thread][naming-threads]
    /// - [`stack_size`]: specifies the [desired stack size for the thread][stack-size]
    ///
    /// The [`spawn`] method will take ownership of the builder and return a
    /// `Result` containing the thread handle with the given configuration.
    ///
    /// The [`thread::spawn`] free function uses a `Builder` with default
    /// configuration and [`unwrap`]s its return value.
    ///
    /// You may want to use [`spawn`] instead of [`thread::spawn`], when you want
    /// to recover from a failure to launch a thread: the free function will panic
    /// where the `Builder` method will return a `Result`.
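    ///
    /// # Examples
    ///
    /// A minimal sketch (the `aarch64_std` crate path is assumed):
    ///
    /// ```no_run
    /// use aarch64_std::thread;
    ///
    /// let handle = thread::Builder::new()
    ///     .name("worker".into())
    ///     .stack_size(32 * 1024)
    ///     .spawn(|| {
    ///         // thread code
    ///     })
    ///     .unwrap();
    ///
    /// handle.join().unwrap();
    /// ```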
    #[must_use = "must eventually spawn the thread"]
    #[derive(Debug)]
    pub struct Builder {
        name: Option<String>,
        stack_size: usize,
    }

    impl Builder {
        /// Generates the base configuration for spawning a thread, from which configuration methods can be chained.
        pub fn new() -> Self {
            const DEFAULT_STACK_SIZE: usize = 8 * 1024;
            Self {
                name: None,
                stack_size: DEFAULT_STACK_SIZE,
            }
        }

        /// Names the thread-to-be. Currently the name is only used for identification
        /// and can be retrieved with [`Thread::name`].
        ///
        /// The name must not contain null bytes (`\0`).
        ///
        /// For more information about named threads, see
        /// [this module-level documentation][naming-threads].
        pub fn name(mut self, name: String) -> Builder {
            self.name = Some(name);
            self
        }

        /// Sets the size of the stack (in bytes) for the new thread.
        pub fn stack_size(mut self, size: usize) -> Builder {
            self.stack_size = size;
            self
        }

        /// Spawns a new thread by taking ownership of the `Builder`, and returns a
        /// `Result` containing its [`JoinHandle`].
        ///
        /// The spawned thread may outlive the caller (unless the caller thread
        /// is the main thread; the whole process is terminated when the main
        /// thread finishes). The join handle can be used to block on
        /// termination of the spawned thread, including recovering its panics.
        ///
        /// For a more complete documentation see [`thread::spawn`][`spawn`].
        pub fn spawn<F, T>(self, f: F) -> core::result::Result<JoinHandle<T>, Infallible>
        where
            F: FnOnce() -> T,
            F: Send + 'static,
            T: Send + 'static,
        {
            Ok(GLOBAL_RUNTIME.spawn(f, self.name, self.stack_size))
        }
    }

    /// Spawns a new thread, returning a [`JoinHandle`] for it.
    ///
    /// The join handle provides a [`join`] method that can be used to join the spawned
    /// thread. If the spawned thread panics, [`join`] will return an [`Err`] containing
    /// the argument given to [`panic!`].
    ///
    /// If the join handle is dropped, the spawned thread will implicitly be *detached*.
    /// In this case, the spawned thread may no longer be joined.
    /// (It is the responsibility of the program to either eventually join threads it
    /// creates or detach them; otherwise, a resource leak will result.)
    ///
    /// This call will create a thread using default parameters of [`Builder`]; if you
    /// want to specify the stack size or the name of the thread, use [`Builder`]
    /// instead.
    ///
    /// As you can see in the signature of `spawn` there are two constraints on
    /// both the closure given to `spawn` and its return value, let's explain them:
    ///
    /// - The `'static` constraint means that the closure and its return value
    ///   must have a lifetime of the whole program execution. The reason for this
    ///   is that threads can outlive the lifetime they have been created in.
    ///
    ///   Indeed if the thread, and by extension its return value, can outlive their
    ///   caller, we need to make sure that they will be valid afterwards, and since
    ///   we *can't* know when it will return we need to have them valid as long as
    ///   possible, that is until the end of the program, hence the `'static`
    ///   lifetime.
    /// - The [`Send`] constraint is because the closure will need to be passed
    ///   *by value* from the thread where it is spawned to the new thread. Its
    ///   return value will need to be passed from the new thread to the thread
    ///   where it is `join`ed.
    ///   As a reminder, the [`Send`] marker trait expresses that it is safe to be
    ///   passed from thread to thread. [`Sync`] expresses that it is safe to have a
    ///   reference be passed from thread to thread.
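    ///
    /// # Examples
    ///
    /// A minimal sketch (crate path assumed to be `aarch64_std`); the spawned closure only makes
    /// progress while some native thread is inside [`contribute`]:
    ///
    /// ```no_run
    /// use aarch64_std::thread;
    ///
    /// let handle = thread::spawn(|| {
    ///     // thread code
    ///     42
    /// });
    ///
    /// assert_eq!(handle.join().unwrap(), 42);
    /// ```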
    pub fn spawn<F, T>(f: F) -> JoinHandle<T>
    where
        F: FnOnce() -> T,
        F: Send + 'static,
        T: Send + 'static,
    {
        Builder::new().spawn(f).expect("failed to spawn thread")
    }

    /// Gets a handle to the thread that invokes it.
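    ///
    /// # Examples
    ///
    /// A minimal sketch (crate path assumed to be `aarch64_std`):
    ///
    /// ```no_run
    /// use aarch64_std::thread;
    ///
    /// let handle = thread::current();
    /// // Threads not spawned by this module have no name and an externally derived id.
    /// let _name = handle.name();
    /// ```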
    pub fn current() -> Thread {
        GLOBAL_RUNTIME.current()
    }

    /// Cooperatively gives up a timeslice to the scheduler.
    ///
    /// For multithreading to work effectively, threads must call this function whenever they are
    /// willing to be swapped out.
    ///
    /// If called within the context of a spawned thread and another thread is pending, that
    /// thread will be swapped in. Otherwise, this simply executes the AArch64 `YIELD`
    /// instruction.
    ///
    /// Many functions within this crate such as [`sleep`] have built-in calls to this function.
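    ///
    /// # Examples
    ///
    /// A sketch of a cooperative busy-wait (crate path assumed to be `aarch64_std`):
    ///
    /// ```no_run
    /// use aarch64_std::thread;
    /// use core::sync::atomic::{AtomicBool, Ordering};
    ///
    /// static READY: AtomicBool = AtomicBool::new(false);
    ///
    /// while !READY.load(Ordering::Acquire) {
    ///     // Let another spawned thread run, or emit a bare YIELD instruction.
    ///     thread::yield_now();
    /// }
    /// ```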
    pub fn yield_now() {
        GLOBAL_RUNTIME.yield_now();
    }

    /// This is a non-standard function that should be called by a hardware or OS thread in order to drive spawned threads.
    ///
    /// The native thread will contribute its CPU time to the runtime's green threads, returning
    /// once there are no green threads that currently need to be driven (at which point you may
    /// just want to call this function again).
    ///
    /// # Safety
    /// User space threads can't reliably detect stack overflows. Some systems have protections in
    /// place that will crash the program on overflow, but others will simply have undefined
    /// behavior. To use spawned threads safely, you must ensure that your stack sizes are big
    /// enough to never overflow.
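    ///
    /// # Examples
    ///
    /// A sketch of a driver loop on a native (hardware or OS) thread, assuming the crate is
    /// available as `aarch64_std`:
    ///
    /// ```no_run
    /// use aarch64_std::thread;
    ///
    /// let work = thread::spawn(|| {
    ///     // green-thread work
    /// });
    ///
    /// // Lend this native thread to the runtime until no spawned threads are left to drive.
    /// while !work.is_finished() {
    ///     unsafe { thread::contribute() };
    /// }
    /// work.join().unwrap();
    /// ```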
    pub unsafe fn contribute() {
        GLOBAL_RUNTIME.contribute();
    }

    /// Blocks unless or until the current thread's token is made available.
    ///
    /// A call to `park` does not guarantee that the thread will remain parked
    /// forever, and callers should be prepared for this possibility.
    ///
    /// # park and unpark
    ///
    /// Every thread is equipped with some basic low-level blocking support, via the
    /// [`thread::park`][`park`] function and [`thread::Thread::unpark`][`unpark`]
    /// method. [`park`] blocks the current thread, which can then be resumed from
    /// another thread by calling the [`unpark`] method on the blocked thread's
    /// handle.
    ///
    /// Conceptually, each [`Thread`] handle has an associated token, which is
    /// initially not present:
    ///
    /// * The [`thread::park`][`park`] function blocks the current thread unless or
    ///   until the token is available for its thread handle, at which point it
    ///   atomically consumes the token. It may also return *spuriously*, without
    ///   consuming the token. [`thread::park_timeout`] does the same, but allows
    ///   specifying a maximum time to block the thread for.
    ///
    /// * The [`unpark`] method on a [`Thread`] atomically makes the token available
    ///   if it wasn't already. Because the token is initially absent, [`unpark`]
    ///   followed by [`park`] will result in the second call returning immediately.
    ///
    /// In other words, each [`Thread`] acts a bit like a spinlock that can be
    /// locked and unlocked using `park` and `unpark`.
    ///
    /// Notice that being unblocked does not imply any synchronization with someone
    /// that unparked this thread, it could also be spurious.
    /// For example, it would be a valid, but inefficient, implementation to make both [`park`] and
    /// [`unpark`] return immediately without doing anything.
    ///
    /// The API is typically used by acquiring a handle to the current thread,
    /// placing that handle in a shared data structure so that other threads can
    /// find it, and then `park`ing in a loop. When some desired condition is met, another
    /// thread calls [`unpark`] on the handle.
    ///
    /// The motivation for this design is twofold:
    ///
    /// * It avoids the need to allocate mutexes and condvars when building new
    ///   synchronization primitives; the threads already provide basic
    ///   blocking/signaling.
    ///
    /// * It can be implemented very efficiently on many platforms.
    ///
    /// [`unpark`]: Thread::unpark
    /// [`thread::park_timeout`]: park_timeout
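    ///
    /// # Examples
    ///
    /// A sketch of the usual flag-plus-park pattern (crate path assumed to be `aarch64_std`; in
    /// this implementation `park` may simply yield and return, so the flag check is what matters):
    ///
    /// ```no_run
    /// use aarch64_std::thread;
    /// use core::sync::atomic::{AtomicBool, Ordering};
    ///
    /// static FLAG: AtomicBool = AtomicBool::new(false);
    ///
    /// let waiter = thread::spawn(|| {
    ///     while !FLAG.load(Ordering::Acquire) {
    ///         // Wake-ups may be spurious, so re-check the flag after every park.
    ///         thread::park();
    ///     }
    /// });
    ///
    /// // Some native thread must drive the runtime via `contribute` for `waiter` to run.
    /// FLAG.store(true, Ordering::Release);
    /// waiter.thread().unpark();
    /// waiter.join().unwrap();
    /// ```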
    pub fn park() {
        // TODO: a more efficient implementation?
        yield_now();
    }

    /// Blocks unless or until the current thread's token is made available or
    /// the specified duration has been reached (may wake spuriously).
    ///
    /// The semantics of this function are equivalent to [`park`][park] except
    /// that the thread will be blocked for roughly no longer than `dur`. This
    /// method should not be used for precise timing due to anomalies such as
    /// preemption or platform differences that might not cause the maximum
    /// amount of time waited to be precisely `dur` long.
    ///
    /// See the [park documentation][park] for more details.
    pub fn park_timeout(_dur: Duration) {
        // TODO: a more efficient implementation?
        yield_now();
    }
}

#[cfg(feature = "alloc")]
pub use runtime::*;

#[cfg(test)]
mod tests {
    use super::*;
    #[cfg(feature = "alloc")]
    use crate::sync::Mutex;
    #[cfg(feature = "alloc")]
    use alloc::sync::Arc;

    #[test]
    fn test_sleep() {
        sleep(Duration::from_millis(500));
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn test_current_with_os_threads() {
        let a = std::thread::spawn(|| current()).join().unwrap();
        let b = std::thread::spawn(|| current()).join().unwrap();
        assert_ne!(a.id(), b.id());
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn test_spawn() {
        let v = Arc::new(Mutex::new(Vec::new()));

        let foo = Builder::new()
            .name("foo".into())
            .spawn({
                let v = v.clone();
                move || {
                    let t = current();
                    assert_eq!(t.name().unwrap(), "foo");

                    v.lock().unwrap().push(1);
                    yield_now();
                    loop {
                        let mut v = v.lock().unwrap();
                        if v.len() == 2 {
                            v.push(3);
                            break;
                        }
                    }

                    "foo"
                }
            })
            .unwrap();

        let bar = Builder::new()
            .name("bar".into())
            .spawn({
                let v = v.clone();
                move || {
                    let t = current();
                    assert_eq!(t.name().unwrap(), "bar");

                    loop {
                        let mut v = v.lock().unwrap();
                        if v.len() == 1 {
                            v.push(2);
                            break;
                        }
                    }

                    "bar"
                }
            })
            .unwrap();

        unsafe { contribute() };

        assert!(foo.is_finished());
        assert_eq!(foo.join().unwrap(), "foo");

        assert!(bar.is_finished());
        assert_eq!(bar.join().unwrap(), "bar");

        assert_eq!(*v.lock().unwrap(), vec![1, 2, 3]);
    }
}