pub struct TrashQueueStrategy();
Available on crate feature alloc only.
Strategy which adds garbage to a global ‘trash VecDeque’.
In std contexts, this trash queue is protected using std::thread_local!.
In no_std contexts, it is instead implemented as a mutable static variable.
This is perfectly fine for truly single-threaded applications. It does mean, however, that if you use some sort of 'alternative threading' in a no_std context, this strategy will be unsound!
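For illustration, here is a minimal sketch of the intended flow (assuming Backdrop and TrashQueueStrategy are importable from the backdrop crate root, as in the comparison example further below; the workload is just a stand-in for any value with an expensive destructor):

use backdrop::{Backdrop, TrashQueueStrategy};

fn main() {
    // Optional: pay the queue-initialization cost up front instead of on the first drop.
    TrashQueueStrategy::ensure_initialized();

    // Stand-in for any value whose destructor is expensive.
    let expensive: Vec<Box<u64>> = (0..1_000_000).map(Box::new).collect();
    let wrapped = Backdrop::<_, TrashQueueStrategy>::new(expensive);

    // Dropping the wrapper only pushes its contents onto the trash queue;
    // no deallocation work happens at this point.
    drop(wrapped);

    // ... latency-sensitive work ...

    // Now actually free everything that has been queued so far.
    TrashQueueStrategy::cleanup_all();
}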
Implementations
impl TrashQueueStrategy
pub fn ensure_initialized()
Makes sure the global (thread-local) queue is initialized. If you do not call this, it will be initialized the first time an object is dropped, which adds some overhead at that moment.
This is called automatically by TrashQueueStrategy::cleanup_on_exit().
Examples found in repository:
fn main() {
    let boxed = setup();
    let not_backdropped = boxed.clone();
    time("none", move || {
        assert_eq!(not_backdropped.len(), LEN);
        // Destructor runs here
    });

    let backdropped: TrivialBackdrop<_> = Backdrop::new(boxed.clone());
    time("fake backdrop", move || {
        assert_eq!(backdropped.len(), LEN);
        // Destructor runs here
    });

    let backdropped: thread::ThreadBackdrop<_> = Backdrop::new(boxed.clone());
    time("thread backdrop", move || {
        assert_eq!(backdropped.len(), LEN);
        // Destructor runs here
    });

    TrashThreadStrategy::with_trash_thread(|| {
        let backdropped: thread::TrashThreadBackdrop<_> = Backdrop::new(boxed.clone());
        time("trash thread backdrop", move || {
            assert_eq!(backdropped.len(), LEN);
            // Destructor runs here
        });
    });

    TrashQueueStrategy::ensure_initialized();
    let backdropped = Backdrop::<_, TrashQueueStrategy>::new(boxed.clone());
    time("(single threaded) trash queue backdrop", move || {
        assert_eq!(backdropped.len(), LEN);
        // Destructor runs here
    });

    time("(single threaded) trash queue backdrop (actually cleaning up later)", move || {
        TrashQueueStrategy::cleanup_all();
    });

    #[cfg(miri)]
    {
        println!("Skipping Tokio examples when running on Miri, since it does not support Tokio yet");
    }
    #[cfg(not(miri))]
    {
        ::tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .build()
            .unwrap()
            .block_on(async {
                let backdropped: crate::tokio::TokioTaskBackdrop<_> = Backdrop::new(boxed.clone());
                time("tokio task (multithread runner)", move || {
                    assert_eq!(backdropped.len(), LEN);
                    // Destructor runs here
                });

                let backdropped: crate::tokio::TokioBlockingTaskBackdrop<_> = Backdrop::new(boxed.clone());
                time("tokio blocking task (multithread runner)", move || {
                    assert_eq!(backdropped.len(), LEN);
                    // Destructor runs here
                });
            });

        ::tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .unwrap()
            .block_on(async {
                let backdropped: crate::tokio::TokioTaskBackdrop<_> = Backdrop::new(setup());
                time("tokio task (current thread runner)", move || {
                    assert_eq!(backdropped.len(), LEN);
                    // Destructor runs here
                });

                let backdropped: crate::tokio::TokioBlockingTaskBackdrop<_> = Backdrop::new(setup());
                time("tokio blocking task (current thread runner)", move || {
                    assert_eq!(backdropped.len(), LEN);
                    // Destructor runs here
                });
            });
    }
}

pub fn cleanup_one() -> bool
Cleans up a single item in the trash queue.
Returns true if there is more garbage in the queue at this moment.
This can be used to clean up some of the garbage without draining the whole queue at once; see the sketch below.
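As a minimal sketch of that batched-cleanup pattern (assuming TrashQueueStrategy is importable from the backdrop crate root; the budget parameter and the function name are illustrative, not part of the crate):

use backdrop::TrashQueueStrategy;

/// Free at most `budget` queued items, stopping early once the queue is empty.
fn drain_some_garbage(budget: usize) {
    for _ in 0..budget {
        // cleanup_one() returns false once no more garbage remains.
        if !TrashQueueStrategy::cleanup_one() {
            break;
        }
    }
}

Calling something like this once per frame or per request keeps the pause caused by deferred destructors bounded.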
pub fn cleanup_all()
Cleans up everything that is in the trash queue.
Examples found in repository: see the comparison example under ensure_initialized() above, which also calls cleanup_all() in its "(single threaded) trash queue backdrop (actually cleaning up later)" timing block.
pub fn cleanup_on_exit<R>(closure: impl FnOnce() -> R) -> R
Wrapper which will:
- Call TrashQueueStrategy::ensure_initialized() before your closure
- Call your closure
- Call TrashQueueStrategy::cleanup_all() after your closure
As such, you can use this to delay dropping until after your critical code section very easily:
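A minimal sketch of that pattern (assuming Backdrop and TrashQueueStrategy are importable from the backdrop crate root; the workload is illustrative):

use backdrop::{Backdrop, TrashQueueStrategy};

fn main() {
    let result = TrashQueueStrategy::cleanup_on_exit(|| {
        // Critical section: anything dropped inside this closure is only enqueued.
        let big: Vec<Box<u64>> = (0..1_000_000).map(Box::new).collect();
        let wrapped = Backdrop::<_, TrashQueueStrategy>::new(big);
        wrapped.len()
        // `wrapped` is dropped here, cheaply: its contents go onto the trash queue.
    });

    // By this point cleanup_all() has run and the queued garbage has been freed.
    println!("length computed inside the critical section: {result}");
}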