1#![cfg_attr(docsrs, feature(doc_cfg))]
2#![allow(named_asm_labels)]
3#![cfg_attr(not(feature = "std"), no_std)]
4
5pub use bytemuck;
37
/// Returns a reference to the unique zero-initialized static instance of `T`.
///
/// Storage is reserved via an assembler `.comm` directive (see `global_impl`),
/// so the all-zero bit pattern must be a valid `T` — hence the
/// [`bytemuck::Zeroable`] bound. Repeated calls with the same `T` yield the
/// same address.
/// NOTE(review): `.comm` common symbols are typically merged by the linker,
/// so this should be one instance program-wide — confirm for all supported
/// targets.
#[cfg(any(
    feature = "std",
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "arm",
    target_arch = "x86"
))]
#[inline(always)]
pub fn global<T: bytemuck::Zeroable + Sync + 'static>() -> &'static T {
    global_impl::<false, T>()
}
65
/// Like [`global`], but the backing symbol is emitted with the `.local`
/// assembler directive (see `global_impl`), keeping it private to the object
/// file that defines it.
/// NOTE(review): presumably this means separate linking units each get their
/// own instance instead of sharing one — confirm intended semantics.
#[cfg(any(
    feature = "std",
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "arm",
    target_arch = "x86"
))]
#[inline(always)]
pub fn local_global<T: bytemuck::Zeroable + Sync + 'static>() -> &'static T {
    global_impl::<true, T>()
}
84
85#[cfg(any(
86 feature = "std",
87 target_arch = "x86_64",
88 target_arch = "aarch64",
89 target_arch = "arm",
90 target_arch = "x86"
91))]
92#[inline(always)]
93fn global_impl<const LOCAL: bool, T: bytemuck::Zeroable + Sync + 'static>() -> &'static T {
94 assert!(core::mem::align_of::<T>() <= 4 * 1024);
95 #[cfg(any(
96 target_arch = "x86_64",
97 target_arch = "aarch64",
98 target_arch = "arm",
99 target_arch = "x86"
100 ))]
101 {
102 unsafe {
103 core::arch::asm!(
109 ".ifnotdef global_{local}_{id}",
110 ".if {local}",
111 ".local global_{local}_{id}",
112 ".endif",
113 ".comm global_{local}_{id}, {size}, {align}",
114 ".endif",
115 local = const if LOCAL {1} else {0},
116 id = sym global::<T>,
117 size = const core::mem::size_of::<T>(),
118 align = const core::mem::align_of::<T>(),
119 options(nomem)
120 );
121 let addr: usize;
123 #[cfg(target_arch = "x86_64")]
124 {
125 core::arch::asm!(
126 "lea {addr}, [rip+global_{local}_{id}]",
127 addr = out(reg) addr,
128 local = const if LOCAL {1} else {0},
129 id = sym global::<T>,
130 options(pure, nomem)
131 );
132 }
133 #[cfg(target_arch = "aarch64")]
134 {
135 core::arch::asm!(
136 "adrp {addr}, global_{local}_{id}",
137 "add {addr}, {addr}, :lo12:global_{local}_{id}",
138 addr = out(reg) addr,
139 local = const if LOCAL {1} else {0},
140 id = sym global::<T>,
141 options(pure, nomem)
142 );
143 }
144 #[cfg(target_arch = "arm")]
145 {
146 core::arch::asm!(
147 "ldr {addr}, =global_{local}_{id}",
148 addr = out(reg) addr,
149 local = const if LOCAL {1} else {0},
150 id = sym global::<T>,
151 options(pure, nomem)
152 );
153 }
154 #[cfg(target_arch = "x86")]
155 {
156 core::arch::asm!(
157 "call 2f",
159 "2: pop {addr}",
160 "lea global_{local}_{id}-2b({addr}), {addr}",
161 addr = out(reg) addr,
162 local = const if LOCAL {1} else {0},
163 id = sym global::<T>,
164 options(pure, nomem, att_syntax)
165 );
166 }
167 &*(addr as *const _)
168 }
169 }
170 #[cfg(not(any(
171 target_arch = "x86_64",
172 target_arch = "aarch64",
173 target_arch = "arm",
174 target_arch = "x86"
175 )))]
176 {
177 pub(crate) struct SyncWrapper(*const u8);
178 unsafe impl Sync for SyncWrapper {}
179 unsafe impl Send for SyncWrapper {}
180 use core::any::TypeId;
181 static MAP: std::sync::RwLock<TypeIdMap<SyncWrapper>> =
182 std::sync::RwLock::new(TypeIdMap::with_hasher(NoOpTypeIdBuildHasher));
183 {
184 let guard = MAP.read().unwrap();
185 if let Some(value) = guard.get(&TypeId::of::<T>()) {
186 return unsafe { &*(value.0 as *const T) };
187 }
188 }
189 let mut guard = MAP.write().unwrap();
190 let value =
191 guard
192 .entry(TypeId::of::<T>())
193 .or_insert(SyncWrapper(
194 alloc::boxed::Box::into_raw(alloc::boxed::Box::new(
195 <T as bytemuck::Zeroable>::zeroed(),
196 )) as *const u8,
197 ));
198 unsafe { &*(value.0 as *const T) }
199 }
200}
201
#[cfg(all(
    feature = "alloc",
    any(
        feature = "std",
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "x86"
    )
))]
pub mod non_zeroable_global {
    //! Lazily initialized per-type globals for types that are *not*
    //! [`bytemuck::Zeroable`]: a zeroable atomic-pointer slot is obtained via
    //! [`global`] and pointed at a leaked heap allocation on first use.
    extern crate alloc;

    use super::*;

    use core::sync::atomic::Ordering;

    /// Per-type slot: null until [`init`] installs a leaked `Box<T>`.
    struct Heap<T>(core::sync::atomic::AtomicPtr<T>);
    // SAFETY: the all-zero bit pattern is a null `AtomicPtr`, which is valid.
    unsafe impl<T> bytemuck::Zeroable for Heap<T> {}
    // SAFETY: the slot only ever hands out shared access to a `T: Sync`.
    unsafe impl<T: Sync> Sync for Heap<T> {}

    /// Error returned by [`init`] when a value for `T` is already installed.
    #[derive(Debug)]
    pub struct AlreadyInitialized;

    /// Installs `data` as the global instance for `T`.
    ///
    /// On failure the freshly boxed `data` is dropped and
    /// [`AlreadyInitialized`] is returned.
    pub fn init<T: Sync + 'static>(data: T) -> Result<(), AlreadyInitialized> {
        let candidate = alloc::boxed::Box::into_raw(alloc::boxed::Box::new(data));
        let swapped = global::<Heap<T>>().0.compare_exchange(
            core::ptr::null_mut(),
            candidate,
            Ordering::SeqCst,
            Ordering::SeqCst,
        );
        if swapped.is_ok() {
            Ok(())
        } else {
            // Lost the race (or already initialized): reclaim our allocation.
            // SAFETY: `candidate` came from Box::into_raw above and was never
            // published, so we still own it.
            unsafe {
                drop(alloc::boxed::Box::from_raw(candidate));
            }
            Err(AlreadyInitialized)
        }
    }

    /// Returns the installed instance for `T`, or `None` before [`init`].
    pub fn get<T: Sync + 'static>() -> Option<&'static T> {
        let ptr = global::<Heap<T>>().0.load(Ordering::SeqCst);
        if ptr.is_null() {
            None
        } else {
            // SAFETY: a non-null slot was set exactly once by `init` to a
            // leaked `Box<T>`, which lives for the rest of the program.
            Some(unsafe { &*ptr })
        }
    }

    /// Returns the instance for `T`, running `cons` to create it if missing.
    ///
    /// `cons` may run (and its result be dropped) even when another thread
    /// wins the initialization race.
    pub fn get_or_init<T: Sync + 'static>(cons: impl Fn() -> T) -> &'static T {
        match get::<T>() {
            Some(existing) => existing,
            None => {
                // A concurrent `init` may beat ours; either way a value is
                // installed afterwards, so the unwrap cannot fail.
                let _ = init(cons());
                get::<T>().unwrap()
            }
        }
    }
}
296
/// Declares a `let` binding to a `'static` value, unique per monomorphized
/// expansion site: `generic_static! { static NAME: &TYPE = &INIT; }`.
///
/// Unlike an ordinary `static`, each generic instantiation of the enclosing
/// function gets its own instance (storage is keyed by the type of a closure
/// defined at the expansion site). `INIT` runs on first use; a loser of a
/// concurrent first-use race drops its value and shares the winner's.
#[cfg(all(
    feature = "alloc",
    any(
        feature = "std",
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "x86"
    )
))]
#[macro_export]
macro_rules! generic_static {
    {static $ident:ident $(: &$type:ty)? = &$init:expr;} => {
        #[allow(non_snake_case)]
        let $ident $(: &'static $type)? = {
            #[cfg(any(
                target_arch = "x86_64",
                target_arch = "aarch64",
                target_arch = "arm",
                target_arch = "x86"
            ))] {
                extern crate alloc;

                let init = ||$init;
                fn assert_sync_static<T: Sync + 'static>(_: &impl FnOnce() -> T) {}
                assert_sync_static(&init);

                // One `AtomicPtr<Value>` slot per (expansion site, Value):
                // `Key` is the type of the `||()` closure below, distinct for
                // every monomorphization of the surrounding code.
                fn make<Key: 'static, Value: Sync + 'static>(_: Key, _: &impl FnOnce()->Value)
                -> &'static ::core::sync::atomic::AtomicPtr<Value> {
                    struct Holder<T, D> {
                        _marker: core::marker::PhantomData<T>,
                        value: core::sync::atomic::AtomicPtr<D>
                    }
                    // SAFETY: an all-zero Holder is a PhantomData plus a null
                    // AtomicPtr — both valid.
                    unsafe impl<T, D> $crate::bytemuck::Zeroable for Holder<T,D>{}
                    unsafe impl<T, D> Sync for Holder<T,D>{}
                    &$crate::global::<Holder<Key, Value>>().value
                }
                let ptr = make(||(), &init);

                let data = ptr.load(::core::sync::atomic::Ordering::SeqCst);
                if data.is_null() {
                    let boxed = alloc::boxed::Box::into_raw(alloc::boxed::Box::new(init()));
                    match ptr.compare_exchange(
                        ::core::ptr::null_mut(),
                        boxed,
                        ::core::sync::atomic::Ordering::SeqCst,
                        ::core::sync::atomic::Ordering::SeqCst,
                    ) {
                        // We won the race: our allocation is the shared one.
                        Ok(_) => unsafe { &*boxed },
                        // Another thread won: free our box and return theirs.
                        // (The previous code freed `boxed` and then returned
                        // `&*boxed` unconditionally — a use-after-free.)
                        Err(winner) => {
                            unsafe {
                                drop(alloc::boxed::Box::from_raw(boxed));
                            }
                            unsafe { &*winner }
                        }
                    }
                } else {
                    unsafe { &*data }
                }
            }
            #[cfg(not(any(
                target_arch = "x86_64",
                target_arch = "aarch64",
                target_arch = "arm",
                target_arch = "x86"
            )))] {
                #[cfg(not(feature = "std"))]
                compile_error!("Unsupported platform, enable feature \"std\" to enable fallback");

                extern crate alloc;

                struct SyncWrapper(*const u8);
                unsafe impl Sync for SyncWrapper {}
                unsafe impl Send for SyncWrapper {}
                // The `||()` closure type is unique to this expansion site and
                // monomorphization; its TypeId keys the map below.
                fn id<T: 'static>(_: T) -> core::any::TypeId {
                    core::any::TypeId::of::<T>()
                }
                let id = id(||());
                static MAP: ::std::sync::RwLock<$crate::TypeIdMap<SyncWrapper>> =
                    ::std::sync::RwLock::new($crate::TypeIdMap::with_hasher(
                        $crate::NoOpTypeIdBuildHasher,
                    ));
                // Fast path under the read lock. (The previous code used
                // `break 'block` with no labeled block in scope, which failed
                // to compile on fallback targets.)
                let existing = {
                    let guard = MAP.read().unwrap();
                    guard.get(&id).map(|value| value.0)
                };
                match existing {
                    Some(found) => unsafe { &*(found as *const _) },
                    None => {
                        let mut guard = MAP.write().unwrap();
                        // `or_insert_with` so that a racing insertion between
                        // the two locks does not leak an extra allocation.
                        let value = guard.entry(id).or_insert_with(|| {
                            SyncWrapper(alloc::boxed::Box::into_raw(
                                alloc::boxed::Box::new($init),
                            ) as *const u8)
                        });
                        unsafe { &*(value.0 as *const _) }
                    }
                }
            }
        };
    };
}
432
433#[cfg(feature = "std")]
434pub use with_std::*;
435
436#[cfg(feature = "std")]
mod with_std {
    //! Std-only helpers for the non-asm fallback: a `HashMap` keyed by
    //! `TypeId` with a pass-through hasher.
    use core::any::TypeId;
    use core::hash::{BuildHasher, Hasher};
    use std::collections::HashMap;

    /// Map from `TypeId` to `T` that avoids re-hashing the id.
    pub type TypeIdMap<T> = HashMap<TypeId, T, NoOpTypeIdBuildHasher>;

    /// Builds [`NoOpTypeIdHasher`]s (always starting from zero).
    #[derive(Default)]
    pub struct NoOpTypeIdBuildHasher;

    impl BuildHasher for NoOpTypeIdBuildHasher {
        type Hasher = NoOpTypeIdHasher;

        fn build_hasher(&self) -> Self::Hasher {
            NoOpTypeIdHasher::default()
        }
    }

    /// Hasher that passes a `write_u64` value straight through. `TypeId`'s
    /// `Hash` impl is expected to feed a single `write_u64` (the crate's
    /// `type_id_hash` test checks this assumption); `write` is only a
    /// byte-folding fallback.
    #[doc(hidden)]
    #[derive(Default)]
    pub struct NoOpTypeIdHasher(u64);

    impl Hasher for NoOpTypeIdHasher {
        fn finish(&self) -> u64 {
            self.0
        }

        fn write(&mut self, bytes: &[u8]) {
            // Fallback mixing: rotate and add one byte at a time.
            for &byte in bytes {
                self.0 = self.0.rotate_left(8).wrapping_add(u64::from(byte));
            }
        }

        fn write_u64(&mut self, i: u64) {
            self.0 = i
        }
    }
}
478
#[cfg(test)]
mod test {
    use std::{
        any::TypeId,
        hash::{Hash, Hasher},
    };

    // Globals are keyed by type: different atomic types must resolve to
    // distinct storage, and writes through one must not affect the other.
    #[test]
    fn test_local_global() {
        use crate::local_global;
        use core::sync::atomic::{AtomicI32, AtomicI64, Ordering};

        let a = local_global::<AtomicI32>();
        let b = local_global::<AtomicI64>();
        // Storage starts zeroed.
        assert_eq!(a.load(Ordering::Relaxed), 0);
        a.store(69, Ordering::Relaxed);
        assert_eq!(a.load(Ordering::Relaxed), 69);
        // Writing through `a` must leave the AtomicI64 global untouched.
        assert_eq!(b.load(Ordering::Relaxed), 0);
        assert_eq!(*local_global::<i64>(), 0);

        // NOTE(review): presumably this forces a second lookup of the same
        // type to survive optimization — confirm intent.
        core::hint::black_box(local_global::<AtomicI64>());
    }

    // `generic_static!` inside a generic fn: each type-parameter
    // instantiation gets its own counter, persistent across calls.
    #[test]
    fn test_macro() {
        use core::sync::atomic::{AtomicI32, Ordering};
        #[allow(clippy::extra_unused_type_parameters)]
        fn get_and_inc<T: 'static>() -> i32 {
            generic_static!(
                static BLUB: &AtomicI32 = &AtomicI32::new(1);
            );
            let value = BLUB.load(Ordering::Relaxed);
            BLUB.fetch_add(1, Ordering::Relaxed);
            value
        }
        // Same instantiation shares state across calls...
        assert_eq!(get_and_inc::<bool>(), 1);
        assert_eq!(get_and_inc::<bool>(), 2);
        // ...a different instantiation starts fresh...
        assert_eq!(get_and_inc::<i32>(), 1);
        // ...and the first keeps counting where it left off.
        assert_eq!(get_and_inc::<bool>(), 3);

        // Two distinct expansion sites with the same value type must still be
        // independent statics.
        generic_static!(
            static FOO_1: &AtomicI32 = &AtomicI32::new(0);
        );
        generic_static!(
            static FOO_2: &AtomicI32 = &AtomicI32::new(69);
        );
        assert_eq!(FOO_1.load(Ordering::Relaxed), 0);
        assert_eq!(FOO_2.load(Ordering::Relaxed), 69);
        FOO_1.store(1, Ordering::Relaxed);
        FOO_2.store(2, Ordering::Relaxed);
        assert_eq!(FOO_1.load(Ordering::Relaxed), 1);
        assert_eq!(FOO_2.load(Ordering::Relaxed), 2);
    }

    // Compile-time check: the macro works without a type annotation and with
    // an initializer that moves a local in.
    #[test]
    fn test_macro_types() {
        fn generic<T: Sync + 'static>(t: T) {
            generic_static! {
                static _FOO = &t;
            }
        }
        generic(0);
        generic(true);
    }

    // NoOpTypeIdHasher assumes `TypeId`'s Hash impl only calls `write_u64`.
    // Hashing a TypeId with a hasher whose `write` panics must not panic.
    #[test]
    fn type_id_hash() {
        TypeId::of::<()>().hash(&mut {
            struct H;
            impl Hasher for H {
                fn finish(&self) -> u64 {
                    0
                }

                fn write(&mut self, _: &[u8]) {
                    unimplemented!()
                }

                fn write_u64(&mut self, _: u64) {}
            }
            H
        });
    }
}