extern crate alloc;
use core::{alloc::Layout, ptr::NonNull};
use memory_addr::align_up;
use crate::addr_of_sym;
/// Byte alignment used for the whole TLS area allocation.
const TLS_ALIGN: usize = 0x10;
// Per-architecture TLS layout parameters:
// - `TCB_SIZE`: bytes reserved for the thread control block adjacent to
//   the thread pointer (TP).
// - `GAP_ABOVE_TP`: bytes of reserved space directly above the TP before
//   the static TLS blocks begin.
// NOTE(review): the aarch64 16-byte gap presumably matches the AArch64
// TLS ABI's reserved area above TP — confirm against the platform ABI doc.
cfg_if::cfg_if! {
if #[cfg(target_arch = "x86_64")] {
// x86_64: an 8-byte TCB slot holds the TP self-pointer written by `init_tcb`.
const TCB_SIZE: usize = 8; const GAP_ABOVE_TP: usize = 0;
} else if #[cfg(target_arch = "aarch64")] {
const TCB_SIZE: usize = 0;
const GAP_ABOVE_TP: usize = 16;
} else if #[cfg(target_arch = "riscv64")] {
const TCB_SIZE: usize = 0;
const GAP_ABOVE_TP: usize = 0;
} else if #[cfg(target_arch = "loongarch64")] {
const TCB_SIZE: usize = 0;
const GAP_ABOVE_TP: usize = 0;
}
}
// Boundary symbols of the TLS template image — presumably defined in the
// linker script (TODO confirm). Declared as zero-arg functions only so
// their *addresses* can be taken; they are never called.
//   `_stdata`.._etdata` : initialized TLS data (.tdata image)
//   `_etdata`.._etbss`  : zero-initialized TLS data (.tbss)
unsafe extern "C" {
fn _stdata();
fn _etdata();
fn _etbss();
}
/// An owned, heap-allocated thread-local storage (TLS) area.
///
/// The allocation is created by [`TlsArea::alloc`] and freed on drop.
pub struct TlsArea {
// Base of the zeroed allocation returned by `alloc_zeroed`.
base: NonNull<u8>,
// Layout used for the allocation; needed again for `dealloc`.
layout: Layout,
}
impl Drop for TlsArea {
fn drop(&mut self) {
// SAFETY: `base` was obtained from `alloc_zeroed` with exactly this
// `layout` in `TlsArea::alloc`, and ownership guarantees it is
// deallocated at most once (here).
unsafe {
alloc::alloc::dealloc(self.base.as_ptr(), self.layout);
}
}
}
impl TlsArea {
    /// Returns the thread-pointer (TP) value for this TLS area, i.e. the
    /// address to load into the architecture's TLS register.
    pub fn tls_ptr(&self) -> *mut u8 {
        // SAFETY: `tp_offset()` lies within the allocation of size
        // `tls_area_size()` (see the per-arch layout functions below).
        unsafe { self.base.as_ptr().add(tp_offset()) }
    }

    /// Allocates a zeroed TLS area, copies the `.tdata` template image into
    /// it, and initializes the TCB.
    ///
    /// # Panics
    ///
    /// Panics if the computed layout is invalid; aborts via
    /// `handle_alloc_error` if the allocation fails.
    pub fn alloc() -> Self {
        let layout = Layout::from_size_align(tls_area_size(), TLS_ALIGN).unwrap();
        // Zeroed allocation: `.tbss` (and any padding) must read as zero.
        let area_base = unsafe { alloc::alloc::alloc_zeroed(layout) };
        // BUGFIX: check for allocation failure *before* writing through the
        // pointer. The original copied into `area_base` and only unwrapped a
        // `NonNull` afterwards, which is UB when the allocator returns null.
        let base = match NonNull::new(area_base) {
            Some(base) => base,
            None => alloc::alloc::handle_alloc_error(layout),
        };
        unsafe {
            // Copy the initialized `.tdata` image; `.tbss` stays zero.
            let tls_load_base = _stdata as *mut u8;
            let tls_load_size = (_etdata as *mut u8).offset_from_unsigned(_stdata as *mut u8);
            core::ptr::copy_nonoverlapping(
                tls_load_base,
                base.as_ptr().add(static_tls_offset()),
                tls_load_size,
            );
            init_tcb(base.as_ptr());
        }
        Self { base, layout }
    }
}
/// Size in bytes of the static TLS image (`.tdata` + `.tbss`), rounded up
/// to the TLS alignment.
fn static_tls_size() -> usize {
    let image_size = addr_of_sym!(_etbss) - addr_of_sym!(_stdata);
    align_up(image_size, TLS_ALIGN)
}
/// Offset from the TLS area base at which the static TLS image is placed.
fn static_tls_offset() -> usize {
    match () {
        // x86_64: the static TLS blocks start at the very base of the area.
        _ if cfg!(target_arch = "x86_64") => 0,
        // Other supported architectures: the image follows the TCB and the
        // reserved gap above the thread pointer.
        _ if cfg!(any(
            target_arch = "aarch64",
            target_arch = "riscv64",
            target_arch = "loongarch64"
        )) => TCB_SIZE + GAP_ABOVE_TP,
        _ => unreachable!(),
    }
}
/// Offset from the TLS area base to the thread-pointer (TP) value.
fn tp_offset() -> usize {
    match () {
        // x86_64: TP points just past the static TLS image (at the TCB
        // self-pointer slot).
        _ if cfg!(target_arch = "x86_64") => static_tls_size(),
        // Other supported architectures: TP points just past the TCB.
        _ if cfg!(any(
            target_arch = "aarch64",
            target_arch = "riscv64",
            target_arch = "loongarch64"
        )) => TCB_SIZE,
        _ => unreachable!(),
    }
}
/// Total number of bytes to allocate for one TLS area.
fn tls_area_size() -> usize {
    match () {
        // x86_64: static image first, TCB (self-pointer slot) at the end.
        _ if cfg!(target_arch = "x86_64") => static_tls_size() + TCB_SIZE,
        // Other supported architectures: TCB, then gap, then static image.
        _ if cfg!(any(
            target_arch = "aarch64",
            target_arch = "riscv64",
            target_arch = "loongarch64"
        )) => TCB_SIZE + GAP_ABOVE_TP + static_tls_size(),
        _ => unreachable!(),
    }
}
/// Initializes the TCB inside a freshly allocated TLS area.
///
/// On x86_64 the `usize` at the thread pointer is written with the TP
/// address itself (the self-pointer slot); no other supported architecture
/// needs TCB initialization here.
///
/// # Safety
///
/// `tls_area` must be the base of a live allocation of at least
/// `tls_area_size()` bytes.
unsafe fn init_tcb(tls_area: *mut u8) {
    if !cfg!(target_arch = "x86_64") {
        return;
    }
    // SAFETY: per the function contract, `tp_offset()` plus one `usize`
    // fits inside the allocation (tls_area_size() = static_tls_size() +
    // TCB_SIZE on x86_64).
    unsafe {
        let tp = tls_area.add(tp_offset()) as *mut usize;
        *tp = tp as usize;
    }
}