use crate::arch::{
STACK_ALIGNMENT, TLS_OFFSET, clone, munmap_and_exit_thread, set_thread_pointer, thread_pointer,
};
#[cfg(feature = "thread-at-exit")]
use alloc::boxed::Box;
#[cfg(feature = "unstable-errno")]
use core::cell::Cell;
use core::cmp::max;
use core::ffi::c_void;
use core::mem::{align_of, offset_of, size_of};
use core::ptr::{NonNull, copy_nonoverlapping, drop_in_place, null, null_mut};
use core::slice;
use core::sync::atomic::Ordering::SeqCst;
use core::sync::atomic::{AtomicI32, AtomicPtr, AtomicU8, AtomicU32};
use linux_raw_sys::elf::*;
use rustix::io;
use rustix::mm::{MapFlags, MprotectFlags, ProtFlags, mmap_anonymous, mprotect};
use rustix::param::{linux_execfn, page_size};
use rustix::process::{Resource, getrlimit};
#[cfg(feature = "signal")]
use rustix::runtime::{How, KernelSigSet, kernel_sigprocmask};
use rustix::runtime::{exe_phdrs, set_tid_address};
use rustix::thread::gettid;
pub use rustix::thread::Pid as ThreadId;
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct Thread(NonNull<ThreadData>);
impl Thread {
#[inline]
pub fn from_raw(raw: *mut c_void) -> Self {
Self(NonNull::new(raw.cast()).unwrap())
}
#[inline]
pub unsafe fn from_raw_unchecked(raw: *mut c_void) -> Self {
unsafe { Self(NonNull::new_unchecked(raw.cast())) }
}
#[inline]
pub fn from_raw_non_null(raw: NonNull<c_void>) -> Self {
Self(raw.cast())
}
#[inline]
pub fn to_raw(self) -> *mut c_void {
self.0.cast().as_ptr()
}
#[inline]
pub fn to_raw_non_null(self) -> NonNull<c_void> {
self.0.cast()
}
}
struct ThreadData {
thread_id: AtomicI32,
#[cfg(feature = "unstable-errno")]
errno_val: Cell<i32>,
detached: AtomicU8,
stack_addr: *mut c_void,
stack_size: usize,
guard_size: usize,
map_size: usize,
return_value: AtomicPtr<c_void>,
#[cfg(feature = "thread-at-exit")]
dtors: smallvec::SmallVec<[Box<dyn FnOnce()>; 4]>,
}
const INITIAL: u8 = 0;
const DETACHED: u8 = 1;
const ABANDONED: u8 = 2;
impl ThreadData {
#[inline]
fn new(stack_addr: *mut c_void, stack_size: usize, guard_size: usize, map_size: usize) -> Self {
Self {
thread_id: AtomicI32::new(0),
#[cfg(feature = "unstable-errno")]
errno_val: Cell::new(0),
detached: AtomicU8::new(INITIAL),
stack_addr,
stack_size,
guard_size,
map_size,
return_value: AtomicPtr::new(null_mut()),
#[cfg(feature = "thread-at-exit")]
dtors: smallvec::SmallVec::new(),
}
}
}
#[repr(C)]
struct Metadata {
#[cfg(any(target_arch = "aarch64", target_arch = "arm", target_arch = "riscv64"))]
thread: ThreadData,
abi: Abi,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
thread: ThreadData,
}
#[repr(C)]
#[cfg_attr(target_arch = "arm", repr(align(8)))]
struct Abi {
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
thread_pointee: [u8; 0],
#[cfg(any(target_arch = "aarch64", target_arch = "arm", target_arch = "riscv64"))]
canary: usize,
#[cfg(any(target_arch = "aarch64", target_arch = "arm"))]
thread_pointee: [u8; 0],
#[cfg(any(target_arch = "aarch64", target_arch = "arm", target_arch = "riscv64"))]
dtv: *const c_void,
#[cfg(target_arch = "riscv64")]
thread_pointee: [u8; 0],
#[cfg(any(target_arch = "aarch64", target_arch = "arm"))]
_pad: [usize; 1],
#[cfg(target_arch = "riscv64")]
_pad: [usize; 0],
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
this: *mut c_void,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
dtv: *const c_void,
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
_pad: [usize; 3],
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
canary: usize,
}
static mut STARTUP_TLS_INFO: StartupTlsInfo = StartupTlsInfo {
addr: null(),
mem_size: 0,
file_size: 0,
align: 0,
};
struct StartupTlsInfo {
addr: *const c_void,
mem_size: usize,
file_size: usize,
align: usize,
}
static mut STARTUP_STACK_SIZE: usize = 0;
pub(super) fn initialize_startup_info() {
let mut tls_phdr = null();
let mut stack_size = 0;
let mut offset = 0;
let (first_phdr, phent, phnum) = exe_phdrs();
let mut current_phdr = first_phdr.cast::<Elf_Phdr>();
let dynamic_addr: *const u8 = crate::arch::dynamic_table_addr().cast();
unsafe {
let phdrs_end = current_phdr.byte_add(phnum * phent);
while current_phdr != phdrs_end {
let phdr = &*current_phdr;
current_phdr = current_phdr.byte_add(phent);
match phdr.p_type {
PT_PHDR => offset = first_phdr.addr().wrapping_sub(phdr.p_vaddr),
PT_DYNAMIC => offset = dynamic_addr.addr().wrapping_sub(phdr.p_vaddr),
PT_TLS => tls_phdr = phdr,
PT_GNU_STACK => stack_size = phdr.p_memsz,
_ => {}
}
}
STARTUP_TLS_INFO = if tls_phdr.is_null() {
StartupTlsInfo {
addr: NonNull::dangling().as_ptr(),
mem_size: 0,
file_size: 0,
align: 1,
}
} else {
let tls_phdr = &*tls_phdr;
StartupTlsInfo {
addr: first_phdr.with_addr(offset.wrapping_add(tls_phdr.p_vaddr)),
mem_size: tls_phdr.p_memsz,
file_size: tls_phdr.p_filesz,
align: tls_phdr.p_align,
}
};
STARTUP_STACK_SIZE = stack_size;
}
}
unsafe extern "C" {
static _DYNAMIC: c_void;
}
core::arch::global_asm!(".weak _DYNAMIC");
pub(super) unsafe fn initialize_main(mem: *mut c_void) {
unsafe {
let execfn = linux_execfn().to_bytes_with_nul();
let stack_base = execfn.as_ptr().add(execfn.len());
let stack_base = stack_base
.map_addr(|ptr| round_up(ptr, page_size()))
.cast_mut();
let stack_map_size = getrlimit(Resource::Stack).current.unwrap() as usize;
let stack_least = stack_base.sub(stack_map_size);
let stack_size = stack_least.offset_from(mem.cast::<u8>()) as usize;
let guard_size = page_size();
let random_ptr = rustix::runtime::random().cast::<usize>();
let canary = random_ptr.read_unaligned();
__stack_chk_guard = canary;
let mut alloc_size = 0;
let (tls_data_bottom, header) = calculate_tls_size(&mut alloc_size);
let new = mmap_anonymous(
null_mut(),
alloc_size,
ProtFlags::READ | ProtFlags::WRITE,
MapFlags::PRIVATE,
)
.unwrap()
.cast::<u8>();
let metadata_align = max(STARTUP_TLS_INFO.align, align_of::<Metadata>());
debug_assert_eq!(new.addr() % metadata_align, 0);
let tls_data = new.add(tls_data_bottom);
let metadata: *mut Metadata = new.add(header).cast();
let (newtls, thread_id_ptr) = initialize_tls(
tls_data,
metadata,
canary,
stack_least,
stack_size,
guard_size,
0,
);
let tid = rustix::runtime::set_tid_address(thread_id_ptr.cast());
*thread_id_ptr = tid.as_raw_nonzero().get();
set_thread_pointer(newtls);
}
}
fn calculate_tls_size(map_size: &mut usize) -> (usize, usize) {
let (startup_tls_align, startup_tls_mem_size) =
unsafe { (STARTUP_TLS_INFO.align, STARTUP_TLS_INFO.mem_size) };
let tls_data_align = startup_tls_align;
let page_align = page_size();
let header_align = align_of::<Metadata>();
let metadata_align = max(tls_data_align, header_align);
debug_assert!(metadata_align <= page_align);
*map_size = round_up(*map_size, metadata_align);
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
let tls_data_bottom = *map_size;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
{
*map_size += round_up(startup_tls_mem_size, tls_data_align);
}
let header = *map_size;
*map_size += size_of::<Metadata>();
#[cfg(any(target_arch = "aarch64", target_arch = "arm", target_arch = "riscv64"))]
{
*map_size = round_up(*map_size, tls_data_align);
}
#[cfg(any(target_arch = "aarch64", target_arch = "arm", target_arch = "riscv64"))]
let tls_data_bottom = *map_size;
#[cfg(any(target_arch = "aarch64", target_arch = "arm", target_arch = "riscv64"))]
{
*map_size += round_up(startup_tls_mem_size, tls_data_align);
}
(tls_data_bottom, header)
}
unsafe fn initialize_tls(
tls_data: *mut u8,
metadata: *mut Metadata,
canary: usize,
stack_least: *mut u8,
stack_size: usize,
guard_size: usize,
map_size: usize,
) -> (*mut c_void, *mut i32) {
unsafe {
let newtls: *mut c_void = (*metadata).abi.thread_pointee.as_mut_ptr().cast();
metadata.write(Metadata {
abi: Abi {
canary,
dtv: null(),
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
this: newtls,
_pad: Default::default(),
thread_pointee: [],
},
thread: ThreadData::new(stack_least.cast(), stack_size, guard_size, map_size),
});
slice::from_raw_parts_mut(tls_data, STARTUP_TLS_INFO.file_size).copy_from_slice(
slice::from_raw_parts(
STARTUP_TLS_INFO.addr.cast::<u8>(),
STARTUP_TLS_INFO.file_size,
),
);
slice::from_raw_parts_mut(
tls_data.add(STARTUP_TLS_INFO.file_size),
STARTUP_TLS_INFO.mem_size - STARTUP_TLS_INFO.file_size,
)
.fill(0);
let thread_id_ptr = (*metadata).thread.thread_id.as_ptr().cast::<i32>();
(newtls, thread_id_ptr)
}
}
pub unsafe fn create(
fn_: unsafe fn(&mut [Option<NonNull<c_void>>]) -> Option<NonNull<c_void>>,
args: &[Option<NonNull<c_void>>],
stack_size: usize,
guard_size: usize,
) -> io::Result<Thread> {
let page_align = page_size();
let stack_align = 16;
let mut map_size = 0;
map_size += round_up(guard_size, page_align);
let stack_bottom = map_size;
map_size += round_up(stack_size, stack_align);
let stack_top = map_size;
let (tls_data_bottom, header) = calculate_tls_size(&mut map_size);
unsafe {
let map = mmap_anonymous(
null_mut(),
map_size,
ProtFlags::empty(),
MapFlags::PRIVATE | MapFlags::STACK,
)?
.cast::<u8>();
mprotect(
map.add(stack_bottom).cast(),
map_size - stack_bottom,
MprotectFlags::READ | MprotectFlags::WRITE,
)?;
let stack = map.add(stack_top);
let stack_least = map.add(stack_bottom);
let tls_data = map.add(tls_data_bottom);
let metadata: *mut Metadata = map.add(header).cast();
let canary = (*current_metadata()).abi.canary;
let (newtls, thread_id_ptr) = initialize_tls(
tls_data,
metadata,
canary,
stack_least,
stack_size,
guard_size,
map_size,
);
let stack = stack.cast::<Option<NonNull<c_void>>>().sub(args.len());
let stack = stack.with_addr(stack.addr() & STACK_ALIGNMENT.wrapping_neg());
copy_nonoverlapping(args.as_ptr(), stack, args.len());
let flags = CloneFlags::VM
| CloneFlags::FS
| CloneFlags::FILES
| CloneFlags::SIGHAND
| CloneFlags::THREAD
| CloneFlags::SYSVSEM
| CloneFlags::SETTLS
| CloneFlags::CHILD_CLEARTID
| CloneFlags::CHILD_SETTID
| CloneFlags::PARENT_SETTID;
let clone_res = clone(
flags.bits(),
stack.cast(),
thread_id_ptr,
thread_id_ptr,
newtls,
core::mem::transmute(fn_),
args.len(),
);
if clone_res >= 0 {
#[cfg(feature = "log")]
{
let id = current_id();
log::trace!(
"Thread[{:?}] launched thread Thread[{:?}] with stack_size={} and guard_size={}",
id.as_raw_nonzero(),
clone_res,
stack_size,
guard_size
);
for (i, arg) in args.iter().enumerate() {
log::trace!("Thread[{:?}] args[{}]: {:?}", id.as_raw_nonzero(), i, arg);
}
}
Ok(Thread(NonNull::from(&mut (*metadata).thread)))
} else {
Err(io::Errno::from_raw_os_error(-clone_res as i32))
}
}
}
pub(super) unsafe extern "C" fn entry(
fn_: extern "C" fn(),
args: *mut *mut c_void,
num_args: usize,
) -> ! {
unsafe {
#[cfg(feature = "log")]
log::trace!("Thread[{:?}] launched", current_id().as_raw_nonzero());
#[cfg(debug_assertions)]
{
#[cfg(feature = "nightly")]
{
unsafe extern "C" {
#[link_name = "llvm.frameaddress"]
fn builtin_frame_address(level: i32) -> *const u8;
#[link_name = "llvm.returnaddress"]
fn builtin_return_address(level: i32) -> *const u8;
#[cfg(target_arch = "aarch64")]
#[link_name = "llvm.sponentry"]
fn builtin_sponentry() -> *const u8;
}
debug_assert_eq!(builtin_return_address(0), null());
debug_assert_ne!(builtin_frame_address(0), null());
#[cfg(not(any(target_arch = "x86", target_arch = "arm")))]
debug_assert_eq!(builtin_frame_address(0).addr() & 0xf, 0);
#[cfg(target_arch = "arm")]
debug_assert_eq!(builtin_frame_address(0).addr() & 0x3, 0);
#[cfg(target_arch = "x86")]
debug_assert_eq!(builtin_frame_address(0).addr() & 0xf, 8);
debug_assert_eq!(builtin_frame_address(1), null());
#[cfg(target_arch = "aarch64")]
debug_assert_ne!(builtin_sponentry(), null());
#[cfg(target_arch = "aarch64")]
debug_assert_eq!(builtin_sponentry().addr() & 0xf, 0);
}
debug_assert_eq!(current_id(), gettid());
}
let fn_: unsafe fn(&mut [*mut c_void]) -> Option<NonNull<c_void>> =
core::mem::transmute(fn_);
let args = slice::from_raw_parts_mut(args, num_args);
let return_value = fn_(args);
exit(return_value)
}
}
unsafe fn exit(return_value: Option<NonNull<c_void>>) -> ! {
unsafe {
let current = current();
#[cfg(feature = "log")]
if log::log_enabled!(log::Level::Trace) {
log::trace!(
"Thread[{:?}] returned {:?}",
current.0.as_ref().thread_id.load(SeqCst),
return_value
);
}
#[cfg(feature = "thread-at-exit")]
call_dtors(current);
let state = current
.0
.as_ref()
.detached
.compare_exchange(INITIAL, ABANDONED, SeqCst, SeqCst);
if let Err(e) = state {
#[cfg(feature = "log")]
let current_thread_id = current.0.as_ref().thread_id.load(SeqCst);
let current_map_size = current.0.as_ref().map_size;
let current_stack_addr = current.0.as_ref().stack_addr;
let current_guard_size = current.0.as_ref().guard_size;
#[cfg(feature = "log")]
log::trace!("Thread[{:?}] exiting as detached", current_thread_id);
debug_assert_eq!(e, DETACHED);
drop_in_place(current.0.as_ptr());
let map_size = current_map_size;
if map_size != 0 {
let _ = set_tid_address(null_mut());
#[cfg(feature = "signal")]
{
let all = KernelSigSet::all();
kernel_sigprocmask(How::BLOCK, Some(&all)).ok();
}
let map = current_stack_addr.byte_sub(current_guard_size);
munmap_and_exit_thread(map, map_size);
}
} else {
#[cfg(feature = "log")]
if log::log_enabled!(log::Level::Trace) {
log::trace!(
"Thread[{:?}] exiting as joinable",
current.0.as_ref().thread_id.load(SeqCst)
);
}
let return_value = match return_value {
Some(return_value) => return_value.as_ptr(),
None => null_mut(),
};
current.0.as_ref().return_value.store(return_value, SeqCst);
}
rustix::runtime::exit_thread(0)
}
}
#[cfg(feature = "thread-at-exit")]
pub(crate) fn call_dtors(current: Thread) {
let mut current = current;
while let Some(func) = unsafe { current.0.as_mut().dtors.pop() } {
#[cfg(feature = "log")]
if log::log_enabled!(log::Level::Trace) {
log::trace!(
"Thread[{:?}] calling `thread::at_exit`-registered function",
unsafe { current.0.as_ref().thread_id.load(SeqCst) },
);
}
func();
}
}
#[inline]
pub unsafe fn detach(thread: Thread) {
unsafe {
#[cfg(feature = "log")]
let thread_id = thread.0.as_ref().thread_id.load(SeqCst);
#[cfg(feature = "log")]
if log::log_enabled!(log::Level::Trace) {
log::trace!(
"Thread[{:?}] marked as detached by Thread[{:?}]",
thread_id,
current_id().as_raw_nonzero()
);
}
if thread.0.as_ref().detached.swap(DETACHED, SeqCst) == ABANDONED {
wait_for_exit(thread);
#[cfg(feature = "log")]
log_thread_to_be_freed(thread_id);
free_memory(thread);
}
}
}
pub unsafe fn join(thread: Thread) -> Option<NonNull<c_void>> {
unsafe {
let thread_data = thread.0.as_ref();
#[cfg(feature = "log")]
let thread_id = thread_data.thread_id.load(SeqCst);
#[cfg(feature = "log")]
if log::log_enabled!(log::Level::Trace) {
log::trace!(
"Thread[{:?}] is being joined by Thread[{:?}]",
thread_id,
current_id().as_raw_nonzero()
);
}
wait_for_exit(thread);
debug_assert_eq!(thread_data.detached.load(SeqCst), ABANDONED);
#[cfg(feature = "log")]
log_thread_to_be_freed(thread_id);
let return_value = thread_data.return_value.load(SeqCst);
free_memory(thread);
NonNull::new(return_value)
}
}
unsafe fn wait_for_exit(thread: Thread) {
unsafe {
use rustix::thread::futex;
let thread_data = thread.0.as_ref();
let thread_id = &thread_data.thread_id;
while let Some(id_value) = ThreadId::from_raw(thread_id.load(SeqCst)) {
match futex::wait(
AtomicU32::from_ptr(thread_id.as_ptr().cast()),
futex::Flags::empty(),
id_value.as_raw_nonzero().get() as u32,
None,
) {
Ok(_) => break,
Err(io::Errno::INTR) => continue,
Err(e) => debug_assert_eq!(e, io::Errno::AGAIN),
}
}
}
}
#[cfg(feature = "log")]
fn log_thread_to_be_freed(thread_id: i32) {
if log::log_enabled!(log::Level::Trace) {
log::trace!("Thread[{:?}] memory being freed", thread_id);
}
}
unsafe fn free_memory(thread: Thread) {
unsafe {
use rustix::mm::munmap;
let map_size = thread.0.as_ref().map_size;
let stack_addr = thread.0.as_ref().stack_addr;
let guard_size = thread.0.as_ref().guard_size;
drop_in_place(thread.0.as_ptr());
if map_size != 0 {
let map = stack_addr.byte_sub(guard_size);
munmap(map, map_size).unwrap();
}
}
}
#[cfg(feature = "thread-at-exit")]
pub fn at_exit(func: Box<dyn FnOnce()>) {
unsafe {
current().0.as_mut().dtors.push(func);
}
}
#[inline]
#[must_use]
fn current_metadata() -> *mut Metadata {
thread_pointer()
.wrapping_byte_sub(offset_of!(Metadata, abi) + offset_of!(Abi, thread_pointee))
.cast()
}
#[inline]
#[must_use]
pub fn current() -> Thread {
unsafe { Thread(NonNull::from(&mut (*current_metadata()).thread)) }
}
#[inline]
#[must_use]
pub fn current_id() -> ThreadId {
let tid = unsafe { ThreadId::from_raw_unchecked(current().0.as_ref().thread_id.load(SeqCst)) };
debug_assert_eq!(tid, gettid(), "`current_id` disagrees with `gettid`");
tid
}
#[doc(hidden)]
#[inline]
pub unsafe fn set_current_id_after_a_fork(tid: ThreadId) {
unsafe {
let current = current();
debug_assert_ne!(
tid.as_raw_nonzero().get(),
current.0.as_ref().thread_id.load(SeqCst),
"current thread ID already matches new thread ID"
);
debug_assert_eq!(tid, gettid(), "new thread ID disagrees with `gettid`");
current
.0
.as_ref()
.thread_id
.store(tid.as_raw_nonzero().get(), SeqCst);
}
}
#[cfg(feature = "unstable-errno")]
#[inline]
pub fn errno_location() -> *mut i32 {
unsafe { core::ptr::addr_of_mut!((*current_metadata()).thread.errno_val).cast::<i32>() }
}
#[inline]
#[must_use]
pub fn current_tls_addr(module: usize, offset: usize) -> *mut c_void {
assert_eq!(module, 1);
#[cfg(any(target_arch = "aarch64", target_arch = "arm", target_arch = "riscv64"))]
{
thread_pointer()
.wrapping_byte_add(size_of::<Abi>() - offset_of!(Abi, thread_pointee))
.wrapping_byte_add(TLS_OFFSET)
.wrapping_byte_add(offset)
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
unsafe {
thread_pointer()
.wrapping_byte_sub(STARTUP_TLS_INFO.mem_size)
.wrapping_byte_add(TLS_OFFSET)
.wrapping_byte_add(offset)
}
}
#[inline]
#[cfg_attr(docsrs, doc(cfg(feature = "take-charge")))]
pub unsafe fn id(thread: Thread) -> Option<ThreadId> {
unsafe {
let raw = thread.0.as_ref().thread_id.load(SeqCst);
ThreadId::from_raw(raw)
}
}
#[inline]
#[must_use]
pub unsafe fn stack(thread: Thread) -> (*mut c_void, usize, usize) {
unsafe {
let data = thread.0.as_ref();
(data.stack_addr, data.stack_size, data.guard_size)
}
}
#[inline]
#[must_use]
pub fn default_stack_size() -> usize {
unsafe { max(0x20000, STARTUP_STACK_SIZE) }
}
#[inline]
#[must_use]
pub fn default_guard_size() -> usize {
page_size() * 4
}
#[inline]
pub fn yield_current() {
rustix::thread::sched_yield()
}
#[cfg(target_arch = "arm")]
#[unsafe(no_mangle)]
extern "C" fn __aeabi_read_tp() -> *mut c_void {
thread_pointer()
}
#[unsafe(no_mangle)]
static mut __stack_chk_guard: usize = 0;
const fn round_up(addr: usize, boundary: usize) -> usize {
(addr + (boundary - 1)) & boundary.wrapping_neg()
}
bitflags::bitflags! {
struct CloneFlags: u32 {
const NEWTIME = linux_raw_sys::general::CLONE_NEWTIME; const VM = linux_raw_sys::general::CLONE_VM;
const FS = linux_raw_sys::general::CLONE_FS;
const FILES = linux_raw_sys::general::CLONE_FILES;
const SIGHAND = linux_raw_sys::general::CLONE_SIGHAND;
const PIDFD = linux_raw_sys::general::CLONE_PIDFD; const PTRACE = linux_raw_sys::general::CLONE_PTRACE;
const VFORK = linux_raw_sys::general::CLONE_VFORK;
const PARENT = linux_raw_sys::general::CLONE_PARENT;
const THREAD = linux_raw_sys::general::CLONE_THREAD;
const NEWNS = linux_raw_sys::general::CLONE_NEWNS;
const SYSVSEM = linux_raw_sys::general::CLONE_SYSVSEM;
const SETTLS = linux_raw_sys::general::CLONE_SETTLS;
const PARENT_SETTID = linux_raw_sys::general::CLONE_PARENT_SETTID;
const CHILD_CLEARTID = linux_raw_sys::general::CLONE_CHILD_CLEARTID;
const DETACHED = linux_raw_sys::general::CLONE_DETACHED;
const UNTRACED = linux_raw_sys::general::CLONE_UNTRACED;
const CHILD_SETTID = linux_raw_sys::general::CLONE_CHILD_SETTID;
const NEWCGROUP = linux_raw_sys::general::CLONE_NEWCGROUP; const NEWUTS = linux_raw_sys::general::CLONE_NEWUTS;
const NEWIPC = linux_raw_sys::general::CLONE_NEWIPC;
const NEWUSER = linux_raw_sys::general::CLONE_NEWUSER;
const NEWPID = linux_raw_sys::general::CLONE_NEWPID;
const NEWNET = linux_raw_sys::general::CLONE_NEWNET;
const IO = linux_raw_sys::general::CLONE_IO;
}
}