#ifndef _DB_MUTEX_INT_H_
#define _DB_MUTEX_INT_H_
#include "dbinc/atomic.h"
#if defined(__cplusplus)
extern "C" {
#endif
/*
 * POSIX pthreads: per-mutex storage is a pthread mutex/condition pair,
 * overlaid in a union with a read-write lock used for shared latches.
 */
#if defined(HAVE_MUTEX_PTHREADS)
#define MUTEX_FIELDS \
union { \
struct { \
pthread_mutex_t mutex; \
pthread_cond_t cond; \
} m; \
pthread_rwlock_t rwlock; \
} u;
/*
 * With shared latches configured (and no hybrid mutexes), an exclusive
 * acquire of a DB_MUTEX_SHARED mutex write-locks the rwlock member;
 * otherwise the plain pthread mutex is locked.  RET_SET records the
 * pthread call's return value in "ret".
 */
#if defined(HAVE_SHARED_LATCHES) && !defined(HAVE_MUTEX_HYBRID)
#define RET_SET_PTHREAD_LOCK(mutexp, ret) do { \
if (F_ISSET(mutexp, DB_MUTEX_SHARED)) \
RET_SET((pthread_rwlock_wrlock(&(mutexp)->u.rwlock)), \
ret); \
else \
RET_SET((pthread_mutex_lock(&(mutexp)->u.m.mutex)), ret); \
} while (0)
#define RET_SET_PTHREAD_TRYLOCK(mutexp, ret) do { \
if (F_ISSET(mutexp, DB_MUTEX_SHARED)) \
RET_SET((pthread_rwlock_trywrlock(&(mutexp)->u.rwlock)), \
ret); \
else \
RET_SET((pthread_mutex_trylock(&(mutexp)->u.m.mutex)), \
ret); \
} while (0)
#else
/* No shared latches (or hybrid mutexes): always use the plain mutex. */
#define RET_SET_PTHREAD_LOCK(mutexp, ret) \
RET_SET(pthread_mutex_lock(&(mutexp)->u.m.mutex), ret);
#define RET_SET_PTHREAD_TRYLOCK(mutexp, ret) \
RET_SET(pthread_mutex_trylock(&(mutexp)->u.m.mutex), ret);
#endif
#endif
/* UI-threads builds need <thread.h> even outside the UI section below. */
#ifdef HAVE_MUTEX_UI_THREADS
#include <thread.h>
#endif
/* Solaris LWP (light-weight process) mutex and condition variable. */
#ifdef HAVE_MUTEX_SOLARIS_LWP
#include <synch.h>
#define MUTEX_FIELDS \
lwp_mutex_t mutex; \
lwp_cond_t cond;
#endif
/* Solaris/UNIX International threads mutex and condition variable. */
#ifdef HAVE_MUTEX_UI_THREADS
#include <thread.h>
#include <synch.h>
#define MUTEX_FIELDS \
mutex_t mutex; \
cond_t cond;
#endif
/*
 * AIX _check_lock/_clear_lock test-and-set.  MUTEX_SET evaluates true on
 * successful acquisition; a zero word (MUTEX_INIT yields 0) is unlocked.
 */
#ifdef HAVE_MUTEX_AIX_CHECK_LOCK
#include <sys/atomic_op.h>
typedef int tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define MUTEX_INIT(x) 0
#define MUTEX_SET(x) (!_check_lock(x, 0, 1))
#define MUTEX_UNSET(x) _clear_lock(x, 0)
#endif
#endif
/* Apple/Darwin spinlocks via the libc _spin_lock_try/_spin_unlock pair. */
#ifdef HAVE_MUTEX_DARWIN_SPIN_LOCK_TRY
typedef u_int32_t tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
extern int _spin_lock_try(tsl_t *);
extern void _spin_unlock(tsl_t *);
#define MUTEX_SET(tsl) _spin_lock_try(tsl)
#define MUTEX_UNSET(tsl) _spin_unlock(tsl)
/* Initialize by unlocking; the expression's value (0) signals success. */
#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0)
#endif
#endif
/* HP-PA msemaphores require 16-byte alignment. */
#ifdef HAVE_MUTEX_HPPA_MSEM_INIT
#define MUTEX_ALIGN 16
#endif
/*
 * msemaphore interface: msem_lock with MSEM_IF_NOWAIT gives try-lock
 * semantics; msem_init returns a NULL/negative pointer on failure.
 */
#if defined(HAVE_MUTEX_MSEM_INIT) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
#include <sys/mman.h>
typedef msemaphore tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define MUTEX_INIT(x) (msem_init(x, MSEM_UNLOCKED) <= (msemaphore *)0)
#define MUTEX_SET(x) (!msem_lock(x, MSEM_IF_NOWAIT))
#define MUTEX_UNSET(x) msem_unlock(x, 0)
#endif
#endif
/* Plan 9 Lock with canlock (try) / unlock; zeroed Lock is unlocked. */
#ifdef HAVE_MUTEX_PLAN9
typedef Lock tsl_t;
#define MUTEX_INIT(x) (memset(x, 0, sizeof(Lock)), 0)
#define MUTEX_SET(x) canlock(x)
#define MUTEX_UNSET(x) unlock(x)
#endif
/* Reliant UNIX C library spinlocks (initspin/cspinlock/spinunlock). */
#ifdef HAVE_MUTEX_RELIANTUNIX_INITSPIN
#include <ulocks.h>
typedef spinlock_t tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define MUTEX_INIT(x) (initspin(x, 1), 0)
#define MUTEX_SET(x) (cspinlock(x) == 0)
#define MUTEX_UNSET(x) spinunlock(x)
#endif
#endif
/*
 * UNIX International semaphores (sema_init/sema_wait).  Note MUTEX_SET
 * blocks in sema_wait rather than trying, and this implementation is
 * the one that needs an explicit MUTEX_DESTROY.
 */
#ifdef HAVE_MUTEX_SEMA_INIT
#include <synch.h>
typedef sema_t tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define MUTEX_DESTROY(x) sema_destroy(x)
#define MUTEX_INIT(x) (sema_init(x, 1, USYNC_PROCESS, NULL) != 0)
#define MUTEX_SET(x) (sema_wait(x) == 0)
#define MUTEX_UNSET(x) sema_post(x)
#endif
#endif
/* SGI ABI locks (init_lock/acquire_lock/release_lock). */
#ifdef HAVE_MUTEX_SGI_INIT_LOCK
#include <abi_mutex.h>
typedef abilock_t tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define MUTEX_INIT(x) (init_lock(x) != 0)
#define MUTEX_SET(x) (!acquire_lock(x))
#define MUTEX_UNSET(x) release_lock(x)
#endif
#endif
/*
 * Solaris _lock_try/_lock_clear.  membar_enter/membar_exit supply the
 * acquire/release memory barriers used by other mutex paths as well.
 */
#ifdef HAVE_MUTEX_SOLARIS_LOCK_TRY
#include <sys/atomic.h>
#define MUTEX_MEMBAR(x) membar_enter()
#define MEMBAR_ENTER() membar_enter()
#define MEMBAR_EXIT() membar_exit()
#include <sys/machlock.h>
typedef lock_t tsl_t;
extern int _lock_try(lock_t *);
extern void _lock_clear(lock_t *);
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define MUTEX_INIT(x) 0
#define MUTEX_SET(x) _lock_try(x)
#define MUTEX_UNSET(x) _lock_clear(x)
#endif
#endif
/*
 * VMS: interlocked test-and-set-bit builtins (__TESTBITSSI on Alpha,
 * _BBSSI on VAX) set bit 0 and return its previous value, so MUTEX_SET
 * is true when the bit was previously clear.
 */
#ifdef HAVE_MUTEX_VMS
#include <sys/mman.h>
#include <builtins.h>
typedef volatile unsigned char tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
#ifdef __ALPHA
#define MUTEX_SET(tsl) (!__TESTBITSSI(tsl, 0))
#else
#define MUTEX_SET(tsl) (!(int)_BBSSI(0, tsl))
#endif
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0)
#endif
#endif
/*
 * VxWorks binary semaphores.  NOTE(review): MUTEX_SET references a
 * variable named "nowait" that must be in scope at each call site.
 */
#ifdef HAVE_MUTEX_VXWORKS
#include "taskLib.h"
typedef SEM_ID tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define MUTEX_SET(tsl) \
(semTake((*(tsl)), nowait ? NO_WAIT : WAIT_FOREVER) == OK)
#define MUTEX_UNSET(tsl) (semGive((*tsl)))
#define MUTEX_INIT(tsl) \
((*(tsl) = semBCreate(SEM_Q_FIFO, SEM_FULL)) == NULL)
#define MUTEX_DESTROY(tsl) semDelete(*tsl)
#endif
#endif
/* Win16: plain (non-atomic) assignments serve as the lock operations. */
#ifdef HAVE_MUTEX_WIN16
typedef unsigned int tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define MUTEX_INIT(x) 0
#define MUTEX_SET(tsl) (*(tsl) = 1)
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
#endif
#endif
/*
 * Win32 (MSVC and mingw/gcc): InterlockedExchange-based test-and-set.
 * MUTEX_SET is true when the previous value was 0 (lock acquired).
 */
#if defined(HAVE_MUTEX_WIN32) || defined(HAVE_MUTEX_WIN32_GCC)
typedef LONG volatile tsl_t;
/*
 * Extra per-mutex fields: a waiter count and an id (presumably used to
 * name the wakeup event -- TODO confirm against the Win32 mutex code).
 *
 * BUGFIX(review): the final macro line previously ended with a '\'
 * continuation, which spliced the following "#if" directive into the
 * macro body and unbalanced the #if/#endif nesting for Win32 builds.
 * The stray backslash is removed.
 */
#define MUTEX_FIELDS \
LONG nwaiters; \
u_int32_t id;
#if defined(LOAD_ACTUAL_MUTEX_CODE)
#define MUTEX_SET(tsl) (!InterlockedExchange((PLONG)tsl, 1))
#define MUTEX_UNSET(tsl) InterlockedExchange((PLONG)tsl, 0)
#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
/* x86 "pause" hint for spin loops, emitted as raw bytes for MSVC. */
#ifdef HAVE_MUTEX_WIN32
#if !defined(_WIN64) && !defined(DB_WINCE)
#define MUTEX_PAUSE {__asm{_emit 0xf3}; __asm{_emit 0x90}}
#endif
#endif
#ifdef HAVE_MUTEX_WIN32_GCC
#define MUTEX_PAUSE __asm__ volatile ("rep; nop" : : );
#endif
#endif
#endif
/*
 * 68K/gcc: "tas" atomically sets the byte's high bit and sets condition
 * codes from its old value; "seq" then yields nonzero iff the byte was
 * previously zero, i.e. the lock was free.
 */
#ifdef HAVE_MUTEX_68K_GCC_ASSEMBLY
typedef unsigned char tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define MUTEX_SET(tsl) ({ \
register tsl_t *__l = (tsl); \
int __r; \
__asm__ volatile("tas %1; \n \
seq %0" \
: "=dm" (__r), "=m" (*__l) \
: "1" (*__l) \
); \
__r & 1; \
})
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0)
#endif
#endif
/*
 * Alpha/gcc: LL/SC test-and-set.  Returns nonzero on acquisition, 0 if
 * the lock was held or the store-conditional failed (callers retry at a
 * higher level).  "mb" after a successful store is the acquire barrier;
 * MUTEX_UNSET issues a release "mb" before clearing the word.
 */
#ifdef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY
typedef u_int32_t tsl_t;
#define MUTEX_ALIGN 4
#ifdef LOAD_ACTUAL_MUTEX_CODE
static inline int
MUTEX_SET(tsl_t *tsl) {
register tsl_t *__l = tsl;
register tsl_t __r;
__asm__ volatile(
"1: ldl_l %0,%2\n"
" blbs %0,2f\n"
" or $31,1,%0\n"
" stl_c %0,%1\n"
" beq %0,3f\n"
" mb\n"
" br 3f\n"
"2: xor %0,%0\n"
"3:"
: "=&r"(__r), "=m"(*__l) : "1"(*__l) : "memory");
return __r;
}
static inline int
MUTEX_UNSET(tsl_t *tsl) {
__asm__ volatile(" mb\n");
return *tsl = 0;
}
#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
#endif
#endif
/* Tru64/cc: compiler builtins __LOCK_LONG_RETRY / __UNLOCK_LONG. */
#ifdef HAVE_MUTEX_TRU64_CC_ASSEMBLY
typedef volatile u_int32_t tsl_t;
#define MUTEX_ALIGN 4
#ifdef LOAD_ACTUAL_MUTEX_CODE
#include <alpha/builtins.h>
#define MUTEX_SET(tsl) (__LOCK_LONG_RETRY((tsl), 1) != 0)
#define MUTEX_UNSET(tsl) (__UNLOCK_LONG(tsl))
#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0)
#endif
#endif
/*
 * ARM/gcc: "swpb" atomically swaps 1 into the lock byte; xor-ing the old
 * value with 1 makes the expression true iff the byte was previously 0.
 */
#ifdef HAVE_MUTEX_ARM_GCC_ASSEMBLY
typedef unsigned char tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define MUTEX_SET(tsl) ({ \
int __r; \
__asm__ volatile( \
"swpb %0, %1, [%2]\n\t" \
"eor %0, %0, #1\n\t" \
: "=&r" (__r) \
: "r" (1), "r" (tsl) \
); \
__r & 1; \
})
#define MUTEX_UNSET(tsl) (*(volatile tsl_t *)(tsl) = 0)
#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0)
#endif
#endif
/*
 * HP-PA/gcc: "ldcws" loads the word and clears it; the lock is free when
 * the word is nonzero, which is why MUTEX_UNSET stores -1 (all bits set)
 * and why the word requires 16-byte alignment.
 */
#ifdef HAVE_MUTEX_HPPA_GCC_ASSEMBLY
typedef u_int32_t tsl_t;
#define MUTEX_ALIGN 16
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define MUTEX_SET(tsl) ({ \
register tsl_t *__l = (tsl); \
int __r; \
__asm__ volatile("ldcws 0(%1),%0" : "=r" (__r) : "r" (__l)); \
__r & 1; \
})
#define MUTEX_UNSET(tsl) (*(volatile tsl_t *)(tsl) = -1)
#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0)
#endif
#endif
/*
 * IA64/gcc: "xchg1" swaps 1 into the lock byte; __r ^ 1 is true iff the
 * byte was previously 0 (lock acquired).
 */
#ifdef HAVE_MUTEX_IA64_GCC_ASSEMBLY
typedef volatile unsigned char tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define MUTEX_SET(tsl) ({ \
register tsl_t *__l = (tsl); \
long __r; \
__asm__ volatile("xchg1 %0=%1,%2" : \
"=r"(__r), "+m"(*__l) : "r"(1)); \
__r ^ 1; \
})
#define MUTEX_UNSET(tsl) (*(tsl_t *)(tsl) = 0)
#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0)
#endif
#endif
/*
 * PowerPC/gcc: lwarx/stwcx. reservation loop with an "isync" acquire
 * barrier.  On failure the asm zeroes the local copy of the pointer, so
 * the function returns 0; on success it returns the (nonzero) pointer
 * value.  MUTEX_UNSET issues a full "sync" before clearing.
 */
#if defined(HAVE_MUTEX_PPC_GCC_ASSEMBLY)
typedef u_int32_t tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
static inline int
MUTEX_SET(int *tsl) {
int __r;
__asm__ volatile (
"0: \n\t"
" lwarx %0,0,%1 \n\t"
" cmpwi %0,0 \n\t"
" bne- 1f \n\t"
" stwcx. %1,0,%1 \n\t"
" isync \n\t"
" beq+ 2f \n\t"
" b 0b \n\t"
"1: \n\t"
" li %1,0 \n\t"
"2: \n\t"
: "=&r" (__r), "+r" (tsl)
:
: "cr0", "memory");
return (int)tsl;
}
static inline int
MUTEX_UNSET(tsl_t *tsl) {
__asm__ volatile("sync" : : : "memory");
return *tsl = 0;
}
#define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
#endif
#endif
/*
 * S/390 native compiler: compare-and-swap builtin.  NOTE(review): "cs"
 * references a variable named "zero" that must be in scope at call sites.
 */
#ifdef HAVE_MUTEX_S390_CC_ASSEMBLY
typedef int tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define MUTEX_SET(tsl) (!cs(&zero, (tsl), 1))
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0)
#endif
#endif
/*
 * S/390 gcc: "cs" (compare-and-swap) loop; returns nonzero when the old
 * value was 0, i.e. the lock was acquired.  (The stray line-continuation
 * backslashes are harmless inside the function body.)
 */
#ifdef HAVE_MUTEX_S390_GCC_ASSEMBLY
typedef int tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
static inline int
MUTEX_SET(tsl_t *tsl) { \
register tsl_t *__l = (tsl); \
int __r; \
__asm__ volatile( \
" la 1,%1\n" \
" lhi 0,1\n" \
" l %0,%1\n" \
"0: cs %0,0,0(1)\n" \
" jl 0b" \
: "=&d" (__r), "+m" (*__l) \
: : "0", "1", "cc"); \
return !__r; \
}
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0)
#endif
#endif
/*
 * SCO/UnixWare cc: an "asm function" (USL C compiler syntax) performing
 * a locked xchgb; the xor makes the result true iff the byte was 0.
 */
#ifdef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY
typedef unsigned char tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
#if defined(__USLC__)
asm int
_tsl_set(void *tsl)
{
%mem tsl
movl tsl, %ecx
movl $1, %eax
lock
xchgb (%ecx),%al
xorl $1,%eax
}
#endif
#define MUTEX_SET(tsl) _tsl_set(tsl)
#define MUTEX_UNSET(tsl) (*(tsl) = 0)
#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0)
#endif
#endif
/*
 * SPARC/gcc: "ldstub" atomically loads the byte and stores 0xff; the
 * lock was free iff the old value was 0.  "stbar"/"membar" provide the
 * ordering; MUTEX_UNSET clears and then issues a full membar.
 */
#ifdef HAVE_MUTEX_SPARC_GCC_ASSEMBLY
typedef unsigned char tsl_t;
#define MUTEX_ALIGN 8
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define MUTEX_SET(tsl) ({ \
register tsl_t *__l = (tsl); \
register tsl_t __r; \
__asm__ volatile \
("ldstub [%1],%0; stbar" \
: "=r"( __r) : "r" (__l)); \
!__r; \
})
#define MUTEX_UNSET(tsl) (*(tsl) = 0, MUTEX_MEMBAR(tsl))
#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0)
#define MUTEX_MEMBAR(x) \
({ __asm__ volatile ("membar #StoreStore|#StoreLoad|#LoadStore"); })
#define MEMBAR_ENTER() \
({ __asm__ volatile ("membar #StoreStore|#StoreLoad"); })
#define MEMBAR_EXIT() \
({ __asm__ volatile ("membar #StoreStore|#LoadStore"); })
#endif
#endif
/* Amdahl UTS: uts_lock try-lock; a zero word is unlocked. */
#ifdef HAVE_MUTEX_UTS_CC_ASSEMBLY
typedef int tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define MUTEX_INIT(x) 0
#define MUTEX_SET(x) (!uts_lock(x, 1))
#define MUTEX_UNSET(x) (*(x) = 0)
#endif
#endif
/*
 * MIPS/gcc: ll/sc loop that ORs in bit 0, retrying until the store-
 * conditional succeeds.  __r holds the old low bit, so the lock was
 * acquired iff !__r.  "sync" provides the acquire barrier; MUTEX_UNSET
 * issues "sync" before storing zero (release).
 */
#ifdef HAVE_MUTEX_MIPS_GCC_ASSEMBLY
typedef u_int32_t tsl_t;
#define MUTEX_ALIGN 4
#ifdef LOAD_ACTUAL_MUTEX_CODE
static inline int
MUTEX_SET(tsl_t *tsl) {
register tsl_t *__l = tsl;
register tsl_t __r, __t;
__asm__ volatile(
" .set push \n"
" .set mips2 \n"
" .set noreorder \n"
" .set nomacro \n"
"1: ll %0, %3 \n"
" ori %2, %0, 1 \n"
" sc %2, %1 \n"
" beqzl %2, 1b \n"
" nop \n"
" andi %2, %0, 1 \n"
" sync \n"
" .set reorder \n"
" .set pop \n"
: "=&r" (__t), "=m" (*tsl), "=&r" (__r)
: "m" (*tsl)
: "memory");
return (!__r);
}
static inline void
MUTEX_UNSET(tsl_t *tsl) {
__asm__ volatile(
" .set noreorder \n"
" sync \n"
" sw $0, %0 \n"
" .set reorder \n"
: "=m" (*tsl)
: "m" (*tsl)
: "memory");
}
#define MUTEX_INIT(tsl) (*(tsl) = 0)
#endif
#endif
#if defined(HAVE_MUTEX_X86_GCC_ASSEMBLY) || \
defined(HAVE_MUTEX_X86_64_GCC_ASSEMBLY)
typedef volatile unsigned char tsl_t;
#ifdef LOAD_ACTUAL_MUTEX_CODE
#define MUTEX_SET(tsl) ({ \
tsl_t __r; \
__asm__ volatile("movb $1, %b0\n\t" \
"xchgb %b0,%1" \
: "=&q" (__r) \
: "m" (*(tsl_t *)(tsl)) \
: "memory", "cc"); \
!__r; \
})
#define MUTEX_UNSET(tsl) (*(tsl_t *)(tsl) = 0)
#define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0)
#if defined(HAVE_MUTEX_X86_GCC_ASSEMBLY)
#define MUTEX_MEMBAR(addr) \
({ __asm__ volatile ("lock; addl $0, %0" ::"m" (addr): "memory"); 1; })
#else
#define MUTEX_MEMBAR(addr) \
({ __asm__ volatile ("mfence" ::: "memory"); 1; })
#endif
#define MUTEX_PAUSE __asm__ volatile ("rep; nop" : : );
#endif
#endif
/* Defaults for anything the platform sections above did not define. */
#ifndef MUTEX_ALIGN
#define MUTEX_ALIGN sizeof(unsigned int)
#endif
#ifndef MUTEX_DESTROY
#define MUTEX_DESTROY(x)
#endif
#ifndef MUTEX_PAUSE
#define MUTEX_PAUSE
#endif
/*
 * Without native atomic support, atomic operations are emulated using
 * this many dedicated mutexes (see mtx_atomic in DB_MUTEXREGION).
 */
#if !defined(HAVE_ATOMIC_SUPPORT) && defined(HAVE_MUTEX_SUPPORT) && \
!defined(MAX_ATOMIC_MUTEXES)
#define MAX_ATOMIC_MUTEXES 1
#endif
/*
 * DB_MUTEXMGR --
 *	Per-process mutex manager handle: the owning environment, the
 *	mutex region's information, and the base of the mutex array used
 *	to map mutex ids to addresses (see MUTEXP_SET below).
 */
struct __db_mutexmgr {
DB_ENV *dbenv; /* Environment handle. */
REGINFO reginfo; /* Mutex region information. */
void *mutex_array; /* Base address of the mutex array. */
};
/* Lock/unlock the mutex region's own mutex (mtx_region). */
#define MUTEX_SYSTEM_LOCK(dbenv) \
MUTEX_LOCK(dbenv, ((DB_MUTEXREGION *) \
(dbenv)->mutex_handle->reginfo.primary)->mtx_region)
#define MUTEX_SYSTEM_UNLOCK(dbenv) \
MUTEX_UNLOCK(dbenv, ((DB_MUTEXREGION *) \
(dbenv)->mutex_handle->reginfo.primary)->mtx_region)
/*
 * DB_MUTEXREGION --
 *	Layout of the shared mutex region's primary structure.
 */
typedef struct __db_mutexregion {
roff_t mutex_off_alloc; /* Offset of the allocated mutex block. */
roff_t mutex_off; /* Offset of the mutex array. */
db_size_t mutex_size; /* Size of one mutex slot (used by MUTEXP_SET). */
roff_t thread_off; /* Offset of the thread area. */
db_mutex_t mtx_region; /* Mutex protecting this region. */
/* Next available mutex -- presumably the free-list head; confirm. */
db_mutex_t mutex_next;
#if !defined(HAVE_ATOMIC_SUPPORT) && defined(HAVE_MUTEX_SUPPORT)
/* Mutexes used to emulate atomic ops when no native atomics exist. */
db_mutex_t mtx_atomic[MAX_ATOMIC_MUTEXES];
#endif
DB_MUTEX_STAT stat; /* Mutex statistics. */
} DB_MUTEXREGION;
#ifdef HAVE_MUTEX_SUPPORT
/*
 * DB_MUTEX --
 *	The mutex itself, as stored in the mutex region.  The layout
 *	varies with the configured implementation (MUTEX_FIELDS), with
 *	hybrid-mutex/shared-latch support, and with statistics.
 */
struct __db_mutex_t {
#ifdef MUTEX_FIELDS
MUTEX_FIELDS /* Implementation-specific fields (defined above). */
#endif
#ifndef HAVE_MUTEX_FCNTL
#if defined(HAVE_MUTEX_HYBRID) || \
(defined(HAVE_SHARED_LATCHES) && !defined(HAVE_MUTEX_PTHREADS))
tsl_t tas; /* Test-and-set word. */
db_atomic_t sharecount; /* Shared-latch reader count. */
#elif !defined(MUTEX_FIELDS)
tsl_t tas; /* Test-and-set word. */
#endif
#endif
#ifdef HAVE_MUTEX_HYBRID
volatile u_int32_t wait; /* Waiter count (hybrid mutexes). */
#endif
pid_t pid; /* Owning process id. */
db_threadid_t tid; /* Owning thread id. */
db_mutex_t mutex_next_link; /* Next mutex on a linked list. */
#ifdef HAVE_STATISTICS
int alloc_id; /* Allocation id. */
u_int32_t mutex_set_wait; /* Acquisitions that waited. */
u_int32_t mutex_set_nowait; /* Acquisitions without waiting. */
#ifdef HAVE_SHARED_LATCHES
u_int32_t mutex_set_rd_wait; /* Shared acquisitions that waited. */
u_int32_t mutex_set_rd_nowait; /* Shared acquisitions, no wait. */
#endif
#ifdef HAVE_MUTEX_HYBRID
u_int32_t hybrid_wait;
u_int32_t hybrid_wakeup;
#endif
#endif
volatile u_int32_t flags; /* DB_MUTEX_XXX flags (e.g. LOCKED, SHARED). */
};
#endif
/*
 * MUTEXP_SET --
 *	Map a mutex id to its DB_MUTEX address.  In a private environment
 *	the id already is the pointer; otherwise the id indexes the mutex
 *	array in the shared region by the per-slot size.
 */
#define MUTEXP_SET(env, indx) \
(F_ISSET(env, ENV_PRIVATE) ? (DB_MUTEX *) indx : \
(DB_MUTEX *)((u_int8_t *)env->mutex_handle->mutex_array + \
(indx) * \
((DB_MUTEXREGION *)env->mutex_handle->reginfo.primary)->mutex_size))
/*
 * MUTEX_IS_OWNED --
 *	Assertion helper: true when the mutex is locked (or when mutexes
 *	are invalid/disabled).  Note it only checks the LOCKED flag, not
 *	which thread holds the mutex.
 */
#ifdef HAVE_MUTEX_SUPPORT
#define MUTEX_IS_OWNED(env, mutex) \
(mutex == MUTEX_INVALID || !MUTEX_ON(env) || \
F_ISSET(env->dbenv, DB_ENV_NOLOCKING) || \
F_ISSET(MUTEXP_SET(env, mutex), DB_MUTEX_LOCKED))
#else
#define MUTEX_IS_OWNED(env, mutex) 0
#endif
/*
 * Busy tests: for shared latches a nonzero sharecount means busy; for
 * plain mutexes the LOCKED flag does.  MUTEXP_BUSY_FIELD selects the
 * field that encodes that state.
 */
#if defined(HAVE_MUTEX_HYBRID) || defined(DB_WIN32) || \
(defined(HAVE_SHARED_LATCHES) && !defined(HAVE_MUTEX_PTHREADS))
#define MUTEXP_IS_BUSY(mutexp) \
(F_ISSET(mutexp, DB_MUTEX_SHARED) ? \
(atomic_read(&(mutexp)->sharecount) != 0) : \
F_ISSET(mutexp, DB_MUTEX_LOCKED))
#define MUTEXP_BUSY_FIELD(mutexp) \
(F_ISSET(mutexp, DB_MUTEX_SHARED) ? \
(atomic_read(&(mutexp)->sharecount)) : (mutexp)->flags)
#else
/* Without shared latches only the LOCKED flag matters. */
#define MUTEXP_IS_BUSY(mutexp) (F_ISSET((mutexp), DB_MUTEX_LOCKED))
#define MUTEXP_BUSY_FIELD(mutexp) ((mutexp)->flags)
#endif
/* True when the mutex is busy (or mutexes are invalid/disabled). */
#define MUTEX_IS_BUSY(env, mutex) \
(mutex == MUTEX_INVALID || !MUTEX_ON(env) || \
F_ISSET(env->dbenv, DB_ENV_NOLOCKING) || \
MUTEXP_IS_BUSY(MUTEXP_SET(env, mutex)))
/* Debug assertions that a mutex is held (exclusively / at least shared). */
#define MUTEX_REQUIRED(env, mutex) \
DB_ASSERT(env, MUTEX_IS_OWNED(env, mutex))
#define MUTEX_REQUIRED_READ(env, mutex) \
DB_ASSERT(env, MUTEX_IS_OWNED(env, mutex) || MUTEX_IS_BUSY(env, mutex))
#ifdef LOAD_ACTUAL_MUTEX_CODE
#if defined(HAVE_SHARED_LATCHES)
/* Sentinel sharecount value marking a latch as held exclusively. */
#define MUTEX_SHARE_ISEXCLUSIVE (-1024)
/*
 * MUTEXP_ACQUIRE --
 *	Try to acquire exclusively: for a shared latch, CAS the share
 *	count from 0 (free) to the exclusive sentinel; for a plain mutex,
 *	test-and-set the tas word.  (Uses "env" from the caller's scope.)
 */
#define MUTEXP_ACQUIRE(mutexp) \
(F_ISSET(mutexp, DB_MUTEX_SHARED) ? \
atomic_compare_exchange(env, \
&(mutexp)->sharecount, 0, MUTEX_SHARE_ISEXCLUSIVE) : \
MUTEX_SET(&(mutexp)->tas))
#else
#define MUTEXP_ACQUIRE(mutexp) MUTEX_SET(&(mutexp)->tas)
#endif
/* No-op barriers for platforms that did not define them above. */
#ifndef MEMBAR_ENTER
#define MEMBAR_ENTER()
#define MEMBAR_EXIT()
#endif
#endif
#if defined(__cplusplus)
}
#endif
#endif