#include "db_config.h"
#include "db_int.h"
#include "dbinc/lock.h"
static inline int __db_tas_mutex_lock_int
__P((ENV *, db_mutex_t, db_timeout_t, int));
static inline int __db_tas_mutex_readlock_int __P((ENV *, db_mutex_t, int));
/*
 * __db_tas_mutex_init --
 *	Initialize a test-and-set mutex.
 *
 *	Returns 0 on success, EINVAL if the mutex is misaligned, or a
 *	POSIX error code if the underlying initialization fails.
 */
int
__db_tas_mutex_init(env, mutex, flags)
	ENV *env;
	db_mutex_t mutex;
	u_int32_t flags;
{
	DB_ENV *dbenv;
	DB_MUTEX *mutexp;
	int ret;

	/* The flags are consumed only by the hybrid implementation below. */
#ifndef HAVE_MUTEX_HYBRID
	COMPQUIET(flags, 0);
#endif
	dbenv = env->dbenv;
	mutexp = MUTEXP_SET(env, mutex);

	/*
	 * Refuse a mutex that is not aligned to the configured boundary;
	 * test-and-set instructions require correct alignment.
	 */
	if (((uintptr_t)mutexp & (dbenv->mutex_align - 1)) != 0) {
		__db_errx(env, DB_STR("2028",
		    "TAS: mutex not appropriately aligned"));
		return (EINVAL);
	}

#ifdef HAVE_SHARED_LATCHES
	/* Shared latches track state in sharecount, not in the TAS word. */
	if (F_ISSET(mutexp, DB_MUTEX_SHARED))
		atomic_init(&mutexp->sharecount, 0);
	else
#endif
	if (MUTEX_INIT(&mutexp->tas)) {
		ret = __os_get_syserr();
		__db_syserr(env, ret, DB_STR("2029",
		    "TAS: mutex initialize"));
		return (__os_posix_err(ret));
	}
#ifdef HAVE_MUTEX_HYBRID
	/* Hybrid mutexes also need a self-blocking pthread mutex to sleep on. */
	if ((ret = __db_pthread_mutex_init(env,
	    mutex, flags | DB_MUTEX_SELF_BLOCK)) != 0)
		return (ret);
#endif
	return (0);
}
inline static int
__db_tas_mutex_lock_int(env, mutex, timeout, nowait)
ENV *env;
db_mutex_t mutex;
db_timeout_t timeout;
int nowait;
{
DB_ENV *dbenv;
DB_MUTEX *mutexp;
DB_MUTEXMGR *mtxmgr;
DB_MUTEXREGION *mtxregion;
DB_THREAD_INFO *ip;
db_timespec now, timespec;
u_int32_t nspins;
int ret;
#ifdef HAVE_MUTEX_HYBRID
const u_long micros = 0;
#else
u_long micros, max_micros;
db_timeout_t time_left;
#endif
dbenv = env->dbenv;
if (!MUTEX_ON(env) || F_ISSET(dbenv, DB_ENV_NOLOCKING))
return (0);
mtxmgr = env->mutex_handle;
mtxregion = mtxmgr->reginfo.primary;
mutexp = MUTEXP_SET(env, mutex);
CHECK_MTX_THREAD(env, mutexp);
#ifdef HAVE_STATISTICS
if (F_ISSET(mutexp, DB_MUTEX_LOCKED))
STAT_INC(env, mutex, set_wait, mutexp->mutex_set_wait, mutex);
else
STAT_INC(env,
mutex, set_nowait, mutexp->mutex_set_nowait, mutex);
#endif
#ifndef HAVE_MUTEX_HYBRID
micros = 1000;
max_micros = F_ISSET(mutexp, DB_MUTEX_LOGICAL_LOCK) ? 10000 : 25000;
#endif
if (timeout != 0)
timespecclear(×pec);
ip = NULL;
loop:
for (nspins =
mtxregion->stat.st_mutex_tas_spins; nspins > 0; --nspins) {
#ifdef HAVE_MUTEX_S390_CC_ASSEMBLY
tsl_t zero;
zero = 0;
#endif
#ifdef HAVE_MUTEX_HPPA_MSEM_INIT
relock:
#endif
if (MUTEXP_IS_BUSY(mutexp) || !MUTEXP_ACQUIRE(mutexp)) {
if (F_ISSET(dbenv, DB_ENV_FAILCHK) &&
ip == NULL && dbenv->is_alive(dbenv,
mutexp->pid, mutexp->tid, 0) == 0) {
ret = __env_set_state(env, &ip, THREAD_VERIFY);
if (ret != 0 ||
ip->dbth_state == THREAD_FAILCHK)
return (DB_RUNRECOVERY);
}
if (nowait)
return (DB_LOCK_NOTGRANTED);
MUTEX_PAUSE
continue;
}
MEMBAR_ENTER();
#ifdef HAVE_MUTEX_HPPA_MSEM_INIT
if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) {
dbenv->thread_id(dbenv, &mutexp->pid, &mutexp->tid);
goto relock;
}
#endif
#ifdef DIAGNOSTIC
if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) {
char buf[DB_THREADID_STRLEN];
__db_errx(env, DB_STR_A("2030",
"TAS lock failed: lock %ld currently in use: ID: %s",
"%ld %s"), (long)mutex,
dbenv->thread_id_string(dbenv,
mutexp->pid, mutexp->tid, buf));
return (__env_panic(env, EACCES));
}
#endif
F_SET(mutexp, DB_MUTEX_LOCKED);
dbenv->thread_id(dbenv, &mutexp->pid, &mutexp->tid);
#ifdef DIAGNOSTIC
if (F_ISSET(dbenv, DB_ENV_YIELDCPU))
__os_yield(env, 0, 0);
#endif
return (0);
}
if (timeout != 0) {
if (!timespecisset(×pec))
__clock_set_expires(env, ×pec, timeout);
else {
timespecclear(&now);
if (__clock_expired(env, &now, ×pec))
return (DB_TIMEOUT);
#ifndef HAVE_MUTEX_HYBRID
timespecsub(&now, ×pec);
DB_TIMESPEC_TO_TIMEOUT(time_left, &now, 0);
time_left = timeout - time_left;
if (micros > time_left)
micros = time_left;
#endif
}
}
PERFMON4(env, mutex, suspend, mutex, TRUE, mutexp->alloc_id, mutexp);
__os_yield(env, 0, micros);
PERFMON4(env, mutex, resume, mutex, TRUE, mutexp->alloc_id, mutexp);
#if defined(HAVE_MUTEX_HYBRID)
if (!MUTEXP_IS_BUSY(mutexp))
goto loop;
if ((ret = __db_hybrid_mutex_suspend(env,
mutex, timeout == 0 ? NULL : ×pec, TRUE)) != 0)
return (ret);
#else
if ((micros <<= 1) > max_micros)
micros = max_micros;
#endif
PANIC_CHECK(env);
goto loop;
}
/*
 * __db_tas_mutex_lock --
 *	Lock a mutex exclusively, blocking (subject to an optional
 *	timeout) until it can be acquired.
 */
int
__db_tas_mutex_lock(env, mutex, timeout)
	ENV *env;
	db_mutex_t mutex;
	db_timeout_t timeout;
{
	int ret;

	/* A zero "nowait" argument selects the blocking path. */
	ret = __db_tas_mutex_lock_int(env, mutex, timeout, 0);
	return (ret);
}
/*
 * __db_tas_mutex_trylock --
 *	Try to exclusively lock a mutex without waiting if it is busy.
 */
int
__db_tas_mutex_trylock(env, mutex)
	ENV *env;
	db_mutex_t mutex;
{
	int ret;

	/* No timeout; a non-zero "nowait" argument selects the try path. */
	ret = __db_tas_mutex_lock_int(env, mutex, 0, 1);
	return (ret);
}
#if defined(HAVE_SHARED_LATCHES)
/*
 * __db_tas_mutex_readlock_int --
 *	Internal function to get a shared (read) lock on a latch,
 *	optionally without waiting when "nowait" is set.
 *
 *	Returns 0 on success, DB_LOCK_NOTGRANTED when nowait is set and
 *	the latch is held exclusively, or DB_RUNRECOVERY when failchk
 *	determines the recorded locker has died.
 */
static inline int
__db_tas_mutex_readlock_int(env, mutex, nowait)
	ENV *env;
	db_mutex_t mutex;
	int nowait;
{
	DB_ENV *dbenv;
	DB_MUTEX *mutexp;
	DB_MUTEXMGR *mtxmgr;
	DB_MUTEXREGION *mtxregion;
	DB_THREAD_INFO *ip;
	int lock;
	u_int32_t nspins;
	int ret;
#ifndef HAVE_MUTEX_HYBRID
	u_long micros, max_micros;
#endif
	dbenv = env->dbenv;

	if (!MUTEX_ON(env) || F_ISSET(dbenv, DB_ENV_NOLOCKING))
		return (0);

	mtxmgr = env->mutex_handle;
	mtxregion = mtxmgr->reginfo.primary;
	mutexp = MUTEXP_SET(env, mutex);

	CHECK_MTX_THREAD(env, mutexp);

	/* Only shared latches may be read-locked. */
	DB_ASSERT(env, F_ISSET(mutexp, DB_MUTEX_SHARED));
#ifdef HAVE_STATISTICS
	if (F_ISSET(mutexp, DB_MUTEX_LOCKED))
		STAT_INC(env,
		    mutex, set_rd_wait, mutexp->mutex_set_rd_wait, mutex);
	else
		STAT_INC(env,
		    mutex, set_rd_nowait, mutexp->mutex_set_rd_nowait, mutex);
#endif

#ifndef HAVE_MUTEX_HYBRID
	/*
	 * Sleep-based backoff: start at 1ms; cap lower for logical locks
	 * than for other mutexes.
	 */
	micros = 1000;
	max_micros = F_ISSET(mutexp, DB_MUTEX_LOGICAL_LOCK) ? 10000 : 25000;
#endif

loop:	/* Attempt to acquire the latch for the configured number of spins. */
	for (nspins =
	    mtxregion->stat.st_mutex_tas_spins; nspins > 0; --nspins) {
		/*
		 * Increment the share count unless the latch is held
		 * exclusively; retry if the CAS loses a race.
		 */
		lock = atomic_read(&mutexp->sharecount);
		if (lock == MUTEX_SHARE_ISEXCLUSIVE ||
		    !atomic_compare_exchange(env,
		    &mutexp->sharecount, lock, lock + 1)) {
			/* Brief processor pause before retrying. */
			MUTEX_PAUSE
			continue;
		}

		/* Acquire-side memory barrier after winning the CAS. */
		MEMBAR_ENTER();
		/* Record the most recent reader's id for diagnostics. */
		dbenv->thread_id(dbenv, &mutexp->pid, &mutexp->tid);
		return (0);
	}

	/*
	 * Spins exhausted.  If failchk is running and the recorded locker
	 * is dead, verify our own thread state: a thread in FAILCHK state
	 * must not block here.
	 */
	if (F_ISSET(dbenv, DB_ENV_FAILCHK) &&
	    dbenv->is_alive(dbenv, mutexp->pid, mutexp->tid, 0) == 0) {
		ret = __env_set_state(env, &ip, THREAD_VERIFY);
		if (ret != 0 || ip->dbth_state == THREAD_FAILCHK)
			return (DB_RUNRECOVERY);
	}
	if (nowait) {
		/* Spin again if the latch is no longer held exclusively. */
		if (atomic_read(&mutexp->sharecount) != MUTEX_SHARE_ISEXCLUSIVE)
			goto loop;
		return (DB_LOCK_NOTGRANTED);
	}

#ifdef HAVE_MUTEX_HYBRID
	PERFMON4(env, mutex, suspend, mutex, FALSE, mutexp->alloc_id, mutexp);
	__os_yield(env, 0, 0);
	PERFMON4(env, mutex, resume, mutex, FALSE, mutexp->alloc_id, mutexp);
	if (atomic_read(&mutexp->sharecount) != MUTEX_SHARE_ISEXCLUSIVE)
		goto loop;
	/* Sleep on the pthread half of the hybrid latch until released. */
	if ((ret = __db_hybrid_mutex_suspend(env, mutex, NULL, FALSE)) != 0)
		return (ret);
#else
	PERFMON4(env, mutex, suspend, mutex, FALSE, mutexp->alloc_id, mutexp);
	__os_yield(env, 0, micros);
	PERFMON4(env, mutex, resume, mutex, FALSE, mutexp->alloc_id, mutexp);
	/* Exponential backoff, capped at max_micros. */
	if ((micros <<= 1) > max_micros)
		micros = max_micros;
#endif
	PANIC_CHECK(env);
	goto loop;
}
/*
 * __db_tas_mutex_readlock --
 *	Get a shared (read) lock on a latch, blocking until granted.
 */
int
__db_tas_mutex_readlock(env, mutex)
	ENV *env;
	db_mutex_t mutex;
{
	int ret;

	/* A zero "nowait" argument selects the blocking path. */
	ret = __db_tas_mutex_readlock_int(env, mutex, 0);
	return (ret);
}
/*
 * __db_tas_mutex_tryreadlock --
 *	Try to get a shared (read) lock on a latch without waiting.
 */
int
__db_tas_mutex_tryreadlock(env, mutex)
	ENV *env;
	db_mutex_t mutex;
{
	int ret;

	/* A non-zero "nowait" argument selects the try path. */
	ret = __db_tas_mutex_readlock_int(env, mutex, 1);
	return (ret);
}
#endif
/*
 * __db_tas_mutex_unlock --
 *	Release a test-and-set mutex or shared latch.
 */
int
__db_tas_mutex_unlock(env, mutex)
	ENV *env;
	db_mutex_t mutex;
{
	DB_ENV *dbenv;
	DB_MUTEX *mutexp;
#ifdef HAVE_MUTEX_HYBRID
	int ret;
#ifdef MUTEX_DIAG
	int waiters;
#endif
#endif
#ifdef HAVE_SHARED_LATCHES
	int sharecount;
#endif
	dbenv = env->dbenv;

	if (!MUTEX_ON(env) || F_ISSET(dbenv, DB_ENV_NOLOCKING))
		return (0);

	mutexp = MUTEXP_SET(env, mutex);
#if defined(HAVE_MUTEX_HYBRID) && defined(MUTEX_DIAG)
	/* Snapshot the waiter count before releasing, for the diag printf. */
	waiters = mutexp->wait;
#endif

#if defined(DIAGNOSTIC)
#if defined(HAVE_SHARED_LATCHES)
	/* Panic on an unlock of a mutex or latch that is not held. */
	if (F_ISSET(mutexp, DB_MUTEX_SHARED)) {
		if (atomic_read(&mutexp->sharecount) == 0) {
			__db_errx(env, DB_STR_A("2031",
			    "shared unlock %ld already unlocked", "%ld"),
			    (long)mutex);
			return (__env_panic(env, EACCES));
		}
	} else
#endif
	if (!F_ISSET(mutexp, DB_MUTEX_LOCKED)) {
		__db_errx(env, DB_STR_A("2032",
		    "unlock %ld already unlocked", "%ld"), (long)mutex);
		return (__env_panic(env, EACCES));
	}
#endif

#ifdef HAVE_SHARED_LATCHES
	if (F_ISSET(mutexp, DB_MUTEX_SHARED)) {
		/*
		 * Releasing an exclusive hold resets sharecount to zero;
		 * releasing a read hold decrements it, and only the last
		 * reader falls through to wake waiters.
		 */
		sharecount = atomic_read(&mutexp->sharecount);
		if (sharecount == MUTEX_SHARE_ISEXCLUSIVE) {
			F_CLR(mutexp, DB_MUTEX_LOCKED);
			/* Release barrier before making the latch free. */
			MEMBAR_EXIT();
			atomic_init(&mutexp->sharecount, 0);
		} else {
			DB_ASSERT(env, sharecount > 0);
			MEMBAR_EXIT();
			sharecount = atomic_dec(env, &mutexp->sharecount);
			DB_ASSERT(env, sharecount >= 0);
			if (sharecount > 0)
				return (0);
		}
	} else
#endif
	{
		F_CLR(mutexp, DB_MUTEX_LOCKED);
		MUTEX_UNSET(&mutexp->tas);
	}

#ifdef HAVE_MUTEX_HYBRID
#ifdef DIAGNOSTIC
	/* For testing, yield the processor after every release. */
	if (F_ISSET(dbenv, DB_ENV_YIELDCPU))
		__os_yield(env, 0, 0);
#endif

	/* Barrier between the release above and the read of wait below. */
	MUTEX_MEMBAR(mutexp->flags);
	/* Wake any threads sleeping on the pthread half of the mutex. */
	if (mutexp->wait &&
	    (ret = __db_pthread_mutex_unlock(env, mutex)) != 0)
		return (ret);

#ifdef MUTEX_DIAG
	if (mutexp->wait)
		printf("tas_unlock %ld %x waiters! busy %x waiters %d/%d\n",
		    mutex, pthread_self(),
		    MUTEXP_BUSY_FIELD(mutexp), waiters, mutexp->wait);
#endif
#endif

	return (0);
}
/*
 * __db_tas_mutex_destroy --
 *	Destroy a test-and-set mutex, and the pthread half of a hybrid.
 */
int
__db_tas_mutex_destroy(env, mutex)
	ENV *env;
	db_mutex_t mutex;
{
	DB_MUTEX *mutexp;
#ifdef HAVE_MUTEX_HYBRID
	int ret;
#endif

	if (!MUTEX_ON(env))
		return (0);

	mutexp = MUTEXP_SET(env, mutex);

	MUTEX_DESTROY(&mutexp->tas);

#ifdef HAVE_MUTEX_HYBRID
	if ((ret = __db_pthread_mutex_destroy(env, mutex)) != 0)
		return (ret);
#endif
	/* MUTEX_DESTROY may expand to nothing; keep mutexp "used". */
	COMPQUIET(mutexp, NULL);
	return (0);
}