#include "builtin/AtomicsObject.h"
#include "mozilla/Atomics.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/FloatingPoint.h"
#include "mozilla/Maybe.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/Unused.h"
#include "jsapi.h"
#include "jsfriendapi.h"
#include "jsnum.h"
#include "jit/AtomicOperations.h"
#include "jit/InlinableNatives.h"
#include "js/Class.h"
#include "js/PropertySpec.h"
#include "vm/GlobalObject.h"
#include "vm/Time.h"
#include "vm/TypedArrayObject.h"
#include "wasm/WasmInstance.h"
#include "vm/JSObject-inl.h"
using namespace js;
// The |Atomics| namespace object. It has no private data and no custom
// hooks; JSCLASS_HAS_CACHED_PROTO enables lazy resolution of the global.
const Class AtomicsObject::class_ = {"Atomics",
                                     JSCLASS_HAS_CACHED_PROTO(JSProto_Atomics)};
// Report JSMSG_ATOMICS_BAD_ARRAY and return false, so callers can write
// |return ReportBadArrayType(cx);| on the error path.
static bool ReportBadArrayType(JSContext* cx) {
  JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                            JSMSG_ATOMICS_BAD_ARRAY);
  return false;
}
// Report JSMSG_BAD_INDEX and return false (error-path helper).
static bool ReportOutOfRange(JSContext* cx) {
  JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX);
  return false;
}
// Unwrap |v| as a TypedArrayObject backed by shared memory.
//
// On success stores the view in |viewp| and returns true. On failure
// reports JSMSG_ATOMICS_BAD_ARRAY and returns false. Note that |viewp|
// is set before the shared-memory check, matching the historical
// behavior of this helper.
static bool GetSharedTypedArray(JSContext* cx, HandleValue v,
                                MutableHandle<TypedArrayObject*> viewp) {
  if (!v.isObject() || !v.toObject().is<TypedArrayObject>()) {
    return ReportBadArrayType(cx);
  }
  viewp.set(&v.toObject().as<TypedArrayObject>());
  if (!viewp->isSharedMemory()) {
    return ReportBadArrayType(cx);
  }
  return true;
}
// Convert |v| to an in-bounds element index for |view|.
//
// Uses the spec's ToIndex conversion, then range-checks against the
// view's element count. The result always fits in uint32_t because a
// typed array's length does.
static bool GetTypedArrayIndex(JSContext* cx, HandleValue v,
                               Handle<TypedArrayObject*> view,
                               uint32_t* offset) {
  uint64_t idx;
  if (!ToIndex(cx, v, &idx)) {
    return false;
  }
  if (idx >= view->length()) {
    return ReportOutOfRange(cx);
  }
  *offset = uint32_t(idx);
  return true;
}
// Perform a seq-cst compare-exchange on the element at |offset|,
// dispatching on the element type.
//
// |oldCandidate| and |newCandidate| are truncated to the element type.
// Returns the previous element value widened to int32_t (sign-extended
// for signed types, zero-extended for unsigned; Uint32 results are
// reinterpreted as int32_t and widened by the caller). For non-integer
// views sets |*badArrayType| (if provided) and returns 0.
static int32_t CompareExchange(Scalar::Type viewType, int32_t oldCandidate,
                               int32_t newCandidate, SharedMem<void*> viewData,
                               uint32_t offset, bool* badArrayType = nullptr) {
  switch (viewType) {
    case Scalar::Int8:
      return jit::AtomicOperations::compareExchangeSeqCst(
          viewData.cast<int8_t*>() + offset, int8_t(oldCandidate),
          int8_t(newCandidate));
    case Scalar::Uint8:
      return jit::AtomicOperations::compareExchangeSeqCst(
          viewData.cast<uint8_t*>() + offset, uint8_t(oldCandidate),
          uint8_t(newCandidate));
    case Scalar::Int16:
      return jit::AtomicOperations::compareExchangeSeqCst(
          viewData.cast<int16_t*>() + offset, int16_t(oldCandidate),
          int16_t(newCandidate));
    case Scalar::Uint16:
      return jit::AtomicOperations::compareExchangeSeqCst(
          viewData.cast<uint16_t*>() + offset, uint16_t(oldCandidate),
          uint16_t(newCandidate));
    case Scalar::Int32:
      return jit::AtomicOperations::compareExchangeSeqCst(
          viewData.cast<int32_t*>() + offset, oldCandidate, newCandidate);
    case Scalar::Uint32:
      return int32_t(jit::AtomicOperations::compareExchangeSeqCst(
          viewData.cast<uint32_t*>() + offset, uint32_t(oldCandidate),
          uint32_t(newCandidate)));
    default:
      if (badArrayType) {
        *badArrayType = true;
      }
      return 0;
  }
}
// Atomics.compareExchange(typedArray, index, expectedValue, replacementValue)
//
// Returns the element value observed before the operation; the store
// happens only if that value equals |expectedValue| after conversion to
// the element type. Uint32 results are boxed as doubles since they may
// exceed INT32_MAX.
bool js::atomics_compareExchange(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  MutableHandleValue rval = args.rval();
  Rooted<TypedArrayObject*> view(cx, nullptr);
  if (!GetSharedTypedArray(cx, args.get(0), &view)) {
    return false;
  }
  uint32_t offset;
  if (!GetTypedArrayIndex(cx, args.get(1), view, &offset)) {
    return false;
  }
  // Per spec, convert the expected value before the replacement value.
  int32_t expected;
  if (!ToInt32(cx, args.get(2), &expected)) {
    return false;
  }
  int32_t replacement;
  if (!ToInt32(cx, args.get(3), &replacement)) {
    return false;
  }
  bool badType = false;
  int32_t oldValue =
      CompareExchange(view->type(), expected, replacement,
                      view->dataPointerShared(), offset, &badType);
  if (badType) {
    return ReportBadArrayType(cx);
  }
  if (view->type() == Scalar::Uint32) {
    rval.setNumber(double(uint32_t(oldValue)));
  } else {
    rval.setInt32(oldValue);
  }
  return true;
}
// Atomics.load(typedArray, index)
//
// Performs a sequentially-consistent load of the element and returns
// it: setInt32 for all element types except Uint32, which is boxed as
// a double because it may exceed INT32_MAX.
bool js::atomics_load(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  HandleValue objv = args.get(0);
  HandleValue idxv = args.get(1);
  MutableHandleValue r = args.rval();
  Rooted<TypedArrayObject*> view(cx, nullptr);
  if (!GetSharedTypedArray(cx, objv, &view)) {
    return false;
  }
  uint32_t offset;
  if (!GetTypedArrayIndex(cx, idxv, view, &offset)) {
    return false;
  }
  SharedMem<void*> viewData = view->dataPointerShared();
  switch (view->type()) {
    case Scalar::Uint8: {
      uint8_t v =
          jit::AtomicOperations::loadSeqCst(viewData.cast<uint8_t*>() + offset);
      r.setInt32(v);
      return true;
    }
    case Scalar::Int8: {
      // Load through int8_t* so the value is read with its proper type
      // and sign-extends into the int32 result. (Previously this loaded
      // through uint8_t* and relied on an implementation-defined
      // uint8_t -> int8_t narrowing conversion.)
      int8_t v =
          jit::AtomicOperations::loadSeqCst(viewData.cast<int8_t*>() + offset);
      r.setInt32(v);
      return true;
    }
    case Scalar::Int16: {
      int16_t v =
          jit::AtomicOperations::loadSeqCst(viewData.cast<int16_t*>() + offset);
      r.setInt32(v);
      return true;
    }
    case Scalar::Uint16: {
      uint16_t v = jit::AtomicOperations::loadSeqCst(
          viewData.cast<uint16_t*>() + offset);
      r.setInt32(v);
      return true;
    }
    case Scalar::Int32: {
      int32_t v =
          jit::AtomicOperations::loadSeqCst(viewData.cast<int32_t*>() + offset);
      r.setInt32(v);
      return true;
    }
    case Scalar::Uint32: {
      uint32_t v = jit::AtomicOperations::loadSeqCst(
          viewData.cast<uint32_t*>() + offset);
      r.setNumber(v);
      return true;
    }
    default:
      return ReportBadArrayType(cx);
  }
}
// Selects the primitive applied by ExchangeOrStore: Atomics.exchange
// (DoExchange) or Atomics.store (DoStore).
enum XchgStoreOp { DoExchange, DoStore };
template <XchgStoreOp op>
static int32_t ExchangeOrStore(Scalar::Type viewType, int32_t numberValue,
SharedMem<void*> viewData, uint32_t offset,
bool* badArrayType = nullptr) {
#define INT_OP(ptr, value) \
JS_BEGIN_MACRO \
if (op == DoStore) \
jit::AtomicOperations::storeSeqCst(ptr, value); \
else \
value = jit::AtomicOperations::exchangeSeqCst(ptr, value); \
JS_END_MACRO
switch (viewType) {
case Scalar::Int8: {
int8_t value = (int8_t)numberValue;
INT_OP(viewData.cast<int8_t*>() + offset, value);
return value;
}
case Scalar::Uint8: {
uint8_t value = (uint8_t)numberValue;
INT_OP(viewData.cast<uint8_t*>() + offset, value);
return value;
}
case Scalar::Int16: {
int16_t value = (int16_t)numberValue;
INT_OP(viewData.cast<int16_t*>() + offset, value);
return value;
}
case Scalar::Uint16: {
uint16_t value = (uint16_t)numberValue;
INT_OP(viewData.cast<uint16_t*>() + offset, value);
return value;
}
case Scalar::Int32: {
int32_t value = numberValue;
INT_OP(viewData.cast<int32_t*>() + offset, value);
return value;
}
case Scalar::Uint32: {
uint32_t value = (uint32_t)numberValue;
INT_OP(viewData.cast<uint32_t*>() + offset, value);
return (int32_t)value;
}
default:
if (badArrayType) {
*badArrayType = true;
}
return 0;
}
#undef INT_OP
}
// Shared JSNative implementation of Atomics.store (op == DoStore) and
// Atomics.exchange (op == DoExchange).
//
// Per spec, store returns the converted input value (as a Number),
// while exchange returns the previous element value.
template <XchgStoreOp op>
static bool ExchangeOrStore(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  MutableHandleValue rval = args.rval();
  Rooted<TypedArrayObject*> view(cx, nullptr);
  if (!GetSharedTypedArray(cx, args.get(0), &view)) {
    return false;
  }
  uint32_t offset;
  if (!GetTypedArrayIndex(cx, args.get(1), view, &offset)) {
    return false;
  }
  double integerValue;
  if (!ToInteger(cx, args.get(2), &integerValue)) {
    return false;
  }
  bool badType = false;
  int32_t rawResult =
      ExchangeOrStore<op>(view->type(), JS::ToInt32(integerValue),
                          view->dataPointerShared(), offset, &badType);
  if (badType) {
    return ReportBadArrayType(cx);
  }
  if (op == DoStore) {
    rval.setNumber(integerValue);
  } else if (view->type() == Scalar::Uint32) {
    // The old element may exceed INT32_MAX, so box it as a double.
    rval.setNumber(double(uint32_t(rawResult)));
  } else {
    rval.setInt32(rawResult);
  }
  return true;
}
// Atomics.store(typedArray, index, value)
bool js::atomics_store(JSContext* cx, unsigned argc, Value* vp) {
  return ExchangeOrStore<DoStore>(cx, argc, vp);
}
// Atomics.exchange(typedArray, index, value)
bool js::atomics_exchange(JSContext* cx, unsigned argc, Value* vp) {
  return ExchangeOrStore<DoExchange>(cx, argc, vp);
}
// Shared implementation of Atomics.{add,sub,and,or,xor}.
//
// T supplies a typed |operate| overload per integral element type (see
// INTEGRAL_TYPES_FOR_EACH). The old element value is returned: setInt32
// for all element types except Uint32, which is boxed as a double.
template <typename T>
static bool AtomicsBinop(JSContext* cx, HandleValue objv, HandleValue idxv,
                         HandleValue valv, MutableHandleValue r) {
  Rooted<TypedArrayObject*> view(cx, nullptr);
  if (!GetSharedTypedArray(cx, objv, &view)) {
    return false;
  }
  uint32_t offset;
  if (!GetTypedArrayIndex(cx, idxv, view, &offset)) {
    return false;
  }
  int32_t rawValue;
  if (!ToInt32(cx, valv, &rawValue)) {
    return false;
  }
  SharedMem<void*> data = view->dataPointerShared();
  switch (view->type()) {
    case Scalar::Int8:
      r.setInt32(T::operate(data.cast<int8_t*>() + offset, int8_t(rawValue)));
      return true;
    case Scalar::Uint8:
      r.setInt32(T::operate(data.cast<uint8_t*>() + offset, uint8_t(rawValue)));
      return true;
    case Scalar::Int16:
      r.setInt32(T::operate(data.cast<int16_t*>() + offset, int16_t(rawValue)));
      return true;
    case Scalar::Uint16:
      r.setInt32(
          T::operate(data.cast<uint16_t*>() + offset, uint16_t(rawValue)));
      return true;
    case Scalar::Int32:
      r.setInt32(T::operate(data.cast<int32_t*>() + offset, rawValue));
      return true;
    case Scalar::Uint32:
      r.setNumber(
          double(T::operate(data.cast<uint32_t*>() + offset,
                            uint32_t(rawValue))));
      return true;
    default:
      return ReportBadArrayType(cx);
  }
}
// Expands to one |operate| overload per integral element type, each
// forwarding to the given jit::AtomicOperations fetch-op primitive.
#define INTEGRAL_TYPES_FOR_EACH(NAME)                          \
  static int8_t operate(SharedMem<int8_t*> addr, int8_t v) {   \
    return NAME(addr, v);                                      \
  }                                                            \
  static uint8_t operate(SharedMem<uint8_t*> addr, uint8_t v) { \
    return NAME(addr, v);                                      \
  }                                                            \
  static int16_t operate(SharedMem<int16_t*> addr, int16_t v) { \
    return NAME(addr, v);                                      \
  }                                                            \
  static uint16_t operate(SharedMem<uint16_t*> addr, uint16_t v) { \
    return NAME(addr, v);                                      \
  }                                                            \
  static int32_t operate(SharedMem<int32_t*> addr, int32_t v) { \
    return NAME(addr, v);                                      \
  }                                                            \
  static uint32_t operate(SharedMem<uint32_t*> addr, uint32_t v) { \
    return NAME(addr, v);                                      \
  }
// Fetch-and-add; |perform| is the plain scalar form of the operation.
class PerformAdd {
 public:
  INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchAddSeqCst)
  static int32_t perform(int32_t x, int32_t y) { return x + y; }
};
// Atomics.add(typedArray, index, value)
bool js::atomics_add(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  HandleValue obj = args.get(0);
  HandleValue idx = args.get(1);
  HandleValue val = args.get(2);
  return AtomicsBinop<PerformAdd>(cx, obj, idx, val, args.rval());
}
// Fetch-and-subtract; |perform| is the plain scalar form of the operation.
class PerformSub {
 public:
  INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchSubSeqCst)
  static int32_t perform(int32_t x, int32_t y) { return x - y; }
};
// Atomics.sub(typedArray, index, value)
bool js::atomics_sub(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  HandleValue obj = args.get(0);
  HandleValue idx = args.get(1);
  HandleValue val = args.get(2);
  return AtomicsBinop<PerformSub>(cx, obj, idx, val, args.rval());
}
// Fetch-and-AND; |perform| is the plain scalar form of the operation.
class PerformAnd {
 public:
  INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchAndSeqCst)
  static int32_t perform(int32_t x, int32_t y) { return x & y; }
};
// Atomics.and(typedArray, index, value)
bool js::atomics_and(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  HandleValue obj = args.get(0);
  HandleValue idx = args.get(1);
  HandleValue val = args.get(2);
  return AtomicsBinop<PerformAnd>(cx, obj, idx, val, args.rval());
}
// Fetch-and-OR; |perform| is the plain scalar form of the operation.
class PerformOr {
 public:
  INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchOrSeqCst)
  static int32_t perform(int32_t x, int32_t y) { return x | y; }
};
// Atomics.or(typedArray, index, value)
bool js::atomics_or(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  HandleValue obj = args.get(0);
  HandleValue idx = args.get(1);
  HandleValue val = args.get(2);
  return AtomicsBinop<PerformOr>(cx, obj, idx, val, args.rval());
}
// Fetch-and-XOR; |perform| is the plain scalar form of the operation.
class PerformXor {
 public:
  INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchXorSeqCst)
  static int32_t perform(int32_t x, int32_t y) { return x ^ y; }
};
// Atomics.xor(typedArray, index, value)
bool js::atomics_xor(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  HandleValue obj = args.get(0);
  HandleValue idx = args.get(1);
  HandleValue val = args.get(2);
  return AtomicsBinop<PerformXor>(cx, obj, idx, val, args.rval());
}
// Atomics.isLockFree(size)
//
// Returns true iff atomic operations on |size|-byte elements are
// implemented without a fallback lock. Sizes that do not fit in an
// int32 (after ToInteger) are never lock-free.
bool js::atomics_isLockFree(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  HandleValue sizev = args.get(0);
  int32_t bytes;
  if (sizev.isInt32()) {
    bytes = sizev.toInt32();
  } else {
    double d;
    if (!ToInteger(cx, sizev, &d)) {
      return false;
    }
    if (!mozilla::NumberIsInt32(d, &bytes)) {
      args.rval().setBoolean(false);
      return true;
    }
  }
  args.rval().setBoolean(jit::AtomicOperations::isLockfreeJS(bytes));
  return true;
}
namespace js {

// A thread blocked in Atomics.wait, threaded into a circular
// doubly-linked list rooted in the SharedArrayRawBuffer being waited on.
class FutexWaiter {
 public:
  FutexWaiter(uint32_t offset, JSContext* cx)
      : offset(offset), cx(cx), lower_pri(nullptr), back(nullptr) {}

  uint32_t offset;         // Byte offset within the buffer being waited on.
  JSContext* cx;           // Context of the waiting thread.
  FutexWaiter* lower_pri;  // Next waiter in the ring.
  FutexWaiter* back;       // Previous waiter in the ring.
};

// RAII holder of the process-wide futex mutex (FutexThread::lock_).
class AutoLockFutexAPI {
  mozilla::Maybe<js::UniqueLock<js::Mutex>> unique_;

 public:
  AutoLockFutexAPI() {
    js::Mutex* lock = FutexThread::lock_;
    unique_.emplace(*lock);
  }

  ~AutoLockFutexAPI() { unique_.reset(); }

  js::UniqueLock<js::Mutex>& unique() { return *unique_; }
};

}  // namespace js
// Typed implementation of Atomics.wait.
//
// Blocks the calling thread until another thread notifies |byteOffset|
// in |sarb|, the timeout expires, or the wait errors out. Returns
// NotEqual without sleeping if the cell's current value differs from
// |value|. Callers on threads that cannot block get an error.
template <typename T>
static FutexThread::WaitResult AtomicsWait(
    JSContext* cx, SharedArrayRawBuffer* sarb, uint32_t byteOffset, T value,
    const mozilla::Maybe<mozilla::TimeDuration>& timeout) {
  MOZ_ASSERT(sarb, "wait is only applicable to shared memory");
  if (!cx->fx.canWait()) {
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_ATOMICS_WAIT_NOT_ALLOWED);
    return FutexThread::WaitResult::Error;
  }
  SharedMem<T*> addr =
      sarb->dataPointerShared().cast<T*>() + (byteOffset / sizeof(T));
  // Take the futex lock before re-checking the cell: notifiers hold the
  // same lock, so the check-then-sleep sequence below cannot miss a
  // notification.
  AutoLockFutexAPI lock;
  if (jit::AtomicOperations::loadSafeWhenRacy(addr) != value) {
    return FutexThread::WaitResult::NotEqual;
  }
  // Link ourselves into the buffer's circular waiter list (or create a
  // one-element ring if the list is empty).
  FutexWaiter w(byteOffset, cx);
  if (FutexWaiter* waiters = sarb->waiters()) {
    w.lower_pri = waiters;
    w.back = waiters->back;
    waiters->back->lower_pri = &w;
    waiters->back = &w;
  } else {
    w.lower_pri = w.back = &w;
    sarb->setWaiters(&w);
  }
  FutexThread::WaitResult retval = cx->fx.wait(cx, lock.unique(), timeout);
  // Unlink |w| from the ring before it goes out of scope, updating the
  // list head if |w| was the head.
  if (w.lower_pri == &w) {
    sarb->setWaiters(nullptr);
  } else {
    w.lower_pri->back = w.back;
    w.back->lower_pri = w.lower_pri;
    if (sarb->waiters() == &w) {
      sarb->setWaiters(w.lower_pri);
    }
  }
  return retval;
}
// Public 32-bit entry point for waiting on a shared buffer cell.
FutexThread::WaitResult js::atomics_wait_impl(
    JSContext* cx, SharedArrayRawBuffer* sarb, uint32_t byteOffset,
    int32_t value, const mozilla::Maybe<mozilla::TimeDuration>& timeout) {
  return AtomicsWait(cx, sarb, byteOffset, value, timeout);
}
// Public 64-bit entry point for waiting on a shared buffer cell.
FutexThread::WaitResult js::atomics_wait_impl(
    JSContext* cx, SharedArrayRawBuffer* sarb, uint32_t byteOffset,
    int64_t value, const mozilla::Maybe<mozilla::TimeDuration>& timeout) {
  return AtomicsWait(cx, sarb, byteOffset, value, timeout);
}
// Atomics.wait(typedArray, index, value, timeout)
//
// Only Int32 shared views may be waited on. Returns one of the strings
// "not-equal", "ok", or "timed-out". The argument conversions below
// happen in spec order: index, then value, then timeout.
bool js::atomics_wait(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  HandleValue objv = args.get(0);
  HandleValue idxv = args.get(1);
  HandleValue valv = args.get(2);
  HandleValue timeoutv = args.get(3);
  MutableHandleValue r = args.rval();
  Rooted<TypedArrayObject*> view(cx, nullptr);
  if (!GetSharedTypedArray(cx, objv, &view)) {
    return false;
  }
  if (view->type() != Scalar::Int32) {
    return ReportBadArrayType(cx);
  }
  uint32_t offset;
  if (!GetTypedArrayIndex(cx, idxv, view, &offset)) {
    return false;
  }
  int32_t value;
  if (!ToInt32(cx, valv, &value)) {
    return false;
  }
  // Nothing (wait forever) when the timeout is undefined, NaN, or +Inf;
  // negative timeouts clamp to zero.
  mozilla::Maybe<mozilla::TimeDuration> timeout;
  if (!timeoutv.isUndefined()) {
    double timeout_ms;
    if (!ToNumber(cx, timeoutv, &timeout_ms)) {
      return false;
    }
    if (!mozilla::IsNaN(timeout_ms)) {
      if (timeout_ms < 0) {
        timeout = mozilla::Some(mozilla::TimeDuration::FromSeconds(0.0));
      } else if (!mozilla::IsInfinite(timeout_ms)) {
        timeout =
            mozilla::Some(mozilla::TimeDuration::FromMilliseconds(timeout_ms));
      }
    }
  }
  // Translate the element index into a byte offset within the whole
  // SharedArrayBuffer, accounting for the view's own byte offset.
  Rooted<SharedArrayBufferObject*> sab(cx, view->bufferShared());
  uint32_t byteOffset =
      offset * sizeof(int32_t) +
      (view->dataPointerShared().cast<uint8_t*>().unwrap() -
       sab->dataPointerShared().unwrap());
  switch (atomics_wait_impl(cx, sab->rawBufferObject(), byteOffset, value,
                            timeout)) {
    case FutexThread::WaitResult::NotEqual:
      r.setString(cx->names().futexNotEqual);
      return true;
    case FutexThread::WaitResult::OK:
      r.setString(cx->names().futexOK);
      return true;
    case FutexThread::WaitResult::TimedOut:
      r.setString(cx->names().futexTimedOut);
      return true;
    case FutexThread::WaitResult::Error:
      return false;
    default:
      MOZ_CRASH("Should not happen");
  }
}
// Wake up to |count| waiters blocked on |byteOffset| in |sarb|; a
// negative |count| means "wake all". Returns the number of waiters
// actually notified.
int64_t js::atomics_notify_impl(SharedArrayRawBuffer* sarb, uint32_t byteOffset,
                                int64_t count) {
  MOZ_ASSERT(sarb, "notify is only applicable to shared memory");
  AutoLockFutexAPI lock;
  int64_t woken = 0;
  FutexWaiter* waiters = sarb->waiters();
  if (waiters && count) {
    // Walk the circular waiter ring once, starting at the head. Waiters
    // for other offsets, or whose thread is no longer waiting, are
    // skipped without consuming |count|.
    FutexWaiter* iter = waiters;
    do {
      FutexWaiter* c = iter;
      iter = iter->lower_pri;
      if (c->offset != byteOffset || !c->cx->fx.isWaiting()) {
        continue;
      }
      c->cx->fx.notify(FutexThread::NotifyExplicit);
      MOZ_RELEASE_ASSERT(woken < INT64_MAX);
      ++woken;
      // Only positive counts are decremented; a negative count stays
      // nonzero so the loop wakes every matching waiter.
      if (count > 0) {
        --count;
      }
    } while (count && iter != waiters);
  }
  return woken;
}
// Atomics.notify(typedArray, index, count)
//
// Only Int32 shared views may be notified on. Returns the number of
// woken waiters. An undefined |count| means "wake all" (encoded as -1);
// negative counts clamp to zero.
bool js::atomics_notify(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  HandleValue objv = args.get(0);
  HandleValue idxv = args.get(1);
  HandleValue countv = args.get(2);
  MutableHandleValue r = args.rval();
  Rooted<TypedArrayObject*> view(cx, nullptr);
  if (!GetSharedTypedArray(cx, objv, &view)) {
    return false;
  }
  if (view->type() != Scalar::Int32) {
    return ReportBadArrayType(cx);
  }
  uint32_t offset;
  if (!GetTypedArrayIndex(cx, idxv, view, &offset)) {
    return false;
  }
  int64_t count;
  if (countv.isUndefined()) {
    count = -1;
  } else {
    double dcount;
    if (!ToInteger(cx, countv, &dcount)) {
      return false;
    }
    if (dcount < 0.0) {
      dcount = 0.0;
    }
    // Counts beyond int64 range also mean "wake all".
    count = dcount > INT64_MAX ? -1 : int64_t(dcount);
  }
  // Translate the element index into a byte offset within the whole
  // SharedArrayBuffer, accounting for the view's own byte offset.
  Rooted<SharedArrayBufferObject*> sab(cx, view->bufferShared());
  uint32_t byteOffset =
      offset * sizeof(int32_t) +
      (view->dataPointerShared().cast<uint8_t*>().unwrap() -
       sab->dataPointerShared().unwrap());
  r.setNumber(
      double(atomics_notify_impl(sab->rawBufferObject(), byteOffset, count)));
  return true;
}
// Create the process-wide futex mutex. Must be called exactly once
// before any futex operation; returns false on OOM.
bool js::FutexThread::initialize() {
  MOZ_ASSERT(!lock_);
  lock_ = js_new<js::Mutex>(mutexid::FutexThread);
  return lock_ != nullptr;
}
// Destroy the process-wide futex mutex, if it was ever created.
void js::FutexThread::destroy() {
  js::Mutex* lock = lock_;
  if (!lock) {
    return;
  }
  js_delete(lock);
  lock_ = nullptr;
}
// Acquire the process-wide futex mutex directly (non-RAII path; see
// AutoLockFutexAPI for the scoped variant).
void js::FutexThread::lock() {
  js::Mutex* lock = lock_;
  lock->lock();
}
// The process-wide mutex guarding waiter lists and FutexThread state.
// Stored in an atomic pointer; DontPreserve excludes it from
// record/replay snapshots.
mozilla::Atomic<js::Mutex*, mozilla::SequentiallyConsistent,
                mozilla::recordreplay::Behavior::DontPreserve>
    FutexThread::lock_;
// Release the process-wide futex mutex (pairs with lock() above).
void js::FutexThread::unlock() {
  js::Mutex* lock = lock_;
  lock->unlock();
}
// Per-thread futex state: no condvar yet (created by initInstance),
// Idle, and not allowed to wait until explicitly enabled.
js::FutexThread::FutexThread()
    : cond_(nullptr), state_(Idle), canWait_(false) {}
// Create this thread's condition variable. Requires the global futex
// lock to exist already; returns false on OOM.
bool js::FutexThread::initInstance() {
  MOZ_ASSERT(lock_);
  cond_ = js_new<js::ConditionVariable>();
  return cond_ != nullptr;
}
// Destroy this thread's condition variable, if it was ever created.
void js::FutexThread::destroyInstance() {
  if (cond_) {
    js_delete(cond_);
  }
}
// True while the thread is logically inside wait() — including the
// interrupted states, where it is not blocked on the condvar but has
// not yet left the wait.
bool js::FutexThread::isWaiting() {
  switch (state_) {
    case Waiting:
    case WaitingInterrupted:
    case WaitingNotifiedForInterrupt:
      return true;
    default:
      return false;
  }
}
// Block the current thread on its condition variable until it is woken,
// times out, or errors out. |locked| holds the global futex lock on
// entry and on exit (it is released only around the interrupt handler).
FutexThread::WaitResult js::FutexThread::wait(
    JSContext* cx, js::UniqueLock<js::Mutex>& locked,
    const mozilla::Maybe<mozilla::TimeDuration>& timeout) {
  MOZ_ASSERT(&cx->fx == this);
  MOZ_ASSERT(cx->fx.canWait());
  MOZ_ASSERT(state_ == Idle || state_ == WaitingInterrupted);
  // A nested wait from inside an interrupt handler is not allowed.
  if (state_ == WaitingInterrupted) {
    UnlockGuard<Mutex> unlock(locked);
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_ATOMICS_WAIT_NOT_ALLOWED);
    return WaitResult::Error;
  }
  auto onFinish = mozilla::MakeScopeExit([&] { state_ = Idle; });
  const bool isTimed = timeout.isSome();
  auto finalEnd = timeout.map([](const mozilla::TimeDuration& timeout) {
    return mozilla::TimeStamp::Now() + timeout;
  });
  // Sleep in slices of at most ~4000 seconds, re-checking the deadline
  // after each slice — presumably to sidestep platform limits on very
  // long condvar waits; TODO confirm the original rationale.
  auto maxSlice = mozilla::TimeDuration::FromSeconds(4000.0);
  for (;;) {
    auto sliceEnd = finalEnd.map([&](mozilla::TimeStamp& finalEnd) {
      auto sliceEnd = mozilla::TimeStamp::Now() + maxSlice;
      if (finalEnd < sliceEnd) {
        sliceEnd = finalEnd;
      }
      return sliceEnd;
    });
    state_ = Waiting;
    if (isTimed) {
      mozilla::Unused << cond_->wait_until(locked, *sliceEnd);
    } else {
      cond_->wait(locked);
    }
    switch (state_) {
      case FutexThread::Waiting:
        // Either a spurious wakeup or a slice expired; time out only if
        // the real deadline has passed.
        if (isTimed) {
          auto now = mozilla::TimeStamp::Now();
          if (now >= *finalEnd) {
            return WaitResult::TimedOut;
          }
        }
        break;
      case FutexThread::Woken:
        return WaitResult::OK;
      case FutexThread::WaitingNotifiedForInterrupt:
        // Run the interrupt handler with the futex lock released; an
        // explicit notify() arriving meanwhile sets state_ to Woken.
        state_ = WaitingInterrupted;
        {
          UnlockGuard<Mutex> unlock(locked);
          if (!cx->handleInterrupt()) {
            return WaitResult::Error;
          }
        }
        if (state_ == Woken) {
          return WaitResult::OK;
        }
        break;
      default:
        MOZ_CRASH("Bad FutexState in wait()");
    }
  }
}
// Wake this thread's wait(), either explicitly (Atomics.notify) or for
// a JS interrupt. Caller holds the global futex lock.
void js::FutexThread::notify(NotifyReason reason) {
  MOZ_ASSERT(isWaiting());
  // If the thread is currently off in its interrupt handler, just mark
  // it Woken; it is not blocked on the condvar, so no signal is needed —
  // wait() re-checks state_ when the handler returns.
  if ((state_ == WaitingInterrupted || state_ == WaitingNotifiedForInterrupt) &&
      reason == NotifyExplicit) {
    state_ = Woken;
    return;
  }
  switch (reason) {
    case NotifyExplicit:
      state_ = Woken;
      break;
    case NotifyForJSInterrupt:
      // Already flagged for interrupt; nothing more to do.
      if (state_ == WaitingNotifiedForInterrupt) {
        return;
      }
      state_ = WaitingNotifiedForInterrupt;
      break;
    default:
      MOZ_CRASH("bad NotifyReason in FutexThread::notify()");
  }
  cond_->notify_all();
}
// Methods installed on the Atomics namespace object. "wake" is the
// legacy pre-rename alias of "notify" and shares its implementation.
const JSFunctionSpec AtomicsMethods[] = {
    JS_INLINABLE_FN("compareExchange", atomics_compareExchange, 4, 0,
                    AtomicsCompareExchange),
    JS_INLINABLE_FN("load", atomics_load, 2, 0, AtomicsLoad),
    JS_INLINABLE_FN("store", atomics_store, 3, 0, AtomicsStore),
    JS_INLINABLE_FN("exchange", atomics_exchange, 3, 0, AtomicsExchange),
    JS_INLINABLE_FN("add", atomics_add, 3, 0, AtomicsAdd),
    JS_INLINABLE_FN("sub", atomics_sub, 3, 0, AtomicsSub),
    JS_INLINABLE_FN("and", atomics_and, 3, 0, AtomicsAnd),
    JS_INLINABLE_FN("or", atomics_or, 3, 0, AtomicsOr),
    JS_INLINABLE_FN("xor", atomics_xor, 3, 0, AtomicsXor),
    JS_INLINABLE_FN("isLockFree", atomics_isLockFree, 1, 0, AtomicsIsLockFree),
    JS_FN("wait", atomics_wait, 4, 0),
    JS_FN("notify", atomics_notify, 3, 0),
    JS_FN("wake", atomics_notify, 3, 0), JS_FS_END};
// Create the Atomics namespace object, install its methods and
// @@toStringTag, and define it as a resolving property on the global.
// Returns nullptr on failure.
JSObject* AtomicsObject::initClass(JSContext* cx,
                                   Handle<GlobalObject*> global) {
  RootedObject objProto(cx,
                        GlobalObject::getOrCreateObjectPrototype(cx, global));
  if (!objProto) {
    return nullptr;
  }
  RootedObject Atomics(cx, NewObjectWithGivenProto(cx, &AtomicsObject::class_,
                                                   objProto, SingletonObject));
  if (!Atomics) {
    return nullptr;
  }
  if (!JS_DefineFunctions(cx, Atomics, AtomicsMethods)) {
    return nullptr;
  }
  if (!DefineToStringTag(cx, Atomics, cx->names().Atomics)) {
    return nullptr;
  }
  RootedValue AtomicsValue(cx, ObjectValue(*Atomics));
  if (!DefineDataProperty(cx, global, cx->names().Atomics, AtomicsValue,
                          JSPROP_RESOLVING)) {
    return nullptr;
  }
  // Cache the namespace object so later lookups resolve to the same one.
  global->setConstructor(JSProto_Atomics, AtomicsValue);
  return Atomics;
}
// Engine-facing entry point for initializing the Atomics global.
JSObject* js::InitAtomicsClass(JSContext* cx, Handle<GlobalObject*> global) {
  return AtomicsObject::initClass(cx, global);
}
#undef CXX11_ATOMICS
#undef GNU_ATOMICS