#ifndef jit_shared_AtomicOperations_feeling_lucky_msvc_h
#define jit_shared_AtomicOperations_feeling_lucky_msvc_h
#include "mozilla/Assertions.h"
#include "mozilla/Types.h"
#if ((defined(__x86_64__) || defined(_M_X64)) && defined(JS_CODEGEN_X64)) || \
((defined(__i386__) || defined(_M_IX86)) && defined(JS_CODEGEN_X86)) || \
(defined(__arm__) && defined(JS_CODEGEN_ARM)) || \
((defined(__aarch64__) || defined(_M_ARM64)) && defined(JS_CODEGEN_ARM64))
# error "Do not use this code on a tier-1 platform when a JIT is available"
#endif
#if !defined(_MSC_VER)
# error "This file only for Microsoft Visual C++"
#endif
// No process-wide setup is needed: all operations below map directly onto
// compiler intrinsics. Always reports success.
inline bool js::jit::AtomicOperations::Initialize() { return true; }
// Nothing to tear down; Initialize() acquired no resources.
inline void js::jit::AtomicOperations::ShutDown() {}
inline bool js::jit::AtomicOperations::hasAtomic8() { return true; }
// The 8-byte operations never fall back to a lock.
inline bool js::jit::AtomicOperations::isLockfree8() { return true; }
// Full sequentially-consistent fence.
// _ReadWriteBarrier() is a compiler-only barrier (prevents compiler
// reordering, emits no instruction); the per-architecture intrinsic that
// follows emits the actual hardware memory fence.
inline void js::jit::AtomicOperations::fenceSeqCst() {
_ReadWriteBarrier();
#if defined(_M_IX86) || defined(_M_X64)
// x86/x64: MFENCE serializes all prior loads and stores.
_mm_mfence();
#elif defined(_M_ARM64)
// ARM64: full-system data memory barrier.
__dmb(_ARM64_BARRIER_SY);
#else
# error "Unknown hardware for MSVC"
#endif
}
// Seq-cst load for types the hardware loads atomically: a plain load
// bracketed by compiler-only barriers. NOTE(review): no CPU fence is issued
// here — this relies on the target's memory model keeping aligned loads
// ordered strongly enough ("feeling lucky"); 64-bit loads on 32-bit x86 are
// handled by the specializations below instead.
template <typename T>
inline T js::jit::AtomicOperations::loadSeqCst(T* addr) {
_ReadWriteBarrier();
T v = *addr;
_ReadWriteBarrier();
return v;
}
// On 32-bit x86 a plain 64-bit load is not atomic, so 64-bit seq-cst loads
// are specialized to a compare-exchange with identical comparand and
// exchange values (0, 0): the location is read atomically (CMPXCHG8B)
// without ever being modified.
//
// Fix: the original closed with "} } #endif" on one line; a preprocessor
// directive must begin on its own line, so the #endif is split out here.
#ifdef _M_IX86
namespace js {
namespace jit {

#  define MSC_LOADOP(T)                                                       \
    template <>                                                               \
    inline T AtomicOperations::loadSeqCst(T* addr) {                          \
      _ReadWriteBarrier();                                                    \
      return (T)_InterlockedCompareExchange64((__int64 volatile*)addr, 0, 0); \
    }

MSC_LOADOP(int64_t)
MSC_LOADOP(uint64_t)

#  undef MSC_LOADOP

}  // namespace jit
}  // namespace js
#endif  // _M_IX86
// Seq-cst store: compiler barrier, plain store, then a full hardware fence
// (fenceSeqCst) so the store is globally visible before any later memory
// operation. 64-bit stores on 32-bit x86 are handled by the specializations
// below instead.
template <typename T>
inline void js::jit::AtomicOperations::storeSeqCst(T* addr, T val) {
_ReadWriteBarrier();
*addr = val;
fenceSeqCst();
}
// On 32-bit x86 a plain 64-bit store is not atomic, so seq-cst 64-bit stores
// are emulated with a CMPXCHG8B compare-exchange loop: keep retrying until
// the value we last observed is still in memory when we swap in |val|.
//
// Fix: the original closed with "} } #endif" on one line; a preprocessor
// directive must begin on its own line, so the #endif is split out here.
#ifdef _M_IX86
namespace js {
namespace jit {

#  define MSC_STOREOP(T)                                                 \
    template <>                                                          \
    inline void AtomicOperations::storeSeqCst(T* addr, T val) {          \
      _ReadWriteBarrier();                                               \
      T oldval = *addr;                                                  \
      for (;;) {                                                         \
        T nextval = (T)_InterlockedCompareExchange64(                    \
            (__int64 volatile*)addr, (__int64)val, (__int64)oldval);     \
        if (nextval == oldval) break;                                    \
        oldval = nextval;                                                \
      }                                                                  \
      _ReadWriteBarrier();                                               \
    }

MSC_STOREOP(int64_t)
MSC_STOREOP(uint64_t)

#  undef MSC_STOREOP

}  // namespace jit
}  // namespace js
#endif  // _M_IX86
// exchangeSeqCst: atomically replace |*addr| with |val| and return the
// previous value. Each (T, U, xchgop) triple maps a JS integer type onto the
// operand type U expected by the MSVC interlocked intrinsic.
#define MSC_EXCHANGEOP(T, U, xchgop) \
template <> \
inline T AtomicOperations::exchangeSeqCst(T* addr, T val) { \
return (T)xchgop((U volatile*)addr, (U)val); \
}
// 32-bit x86 has no 64-bit interlocked exchange, so emulate it with a
// CMPXCHG8B compare-exchange loop: retry until the value we last observed is
// the one actually replaced, then return it.
#ifdef _M_IX86
# define MSC_EXCHANGEOP_CAS(T) \
template <> \
inline T AtomicOperations::exchangeSeqCst(T* addr, T val) { \
_ReadWriteBarrier(); \
T oldval = *addr; \
for (;;) { \
T nextval = (T)_InterlockedCompareExchange64( \
(__int64 volatile*)addr, (__int64)val, (__int64)oldval); \
if (nextval == oldval) break; \
oldval = nextval; \
} \
_ReadWriteBarrier(); \
return oldval; \
}
#endif
namespace js {
namespace jit {
MSC_EXCHANGEOP(int8_t, char, _InterlockedExchange8)
MSC_EXCHANGEOP(uint8_t, char, _InterlockedExchange8)
MSC_EXCHANGEOP(int16_t, short, _InterlockedExchange16)
MSC_EXCHANGEOP(uint16_t, short, _InterlockedExchange16)
MSC_EXCHANGEOP(int32_t, long, _InterlockedExchange)
MSC_EXCHANGEOP(uint32_t, long, _InterlockedExchange)
// 64-bit: CAS loop on 32-bit x86, native intrinsic elsewhere.
#ifdef _M_IX86
MSC_EXCHANGEOP_CAS(int64_t)
MSC_EXCHANGEOP_CAS(uint64_t)
#else
MSC_EXCHANGEOP(int64_t, __int64, _InterlockedExchange64)
MSC_EXCHANGEOP(uint64_t, __int64, _InterlockedExchange64)
#endif
} }
#undef MSC_EXCHANGEOP
#undef MSC_EXCHANGEOP_CAS
// compareExchangeSeqCst: if |*addr| == |oldval|, atomically store |newval|;
// in all cases return the value that was in memory (callers compare it
// against |oldval| to detect success). Note the argument order: the MSVC
// intrinsics take (destination, exchange, comparand), so |newval| precedes
// |oldval| in the call below.
#define MSC_CAS(T, U, cmpxchg) \
template <> \
inline T AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, \
T newval) { \
return (T)cmpxchg((U volatile*)addr, (U)newval, (U)oldval); \
}
namespace js {
namespace jit {
MSC_CAS(int8_t, char, _InterlockedCompareExchange8)
MSC_CAS(uint8_t, char, _InterlockedCompareExchange8)
MSC_CAS(int16_t, short, _InterlockedCompareExchange16)
MSC_CAS(uint16_t, short, _InterlockedCompareExchange16)
MSC_CAS(int32_t, long, _InterlockedCompareExchange)
MSC_CAS(uint32_t, long, _InterlockedCompareExchange)
// _InterlockedCompareExchange64 (CMPXCHG8B) is available even on 32-bit
// x86, so no CAS-loop emulation is needed here.
MSC_CAS(int64_t, __int64, _InterlockedCompareExchange64)
MSC_CAS(uint64_t, __int64, _InterlockedCompareExchange64)
} }
#undef MSC_CAS
// fetchAddSeqCst: atomically add |val| to |*addr| and return the previous
// value, via the interlocked exchange-add intrinsics.
#define MSC_FETCHADDOP(T, U, xadd) \
template <> \
inline T AtomicOperations::fetchAddSeqCst(T* addr, T val) { \
return (T)xadd((U volatile*)addr, (U)val); \
}
// fetchSubSeqCst is fetchAdd of the negation; 0 - val is used so the
// unsigned instantiations wrap with well-defined modular arithmetic instead
// of applying unary minus to an unsigned operand.
#define MSC_FETCHSUBOP(T) \
template <> \
inline T AtomicOperations::fetchSubSeqCst(T* addr, T val) { \
return fetchAddSeqCst(addr, (T)(0 - val)); \
}
// 32-bit x86 has no 64-bit exchange-add, so emulate it with a CMPXCHG8B
// compare-exchange loop: retry until the observed value is the one replaced
// by (observed + val), then return the observed (pre-add) value.
#ifdef _M_IX86
# define MSC_FETCHADDOP_CAS(T) \
template <> \
inline T AtomicOperations::fetchAddSeqCst(T* addr, T val) { \
_ReadWriteBarrier(); \
T oldval = *addr; \
for (;;) { \
T nextval = (T)_InterlockedCompareExchange64((__int64 volatile*)addr, \
(__int64)(oldval + val), \
(__int64)oldval); \
if (nextval == oldval) break; \
oldval = nextval; \
} \
_ReadWriteBarrier(); \
return oldval; \
}
#endif
namespace js {
namespace jit {
MSC_FETCHADDOP(int8_t, char, _InterlockedExchangeAdd8)
MSC_FETCHADDOP(uint8_t, char, _InterlockedExchangeAdd8)
MSC_FETCHADDOP(int16_t, short, _InterlockedExchangeAdd16)
MSC_FETCHADDOP(uint16_t, short, _InterlockedExchangeAdd16)
MSC_FETCHADDOP(int32_t, long, _InterlockedExchangeAdd)
MSC_FETCHADDOP(uint32_t, long, _InterlockedExchangeAdd)
// 64-bit: CAS loop on 32-bit x86, native intrinsic elsewhere.
#ifdef _M_IX86
MSC_FETCHADDOP_CAS(int64_t)
MSC_FETCHADDOP_CAS(uint64_t)
#else
MSC_FETCHADDOP(int64_t, __int64, _InterlockedExchangeAdd64)
MSC_FETCHADDOP(uint64_t, __int64, _InterlockedExchangeAdd64)
#endif
// All subtraction variants delegate to the fetchAdd specializations above.
MSC_FETCHSUBOP(int8_t)
MSC_FETCHSUBOP(uint8_t)
MSC_FETCHSUBOP(int16_t)
MSC_FETCHSUBOP(uint16_t)
MSC_FETCHSUBOP(int32_t)
MSC_FETCHSUBOP(uint32_t)
MSC_FETCHSUBOP(int64_t)
MSC_FETCHSUBOP(uint64_t)
} }
#undef MSC_FETCHADDOP
#undef MSC_FETCHADDOP_CAS
#undef MSC_FETCHSUBOP
// fetchAnd/fetchOr/fetchXorSeqCst: atomically apply the bitwise operation to
// |*addr| and return the previous value. MSC_FETCHBITOPX emits one
// specialization; MSC_FETCHBITOP emits all three for a given (T, U) pair.
#define MSC_FETCHBITOPX(T, U, name, op) \
template <> \
inline T AtomicOperations::name(T* addr, T val) { \
return (T)op((U volatile*)addr, (U)val); \
}
#define MSC_FETCHBITOP(T, U, andop, orop, xorop) \
MSC_FETCHBITOPX(T, U, fetchAndSeqCst, andop) \
MSC_FETCHBITOPX(T, U, fetchOrSeqCst, orop) \
MSC_FETCHBITOPX(T, U, fetchXorSeqCst, xorop)
// 32-bit x86 has no 64-bit interlocked and/or/xor, so emulate each with a
// CMPXCHG8B compare-exchange loop. The operator itself is passed as a macro
// token (AND_OP/OR_OP/XOR_OP) and spliced into the loop body.
#ifdef _M_IX86
# define AND_OP &
# define OR_OP |
# define XOR_OP ^
# define MSC_FETCHBITOPX_CAS(T, name, OP) \
template <> \
inline T AtomicOperations::name(T* addr, T val) { \
_ReadWriteBarrier(); \
T oldval = *addr; \
for (;;) { \
T nextval = (T)_InterlockedCompareExchange64((__int64 volatile*)addr, \
(__int64)(oldval OP val), \
(__int64)oldval); \
if (nextval == oldval) break; \
oldval = nextval; \
} \
_ReadWriteBarrier(); \
return oldval; \
}
# define MSC_FETCHBITOP_CAS(T) \
MSC_FETCHBITOPX_CAS(T, fetchAndSeqCst, AND_OP) \
MSC_FETCHBITOPX_CAS(T, fetchOrSeqCst, OR_OP) \
MSC_FETCHBITOPX_CAS(T, fetchXorSeqCst, XOR_OP)
#endif
namespace js {
namespace jit {
MSC_FETCHBITOP(int8_t, char, _InterlockedAnd8, _InterlockedOr8,
_InterlockedXor8)
MSC_FETCHBITOP(uint8_t, char, _InterlockedAnd8, _InterlockedOr8,
_InterlockedXor8)
MSC_FETCHBITOP(int16_t, short, _InterlockedAnd16, _InterlockedOr16,
_InterlockedXor16)
MSC_FETCHBITOP(uint16_t, short, _InterlockedAnd16, _InterlockedOr16,
_InterlockedXor16)
MSC_FETCHBITOP(int32_t, long, _InterlockedAnd, _InterlockedOr, _InterlockedXor)
MSC_FETCHBITOP(uint32_t, long, _InterlockedAnd, _InterlockedOr, _InterlockedXor)
// 64-bit: CAS loops on 32-bit x86, native intrinsics elsewhere.
#ifdef _M_IX86
MSC_FETCHBITOP_CAS(int64_t)
MSC_FETCHBITOP_CAS(uint64_t)
#else
MSC_FETCHBITOP(int64_t, __int64, _InterlockedAnd64, _InterlockedOr64,
_InterlockedXor64)
MSC_FETCHBITOP(uint64_t, __int64, _InterlockedAnd64, _InterlockedOr64,
_InterlockedXor64)
#endif
} }
#undef MSC_FETCHBITOPX_CAS
#undef MSC_FETCHBITOPX
#undef MSC_FETCHBITOP_CAS
#undef MSC_FETCHBITOP
// Racy (unordered) load: a plain read with no barriers. Callers accept that
// the value may be stale relative to other threads.
template <typename T>
inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) {
  T observed = *addr;
  return observed;
}
// Racy (unordered) store: a plain write with no barriers or ordering
// guarantees.
template <typename T>
inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) {
  T* target = addr;
  *target = val;
}
// Racy byte copy. memcpy requires non-overlapping buffers, so assert that
// neither range starts inside the other before delegating.
inline void js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest,
                                                          const void* src,
                                                          size_t nbytes) {
  char* d = (char*)dest;
  char* s = (char*)src;
  MOZ_ASSERT(!(d <= s && s < d + nbytes));
  MOZ_ASSERT(!(s <= d && d < s + nbytes));
  ::memcpy(dest, src, nbytes);
}
// Racy byte move. Unlike memcpySafeWhenRacy, overlapping ranges are allowed:
// memmove handles them.
inline void js::jit::AtomicOperations::memmoveSafeWhenRacy(
    void* dest, const void* src, size_t nbytes) {
  ::memmove(dest, src, nbytes);
}
#endif