#include "gc/Memory.h"
#include "mozilla/Atomics.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/RandomNum.h"
#include "mozilla/TaggedAnonymousMemory.h"
#include "js/HeapAPI.h"
#include "vm/Runtime.h"
#ifdef XP_WIN
# include "util/Windows.h"
# include <psapi.h>
#else
# include <algorithm>
# include <errno.h>
# include <sys/mman.h>
# include <sys/resource.h>
# include <sys/stat.h>
# include <sys/types.h>
# include <unistd.h>
#endif
namespace js {
namespace gc {
// System page size and allocation granularity (bytes), captured once by
// InitMemorySubsystem(); both are zero until initialization runs.
static size_t pageSize = 0;
static size_t allocGranularity = 0;
// Number of significant bits in a user-space address (set at init time).
static size_t numAddressBits = 0;
#if defined(XP_DARWIN)
// Running guess at which way the kernel places successive mappings, used by
// the POSIX TryToAlignChunk(): > 0 means upward, < 0 downward.  Darwin is
// known to allocate upward, so start at 1.
static mozilla::Atomic<int, mozilla::Relaxed,
                       mozilla::recordreplay::Behavior::DontPreserve>
    growthDirection(1);
#elif defined(XP_UNIX)
// On other Unixes the direction is learned at runtime, so start undecided.
static mozilla::Atomic<int, mozilla::Relaxed,
                       mozilla::recordreplay::Behavior::DontPreserve>
    growthDirection(0);
#endif
// Upper bound on realignment attempts in MapAlignedPagesLastDitch().
static const int MaxLastDitchAttempts = 32;
#ifdef JS_64BIT
// The scattershot (random-placement) allocator requires at least this many
// usable address bits to have a good chance of hitting free space.
static const size_t MinAddressBitsForRandomAlloc = 43;
// Allocations of at least this size (1 GiB) are placed above hugeSplit so
// they cannot fragment the lower part of the usable range.
static const size_t HugeAllocationSize = 1024 * 1024 * 1024;
// Bounds of the address range sampled by the scattershot allocator, and the
// boundary between "normal" and "huge" placements; set at init time.
static size_t minValidAddress = 0;
static size_t maxValidAddress = 0;
static size_t hugeSplit = 0;
#endif
// Accessor for the OS page size cached by InitMemorySubsystem().
size_t SystemPageSize() {
  return pageSize;
}
// Accessor for the cached number of significant user-space address bits.
size_t SystemAddressBits() {
  return numAddressBits;
}
// Whether MapAlignedPages() will use randomized ("scattershot") placement;
// only possible on 64-bit builds with a large enough address space.
bool UsingScattershotAllocator() {
#ifdef JS_64BIT
  return numAddressBits >= MinAddressBitsForRandomAlloc;
#else
  return false;
#endif
}
// Whether newly mapped memory should be committed immediately.  Only the
// Windows path of MapInternal() distinguishes the two; the POSIX path does
// not consult this flag.
enum class Commit : bool {
  No = false,
  Yes = true,
};
#ifdef XP_WIN
// Page protection flags, wrapping each platform's native constants so the
// rest of this file can be written platform-neutrally.
enum class PageAccess : DWORD {
  None = PAGE_NOACCESS,
  Read = PAGE_READONLY,
  ReadWrite = PAGE_READWRITE,
  Execute = PAGE_EXECUTE,
  ReadExecute = PAGE_EXECUTE_READ,
  ReadWriteExecute = PAGE_EXECUTE_READWRITE,
};
#else
// POSIX equivalents: the PROT_* bits accepted by mmap()/mprotect().
enum class PageAccess : int {
  None = PROT_NONE,
  Read = PROT_READ,
  ReadWrite = PROT_READ | PROT_WRITE,
  Execute = PROT_EXEC,
  ReadExecute = PROT_READ | PROT_EXEC,
  ReadWriteExecute = PROT_READ | PROT_WRITE | PROT_EXEC,
};
#endif
// Tries to turn the misaligned mapping in *aRegion into an aligned one.
// When AlwaysGetNew is set, a fresh region may be mapped and the old one
// handed back through *aRetainedRegion for the caller to unmap.  Returns
// true on success.  Platform-specific definitions appear below.
template <bool AlwaysGetNew = true>
static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
                            size_t length, size_t alignment);
static void* MapAlignedPagesSlow(size_t length, size_t alignment);
static void* MapAlignedPagesLastDitch(size_t length, size_t alignment);
#ifdef JS_64BIT
static void* MapAlignedPagesRandom(size_t length, size_t alignment);
#endif
// Test-only entry point exposing the last-ditch allocation strategy.
void* TestMapAlignedPagesLastDitch(size_t length, size_t alignment) {
  return MapAlignedPagesLastDitch(length, alignment);
}
// Decommit is only meaningful when the OS page size matches the GC arena
// size; otherwise MarkPagesUnused()/MarkPagesInUse() return early.
static inline bool DecommitEnabled() { return pageSize == ArenaSize; }
// Returns how far |region| sits above the nearest |alignment| boundary
// (0 when the pointer is exactly aligned).
static inline size_t OffsetFromAligned(void* region, size_t alignment) {
  auto address = reinterpret_cast<uintptr_t>(region);
  return address - (address / alignment) * alignment;
}
// Core mapping primitive.  |desired| is only a placement hint (nullptr lets
// the OS choose); the returned region may be elsewhere.  Returns nullptr on
// failure.
template <Commit commit, PageAccess prot>
static inline void* MapInternal(void* desired, size_t length) {
  void* region = nullptr;
#ifdef XP_WIN
  DWORD flags =
      (commit == Commit::Yes ? MEM_RESERVE | MEM_COMMIT : MEM_RESERVE);
  region = VirtualAlloc(desired, length, flags, DWORD(prot));
#else
  int flags = MAP_PRIVATE | MAP_ANON;
  // Tag the anonymous mapping ("js-gc-heap") so tooling can attribute it.
  region = MozTaggedAnonymousMmap(desired, length, int(prot), flags, -1, 0,
                                  "js-gc-heap");
  // mmap signals failure with MAP_FAILED, not nullptr; normalize it.
  if (region == MAP_FAILED) {
    return nullptr;
  }
#endif
  return region;
}
// Releases a mapping made by MapInternal().  On Windows |length| is ignored:
// VirtualFree with MEM_RELEASE frees the entire original allocation.
static inline void UnmapInternal(void* region, size_t length) {
  MOZ_ASSERT(region && OffsetFromAligned(region, allocGranularity) == 0);
  MOZ_ASSERT(length > 0 && length % pageSize == 0);
#ifdef XP_WIN
  MOZ_RELEASE_ASSERT(VirtualFree(region, 0, MEM_RELEASE) != 0);
#else
  // munmap failure is tolerated only for ENOMEM, which can occur when
  // unmapping part of a mapping would split it and no VMA slot is free.
  if (munmap(region, length)) {
    MOZ_RELEASE_ASSERT(errno == ENOMEM);
  }
#endif
}
// Maps |length| bytes at an address chosen entirely by the OS.
template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
static inline void* MapMemory(size_t length) {
  MOZ_ASSERT(length > 0);
  return MapInternal<commit, prot>(nullptr, length);
}
// Maps |length| bytes, hinting the OS to place them at |desired|.  The OS
// may put the mapping anywhere, so callers must check the result address.
template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
static inline void* MapMemoryAtFuzzy(void* desired, size_t length) {
  MOZ_ASSERT(desired && OffsetFromAligned(desired, allocGranularity) == 0);
  MOZ_ASSERT(length > 0);
  return MapInternal<commit, prot>(desired, length);
}
// Maps |length| bytes exactly at |desired|, or returns nullptr.  If the OS
// places the mapping elsewhere, it is unmapped again before returning.
template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
static inline void* MapMemoryAt(void* desired, size_t length) {
  MOZ_ASSERT(desired && OffsetFromAligned(desired, allocGranularity) == 0);
  MOZ_ASSERT(length > 0);
  void* region = MapInternal<commit, prot>(desired, length);
  if (!region) {
    return nullptr;
  }
  // The hint was not honored; treat that as failure.
  if (region != desired) {
    UnmapInternal(region, length);
    return nullptr;
  }
  return region;
}
#ifdef JS_64BIT
// Returns a uniformly distributed random integer in the inclusive range
// [minNum, maxNum], using rejection sampling to avoid modulo bias.
// NOTE(review): assumes minNum <= maxNum and maxNum - minNum < UINT64_MAX;
// otherwise |maxNum + 1| below divides by zero.  All callers in this file
// appear to satisfy this — confirm before reusing elsewhere.
static inline uint64_t GetNumberInRange(uint64_t minNum, uint64_t maxNum) {
  const uint64_t MaxRand = UINT64_C(0xffffffffffffffff);
  // Work with the zero-based range size; the result is rebased at the end.
  maxNum -= minNum;
  // Partition [0, 2^64) into (maxNum + 1) bins of binSize; raw values that
  // fall past the last full bin are rejected by the outer loop.
  uint64_t binSize = 1 + (MaxRand - maxNum) / (maxNum + 1);
  uint64_t rndNum;
  do {
    mozilla::Maybe<uint64_t> result;
    do {
      // RandomUint64() can fail transiently, so retry until it succeeds.
      // NOTE(review): the pass-through scope presumably keeps RNG events
      // out of record/replay recordings — confirm.
      mozilla::recordreplay::AutoPassThroughThreadEvents pt;
      result = mozilla::RandomUint64();
    } while (!result);
    rndNum = result.value() / binSize;
  } while (rndNum > maxNum);
  return minNum + rndNum;
}
# ifndef XP_WIN
static inline uint64_t FindAddressLimitInner(size_t highBit, size_t tries);

// Probes for the top of the usable virtual address space by attempting
// small mappings at random high addresses.  Returns the number of usable
// address bits.
static size_t FindAddressLimit() {
  // Assume at least 32 usable bits; |highestSeen| tracks the highest
  // address observed so far and |low| its floor-log2.
  uint64_t low = 31;
  uint64_t highestSeen = (UINT64_C(1) << 32) - allocGranularity - 1;
  uint64_t high = 47;
  // Check the common 47- and 46-bit layouts first.
  for (; high >= std::max(low, UINT64_C(46)); --high) {
    highestSeen = std::max(FindAddressLimitInner(high, 4), highestSeen);
    low = mozilla::FloorLog2(highestSeen);
  }
  // Binary-search the remaining bit range for the highest bit at which
  // mappings still succeed.
  while (high - 1 > low) {
    uint64_t middle = low + (high - low) / 2;
    highestSeen = std::max(FindAddressLimitInner(middle, 4), highestSeen);
    low = mozilla::FloorLog2(highestSeen);
    if (highestSeen < (UINT64_C(1) << middle)) {
      high = middle;
    }
  }
  // Verification pass: keep probing just above the estimate (with more
  // tries) until nothing higher is observed.
  do {
    high = low + 1;
    highestSeen = std::max(FindAddressLimitInner(high, 8), highestSeen);
    low = mozilla::FloorLog2(highestSeen);
  } while (low >= high);
  // |low| is the index of the highest set bit seen, so low + 1 bits are
  // usable.
  return low + 1;
}
// Makes up to |tries| attempts to map one allocation granule at a random
// address whose highest set bit is |highBit|.  Returns the highest address
// actually observed — even from mappings the OS relocated — or 0 if no
// mapping succeeded.
static inline uint64_t FindAddressLimitInner(size_t highBit, size_t tries) {
  const size_t length = allocGranularity;
  uint64_t highestSeen = 0;
  // Candidate addresses lie in [2^highBit, 2^(highBit+1) - length - 1],
  // expressed in units of |length| so every candidate is granule-aligned.
  uint64_t startRaw = UINT64_C(1) << highBit;
  uint64_t endRaw = 2 * startRaw - length - 1;
  uint64_t start = (startRaw + length - 1) / length;
  uint64_t end = (endRaw - (length - 1)) / length;
  for (size_t i = 0; i < tries; ++i) {
    uint64_t desired = length * GetNumberInRange(start, end);
    void* address = MapMemoryAtFuzzy(reinterpret_cast<void*>(desired), length);
    uint64_t actual = uint64_t(address);
    if (address) {
      // This is only a probe; release the mapping immediately.
      UnmapInternal(address, length);
    }
    if (actual > highestSeen) {
      highestSeen = actual;
      // A mapping landed in the target range; no need to keep probing.
      if (actual >= startRaw) {
        break;
      }
    }
  }
  return highestSeen;
}
# endif
#endif
// One-time initialization of the cached page size, allocation granularity,
// and (on 64-bit) the usable address range.  Idempotent: once pageSize is
// set, subsequent calls do nothing.
void InitMemorySubsystem() {
  if (pageSize == 0) {
#ifdef XP_WIN
    SYSTEM_INFO sysinfo;
    GetSystemInfo(&sysinfo);
    pageSize = sysinfo.dwPageSize;
    allocGranularity = sysinfo.dwAllocationGranularity;
#else
    pageSize = size_t(sysconf(_SC_PAGESIZE));
    // POSIX has no separate allocation granularity; use the page size.
    allocGranularity = pageSize;
#endif
#ifdef JS_64BIT
# ifdef XP_WIN
    minValidAddress = size_t(sysinfo.lpMinimumApplicationAddress);
    maxValidAddress = size_t(sysinfo.lpMaximumApplicationAddress);
    numAddressBits = mozilla::FloorLog2(maxValidAddress) + 1;
# else
    // POSIX offers no direct query for the address-space limits, so probe.
    numAddressBits = FindAddressLimit();
    minValidAddress = allocGranularity;
    maxValidAddress = (UINT64_C(1) << numAddressBits) - 1 - allocGranularity;
# endif
    // Clamp to 47 significant bits.  NOTE(review): presumably this matches
    // the engine's pointer-packing limit for JS values — confirm.
    uint64_t maxJSAddress = UINT64_C(0x00007fffffffffff) - allocGranularity;
    if (maxValidAddress > maxJSAddress) {
      maxValidAddress = maxJSAddress;
      hugeSplit = UINT64_C(0x00003fffffffffff) - allocGranularity;
    } else {
      // Split the usable range in half for normal vs. huge allocations.
      hugeSplit = (UINT64_C(1) << (numAddressBits - 1)) - 1 - allocGranularity;
    }
#else
    numAddressBits = 32;
#endif
  }
}
#ifdef JS_64BIT
// A region is invalid when its last byte has any of the top 17 address
// bits set (the mask below), i.e. lies outside the low 47-bit range.
static inline bool IsInvalidRegion(void* region, size_t length) {
  const uint64_t kUpperBits = UINT64_C(0xffff800000000000);
  uint64_t lastByte = uint64_t(reinterpret_cast<uintptr_t>(region)) + length - 1;
  return (lastByte & kUpperBits) != 0;
}
#endif
// Maps |length| bytes aligned to |alignment| (rounded up to at least the
// allocation granularity; alignment and granularity must divide one
// another).  Strategy, in order: scattershot allocator (large 64-bit
// address spaces only), plain map that happens to align, realignment via
// TryToAlignChunk(), the slow reserve-and-trim path, and finally the
// last-ditch path.  May return nullptr under memory pressure.
//
// Fix: "&region" at the TryToAlignChunk() call had been corrupted into the
// mojibake "(R)ion" (HTML entity damage), which does not compile.
void* MapAlignedPages(size_t length, size_t alignment) {
  MOZ_RELEASE_ASSERT(length > 0 && alignment > 0);
  MOZ_RELEASE_ASSERT(length % pageSize == 0);
  MOZ_RELEASE_ASSERT(std::max(alignment, allocGranularity) %
                         std::min(alignment, allocGranularity) ==
                     0);
  if (alignment < allocGranularity) {
    alignment = allocGranularity;
  }
#ifdef JS_64BIT
  // Prefer random placement whenever the address space is big enough.
  if (UsingScattershotAllocator()) {
    void* region = MapAlignedPagesRandom(length, alignment);
    MOZ_RELEASE_ASSERT(!IsInvalidRegion(region, length));
    MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);
    return region;
  }
#endif
  // Fast path: an ordinary mapping may already be aligned.  (This also
  // returns nullptr directly on mapping failure, since 0 % n == 0.)
  void* region = MapMemory(length);
  if (OffsetFromAligned(region, alignment) == 0) {
    return region;
  }
  void* retainedRegion;
  if (TryToAlignChunk(&region, &retainedRegion, length, alignment)) {
    MOZ_ASSERT(region && OffsetFromAligned(region, alignment) == 0);
    MOZ_ASSERT(!retainedRegion);
    return region;
  }
  // Realignment failed; release whatever it left behind before falling
  // back to the slower strategies.
  if (retainedRegion) {
    UnmapInternal(retainedRegion, length);
  }
  if (region) {
    MOZ_ASSERT(OffsetFromAligned(region, alignment) != 0);
    UnmapInternal(region, length);
  }
  region = MapAlignedPagesSlow(length, alignment);
  if (!region) {
    region = MapAlignedPagesLastDitch(length, alignment);
  }
  MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);
  return region;
}
#ifdef JS_64BIT
// Scattershot allocation: probes up to 1024 random aligned addresses in the
// usable range.  Allocations below HugeAllocationSize are drawn from
// [minValidAddress, hugeSplit]; huge ones from above hugeSplit, so huge
// mappings cannot fragment the lower range.  Returns nullptr only for huge
// allocations; smaller failures crash the process.
//
// Fixes: "&region" at the TryToAlignChunk() call had been corrupted into
// the mojibake "(R)ion"; and the MOZ_CRASH message claimed "1000 tries"
// while the loop actually makes 1024 attempts.
static void* MapAlignedPagesRandom(size_t length, size_t alignment) {
  // Express the candidate range in units of |alignment| so every sampled
  // address is aligned and the whole region fits inside the bound.
  uint64_t minNum, maxNum;
  if (length < HugeAllocationSize) {
    minNum = (minValidAddress + alignment - 1) / alignment;
    maxNum = (hugeSplit - (length - 1)) / alignment;
  } else {
    minNum = (hugeSplit + 1 + alignment - 1) / alignment;
    maxNum = (maxValidAddress - (length - 1)) / alignment;
  }
  void* region = nullptr;
  for (size_t i = 1; i <= 1024; ++i) {
    if (i & 0xf) {
      // Usual case: pick a random aligned address and request it.
      uint64_t desired = alignment * GetNumberInRange(minNum, maxNum);
      region = MapMemoryAtFuzzy(reinterpret_cast<void*>(desired), length);
      if (!region) {
        continue;
      }
    } else {
      // Every 16th attempt, let the OS choose the placement instead.
      region = MapMemory(length);
      if (!region) {
        return nullptr;
      }
    }
    if (IsInvalidRegion(region, length)) {
      UnmapInternal(region, length);
      continue;
    }
    if (OffsetFromAligned(region, alignment) == 0) {
      return region;
    }
    // The mapping exists but is misaligned; try to nudge it into place
    // without mapping a replacement (AlwaysGetNew == false).
    void* retainedRegion = nullptr;
    if (TryToAlignChunk<false>(&region, &retainedRegion, length, alignment)) {
      MOZ_ASSERT(region && OffsetFromAligned(region, alignment) == 0);
      MOZ_ASSERT(!retainedRegion);
      return region;
    }
    MOZ_ASSERT(region && !retainedRegion);
    UnmapInternal(region, length);
  }
  // Smaller address spaces can plausibly be too fragmented for random
  // placement; fall back to the deterministic slow path before giving up.
  if (numAddressBits < 48) {
    region = MapAlignedPagesSlow(length, alignment);
    if (region) {
      return region;
    }
  }
  if (length < HugeAllocationSize) {
    MOZ_CRASH("Couldn't allocate even after 1024 tries!");
  }
  return nullptr;
}
#endif
// Over-reserves |length + alignment - pageSize| bytes so that an aligned
// sub-range of |length| bytes must exist inside the reservation, then trims
// down to it.  On Windows, partial release is impossible, so the whole
// reservation is dropped and the aligned sub-range remapped — racy, hence
// the retry loop.  On POSIX the head and tail are simply unmapped.
static void* MapAlignedPagesSlow(size_t length, size_t alignment) {
  void* alignedRegion = nullptr;
  do {
    size_t reserveLength = length + alignment - pageSize;
#ifdef XP_WIN
    // Reserve without committing: the reservation is discarded below.
    void* region = MapMemory<Commit::No>(reserveLength);
#else
    void* region = MapMemory(reserveLength);
#endif
    if (!region) {
      return nullptr;
    }
    alignedRegion =
        reinterpret_cast<void*>(AlignBytes(uintptr_t(region), alignment));
#ifdef XP_WIN
    // Drop the reservation and try to grab exactly the aligned sub-range.
    // Another thread may take the address in between; then MapMemoryAt
    // yields nullptr and the loop retries with a fresh reservation.
    UnmapInternal(region, reserveLength);
    alignedRegion = MapMemoryAt(alignedRegion, length);
#else
    // Trim the misaligned head...
    if (alignedRegion != region) {
      UnmapInternal(region, uintptr_t(alignedRegion) - uintptr_t(region));
    }
    // ...and the excess tail beyond the aligned |length| bytes.
    void* regionEnd =
        reinterpret_cast<void*>(uintptr_t(region) + reserveLength);
    void* alignedEnd =
        reinterpret_cast<void*>(uintptr_t(alignedRegion) + length);
    if (alignedEnd != regionEnd) {
      UnmapInternal(alignedEnd, uintptr_t(regionEnd) - uintptr_t(alignedEnd));
    }
#endif
  } while (!alignedRegion);
  return alignedRegion;
}
// Last-resort aligned allocation: repeatedly calls TryToAlignChunk() while
// holding on to each region it retains (in tempMaps), so the kernel cannot
// keep handing back the same unusable addresses.  All retained regions are
// unmapped before returning.  Returns nullptr on failure.
//
// Fix: "&region" at the TryToAlignChunk() call had been corrupted into the
// mojibake "(R)ion" (HTML entity damage), which does not compile.  Also
// normalized the malformed "break; }" brace placement.
static void* MapAlignedPagesLastDitch(size_t length, size_t alignment) {
  void* tempMaps[MaxLastDitchAttempts];
  int attempt = 0;
  void* region = MapMemory(length);
  if (OffsetFromAligned(region, alignment) == 0) {
    return region;
  }
  for (; attempt < MaxLastDitchAttempts; ++attempt) {
    if (TryToAlignChunk(&region, tempMaps + attempt, length, alignment)) {
      MOZ_ASSERT(region && OffsetFromAligned(region, alignment) == 0);
      MOZ_ASSERT(!tempMaps[attempt]);
      break;
    }
    // Mapping failed outright, or nothing was retained this round; either
    // way further attempts cannot make progress.
    if (!region || !tempMaps[attempt]) {
      break;
    }
  }
  // Still misaligned: give up on this region.
  if (OffsetFromAligned(region, alignment)) {
    UnmapInternal(region, length);
    region = nullptr;
  }
  // Release the decoy regions accumulated along the way.
  while (--attempt >= 0) {
    UnmapInternal(tempMaps[attempt], length);
  }
  return region;
}
#ifdef XP_WIN
// Windows realignment: release the misaligned region, place a small
// uncommitted "blocker" reservation over its old start (so the next
// allocation cannot land there again), and map afresh.  Repeats until the
// blocker sticks.  On success the blocker is released here; on failure it
// is handed back via *aRetainedRegion for the caller to release.  The bool
// template parameter (AlwaysGetNew) is unused on Windows.
template <bool>
static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
                            size_t length, size_t alignment) {
  void* region = *aRegion;
  MOZ_ASSERT(region && OffsetFromAligned(region, alignment) != 0);
  size_t retainedLength = 0;
  void* retainedRegion = nullptr;
  do {
    size_t offset = OffsetFromAligned(region, alignment);
    // Also breaks when |region| is nullptr (0 % n == 0); handled below.
    if (offset == 0) {
      break;
    }
    UnmapInternal(region, length);
    // Reserve (uncommitted) the gap from the old start up to the next
    // alignment boundary, then map a new full-size region.
    retainedLength = alignment - offset;
    retainedRegion = MapMemoryAt<Commit::No>(region, retainedLength);
    region = MapMemory(length);
  } while (!retainedRegion);
  bool result = OffsetFromAligned(region, alignment) == 0;
  if (result && retainedRegion) {
    UnmapInternal(retainedRegion, retainedLength);
    retainedRegion = nullptr;
  }
  *aRegion = region;
  *aRetainedRegion = retainedRegion;
  return region && result;
}
#else
// POSIX realignment: try to extend the mapping into the free space adjacent
// to it — upward past its end, or downward before its start — and unmap the
// same amount from the opposite side, sliding the region onto an alignment
// boundary.  Which side is tried first follows growthDirection, a running
// guess at the kernel's placement direction; while the guess is weak
// (roughly |growthDirection| <= 8) a success nudges the counter and a
// failure means the other side is tried once too.  If both sides fail and
// AlwaysGetNew is set, the old region is handed back via *aRetainedRegion
// and a brand-new mapping is attempted.  Returns true iff *aRegion ends up
// aligned.
template <bool AlwaysGetNew>
static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
                            size_t length, size_t alignment) {
  void* regionStart = *aRegion;
  MOZ_ASSERT(regionStart && OffsetFromAligned(regionStart, alignment) != 0);
  bool addressesGrowUpward = growthDirection > 0;
  bool directionUncertain = -8 < growthDirection && growthDirection <= 8;
  // Distances from the region's start to the alignment boundaries just
  // below (offsetLower) and just above (offsetUpper) it.
  size_t offsetLower = OffsetFromAligned(regionStart, alignment);
  size_t offsetUpper = alignment - offsetLower;
  for (size_t i = 0; i < 2; ++i) {
    if (addressesGrowUpward) {
      // Claim the gap just past the region's end, then shed the same
      // amount from the front so the start moves up to the boundary.
      void* upperStart =
          reinterpret_cast<void*>(uintptr_t(regionStart) + offsetUpper);
      void* regionEnd =
          reinterpret_cast<void*>(uintptr_t(regionStart) + length);
      if (MapMemoryAt(regionEnd, offsetUpper)) {
        UnmapInternal(regionStart, offsetUpper);
        if (directionUncertain) {
          ++growthDirection;
        }
        regionStart = upperStart;
        break;
      }
    } else {
      // Claim the gap just before the region, then shed the same amount
      // from the back so the start moves down to the boundary.
      void* lowerStart =
          reinterpret_cast<void*>(uintptr_t(regionStart) - offsetLower);
      void* lowerEnd = reinterpret_cast<void*>(uintptr_t(lowerStart) + length);
      if (MapMemoryAt(lowerStart, offsetLower)) {
        UnmapInternal(lowerEnd, offsetLower);
        if (directionUncertain) {
          --growthDirection;
        }
        regionStart = lowerStart;
        break;
      }
    }
    // With a confident direction guess, don't bother trying the other side.
    if (!directionUncertain) {
      break;
    }
    addressesGrowUpward = !addressesGrowUpward;
  }
  void* retainedRegion = nullptr;
  bool result = OffsetFromAligned(regionStart, alignment) == 0;
  if (AlwaysGetNew && !result) {
    // Hold on to the old region (the caller may use it as a decoy) and
    // hope a fresh mapping lands aligned.
    retainedRegion = regionStart;
    regionStart = MapMemory(length);
    result = OffsetFromAligned(regionStart, alignment) == 0;
    if (result) {
      UnmapInternal(retainedRegion, length);
      retainedRegion = nullptr;
    }
  }
  *aRegion = regionStart;
  *aRetainedRegion = retainedRegion;
  return regionStart && result;
}
#endif
// Public page-unmapping entry point: validates arguments in all build
// types, notifies memory-analysis tooling, then releases the range.
void UnmapPages(void* region, size_t length) {
  MOZ_RELEASE_ASSERT(region &&
                     OffsetFromAligned(region, allocGranularity) == 0);
  MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);
  // Mark the range undefined for tools (e.g. Valgrind) before it vanishes.
  MOZ_MAKE_MEM_UNDEFINED(region, length);
  UnmapInternal(region, length);
}
// Advises the OS that the pages' contents are no longer needed (decommit
// hint); contents become undefined.  Returns false only when the platform
// call fails.  A no-op returning true when decommit is disabled.
bool MarkPagesUnused(void* region, size_t length) {
  MOZ_RELEASE_ASSERT(region);
  MOZ_RELEASE_ASSERT(length > 0);
  MOZ_ASSERT(OffsetFromAligned(region, ArenaSize) == 0);
  MOZ_ASSERT(length % ArenaSize == 0);
  MOZ_MAKE_MEM_NOACCESS(region, length);
  if (!DecommitEnabled()) {
    return true;
  }
  // Past this point the range must be whole pages for the syscalls below.
  MOZ_RELEASE_ASSERT(OffsetFromAligned(region, pageSize) == 0);
  MOZ_RELEASE_ASSERT(length % pageSize == 0);
#if defined(XP_WIN)
  // MEM_RESET: the pages stay committed but their data may be discarded.
  return VirtualAlloc(region, length, MEM_RESET,
                      DWORD(PageAccess::ReadWrite)) == region;
#elif defined(XP_DARWIN)
  return madvise(region, length, MADV_FREE) == 0;
#elif defined(XP_SOLARIS)
  return posix_madvise(region, length, POSIX_MADV_DONTNEED) == 0;
#else
  return madvise(region, length, MADV_DONTNEED) == 0;
#endif
}
// Counterpart of MarkPagesUnused(): validates arguments and re-registers
// the range with memory-analysis tooling.  NOTE(review): no system call is
// made here — presumably touching the pages recommits them implicitly on
// all supported platforms; confirm.
void MarkPagesInUse(void* region, size_t length) {
  MOZ_RELEASE_ASSERT(region);
  MOZ_RELEASE_ASSERT(length > 0);
  MOZ_ASSERT(OffsetFromAligned(region, ArenaSize) == 0);
  MOZ_ASSERT(length % ArenaSize == 0);
  MOZ_MAKE_MEM_UNDEFINED(region, length);
  if (!DecommitEnabled()) {
    return;
  }
  MOZ_RELEASE_ASSERT(OffsetFromAligned(region, pageSize) == 0);
  MOZ_RELEASE_ASSERT(length % pageSize == 0);
}
// Returns a cumulative page-fault count for this process: total faults on
// Windows (PageFaultCount), major faults only (ru_majflt) on POSIX.
// Returns 0 under record/replay or when the underlying query fails.
size_t GetPageFaultCount() {
  if (mozilla::recordreplay::IsRecordingOrReplaying()) {
    return 0;
  }
#ifdef XP_WIN
  PROCESS_MEMORY_COUNTERS pmc;
  if (GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)) == 0) {
    return 0;
  }
  return pmc.PageFaultCount;
#else
  struct rusage usage;
  int err = getrusage(RUSAGE_SELF, &usage);
  if (err) {
    return 0;
  }
  return usage.ru_majflt;
#endif
}
// Maps |length| bytes of file |fd| starting at |offset| (copy-on-write),
// with the data for |offset| placed at an |alignment|-aligned mapping.
// Returns a pointer to the byte corresponding to |offset| — possibly
// interior to the mapping — or nullptr on failure.  Release the result
// with DeallocateMappedContent().
void* AllocateMappedContent(int fd, size_t offset, size_t length,
                            size_t alignment) {
  // Reject degenerate arguments: offset must be alignment-aligned, and
  // alignment and allocGranularity must divide one another.
  if (length == 0 || alignment == 0 || offset % alignment != 0 ||
      std::max(alignment, allocGranularity) %
              std::min(alignment, allocGranularity) !=
          0) {
    return nullptr;
  }
  // File offsets must be granularity-aligned, so map from the nearest
  // lower boundary and carry the extra head bytes in alignedLength.
  size_t alignedOffset = offset - (offset % allocGranularity);
  size_t alignedLength = length + (offset % allocGranularity);
  // Round the reserved region up to whole pages.
  size_t mappedLength = alignedLength;
  if (alignedLength % pageSize != 0) {
    mappedLength += pageSize - alignedLength % pageSize;
  }
#ifdef XP_WIN
  HANDLE hFile = reinterpret_cast<HANDLE>(intptr_t(fd));
  // Read-only mapping object over the whole file; views are COW below.
  HANDLE hMap = CreateFileMapping(hFile, nullptr, PAGE_READONLY, 0, 0, nullptr);
  if (!hMap) {
    return nullptr;
  }
  DWORD offsetH = uint32_t(uint64_t(alignedOffset) >> 32);
  DWORD offsetL = uint32_t(alignedOffset);
  uint8_t* map = nullptr;
  for (;;) {
    // Find an aligned address by mapping and immediately unmapping, then
    // try to place the file view exactly there.  Another thread can steal
    // the address in between, so retry on ERROR_INVALID_ADDRESS.
    uintptr_t region = uintptr_t(MapAlignedPages(mappedLength, alignment));
    if (region == 0) {
      break;
    }
    UnmapInternal(reinterpret_cast<void*>(region), mappedLength);
#ifdef JS_ENABLE_UWP
    // UWP has no "map at this address" API; take whatever placement we get.
    map = static_cast<uint8_t*>(
        MapViewOfFileFromApp(hMap, FILE_MAP_COPY, ((ULONG64)offsetH << 32) | offsetL,
                             alignedLength));
#else
    map = static_cast<uint8_t*>(
        MapViewOfFileEx(hMap, FILE_MAP_COPY, offsetH, offsetL, alignedLength,
                        reinterpret_cast<void*>(region)));
#endif
    if (map || GetLastError() != ERROR_INVALID_ADDRESS) {
      break;
    }
  }
  // The mapping handle is no longer needed once the view exists (or the
  // attempt has failed).
  CloseHandle(hMap);
  if (!map) {
    return nullptr;
  }
#else
  // Refuse requests extending past the end of the file.
  struct stat st;
  if (fstat(fd, &st) || offset >= uint64_t(st.st_size) ||
      length > uint64_t(st.st_size) - offset) {
    return nullptr;
  }
  // Reserve an aligned region, then overlay the file view in place:
  // MAP_FIXED replaces the reservation at the same address.
  void* region = MapAlignedPages(mappedLength, alignment);
  if (!region) {
    return nullptr;
  }
  uint8_t* map =
      static_cast<uint8_t*>(mmap(region, alignedLength, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_FIXED, fd, alignedOffset));
  if (map == MAP_FAILED) {
    UnmapInternal(region, mappedLength);
    return nullptr;
  }
#endif
#ifdef DEBUG
  // Zero the bytes before |offset| and after the file data so the padding
  // reads deterministically in debug builds (the mapping is copy-on-write,
  // so the file itself is not modified).
  if (offset != alignedOffset) {
    memset(map, 0, offset - alignedOffset);
  }
  if (alignedLength % pageSize) {
    memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
  }
#endif
  return map + (offset - alignedOffset);
}
// Releases a mapping created by AllocateMappedContent().  |region| may
// point into the interior of the mapping; the true base is recovered by
// rounding down to the allocation granularity.  |length| is unused on
// Windows.
void DeallocateMappedContent(void* region, size_t length) {
  if (!region) {
    return;
  }
  uintptr_t map = uintptr_t(region) - (uintptr_t(region) % allocGranularity);
#ifdef XP_WIN
  MOZ_RELEASE_ASSERT(UnmapViewOfFile(reinterpret_cast<void*>(map)) != 0);
#else
  // Reconstruct the mapped length the same way AllocateMappedContent did.
  size_t alignedLength = length + (uintptr_t(region) % allocGranularity);
  if (munmap(reinterpret_cast<void*>(map), alignedLength)) {
    // As in UnmapInternal(), only ENOMEM is tolerated.
    MOZ_RELEASE_ASSERT(errno == ENOMEM);
  }
#endif
}
// Sets the protection of a whole-page range to |prot|; aborts (release
// assert) if the platform call fails.
static inline void ProtectMemory(void* region, size_t length, PageAccess prot) {
  MOZ_RELEASE_ASSERT(region && OffsetFromAligned(region, pageSize) == 0);
  MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);
#ifdef XP_WIN
  // VirtualProtect requires an out-parameter for the previous protection.
  DWORD oldProtect;
  MOZ_RELEASE_ASSERT(VirtualProtect(region, length, DWORD(prot), &oldProtect) !=
                     0);
#else
  MOZ_RELEASE_ASSERT(mprotect(region, length, int(prot)) == 0);
#endif
}
// Makes the pages completely inaccessible.
void ProtectPages(void* region, size_t length) {
  ProtectMemory(region, length, PageAccess::None);
}
// Makes the pages read-only.
void MakePagesReadOnly(void* region, size_t length) {
  ProtectMemory(region, length, PageAccess::Read);
}
// Restores normal read/write access.
void UnprotectPages(void* region, size_t length) {
  ProtectMemory(region, length, PageAccess::ReadWrite);
}
} }