#include "ds/LifoAlloc.h"
#include "mozilla/Likely.h"
#include "mozilla/MathAlgorithms.h"
#include "ds/MemoryProtectionExceptionHandler.h"
#ifdef LIFO_CHUNK_PROTECT
# include "gc/Memory.h"
#endif
using namespace js;
using mozilla::RoundUpPow2;
using mozilla::tl::BitSize;
namespace js {
namespace detail {
// Allocate a new chunk of |size| bytes: the BumpChunk header is
// placement-new'ed at the front of the buffer and the remainder becomes
// the bump-allocation space. Returns nullptr on OOM.
UniquePtr<BumpChunk> BumpChunk::newWithCapacity(size_t size) {
  // A chunk must at least be able to hold its own header.
  MOZ_DIAGNOSTIC_ASSERT(size >= sizeof(BumpChunk));

  void* storage = js_malloc(size);
  if (!storage) {
    return nullptr;
  }

  UniquePtr<BumpChunk> chunk(new (storage) BumpChunk(size));
  // The bump pointer must start out properly aligned.
  MOZ_ASSERT(AlignPtr(chunk->begin()) == chunk->begin());
  return chunk;
}
#ifdef LIFO_CHUNK_PROTECT
// Round |ptr| up to the next multiple of |align| (a power of two).
// Pointers already aligned are returned unchanged.
static uint8_t* AlignPtrUp(uint8_t* ptr, uintptr_t align) {
  MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
  const uintptr_t mask = align - 1;
  const uintptr_t addr = uintptr_t(ptr);
  // Classic round-up: adding the mask carries past the boundary unless the
  // address is already aligned, then the low bits are cleared.
  return reinterpret_cast<uint8_t*>((addr + mask) & ~mask);
}
// Round |ptr| down to the previous multiple of |align| (a power of two).
static uint8_t* AlignPtrDown(uint8_t* ptr, uintptr_t align) {
  MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
  // Clearing the low bits truncates to the enclosing alignment boundary.
  return reinterpret_cast<uint8_t*>(uintptr_t(ptr) & ~(align - 1));
}
// Write-protect the page-aligned interior of this chunk's payload.
// Partial pages at either end stay writable since protection is only
// possible at page granularity.
void BumpChunk::setReadOnly() {
  uintptr_t pageSize = gc::SystemPageSize();

  // Shrink [base, capacity) inward to whole pages.
  uint8_t* firstPage = AlignPtrUp(base(), pageSize);
  uint8_t* lastPageEnd = AlignPtrDown(capacity_, pageSize);
  if (lastPageEnd <= firstPage) {
    // The payload does not cover even one full page; nothing to protect.
    return;
  }

  // Register the whole chunk with the exception handler before flipping
  // page permissions, then make the aligned span read-only.
  js::MemoryProtectionExceptionHandler::addRegion(base(), capacity_ - base());
  gc::MakePagesReadOnly(firstPage, lastPageEnd - firstPage);
}
// Undo setReadOnly(): restore write access to the page-aligned interior
// and drop the exception-handler registration for this chunk.
void BumpChunk::setReadWrite() {
  uintptr_t pageSize = gc::SystemPageSize();

  // Recompute the same page-aligned span that setReadOnly() protected.
  uint8_t* firstPage = AlignPtrUp(base(), pageSize);
  uint8_t* lastPageEnd = AlignPtrDown(capacity_, pageSize);
  if (lastPageEnd <= firstPage) {
    // setReadOnly() protected nothing for this chunk, so nothing to undo.
    return;
  }

  // Unprotect first, then unregister the region.
  gc::UnprotectPages(firstPage, lastPageEnd - firstPage);
  js::MemoryProtectionExceptionHandler::removeRegion(base());
}
#endif
} }
void LifoAlloc::reset(size_t defaultChunkSize) {
MOZ_ASSERT(mozilla::IsPowerOfTwo(defaultChunkSize));
while (!chunks_.empty()) {
chunks_.popFirst();
}
while (!oversize_.empty()) {
oversize_.popFirst();
}
while (!unused_.empty()) {
unused_.popFirst();
}
defaultChunkSize_ = defaultChunkSize;
oversizeThreshold_ = defaultChunkSize;
markCount = 0;
curSize_ = 0;
smallAllocsSize_ = 0;
}
void LifoAlloc::freeAll() {
smallAllocsSize_ = 0;
while (!chunks_.empty()) {
UniqueBumpChunk bc = chunks_.popFirst();
decrementCurSize(bc->computedSizeOfIncludingThis());
}
while (!oversize_.empty()) {
UniqueBumpChunk bc = oversize_.popFirst();
decrementCurSize(bc->computedSizeOfIncludingThis());
}
while (!unused_.empty()) {
UniqueBumpChunk bc = unused_.popFirst();
decrementCurSize(bc->computedSizeOfIncludingThis());
}
MOZ_ASSERT(curSize_ == 0);
}
// Return the size malloc would actually reserve for a request of |aSize|
// bytes, so chunk accounting matches what the allocator really hands back.
static size_t MallocGoodSize(size_t aSize) {
#if defined(MOZ_MEMORY)
return malloc_good_size(aSize);
#else
// Without mozjemalloc there is no bucket-size query; assume the request
// size is allocated exactly.
return aSize;
#endif
}
// Growth heuristic for regular chunks: while less than 1 MB is in use,
// allocate as much as is already used (roughly doubling the footprint);
// past 1 MB, grow by used/8 rounded up to a whole megabyte to waste less.
static size_t NextSize(size_t start, size_t used) {
  const size_t oneMB = 1 * 1024 * 1024;
  if (used >= oneMB) {
    // Large allocator: grow more gradually.
    return JS_ROUNDUP(used / 8, oneMB);
  }
  // Small allocator: at least the default size, at most what is used.
  return Max(start, used);
}
// Allocate a brand-new chunk able to satisfy an allocation of |n| bytes.
// |oversize| requests get a chunk sized for exactly this allocation;
// regular requests follow the NextSize growth heuristic. Returns nullptr
// on overflow or OOM.
LifoAlloc::UniqueBumpChunk LifoAlloc::newChunkWithCapacity(size_t n,
                                                           bool oversize) {
  MOZ_ASSERT(fallibleScope_,
             "[OOM] Cannot allocate a new chunk in an infallible scope.");

  // Compute the bytes needed for |n| plus the chunk header and red zones.
  // Bail out on arithmetic overflow, or when the result would have the
  // top bit set — such requests are unreasonably large and risk further
  // overflow downstream.
  size_t minSize;
  const size_t topBit = size_t(1) << (BitSize<size_t>::value - 1);
  if (MOZ_UNLIKELY(!detail::BumpChunk::allocSizeWithRedZone(n, &minSize) ||
                   (minSize & topBit))) {
    return nullptr;
  }

  MOZ_ASSERT(curSize_ >= smallAllocsSize_);

  size_t chunkSize;
  if (oversize || minSize > defaultChunkSize_) {
    // Dedicated chunk, rounded up to malloc's real bucket size so no
    // usable space is wasted.
    chunkSize = MallocGoodSize(minSize);
  } else {
    // Regular chunk, sized by the small-allocation footprint so far.
    chunkSize = NextSize(defaultChunkSize_, smallAllocsSize_);
  }

  UniqueBumpChunk chunk = detail::BumpChunk::newWithCapacity(chunkSize);
  if (!chunk) {
    return nullptr;
  }
  MOZ_ASSERT(chunk->computedSizeOfIncludingThis() == chunkSize);
  return chunk;
}
// Return a chunk able to hold |n| bytes: reuse an unused chunk when one is
// large enough, otherwise allocate a fresh one (which is then accounted in
// curSize_; reused chunks already are). Returns nullptr on OOM.
LifoAlloc::UniqueBumpChunk LifoAlloc::getOrCreateChunk(size_t n) {
if (!unused_.empty()) {
// Fast path: the front unused chunk fits.
if (unused_.begin()->canAlloc(n)) {
return unused_.popFirst();
}
// Otherwise scan the remainder of the unused list. The loop inspects
// i->next() rather than *i so that, on a match, the list can be split
// right after |i| to unlink the matching chunk.
BumpChunkList::Iterator e(unused_.end());
for (BumpChunkList::Iterator i(unused_.begin()); i->next() != e.get();
++i) {
detail::BumpChunk* elem = i->next();
MOZ_ASSERT(elem->empty());
if (elem->canAlloc(n)) {
// Unlink |elem|: split off the tail starting at |elem|, pop it off
// the tail's front, then re-append the rest of the tail.
BumpChunkList temp = unused_.splitAfter(i.get());
UniqueBumpChunk newChunk = temp.popFirst();
unused_.appendAll(std::move(temp));
return newChunk;
}
}
}
// No reusable chunk found: allocate a new one using the growth heuristic.
UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
if (!newChunk) {
return newChunk;
}
incrementCurSize(newChunk->computedSizeOfIncludingThis());
return newChunk;
}
void* LifoAlloc::allocImplColdPath(size_t n) {
void* result;
UniqueBumpChunk newChunk = getOrCreateChunk(n);
if (!newChunk) {
return nullptr;
}
smallAllocsSize_ += newChunk->computedSizeOfIncludingThis();
chunks_.append(std::move(newChunk));
result = chunks_.last()->tryAlloc(n);
MOZ_ASSERT(result);
return result;
}
void* LifoAlloc::allocImplOversize(size_t n) {
void* result;
UniqueBumpChunk newChunk = newChunkWithCapacity(n, true);
if (!newChunk) {
return nullptr;
}
incrementCurSize(newChunk->computedSizeOfIncludingThis());
oversize_.append(std::move(newChunk));
result = oversize_.last()->tryAlloc(n);
MOZ_ASSERT(result);
return result;
}
bool LifoAlloc::ensureUnusedApproximateColdPath(size_t n, size_t total) {
for (detail::BumpChunk& bc : unused_) {
total += bc.unused();
if (total >= n) {
return true;
}
}
UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
if (!newChunk) {
return false;
}
incrementCurSize(newChunk->computedSizeOfIncludingThis());
unused_.pushFront(std::move(newChunk));
return true;
}
// Record the current allocation position of both the regular and oversize
// lists so a later release() can roll back to it. An empty list leaves the
// corresponding sub-mark default-constructed.
LifoAlloc::Mark LifoAlloc::mark() {
  markCount++;
  Mark position;
  if (!chunks_.empty()) {
    position.chunk = chunks_.last()->mark();
  }
  if (!oversize_.empty()) {
    position.oversize = oversize_.last()->mark();
  }
  return position;
}
// Roll both chunk lists back to |mark|. Chunks cut off the regular list
// are kept on unused_ for reuse; chunks cut off the oversize list are
// freed immediately.
void LifoAlloc::release(Mark mark) {
markCount--;
#ifdef DEBUG
// In debug builds, check that each sub-mark either is default (no marked
// chunk) or points into a chunk this LifoAlloc still owns.
auto assertIsContained = [](const detail::BumpChunk::Mark& m,
BumpChunkList& list) {
if (m.markedChunk()) {
bool contained = false;
for (const detail::BumpChunk& chunk : list) {
if (&chunk == m.markedChunk() && chunk.contains(m)) {
contained = true;
break;
}
}
MOZ_ASSERT(contained);
}
};
assertIsContained(mark.chunk, chunks_);
assertIsContained(mark.oversize, oversize_);
#endif
BumpChunkList released;
// Move into |released| every chunk that comes after the marked chunk (or
// the whole list for a default mark), then roll the now-last chunk's bump
// pointer back to the marked position.
auto cutAtMark = [&released](const detail::BumpChunk::Mark& m,
BumpChunkList& list) {
if (!m.markedChunk()) {
released = std::move(list);
} else {
released = list.splitAfter(m.markedChunk());
}
if (!list.empty()) {
list.last()->release(m);
}
};
cutAtMark(mark.chunk, chunks_);
// Regular chunks are recycled: reset each one, take it out of the
// small-allocation accounting, and park it on the unused list. This must
// happen before the oversize cut below, which reuses |released|.
for (detail::BumpChunk& bc : released) {
bc.release();
smallAllocsSize_ -= bc.computedSizeOfIncludingThis();
}
unused_.appendAll(std::move(released));
cutAtMark(mark.oversize, oversize_);
// Oversize chunks are not worth keeping around: free them now and drop
// their footprint from curSize_.
while (!released.empty()) {
UniqueBumpChunk bc = released.popFirst();
decrementCurSize(bc->computedSizeOfIncludingThis());
}
}
// Take over all of |other|'s chunks and bookkeeping, leaving |other| empty
// but usable. |this| must be empty, and |other| must have no live marks.
void LifoAlloc::steal(LifoAlloc* other) {
MOZ_ASSERT(!other->markCount);
MOZ_DIAGNOSTIC_ASSERT(unused_.empty());
MOZ_DIAGNOSTIC_ASSERT(chunks_.empty());
MOZ_DIAGNOSTIC_ASSERT(oversize_.empty());
chunks_ = std::move(other->chunks_);
oversize_ = std::move(other->oversize_);
unused_ = std::move(other->unused_);
// markCount is zero here per the assertion above.
markCount = other->markCount;
defaultChunkSize_ = other->defaultChunkSize_;
oversizeThreshold_ = other->oversizeThreshold_;
curSize_ = other->curSize_;
// Keep whichever peak is higher; stealing never lowers the high-water mark.
peakSize_ = Max(peakSize_, other->peakSize_);
smallAllocsSize_ = other->smallAllocsSize_;
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
fallibleScope_ = other->fallibleScope_;
#endif
// Reset |other| with the chunk size just copied from it, so it stays
// configured the same way it was.
other->reset(defaultChunkSize_);
}
// Splice all of |other|'s chunks (used and unused) onto this allocator,
// prepending the used lists, and drain |other|'s accounting. Neither
// allocator may have live marks.
void LifoAlloc::transferFrom(LifoAlloc* other) {
MOZ_ASSERT(!markCount);
MOZ_ASSERT(!other->markCount);
// Credit |other|'s footprint before its counters are cleared below.
incrementCurSize(other->curSize_);
appendUnused(std::move(other->unused_));
chunks_.prependAll(std::move(other->chunks_));
oversize_.prependAll(std::move(other->oversize_));
other->curSize_ = 0;
other->smallAllocsSize_ = 0;
}
// Move only |other|'s unused chunks onto this allocator, shifting their
// footprint from |other|'s accounting to ours. |this| must have no live
// marks.
void LifoAlloc::transferUnusedFrom(LifoAlloc* other) {
MOZ_ASSERT(!markCount);
// Sum the transferred footprint before the list is moved away.
size_t size = 0;
for (detail::BumpChunk& bc : other->unused_) {
size += bc.computedSizeOfIncludingThis();
}
appendUnused(std::move(other->unused_));
incrementCurSize(size);
other->decrementCurSize(size);
}
#ifdef LIFO_CHUNK_PROTECT
void LifoAlloc::setReadOnly() {
for (detail::BumpChunk& bc : chunks_) {
bc.setReadOnly();
}
for (detail::BumpChunk& bc : oversize_) {
bc.setReadOnly();
}
for (detail::BumpChunk& bc : unused_) {
bc.setReadOnly();
}
}
void LifoAlloc::setReadWrite() {
for (detail::BumpChunk& bc : chunks_) {
bc.setReadWrite();
}
for (detail::BumpChunk& bc : oversize_) {
bc.setReadWrite();
}
for (detail::BumpChunk& bc : unused_) {
bc.setReadWrite();
}
}
#endif