#include "mozilla/DebugOnly.h"
#include "mozilla/Maybe.h"
#include "jit/BaselineIC.h"
#include "jit/CacheIRCompiler.h"
#include "jit/IonIC.h"
#include "jit/JSJitFrameIter.h"
#include "jit/Linker.h"
#include "jit/SharedICHelpers.h"
#include "jit/VMFunctions.h"
#include "proxy/DeadObjectProxy.h"
#include "proxy/Proxy.h"
#include "jit/JSJitFrameIter-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/VMFunctionList-inl.h"
#include "vm/Realm-inl.h"
#include "vm/TypeInference-inl.h"
using namespace js;
using namespace js::jit;
using mozilla::DebugOnly;
using mozilla::Maybe;
namespace js {
namespace jit {
class AutoSaveLiveRegisters;
// IonCacheIRCompiler compiles CacheIR to machine code for Ion IC stubs.
// It extends the shared CacheIRCompiler with Ion-specific stub-field
// access, VM-call setup, and stub linking state.
class MOZ_RAII IonCacheIRCompiler : public CacheIRCompiler {
 public:
  friend class AutoSaveLiveRegisters;

  IonCacheIRCompiler(JSContext* cx, const CacheIRWriter& writer, IonIC* ic,
                     IonScript* ionScript, IonICStub* stub,
                     const PropertyTypeCheckInfo* typeCheckInfo,
                     uint32_t stubDataOffset)
      : CacheIRCompiler(cx, writer, stubDataOffset, Mode::Ion,
                        StubFieldPolicy::Constant),
        writer_(writer),
        ic_(ic),
        ionScript_(ionScript),
        stub_(stub),
        typeCheckInfo_(typeCheckInfo),
#ifdef DEBUG
        calledPrepareVMCall_(false),
#endif
        savedLiveRegs_(false) {
    MOZ_ASSERT(ic_);
    MOZ_ASSERT(ionScript_);
  }

  MOZ_MUST_USE bool init();
  JitCode* compile();

 private:
  const CacheIRWriter& writer_;
  IonIC* ic_;
  IonScript* ionScript_;

  // The IC stub this code is being generated for.
  IonICStub* stub_;

  // Property type-check info; used by SetProp/SetElem paths (see
  // needsPostBarrier below).
  const PropertyTypeCheckInfo* typeCheckInfo_;

  // Offset of the jump back into Ion code, patched in compile().
  CodeOffsetJump rejoinOffset_;

  // Offsets of the placeholder next-stub pointers, patched in compile()
  // with stub_->nextCodeRawPtr().
  Vector<CodeOffset, 4, SystemAllocPolicy> nextCodeOffsets_;

  Maybe<LiveRegisterSet> liveRegs_;
  Maybe<CodeOffset> stubJitCodeOffset_;
#ifdef DEBUG
  bool calledPrepareVMCall_;
#endif
  bool savedLiveRegs_;

  // Read a raw word-sized stub field, reinterpreted as T.
  template <typename T>
  T rawWordStubField(uint32_t offset) {
    static_assert(sizeof(T) == sizeof(uintptr_t), "T must have word size");
    return (T)readStubWord(offset, StubField::Type::RawWord);
  }
  // Read a raw 64-bit stub field, reinterpreted as T.
  template <typename T>
  T rawInt64StubField(uint32_t offset) {
    static_assert(sizeof(T) == sizeof(int64_t), "T must have int64 size");
    return (T)readStubInt64(offset, StubField::Type::RawInt64);
  }

  // Return a pointer into the stub data for the DOM expando generation,
  // asserting it currently holds the recorded generation value.
  uint64_t* expandoGenerationStubFieldPtr(uint32_t offset) {
    DebugOnly<uint64_t> generation =
        readStubInt64(offset, StubField::Type::DOMExpandoGeneration);
    uint64_t* ptr =
        reinterpret_cast<uint64_t*>(stub_->stubDataStart() + offset);
    MOZ_ASSERT(*ptr == generation);
    return ptr;
  }

  void prepareVMCall(MacroAssembler& masm, const AutoSaveLiveRegisters&);
  void callVMInternal(MacroAssembler& masm, VMFunctionId id);

  template <typename Fn, Fn fn>
  void callVM(MacroAssembler& masm) {
    VMFunctionId id = VMFunctionToId<Fn, fn>::id;
    callVMInternal(masm, id);
  }

  MOZ_MUST_USE bool emitAddAndStoreSlotShared(CacheOp op);

  bool needsPostBarrier() const {
    return ic_->asSetPropertyIC()->needsPostBarrier();
  }

  // Push a placeholder JitCode* for this stub; compile() patches in the
  // real pointer once the code is linked.
  void pushStubCodePointer() {
    stubJitCodeOffset_.emplace(masm.PushWithPatch(ImmPtr((void*)-1)));
  }

#define DEFINE_OP(op, ...) MOZ_MUST_USE bool emit##op();
  CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP
};
// RAII guard used around VM calls from Ion IC stubs: the constructor
// saves the Ion live registers (via the compiler's register allocator)
// and the destructor restores them, asserting the frame is back to the
// IonScript's normal size.
class MOZ_RAII AutoSaveLiveRegisters {
  IonCacheIRCompiler& compiler_;

  AutoSaveLiveRegisters(const AutoSaveLiveRegisters&) = delete;
  void operator=(const AutoSaveLiveRegisters&) = delete;

 public:
  explicit AutoSaveLiveRegisters(IonCacheIRCompiler& compiler)
      : compiler_(compiler) {
    MOZ_ASSERT(compiler_.liveRegs_.isSome());
    compiler_.allocator.saveIonLiveRegisters(
        compiler_.masm, compiler_.liveRegs_.ref(),
        compiler_.ic_->scratchRegisterForEntryJump(), compiler_.ionScript_);
    compiler_.savedLiveRegs_ = true;
  }
  ~AutoSaveLiveRegisters() {
    // The stub code pointer must have been pushed (see
    // pushStubCodePointer) before the registers are restored.
    MOZ_ASSERT(compiler_.stubJitCodeOffset_.isSome(),
               "Must have pushed JitCode* pointer");
    compiler_.allocator.restoreIonLiveRegisters(compiler_.masm,
                                                compiler_.liveRegs_.ref());
    MOZ_ASSERT(compiler_.masm.framePushed() ==
               compiler_.ionScript_->frameSize());
  }
};
} }
// Ops shared between Baseline and Ion simply forward to the base-class
// implementations in CacheIRCompiler.
#define DEFINE_SHARED_OP(op) \
  bool IonCacheIRCompiler::emit##op() { return CacheIRCompiler::emit##op(); }
CACHE_IR_SHARED_OPS(DEFINE_SHARED_OP)
#undef DEFINE_SHARED_OP
// Save the Ion live registers before a VM call from IC stub code. This
// restores the IC inputs to their original registers, relocates any
// CacheIR stack operands so they end up above the register save area,
// and then stores the live registers below them.
void CacheRegisterAllocator::saveIonLiveRegisters(MacroAssembler& masm,
                                                  LiveRegisterSet liveRegs,
                                                  Register scratch,
                                                  IonScript* ionScript) {
  freeDeadOperandLocations(masm);

  // Size of the save area: one word per GPR plus the FPU push size.
  size_t sizeOfLiveRegsInBytes = liveRegs.gprs().size() * sizeof(intptr_t) +
                                 liveRegs.fpus().getPushSizeInBytes();
  MOZ_ASSERT(sizeOfLiveRegsInBytes > 0);

  // Spill every non-input operand that currently lives in a register.
  size_t numInputs = writer_.numInputOperands();
  for (size_t i = numInputs; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (loc.isInRegister()) {
      spillOperandToStack(masm, &loc);
    }
  }

  // Put the IC inputs back in their original state.
  restoreInputState(masm, false);

#ifdef DEBUG
  // After restoreInputState no input operand may live on the stack.
  for (size_t i = 0; i < numInputs; i++) {
    const OperandLocation& loc = operandLocations_[i];
    MOZ_ASSERT(!loc.isOnStack());
  }
#endif

  // Any stack operand overlapping the bottom sizeOfLiveRegsInBytes of
  // the stub's stack area is copied to the top of the stack, so the
  // register save area can be written below without clobbering it.
  bool hasOperandOnStack = false;
  for (size_t i = numInputs; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (!loc.isOnStack()) {
      continue;
    }
    hasOperandOnStack = true;

    size_t operandSize = loc.stackSizeInBytes();
    size_t operandStackPushed = loc.stackPushed();
    MOZ_ASSERT(operandSize > 0);
    MOZ_ASSERT(stackPushed_ >= operandStackPushed);
    MOZ_ASSERT(operandStackPushed >= operandSize);

    // Entirely above the save area already: leave it where it is.
    if (operandStackPushed - operandSize >= sizeOfLiveRegsInBytes) {
      MOZ_ASSERT(stackPushed_ > sizeOfLiveRegsInBytes);
      continue;
    }

    // Grow the stack so the save area fits before copying operands up.
    if (sizeOfLiveRegsInBytes > stackPushed_) {
      size_t extraBytes = sizeOfLiveRegsInBytes - stackPushed_;
      MOZ_ASSERT((extraBytes % sizeof(uintptr_t)) == 0);
      masm.subFromStackPtr(Imm32(extraBytes));
      stackPushed_ += extraBytes;
    }

    // Re-push the operand at the top of the stack and update its
    // recorded location.
    if (loc.kind() == OperandLocation::PayloadStack) {
      masm.push(
          Address(masm.getStackPointer(), stackPushed_ - operandStackPushed));
      stackPushed_ += operandSize;
      loc.setPayloadStack(stackPushed_, loc.payloadType());
      continue;
    }
    MOZ_ASSERT(loc.kind() == OperandLocation::ValueStack);
    masm.pushValue(
        Address(masm.getStackPointer(), stackPushed_ - operandStackPushed));
    stackPushed_ += operandSize;
    loc.setValueStack(stackPushed_);
  }

  if (hasOperandOnStack) {
    // Carve the save area out of the bottom of the operand stack space,
    // rebase the operand offsets, and store the registers there.
    MOZ_ASSERT(stackPushed_ > sizeOfLiveRegsInBytes);
    stackPushed_ -= sizeOfLiveRegsInBytes;
    for (size_t i = numInputs; i < operandLocations_.length(); i++) {
      OperandLocation& loc = operandLocations_[i];
      if (loc.isOnStack()) {
        loc.adjustStackPushed(-int32_t(sizeOfLiveRegsInBytes));
      }
    }
    size_t stackBottom = stackPushed_ + sizeOfLiveRegsInBytes;
    masm.storeRegsInMask(liveRegs, Address(masm.getStackPointer(), stackBottom),
                         scratch);
    masm.setFramePushed(masm.framePushed() + sizeOfLiveRegsInBytes);
  } else {
    // No stack operands: discard any leftover stack space and push the
    // live registers normally.
    if (stackPushed_ > 0) {
      masm.addToStackPtr(Imm32(stackPushed_));
      stackPushed_ = 0;
    }
    masm.PushRegsInMask(liveRegs);
  }

  // The stack layout changed, so previously-recorded free slots are
  // no longer valid.
  freePayloadSlots_.clear();
  freeValueSlots_.clear();

  MOZ_ASSERT(masm.framePushed() ==
             ionScript->frameSize() + sizeOfLiveRegsInBytes);

  // Everything except the input registers is now available; the
  // after-spill set is emptied so nothing further can be spilled.
  availableRegs_.set() = GeneralRegisterSet::Not(inputRegisterSet());
  availableRegsAfterSpill_.set() = GeneralRegisterSet();

  fixupAliasedInputs(masm);
}
// Undo saveIonLiveRegisters: pop the saved registers and reset the
// allocator's available-register sets for post-call code.
void CacheRegisterAllocator::restoreIonLiveRegisters(MacroAssembler& masm,
                                                     LiveRegisterSet liveRegs) {
  masm.PopRegsInMask(liveRegs);

  availableRegs_.set() = GeneralRegisterSet();
  availableRegsAfterSpill_.set() = GeneralRegisterSet::All();
}
// Return the address in Ion code that the current (topmost) exit frame
// will return to. In DEBUG builds, verify the caller frame really is
// Ion JS code.
static void* GetReturnAddressToIonCode(JSContext* cx) {
  JSJitFrameIter frame(cx->activation()->asJit());
  MOZ_ASSERT(frame.type() == FrameType::Exit,
             "An exit frame is expected as update functions are called with a "
             "VMFunction.");

  void* returnAddr = frame.returnAddress();
#ifdef DEBUG
  ++frame;
  MOZ_ASSERT(frame.isIonJS());
#endif
  return returnAddr;
}
// Begin an IonICCall frame for a VM call: push the stub code pointer,
// the frame descriptor, and the return address back into Ion code.
// Requires live registers to be saved (enforced by taking an
// AutoSaveLiveRegisters reference).
void IonCacheIRCompiler::prepareVMCall(MacroAssembler& masm,
                                       const AutoSaveLiveRegisters&) {
  uint32_t descriptor = MakeFrameDescriptor(
      masm.framePushed(), FrameType::IonJS, IonICCallFrameLayout::Size());
  pushStubCodePointer();
  masm.Push(Imm32(descriptor));
  masm.Push(ImmPtr(GetReturnAddressToIonCode(cx_)));

#ifdef DEBUG
  calledPrepareVMCall_ = true;
#endif
}
// Invoke the VM wrapper trampoline for |id| and clean up the frame
// pushed by prepareVMCall (which must have been called first).
void IonCacheIRCompiler::callVMInternal(MacroAssembler& masm, VMFunctionId id) {
  MOZ_ASSERT(calledPrepareVMCall_);

  TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(id);
  const VMFunctionData& fun = GetVMFunction(id);
  uint32_t frameSize = fun.explicitStackSlots() * sizeof(void*);
  uint32_t descriptor = MakeFrameDescriptor(frameSize, FrameType::IonICCall,
                                            ExitFrameLayout::Size());
  masm.Push(Imm32(descriptor));
  masm.callJit(code);

  // Pop the exit frame (minus the return address, already consumed by
  // the call) and the explicit VM-call arguments.
  int framePop = sizeof(ExitFrameLayout) - sizeof(void*);
  masm.implicitPop(frameSize + framePop);

  // Pop IonICCallFrameLayout.
  masm.freeStack(IonICCallFrameLayout::Size());
}
// Configure the compiler for this IC's kind: make the IC's output and
// temp registers available to the allocator, record the live register
// set and the (unchecked) output, and tell the allocator where each
// CacheIR input operand lives.
bool IonCacheIRCompiler::init() {
  if (!allocator.init()) {
    return false;
  }

  size_t numInputs = writer_.numInputOperands();

  AllocatableGeneralRegisterSet available;

  switch (ic_->kind()) {
    case CacheKind::GetProp:
    case CacheKind::GetElem: {
      IonGetPropertyIC* ic = ic_->asGetPropertyIC();
      TypedOrValueRegister output = ic->output();

      // The output register(s) may be reused inside the stub; a float
      // output cannot be handed out as a general-purpose register.
      if (output.hasValue()) {
        available.add(output.valueReg());
      } else if (!output.typedReg().isFloat()) {
        available.add(output.typedReg().gpr());
      }

      if (ic->maybeTemp() != InvalidReg) {
        available.add(ic->maybeTemp());
      }

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      allowDoubleResult_.emplace(ic->allowDoubleResult());

      MOZ_ASSERT(numInputs == 1 || numInputs == 2);

      allocator.initInputLocation(0, ic->value());
      if (numInputs > 1) {
        allocator.initInputLocation(1, ic->id());
      }
      break;
    }
    case CacheKind::GetPropSuper:
    case CacheKind::GetElemSuper: {
      IonGetPropSuperIC* ic = ic_->asGetPropSuperIC();
      TypedOrValueRegister output = ic->output();

      available.add(output.valueReg());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      allowDoubleResult_.emplace(true);

      MOZ_ASSERT(numInputs == 2 || numInputs == 3);

      allocator.initInputLocation(0, ic->object(), JSVAL_TYPE_OBJECT);

      if (ic->kind() == CacheKind::GetPropSuper) {
        MOZ_ASSERT(numInputs == 2);
        allocator.initInputLocation(1, ic->receiver());
      } else {
        MOZ_ASSERT(numInputs == 3);
        allocator.initInputLocation(1, ic->id());
        allocator.initInputLocation(2, ic->receiver());
      }
      break;
    }
    case CacheKind::SetProp:
    case CacheKind::SetElem: {
      IonSetPropertyIC* ic = ic_->asSetPropertyIC();

      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());

      allocator.initInputLocation(0, ic->object(), JSVAL_TYPE_OBJECT);

      if (ic->kind() == CacheKind::SetProp) {
        MOZ_ASSERT(numInputs == 2);
        allocator.initInputLocation(1, ic->rhs());
      } else {
        MOZ_ASSERT(numInputs == 3);
        allocator.initInputLocation(1, ic->id());
        allocator.initInputLocation(2, ic->rhs());
      }
      break;
    }
    case CacheKind::GetName: {
      IonGetNameIC* ic = ic_->asGetNameIC();
      ValueOperand output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->environment(), JSVAL_TYPE_OBJECT);
      break;
    }
    case CacheKind::BindName: {
      IonBindNameIC* ic = ic_->asBindNameIC();
      Register output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Object, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->environment(), JSVAL_TYPE_OBJECT);
      break;
    }
    case CacheKind::GetIterator: {
      IonGetIteratorIC* ic = ic_->asGetIteratorIC();
      Register output = ic->output();

      available.add(output);
      available.add(ic->temp1());
      available.add(ic->temp2());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Object, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->value());
      break;
    }
    case CacheKind::In: {
      IonInIC* ic = ic_->asInIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->key());
      allocator.initInputLocation(
          1, TypedOrValueRegister(MIRType::Object, AnyRegister(ic->object())));
      break;
    }
    case CacheKind::HasOwn: {
      IonHasOwnIC* ic = ic_->asHasOwnIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->id());
      allocator.initInputLocation(1, ic->value());
      break;
    }
    case CacheKind::InstanceOf: {
      IonInstanceOfIC* ic = ic_->asInstanceOfIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(
          1, TypedOrValueRegister(MIRType::Object, AnyRegister(ic->rhs())));
      break;
    }
    case CacheKind::UnaryArith: {
      IonUnaryArithIC* ic = ic_->asUnaryArithIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(TypedOrValueRegister(output));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->input());
      break;
    }
    case CacheKind::BinaryArith: {
      IonBinaryArithIC* ic = ic_->asBinaryArithIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(TypedOrValueRegister(output));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(1, ic->rhs());
      break;
    }
    case CacheKind::Compare: {
      IonCompareIC* ic = ic_->asCompareIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(1, ic->rhs());
      break;
    }
    // These IC kinds are not compiled by the Ion path.
    case CacheKind::Call:
    case CacheKind::TypeOf:
    case CacheKind::ToBool:
    case CacheKind::GetIntrinsic:
    case CacheKind::NewObject:
      MOZ_CRASH("Unsupported IC");
  }

  if (liveRegs_) {
    liveFloatRegs_ = LiveFloatRegisterSet(liveRegs_->fpus());
  }

  allocator.initAvailableRegs(available);
  allocator.initAvailableRegsAfterSpill();
  return true;
}
// Compile every CacheIR op to machine code, emit the failure paths, and
// link the result. Returns nullptr on failure (e.g. OOM).
JitCode* IonCacheIRCompiler::compile() {
  masm.setFramePushed(ionScript_->frameSize());
  if (cx_->runtime()->geckoProfiler().enabled()) {
    masm.enableProfilingInstrumentation();
  }

  allocator.fixupAliasedInputs(masm);

  // Emit code for each op in order; each emitter consumes its own
  // operands from |reader|.
  do {
    switch (reader.readOp()) {
#define DEFINE_OP(op, ...)           \
  case CacheOp::op:                  \
    if (!emit##op()) return nullptr; \
    break;
      CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP

      default:
        MOZ_CRASH("Invalid op");
    }
#ifdef DEBUG
    assertAllArgumentsConsumed();
#endif
    allocator.nextOp();
  } while (reader.more());

  // Code emitted above must not fall through here.
  masm.assumeUnreachable("Should have returned from IC");

  // Emit the failure paths. Each one restores state and jumps to the
  // next stub; the jump target is a placeholder patched below.
  for (size_t i = 0; i < failurePaths.length(); i++) {
    if (!emitFailurePath(i)) {
      return nullptr;
    }
    Register scratch = ic_->scratchRegisterForEntryJump();
    CodeOffset offset = masm.movWithPatch(ImmWord(-1), scratch);
    masm.jump(Address(scratch, 0));
    if (!nextCodeOffsets_.append(offset)) {
      return nullptr;
    }
  }

  Linker linker(masm, "getStubCode");
  Rooted<JitCode*> newStubCode(cx_, linker.newCode(cx_, CodeKind::Ion));
  if (!newStubCode) {
    cx_->recoverFromOutOfMemory();
    return nullptr;
  }

  // Patch the jump back into Ion code.
  rejoinOffset_.fixup(&masm);
  CodeLocationJump rejoinJump(newStubCode, rejoinOffset_);
  PatchJump(rejoinJump, ic_->rejoinLabel());

  // Replace the (-1) placeholders with the next stub's code pointer.
  for (CodeOffset offset : nextCodeOffsets_) {
    Assembler::PatchDataWithValueCheck(CodeLocationLabel(newStubCode, offset),
                                       ImmPtr(stub_->nextCodeRawPtr()),
                                       ImmPtr((void*)-1));
  }
  // Patch in this stub's own JitCode* if it was pushed for a VM call.
  if (stubJitCodeOffset_) {
    Assembler::PatchDataWithValueCheck(
        CodeLocationLabel(newStubCode, *stubJitCodeOffset_),
        ImmPtr(newStubCode.get()), ImmPtr((void*)-1));
  }

  return newStubCode;
}
bool IonCacheIRCompiler::emitGuardShape() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
ObjOperandId objId = reader.objOperandId();
Register obj = allocator.useRegister(masm, objId);
Shape* shape = shapeStubField(reader.stubOffset());
bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
Maybe<AutoScratchRegister> maybeScratch;
if (needSpectreMitigations) {
maybeScratch.emplace(allocator, masm);
}
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
if (needSpectreMitigations) {
masm.branchTestObjShape(Assembler::NotEqual, obj, shape, *maybeScratch, obj,
failure->label());
} else {
masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj, shape,
failure->label());
}
return true;
}
bool IonCacheIRCompiler::emitGuardGroup() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
ObjOperandId objId = reader.objOperandId();
Register obj = allocator.useRegister(masm, objId);
ObjectGroup* group = groupStubField(reader.stubOffset());
bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
Maybe<AutoScratchRegister> maybeScratch;
if (needSpectreMitigations) {
maybeScratch.emplace(allocator, masm);
}
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
if (needSpectreMitigations) {
masm.branchTestObjGroup(Assembler::NotEqual, obj, group, *maybeScratch, obj,
failure->label());
} else {
masm.branchTestObjGroupNoSpectreMitigations(Assembler::NotEqual, obj, group,
failure->label());
}
return true;
}
bool IonCacheIRCompiler::emitGuardProto() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
Register obj = allocator.useRegister(masm, reader.objOperandId());
JSObject* proto = objectStubField(reader.stubOffset());
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadObjProto(obj, scratch);
masm.branchPtr(Assembler::NotEqual, scratch, ImmGCPtr(proto),
failure->label());
return true;
}
// Guard the object is in the expected compartment. First verify the
// global wrapper hasn't been nuked (its handler would then be the
// DeadObjectProxy singleton), then compare the object's compartment.
bool IonCacheIRCompiler::emitGuardCompartment() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  JSObject* globalWrapper = objectStubField(reader.stubOffset());
  JS::Compartment* compartment = compartmentStubField(reader.stubOffset());
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Fail if the wrapper's handler is the dead-object handler.
  masm.movePtr(ImmGCPtr(globalWrapper), scratch);
  Address handlerAddr(scratch, ProxyObject::offsetOfHandler());
  masm.branchPtr(Assembler::Equal, handlerAddr,
                 ImmPtr(&DeadObjectProxy::singleton), failure->label());

  masm.branchTestObjCompartment(Assembler::NotEqual, obj, compartment, scratch,
                                failure->label());
  return true;
}
bool IonCacheIRCompiler::emitGuardAnyClass() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
ObjOperandId objId = reader.objOperandId();
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
const Class* clasp = classStubField(reader.stubOffset());
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
if (objectGuardNeedsSpectreMitigations(objId)) {
masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
failure->label());
} else {
masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
scratch, failure->label());
}
return true;
}
bool IonCacheIRCompiler::emitGuardHasProxyHandler() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
Register obj = allocator.useRegister(masm, reader.objOperandId());
const void* handler = proxyHandlerStubField(reader.stubOffset());
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Address handlerAddr(obj, ProxyObject::offsetOfHandler());
masm.branchPtr(Assembler::NotEqual, handlerAddr, ImmPtr(handler),
failure->label());
return true;
}
bool IonCacheIRCompiler::emitGuardSpecificObject() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
Register obj = allocator.useRegister(masm, reader.objOperandId());
JSObject* expected = objectStubField(reader.stubOffset());
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchPtr(Assembler::NotEqual, obj, ImmGCPtr(expected),
failure->label());
return true;
}
// Guard the input string equals the atom baked into the stub. Fast
// path is pointer identity; a non-identical atom or a length mismatch
// fails immediately, and the remaining case compares characters via an
// out-of-line C++ call.
bool IonCacheIRCompiler::emitGuardSpecificAtom() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  Register str = allocator.useRegister(masm, reader.stringOperandId());
  AutoScratchRegister scratch(allocator, masm);

  JSAtom* atom = &stringStubField(reader.stubOffset())->asAtom();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  masm.branchPtr(Assembler::Equal, str, ImmGCPtr(atom), &done);

  // The input is an atom if NON_ATOM_BIT is clear; atoms are
  // pointer-comparable, so a different pointer means a different string.
  masm.branchTest32(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                    Imm32(JSString::NON_ATOM_BIT), failure->label());

  // Strings of different length can never be equal.
  masm.branch32(Assembler::NotEqual, Address(str, JSString::offsetOfLength()),
                Imm32(atom->length()), failure->label());

  // Slow path: compare the characters in C++. Save volatile registers
  // around the ABI call.
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.PushRegsInMask(volatileRegs);

  masm.setupUnalignedABICall(scratch);
  masm.movePtr(ImmGCPtr(atom), scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(str);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, EqualStringsHelperPure));
  masm.mov(ReturnReg, scratch);

  // Keep the boolean result in |scratch| while restoring the volatile
  // registers.
  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(volatileRegs, ignore);
  masm.branchIfFalseBool(scratch, failure->label());

  masm.bind(&done);
  return true;
}
bool IonCacheIRCompiler::emitGuardSpecificSymbol() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
Register sym = allocator.useRegister(masm, reader.symbolOperandId());
JS::Symbol* expected = symbolStubField(reader.stubOffset());
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchPtr(Assembler::NotEqual, sym, ImmGCPtr(expected),
failure->label());
return true;
}
// LoadValueResult is only emitted for Baseline ICs; Ion must never
// compile it.
bool IonCacheIRCompiler::emitLoadValueResult() {
  MOZ_CRASH("Baseline-specific op");
}
bool IonCacheIRCompiler::emitLoadFixedSlotResult() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
int32_t offset = int32StubField(reader.stubOffset());
masm.loadTypedOrValue(Address(obj, offset), output);
return true;
}
bool IonCacheIRCompiler::emitLoadDynamicSlotResult() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
int32_t offset = int32StubField(reader.stubOffset());
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
masm.loadTypedOrValue(Address(scratch, offset), output);
return true;
}
// Guard the object has the getter/setter described by |shape|, by
// calling ObjectHasGetterSetterPure out of line.
bool IonCacheIRCompiler::emitGuardHasGetterSetter() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  Shape* shape = shapeStubField(reader.stubOffset());

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Save volatile registers around the ABI call, except the scratches
  // used for the arguments and the result.
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(scratch2);
  masm.PushRegsInMask(volatileRegs);

  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.movePtr(ImmGCPtr(shape), scratch2);
  masm.passABIArg(scratch2);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ObjectHasGetterSetterPure));
  masm.mov(ReturnReg, scratch1);
  masm.PopRegsInMask(volatileRegs);

  masm.branchIfFalseBool(scratch1, failure->label());
  return true;
}
// Call a scripted getter: build an IonICCall frame, push |this| plus
// one undefined value per formal argument, and invoke the target's JIT
// entry. Handles cross-realm targets by switching realms around the
// call.
bool IonCacheIRCompiler::emitCallScriptedGetterResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, reader.objOperandId());
  JSFunction* target = &objectStubField(reader.stubOffset())->as<JSFunction>();
  AutoScratchRegister scratch(allocator, masm);

  bool isCrossRealm = reader.readBool();
  MOZ_ASSERT(isCrossRealm == (cx_->realm() != target->realm()));

  allocator.discardStack(masm);

  uint32_t framePushedBefore = masm.framePushed();

  // Construct the IonICCallFrameLayout.
  uint32_t descriptor = MakeFrameDescriptor(
      masm.framePushed(), FrameType::IonJS, IonICCallFrameLayout::Size());
  pushStubCodePointer();
  masm.Push(Imm32(descriptor));
  masm.Push(ImmPtr(GetReturnAddressToIonCode(cx_)));

  // Pad the stack so that |this| + nargs arguments leave it aligned to
  // JitStackAlignment, then push undefined for each formal argument and
  // finally |this|.
  uint32_t argSize = (target->nargs() + 1) * sizeof(Value);
  uint32_t padding =
      ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
  MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
  MOZ_ASSERT(padding < JitStackAlignment);
  masm.reserveStack(padding);

  for (size_t i = 0; i < target->nargs(); i++) {
    masm.Push(UndefinedValue());
  }
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));

  if (isCrossRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  masm.movePtr(ImmGCPtr(target), scratch);

  descriptor = MakeFrameDescriptor(argSize + padding, FrameType::IonICCall,
                                   JitFrameLayout::Size());
  masm.Push(Imm32(0));  // argc
  masm.Push(scratch);   // callee
  masm.Push(Imm32(descriptor));

  // The stack must be aligned once the return address is pushed by the
  // call below.
  MOZ_ASSERT(((masm.framePushed() + sizeof(uintptr_t)) % JitStackAlignment) ==
             0);

  MOZ_ASSERT(target->hasJitEntry());
  masm.loadJitCodeRaw(scratch, scratch);
  masm.callJit(scratch);

  // Switch back to the caller's realm before reading the result;
  // ReturnReg does not alias the return operand (asserted below).
  if (isCrossRealm) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "ReturnReg available as scratch after scripted calls");
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  masm.storeCallResultValue(output);
  masm.freeStack(masm.framePushed() - framePushedBefore);
  return true;
}
// Call a native (C++) getter through a fake IonOOLNative exit frame,
// using the JSNative calling convention (cx, argc, vp).
bool IonCacheIRCompiler::emitCallNativeGetterResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, reader.objOperandId());
  JSFunction* target = &objectStubField(reader.stubOffset())->as<JSFunction>();
  MOZ_ASSERT(target->isNative());

  AutoScratchRegister argJSContext(allocator, masm);
  AutoScratchRegister argUintN(allocator, masm);
  AutoScratchRegister argVp(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  // Build the vp array: push |this| first, then the callee, so the
  // callee ends up at vp[0] and |this| at vp[1]. argc is 0.
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
  masm.Push(ObjectValue(*target));

  // Preload the ABI arguments into registers.
  masm.loadJSContext(argJSContext);
  masm.move32(Imm32(0), argUintN);
  masm.moveStackPtrTo(argVp.get());

  // Push argc and the stub code pointer for the exit frame.
  masm.Push(argUintN);
  pushStubCodePointer();

  if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
    return false;
  }
  masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLNative);

  if (target->realm() != cx_->realm()) {
    masm.switchToRealm(target->realm(), scratch);
  }

  // Make the ABI call to the native.
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argUintN);
  masm.passABIArg(argVp);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, target->native()),
                   MoveOp::GENERAL,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // A false return means an exception is pending.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  if (target->realm() != cx_->realm()) {
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  // Load the result value written by the native into the output.
  Address outparam(masm.getStackPointer(),
                   IonOOLNativeExitFrameLayout::offsetOfResult());
  masm.loadValue(outparam, output.valueReg());

  if (JitOptions.spectreJitToCxxCalls) {
    masm.speculationBarrier();
  }

  masm.adjustStack(IonOOLNativeExitFrameLayout::Size(0));
  return true;
}
// Get a property from a proxy with a constant id: call ProxyGetProperty
// out of line through a fake IonOOLProxy exit frame. The id, the proxy
// object and the result slot are all pushed on the stack and passed by
// address.
bool IonCacheIRCompiler::emitCallProxyGetResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, reader.objOperandId());
  jsid id = idStubField(reader.stubOffset());

  AutoScratchRegisterMaybeOutput argJSContext(allocator, masm, output);
  AutoScratchRegister argProxy(allocator, masm);
  AutoScratchRegister argId(allocator, masm);
  AutoScratchRegister argVp(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  pushStubCodePointer();

  // Result slot: push undefined and point argVp at it.
  masm.Push(UndefinedValue());
  masm.moveStackPtrTo(argVp.get());

  // Push the id and point argId at it.
  masm.Push(id, scratch);
  masm.moveStackPtrTo(argId.get());

  // Push the proxy object and point argProxy at it.
  masm.Push(obj);
  masm.moveStackPtrTo(argProxy.get());

  masm.loadJSContext(argJSContext);

  if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
    return false;
  }
  masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLProxy);

  // ProxyGetProperty(cx, proxy, id, vp).
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argProxy);
  masm.passABIArg(argId);
  masm.passABIArg(argVp);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ProxyGetProperty),
                   MoveOp::GENERAL,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // A false return means an exception is pending.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  // Load the result value from the stack into the output.
  Address outparam(masm.getStackPointer(),
                   IonOOLProxyExitFrameLayout::offsetOfResult());
  masm.loadValue(outparam, output.valueReg());

  if (JitOptions.spectreJitToCxxCalls) {
    masm.speculationBarrier();
  }

  masm.adjustStack(IonOOLProxyExitFrameLayout::Size());
  return true;
}
// Get a property from a proxy with a dynamic id, via a VM call to
// ProxyGetPropertyByValue.
bool IonCacheIRCompiler::emitCallProxyGetByValueResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, reader.objOperandId());
  ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());

  allocator.discardStack(masm);
  prepareVMCall(masm, save);

  // Arguments are pushed in reverse order of the Fn parameter list.
  masm.Push(idVal);
  masm.Push(obj);

  using Fn =
      bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
  callVM<Fn, ProxyGetPropertyByValue>(masm);

  masm.storeCallResultValue(output);
  return true;
}
// 'in' / hasOwnProperty on a proxy: VM call to ProxyHas or ProxyHasOwn
// depending on the hasOwn flag recorded in the CacheIR.
bool IonCacheIRCompiler::emitCallProxyHasPropResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, reader.objOperandId());
  ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
  bool hasOwn = reader.readBool();

  allocator.discardStack(masm);
  prepareVMCall(masm, save);

  // Arguments are pushed in reverse order of the Fn parameter list.
  masm.Push(idVal);
  masm.Push(obj);

  using Fn =
      bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
  if (hasOwn) {
    callVM<Fn, ProxyHasOwn>(masm);
  } else {
    callVM<Fn, ProxyHas>(masm);
  }

  masm.storeCallResultValue(output);
  return true;
}
// Get an indexed element from a native object via a VM call to
// NativeGetElement.
bool IonCacheIRCompiler::emitCallNativeGetElementResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, reader.objOperandId());
  Register index = allocator.useRegister(masm, reader.int32OperandId());

  allocator.discardStack(masm);
  prepareVMCall(masm, save);

  // Arguments pushed in reverse order; the object is pushed twice, once
  // boxed for the HandleValue parameter and once for the object handle.
  masm.Push(index);
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleNativeObject, HandleValue, int32_t,
                      MutableHandleValue);
  callVM<Fn, NativeGetElement>(masm);

  masm.storeCallResultValue(output);
  return true;
}
// Load an unboxed property of the given value type from a fixed byte
// offset into the output register.
bool IonCacheIRCompiler::emitLoadUnboxedPropertyResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());

  JSValueType fieldType = reader.jsValueType();
  int32_t fieldOffset = int32StubField(reader.stubOffset());
  masm.loadUnboxedProperty(Address(obj, fieldOffset), fieldType, output);
  return true;
}
// The following frame-related ops are only emitted for Baseline ICs;
// Ion stub compilation must never encounter them.
bool IonCacheIRCompiler::emitGuardFrameHasNoArgumentsObject() {
  MOZ_CRASH("Baseline-specific op");
}
bool IonCacheIRCompiler::emitLoadFrameCalleeResult() {
  MOZ_CRASH("Baseline-specific op");
}
bool IonCacheIRCompiler::emitLoadFrameNumActualArgsResult() {
  MOZ_CRASH("Baseline-specific op");
}
bool IonCacheIRCompiler::emitLoadFrameArgumentResult() {
  MOZ_CRASH("Baseline-specific op");
}
bool IonCacheIRCompiler::emitLoadEnvironmentFixedSlotResult() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
int32_t offset = int32StubField(reader.stubOffset());
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Address slot(obj, offset);
masm.branchTestMagic(Assembler::Equal, slot, failure->label());
masm.loadTypedOrValue(slot, output);
return true;
}
bool IonCacheIRCompiler::emitLoadEnvironmentDynamicSlotResult() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
int32_t offset = int32StubField(reader.stubOffset());
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
Address slot(scratch, offset);
masm.branchTestMagic(Assembler::Equal, slot, failure->label());
masm.loadTypedOrValue(slot, output);
return true;
}
// LoadStringResult is not emitted for Ion ICs.
bool IonCacheIRCompiler::emitLoadStringResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  MOZ_CRASH("not used in ion");
}
// Implement a string-split result by calling the StringSplitHelper VM
// function, leaving the resulting value in the IC's output register.
bool IonCacheIRCompiler::emitCallStringSplitResult() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
AutoSaveLiveRegisters save(*this);
AutoOutputRegister output(*this);
Register str = allocator.useRegister(masm, reader.stringOperandId());
Register sep = allocator.useRegister(masm, reader.stringOperandId());
// The expected group of the result array, baked into the stub data.
ObjectGroup* group = groupStubField(reader.stubOffset());
allocator.discardStack(masm);
prepareVMCall(masm, save);
// VM-call arguments are pushed in reverse order of the Fn signature.
masm.Push(str);
masm.Push(sep);
masm.Push(ImmGCPtr(group));
// INT32_MAX limit: effectively no element limit for the split.
masm.Push(Imm32(INT32_MAX));
using Fn = bool (*)(JSContext*, HandleString, HandleString, HandleObjectGroup,
uint32_t limit, MutableHandleValue);
callVM<Fn, StringSplitHelper>(masm);
masm.storeCallResultValue(output);
return true;
}
// Compare two strings with the given JSOp, producing a boolean in the typed
// output register. Tries an inline fast path first and falls back to a
// StringsEqual VM call when the inline path cannot decide.
bool IonCacheIRCompiler::emitCompareStringResult() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
AutoSaveLiveRegisters save(*this);
AutoOutputRegister output(*this);
Register left = allocator.useRegister(masm, reader.stringOperandId());
Register right = allocator.useRegister(masm, reader.stringOperandId());
JSOp op = reader.jsop();
allocator.discardStack(masm);
Label slow, done;
// The output is a typed boolean register, never a boxed Value.
MOZ_ASSERT(!output.hasValue());
masm.compareStrings(op, left, right, output.typedReg().gpr(), &slow);
masm.jump(&done);
masm.bind(&slow);
prepareVMCall(masm, save);
// Arguments pushed in reverse order of the Fn signature.
masm.Push(right);
masm.Push(left);
using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
// Equality ops share one helper instantiation; inequality ops the other.
if (op == JSOP_EQ || op == JSOP_STRICTEQ) {
callVM<Fn, jit::StringsEqual<true>>(masm);
} else {
callVM<Fn, jit::StringsEqual<false>>(masm);
}
masm.storeCallBoolResult(output.typedReg().gpr());
masm.bind(&done);
return true;
}
// Slow-path predicate (called via ABI from jitcode): does `group`'s type
// information for property `id` admit the value `*v`?
static bool GroupHasPropertyTypes(ObjectGroup* group, jsid* id, Value* v) {
  AutoUnsafeCallWithABI unsafe;

  // With unknown properties any value is acceptable.
  if (group->unknownPropertiesDontCheckGeneration()) {
    return true;
  }

  // No recorded type set for this property: nothing to violate.
  HeapTypeSet* types = group->maybeGetPropertyDontCheckGeneration(*id);
  if (!types) {
    return true;
  }

  // A constant property can't accept a new value; otherwise check the set.
  return types->nonConstantProperty() &&
         types->hasType(TypeSet::GetValueType(*v));
}
// Emit a type-barrier check that `val` is compatible with the type set of
// property (typeCheckInfo->group(), typeCheckInfo->id()). On mismatch, jumps
// to `failures` with the stack restored. Fast path: inline guardTypeSet (or
// a static decision for constants/typed primitives). Slow path: ABI call to
// GroupHasPropertyTypes. No-op when no check is needed.
static void EmitCheckPropertyTypes(MacroAssembler& masm,
const PropertyTypeCheckInfo* typeCheckInfo,
Register obj, const ConstantOrRegister& val,
const LiveRegisterSet& liveRegs,
Label* failures) {
// No type-check info recorded: nothing to verify.
if (!typeCheckInfo->isSet()) {
return;
}
ObjectGroup* group = typeCheckInfo->group();
AutoSweepObjectGroup sweep(group);
// Unknown properties accept any value.
if (group->unknownProperties(sweep)) {
return;
}
jsid id = typeCheckInfo->id();
HeapTypeSet* propTypes = group->maybeGetProperty(sweep, id);
// An unknown type set also accepts any value.
if (propTypes && propTypes->unknown()) {
return;
}
// Reuse the object's register as a scratch; its value is saved on the stack
// and restored before every exit from this code.
masm.Push(obj);
Register scratch1 = obj;
Register objScratch = InvalidReg;
Register spectreRegToZero = InvalidReg;
// guardTypeSet needs a second scratch (and a Spectre-zeroing register) only
// when the type set contains specific object types.
if (propTypes && !propTypes->unknownObject() &&
propTypes->getObjectCount() > 0) {
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
if (!val.constant()) {
TypedOrValueRegister valReg = val.reg();
if (valReg.hasValue()) {
regs.take(valReg.valueReg());
spectreRegToZero = valReg.valueReg().payloadOrValueReg();
} else if (!valReg.typedReg().isFloat()) {
regs.take(valReg.typedReg().gpr());
spectreRegToZero = valReg.typedReg().gpr();
}
}
regs.take(scratch1);
objScratch = regs.takeAny();
masm.Push(objScratch);
}
bool checkTypeSet = true;
Label failedFastPath;
// A constant (frozen) property can never accept a store: go straight to the
// slow path, which re-checks the up-to-date type information.
if (propTypes && !propTypes->nonConstantProperty()) {
masm.jump(&failedFastPath);
}
if (val.constant()) {
// Constant value: decide at compile time; no runtime set check needed.
if (!propTypes || !propTypes->hasType(TypeSet::GetValueType(val.value()))) {
masm.jump(&failedFastPath);
}
checkTypeSet = false;
} else {
TypedOrValueRegister reg = val.reg();
// A typed non-object register has a statically known primitive type, so
// membership in the set is also decidable at compile time.
if (reg.hasTyped() && reg.type() != MIRType::Object) {
JSValueType valType = ValueTypeFromMIRType(reg.type());
if (!propTypes || !propTypes->hasType(TypeSet::PrimitiveType(valType))) {
masm.jump(&failedFastPath);
}
checkTypeSet = false;
}
}
Label done;
if (checkTypeSet) {
TypedOrValueRegister valReg = val.reg();
if (propTypes) {
// The type set is baked into jitcode; trigger the read barrier first.
TypeSet::readBarrier(propTypes);
masm.guardTypeSet(valReg, propTypes, BarrierKind::TypeSet, scratch1,
objScratch, spectreRegToZero, &failedFastPath);
masm.jump(&done);
} else {
masm.jump(&failedFastPath);
}
}
// Slow path: call GroupHasPropertyTypes via ABI with pointers to stack
// copies of val and id. Only emitted if some path above can reach it.
if (failedFastPath.used()) {
masm.bind(&failedFastPath);
AllocatableRegisterSet regs(GeneralRegisterSet::Volatile(),
liveRegs.fpus());
LiveRegisterSet save(regs.asLiveSet());
masm.PushRegsInMask(save);
regs.takeUnchecked(scratch1);
Register scratch2 = regs.takeAnyGeneral();
// Push val and id and pass their stack addresses as Value*/jsid*.
masm.Push(val);
masm.moveStackPtrTo(scratch2);
Register scratch3 = regs.takeAnyGeneral();
masm.Push(id, scratch3);
masm.moveStackPtrTo(scratch3);
masm.setupUnalignedABICall(scratch1);
masm.movePtr(ImmGCPtr(group), scratch1);
masm.passABIArg(scratch1);
masm.passABIArg(scratch3);
masm.passABIArg(scratch2);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, GroupHasPropertyTypes));
masm.mov(ReturnReg, scratch1);
// Pop the val + id temporaries pushed above.
masm.adjustStack(sizeof(Value) + sizeof(jsid));
// Restore everything except scratch1, which holds the boolean result.
LiveRegisterSet ignore;
ignore.add(scratch1);
masm.PopRegsInMaskIgnore(save, ignore);
masm.branchIfTrueBool(scratch1, &done);
// Failure: restore the saved scratch/object registers, then bail.
if (objScratch != InvalidReg) {
masm.pop(objScratch);
}
masm.pop(obj);
masm.jump(failures);
}
masm.bind(&done);
// Success: restore the saved scratch/object registers.
if (objScratch != InvalidReg) {
masm.Pop(objScratch);
}
masm.Pop(obj);
}
// Store a value into a fixed slot of a native object, with an optional
// type-barrier check and pre/post GC barriers.
bool IonCacheIRCompiler::emitStoreFixedSlot() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
Register obj = allocator.useRegister(masm, reader.objOperandId());
int32_t offset = int32StubField(reader.stubOffset());
ConstantOrRegister val =
allocator.useConstantOrRegister(masm, reader.valOperandId());
// A scratch register is only needed for the post-barrier.
Maybe<AutoScratchRegister> scratch;
if (needsPostBarrier()) {
scratch.emplace(allocator, masm);
}
// Verify the stored value is admitted by the property's type set.
if (typeCheckInfo_->isSet()) {
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, val, *liveRegs_,
failure->label());
}
Address slot(obj, offset);
// Pre-barrier on the old value before overwriting it.
EmitPreBarrier(masm, slot, MIRType::Value);
masm.storeConstantOrRegister(val, slot);
if (needsPostBarrier()) {
emitPostBarrierSlot(obj, val, scratch.ref());
}
return true;
}
// Store a value into a dynamic (out-of-line) slot of a native object, with
// an optional type-barrier check and pre/post GC barriers.
bool IonCacheIRCompiler::emitStoreDynamicSlot() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
Register obj = allocator.useRegister(masm, reader.objOperandId());
int32_t offset = int32StubField(reader.stubOffset());
ConstantOrRegister val =
allocator.useConstantOrRegister(masm, reader.valOperandId());
AutoScratchRegister scratch(allocator, masm);
// Verify the stored value is admitted by the property's type set.
if (typeCheckInfo_->isSet()) {
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, val, *liveRegs_,
failure->label());
}
// Dynamic slots live in a separately-allocated array; load its base first.
masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
Address slot(scratch, offset);
// Pre-barrier on the old value before overwriting it.
EmitPreBarrier(masm, slot, MIRType::Value);
masm.storeConstantOrRegister(val, slot);
if (needsPostBarrier()) {
emitPostBarrierSlot(obj, val, scratch);
}
return true;
}
// Shared implementation for the three add-property-and-store ops: runs the
// type-barrier check, optionally grows the dynamic slots (pure ABI call),
// optionally changes the object's group, updates the shape, and stores the
// value into the new fixed or dynamic slot.
bool IonCacheIRCompiler::emitAddAndStoreSlotShared(CacheOp op) {
JitSpew(JitSpew_Codegen, __FUNCTION__);
Register obj = allocator.useRegister(masm, reader.objOperandId());
int32_t offset = int32StubField(reader.stubOffset());
ConstantOrRegister val =
allocator.useConstantOrRegister(masm, reader.valOperandId());
AutoScratchRegister scratch1(allocator, masm);
// A second scratch is only needed to pass the new slot count below.
Maybe<AutoScratchRegister> scratch2;
if (op == CacheOp::AllocateAndStoreDynamicSlot) {
scratch2.emplace(allocator, masm);
}
bool changeGroup = reader.readBool();
ObjectGroup* newGroup = groupStubField(reader.stubOffset());
Shape* newShape = shapeStubField(reader.stubOffset());
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, val, *liveRegs_,
failure->label());
if (op == CacheOp::AllocateAndStoreDynamicSlot) {
// Grow the dynamic slots via NativeObject::growSlotsPure (infallible GC
// wise; returns false on OOM, which bails to the failure path).
int32_t numNewSlots = int32StubField(reader.stubOffset());
MOZ_ASSERT(numNewSlots > 0);
LiveRegisterSet save(GeneralRegisterSet::Volatile(),
liveVolatileFloatRegs());
masm.PushRegsInMask(save);
masm.setupUnalignedABICall(scratch1);
masm.loadJSContext(scratch1);
masm.passABIArg(scratch1);
masm.passABIArg(obj);
masm.move32(Imm32(numNewSlots), scratch2.ref());
masm.passABIArg(scratch2.ref());
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NativeObject::growSlotsPure));
masm.mov(ReturnReg, scratch1);
// Restore everything except scratch1, which holds the success flag.
LiveRegisterSet ignore;
ignore.add(scratch1);
masm.PopRegsInMaskIgnore(save, ignore);
masm.branchIfFalseBool(scratch1, failure->label());
}
if (changeGroup) {
// Only swap in the new group if the object still has an addendum-free
// group; otherwise keep the current group.
Label noGroupChange;
masm.branchIfObjGroupHasNoAddendum(obj, scratch1, &noGroupChange);
masm.storeObjGroup(newGroup, obj,
[](MacroAssembler& masm, const Address& addr) {
EmitPreBarrier(masm, addr, MIRType::ObjectGroup);
});
masm.bind(&noGroupChange);
}
masm.storeObjShape(newShape, obj,
[](MacroAssembler& masm, const Address& addr) {
EmitPreBarrier(masm, addr, MIRType::Shape);
});
// Finally store the value in its (possibly freshly allocated) slot. No
// pre-barrier here: the slot is new, so it holds no previous GC thing.
if (op == CacheOp::AddAndStoreFixedSlot) {
Address slot(obj, offset);
masm.storeConstantOrRegister(val, slot);
} else {
MOZ_ASSERT(op == CacheOp::AddAndStoreDynamicSlot ||
op == CacheOp::AllocateAndStoreDynamicSlot);
masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
Address slot(scratch1, offset);
masm.storeConstantOrRegister(val, slot);
}
if (needsPostBarrier()) {
emitPostBarrierSlot(obj, val, scratch1);
}
return true;
}
// Add a property stored in a fixed slot; delegates to the shared helper.
bool IonCacheIRCompiler::emitAddAndStoreFixedSlot() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
return emitAddAndStoreSlotShared(CacheOp::AddAndStoreFixedSlot);
}
// Add a property stored in an existing dynamic slot; delegates to the
// shared helper.
bool IonCacheIRCompiler::emitAddAndStoreDynamicSlot() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
return emitAddAndStoreSlotShared(CacheOp::AddAndStoreDynamicSlot);
}
// Add a property that requires growing the dynamic slots first; delegates
// to the shared helper.
bool IonCacheIRCompiler::emitAllocateAndStoreDynamicSlot() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
return emitAddAndStoreSlotShared(CacheOp::AllocateAndStoreDynamicSlot);
}
// Store a value into an unboxed object's property at a constant offset,
// with type-barrier checks (object-typed fields only) and GC barriers.
bool IonCacheIRCompiler::emitStoreUnboxedProperty() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
Register obj = allocator.useRegister(masm, reader.objOperandId());
JSValueType fieldType = reader.jsValueType();
int32_t offset = int32StubField(reader.stubOffset());
ConstantOrRegister val =
allocator.useConstantOrRegister(masm, reader.valOperandId());
// A scratch register is only needed for the post-barrier, and only field
// types that can hold GC pointers need one.
Maybe<AutoScratchRegister> scratch;
if (needsPostBarrier() && UnboxedTypeNeedsPostBarrier(fieldType)) {
scratch.emplace(allocator, masm);
}
// Only object-typed fields carry property type information to check.
if (fieldType == JSVAL_TYPE_OBJECT && typeCheckInfo_->isSet()) {
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, val, *liveRegs_,
failure->label());
}
Address fieldAddr(obj, offset);
// Pre-barrier tailored to the unboxed field type.
EmitICUnboxedPreBarrier(masm, fieldAddr, fieldType);
masm.storeUnboxedProperty(fieldAddr, fieldType, val, nullptr);
if (needsPostBarrier() && UnboxedTypeNeedsPostBarrier(fieldType)) {
emitPostBarrierSlot(obj, val, scratch.ref());
}
return true;
}
// Store a reference-typed value (object/any/string) into a typed object's
// field. String fields need no type-set check or post-barrier here.
bool IonCacheIRCompiler::emitStoreTypedObjectReferenceProperty() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
Register obj = allocator.useRegister(masm, reader.objOperandId());
int32_t offset = int32StubField(reader.stubOffset());
TypedThingLayout layout = reader.typedThingLayout();
ReferenceType type = reader.referenceTypeDescrType();
ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
// Non-string reference fields may carry property type info to verify.
if (type != ReferenceType::TYPE_STRING) {
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, TypedOrValueRegister(val),
*liveRegs_, failure->label());
}
// Resolve the typed object's data pointer for this layout.
LoadTypedThingData(masm, layout, obj, scratch1);
Address dest(scratch1, offset);
emitStoreTypedObjectReferenceProp(val, type, dest, scratch2);
if (needsPostBarrier() && type != ReferenceType::TYPE_STRING) {
emitPostBarrierSlot(obj, val, scratch1);
}
return true;
}
// Store a scalar value into a typed object's field, converting the boxed
// value to the target scalar type; bails on unconvertible values.
bool IonCacheIRCompiler::emitStoreTypedObjectScalarProperty() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
Register obj = allocator.useRegister(masm, reader.objOperandId());
int32_t offset = int32StubField(reader.stubOffset());
TypedThingLayout layout = reader.typedThingLayout();
Scalar::Type type = reader.scalarType();
ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Resolve the typed object's data pointer for this layout.
LoadTypedThingData(masm, layout, obj, scratch1);
Address dest(scratch1, offset);
StoreToTypedArray(cx_, masm, type, val, dest, scratch2, failure->label());
return true;
}
// Store `value` into a dense element, honoring the elements'
// CONVERT_DOUBLE_ELEMENTS flag: int32 values are stored as doubles when the
// flag is set, so the elements stay uniformly double.
static void EmitStoreDenseElement(MacroAssembler& masm,
const ConstantOrRegister& value,
Register elements,
BaseObjectElementIndex target) {
Address elementsFlags(elements, ObjectElements::offsetOfFlags());
if (value.constant()) {
// Constant case: only int32 constants can need conversion; emit both the
// converted and unconverted stores guarded by the flag test.
Value v = value.value();
Label done;
if (v.isInt32()) {
Label dontConvert;
masm.branchTest32(Assembler::Zero, elementsFlags,
Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS),
&dontConvert);
masm.storeValue(DoubleValue(v.toInt32()), target);
masm.jump(&done);
masm.bind(&dontConvert);
}
masm.storeValue(v, target);
masm.bind(&done);
return;
}
TypedOrValueRegister reg = value.reg();
// A typed register that is not int32 can never need conversion.
if (reg.hasTyped() && reg.type() != MIRType::Int32) {
masm.storeTypedOrValue(reg, target);
return;
}
Label convert, storeValue, done;
masm.branchTest32(Assembler::NonZero, elementsFlags,
Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS), &convert);
masm.bind(&storeValue);
masm.storeTypedOrValue(reg, target);
masm.jump(&done);
masm.bind(&convert);
ScratchDoubleScope fpscratch(masm);
if (reg.hasValue()) {
// Boxed value: only convert if it actually holds an int32.
masm.branchTestInt32(Assembler::NotEqual, reg.valueReg(), &storeValue);
masm.int32ValueToDouble(reg.valueReg(), fpscratch);
masm.storeDouble(fpscratch, target);
} else {
MOZ_ASSERT(reg.type() == MIRType::Int32);
masm.convertInt32ToDouble(reg.typedReg().gpr(), fpscratch);
masm.storeDouble(fpscratch, target);
}
masm.bind(&done);
}
// Debug-only sanity check: Ion IC code must never operate on an elements
// header with the COPY_ON_WRITE flag set. Emits nothing in release builds.
static void EmitAssertNoCopyOnWriteElements(MacroAssembler& masm,
                                            Register elementsReg) {
#ifdef DEBUG
  Label notCopyOnWrite;
  masm.branchTest32(Assembler::Zero,
                    Address(elementsReg, ObjectElements::offsetOfFlags()),
                    Imm32(ObjectElements::COPY_ON_WRITE), &notCopyOnWrite);
  masm.assumeUnreachable("Unexpected copy-on-write elements in Ion IC!");
  masm.bind(&notCopyOnWrite);
#endif
}
// Store into an existing dense element. Bails on out-of-bounds indices,
// holes (magic elements), and frozen elements.
bool IonCacheIRCompiler::emitStoreDenseElement() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
Register obj = allocator.useRegister(masm, reader.objOperandId());
Register index = allocator.useRegister(masm, reader.int32OperandId());
ConstantOrRegister val =
allocator.useConstantOrRegister(masm, reader.valOperandId());
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, val, *liveRegs_,
failure->label());
masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);
EmitAssertNoCopyOnWriteElements(masm, scratch1);
// Spectre-safe bounds check against the initialized length.
Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
masm.spectreBoundsCheck32(index, initLength, scratch2, failure->label());
BaseObjectElementIndex element(scratch1, index);
// Holes are represented by a magic value; a hole store needs the add path.
masm.branchTestMagic(Assembler::Equal, element, failure->label());
// Frozen arrays cannot be written to.
Address flags(scratch1, ObjectElements::offsetOfFlags());
masm.branchTest32(Assembler::NonZero, flags, Imm32(ObjectElements::FROZEN),
failure->label());
EmitPreBarrier(masm, element, MIRType::Value);
EmitStoreDenseElement(masm, val, scratch1, element);
if (needsPostBarrier()) {
emitPostBarrierElement(obj, val, scratch1, index);
}
return true;
}
// Store into a dense element, also handling the append case (index ==
// initialized length): grows the capacity via an ABI call when needed and
// bumps initialized length / length. Bails for indices past the initialized
// length other than a direct append.
bool IonCacheIRCompiler::emitStoreDenseElementHole() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
Register obj = allocator.useRegister(masm, reader.objOperandId());
Register index = allocator.useRegister(masm, reader.int32OperandId());
ConstantOrRegister val =
allocator.useConstantOrRegister(masm, reader.valOperandId());
// The handle-add flag is consumed to keep the reader in sync; its value is
// not needed by this compiler.
reader.readBool();
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, val, *liveRegs_,
failure->label());
masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);
EmitAssertNoCopyOnWriteElements(masm, scratch1);
Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
BaseObjectElementIndex element(scratch1, index);
Label inBounds, outOfBounds;
Register spectreTemp = scratch2;
masm.spectreBoundsCheck32(index, initLength, spectreTemp, &outOfBounds);
masm.jump(&inBounds);
// Out of bounds: only index == initialized length (append) is supported.
masm.bind(&outOfBounds);
masm.branch32(Assembler::NotEqual, initLength, index, failure->label());
Label capacityOk, allocElement;
Address capacity(scratch1, ObjectElements::offsetOfCapacity());
masm.spectreBoundsCheck32(index, capacity, spectreTemp, &allocElement);
masm.jump(&capacityOk);
// Capacity exhausted: grow via NativeObject::addDenseElementPure, unless
// the array length is non-writable.
masm.bind(&allocElement);
Address elementsFlags(scratch1, ObjectElements::offsetOfFlags());
masm.branchTest32(Assembler::NonZero, elementsFlags,
Imm32(ObjectElements::NONWRITABLE_ARRAY_LENGTH),
failure->label());
// scratch1 is excluded from the save set: it receives the call result.
LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
save.takeUnchecked(scratch1);
masm.PushRegsInMask(save);
masm.setupUnalignedABICall(scratch1);
masm.loadJSContext(scratch1);
masm.passABIArg(scratch1);
masm.passABIArg(obj);
masm.callWithABI(
JS_FUNC_TO_DATA_PTR(void*, NativeObject::addDenseElementPure));
masm.mov(ReturnReg, scratch1);
masm.PopRegsInMask(save);
masm.branchIfFalseBool(scratch1, failure->label());
// The elements pointer may have moved during the growth; reload it.
masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);
masm.bind(&capacityOk);
// Append: bump the initialized length, and the length too if needed.
masm.add32(Imm32(1), initLength);
Label skipIncrementLength;
Address length(scratch1, ObjectElements::offsetOfLength());
masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
masm.add32(Imm32(1), length);
masm.bind(&skipIncrementLength);
// Appended slots are fresh, so the pre-barrier is skipped on that path.
Label doStore;
masm.jump(&doStore);
masm.bind(&inBounds);
EmitPreBarrier(masm, element, MIRType::Value);
masm.bind(&doStore);
EmitStoreDenseElement(masm, val, scratch1, element);
if (needsPostBarrier()) {
emitPostBarrierElement(obj, val, scratch1, index);
}
return true;
}
// Array.prototype.push is handled elsewhere for Ion; this op must not occur.
bool IonCacheIRCompiler::emitArrayPush() {
MOZ_ASSERT_UNREACHABLE("emitArrayPush not supported for IonCaches.");
return false;
}
// Store a value into a typed array / typed object element, converting to the
// element's scalar type. Out-of-bounds stores either bail (handleOOB false)
// or are silently ignored (handleOOB true).
bool IonCacheIRCompiler::emitStoreTypedElement() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
Register obj = allocator.useRegister(masm, reader.objOperandId());
Register index = allocator.useRegister(masm, reader.int32OperandId());
ConstantOrRegister val =
allocator.useConstantOrRegister(masm, reader.valOperandId());
TypedThingLayout layout = reader.typedThingLayout();
Scalar::Type arrayType = reader.scalarType();
bool handleOOB = reader.readBool();
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Label done;
// Bounds check: out-of-bounds either skips the store or bails.
LoadTypedThingLength(masm, layout, obj, scratch1);
masm.spectreBoundsCheck32(index, scratch1, scratch2,
handleOOB ? &done : failure->label());
LoadTypedThingData(masm, layout, obj, scratch1);
BaseIndex dest(scratch1, index,
ScaleFromElemWidth(Scalar::byteSize(arrayType)));
// Float conversions need FP temps reserved by the IC itself.
FloatRegister maybeTempDouble = ic_->asSetPropertyIC()->maybeTempDouble();
FloatRegister maybeTempFloat32 = ic_->asSetPropertyIC()->maybeTempFloat32();
MOZ_ASSERT(maybeTempDouble != InvalidFloatReg);
MOZ_ASSERT_IF(jit::hasUnaliasedDouble(), maybeTempFloat32 != InvalidFloatReg);
if (arrayType == Scalar::Float32) {
FloatRegister tempFloat =
hasUnaliasedDouble() ? maybeTempFloat32 : maybeTempDouble;
if (!masm.convertConstantOrRegisterToFloat(cx_, val, tempFloat,
failure->label())) {
return false;
}
masm.storeToTypedFloatArray(arrayType, tempFloat, dest);
} else if (arrayType == Scalar::Float64) {
if (!masm.convertConstantOrRegisterToDouble(cx_, val, maybeTempDouble,
failure->label())) {
return false;
}
masm.storeToTypedFloatArray(arrayType, maybeTempDouble, dest);
} else {
// Integer element types: clamp (Uint8Clamped) or truncate to int32.
Register valueToStore = scratch2;
if (arrayType == Scalar::Uint8Clamped) {
if (!masm.clampConstantOrRegisterToUint8(
cx_, val, maybeTempDouble, valueToStore, failure->label())) {
return false;
}
} else {
if (!masm.truncateConstantOrRegisterToInt32(
cx_, val, maybeTempDouble, valueToStore, failure->label())) {
return false;
}
}
masm.storeToTypedIntArray(arrayType, valueToStore, dest);
}
masm.bind(&done);
return true;
}
// Invoke a native (C++) property setter: builds the vp array (callee, this,
// value) on the stack, enters an IonOOLNative fake exit frame, switches
// realms if needed, and calls the native through the ABI.
bool IonCacheIRCompiler::emitCallNativeSetter() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
AutoSaveLiveRegisters save(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
JSFunction* target = &objectStubField(reader.stubOffset())->as<JSFunction>();
MOZ_ASSERT(target->isNative());
ConstantOrRegister val =
allocator.useConstantOrRegister(masm, reader.valOperandId());
AutoScratchRegister argJSContext(allocator, masm);
AutoScratchRegister argVp(allocator, masm);
AutoScratchRegister argUintN(allocator, masm);
AutoScratchRegister scratch(allocator, masm);
allocator.discardStack(masm);
// Build vp[2] = value, vp[1] = |this|, vp[0] = callee; argVp points at it.
masm.Push(val);
masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
masm.Push(ObjectValue(*target));
masm.moveStackPtrTo(argVp.get());
masm.loadJSContext(argJSContext);
// A setter receives exactly one argument.
masm.move32(Imm32(1), argUintN);
masm.Push(argUintN);
pushStubCodePointer();
if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
return false;
}
masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLNative);
// Cross-realm setters run in their own realm; switch back afterwards.
if (target->realm() != cx_->realm()) {
masm.switchToRealm(target->realm(), scratch);
}
masm.setupUnalignedABICall(scratch);
masm.passABIArg(argJSContext);
masm.passABIArg(argUintN);
masm.passABIArg(argVp);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, target->native()),
MoveOp::GENERAL,
CheckUnsafeCallWithABI::DontCheckHasExitFrame);
// A false return means an exception is pending.
masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
if (target->realm() != cx_->realm()) {
masm.switchToRealm(cx_->realm(), ReturnReg);
}
// Pop the exit frame and the vp array.
masm.adjustStack(IonOOLNativeExitFrameLayout::Size(1));
return true;
}
// Invoke a scripted (JS) property setter: builds an IonICCall frame with the
// argument Values (padding missing formals with undefined), aligns the
// stack, and calls the target's JIT entry.
bool IonCacheIRCompiler::emitCallScriptedSetter() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
AutoSaveLiveRegisters save(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
JSFunction* target = &objectStubField(reader.stubOffset())->as<JSFunction>();
ConstantOrRegister val =
allocator.useConstantOrRegister(masm, reader.valOperandId());
bool isCrossRealm = reader.readBool();
MOZ_ASSERT(isCrossRealm == (cx_->realm() != target->realm()));
AutoScratchRegister scratch(allocator, masm);
allocator.discardStack(masm);
uint32_t framePushedBefore = masm.framePushed();
// Outer descriptor: the IonJS frame this IC call sits inside.
uint32_t descriptor = MakeFrameDescriptor(
masm.framePushed(), FrameType::IonJS, IonICCallFrameLayout::Size());
pushStubCodePointer();
masm.Push(Imm32(descriptor));
masm.Push(ImmPtr(GetReturnAddressToIonCode(cx_)));
// The setter gets at least one argument (the value); pad to nargs.
size_t numArgs = Max<size_t>(1, target->nargs());
uint32_t argSize = (numArgs + 1) * sizeof(Value);
// Pad so the callee frame is JitStackAlignment-aligned.
uint32_t padding =
ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
MOZ_ASSERT(padding < JitStackAlignment);
masm.reserveStack(padding);
// Extra formals beyond the single value argument are undefined.
for (size_t i = 1; i < target->nargs(); i++) {
masm.Push(UndefinedValue());
}
masm.Push(val);
masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
if (isCrossRealm) {
masm.switchToRealm(target->realm(), scratch);
}
masm.movePtr(ImmGCPtr(target), scratch);
// Inner descriptor: the IonICCall frame for the callee.
descriptor = MakeFrameDescriptor(argSize + padding, FrameType::IonICCall,
JitFrameLayout::Size());
// Push argc (1), callee, then the descriptor.
masm.Push(Imm32(1)); masm.Push(scratch);
masm.Push(Imm32(descriptor));
// The call instruction's return address completes the alignment.
MOZ_ASSERT(((masm.framePushed() + sizeof(uintptr_t)) % JitStackAlignment) ==
0);
MOZ_ASSERT(target->hasJitEntry());
masm.loadJitCodeRaw(scratch, scratch);
masm.callJit(scratch);
if (isCrossRealm) {
masm.switchToRealm(cx_->realm(), ReturnReg);
}
// Tear down everything this function pushed.
masm.freeStack(masm.framePushed() - framePushedBefore);
return true;
}
// Set an array's .length property via the SetArrayLength VM function.
bool IonCacheIRCompiler::emitCallSetArrayLength() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
AutoSaveLiveRegisters save(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
bool strict = reader.readBool();
ConstantOrRegister val =
allocator.useConstantOrRegister(masm, reader.valOperandId());
allocator.discardStack(masm);
prepareVMCall(masm, save);
// Arguments pushed in reverse order of the Fn signature.
masm.Push(Imm32(strict));
masm.Push(val);
masm.Push(obj);
using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
callVM<Fn, jit::SetArrayLength>(masm);
return true;
}
// Set a property with a constant id on a proxy via ProxySetProperty.
bool IonCacheIRCompiler::emitCallProxySet() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
AutoSaveLiveRegisters save(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
ConstantOrRegister val =
allocator.useConstantOrRegister(masm, reader.valOperandId());
// The property id is a stub-data constant.
jsid id = idStubField(reader.stubOffset());
bool strict = reader.readBool();
AutoScratchRegister scratch(allocator, masm);
allocator.discardStack(masm);
prepareVMCall(masm, save);
// Arguments pushed in reverse order of the Fn signature.
masm.Push(Imm32(strict));
masm.Push(val);
masm.Push(id, scratch);
masm.Push(obj);
using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
callVM<Fn, ProxySetProperty>(masm);
return true;
}
// Set a property with a dynamic key on a proxy via ProxySetPropertyByValue.
bool IonCacheIRCompiler::emitCallProxySetByValue() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
AutoSaveLiveRegisters save(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
ConstantOrRegister idVal =
allocator.useConstantOrRegister(masm, reader.valOperandId());
ConstantOrRegister val =
allocator.useConstantOrRegister(masm, reader.valOperandId());
bool strict = reader.readBool();
allocator.discardStack(masm);
prepareVMCall(masm, save);
// Arguments pushed in reverse order of the Fn signature.
masm.Push(Imm32(strict));
masm.Push(val);
masm.Push(idVal);
masm.Push(obj);
using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
callVM<Fn, ProxySetPropertyByValue>(masm);
return true;
}
// Store a sparse array element via the AddOrUpdateSparseElementHelper VM
// function.
bool IonCacheIRCompiler::emitCallAddOrUpdateSparseElementHelper() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
AutoSaveLiveRegisters save(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
Register id = allocator.useRegister(masm, reader.int32OperandId());
ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
bool strict = reader.readBool();
allocator.discardStack(masm);
prepareVMCall(masm, save);
// Arguments pushed in reverse order of the Fn signature.
masm.Push(Imm32(strict));
masm.Push(val);
masm.Push(id);
masm.Push(obj);
using Fn = bool (*)(JSContext * cx, HandleArrayObject obj, int32_t int_id,
HandleValue v, bool strict);
callVM<Fn, AddOrUpdateSparseElementHelper>(masm);
return true;
}
// Load a sparse array element via the GetSparseElementHelper VM function,
// leaving the result in the IC's output register.
bool IonCacheIRCompiler::emitCallGetSparseElementResult() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
AutoSaveLiveRegisters save(*this);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
Register id = allocator.useRegister(masm, reader.int32OperandId());
allocator.discardStack(masm);
prepareVMCall(masm, save);
// Arguments pushed in reverse order of the Fn signature.
masm.Push(id);
masm.Push(obj);
using Fn = bool (*)(JSContext * cx, HandleArrayObject obj, int32_t int_id,
MutableHandleValue result);
callVM<Fn, GetSparseElementHelper>(masm);
masm.storeCallResultValue(output);
return true;
}
// Megamorphic element store: go straight to the generic
// SetObjectElementWithReceiver VM function (receiver == obj here).
bool IonCacheIRCompiler::emitMegamorphicSetElement() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
AutoSaveLiveRegisters save(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
ConstantOrRegister idVal =
allocator.useConstantOrRegister(masm, reader.valOperandId());
ConstantOrRegister val =
allocator.useConstantOrRegister(masm, reader.valOperandId());
bool strict = reader.readBool();
allocator.discardStack(masm);
prepareVMCall(masm, save);
// Arguments pushed in reverse order of the Fn signature; the boxed object
// is also pushed as the receiver Value.
masm.Push(Imm32(strict));
masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
masm.Push(val);
masm.Push(idVal);
masm.Push(obj);
using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue,
HandleValue, bool);
callVM<Fn, SetObjectElementWithReceiver>(masm);
return true;
}
// Load a typed object's field at a constant offset and convert it into the
// IC's output register according to its type descriptor.
bool IonCacheIRCompiler::emitLoadTypedObjectResult() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, reader.objOperandId());
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
TypedThingLayout layout = reader.typedThingLayout();
uint32_t typeDescr = reader.typeDescrKey();
uint32_t fieldOffset = int32StubField(reader.stubOffset());
// Resolve the typed object's data pointer for this layout.
LoadTypedThingData(masm, layout, obj, scratch1);
Address fieldAddr(scratch1, fieldOffset);
emitLoadTypedObjectResultShared(fieldAddr, scratch2, typeDescr, output);
return true;
}
// Ion ICs don't have separate type-monitoring stubs; a monitored result is
// just a plain return here.
bool IonCacheIRCompiler::emitTypeMonitorResult() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
return emitReturnFromIC();
}
// Emit the stub epilogue: restore input registers when no VM call saved the
// live set, then jump back to the IC's rejoin point. The jump is a patchable
// offset recorded for later linking.
bool IonCacheIRCompiler::emitReturnFromIC() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
// If a VM call was made, AutoSaveLiveRegisters already restored state.
if (!savedLiveRegs_) {
allocator.restoreInputState(masm);
}
RepatchLabel rejoin;
// Record the patchable jump; its target is filled in when the stub is
// attached.
rejoinOffset_ = masm.jumpWithPatch(&rejoin);
masm.bind(&rejoin);
return true;
}
// Baseline-only op (reads the Baseline frame's stack); must not occur here.
bool IonCacheIRCompiler::emitLoadStackValue() {
MOZ_ASSERT_UNREACHABLE("emitLoadStackValue not supported for IonCaches.");
return false;
}
// Reuse a cached PropertyIteratorObject for for-in: guard the cached
// NativeIterator is reusable, point it at obj, mark it active, and link it
// into the realm's enumerators list. Defines the iterator as the result
// operand.
bool IonCacheIRCompiler::emitGuardAndGetIterator() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
Register obj = allocator.useRegister(masm, reader.objOperandId());
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
AutoScratchRegister niScratch(allocator, masm);
// Both the iterator object and the enumerators-list head pointer are
// stub-data constants.
PropertyIteratorObject* iterobj =
&objectStubField(reader.stubOffset())->as<PropertyIteratorObject>();
NativeIterator** enumerators =
rawWordStubField<NativeIterator**>(reader.stubOffset());
Register output = allocator.defineRegister(masm, reader.objOperandId());
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.movePtr(ImmGCPtr(iterobj), output);
// Load the NativeIterator from the iterator object's private slot.
masm.loadObjPrivate(output, PropertyIteratorObject::NUM_FIXED_SLOTS,
niScratch);
masm.branchIfNativeIteratorNotReusable(niScratch, failure->label());
// Point the iterator at obj (pre-barrier on the old object) and set the
// Active flag.
Address iterObjAddr(niScratch, NativeIterator::offsetOfObjectBeingIterated());
EmitPreBarrier(masm, iterObjAddr, MIRType::Object);
Address iterFlagsAddr(niScratch, NativeIterator::offsetOfFlags());
masm.storePtr(obj, iterObjAddr);
masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);
// Post-barrier: the iterator object now references obj.
emitPostBarrierSlot(output,
TypedOrValueRegister(MIRType::Object, AnyRegister(obj)),
scratch1);
// Register the iterator in the enumerators list.
masm.loadPtr(AbsoluteAddress(enumerators), scratch1);
emitRegisterEnumerator(scratch1, niScratch, scratch2);
return true;
}
// Guard a DOM proxy's expando value is either undefined (no expando) or an
// object with the expected shape; bail otherwise.
bool IonCacheIRCompiler::emitGuardDOMExpandoMissingOrGuardShape() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
Shape* shape = shapeStubField(reader.stubOffset());
AutoScratchRegister objScratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Label done;
// No expando at all: nothing to check.
masm.branchTestUndefined(Assembler::Equal, val, &done);
masm.debugAssertIsObject(val);
masm.unboxObject(val, objScratch);
// No Spectre mitigations needed: the unboxed object is only used for this
// shape comparison, not for further loads.
masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, objScratch,
shape, failure->label());
masm.bind(&done);
return true;
}
// Load a DOM proxy's expando value while guarding that (a) the private slot
// still holds the expected ExpandoAndGeneration and (b) its generation
// matches the generation recorded when the stub was attached. Defines the
// expando as the result operand.
bool IonCacheIRCompiler::emitLoadDOMExpandoValueGuardGeneration() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
Register obj = allocator.useRegister(masm, reader.objOperandId());
// Both pointers are stub-data constants.
ExpandoAndGeneration* expandoAndGeneration =
rawWordStubField<ExpandoAndGeneration*>(reader.stubOffset());
uint64_t* generationFieldPtr =
expandoGenerationStubFieldPtr(reader.stubOffset());
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
ValueOperand output =
allocator.defineValueRegister(masm, reader.valOperandId());
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Guard the proxy's private slot still holds expandoAndGeneration.
masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch1);
Address expandoAddr(scratch1,
detail::ProxyReservedSlots::offsetOfPrivateSlot());
masm.loadValue(expandoAddr, output);
masm.branchTestValue(Assembler::NotEqual, output,
PrivateValue(expandoAndGeneration), failure->label());
// Guard the 64-bit generation counter is unchanged; compared through a
// pointer into the stub data so updates there keep the stub valid.
masm.movePtr(ImmPtr(expandoAndGeneration), output.scratchReg());
masm.movePtr(ImmPtr(generationFieldPtr), scratch1);
masm.branch64(
Assembler::NotEqual,
Address(output.scratchReg(), ExpandoAndGeneration::offsetOfGeneration()),
Address(scratch1, 0), scratch2, failure->label());
// Finally load the expando value itself.
masm.loadValue(
Address(output.scratchReg(), ExpandoAndGeneration::offsetOfExpando()),
output);
return true;
}
// Compile the CacheIR in `writer` and attach it to this IC as a new stub.
// Sets *attached to true on success (or when an equivalent existing stub was
// updated). All failure modes (OOM, invalidated IonScript, failed writer,
// exact-duplicate stub) return silently with *attached left false.
// `typeCheckInfo` is non-null exactly for SetProp/SetElem ICs.
void IonIC::attachCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
                              CacheKind kind, IonScript* ionScript,
                              bool* attached,
                              const PropertyTypeCheckInfo* typeCheckInfo) {
  // No exceptions or GC may occur while unrooted stub data and jitcode
  // references are live below.
  AutoAssertNoPendingException aanpe(cx);
  JS::AutoCheckCannotGC nogc;

  MOZ_ASSERT(!*attached);
  MOZ_ASSERT(!!typeCheckInfo ==
             (kind == CacheKind::SetProp || kind == CacheKind::SetElem));

  if (writer.failed() || ionScript->invalidated()) {
    return;
  }

  JitZone* jitZone = cx->zone()->jitZone();
  uint32_t stubDataOffset = sizeof(IonICStub);

  // Share a CacheIRStubInfo between stubs with identical CacheIR code.
  CacheIRStubKey::Lookup lookup(kind, ICStubEngine::IonIC, writer.codeStart(),
                                writer.codeLength());
  CacheIRStubInfo* stubInfo = jitZone->getIonCacheIRStubInfo(lookup);
  if (!stubInfo) {
    // Ion ICs don't use the makesGCCalls flag; pass true unconditionally.
    bool makesGCCalls = true;
    stubInfo = CacheIRStubInfo::New(kind, ICStubEngine::IonIC, makesGCCalls,
                                    stubDataOffset, writer);
    if (!stubInfo) {
      return;
    }
    CacheIRStubKey key(stubInfo);
    if (!jitZone->putIonCacheIRStubInfo(lookup, key)) {
      return;
    }
  }
  MOZ_ASSERT(stubInfo);

  // Don't attach a stub equivalent to one already attached. If the existing
  // stub's data was updated (or a type barrier is wanted), report that as
  // *attached so the caller counts it as progress.
  for (IonICStub* stub = firstStub_; stub; stub = stub->next()) {
    if (stub->stubInfo() != stubInfo) {
      continue;
    }
    bool updated = false;
    if (!writer.stubDataEqualsMaybeUpdate(stub->stubDataStart(), &updated)) {
      continue;
    }
    if (updated || (typeCheckInfo && typeCheckInfo->needsTypeBarrier())) {
      *attached = true;
    }
    return;
  }

  size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();

  // Reuse the JitZone looked up above instead of re-deriving it from cx.
  ICStubSpace* stubSpace = jitZone->optimizedStubSpace();
  void* newStubMem = stubSpace->alloc(bytesNeeded);
  if (!newStubMem) {
    return;
  }

  IonICStub* newStub =
      new (newStubMem) IonICStub(fallbackLabel_.raw(), stubInfo);
  writer.copyStubData(newStub->stubDataStart());

  JitContext jctx(cx, nullptr);
  IonCacheIRCompiler compiler(cx, writer, this, ionScript, newStub,
                              typeCheckInfo, stubDataOffset);
  if (!compiler.init()) {
    return;
  }

  JitCode* code = compiler.compile();
  if (!code) {
    return;
  }

  attachStub(newStub, code);
  *attached = true;
}
// Concatenate two strings by calling the ConcatStrings VM function and
// tag the resulting JSString* as a string Value in the output register.
// Live registers are saved/restored around the call (AutoSaveLiveRegisters).
bool IonCacheIRCompiler::emitCallStringConcatResult() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
AutoSaveLiveRegisters save(*this);
AutoOutputRegister output(*this);
Register lhs = allocator.useRegister(masm, reader.stringOperandId());
Register rhs = allocator.useRegister(masm, reader.stringOperandId());
allocator.discardStack(masm);
prepareVMCall(masm, save);
// VM calling convention: arguments are pushed right-to-left.
masm.Push(rhs);
masm.Push(lhs);
using Fn = JSString* (*)(JSContext*, HandleString, HandleString);
callVM<Fn, ConcatStrings<CanGC>>(masm);
// ConcatStrings returns the JSString* in ReturnReg; box it as a Value.
masm.tagValue(JSVAL_TYPE_STRING, ReturnReg, output.valueReg());
return true;
}
// Concatenate a string and an object (either order, as boxed Values) via
// the DoConcatStringObject VM function, storing the resulting Value in
// the output register. Live registers are saved/restored around the call.
bool IonCacheIRCompiler::emitCallStringObjectConcatResult() {
JitSpew(JitSpew_Codegen, __FUNCTION__);
AutoSaveLiveRegisters save(*this);
AutoOutputRegister output(*this);
ValueOperand lhs = allocator.useValueRegister(masm, reader.valOperandId());
ValueOperand rhs = allocator.useValueRegister(masm, reader.valOperandId());
allocator.discardStack(masm);
prepareVMCall(masm, save);
// VM calling convention: arguments are pushed right-to-left.
masm.Push(rhs);
masm.Push(lhs);
using Fn = bool (*)(JSContext*, HandleValue, HandleValue, MutableHandleValue);
callVM<Fn, DoConcatStringObject>(masm);
// The result Value comes back via the VM call's out-param; move it into
// the IC's output register.
masm.storeCallResultValue(output);
return true;
}