#ifndef wasm_code_h
#define wasm_code_h
#include "jit/shared/Assembler-shared.h"
#include "js/HashTable.h"
#include "threading/ExclusiveData.h"
#include "vm/MutexIDs.h"
#include "wasm/WasmGC.h"
#include "wasm/WasmTypes.h"
namespace js {
struct AsmJSMetadata;
namespace wasm {
struct MetadataTier;
struct Metadata;
// Bytewise-copyable portion of LinkData, exposed via LinkData::pod().  The
// "CacheablePod" name suggests it is cached/serialized by raw byte copy, so
// it must stay trivially copyable (hence the NSDMI + defaulted ctor).
struct LinkDataCacheablePod {
// Code offset of the trap stub for this tier.
// NOTE(review): exact semantics not visible in this header -- presumably
// paired with ModuleSegment::trapCode(); confirm against the .cpp.
uint32_t trapOffset = 0;
LinkDataCacheablePod() = default;
};
// LinkData records, for one compilation tier, every code offset that must be
// patched after the code is placed at its final address (e.g. after
// deserialization): module-internal patch sites (internalLinks) and calls to
// runtime builtins, keyed by SymbolicAddress (symbolicLinks).
struct LinkData : LinkDataCacheablePod {
// The tier this link data describes; fixed at construction.
const Tier tier;
explicit LinkData(Tier tier) : tier(tier) {}
// Access to the trivially-copyable base for bytewise (de)serialization.
LinkDataCacheablePod& pod() { return *this; }
const LinkDataCacheablePod& pod() const { return *this; }
// A patch site within the code (patchAtOffset) and the code-relative
// offset it must be linked to (targetOffset).
struct InternalLink {
uint32_t patchAtOffset;
uint32_t targetOffset;
#ifdef JS_CODELABEL_LINKMODE
uint32_t mode;  // platform-specific CodeLabel link mode (see jit/)
#endif
};
typedef Vector<InternalLink, 0, SystemAllocPolicy> InternalLinkVector;
// For each SymbolicAddress, the list of code offsets to patch with that
// builtin's absolute address.
struct SymbolicLinkArray
: EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, Uint32Vector> {
WASM_DECLARE_SERIALIZABLE(SymbolicLinkArray)
};
InternalLinkVector internalLinks;
SymbolicLinkArray symbolicLinks;
WASM_DECLARE_SERIALIZABLE(LinkData)
};
typedef UniquePtr<LinkData> UniqueLinkData;
// Stateful deleter for executable code bytes (used by UniqueCodeBytes).  It
// remembers the allocation length so the release routine (defined out of
// line, not visible in this header) knows how much memory to return.
struct FreeCode {
  uint32_t codeLength = 0;

  FreeCode() = default;
  explicit FreeCode(uint32_t length) : codeLength(length) {}

  // Releases [codeBytes, codeBytes + codeLength); implementation elsewhere.
  void operator()(uint8_t* codeBytes);
};
using UniqueCodeBytes = UniquePtr<uint8_t, FreeCode>;
class Code;
class CodeTier;
class ModuleSegment;
class LazyStubSegment;
// Base class owning a single range of executable code bytes.  A CodeSegment
// is either a ModuleSegment (a whole compiled tier) or a LazyStubSegment
// (lazily generated entry stubs); see Kind and the checked downcasts below.
class CodeSegment {
protected:
static UniqueCodeBytes AllocateCodeBytes(uint32_t codeLength);
enum class Kind { LazyStubs, Module };
CodeSegment(UniqueCodeBytes bytes, uint32_t length, Kind kind)
: bytes_(std::move(bytes)),
length_(length),
kind_(kind),
codeTier_(nullptr),
unregisterOnDestroy_(false) {}
// Associates this segment with its owning CodeTier; codeTier_ stays null
// (and initialized() false) until this is called.
bool initialize(const CodeTier& codeTier);
private:
const UniqueCodeBytes bytes_;  // owning pointer to the code memory
const uint32_t length_;        // length of the code in bytes
const Kind kind_;
const CodeTier* codeTier_;     // set by initialize(); null before that
bool unregisterOnDestroy_;     // whether the dtor must unregister this segment
public:
bool initialized() const { return !!codeTier_; }
~CodeSegment();
bool isLazyStubs() const { return kind_ == Kind::LazyStubs; }
bool isModule() const { return kind_ == Kind::Module; }
const ModuleSegment* asModule() const {
MOZ_ASSERT(isModule());
// C-style cast on purpose: ModuleSegment is only forward-declared here, so
// static_cast is not available; this relies on the derived object sharing
// the base's address (single, non-virtual inheritance).
return (ModuleSegment*)this;
}
const LazyStubSegment* asLazyStub() const {
MOZ_ASSERT(isLazyStubs());
// Same incomplete-type cast rationale as asModule() above.
return (LazyStubSegment*)this;
}
uint8_t* base() const { return bytes_.get(); }
uint32_t length() const {
MOZ_ASSERT(length_ != UINT32_MAX);
return length_;
}
// True if pc lies in the half-open range [base(), base() + length_).
bool containsCodePC(const void* pc) const {
return pc >= base() && pc < (base() + length_);
}
const CodeTier& codeTier() const {
MOZ_ASSERT(initialized());
return *codeTier_;
}
const Code& code() const;
void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code) const;
};
typedef UniquePtr<ModuleSegment> UniqueModuleSegment;
// A ModuleSegment is the CodeSegment holding an entire compilation tier's
// code for a module, plus the address of its trap stub.  It can be created
// directly from a MacroAssembler or from previously-unlinked bytes, and it
// supports (de)serialization given the matching LinkData.
class ModuleSegment : public CodeSegment {
const Tier tier_;
// Address of the trap stub inside this segment.
// NOTE(review): set by the out-of-line ctor; exact stub semantics are not
// visible in this header -- confirm against the .cpp.
uint8_t* const trapCode_;
public:
ModuleSegment(Tier tier, UniqueCodeBytes codeBytes, uint32_t codeLength,
const LinkData& linkData);
static UniqueModuleSegment create(Tier tier, jit::MacroAssembler& masm,
const LinkData& linkData);
static UniqueModuleSegment create(Tier tier, const Bytes& unlinkedBytes,
const LinkData& linkData);
// Links this segment into its owning CodeTier; see CodeSegment::initialize.
bool initialize(const CodeTier& codeTier, const LinkData& linkData,
const Metadata& metadata, const MetadataTier& metadataTier);
Tier tier() const { return tier_; }
uint8_t* trapCode() const { return trapCode_; }
size_t serializedSize() const;
uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const;
static const uint8_t* deserialize(const uint8_t* cursor,
const LinkData& linkData,
UniqueModuleSegment* segment);
// Finds the CodeRange containing pc, or null (defined out of line).
const CodeRange* lookupRange(const void* pc) const;
void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code,
size_t* data) const;
};
// Metadata for one exported function: its signature, function index, and --
// when eager stubs were compiled -- the offset of its eager interpreter
// entry stub.
class FuncExport {
FuncType funcType_;
// POD part, copied bytewise by clone(); MOZ_INIT_OUTSIDE_CTOR because the
// default ctor leaves it uninitialized (filled in by deserialization or by
// the explicit ctor below).
MOZ_INIT_OUTSIDE_CTOR struct CacheablePod {
uint32_t funcIndex_;
uint32_t eagerInterpEntryOffset_; bool hasEagerStubs_;
} pod;
public:
FuncExport() = default;
explicit FuncExport(FuncType&& funcType, uint32_t funcIndex,
bool hasEagerStubs)
: funcType_(std::move(funcType)) {
pod.funcIndex_ = funcIndex;
// UINT32_MAX is the "not yet set" sentinel; see the asserts below.
pod.eagerInterpEntryOffset_ = UINT32_MAX;
pod.hasEagerStubs_ = hasEagerStubs;
}
// One-shot setter: asserts the offset is still the sentinel and that eager
// stubs were actually requested for this export.
void initEagerInterpEntryOffset(uint32_t entryOffset) {
MOZ_ASSERT(pod.eagerInterpEntryOffset_ == UINT32_MAX);
MOZ_ASSERT(hasEagerStubs());
pod.eagerInterpEntryOffset_ = entryOffset;
}
bool hasEagerStubs() const { return pod.hasEagerStubs_; }
const FuncType& funcType() const { return funcType_; }
uint32_t funcIndex() const { return pod.funcIndex_; }
uint32_t eagerInterpEntryOffset() const {
MOZ_ASSERT(pod.eagerInterpEntryOffset_ != UINT32_MAX);
MOZ_ASSERT(hasEagerStubs());
return pod.eagerInterpEntryOffset_;
}
// Deep copy: bytewise POD copy plus a FuncType clone (which can fail,
// presumably on OOM -- hence the bool return).
bool clone(const FuncExport& src) {
mozilla::PodAssign(&pod, &src.pod);
return funcType_.clone(src.funcType_);
}
WASM_DECLARE_SERIALIZABLE(FuncExport)
};
typedef Vector<FuncExport, 0, SystemAllocPolicy> FuncExportVector;
// Metadata for one imported function: its signature, the offset of its
// tls/global-data slot, and the offsets of its interpreter- and jit-exit
// stubs (0 until initialized; see the one-shot setters).
class FuncImport {
FuncType funcType_;
struct CacheablePod {
uint32_t tlsDataOffset_;
uint32_t interpExitCodeOffset_; uint32_t jitExitCodeOffset_; } pod;
public:
FuncImport() { memset(&pod, 0, sizeof(CacheablePod)); }
FuncImport(FuncType&& funcType, uint32_t tlsDataOffset)
: funcType_(std::move(funcType)) {
pod.tlsDataOffset_ = tlsDataOffset;
// 0 doubles as the "not yet set" sentinel for both exit-stub offsets.
pod.interpExitCodeOffset_ = 0;
pod.jitExitCodeOffset_ = 0;
}
// One-shot setter for the interpreter-exit stub offset.
void initInterpExitOffset(uint32_t off) {
MOZ_ASSERT(!pod.interpExitCodeOffset_);
pod.interpExitCodeOffset_ = off;
}
// One-shot setter for the jit-exit stub offset.
void initJitExitOffset(uint32_t off) {
MOZ_ASSERT(!pod.jitExitCodeOffset_);
pod.jitExitCodeOffset_ = off;
}
const FuncType& funcType() const { return funcType_; }
uint32_t tlsDataOffset() const { return pod.tlsDataOffset_; }
uint32_t interpExitCodeOffset() const { return pod.interpExitCodeOffset_; }
uint32_t jitExitCodeOffset() const { return pod.jitExitCodeOffset_; }
// Deep copy: bytewise POD copy plus a fallible FuncType clone.
bool clone(const FuncImport& src) {
mozilla::PodAssign(&pod, &src.pod);
return funcType_.clone(src.funcType_);
}
WASM_DECLARE_SERIALIZABLE(FuncImport)
};
typedef Vector<FuncImport, 0, SystemAllocPolicy> FuncImportVector;
// Bytewise-cacheable portion of Metadata (see Metadata::pod()).  Fields not
// listed in the ctor init-list (the Maybes, filenameIsURL is listed) default
// to Nothing via their own default constructors.
struct MetadataCacheablePod {
ModuleKind kind;                        // Wasm vs AsmJS; see Metadata::isAsmJS()
MemoryUsage memoryUsage;                // None/…/Shared; see usesMemory()
uint32_t minMemoryLength;
uint32_t globalDataLength;
Maybe<uint32_t> maxMemoryLength;        // Nothing if no maximum declared
Maybe<uint32_t> startFuncIndex;         // Nothing if no start function
Maybe<uint32_t> nameCustomSectionIndex; // Nothing if no name section
bool filenameIsURL;
explicit MetadataCacheablePod(ModuleKind kind)
: kind(kind),
memoryUsage(MemoryUsage::None),
minMemoryLength(0),
globalDataLength(0),
filenameIsURL(false) {}
};
typedef uint8_t ModuleHash[8];
typedef Vector<ValTypeVector, 0, SystemAllocPolicy> FuncArgTypesVector;
typedef Vector<ExprType, 0, SystemAllocPolicy> FuncReturnTypesVector;
// Tier-independent, shareable (refcounted via ShareableBase) module metadata:
// signatures, globals, tables, source/file names, and debug info.  Virtual so
// AsmJSMetadata can derive from it; see asAsmJS().
struct Metadata : public ShareableBase<Metadata>, public MetadataCacheablePod {
FuncTypeWithIdVector funcTypeIds;
GlobalDescVector globals;
TableDescVector tables;
CacheableChars filename;
CacheableChars sourceMapURL;
SharedBytes namePayload;     // raw bytes referenced by the Name entries below
Maybe<Name> moduleName;
NameVector funcNames;
bool debugEnabled;
FuncArgTypesVector debugFuncArgTypes;
FuncReturnTypesVector debugFuncReturnTypes;
ModuleHash debugHash;
explicit Metadata(ModuleKind kind = ModuleKind::Wasm)
: MetadataCacheablePod(kind), debugEnabled(false), debugHash() {}
virtual ~Metadata() {}
// Access to the bytewise-cacheable base for (de)serialization.
MetadataCacheablePod& pod() { return *this; }
const MetadataCacheablePod& pod() const { return *this; }
bool usesMemory() const { return memoryUsage != MemoryUsage::None; }
bool usesSharedMemory() const { return memoryUsage == MemoryUsage::Shared; }
bool isAsmJS() const { return kind == ModuleKind::AsmJS; }
// Unchecked downcast, guarded only by the isAsmJS() assert: AsmJSMetadata
// is an incomplete type here, so no static_cast/dynamic_cast is possible.
const AsmJSMetadata& asAsmJS() const {
MOZ_ASSERT(isAsmJS());
return *(const AsmJSMetadata*)this;
}
// Overridden by AsmJSMetadata (presumably); base defaults are "no info".
virtual bool mutedErrors() const { return false; }
virtual const char16_t* displayURL() const { return nullptr; }
virtual ScriptSource* maybeScriptSource() const { return nullptr; }
// How a function name will be displayed; affects getFuncName's formatting.
enum NameContext { Standalone, BeforeLocation };
virtual bool getFuncName(NameContext ctx, uint32_t funcIndex,
UTF8Bytes* name) const;
bool getFuncNameStandalone(uint32_t funcIndex, UTF8Bytes* name) const {
return getFuncName(NameContext::Standalone, funcIndex, name);
}
bool getFuncNameBeforeLocation(uint32_t funcIndex, UTF8Bytes* name) const {
return getFuncName(NameContext::BeforeLocation, funcIndex, name);
}
WASM_DECLARE_SERIALIZABLE(Metadata);
};
typedef RefPtr<Metadata> MutableMetadata;
typedef RefPtr<const Metadata> SharedMetadata;
// Per-tier metadata: code ranges, call/trap sites, import/export tables and
// stack maps for one compilation tier's code.
struct MetadataTier {
explicit MetadataTier(Tier tier) : tier(tier) {}
const Tier tier;
// Maps function index -> index into codeRanges (see codeRange() below).
Uint32Vector funcToCodeRange;
CodeRangeVector codeRanges;
CallSiteVector callSites;
TrapSiteVectorArray trapSites;
FuncImportVector funcImports;
FuncExportVector funcExports;
StackMaps stackMaps;
Uint32Vector debugTrapFarJumpOffsets;
// Looks up the FuncExport for funcIndex; optionally reports its position
// in funcExports via funcExportIndex.
FuncExport& lookupFuncExport(uint32_t funcIndex,
size_t* funcExportIndex = nullptr);
const FuncExport& lookupFuncExport(uint32_t funcIndex,
size_t* funcExportIndex = nullptr) const;
const CodeRange& codeRange(const FuncExport& funcExport) const {
return codeRanges[funcToCodeRange[funcExport.funcIndex()]];
}
bool clone(const MetadataTier& src);
WASM_DECLARE_SERIALIZABLE(MetadataTier);
};
using UniqueMetadataTier = UniquePtr<MetadataTier>;
using UniqueLazyStubSegment = UniquePtr<LazyStubSegment>;
using LazyStubSegmentVector =
Vector<UniqueLazyStubSegment, 0, SystemAllocPolicy>;
// A CodeSegment that bump-allocates lazily generated entry stubs; usedBytes_
// tracks how much of the segment has been handed out (see hasSpace/addStubs).
class LazyStubSegment : public CodeSegment {
CodeRangeVector codeRanges_;  // ranges for the stubs placed in this segment
size_t usedBytes_;
public:
LazyStubSegment(UniqueCodeBytes bytes, size_t length)
: CodeSegment(std::move(bytes), length, CodeSegment::Kind::LazyStubs),
usedBytes_(0) {}
static UniqueLazyStubSegment create(const CodeTier& codeTier,
size_t codeLength);
// Rounds bytes up to a whole number of system pages.
static size_t AlignBytesNeeded(size_t bytes) {
return AlignBytes(bytes, gc::SystemPageSize());
}
bool hasSpace(size_t bytes) const;
// Copies codeLength bytes of stubs into the segment for the given exports,
// returning the placement via codePtr and the index of the first code
// range appended to codeRanges_.
bool addStubs(size_t codeLength, const Uint32Vector& funcExportIndices,
const FuncExportVector& funcExports,
const CodeRangeVector& codeRanges, uint8_t** codePtr,
size_t* indexFirstInsertedCodeRange);
const CodeRangeVector& codeRanges() const { return codeRanges_; }
const CodeRange* lookupRange(const void* pc) const;
void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
size_t* data) const;
};
// Associates one function index with the lazy stub generated for it: which
// LazyStubSegment holds the stub and which code range within that segment.
struct LazyFuncExport {
  size_t funcIndex;
  size_t lazyStubSegmentIndex;
  size_t funcCodeRangeIndex;

  LazyFuncExport(size_t func, size_t segmentIndex, size_t codeRangeIndex)
      : funcIndex(func),
        lazyStubSegmentIndex(segmentIndex),
        funcCodeRangeIndex(codeRangeIndex) {}
};
using LazyFuncExportVector = Vector<LazyFuncExport, 0, SystemAllocPolicy>;
// The set of lazily generated stubs for one tier.  Held inside a CodeTier
// under ExclusiveData, so access is expected to be lock-protected (see
// CodeTier::lazyStubs()).
class LazyStubTier {
LazyStubSegmentVector stubSegments_;
LazyFuncExportVector exports_;
// Index of the segment most recently appended into; used by createMany to
// continue filling the current segment -- TODO confirm against the .cpp.
size_t lastStubSegmentIndex_;
bool createMany(const Uint32Vector& funcExportIndices,
const CodeTier& codeTier, size_t* stubSegmentIndex);
public:
LazyStubTier() : lastStubSegmentIndex_(0) {}
bool empty() const { return stubSegments_.empty(); }
bool hasStub(uint32_t funcIndex) const;
// Returns the interpreter entry stub for funcIndex, or presumably null if
// none exists -- confirm in the .cpp.
void* lookupInterpEntry(uint32_t funcIndex) const;
bool createOne(uint32_t funcExportIndex, const CodeTier& codeTier);
// Batch stub creation used during tier-2 setup; reports the segment used
// via stubSegmentIndex (Nothing if no stubs were needed -- confirm).
bool createTier2(const Uint32Vector& funcExportIndices,
const CodeTier& codeTier, Maybe<size_t>* stubSegmentIndex);
void setJitEntries(const Maybe<size_t>& stubSegmentIndex, const Code& code);
void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
size_t* data) const;
};
typedef UniquePtr<CodeTier> UniqueCodeTier;
typedef UniquePtr<const CodeTier> UniqueConstCodeTier;
// One compilation tier of a Code object: the tier's metadata, its compiled
// ModuleSegment, and its lock-protected lazy stubs.
class CodeTier {
const Code* code_;  // back-pointer to the owning Code; set by initialize()
const UniqueMetadataTier metadata_;
const UniqueModuleSegment segment_;
// Lazy stubs are mutated after construction, so they are guarded by a
// mutex; the mutex id differs per tier to keep lock ordering distinct.
ExclusiveData<LazyStubTier> lazyStubs_;
static const MutexId& mutexForTier(Tier tier) {
if (tier == Tier::Baseline) {
return mutexid::WasmLazyStubsTier1;
}
MOZ_ASSERT(tier == Tier::Optimized);
return mutexid::WasmLazyStubsTier2;
}
public:
CodeTier(UniqueMetadataTier metadata, UniqueModuleSegment segment)
: code_(nullptr),
metadata_(std::move(metadata)),
segment_(std::move(segment)),
lazyStubs_(mutexForTier(segment_->tier())) {}
bool initialized() const { return !!code_ && segment_->initialized(); }
// Links this tier to its owning Code and initializes the segment.
bool initialize(const Code& code, const LinkData& linkData,
const Metadata& metadata);
Tier tier() const { return segment_->tier(); }
const ExclusiveData<LazyStubTier>& lazyStubs() const { return lazyStubs_; }
const MetadataTier& metadata() const { return *metadata_.get(); }
const ModuleSegment& segment() const { return *segment_.get(); }
const Code& code() const {
MOZ_ASSERT(initialized());
return *code_;
}
const CodeRange* lookupRange(const void* pc) const;
size_t serializedSize() const;
uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const;
static const uint8_t* deserialize(const uint8_t* cursor,
const LinkData& linkData,
UniqueCodeTier* codeTier);
void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
size_t* data) const;
};
// Per-module jump tables.  The jit_ table holds TWO consecutive slots per
// function (both written with the same target; lookups hand out the even
// slot and funcIndexFromJitEntry divides by 2).  The reason for the paired
// slots is not visible in this header -- TODO confirm against the .cpp.
// The tiering_ table has one slot per function and is only written in
// Tier1 mode.
class JumpTables {
using TablePointer = mozilla::UniquePtr<void*[], JS::FreePolicy>;
CompileMode mode_;
TablePointer tiering_;
TablePointer jit_;
size_t numFuncs_;
public:
bool init(CompileMode mode, const ModuleSegment& ms,
const CodeRangeVector& codeRanges);
// Writes both of function i's jit slots.  const because the table memory
// is mutable through the UniquePtr even on a const JumpTables.
void setJitEntry(size_t i, void* target) const {
MOZ_ASSERT(i < numFuncs_);
jit_.get()[2 * i] = target;
jit_.get()[2 * i + 1] = target;
}
// Address of function i's (even) jit slot; asserts it has been set.
void** getAddressOfJitEntry(size_t i) const {
MOZ_ASSERT(i < numFuncs_);
MOZ_ASSERT(jit_.get()[2 * i]);
return &jit_.get()[2 * i];
}
// Inverse of getAddressOfJitEntry: recovers the function index from a
// pointer into the jit table.
size_t funcIndexFromJitEntry(void** target) const {
MOZ_ASSERT(target >= &jit_.get()[0]);
MOZ_ASSERT(target <= &(jit_.get()[2 * numFuncs_ - 1]));
size_t index = (intptr_t*)target - (intptr_t*)&jit_.get()[0];
MOZ_ASSERT(index % 2 == 0);
return index / 2;
}
// No-op unless compiled in Tier1 mode (no tiering table otherwise).
void setTieringEntry(size_t i, void* target) const {
MOZ_ASSERT(i < numFuncs_);
if (mode_ == CompileMode::Tier1) {
tiering_.get()[i] = target;
}
}
void** tiering() const { return tiering_.get(); }
// 2 jit slots per function, plus 1 tiering slot when that table exists.
size_t sizeOfMiscExcludingThis() const {
return sizeof(void*) * (2 + (tiering_ ? 1 : 0)) * numFuncs_;
}
};
typedef RefPtr<const Code> SharedCode;
typedef RefPtr<Code> MutableCode;
// Code is the refcounted container for all the runtime code of a module:
// the always-present tier-1 code, an optional tier-2 published later via
// setTier2/commitTier2 (hasTier2_ is Atomic, suggesting a two-phase,
// cross-thread publication -- confirm the protocol in the .cpp), shared
// Metadata, lock-protected profiling labels, jump tables, and struct types.
class Code : public ShareableBase<Code> {
UniqueCodeTier tier1_;
// tier2_ is written after construction (hence mutable on a shared object);
// readers should check hasTier2_ first.
mutable UniqueConstCodeTier tier2_; mutable Atomic<bool> hasTier2_;
SharedMetadata metadata_;
// Lazily built under its lock by ensureProfilingLabels().
ExclusiveData<CacheableCharsVector> profilingLabels_;
JumpTables jumpTables_;
StructTypeVector structTypes_;
public:
Code(UniqueCodeTier tier1, const Metadata& metadata,
JumpTables&& maybeJumpTables, StructTypeVector&& structTypes);
bool initialized() const { return tier1_->initialized(); }
bool initialize(const LinkData& linkData);
void setTieringEntry(size_t i, void* target) const {
jumpTables_.setTieringEntry(i, target);
}
void** tieringJumpTable() const { return jumpTables_.tiering(); }
void setJitEntry(size_t i, void* target) const {
jumpTables_.setJitEntry(i, target);
}
void** getAddressOfJitEntry(size_t i) const {
return jumpTables_.getAddressOfJitEntry(i);
}
uint32_t getFuncIndex(JSFunction* fun) const;
// Two-phase tier-2 installation: setTier2 stores the tier, commitTier2
// makes it visible (flips hasTier2_) -- confirm ordering in the .cpp.
bool setTier2(UniqueCodeTier tier2, const LinkData& linkData) const;
void commitTier2() const;
bool hasTier2() const { return hasTier2_; }
Tiers tiers() const;
bool hasTier(Tier t) const;
Tier stableTier() const; Tier bestTier()
const;
const CodeTier& codeTier(Tier tier) const;
const Metadata& metadata() const { return *metadata_; }
const StructTypeVector& structTypes() const { return structTypes_; }
const ModuleSegment& segment(Tier iter) const {
return codeTier(iter).segment();
}
const MetadataTier& metadata(Tier iter) const {
return codeTier(iter).metadata();
}
// pc/return-address -> metadata lookups spanning all present tiers.
const CallSite* lookupCallSite(void* returnAddress) const;
const CodeRange* lookupFuncRange(void* pc) const;
const StackMap* lookupStackMap(uint8_t* nextPC) const;
bool containsCodePC(const void* pc) const;
bool lookupTrap(void* pc, Trap* trap, BytecodeOffset* bytecode) const;
// Builds profilingLabels_ (under its lock) when profiling is enabled.
void ensureProfilingLabels(bool profilingEnabled) const;
const char* profilingLabel(uint32_t funcIndex) const;
void addSizeOfMiscIfNotSeen(MallocSizeOf mallocSizeOf,
Metadata::SeenSet* seenMetadata,
Code::SeenSet* seenCode, size_t* code,
size_t* data) const;
size_t serializedSize() const;
uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const;
static const uint8_t* deserialize(const uint8_t* cursor,
const LinkData& linkData,
Metadata& metadata, SharedCode* code);
};
void PatchDebugSymbolicAccesses(uint8_t* codeBase, jit::MacroAssembler& masm);
} }
#endif