[abi] Switched to C++17

kobalicek
2025-05-24 15:53:19 +02:00
parent cecc73f297
commit 356dddbc55
125 changed files with 7518 additions and 3345 deletions

View File

@@ -1,8 +1,8 @@
 {
   "diagnostics": {
     "asan": { "definitions": ["ASMJIT_SANITIZE=address"] },
-    "ubsan": { "definitions": ["ASMJIT_SANITIZE=undefined"] },
-    "msan": { "definitions": ["ASMJIT_SANITIZE=memory"] }
+    "msan": { "definitions": ["ASMJIT_SANITIZE=memory"] },
+    "ubsan": { "definitions": ["ASMJIT_SANITIZE=undefined"] }
   },
   "valgrind_arguments": [

View File

@@ -54,7 +54,6 @@ jobs:
   - { title: "no-x86" , host: "ubuntu-latest" , arch: "x64" , cc: "clang-19", conf: "Release", defs: "ASMJIT_TEST=1,ASMJIT_NO_X86=1" }
   - { title: "no-aarch64" , host: "ubuntu-latest" , arch: "x64" , cc: "clang-19", conf: "Release", defs: "ASMJIT_TEST=1,ASMJIT_NO_AARCH64=1" }
-  - { title: "lang-c++17" , host: "ubuntu-latest" , arch: "x64" , cc: "clang-19", conf: "Debug" , defs: "ASMJIT_TEST=1,CMAKE_CXX_FLAGS=-std=c++17" }
   - { title: "lang-c++20" , host: "ubuntu-latest" , arch: "x64" , cc: "clang-19", conf: "Debug" , defs: "ASMJIT_TEST=1,CMAKE_CXX_FLAGS=-std=c++20" }
   - { title: "lang-c++23" , host: "ubuntu-latest" , arch: "x64" , cc: "clang-19", conf: "Debug" , defs: "ASMJIT_TEST=1,CMAKE_CXX_FLAGS=-std=c++23" }

View File

@@ -195,7 +195,7 @@ function(asmjit_add_target target target_type)
                         DEFINE_SYMBOL ""
                         CXX_VISIBILITY_PRESET hidden)
   target_compile_options(${target} PRIVATE ${X_CFLAGS} ${ASMJIT_SANITIZE_CFLAGS} $<$<CONFIG:Debug>:${X_CFLAGS_DBG}> $<$<NOT:$<CONFIG:Debug>>:${X_CFLAGS_REL}>)
-  target_compile_features(${target} PUBLIC cxx_std_11)
+  target_compile_features(${target} PUBLIC cxx_std_17)
   target_link_options(${target} PRIVATE ${ASMJIT_PRIVATE_LFLAGS})
   target_link_libraries(${target} PRIVATE ${X_LIBRARIES})
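Because the requirement is declared PUBLIC, `cxx_std_17` propagates to every target that links against asmjit, so downstream projects must build as C++17 or newer as well. A minimal consumer-side sanity check is sketched below; it is not part of this commit, and the `<asmjit/core.h>` include path simply assumes a typical installed layout (on MSVC, `__cplusplus` reports its real value only with `/Zc:__cplusplus`).

```cpp
// Sketch only: assert the C++17 baseline that the PUBLIC cxx_std_17 requirement propagates.
#include <asmjit/core.h>  // assumed install layout

static_assert(__cplusplus >= 201703L, "asmjit now requires a C++17 (or newer) compiler");

int main() { return 0; }
```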

View File

@@ -48,13 +48,16 @@
 //! - \ref arm::Utils - Utilities that can help during code generation for AArch32 and AArch64.
 #include "./arm.h"
-#include "./arm/a64assembler.h"
-#include "./arm/a64builder.h"
-#include "./arm/a64compiler.h"
-#include "./arm/a64emitter.h"
-#include "./arm/a64globals.h"
-#include "./arm/a64instdb.h"
-#include "./arm/a64operand.h"
+#include "asmjit-scope-begin.h"
+#include "arm/a64assembler.h"
+#include "arm/a64builder.h"
+#include "arm/a64compiler.h"
+#include "arm/a64emitter.h"
+#include "arm/a64globals.h"
+#include "arm/a64instdb.h"
+#include "arm/a64operand.h"
+#include "asmjit-scope-end.h"
 #endif // ASMJIT_A64_H_INCLUDED
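The architecture headers are now bracketed by `asmjit-scope-begin.h` / `asmjit-scope-end.h`. Their contents are not shown in this commit; judging by the naming, the pair applies and then restores compiler-wide state (warning suppression, packing, and similar) around the public includes. The sketch below only illustrates that general push/pop pattern and is an assumption, not the actual files.

```cpp
// Illustrative sketch of a begin/end header pair (assumption, not asmjit's real files).
// ---- scope-begin ----
#if defined(_MSC_VER)
  #pragma warning(push)
  #pragma warning(disable: 4201)  // example: nameless struct/union used in public headers
#endif

// ---- scope-end ----
#if defined(_MSC_VER)
  #pragma warning(pop)
#endif
```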

View File

@@ -76,9 +76,12 @@
 //! - \ref arm::DataType - Data type that is part of an instruction in AArch32 mode.
 //! - \ref arm::Utils - Utilities that can help during code generation for AArch32 and AArch64.
-#include "./core.h"
-#include "./arm/armglobals.h"
-#include "./arm/armoperand.h"
-#include "./arm/armutils.h"
+#include "core.h"
+#include "asmjit-scope-begin.h"
+#include "arm/armglobals.h"
+#include "arm/armoperand.h"
+#include "arm/armutils.h"
+#include "asmjit-scope-end.h"
 #endif // ASMJIT_ARM_H_INCLUDED

View File

@@ -24,8 +24,8 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
 // a64::Assembler - Utils
 // ======================
-static ASMJIT_FORCE_INLINE constexpr uint32_t diff(RegType a, RegType b) noexcept { return uint32_t(a) - uint32_t(b); }
-static ASMJIT_FORCE_INLINE constexpr uint32_t diff(VecElementType elementType, VecElementType baseType) noexcept { return uint32_t(elementType) - uint32_t(baseType); }
+static ASMJIT_INLINE_CONSTEXPR uint32_t diff(RegType a, RegType b) noexcept { return uint32_t(a) - uint32_t(b); }
+static ASMJIT_INLINE_CONSTEXPR uint32_t diff(VecElementType elementType, VecElementType baseType) noexcept { return uint32_t(elementType) - uint32_t(baseType); }
 // a64::Assembler - Cond
 // =====================
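Throughout the commit, `ASMJIT_FORCE_INLINE constexpr` is collapsed into a single `ASMJIT_INLINE_CONSTEXPR` macro. Its definition is not part of this excerpt; the sketch below uses a hypothetical `MY_INLINE_CONSTEXPR` only to illustrate the idea of pairing a force-inline hint with `constexpr`.

```cpp
#include <cstdint>

// Hypothetical stand-in; asmjit's real macro may be defined differently.
#if defined(_MSC_VER)
  #define MY_INLINE_CONSTEXPR __forceinline constexpr
#elif defined(__GNUC__)
  #define MY_INLINE_CONSTEXPR inline __attribute__((__always_inline__)) constexpr
#else
  #define MY_INLINE_CONSTEXPR inline constexpr
#endif

// Usage mirroring the diff above.
static MY_INLINE_CONSTEXPR uint32_t diff(uint32_t a, uint32_t b) noexcept { return a - b; }
```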
@@ -74,39 +74,50 @@ static inline RegType extendOptionToRegType(uint32_t option) noexcept {
 //! Struct that contains Size (2 bits), Q flag, and S (scalar) flag. These values
 //! are used to encode Q, Size, and Scalar fields in an opcode.
 struct SizeOp {
-  enum : uint8_t {
-    k128BitShift = 0,
-    kScalarShift = 1,
-    kSizeShift = 2,
-    kQ = uint8_t(1u << k128BitShift),
-    kS = uint8_t(1u << kScalarShift),
-    k00 = uint8_t(0 << kSizeShift),
-    k01 = uint8_t(1 << kSizeShift),
-    k10 = uint8_t(2 << kSizeShift),
-    k11 = uint8_t(3 << kSizeShift),
-    k00Q = k00 | kQ,
-    k01Q = k01 | kQ,
-    k10Q = k10 | kQ,
-    k11Q = k11 | kQ,
-    k00S = k00 | kS,
-    k01S = k01 | kS,
-    k10S = k10 | kS,
-    k11S = k11 | kS,
-    kInvalid = 0xFFu,
-    // Masks used by SizeOpMap.
-    kSzQ = (0x3u << kSizeShift) | kQ,
-    kSzS = (0x3u << kSizeShift) | kS,
-    kSzQS = (0x3u << kSizeShift) | kQ | kS
-  };
+  //! \name Constants
+  //! \{
+  static inline constexpr uint8_t k128BitShift = 0;
+  static inline constexpr uint8_t kScalarShift = 1;
+  static inline constexpr uint8_t kSizeShift = 2;
+  static inline constexpr uint8_t kQ = uint8_t(1u << k128BitShift);
+  static inline constexpr uint8_t kS = uint8_t(1u << kScalarShift);
+  static inline constexpr uint8_t k00 = uint8_t(0 << kSizeShift);
+  static inline constexpr uint8_t k01 = uint8_t(1 << kSizeShift);
+  static inline constexpr uint8_t k10 = uint8_t(2 << kSizeShift);
+  static inline constexpr uint8_t k11 = uint8_t(3 << kSizeShift);
+  static inline constexpr uint8_t k00Q = k00 | kQ;
+  static inline constexpr uint8_t k01Q = k01 | kQ;
+  static inline constexpr uint8_t k10Q = k10 | kQ;
+  static inline constexpr uint8_t k11Q = k11 | kQ;
+  static inline constexpr uint8_t k00S = k00 | kS;
+  static inline constexpr uint8_t k01S = k01 | kS;
+  static inline constexpr uint8_t k10S = k10 | kS;
+  static inline constexpr uint8_t k11S = k11 | kS;
+  static inline constexpr uint8_t kInvalid = 0xFFu;
+  // Masks used by SizeOpMap.
+  static inline constexpr uint8_t kSzQ = (0x3u << kSizeShift) | kQ;
+  static inline constexpr uint8_t kSzS = (0x3u << kSizeShift) | kS;
+  static inline constexpr uint8_t kSzQS = (0x3u << kSizeShift) | kQ | kS;
+  //! \}
+  //! \name Members
+  //! \{
   uint8_t value;
+  //! \}
+  //! \name Accessors
+  //! \{
   inline bool isValid() const noexcept { return value != kInvalid; }
   inline void makeInvalid() noexcept { value = kInvalid; }
@@ -119,6 +130,8 @@ struct SizeOp {
     ASMJIT_ASSERT(size() > 0);
     value = uint8_t(value - (1u << kSizeShift));
   }
+  //! \}
 };
 struct SizeOpTable {
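Replacing the anonymous `enum : uint8_t` with `static inline constexpr uint8_t` members leans on C++17 inline variables: each constant keeps an exact type, lives entirely in the header without an out-of-class definition, and can be odr-used (for example taken by reference) without extra boilerplate. A reduced before/after sketch with made-up names, not asmjit code:

```cpp
#include <cstdint>

// C++11 style: byte-sized constants expressed as untyped enumerators.
struct FlagsOld {
  enum : uint8_t { kShift = 2, kMask = uint8_t(0x3u << kShift) };
};

// C++17 style: typed constants defined inline in the class, no .cpp definition needed.
struct FlagsNew {
  static inline constexpr uint8_t kShift = 2;
  static inline constexpr uint8_t kMask = uint8_t(0x3u << kShift);
};

static_assert(FlagsOld::kMask == FlagsNew::kMask, "same values, different machinery");
```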
@@ -282,8 +295,9 @@ static inline SizeOp armElementTypeToSizeOp(uint32_t vecOpType, RegType regType,
   SizeOp op = table.array[index];
   SizeOp modifiedOp { uint8_t(op.value & map.sizeOpMask) };
-  if (!Support::bitTest(map.acceptMask, op.value))
+  if (!Support::bitTest(map.acceptMask, op.value)) {
     modifiedOp.makeInvalid();
+  }
   return modifiedOp;
 }
@@ -363,8 +377,9 @@ static uint32_t encodeMovSequence64(uint32_t out[4], uint64_t imm, uint32_t rd,
   for (uint32_t hwIndex = 0; hwIndex < 4; hwIndex++, imm >>= 16) {
     uint32_t hwImm = uint32_t(imm & 0xFFFFu);
-    if (hwImm == 0)
+    if (hwImm == 0) {
       continue;
+    }
     out[count++] = op | (hwIndex << 21) | (hwImm << 5) | rd;
     op = kMovK;
@@ -382,8 +397,9 @@ static uint32_t encodeMovSequence64(uint32_t out[4], uint64_t imm, uint32_t rd,
   for (uint32_t hwIndex = 0; hwIndex < 4; hwIndex++, imm >>= 16) {
     uint32_t hwImm = uint32_t(imm & 0xFFFFu);
-    if (hwImm == 0xFFFFu)
+    if (hwImm == 0xFFFFu) {
       continue;
+    }
     out[count++] = op | (hwIndex << 21) | ((hwImm ^ negMask) << 5) | rd;
     op = kMovK;
@@ -503,18 +519,22 @@ static inline bool armCheckMemBaseIndexRel(const Mem& mem) noexcept {
   RegType baseType = mem.baseType();
   RegType indexType = mem.indexType();
-  if (!Support::bitTest(kBaseMask, baseType))
+  if (!Support::bitTest(kBaseMask, baseType)) {
     return false;
+  }
   if (baseType > RegType::kLabelTag) {
     // Index allows either GpW or GpX.
-    if (!Support::bitTest(kIndexMask, indexType))
+    if (!Support::bitTest(kIndexMask, indexType)) {
       return false;
+    }
-    if (indexType == RegType::kNone)
+    if (indexType == RegType::kNone) {
       return true;
-    else
+    }
+    else {
       return !mem.hasOffset();
+    }
   }
   else {
     // No index register allowed if this is a PC relative address (literal).
@@ -542,8 +562,9 @@ static inline bool pickFpOpcode(const Vec& reg, uint32_t sOp, uint32_t sHf, uint
   if (!reg.hasElementType()) {
     // Scalar operation [HSD].
     uint32_t sz = diff(reg.type(), RegType::kARM_VecH);
-    if (sz > 2u || !Support::bitTest(szBits[sHf].sizeMask, sz))
+    if (sz > 2u || !Support::bitTest(szBits[sHf].sizeMask, sz)) {
       return false;
+    }
     opcode->reset(szBits[sHf].mask[sz] ^ sOp);
     *szOut = sz;
@@ -554,8 +575,9 @@ static inline bool pickFpOpcode(const Vec& reg, uint32_t sOp, uint32_t sHf, uint
     uint32_t q = diff(reg.type(), RegType::kARM_VecD);
     uint32_t sz = diff(reg.elementType(), VecElementType::kH);
-    if (q > 1u || sz > 2u || !Support::bitTest(szBits[vHf].sizeMask, sz))
+    if (q > 1u || sz > 2u || !Support::bitTest(szBits[vHf].sizeMask, sz)) {
       return false;
+    }
     opcode->reset(szBits[vHf].mask[sz] ^ (vOp | (q << kQBitIndex)));
     *szOut = sz;
@@ -773,8 +795,9 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co
   CondCode instCC = BaseInst::extractARMCondCode(instId);
   instId = instId & uint32_t(InstIdParts::kRealId);
-  if (instId >= Inst::_kIdCount)
+  if (instId >= Inst::_kIdCount) {
     instId = 0;
+  }
   const InstDB::InstInfo* instInfo = &InstDB::_instInfoTable[instId];
   uint32_t encodingIndex = instInfo->_encodingDataIndex;
@@ -794,21 +817,25 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co
   uint64_t offsetValue;              // Offset value (if known).
   if (ASMJIT_UNLIKELY(Support::test(options, kRequiresSpecialHandling))) {
-    if (ASMJIT_UNLIKELY(!_code))
+    if (ASMJIT_UNLIKELY(!_code)) {
       return reportError(DebugUtils::errored(kErrorNotInitialized));
+    }
     // Unknown instruction.
-    if (ASMJIT_UNLIKELY(instId == 0))
+    if (ASMJIT_UNLIKELY(instId == 0)) {
       goto InvalidInstruction;
+    }
     // Condition code can only be used with 'B' instruction.
-    if (ASMJIT_UNLIKELY(instCC != CondCode::kAL && instId != Inst::kIdB))
+    if (ASMJIT_UNLIKELY(instCC != CondCode::kAL && instId != Inst::kIdB)) {
       goto InvalidInstruction;
+    }
     // Grow request, happens rarely.
     err = writer.ensureSpace(this, 4);
-    if (ASMJIT_UNLIKELY(err))
+    if (ASMJIT_UNLIKELY(err)) {
       goto Failed;
+    }
 #ifndef ASMJIT_NO_VALIDATION
     // Strict validation.
@@ -817,8 +844,9 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co
       EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
       err = _funcs.validate(BaseInst(instId, options, _extraReg), opArray, Globals::kMaxOpCount, ValidationFlags::kNone);
-      if (ASMJIT_UNLIKELY(err))
+      if (ASMJIT_UNLIKELY(err)) {
         goto Failed;
+      }
     }
 #endif
   }
@@ -4932,11 +4960,13 @@ EmitOp_Multiple:
   {
     ASMJIT_ASSERT(multipleOpCount > 0);
     err = writer.ensureSpace(this, multipleOpCount * 4u);
-    if (ASMJIT_UNLIKELY(err))
+    if (ASMJIT_UNLIKELY(err)) {
       goto Failed;
+    }
-    for (uint32_t i = 0; i < multipleOpCount; i++)
+    for (uint32_t i = 0; i < multipleOpCount; i++) {
       writer.emit32uLE(multipleOpData[i]);
+    }
     goto EmitDone;
   }
@@ -4946,28 +4976,33 @@ EmitOp_Multiple:
   // --------------------------------------------------------------------------
 EmitOp_MemBase_Rn5:
-  if (!checkMemBase(rmRel->as<Mem>()))
+  if (!checkMemBase(rmRel->as<Mem>())) {
     goto InvalidAddress;
+  }
   opcode.addReg(rmRel->as<Mem>().baseId(), 5);
   goto EmitOp;
 EmitOp_MemBaseNoImm_Rn5:
-  if (!checkMemBase(rmRel->as<Mem>()) || rmRel->as<Mem>().hasIndex())
+  if (!checkMemBase(rmRel->as<Mem>()) || rmRel->as<Mem>().hasIndex()) {
     goto InvalidAddress;
+  }
-  if (rmRel->as<Mem>().hasOffset())
+  if (rmRel->as<Mem>().hasOffset()) {
     goto InvalidDisplacement;
+  }
   opcode.addReg(rmRel->as<Mem>().baseId(), 5);
   goto EmitOp;
 EmitOp_MemBaseIndex_Rn5_Rm16:
-  if (!rmRel->as<Mem>().hasBaseReg())
+  if (!rmRel->as<Mem>().hasBaseReg()) {
     goto InvalidAddress;
+  }
-  if (rmRel->as<Mem>().indexId() > 30 && rmRel->as<Mem>().indexId() != Gp::kIdZr)
+  if (rmRel->as<Mem>().indexId() > 30 && rmRel->as<Mem>().indexId() != Gp::kIdZr) {
     goto InvalidPhysId;
+  }
   opcode.addReg(rmRel->as<Mem>().indexId(), 16);
   opcode.addReg(rmRel->as<Mem>().baseId(), 5);
@@ -4992,8 +5027,9 @@ EmitOp_Rel:
   }
   LabelEntry* label = _code->labelEntry(labelId);
-  if (ASMJIT_UNLIKELY(!label))
+  if (ASMJIT_UNLIKELY(!label)) {
     goto InvalidLabel;
+  }
   if (offsetFormat.type() == OffsetType::kAArch64_ADRP) {
     // TODO: [ARM] Always create relocation entry.
@@ -5009,8 +5045,9 @@ EmitOp_Rel:
     size_t codeOffset = writer.offsetFrom(_bufferData);
     LabelLink* link = _code->newLabelLink(label, _section->id(), codeOffset, intptr_t(labelOffset), offsetFormat);
-    if (ASMJIT_UNLIKELY(!link))
+    if (ASMJIT_UNLIKELY(!link)) {
       goto OutOfMemory;
+    }
     goto EmitOp;
   }
@@ -5027,8 +5064,9 @@ EmitOp_Rel:
       // Create a new RelocEntry as we cannot calculate the offset right now.
       RelocEntry* re;
       err = _code->newRelocEntry(&re, RelocType::kAbsToRel);
-      if (err)
+      if (err) {
        goto Failed;
+      }
       re->_sourceSectionId = _section->id();
       re->_sourceOffset = codeOffset;
@@ -5039,8 +5077,9 @@ EmitOp_Rel:
     else {
       uint64_t pc = baseAddress + codeOffset;
-      if (offsetFormat.type() == OffsetType::kAArch64_ADRP)
+      if (offsetFormat.type() == OffsetType::kAArch64_ADRP) {
         pc &= ~uint64_t(4096 - 1);
+      }
       offsetValue = targetOffset - pc;
       goto EmitOp_DispImm;
@@ -5051,12 +5090,14 @@ EmitOp_Rel:
 EmitOp_DispImm:
   {
-    if ((offsetValue & Support::lsbMask<uint32_t>(offsetFormat.immDiscardLsb())) != 0)
+    if ((offsetValue & Support::lsbMask<uint32_t>(offsetFormat.immDiscardLsb())) != 0) {
       goto InvalidDisplacement;
+    }
     int64_t dispImm64 = int64_t(offsetValue) >> offsetFormat.immDiscardLsb();
-    if (!Support::isEncodableOffset64(dispImm64, offsetFormat.immBitCount()))
+    if (!Support::isEncodableOffset64(dispImm64, offsetFormat.immBitCount())) {
      goto InvalidDisplacement;
+    }
     uint32_t dispImm32 = uint32_t(dispImm64 & Support::lsbMask<uint32_t>(offsetFormat.immBitCount()));
     switch (offsetFormat.type()) {
@@ -5094,8 +5135,9 @@ EmitOp:
 EmitDone:
   if (Support::test(options, InstOptions::kReserved)) {
 #ifndef ASMJIT_NO_LOGGING
-    if (_logger)
+    if (_logger) {
       EmitterUtils::logInstructionEmitted(this, BaseInst::composeARMInstId(instId, instCC), options, o0, o1, o2, opExt, 0, 0, writer.cursor());
+    }
 #endif
   }
@@ -5141,21 +5183,26 @@ Failed:
 Error Assembler::align(AlignMode alignMode, uint32_t alignment) {
   constexpr uint32_t kNopA64 = 0xD503201Fu; // [11010101|00000011|00100000|00011111].
-  if (ASMJIT_UNLIKELY(!_code))
+  if (ASMJIT_UNLIKELY(!_code)) {
     return reportError(DebugUtils::errored(kErrorNotInitialized));
+  }
-  if (ASMJIT_UNLIKELY(uint32_t(alignMode) > uint32_t(AlignMode::kMaxValue)))
+  if (ASMJIT_UNLIKELY(uint32_t(alignMode) > uint32_t(AlignMode::kMaxValue))) {
     return reportError(DebugUtils::errored(kErrorInvalidArgument));
+  }
-  if (alignment <= 1)
+  if (alignment <= 1) {
     return kErrorOk;
+  }
-  if (ASMJIT_UNLIKELY(alignment > Globals::kMaxAlignment || !Support::isPowerOf2(alignment)))
+  if (ASMJIT_UNLIKELY(alignment > Globals::kMaxAlignment || !Support::isPowerOf2(alignment))) {
     return reportError(DebugUtils::errored(kErrorInvalidArgument));
+  }
   uint32_t i = uint32_t(Support::alignUpDiff<size_t>(offset(), alignment));
-  if (i == 0)
+  if (i == 0) {
     return kErrorOk;
+  }
   CodeWriter writer(this);
   ASMJIT_PROPAGATE(writer.ensureSpace(this, i));
@@ -5164,8 +5211,9 @@ Error Assembler::align(AlignMode alignMode, uint32_t alignment) {
     case AlignMode::kCode: {
       uint32_t pattern = kNopA64;
-      if (ASMJIT_UNLIKELY(offset() & 0x3u))
+      if (ASMJIT_UNLIKELY(offset() & 0x3u)) {
        return DebugUtils::errored(kErrorInvalidState);
+      }
       while (i >= 4) {
         writer.emit32uLE(pattern);

View File

@@ -21,7 +21,7 @@ class ASMJIT_VIRTAPI Assembler
   public EmitterExplicitT<Assembler> {
 public:
-  typedef BaseAssembler Base;
+  using Base = BaseAssembler;
   //! \name Construction & Destruction
   //! \{
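The `typedef BaseAssembler Base;` style declarations are rewritten as `using Base = BaseAssembler;` alias declarations here and in the files that follow. The two are equivalent for plain aliases, but the alias form reads left-to-right and also extends to alias templates, which `typedef` cannot express. A tiny illustration with made-up names:

```cpp
#include <cstdint>
#include <vector>

using Base = std::uint32_t;      // same meaning as: typedef std::uint32_t Base;

template<typename T>
using Pool = std::vector<T>;     // alias template - has no typedef equivalent

static_assert(sizeof(Base) == sizeof(std::uint32_t), "an alias behaves exactly like the original type");
```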

View File

@@ -17,8 +17,9 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
 Builder::Builder(CodeHolder* code) noexcept : BaseBuilder() {
   _archMask = uint64_t(1) << uint32_t(Arch::kAArch64);
-  if (code)
+  if (code) {
     code->attach(this);
+  }
 }
 Builder::~Builder() noexcept {}

View File

@@ -23,7 +23,7 @@ class ASMJIT_VIRTAPI Builder
   public EmitterExplicitT<Builder> {
 public:
   ASMJIT_NONCOPYABLE(Builder)
-  typedef BaseBuilder Base;
+  using Base = BaseBuilder;
   //! \name Construction & Destruction
   //! \{

View File

@@ -18,8 +18,9 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
 Compiler::Compiler(CodeHolder* code) noexcept : BaseCompiler() {
   _archMask = uint64_t(1) << uint32_t(Arch::kAArch64);
-  if (code)
+  if (code) {
     code->attach(this);
+  }
 }
 Compiler::~Compiler() noexcept {}

View File

@@ -24,7 +24,7 @@ class ASMJIT_VIRTAPI Compiler
   public EmitterExplicitT<Compiler> {
 public:
   ASMJIT_NONCOPYABLE(Compiler)
-  typedef BaseCompiler Base;
+  using Base = BaseCompiler;
   //! \name Construction & Destruction
   //! \{

View File

@@ -54,14 +54,17 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
       return emitter->ldr(dst.as<Gp>().x(), src);
     default: {
-      if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId))
+      if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) {
         return emitter->ldr(dst.as<Vec>().s(), src);
+      }
-      if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId))
+      if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) {
         return emitter->ldr(dst.as<Vec>().d(), src);
+      }
-      if (TypeUtils::isVec128(typeId))
+      if (TypeUtils::isVec128(typeId)) {
         return emitter->ldr(dst.as<Vec>().q(), src);
+      }
       break;
     }
@@ -90,14 +93,17 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
       return emitter->str(src.as<Gp>().x(), dst);
     default: {
-      if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId))
+      if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) {
         return emitter->str(src.as<Vec>().s(), dst);
+      }
-      if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId))
+      if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) {
         return emitter->str(src.as<Vec>().d(), dst);
+      }
-      if (TypeUtils::isVec128(typeId))
+      if (TypeUtils::isVec128(typeId)) {
         return emitter->str(src.as<Vec>().q(), dst);
+      }
       break;
     }
@@ -120,14 +126,17 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
       return emitter->mov(dst.as<Gp>().x(), src.as<Gp>().x());
     default: {
-      if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId))
+      if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) {
         return emitter->fmov(dst.as<Vec>().s(), src.as<Vec>().s());
+      }
-      if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId))
+      if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) {
         return emitter->mov(dst.as<Vec>().b8(), src.as<Vec>().b8());
+      }
-      if (TypeUtils::isVec128(typeId))
+      if (TypeUtils::isVec128(typeId)) {
         return emitter->mov(dst.as<Vec>().b16(), src.as<Vec>().b16());
+      }
       break;
     }
@@ -340,10 +349,12 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitProlog(const FuncFrame& frame) {
         mem.makePreIndex();
       }
-      if (pair.ids[1] == BaseReg::kIdBad)
+      if (pair.ids[1] == BaseReg::kIdBad) {
         ASMJIT_PROPAGATE(emitter->emit(insts.singleInstId, regs[0], mem));
-      else
+      }
+      else {
         ASMJIT_PROPAGATE(emitter->emit(insts.pairInstId, regs[0], regs[1], mem));
+      }
       mem.resetOffsetMode();
@@ -422,10 +433,12 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitEpilog(const FuncFrame& frame) {
         mem.makePostIndex();
       }
-      if (pair.ids[1] == BaseReg::kIdBad)
+      if (pair.ids[1] == BaseReg::kIdBad) {
         ASMJIT_PROPAGATE(emitter->emit(insts.singleInstId, regs[0], mem));
-      else
+      }
+      else {
         ASMJIT_PROPAGATE(emitter->emit(insts.pairInstId, regs[0], regs[1], mem));
+      }
       mem.resetOffsetMode();
     }

View File

@@ -25,18 +25,24 @@ static inline bool shouldTreatAsCDecl(CallConvId ccId) noexcept {
 }
 static RegType regTypeFromFpOrVecTypeId(TypeId typeId) noexcept {
-  if (typeId == TypeId::kFloat32)
+  if (typeId == TypeId::kFloat32) {
     return RegType::kARM_VecS;
-  else if (typeId == TypeId::kFloat64)
+  }
+  else if (typeId == TypeId::kFloat64) {
     return RegType::kARM_VecD;
-  else if (TypeUtils::isVec32(typeId))
+  }
+  else if (TypeUtils::isVec32(typeId)) {
     return RegType::kARM_VecS;
-  else if (TypeUtils::isVec64(typeId))
+  }
+  else if (TypeUtils::isVec64(typeId)) {
     return RegType::kARM_VecD;
-  else if (TypeUtils::isVec128(typeId))
+  }
+  else if (TypeUtils::isVec128(typeId)) {
     return RegType::kARM_VecV;
-  else
+  }
+  else {
     return RegType::kNone;
+  }
 }
 ASMJIT_FAVOR_SIZE Error initCallConv(CallConv& cc, CallConvId ccId, const Environment& environment) noexcept {
@@ -116,8 +122,9 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si
       default: {
         RegType regType = regTypeFromFpOrVecTypeId(typeId);
-        if (regType == RegType::kNone)
+        if (regType == RegType::kNone) {
           return DebugUtils::errored(kErrorInvalidRegType);
+        }
         func._rets[valueIndex].initReg(regType, valueIndex, typeId);
         break;
@@ -139,8 +146,9 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si
     if (TypeUtils::isInt(typeId)) {
       uint32_t regId = BaseReg::kIdBad;
-      if (gpzPos < CallConv::kMaxRegArgsPerGroup)
+      if (gpzPos < CallConv::kMaxRegArgsPerGroup) {
         regId = cc._passedOrder[RegGroup::kGp].id[gpzPos];
+      }
       if (regId != BaseReg::kIdBad) {
         RegType regType = typeId <= TypeId::kUInt32 ? RegType::kARM_GpW : RegType::kARM_GpX;
@@ -150,8 +158,9 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si
       }
       else {
         uint32_t size = Support::max<uint32_t>(TypeUtils::sizeOf(typeId), minStackArgSize);
-        if (size >= 8)
+        if (size >= 8) {
          stackOffset = Support::alignUp(stackOffset, 8);
+        }
         arg.assignStackOffset(int32_t(stackOffset));
         stackOffset += size;
       }
@@ -161,13 +170,15 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si
     if (TypeUtils::isFloat(typeId) || TypeUtils::isVec(typeId)) {
       uint32_t regId = BaseReg::kIdBad;
-      if (vecPos < CallConv::kMaxRegArgsPerGroup)
+      if (vecPos < CallConv::kMaxRegArgsPerGroup) {
         regId = cc._passedOrder[RegGroup::kVec].id[vecPos];
+      }
       if (regId != BaseReg::kIdBad) {
         RegType regType = regTypeFromFpOrVecTypeId(typeId);
-        if (regType == RegType::kNone)
+        if (regType == RegType::kNone) {
          return DebugUtils::errored(kErrorInvalidRegType);
+        }
         arg.initTypeId(typeId);
         arg.assignRegData(regType, regId);
@@ -176,8 +187,9 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si
       }
       else {
         uint32_t size = Support::max<uint32_t>(TypeUtils::sizeOf(typeId), minStackArgSize);
-        if (size >= 8)
+        if (size >= 8) {
          stackOffset = Support::alignUp(stackOffset, 8);
+        }
         arg.assignStackOffset(int32_t(stackOffset));
         stackOffset += size;
       }

View File

@@ -802,7 +802,7 @@ namespace Predicate {
 //! Address translate options (AT).
 namespace AT {
-  static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
+  static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
     return (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0);
   }
@@ -860,7 +860,7 @@ namespace DB {
 //! Data cache maintenance options.
 namespace DC {
-  static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
+  static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
     return (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0);
   }
@@ -899,7 +899,7 @@ namespace DC {
 //! Instruction cache maintenance options.
 namespace IC {
-  static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
+  static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
     return (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0);
   }
@@ -953,7 +953,7 @@ namespace PSB {
 }
 namespace TLBI {
-  static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
+  static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
     return (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0);
   }
@@ -1052,7 +1052,7 @@ namespace TSB {
 //! Processor state access through MSR.
 namespace PState {
   //! Encodes a pstate from `op0` and `op1`.
-  static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op0, uint32_t op1) noexcept {
+  static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op0, uint32_t op1) noexcept {
     return (op0 << 3) | (op1 << 0);
   }
@@ -1081,17 +1081,17 @@ namespace SysReg {
   };
   //! Encodes a system register from `op0`, `op1`, `cRn`, `cRm`, and `op2` fields.
-  static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(uint32_t op0, uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
+  static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op0, uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
     return (op0 << 14) | (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0);
   }
   //! Encodes a system register from `fields`.
-  static ASMJIT_INLINE_NODEBUG constexpr uint32_t encode(const Fields& fields) noexcept {
+  static ASMJIT_INLINE_CONSTEXPR uint32_t encode(const Fields& fields) noexcept {
     return encode(fields.op0, fields.op1, fields.cRn, fields.cRm, fields.op2);
   }
   //! Decodes a system register to \ref Fields.
-  static ASMJIT_INLINE_NODEBUG constexpr Fields decode(uint32_t id) noexcept {
+  static ASMJIT_INLINE_CONSTEXPR Fields decode(uint32_t id) noexcept {
     return Fields {
       uint8_t((id >> 14) & 0x3u),
       uint8_t((id >> 11) & 0x7u),
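Since `encode()` stays `constexpr` (now spelled through `ASMJIT_INLINE_CONSTEXPR`), system-register identifiers can still be folded at compile time. The self-contained sketch below re-implements the same bit layout shown above purely for illustration, rather than calling asmjit directly:

```cpp
#include <cstdint>

// Stand-in with the same layout as the encode() above, so the sketch compiles on its own.
static constexpr uint32_t encodeSysReg(uint32_t op0, uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
  return (op0 << 14) | (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0);
}

// The whole identifier collapses to a constant at compile time.
static_assert(encodeSysReg(3, 3, 4, 2, 0) == ((3u << 14) | (3u << 11) | (4u << 7) | (2u << 3)), "bit layout");
```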

View File

@@ -23,8 +23,9 @@ namespace InstInternal {
 #ifndef ASMJIT_NO_TEXT
 Error instIdToString(InstId instId, InstStringifyOptions options, String& output) noexcept {
   uint32_t realId = instId & uint32_t(InstIdParts::kRealId);
-  if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId)))
+  if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId))) {
     return DebugUtils::errored(kErrorInvalidInstruction);
+  }
   return InstNameUtils::decode(InstDB::_instNameIndexTable[realId], options, InstDB::_instNameStringTable, output);
 }
@@ -100,8 +101,9 @@ Error queryRWInfo(const BaseInst& inst, const Operand_* operands, size_t opCount
   // Get the instruction data.
   uint32_t realId = inst.id() & uint32_t(InstIdParts::kRealId);
-  if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId)))
+  if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId))) {
     return DebugUtils::errored(kErrorInvalidInstruction);
+  }
   out->_instFlags = InstRWFlags::kNone;
   out->_opCount = uint8_t(opCount);
@@ -139,10 +141,12 @@ Error queryRWInfo(const BaseInst& inst, const Operand_* operands, size_t opCount
       op._consecutiveLeadCount = 0;
       if (srcOp.isReg()) {
-        if (i == 0)
+        if (i == 0) {
          op._consecutiveLeadCount = uint8_t(opCount - 1);
-        else
+        }
+        else {
          op.addOpFlags(OpRWFlags::kConsecutive);
+        }
       }
       else {
         const Mem& memOp = srcOp.as<Mem>();

View File

@@ -47,9 +47,13 @@ struct InstInfo {
   //! \name Accessors
   //! \{
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG uint32_t rwInfoIndex() const noexcept { return _rwInfoIndex; }
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG uint32_t flags() const noexcept { return _flags; }
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG bool hasFlag(uint32_t flag) const { return (_flags & flag) != 0; }
   //! \}
@@ -57,6 +61,7 @@ struct InstInfo {
 ASMJIT_VARAPI const InstInfo _instInfoTable[];
+[[nodiscard]]
 static inline const InstInfo& infoById(InstId instId) noexcept {
   instId &= uint32_t(InstIdParts::kRealId);
   ASMJIT_ASSERT(Inst::isDefinedId(instId));
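The accessors above gain `[[nodiscard]]`, a C++17 attribute: calling one of them and silently dropping the result now produces a compiler warning. A minimal illustration (not asmjit code):

```cpp
#include <cstdint>

[[nodiscard]] inline uint32_t computeFlags(uint32_t raw) noexcept { return raw & 0xFFu; }

inline void probe() {
  computeFlags(0x1234u);               // warning: ignoring return value of a 'nodiscard' function
  (void)computeFlags(0x1234u);         // explicit discard, no warning
  uint32_t f = computeFlags(0x1234u);  // normal use
  (void)f;
}
```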

View File

@@ -264,7 +264,7 @@ namespace EncodingData {
 #define M_OPCODE(field, bits) \
   uint32_t _##field : bits; \
-  ASMJIT_INLINE_NODEBUG constexpr uint32_t field() const noexcept { return uint32_t(_##field) << (32 - bits); }
+  ASMJIT_INLINE_CONSTEXPR uint32_t field() const noexcept { return uint32_t(_##field) << (32 - bits); }
 struct BaseOp {
   uint32_t opcode;
@@ -477,20 +477,20 @@ struct BaseAtomicCasp {
   uint32_t xOffset : 5;
 };
-typedef BaseOp BaseBranchReg;
-typedef BaseOp BaseBranchRel;
-typedef BaseOp BaseBranchCmp;
-typedef BaseOp BaseBranchTst;
-typedef BaseOp BaseExtract;
-typedef BaseOp BaseBfc;
-typedef BaseOp BaseBfi;
-typedef BaseOp BaseBfx;
-typedef BaseOp BaseCCmp;
-typedef BaseOp BaseCInc;
-typedef BaseOp BaseCSet;
-typedef BaseOp BaseCSel;
-typedef BaseOp BaseMovKNZ;
-typedef BaseOp BaseMull;
+using BaseBranchReg = BaseOp;
+using BaseBranchRel = BaseOp;
+using BaseBranchCmp = BaseOp;
+using BaseBranchTst = BaseOp;
+using BaseExtract = BaseOp;
+using BaseBfc = BaseOp;
+using BaseBfi = BaseOp;
+using BaseBfx = BaseOp;
+using BaseCCmp = BaseOp;
+using BaseCInc = BaseOp;
+using BaseCSet = BaseOp;
+using BaseCSel = BaseOp;
+using BaseMovKNZ = BaseOp;
+using BaseMull = BaseOp;
 struct FSimdGeneric {
   uint32_t _scalarOp : 28;
@@ -504,9 +504,9 @@ struct FSimdGeneric {
   constexpr uint32_t vectorHf() const noexcept { return uint32_t(_vectorHf); }
 };
-typedef FSimdGeneric FSimdVV;
-typedef FSimdGeneric FSimdVVV;
-typedef FSimdGeneric FSimdVVVV;
+using FSimdVV = FSimdGeneric;
+using FSimdVVV = FSimdGeneric;
+using FSimdVVVV = FSimdGeneric;
 struct FSimdSV {
   uint32_t opcode;

View File

@@ -49,17 +49,27 @@ public:
   };
   //! Test whether this register is ZR register.
-  ASMJIT_INLINE_NODEBUG constexpr bool isZR() const noexcept { return id() == kIdZr; }
+  [[nodiscard]]
+  ASMJIT_INLINE_CONSTEXPR bool isZR() const noexcept { return id() == kIdZr; }
   //! Test whether this register is SP register.
-  ASMJIT_INLINE_NODEBUG constexpr bool isSP() const noexcept { return id() == kIdSp; }
+  [[nodiscard]]
+  ASMJIT_INLINE_CONSTEXPR bool isSP() const noexcept { return id() == kIdSp; }
   //! Cast this register to a 32-bit W register (returns a new operand).
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG GpW w() const noexcept;
   //! \overload
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG GpW r32() const noexcept;
   //! Cast this register to a 64-bit X register (returns a new operand).
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG GpX x() const noexcept;
   //! \overload
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG GpX r64() const noexcept;
 };
@@ -118,117 +128,181 @@ public:
   //! \endcond
   //! Returns whether the register has element type or element index (or both).
-  ASMJIT_INLINE_NODEBUG constexpr bool hasElementTypeOrIndex() const noexcept { return _signature.hasField<kSignatureRegElementTypeMask | kSignatureRegElementFlagMask>(); }
+  [[nodiscard]]
+  ASMJIT_INLINE_CONSTEXPR bool hasElementTypeOrIndex() const noexcept { return _signature.hasField<kSignatureRegElementTypeMask | kSignatureRegElementFlagMask>(); }
   //! Returns whether the vector register has associated a vector element type.
-  ASMJIT_INLINE_NODEBUG constexpr bool hasElementType() const noexcept { return _signature.hasField<kSignatureRegElementTypeMask>(); }
+  [[nodiscard]]
+  ASMJIT_INLINE_CONSTEXPR bool hasElementType() const noexcept { return _signature.hasField<kSignatureRegElementTypeMask>(); }
   //! Returns vector element type of the register.
-  ASMJIT_INLINE_NODEBUG constexpr VecElementType elementType() const noexcept { return VecElementType(_signature.getField<kSignatureRegElementTypeMask>()); }
+  [[nodiscard]]
+  ASMJIT_INLINE_CONSTEXPR VecElementType elementType() const noexcept { return VecElementType(_signature.getField<kSignatureRegElementTypeMask>()); }
   //! Sets vector element type of the register to `elementType`.
   ASMJIT_INLINE_NODEBUG void setElementType(VecElementType elementType) noexcept { _signature.setField<kSignatureRegElementTypeMask>(uint32_t(elementType)); }
   //! Resets vector element type to none.
   ASMJIT_INLINE_NODEBUG void resetElementType() noexcept { _signature.setField<kSignatureRegElementTypeMask>(0); }
-  ASMJIT_INLINE_NODEBUG constexpr bool isVecB8() const noexcept {
+  [[nodiscard]]
+  ASMJIT_INLINE_CONSTEXPR bool isVecB8() const noexcept {
     return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementB);
   }
-  ASMJIT_INLINE_NODEBUG constexpr bool isVecH4() const noexcept {
+  [[nodiscard]]
+  ASMJIT_INLINE_CONSTEXPR bool isVecH4() const noexcept {
     return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementH);
   }
-  ASMJIT_INLINE_NODEBUG constexpr bool isVecS2() const noexcept {
+  [[nodiscard]]
+  ASMJIT_INLINE_CONSTEXPR bool isVecS2() const noexcept {
     return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecD>::kSignature | kSignatureElementS);
   }
-  ASMJIT_INLINE_NODEBUG constexpr bool isVecD1() const noexcept {
+  [[nodiscard]]
+  ASMJIT_INLINE_CONSTEXPR bool isVecD1() const noexcept {
     return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecD>::kSignature);
   }
-  ASMJIT_INLINE_NODEBUG constexpr bool isVecB16() const noexcept {
+  [[nodiscard]]
+  ASMJIT_INLINE_CONSTEXPR bool isVecB16() const noexcept {
     return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementB);
   }
-  ASMJIT_INLINE_NODEBUG constexpr bool isVecH8() const noexcept {
+  [[nodiscard]]
+  ASMJIT_INLINE_CONSTEXPR bool isVecH8() const noexcept {
     return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementH);
   }
-  ASMJIT_INLINE_NODEBUG constexpr bool isVecS4() const noexcept {
+  [[nodiscard]]
+  ASMJIT_INLINE_CONSTEXPR bool isVecS4() const noexcept {
     return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementS);
   }
-  ASMJIT_INLINE_NODEBUG constexpr bool isVecD2() const noexcept {
+  [[nodiscard]]
+  ASMJIT_INLINE_CONSTEXPR bool isVecD2() const noexcept {
     return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementD);
   }
-  ASMJIT_INLINE_NODEBUG constexpr bool isVecB4x4() const noexcept {
+  [[nodiscard]]
+  ASMJIT_INLINE_CONSTEXPR bool isVecB4x4() const noexcept {
     return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementB4);
   }
-  ASMJIT_INLINE_NODEBUG constexpr bool isVecH2x4() const noexcept {
+  [[nodiscard]]
+  ASMJIT_INLINE_CONSTEXPR bool isVecH2x4() const noexcept {
     return _signature.subset(uint32_t(kBaseSignatureMask) | uint32_t(kSignatureRegElementTypeMask)) == (RegTraits<RegType::kARM_VecV>::kSignature | kSignatureElementH2);
   }
   //! Creates a cloned register with element access.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG Vec at(uint32_t elementIndex) const noexcept {
     return Vec((signature() & ~kSignatureRegElementIndexMask) | (elementIndex << kSignatureRegElementIndexShift) | kSignatureRegElementFlagMask, id());
   }
   //! Cast this register to an 8-bit B register (AArch64 only).
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecB b() const noexcept;
   //! Cast this register to a 16-bit H register (AArch64 only).
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecH h() const noexcept;
   //! Cast this register to a 32-bit S register.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecS s() const noexcept;
   //! Cast this register to a 64-bit D register.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecD d() const noexcept;
   //! Cast this register to a 128-bit Q register.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecV q() const noexcept;
   //! Cast this register to a 128-bit V register.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecV v() const noexcept;
   //! Casts this register to b (clone).
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG Vec v8() const noexcept;
   //! Casts this register to h (clone).
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG Vec v16() const noexcept;
   //! Casts this register to s (clone).
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG Vec v32() const noexcept;
   //! Casts this register to d (clone).
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG Vec v64() const noexcept;
   //! Casts this register to q (clone).
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG Vec v128() const noexcept;
   //! Cast this register to a 128-bit V.B[elementIndex] register.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecV b(uint32_t elementIndex) const noexcept;
   //! Cast this register to a 128-bit V.H[elementIndex] register.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecV h(uint32_t elementIndex) const noexcept;
   //! Cast this register to a 128-bit V.S[elementIndex] register.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecV s(uint32_t elementIndex) const noexcept;
   //! Cast this register to a 128-bit V.D[elementIndex] register.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecV d(uint32_t elementIndex) const noexcept;
   //! Cast this register to a 128-bit V.H2[elementIndex] register.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecV h2(uint32_t elementIndex) const noexcept;
   //! Cast this register to a 128-bit V.B4[elementIndex] register.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecV b4(uint32_t elementIndex) const noexcept;
   //! Cast this register to V.8B.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecD b8() const noexcept;
   //! Cast this register to V.16B.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecV b16() const noexcept;
   //! Cast this register to V.2H.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecS h2() const noexcept;
   //! Cast this register to V.4H.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecD h4() const noexcept;
   //! Cast this register to V.8H.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecV h8() const noexcept;
   //! Cast this register to V.2S.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecD s2() const noexcept;
   //! Cast this register to V.4S.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecV s4() const noexcept;
   //! Cast this register to V.2D.
+  [[nodiscard]]
   ASMJIT_INLINE_NODEBUG VecV d2() const noexcept;
-  static ASMJIT_INLINE_NODEBUG constexpr OperandSignature _makeElementAccessSignature(VecElementType elementType, uint32_t elementIndex) noexcept {
+  [[nodiscard]]
+  static ASMJIT_INLINE_CONSTEXPR OperandSignature _makeElementAccessSignature(VecElementType elementType, uint32_t elementIndex) noexcept {
     return OperandSignature{
       uint32_t(RegTraits<RegType::kARM_VecV>::kSignature) |
       uint32_t(kSignatureRegElementFlagMask) |
@@ -301,16 +375,24 @@ namespace regs {
 #endif
 //! Creates a 32-bit W register operand.
-static ASMJIT_INLINE_NODEBUG constexpr GpW w(uint32_t id) noexcept { return GpW(id); }
+[[nodiscard]]
+static ASMJIT_INLINE_CONSTEXPR GpW w(uint32_t id) noexcept { return GpW(id); }
 //! Creates a 64-bit X register operand.
-static ASMJIT_INLINE_NODEBUG constexpr GpX x(uint32_t id) noexcept { return GpX(id); }
+[[nodiscard]]
+static ASMJIT_INLINE_CONSTEXPR GpX x(uint32_t id) noexcept { return GpX(id); }
 //! Creates a 32-bit S register operand.
-static ASMJIT_INLINE_NODEBUG constexpr VecS s(uint32_t id) noexcept { return VecS(id); }
+[[nodiscard]]
+static ASMJIT_INLINE_CONSTEXPR VecS s(uint32_t id) noexcept { return VecS(id); }
 //! Creates a 64-bit D register operand.
-static ASMJIT_INLINE_NODEBUG constexpr VecD d(uint32_t id) noexcept { return VecD(id); }
+[[nodiscard]]
+static ASMJIT_INLINE_CONSTEXPR VecD d(uint32_t id) noexcept { return VecD(id); }
 //! Creates a 1282-bit V register operand.
-static ASMJIT_INLINE_NODEBUG constexpr VecV v(uint32_t id) noexcept { return VecV(id); }
+[[nodiscard]]
+static ASMJIT_INLINE_CONSTEXPR VecV v(uint32_t id) noexcept { return VecV(id); }
 static constexpr GpW w0 = GpW(0);
 static constexpr GpW w1 = GpW(1);
@@ -589,22 +671,36 @@ using namespace regs;
//! \{ //! \{
//! Constructs a `UXTB #value` extend and shift (unsigned byte extend) (AArch64). //! Constructs a `UXTB #value` extend and shift (unsigned byte extend) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Shift uxtb(uint32_t value) noexcept { return Shift(ShiftOp::kUXTB, value); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Shift uxtb(uint32_t value) noexcept { return Shift(ShiftOp::kUXTB, value); }
//! Constructs a `UXTH #value` extend and shift (unsigned hword extend) (AArch64). //! Constructs a `UXTH #value` extend and shift (unsigned hword extend) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Shift uxth(uint32_t value) noexcept { return Shift(ShiftOp::kUXTH, value); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Shift uxth(uint32_t value) noexcept { return Shift(ShiftOp::kUXTH, value); }
//! Constructs a `UXTW #value` extend and shift (unsigned word extend) (AArch64). //! Constructs a `UXTW #value` extend and shift (unsigned word extend) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Shift uxtw(uint32_t value) noexcept { return Shift(ShiftOp::kUXTW, value); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Shift uxtw(uint32_t value) noexcept { return Shift(ShiftOp::kUXTW, value); }
//! Constructs a `UXTX #value` extend and shift (unsigned dword extend) (AArch64). //! Constructs a `UXTX #value` extend and shift (unsigned dword extend) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Shift uxtx(uint32_t value) noexcept { return Shift(ShiftOp::kUXTX, value); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Shift uxtx(uint32_t value) noexcept { return Shift(ShiftOp::kUXTX, value); }
//! Constructs a `SXTB #value` extend and shift (signed byte extend) (AArch64). //! Constructs a `SXTB #value` extend and shift (signed byte extend) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Shift sxtb(uint32_t value) noexcept { return Shift(ShiftOp::kSXTB, value); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Shift sxtb(uint32_t value) noexcept { return Shift(ShiftOp::kSXTB, value); }
//! Constructs a `SXTH #value` extend and shift (signed hword extend) (AArch64). //! Constructs a `SXTH #value` extend and shift (signed hword extend) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Shift sxth(uint32_t value) noexcept { return Shift(ShiftOp::kSXTH, value); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Shift sxth(uint32_t value) noexcept { return Shift(ShiftOp::kSXTH, value); }
//! Constructs a `SXTW #value` extend and shift (signed word extend) (AArch64). //! Constructs a `SXTW #value` extend and shift (signed word extend) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Shift sxtw(uint32_t value) noexcept { return Shift(ShiftOp::kSXTW, value); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Shift sxtw(uint32_t value) noexcept { return Shift(ShiftOp::kSXTW, value); }
//! Constructs a `SXTX #value` extend and shift (signed dword extend) (AArch64). //! Constructs a `SXTX #value` extend and shift (signed dword extend) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Shift sxtx(uint32_t value) noexcept { return Shift(ShiftOp::kSXTX, value); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Shift sxtx(uint32_t value) noexcept { return Shift(ShiftOp::kSXTX, value); }
//! \} //! \}
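These extend helpers are most often consumed by the memory-operand constructors defined below. A short sketch, assuming the `a64::ptr(base, index, shift)` overload from this header; the function name is illustrative:

#include <asmjit/a64.h>
using namespace asmjit;

// Illustrative only: address element `base[wIndex]` of a table of 32-bit values,
// sign-extending the 32-bit index and scaling it by 4 ([base, wIndex, sxtw #2]).
static a64::Mem elemAddr32(const a64::Gp& base, const a64::Gp& wIndex) noexcept {
  return a64::ptr(base, wIndex, a64::sxtw(2));
}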
@@ -612,49 +708,57 @@ static ASMJIT_INLINE_NODEBUG constexpr Shift sxtx(uint32_t value) noexcept { ret
//! \{ //! \{
//! Creates `[base, offset]` memory operand (offset mode) (AArch64). //! Creates `[base, offset]` memory operand (offset mode) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Gp& base, int32_t offset = 0) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Gp& base, int32_t offset = 0) noexcept {
return Mem(base, offset); return Mem(base, offset);
} }
//! Creates `[base, offset]!` memory operand (pre-index mode) (AArch64). //! Creates `[base, offset]!` memory operand (pre-index mode) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_pre(const Gp& base, int32_t offset = 0) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr_pre(const Gp& base, int32_t offset = 0) noexcept {
return Mem(base, offset, OperandSignature::fromValue<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPreIndex)); return Mem(base, offset, OperandSignature::fromValue<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPreIndex));
} }
//! Creates `[base], offset` memory operand (post-index mode) (AArch64). //! Creates `[base], offset` memory operand (post-index mode) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_post(const Gp& base, int32_t offset = 0) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr_post(const Gp& base, int32_t offset = 0) noexcept {
return Mem(base, offset, OperandSignature::fromValue<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPostIndex)); return Mem(base, offset, OperandSignature::fromValue<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPostIndex));
} }
//! Creates `[base, index]` memory operand (AArch64). //! Creates `[base, index]` memory operand (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Gp& base, const Gp& index) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Gp& base, const Gp& index) noexcept {
return Mem(base, index); return Mem(base, index);
} }
//! Creates `[base, index]!` memory operand (pre-index mode) (AArch64). //! Creates `[base, index]!` memory operand (pre-index mode) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_pre(const Gp& base, const Gp& index) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr_pre(const Gp& base, const Gp& index) noexcept {
return Mem(base, index, OperandSignature::fromValue<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPreIndex)); return Mem(base, index, OperandSignature::fromValue<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPreIndex));
} }
//! Creates `[base], index` memory operand (post-index mode) (AArch64). //! Creates `[base], index` memory operand (post-index mode) (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr_post(const Gp& base, const Gp& index) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr_post(const Gp& base, const Gp& index) noexcept {
return Mem(base, index, OperandSignature::fromValue<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPostIndex)); return Mem(base, index, OperandSignature::fromValue<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPostIndex));
} }
//! Creates `[base, index, SHIFT_OP #shift]` memory operand (AArch64). //! Creates `[base, index, SHIFT_OP #shift]` memory operand (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Gp& base, const Gp& index, const Shift& shift) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Gp& base, const Gp& index, const Shift& shift) noexcept {
return Mem(base, index, shift); return Mem(base, index, shift);
} }
//! Creates `[base, offset]` memory operand (AArch64). //! Creates `[base, offset]` memory operand (AArch64).
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const Label& base, int32_t offset = 0) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Label& base, int32_t offset = 0) noexcept {
return Mem(base, offset); return Mem(base, offset);
} }
// TODO: [ARM] PC + offset address. // TODO: [ARM] PC + offset address.
#if 0 #if 0
//! Creates `[PC + offset]` (relative) memory operand. //! Creates `[PC + offset]` (relative) memory operand.
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(const PC& pc, int32_t offset = 0) noexcept { static ASMJIT_INLINE_CONSTEXPR Mem ptr(const PC& pc, int32_t offset = 0) noexcept {
return Mem(pc, offset); return Mem(pc, offset);
} }
#endif #endif
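A usage sketch for the write-back constructors, assuming the standard `a64::Assembler` emitter (`str`/`ldr` taking a `Gp` and a `Mem`); the function name is illustrative and the error codes returned by the emitter are ignored for brevity:

#include <asmjit/a64.h>
using namespace asmjit;

// Illustrative only: spill X0 below SP and reload it using write-back addressing.
static void spillReloadX0(a64::Assembler& a) {
  a.str(a64::x0, a64::ptr_pre(a64::sp, -16));   // str x0, [sp, #-16]!
  a.ldr(a64::x0, a64::ptr_post(a64::sp, 16));   // ldr x0, [sp], #16
}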

View File

@@ -22,7 +22,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// ======================== // ========================
// TODO: [ARM] These should be shared with all backends. // TODO: [ARM] These should be shared with all backends.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static inline uint64_t raImmMaskFromSize(uint32_t size) noexcept { static inline uint64_t raImmMaskFromSize(uint32_t size) noexcept {
ASMJIT_ASSERT(size > 0 && size < 256); ASMJIT_ASSERT(size > 0 && size < 256);
static const uint64_t masks[] = { static const uint64_t masks[] = {
@@ -47,6 +47,7 @@ static const RegMask raConsecutiveLeadCountToRegMaskFilter[5] = {
0x1FFFFFFFu // [4] 4 consecutive registers. 0x1FFFFFFFu // [4] 4 consecutive registers.
}; };
[[nodiscard]]
static inline RATiedFlags raUseOutFlagsFromRWFlags(OpRWFlags rwFlags) noexcept { static inline RATiedFlags raUseOutFlagsFromRWFlags(OpRWFlags rwFlags) noexcept {
static constexpr RATiedFlags map[] = { static constexpr RATiedFlags map[] = {
RATiedFlags::kNone, RATiedFlags::kNone,
@@ -58,15 +59,18 @@ static inline RATiedFlags raUseOutFlagsFromRWFlags(OpRWFlags rwFlags) noexcept {
return map[uint32_t(rwFlags & OpRWFlags::kRW)]; return map[uint32_t(rwFlags & OpRWFlags::kRW)];
} }
[[nodiscard]]
static inline RATiedFlags raRegRwFlags(OpRWFlags flags) noexcept { static inline RATiedFlags raRegRwFlags(OpRWFlags flags) noexcept {
return raUseOutFlagsFromRWFlags(flags); return raUseOutFlagsFromRWFlags(flags);
} }
[[nodiscard]]
static inline RATiedFlags raMemBaseRwFlags(OpRWFlags flags) noexcept { static inline RATiedFlags raMemBaseRwFlags(OpRWFlags flags) noexcept {
constexpr uint32_t shift = Support::ConstCTZ<uint32_t(OpRWFlags::kMemBaseRW)>::value; constexpr uint32_t shift = Support::ConstCTZ<uint32_t(OpRWFlags::kMemBaseRW)>::value;
return raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> shift) & OpRWFlags::kRW); return raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> shift) & OpRWFlags::kRW);
} }
[[nodiscard]]
static inline RATiedFlags raMemIndexRwFlags(OpRWFlags flags) noexcept { static inline RATiedFlags raMemIndexRwFlags(OpRWFlags flags) noexcept {
constexpr uint32_t shift = Support::ConstCTZ<uint32_t(OpRWFlags::kMemIndexRW)>::value; constexpr uint32_t shift = Support::ConstCTZ<uint32_t(OpRWFlags::kMemIndexRW)>::value;
return raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> shift) & OpRWFlags::kRW); return raUseOutFlagsFromRWFlags(OpRWFlags(uint32_t(flags) >> shift) & OpRWFlags::kRW);
@@ -82,18 +86,31 @@ public:
: RACFGBuilderT<RACFGBuilder>(pass), : RACFGBuilderT<RACFGBuilder>(pass),
_arch(pass->cc()->arch()) {} _arch(pass->cc()->arch()) {}
[[nodiscard]]
inline Compiler* cc() const noexcept { return static_cast<Compiler*>(_cc); } inline Compiler* cc() const noexcept { return static_cast<Compiler*>(_cc); }
[[nodiscard]]
Error onInst(InstNode* inst, InstControlFlow& controlType, RAInstBuilder& ib) noexcept; Error onInst(InstNode* inst, InstControlFlow& controlType, RAInstBuilder& ib) noexcept;
[[nodiscard]]
Error onBeforeInvoke(InvokeNode* invokeNode) noexcept; Error onBeforeInvoke(InvokeNode* invokeNode) noexcept;
[[nodiscard]]
Error onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept; Error onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept;
[[nodiscard]]
Error moveImmToRegArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_, BaseReg* out) noexcept; Error moveImmToRegArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_, BaseReg* out) noexcept;
[[nodiscard]]
Error moveImmToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_) noexcept; Error moveImmToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const Imm& imm_) noexcept;
[[nodiscard]]
Error moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const BaseReg& reg) noexcept; Error moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& arg, const BaseReg& reg) noexcept;
[[nodiscard]]
Error onBeforeRet(FuncRetNode* funcRet) noexcept; Error onBeforeRet(FuncRetNode* funcRet) noexcept;
[[nodiscard]]
Error onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept; Error onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept;
}; };
@@ -105,20 +122,26 @@ static InstControlFlow getControlFlowType(InstId instId) noexcept {
switch (BaseInst::extractRealId(instId)) { switch (BaseInst::extractRealId(instId)) {
case Inst::kIdB: case Inst::kIdB:
case Inst::kIdBr: case Inst::kIdBr:
if (BaseInst::extractARMCondCode(instId) == CondCode::kAL) if (BaseInst::extractARMCondCode(instId) == CondCode::kAL) {
return InstControlFlow::kJump; return InstControlFlow::kJump;
else }
else {
return InstControlFlow::kBranch; return InstControlFlow::kBranch;
}
case Inst::kIdBl: case Inst::kIdBl:
case Inst::kIdBlr: case Inst::kIdBlr:
return InstControlFlow::kCall; return InstControlFlow::kCall;
case Inst::kIdCbz: case Inst::kIdCbz:
case Inst::kIdCbnz: case Inst::kIdCbnz:
case Inst::kIdTbz: case Inst::kIdTbz:
case Inst::kIdTbnz: case Inst::kIdTbnz:
return InstControlFlow::kBranch; return InstControlFlow::kBranch;
case Inst::kIdRet: case Inst::kIdRet:
return InstControlFlow::kReturn; return InstControlFlow::kReturn;
default: default:
return InstControlFlow::kRegular; return InstControlFlow::kRegular;
} }
@@ -180,12 +203,14 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB
if (opRwInfo.consecutiveLeadCount()) { if (opRwInfo.consecutiveLeadCount()) {
// There must be a single consecutive register lead, otherwise the RW data is invalid. // There must be a single consecutive register lead, otherwise the RW data is invalid.
if (consecutiveOffset != 0xFFFFFFFFu) if (consecutiveOffset != 0xFFFFFFFFu) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
// A consecutive lead register cannot be used as a consecutive +1/+2/+3 register, the registers must be distinct. // A consecutive lead register cannot be used as a consecutive +1/+2/+3 register, the registers must be distinct.
if (RATiedReg::consecutiveDataFromFlags(flags) != 0) if (RATiedReg::consecutiveDataFromFlags(flags) != 0) {
return DebugUtils::errored(kErrorNotConsecutiveRegs); return DebugUtils::errored(kErrorNotConsecutiveRegs);
}
flags |= RATiedFlags::kLeadConsecutive | RATiedReg::consecutiveDataToFlags(opRwInfo.consecutiveLeadCount() - 1); flags |= RATiedFlags::kLeadConsecutive | RATiedReg::consecutiveDataToFlags(opRwInfo.consecutiveLeadCount() - 1);
consecutiveOffset = 0; consecutiveOffset = 0;
@@ -208,8 +233,9 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB
flags |= RATiedFlags::kUseFixed; flags |= RATiedFlags::kUseFixed;
} }
else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) { else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) {
if (consecutiveOffset == 0xFFFFFFFFu) if (consecutiveOffset == 0xFFFFFFFFu) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
flags |= RATiedFlags::kUseConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset); flags |= RATiedFlags::kUseConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset);
} }
} }
@@ -220,8 +246,9 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB
flags |= RATiedFlags::kOutFixed; flags |= RATiedFlags::kOutFixed;
} }
else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) { else if (opRwInfo.hasOpFlag(OpRWFlags::kConsecutive)) {
if (consecutiveOffset == 0xFFFFFFFFu) if (consecutiveOffset == 0xFFFFFFFFu) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
flags |= RATiedFlags::kOutConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset); flags |= RATiedFlags::kOutConsecutive | RATiedReg::consecutiveDataToFlags(++consecutiveOffset);
} }
} }
@@ -231,19 +258,23 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB
// Only the first 0..15 registers can be used if the register uses // Only the first 0..15 registers can be used if the register uses
// element accessor that accesses half-words (h[0..7] elements). // element accessor that accesses half-words (h[0..7] elements).
if (instInfo.hasFlag(InstDB::kInstFlagVH0_15) && reg.as<Vec>().elementType() == VecElementType::kH) { if (instInfo.hasFlag(InstDB::kInstFlagVH0_15) && reg.as<Vec>().elementType() == VecElementType::kH) {
if (Support::test(flags, RATiedFlags::kUse)) if (Support::test(flags, RATiedFlags::kUse)) {
useId &= 0x0000FFFFu; useId &= 0x0000FFFFu;
else }
else {
outId &= 0x0000FFFFu; outId &= 0x0000FFFFu;
}
} }
} }
ASMJIT_PROPAGATE(ib.add(workReg, flags, useRegs, useId, useRewriteMask, outRegs, outId, outRewriteMask, opRwInfo.rmSize(), consecutiveParent)); ASMJIT_PROPAGATE(ib.add(workReg, flags, useRegs, useId, useRewriteMask, outRegs, outId, outRewriteMask, opRwInfo.rmSize(), consecutiveParent));
if (singleRegOps == i) if (singleRegOps == i) {
singleRegOps++; singleRegOps++;
}
if (Support::test(flags, RATiedFlags::kLeadConsecutive | RATiedFlags::kUseConsecutive | RATiedFlags::kOutConsecutive)) if (Support::test(flags, RATiedFlags::kLeadConsecutive | RATiedFlags::kUseConsecutive | RATiedFlags::kOutConsecutive)) {
consecutiveParent = workReg->workId(); consecutiveParent = workReg->workId();
}
} }
} }
else if (op.isMem()) { else if (op.isMem()) {
@@ -254,7 +285,9 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB
if (mem.isRegHome()) { if (mem.isRegHome()) {
RAWorkReg* workReg; RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(mem.baseId()), &workReg)); ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(mem.baseId()), &workReg));
_pass->getOrCreateStackSlot(workReg); if (ASMJIT_UNLIKELY(!_pass->getOrCreateStackSlot(workReg))) {
return DebugUtils::errored(kErrorOutOfMemory);
}
} }
else if (mem.hasBaseReg()) { else if (mem.hasBaseReg()) {
uint32_t vIndex = Operand::virtIdToIndex(mem.baseId()); uint32_t vIndex = Operand::virtIdToIndex(mem.baseId());
@@ -273,10 +306,12 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB
uint32_t useRewriteMask = 0; uint32_t useRewriteMask = 0;
uint32_t outRewriteMask = 0; uint32_t outRewriteMask = 0;
if (Support::test(flags, RATiedFlags::kUse)) if (Support::test(flags, RATiedFlags::kUse)) {
useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId)); useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId));
else }
else {
outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId)); outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId));
}
ASMJIT_PROPAGATE(ib.add(workReg, flags, allocable, useId, useRewriteMask, allocable, outId, outRewriteMask)); ASMJIT_PROPAGATE(ib.add(workReg, flags, allocable, useId, useRewriteMask, allocable, outId, outRewriteMask));
} }
@@ -299,10 +334,12 @@ Error RACFGBuilder::onInst(InstNode* inst, InstControlFlow& controlType, RAInstB
uint32_t useRewriteMask = 0; uint32_t useRewriteMask = 0;
uint32_t outRewriteMask = 0; uint32_t outRewriteMask = 0;
if (Support::test(flags, RATiedFlags::kUse)) if (Support::test(flags, RATiedFlags::kUse)) {
useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId])); useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId]));
else }
else {
outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId])); outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId]));
}
ASMJIT_PROPAGATE(ib.add(workReg, RATiedFlags::kUse | RATiedFlags::kRead, allocable, useId, useRewriteMask, allocable, outId, outRewriteMask)); ASMJIT_PROPAGATE(ib.add(workReg, RATiedFlags::kUse | RATiedFlags::kRead, allocable, useId, useRewriteMask, allocable, outId, outRewriteMask));
} }
@@ -374,8 +411,9 @@ Error RACFGBuilder::onBeforeInvoke(InvokeNode* invokeNode) noexcept {
if (fd.hasRet()) { if (fd.hasRet()) {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) { for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
const FuncValue& ret = fd.ret(valueIndex); const FuncValue& ret = fd.ret(valueIndex);
if (!ret) if (!ret) {
break; break;
}
const Operand& op = invokeNode->ret(valueIndex); const Operand& op = invokeNode->ret(valueIndex);
if (op.isReg()) { if (op.isReg()) {
@@ -411,14 +449,16 @@ Error RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) { for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
const FuncValuePack& argPack = fd.argPack(argIndex); const FuncValuePack& argPack = fd.argPack(argIndex);
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) { for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
if (!argPack[valueIndex]) if (!argPack[valueIndex]) {
continue; continue;
}
const FuncValue& arg = argPack[valueIndex]; const FuncValue& arg = argPack[valueIndex];
const Operand& op = invokeNode->arg(argIndex, valueIndex); const Operand& op = invokeNode->arg(argIndex, valueIndex);
if (op.isNone()) if (op.isNone()) {
continue; continue;
}
if (op.isReg()) { if (op.isReg()) {
const Reg& reg = op.as<Reg>(); const Reg& reg = op.as<Reg>();
@@ -427,8 +467,9 @@ Error RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept
if (arg.isIndirect()) { if (arg.isIndirect()) {
RegGroup regGroup = workReg->group(); RegGroup regGroup = workReg->group();
if (regGroup != RegGroup::kGp) if (regGroup != RegGroup::kGp) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
ASMJIT_PROPAGATE(ib.addCallArg(workReg, arg.regId())); ASMJIT_PROPAGATE(ib.addCallArg(workReg, arg.regId()));
} }
else if (arg.isReg()) { else if (arg.isReg()) {
@@ -445,8 +486,9 @@ Error RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept
for (uint32_t retIndex = 0; retIndex < Globals::kMaxValuePack; retIndex++) { for (uint32_t retIndex = 0; retIndex < Globals::kMaxValuePack; retIndex++) {
const FuncValue& ret = fd.ret(retIndex); const FuncValue& ret = fd.ret(retIndex);
if (!ret) if (!ret) {
break; break;
}
const Operand& op = invokeNode->ret(retIndex); const Operand& op = invokeNode->ret(retIndex);
if (op.isReg()) { if (op.isReg()) {
@@ -525,11 +567,13 @@ Error RACFGBuilder::moveRegToStackArg(InvokeNode* invokeNode, const FuncValue& a
DebugUtils::unused(invokeNode); DebugUtils::unused(invokeNode);
Mem stackPtr = ptr(_pass->_sp.as<Gp>(), arg.stackOffset()); Mem stackPtr = ptr(_pass->_sp.as<Gp>(), arg.stackOffset());
if (reg.isGp()) if (reg.isGp()) {
return cc()->str(reg.as<Gp>(), stackPtr); return cc()->str(reg.as<Gp>(), stackPtr);
}
if (reg.isVec()) if (reg.isVec()) {
return cc()->str(reg.as<Vec>(), stackPtr); return cc()->str(reg.as<Vec>(), stackPtr);
}
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
} }
@@ -549,11 +593,14 @@ Error RACFGBuilder::onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept {
for (uint32_t i = 0; i < opCount; i++) { for (uint32_t i = 0; i < opCount; i++) {
const Operand& op = opArray[i]; const Operand& op = opArray[i];
if (op.isNone()) continue; if (op.isNone()) {
continue;
}
const FuncValue& ret = funcDetail.ret(i); const FuncValue& ret = funcDetail.ret(i);
if (ASMJIT_UNLIKELY(!ret.isReg())) if (ASMJIT_UNLIKELY(!ret.isReg())) {
return DebugUtils::errored(kErrorInvalidAssignment); return DebugUtils::errored(kErrorInvalidAssignment);
}
if (op.isReg()) { if (op.isReg()) {
// Register return value. // Register return value.
@@ -614,8 +661,9 @@ void ARMRAPass::onInit() noexcept {
// Apple ABI requires that the frame-pointer register is not changed by leaf functions and properly updated // Apple ABI requires that the frame-pointer register is not changed by leaf functions and properly updated
// by non-leaf functions. So, let's make this register unavailable as it's just not safe to update it. // by non-leaf functions. So, let's make this register unavailable as it's just not safe to update it.
if (hasFP || cc()->environment().isDarwin()) if (hasFP || cc()->environment().isDarwin()) {
makeUnavailable(RegGroup::kGp, Gp::kIdFp); makeUnavailable(RegGroup::kGp, Gp::kIdFp);
}
makeUnavailable(RegGroup::kGp, Gp::kIdSp); makeUnavailable(RegGroup::kGp, Gp::kIdSp);
makeUnavailable(RegGroup::kGp, Gp::kIdOs); // OS-specific use, usually TLS. makeUnavailable(RegGroup::kGp, Gp::kIdOs); // OS-specific use, usually TLS.
@@ -663,13 +711,17 @@ ASMJIT_FAVOR_SPEED Error ARMRAPass::_rewrite(BaseNode* first, BaseNode* stop) no
Support::BitWordIterator<uint32_t> useIt(tiedReg->useRewriteMask()); Support::BitWordIterator<uint32_t> useIt(tiedReg->useRewriteMask());
uint32_t useId = tiedReg->useId(); uint32_t useId = tiedReg->useId();
while (useIt.hasNext())
while (useIt.hasNext()) {
inst->rewriteIdAtIndex(useIt.next(), useId); inst->rewriteIdAtIndex(useIt.next(), useId);
}
Support::BitWordIterator<uint32_t> outIt(tiedReg->outRewriteMask()); Support::BitWordIterator<uint32_t> outIt(tiedReg->outRewriteMask());
uint32_t outId = tiedReg->outId(); uint32_t outId = tiedReg->outId();
while (outIt.hasNext())
while (outIt.hasNext()) {
inst->rewriteIdAtIndex(outIt.next(), outId); inst->rewriteIdAtIndex(outIt.next(), outId);
}
} }
// This data is allocated by Zone passed to `runOnFunction()`, which // This data is allocated by Zone passed to `runOnFunction()`, which
@@ -703,8 +755,9 @@ ASMJIT_FAVOR_SPEED Error ARMRAPass::_rewrite(BaseNode* first, BaseNode* stop) no
BaseMem& mem = op.as<BaseMem>(); BaseMem& mem = op.as<BaseMem>();
if (mem.isRegHome()) { if (mem.isRegHome()) {
uint32_t virtIndex = Operand::virtIdToIndex(mem.baseId()); uint32_t virtIndex = Operand::virtIdToIndex(mem.baseId());
if (ASMJIT_UNLIKELY(virtIndex >= virtCount)) if (ASMJIT_UNLIKELY(virtIndex >= virtCount)) {
return DebugUtils::errored(kErrorInvalidVirtId); return DebugUtils::errored(kErrorInvalidVirtId);
}
VirtReg* virtReg = cc()->virtRegByIndex(virtIndex); VirtReg* virtReg = cc()->virtRegByIndex(virtIndex);
RAWorkReg* workReg = virtReg->workReg(); RAWorkReg* workReg = virtReg->workReg();
@@ -730,8 +783,9 @@ ASMJIT_FAVOR_SPEED Error ARMRAPass::_rewrite(BaseNode* first, BaseNode* stop) no
inst->setOp(1, Imm(offset)); inst->setOp(1, Imm(offset));
} }
else { else {
if (mem.hasIndex()) if (mem.hasIndex()) {
return DebugUtils::errored(kErrorInvalidAddressIndex); return DebugUtils::errored(kErrorInvalidAddressIndex);
}
GpX dst(inst->op(0).as<Gp>().id()); GpX dst(inst->op(0).as<Gp>().id());
GpX base(mem.baseId()); GpX base(mem.baseId());
@@ -775,8 +829,9 @@ ASMJIT_FAVOR_SPEED Error ARMRAPass::_rewrite(BaseNode* first, BaseNode* stop) no
// ================================ // ================================
Error ARMRAPass::updateStackFrame() noexcept { Error ARMRAPass::updateStackFrame() noexcept {
if (_func->frame().hasFuncCalls()) if (_func->frame().hasFuncCalls()) {
_func->frame().addDirtyRegs(RegGroup::kGp, Support::bitMask(Gp::kIdLr)); _func->frame().addDirtyRegs(RegGroup::kGp, Support::bitMask(Gp::kIdLr));
}
return BaseRAPass::updateStackFrame(); return BaseRAPass::updateStackFrame();
} }

View File

@@ -29,10 +29,15 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
class ARMRAPass : public BaseRAPass { class ARMRAPass : public BaseRAPass {
public: public:
ASMJIT_NONCOPYABLE(ARMRAPass) ASMJIT_NONCOPYABLE(ARMRAPass)
typedef BaseRAPass Base; using Base = BaseRAPass;
//! \name Members
//! \{
EmitHelper _emitHelper; EmitHelper _emitHelper;
//! \}
//! \name Construction & Destruction //! \name Construction & Destruction
//! \{ //! \{
@@ -45,9 +50,11 @@ public:
//! \{ //! \{
//! Returns the compiler cast to `arm::Compiler`. //! Returns the compiler cast to `arm::Compiler`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Compiler* cc() const noexcept { return static_cast<Compiler*>(_cb); } ASMJIT_INLINE_NODEBUG Compiler* cc() const noexcept { return static_cast<Compiler*>(_cb); }
//! Returns emit helper. //! Returns emit helper.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG EmitHelper* emitHelper() noexcept { return &_emitHelper; } ASMJIT_INLINE_NODEBUG EmitHelper* emitHelper() noexcept { return &_emitHelper; }
//! \} //! \}

View File

@@ -334,10 +334,12 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister(
ASMJIT_ASSERT(vReg != nullptr); ASMJIT_ASSERT(vReg != nullptr);
const char* name = vReg->name(); const char* name = vReg->name();
if (name && name[0] != '\0') if (name && name[0] != '\0') {
ASMJIT_PROPAGATE(sb.append(name)); ASMJIT_PROPAGATE(sb.append(name));
else }
else {
ASMJIT_PROPAGATE(sb.appendFormat("%%%u", unsigned(Operand::virtIdToIndex(rId)))); ASMJIT_PROPAGATE(sb.appendFormat("%%%u", unsigned(Operand::virtIdToIndex(rId))));
}
virtRegFormatted = true; virtRegFormatted = true;
} }
@@ -356,19 +358,22 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister(
case RegType::kARM_VecD: case RegType::kARM_VecD:
case RegType::kARM_VecV: case RegType::kARM_VecV:
letter = bhsdq[uint32_t(regType) - uint32_t(RegType::kARM_VecB)]; letter = bhsdq[uint32_t(regType) - uint32_t(RegType::kARM_VecB)];
if (elementType) if (elementType) {
letter = 'v'; letter = 'v';
}
break; break;
case RegType::kARM_GpW: case RegType::kARM_GpW:
if (Environment::is64Bit(arch)) { if (Environment::is64Bit(arch)) {
letter = 'w'; letter = 'w';
if (rId == a64::Gp::kIdZr) if (rId == a64::Gp::kIdZr) {
return sb.append("wzr", 3); return sb.append("wzr", 3);
}
if (rId == a64::Gp::kIdSp) if (rId == a64::Gp::kIdSp) {
return sb.append("wsp", 3); return sb.append("wsp", 3);
}
} }
else { else {
letter = 'r'; letter = 'r';
@@ -377,17 +382,20 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister(
case RegType::kARM_GpX: case RegType::kARM_GpX:
if (Environment::is64Bit(arch)) { if (Environment::is64Bit(arch)) {
if (rId == a64::Gp::kIdZr) if (rId == a64::Gp::kIdZr) {
return sb.append("xzr", 3); return sb.append("xzr", 3);
if (rId == a64::Gp::kIdSp) }
if (rId == a64::Gp::kIdSp) {
return sb.append("sp", 2); return sb.append("sp", 2);
}
letter = 'x'; letter = 'x';
break; break;
} }
// X registers are undefined in 32-bit mode. // X registers are undefined in 32-bit mode.
ASMJIT_FALLTHROUGH; [[fallthrough]];
default: default:
ASMJIT_PROPAGATE(sb.appendFormat("<Reg-%u>?%u", uint32_t(regType), rId)); ASMJIT_PROPAGATE(sb.appendFormat("<Reg-%u>?%u", uint32_t(regType), rId));
@@ -445,8 +453,9 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegisterList(
count++; count++;
} while (rMask & mask); } while (rMask & mask);
if (!first) if (!first) {
ASMJIT_PROPAGATE(sb.append(", ")); ASMJIT_PROPAGATE(sb.append(", "));
}
ASMJIT_PROPAGATE(formatRegister(sb, flags, emitter, arch, regType, start, 0, 0xFFFFFFFFu)); ASMJIT_PROPAGATE(formatRegister(sb, flags, emitter, arch, regType, start, 0, 0xFFFFFFFFu));
if (count >= 2u) { if (count >= 2u) {
@@ -477,8 +486,9 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand(
uint32_t elementType = op._signature.getField<BaseVec::kSignatureRegElementTypeMask>(); uint32_t elementType = op._signature.getField<BaseVec::kSignatureRegElementTypeMask>();
uint32_t elementIndex = op.as<BaseVec>().elementIndex(); uint32_t elementIndex = op.as<BaseVec>().elementIndex();
if (!op.as<BaseVec>().hasElementIndex()) if (!op.as<BaseVec>().hasElementIndex()) {
elementIndex = 0xFFFFFFFFu; elementIndex = 0xFFFFFFFFu;
}
return formatRegister(sb, flags, emitter, arch, reg.type(), reg.id(), elementType, elementIndex); return formatRegister(sb, flags, emitter, arch, reg.type(), reg.id(), elementType, elementIndex);
} }
@@ -524,8 +534,9 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand(
int64_t off = int64_t(m.offset()); int64_t off = int64_t(m.offset());
uint32_t base = 10; uint32_t base = 10;
if (Support::test(flags, FormatFlags::kHexOffsets) && uint64_t(off) > 9) if (Support::test(flags, FormatFlags::kHexOffsets) && uint64_t(off) > 9) {
base = 16; base = 16;
}
if (base == 10) { if (base == 10) {
ASMJIT_PROPAGATE(sb.appendInt(off, base)); ASMJIT_PROPAGATE(sb.appendInt(off, base));
@@ -538,16 +549,19 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand(
if (m.hasShift()) { if (m.hasShift()) {
ASMJIT_PROPAGATE(sb.append(' ')); ASMJIT_PROPAGATE(sb.append(' '));
if (!m.isPreOrPost()) if (!m.isPreOrPost()) {
ASMJIT_PROPAGATE(formatShiftOp(sb, m.shiftOp())); ASMJIT_PROPAGATE(formatShiftOp(sb, m.shiftOp()));
}
ASMJIT_PROPAGATE(sb.appendFormat(" %u", m.shift())); ASMJIT_PROPAGATE(sb.appendFormat(" %u", m.shift()));
} }
if (!m.isPostIndex()) if (!m.isPostIndex()) {
ASMJIT_PROPAGATE(sb.append(']')); ASMJIT_PROPAGATE(sb.append(']'));
}
if (m.isPreIndex()) if (m.isPreIndex()) {
ASMJIT_PROPAGATE(sb.append('!')); ASMJIT_PROPAGATE(sb.append('!'));
}
return kErrorOk; return kErrorOk;
} }

View File

@@ -47,40 +47,40 @@ public:
ASMJIT_DEFINE_ABSTRACT_REG(Reg, BaseReg) ASMJIT_DEFINE_ABSTRACT_REG(Reg, BaseReg)
//! Gets whether the register is either `R` or `W` register (32-bit). //! Gets whether the register is either `R` or `W` register (32-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isGpR() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpW>::kSignature; } ASMJIT_INLINE_CONSTEXPR bool isGpR() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpW>::kSignature; }
//! Gets whether the register is either `R` or `W` register (32-bit). //! Gets whether the register is either `R` or `W` register (32-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isGpW() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpW>::kSignature; } ASMJIT_INLINE_CONSTEXPR bool isGpW() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpW>::kSignature; }
//! Gets whether the register is an `X` register (64-bit). //! Gets whether the register is an `X` register (64-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isGpX() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpX>::kSignature; } ASMJIT_INLINE_CONSTEXPR bool isGpX() const noexcept { return baseSignature() == RegTraits<RegType::kARM_GpX>::kSignature; }
//! Gets whether the register is a VEC-B register (8-bit). //! Gets whether the register is a VEC-B register (8-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecB() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecB>::kSignature; } ASMJIT_INLINE_CONSTEXPR bool isVecB() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecB>::kSignature; }
//! Gets whether the register is a VEC-H register (16-bit). //! Gets whether the register is a VEC-H register (16-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecH() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecH>::kSignature; } ASMJIT_INLINE_CONSTEXPR bool isVecH() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecH>::kSignature; }
//! Gets whether the register is a VEC-S register (32-bit). //! Gets whether the register is a VEC-S register (32-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecS() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecS>::kSignature; } ASMJIT_INLINE_CONSTEXPR bool isVecS() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecS>::kSignature; }
//! Gets whether the register is a VEC-D register (64-bit). //! Gets whether the register is a VEC-D register (64-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecD() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecD>::kSignature; } ASMJIT_INLINE_CONSTEXPR bool isVecD() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecD>::kSignature; }
//! Gets whether the register is a VEC-Q register (128-bit). //! Gets whether the register is a VEC-Q register (128-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecQ() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; } ASMJIT_INLINE_CONSTEXPR bool isVecQ() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; }
//! Gets whether the register is either VEC-D (64-bit) or VEC-Q (128-bit). //! Gets whether the register is either VEC-D (64-bit) or VEC-Q (128-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecDOrQ() const noexcept { return uint32_t(type()) - uint32_t(RegType::kARM_VecD) <= 1u; } ASMJIT_INLINE_CONSTEXPR bool isVecDOrQ() const noexcept { return uint32_t(type()) - uint32_t(RegType::kARM_VecD) <= 1u; }
//! Gets whether the register is a VEC-V register (128-bit). //! Gets whether the register is a VEC-V register (128-bit).
ASMJIT_INLINE_NODEBUG constexpr bool isVecV() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; } ASMJIT_INLINE_CONSTEXPR bool isVecV() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; }
//! Gets whether the register is an 8-bit vector register or view, alias of \ref isVecB(). //! Gets whether the register is an 8-bit vector register or view, alias of \ref isVecB().
ASMJIT_INLINE_NODEBUG constexpr bool isVec8() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecB>::kSignature; } ASMJIT_INLINE_CONSTEXPR bool isVec8() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecB>::kSignature; }
//! Gets whether the register is a 16-bit vector register or view, alias of \ref isVecH(). //! Gets whether the register is a 16-bit vector register or view, alias of \ref isVecH().
ASMJIT_INLINE_NODEBUG constexpr bool isVec16() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecH>::kSignature; } ASMJIT_INLINE_CONSTEXPR bool isVec16() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecH>::kSignature; }
//! Gets whether the register is a 32-bit vector register or view, alias of \ref isVecS(). //! Gets whether the register is a 32-bit vector register or view, alias of \ref isVecS().
ASMJIT_INLINE_NODEBUG constexpr bool isVec32() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecS>::kSignature; } ASMJIT_INLINE_CONSTEXPR bool isVec32() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecS>::kSignature; }
//! Gets whether the register is a 64-bit vector register or view, alias of \ref isVecD(). //! Gets whether the register is a 64-bit vector register or view, alias of \ref isVecD().
ASMJIT_INLINE_NODEBUG constexpr bool isVec64() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecD>::kSignature; } ASMJIT_INLINE_CONSTEXPR bool isVec64() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecD>::kSignature; }
//! Gets whether the register is a 128-bit vector register or view, alias of \ref isVecQ(). //! Gets whether the register is a 128-bit vector register or view, alias of \ref isVecQ().
ASMJIT_INLINE_NODEBUG constexpr bool isVec128() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; } ASMJIT_INLINE_CONSTEXPR bool isVec128() const noexcept { return baseSignature() == RegTraits<RegType::kARM_VecV>::kSignature; }
template<RegType kRegType> template<RegType kRegType>
ASMJIT_INLINE_NODEBUG void setRegT(uint32_t id) noexcept { ASMJIT_INLINE_CONSTEXPR void setRegT(uint32_t id) noexcept {
setSignature(RegTraits<kRegType>::kSignature); setSignature(RegTraits<kRegType>::kSignature);
setId(id); setId(id);
} }
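A small sketch of how these predicates are typically queried on a generic operand; qualifying `Reg` through the `a64` namespace is an assumption about where this class is re-exported, and the function name is illustrative:

#include <asmjit/a64.h>
using namespace asmjit;

// Illustrative only: accept an operand only if it is a 64-bit general-purpose register.
static bool isGp64Reg(const Operand_& op) noexcept {
  return op.isReg() && op.as<a64::Reg>().isGpX();
}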
@@ -95,13 +95,13 @@ public:
static ASMJIT_INLINE_NODEBUG OperandSignature signatureOf(RegType type) noexcept { return ArchTraits::byArch(Arch::kAArch64).regTypeToSignature(type); } static ASMJIT_INLINE_NODEBUG OperandSignature signatureOf(RegType type) noexcept { return ArchTraits::byArch(Arch::kAArch64).regTypeToSignature(type); }
template<RegType kRegType> template<RegType kRegType>
static ASMJIT_INLINE_NODEBUG RegGroup groupOfT() noexcept { return RegTraits<kRegType>::kGroup; } static ASMJIT_INLINE_CONSTEXPR RegGroup groupOfT() noexcept { return RegTraits<kRegType>::kGroup; }
template<RegType kRegType> template<RegType kRegType>
static ASMJIT_INLINE_NODEBUG TypeId typeIdOfT() noexcept { return RegTraits<kRegType>::kTypeId; } static ASMJIT_INLINE_CONSTEXPR TypeId typeIdOfT() noexcept { return RegTraits<kRegType>::kTypeId; }
template<RegType kRegType> template<RegType kRegType>
static ASMJIT_INLINE_NODEBUG OperandSignature signatureOfT() noexcept { return OperandSignature{RegTraits<kRegType>::kSignature}; } static ASMJIT_INLINE_CONSTEXPR OperandSignature signatureOfT() noexcept { return OperandSignature{RegTraits<kRegType>::kSignature}; }
static ASMJIT_INLINE_NODEBUG bool isGpW(const Operand_& op) noexcept { return op.as<Reg>().isGpW(); } static ASMJIT_INLINE_NODEBUG bool isGpW(const Operand_& op) noexcept { return op.as<Reg>().isGpW(); }
static ASMJIT_INLINE_NODEBUG bool isGpX(const Operand_& op) noexcept { return op.as<Reg>().isGpX(); } static ASMJIT_INLINE_NODEBUG bool isGpX(const Operand_& op) noexcept { return op.as<Reg>().isGpX(); }
@@ -146,16 +146,16 @@ public:
}; };
//! Returns whether the register has element index (it's an element index access). //! Returns whether the register has element index (it's an element index access).
ASMJIT_INLINE_NODEBUG constexpr bool hasElementIndex() const noexcept { return _signature.hasField<kSignatureRegElementFlagMask>(); } ASMJIT_INLINE_CONSTEXPR bool hasElementIndex() const noexcept { return _signature.hasField<kSignatureRegElementFlagMask>(); }
//! Returns element index of the register. //! Returns element index of the register.
ASMJIT_INLINE_NODEBUG constexpr uint32_t elementIndex() const noexcept { return _signature.getField<kSignatureRegElementIndexMask>(); } ASMJIT_INLINE_CONSTEXPR uint32_t elementIndex() const noexcept { return _signature.getField<kSignatureRegElementIndexMask>(); }
//! Sets element index of the register to `elementIndex`. //! Sets element index of the register to `elementIndex`.
ASMJIT_INLINE_NODEBUG void setElementIndex(uint32_t elementIndex) noexcept { ASMJIT_INLINE_CONSTEXPR void setElementIndex(uint32_t elementIndex) noexcept {
_signature |= kSignatureRegElementFlagMask; _signature |= kSignatureRegElementFlagMask;
_signature.setField<kSignatureRegElementIndexMask>(elementIndex); _signature.setField<kSignatureRegElementIndexMask>(elementIndex);
} }
//! Resets element index of the register. //! Resets element index of the register.
ASMJIT_INLINE_NODEBUG void resetElementIndex() noexcept { ASMJIT_INLINE_CONSTEXPR void resetElementIndex() noexcept {
_signature &= ~(kSignatureRegElementFlagMask | kSignatureRegElementIndexMask); _signature &= ~(kSignatureRegElementFlagMask | kSignatureRegElementIndexMask);
} }
}; };
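A sketch of the element-index state these accessors manage, using the `s4()` view defined earlier in this commit; the function name is illustrative:

#include <asmjit/a64.h>
using namespace asmjit;

// Illustrative only: build an operand referring to lane [i] of V0 viewed as 4 x 32-bit elements.
static a64::Vec v0LaneS(uint32_t i) noexcept {
  a64::Vec lane = a64::v0.s4();   // plain 4S view, hasElementIndex() == false
  lane.setElementIndex(i);        // now hasElementIndex() == true and elementIndex() == i
  return lane;
}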
@@ -187,35 +187,35 @@ public:
//! \{ //! \{
//! Construct a default `Mem` operand, that points to [0]. //! Construct a default `Mem` operand, that points to [0].
ASMJIT_INLINE_NODEBUG constexpr Mem() noexcept ASMJIT_INLINE_CONSTEXPR Mem() noexcept
: BaseMem() {} : BaseMem() {}
ASMJIT_INLINE_NODEBUG constexpr Mem(const Mem& other) noexcept ASMJIT_INLINE_CONSTEXPR Mem(const Mem& other) noexcept
: BaseMem(other) {} : BaseMem(other) {}
ASMJIT_INLINE_NODEBUG explicit Mem(Globals::NoInit_) noexcept ASMJIT_INLINE_NODEBUG explicit Mem(Globals::NoInit_) noexcept
: BaseMem(Globals::NoInit) {} : BaseMem(Globals::NoInit) {}
ASMJIT_INLINE_NODEBUG constexpr Mem(const Signature& signature, uint32_t baseId, uint32_t indexId, int32_t offset) noexcept ASMJIT_INLINE_CONSTEXPR Mem(const Signature& signature, uint32_t baseId, uint32_t indexId, int32_t offset) noexcept
: BaseMem(signature, baseId, indexId, offset) {} : BaseMem(signature, baseId, indexId, offset) {}
ASMJIT_INLINE_NODEBUG constexpr explicit Mem(const Label& base, int32_t off = 0, Signature signature = Signature{0}) noexcept ASMJIT_INLINE_CONSTEXPR explicit Mem(const Label& base, int32_t off = 0, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) | : BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(RegType::kLabelTag) | Signature::fromMemBaseType(RegType::kLabelTag) |
signature, base.id(), 0, off) {} signature, base.id(), 0, off) {}
ASMJIT_INLINE_NODEBUG constexpr explicit Mem(const BaseReg& base, int32_t off = 0, Signature signature = Signature{0}) noexcept ASMJIT_INLINE_CONSTEXPR explicit Mem(const BaseReg& base, int32_t off = 0, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) | : BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(base.type()) | Signature::fromMemBaseType(base.type()) |
signature, base.id(), 0, off) {} signature, base.id(), 0, off) {}
ASMJIT_INLINE_NODEBUG constexpr Mem(const BaseReg& base, const BaseReg& index, Signature signature = Signature{0}) noexcept ASMJIT_INLINE_CONSTEXPR Mem(const BaseReg& base, const BaseReg& index, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) | : BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(base.type()) | Signature::fromMemBaseType(base.type()) |
Signature::fromMemIndexType(index.type()) | Signature::fromMemIndexType(index.type()) |
signature, base.id(), index.id(), 0) {} signature, base.id(), index.id(), 0) {}
ASMJIT_INLINE_NODEBUG constexpr Mem(const BaseReg& base, const BaseReg& index, const Shift& shift, Signature signature = Signature{0}) noexcept ASMJIT_INLINE_CONSTEXPR Mem(const BaseReg& base, const BaseReg& index, const Shift& shift, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) | : BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(base.type()) | Signature::fromMemBaseType(base.type()) |
Signature::fromMemIndexType(index.type()) | Signature::fromMemIndexType(index.type()) |
@@ -223,7 +223,7 @@ public:
Signature::fromValue<kSignatureMemShiftValueMask>(shift.value()) | Signature::fromValue<kSignatureMemShiftValueMask>(shift.value()) |
signature, base.id(), index.id(), 0) {} signature, base.id(), index.id(), 0) {}
ASMJIT_INLINE_NODEBUG constexpr explicit Mem(uint64_t base, Signature signature = Signature{0}) noexcept ASMJIT_INLINE_CONSTEXPR explicit Mem(uint64_t base, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) | : BaseMem(Signature::fromOpType(OperandType::kMem) |
signature, uint32_t(base >> 32), 0, int32_t(uint32_t(base & 0xFFFFFFFFu))) {} signature, uint32_t(base >> 32), 0, int32_t(uint32_t(base & 0xFFFFFFFFu))) {}
@@ -232,7 +232,10 @@ public:
//! \name Overloaded Operators //! \name Overloaded Operators
//! \{ //! \{
ASMJIT_INLINE_NODEBUG Mem& operator=(const Mem& other) noexcept = default; ASMJIT_INLINE_CONSTEXPR Mem& operator=(const Mem& other) noexcept {
copyFrom(other);
return *this;
}
//! \} //! \}
@@ -240,24 +243,24 @@ public:
//! \{ //! \{
//! Clones the memory operand. //! Clones the memory operand.
ASMJIT_INLINE_NODEBUG constexpr Mem clone() const noexcept { return Mem(*this); } ASMJIT_INLINE_CONSTEXPR Mem clone() const noexcept { return Mem(*this); }
//! Gets new memory operand adjusted by `off`. //! Gets new memory operand adjusted by `off`.
ASMJIT_INLINE_NODEBUG Mem cloneAdjusted(int64_t off) const noexcept { ASMJIT_INLINE_CONSTEXPR Mem cloneAdjusted(int64_t off) const noexcept {
Mem result(*this); Mem result(*this);
result.addOffset(off); result.addOffset(off);
return result; return result;
} }
//! Clones the memory operand and makes it pre-index. //! Clones the memory operand and makes it pre-index.
ASMJIT_INLINE_NODEBUG Mem pre() const noexcept { ASMJIT_INLINE_CONSTEXPR Mem pre() const noexcept {
Mem result(*this); Mem result(*this);
result.setOffsetMode(OffsetMode::kPreIndex); result.setOffsetMode(OffsetMode::kPreIndex);
return result; return result;
} }
//! Clones the memory operand, applies a given offset `off` and makes it pre-index. //! Clones the memory operand, applies a given offset `off` and makes it pre-index.
ASMJIT_INLINE_NODEBUG Mem pre(int64_t off) const noexcept { ASMJIT_INLINE_CONSTEXPR Mem pre(int64_t off) const noexcept {
Mem result(*this); Mem result(*this);
result.setOffsetMode(OffsetMode::kPreIndex); result.setOffsetMode(OffsetMode::kPreIndex);
result.addOffset(off); result.addOffset(off);
@@ -265,14 +268,14 @@ public:
} }
//! Clones the memory operand and makes it post-index. //! Clones the memory operand and makes it post-index.
ASMJIT_INLINE_NODEBUG Mem post() const noexcept { ASMJIT_INLINE_CONSTEXPR Mem post() const noexcept {
Mem result(*this); Mem result(*this);
result.setOffsetMode(OffsetMode::kPostIndex); result.setOffsetMode(OffsetMode::kPostIndex);
return result; return result;
} }
//! Clones the memory operand, applies a given offset `off` and makes it post-index. //! Clones the memory operand, applies a given offset `off` and makes it post-index.
ASMJIT_INLINE_NODEBUG Mem post(int64_t off) const noexcept { ASMJIT_INLINE_CONSTEXPR Mem post(int64_t off) const noexcept {
Mem result(*this); Mem result(*this);
result.setOffsetMode(OffsetMode::kPostIndex); result.setOffsetMode(OffsetMode::kPostIndex);
result.addOffset(off); result.addOffset(off);
@@ -296,12 +299,12 @@ public:
using BaseMem::setIndex; using BaseMem::setIndex;
ASMJIT_INLINE_NODEBUG void setIndex(const BaseReg& index, uint32_t shift) noexcept { ASMJIT_INLINE_CONSTEXPR void setIndex(const BaseReg& index, uint32_t shift) noexcept {
setIndex(index); setIndex(index);
setShift(shift); setShift(shift);
} }
ASMJIT_INLINE_NODEBUG void setIndex(const BaseReg& index, Shift shift) noexcept { ASMJIT_INLINE_CONSTEXPR void setIndex(const BaseReg& index, Shift shift) noexcept {
setIndex(index); setIndex(index);
setShift(shift); setShift(shift);
} }
@@ -312,48 +315,48 @@ public:
//! \{ //! \{
//! Gets offset mode. //! Gets offset mode.
ASMJIT_INLINE_NODEBUG constexpr OffsetMode offsetMode() const noexcept { return OffsetMode(_signature.getField<kSignatureMemOffsetModeMask>()); } ASMJIT_INLINE_CONSTEXPR OffsetMode offsetMode() const noexcept { return OffsetMode(_signature.getField<kSignatureMemOffsetModeMask>()); }
//! Sets offset mode to `mode`. //! Sets offset mode to `mode`.
ASMJIT_INLINE_NODEBUG void setOffsetMode(OffsetMode mode) noexcept { _signature.setField<kSignatureMemOffsetModeMask>(uint32_t(mode)); } ASMJIT_INLINE_CONSTEXPR void setOffsetMode(OffsetMode mode) noexcept { _signature.setField<kSignatureMemOffsetModeMask>(uint32_t(mode)); }
//! Resets offset mode to default (fixed offset, without write-back). //! Resets offset mode to default (fixed offset, without write-back).
ASMJIT_INLINE_NODEBUG void resetOffsetMode() noexcept { _signature.setField<kSignatureMemOffsetModeMask>(uint32_t(OffsetMode::kFixed)); } ASMJIT_INLINE_CONSTEXPR void resetOffsetMode() noexcept { _signature.setField<kSignatureMemOffsetModeMask>(uint32_t(OffsetMode::kFixed)); }
//! Tests whether the current memory offset mode is fixed (see \ref OffsetMode::kFixed). //! Tests whether the current memory offset mode is fixed (see \ref OffsetMode::kFixed).
ASMJIT_INLINE_NODEBUG constexpr bool isFixedOffset() const noexcept { return offsetMode() == OffsetMode::kFixed; } ASMJIT_INLINE_CONSTEXPR bool isFixedOffset() const noexcept { return offsetMode() == OffsetMode::kFixed; }
//! Tests whether the current memory offset mode is either pre-index or post-index (write-back is used). //! Tests whether the current memory offset mode is either pre-index or post-index (write-back is used).
ASMJIT_INLINE_NODEBUG constexpr bool isPreOrPost() const noexcept { return offsetMode() != OffsetMode::kFixed; } ASMJIT_INLINE_CONSTEXPR bool isPreOrPost() const noexcept { return offsetMode() != OffsetMode::kFixed; }
//! Tests whether the current memory offset mode is pre-index (write-back is used). //! Tests whether the current memory offset mode is pre-index (write-back is used).
ASMJIT_INLINE_NODEBUG constexpr bool isPreIndex() const noexcept { return offsetMode() == OffsetMode::kPreIndex; } ASMJIT_INLINE_CONSTEXPR bool isPreIndex() const noexcept { return offsetMode() == OffsetMode::kPreIndex; }
//! Tests whether the current memory offset mode is post-index (write-back is used). //! Tests whether the current memory offset mode is post-index (write-back is used).
ASMJIT_INLINE_NODEBUG constexpr bool isPostIndex() const noexcept { return offsetMode() == OffsetMode::kPostIndex; } ASMJIT_INLINE_CONSTEXPR bool isPostIndex() const noexcept { return offsetMode() == OffsetMode::kPostIndex; }
//! Sets offset mode of this memory operand to pre-index (write-back is used). //! Sets offset mode of this memory operand to pre-index (write-back is used).
ASMJIT_INLINE_NODEBUG void makePreIndex() noexcept { setOffsetMode(OffsetMode::kPreIndex); } ASMJIT_INLINE_CONSTEXPR void makePreIndex() noexcept { setOffsetMode(OffsetMode::kPreIndex); }
//! Sets offset mode of this memory operand to post-index (write-back is used). //! Sets offset mode of this memory operand to post-index (write-back is used).
ASMJIT_INLINE_NODEBUG void makePostIndex() noexcept { setOffsetMode(OffsetMode::kPostIndex); } ASMJIT_INLINE_CONSTEXPR void makePostIndex() noexcept { setOffsetMode(OffsetMode::kPostIndex); }
//! Gets shift operation that is used by index register. //! Gets shift operation that is used by index register.
ASMJIT_INLINE_NODEBUG constexpr ShiftOp shiftOp() const noexcept { return ShiftOp(_signature.getField<kSignatureMemShiftOpMask>()); } ASMJIT_INLINE_CONSTEXPR ShiftOp shiftOp() const noexcept { return ShiftOp(_signature.getField<kSignatureMemShiftOpMask>()); }
//! Sets shift operation that is used by index register. //! Sets shift operation that is used by index register.
ASMJIT_INLINE_NODEBUG void setShiftOp(ShiftOp sop) noexcept { _signature.setField<kSignatureMemShiftOpMask>(uint32_t(sop)); } ASMJIT_INLINE_CONSTEXPR void setShiftOp(ShiftOp sop) noexcept { _signature.setField<kSignatureMemShiftOpMask>(uint32_t(sop)); }
//! Resets shift operation that is used by index register to LSL (default value). //! Resets shift operation that is used by index register to LSL (default value).
ASMJIT_INLINE_NODEBUG void resetShiftOp() noexcept { _signature.setField<kSignatureMemShiftOpMask>(uint32_t(ShiftOp::kLSL)); } ASMJIT_INLINE_CONSTEXPR void resetShiftOp() noexcept { _signature.setField<kSignatureMemShiftOpMask>(uint32_t(ShiftOp::kLSL)); }
//! Gets whether the memory operand has shift (aka scale) constant. //! Gets whether the memory operand has shift (aka scale) constant.
ASMJIT_INLINE_NODEBUG constexpr bool hasShift() const noexcept { return _signature.hasField<kSignatureMemShiftValueMask>(); } ASMJIT_INLINE_CONSTEXPR bool hasShift() const noexcept { return _signature.hasField<kSignatureMemShiftValueMask>(); }
//! Gets the memory operand's shift (aka scale) constant. //! Gets the memory operand's shift (aka scale) constant.
ASMJIT_INLINE_NODEBUG constexpr uint32_t shift() const noexcept { return _signature.getField<kSignatureMemShiftValueMask>(); } ASMJIT_INLINE_CONSTEXPR uint32_t shift() const noexcept { return _signature.getField<kSignatureMemShiftValueMask>(); }
//! Sets the memory operand's shift (aka scale) constant. //! Sets the memory operand's shift (aka scale) constant.
ASMJIT_INLINE_NODEBUG void setShift(uint32_t shift) noexcept { _signature.setField<kSignatureMemShiftValueMask>(shift); } ASMJIT_INLINE_CONSTEXPR void setShift(uint32_t shift) noexcept { _signature.setField<kSignatureMemShiftValueMask>(shift); }
//! Sets the memory operand's shift and shift operation. //! Sets the memory operand's shift and shift operation.
ASMJIT_INLINE_NODEBUG void setShift(Shift shift) noexcept { ASMJIT_INLINE_CONSTEXPR void setShift(Shift shift) noexcept {
_signature.setField<kSignatureMemShiftOpMask>(uint32_t(shift.op())); _signature.setField<kSignatureMemShiftOpMask>(uint32_t(shift.op()));
_signature.setField<kSignatureMemShiftValueMask>(shift.value()); _signature.setField<kSignatureMemShiftValueMask>(shift.value());
} }
//! Resets the memory operand's shift (aka scale) constant to zero. //! Resets the memory operand's shift (aka scale) constant to zero.
ASMJIT_INLINE_NODEBUG void resetShift() noexcept { _signature.setField<kSignatureMemShiftValueMask>(0); } ASMJIT_INLINE_CONSTEXPR void resetShift() noexcept { _signature.setField<kSignatureMemShiftValueMask>(0); }
//! \} //! \}
}; };
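The write-back state is carried entirely in the operand, so the same base operand can be reused in fixed, pre-index, and post-index forms. A sketch assuming the standard `a64::Assembler` emitter; the function name is illustrative:

#include <asmjit/a64.h>
using namespace asmjit;

// Illustrative only: one fixed-offset load, one post-indexed load, one pre-indexed store.
static void offsetModeDemo(a64::Assembler& a) {
  a64::Mem m = a64::ptr(a64::x0);              // [x0]       -> m.isFixedOffset() == true
  a.ldr(a64::x2, m);                           // ldr x2, [x0]
  a.ldr(a64::x3, m.post(8));                   // ldr x3, [x0], #8   (post-index write-back)
  a.str(a64::x2, a64::ptr(a64::x1, 8).pre());  // str x2, [x1, #8]!  (pre-index write-back)
}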
@@ -362,17 +365,17 @@ public:
//! \{ //! \{
//! Constructs a `LSL #value` shift (logical shift left). //! Constructs a `LSL #value` shift (logical shift left).
static ASMJIT_INLINE_NODEBUG constexpr Shift lsl(uint32_t value) noexcept { return Shift(ShiftOp::kLSL, value); } static ASMJIT_INLINE_CONSTEXPR Shift lsl(uint32_t value) noexcept { return Shift(ShiftOp::kLSL, value); }
//! Constructs a `LSR #value` shift (logical shift right). //! Constructs a `LSR #value` shift (logical shift right).
static ASMJIT_INLINE_NODEBUG constexpr Shift lsr(uint32_t value) noexcept { return Shift(ShiftOp::kLSR, value); } static ASMJIT_INLINE_CONSTEXPR Shift lsr(uint32_t value) noexcept { return Shift(ShiftOp::kLSR, value); }
//! Constructs a `ASR #value` shift (arithmetic shift right). //! Constructs a `ASR #value` shift (arithmetic shift right).
static ASMJIT_INLINE_NODEBUG constexpr Shift asr(uint32_t value) noexcept { return Shift(ShiftOp::kASR, value); } static ASMJIT_INLINE_CONSTEXPR Shift asr(uint32_t value) noexcept { return Shift(ShiftOp::kASR, value); }
//! Constructs a `ROR #value` shift (rotate right). //! Constructs a `ROR #value` shift (rotate right).
static ASMJIT_INLINE_NODEBUG constexpr Shift ror(uint32_t value) noexcept { return Shift(ShiftOp::kROR, value); } static ASMJIT_INLINE_CONSTEXPR Shift ror(uint32_t value) noexcept { return Shift(ShiftOp::kROR, value); }
//! Constructs a `RRX` shift (rotate with carry by 1). //! Constructs a `RRX` shift (rotate with carry by 1).
static ASMJIT_INLINE_NODEBUG constexpr Shift rrx() noexcept { return Shift(ShiftOp::kRRX, 0); } static ASMJIT_INLINE_CONSTEXPR Shift rrx() noexcept { return Shift(ShiftOp::kRRX, 0); }
//! Constructs a `MSL #value` shift (logical shift left filling ones). //! Constructs a `MSL #value` shift (logical shift left filling ones).
static ASMJIT_INLINE_NODEBUG constexpr Shift msl(uint32_t value) noexcept { return Shift(ShiftOp::kMSL, value); } static ASMJIT_INLINE_CONSTEXPR Shift msl(uint32_t value) noexcept { return Shift(ShiftOp::kMSL, value); }
//! \} //! \}
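A short sketch of constructing shifts with the helpers above and reading them back through Shift::op() and Shift::value(); it assumes the helpers are reachable from the arm namespace as in upstream AsmJit:

```
// Sketch only: construct a few shifts and inspect them via the accessors.
#include <asmjit/a64.h>
using namespace asmjit;

arm::Shift s1 = arm::lsl(3);   // s1.op() == arm::ShiftOp::kLSL, s1.value() == 3
arm::Shift s2 = arm::ror(8);   // rotate right by 8
arm::Shift s3 = arm::rrx();    // rotate with carry by 1, value() is always 0
```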
@@ -385,7 +388,7 @@ static ASMJIT_INLINE_NODEBUG constexpr Shift msl(uint32_t value) noexcept { retu
//! Absolute memory operands can only be used if it's known that the PC relative offset is encodable and that it //! Absolute memory operands can only be used if it's known that the PC relative offset is encodable and that it
//! would be within the limits. Absolute addresses are also often output by disassemblers, so AsmJit supports them to //! would be within the limits. Absolute addresses are also often output by disassemblers, so AsmJit supports them to
//! make it possible to assemble such output back. //! make it possible to assemble such output back.
static ASMJIT_INLINE_NODEBUG constexpr Mem ptr(uint64_t base) noexcept { return Mem(base); } static ASMJIT_INLINE_CONSTEXPR Mem ptr(uint64_t base) noexcept { return Mem(base); }
//! \} //! \}
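A tiny sketch of the absolute form described above; the address is a hypothetical value and the a64 namespace is assumed, as with the other ptr() overloads:

```
// Sketch only: 0x7F001000 is a made-up absolute address.
#include <asmjit/a64.h>
using namespace asmjit;

a64::Mem abs = a64::ptr(uint64_t(0x7F001000u));
// Only assembles when the PC-relative distance to this address is encodable;
// otherwise the emitter is expected to report an encoding error.
```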

View File

@@ -18,7 +18,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(arm)
namespace Utils { namespace Utils {
//! Encodes a 12-bit immediate part of an opcode that is used by a standard 32-bit ARM encoding. //! Encodes a 12-bit immediate part of an opcode that is used by a standard 32-bit ARM encoding.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static inline bool encodeAArch32Imm(uint64_t imm, uint32_t* encodedImmOut) noexcept { static inline bool encodeAArch32Imm(uint64_t imm, uint32_t* encodedImmOut) noexcept {
if (imm & 0xFFFFFFFF00000000u) if (imm & 0xFFFFFFFF00000000u)
return false; return false;
@@ -73,7 +73,7 @@ struct LogicalImm {
//! | 0 | 11110s | .....r | 2 | //! | 0 | 11110s | .....r | 2 |
//! +---+--------+--------+------+ //! +---+--------+--------+------+
//! ``` //! ```
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static bool encodeLogicalImm(uint64_t imm, uint32_t width, LogicalImm* out) noexcept { static bool encodeLogicalImm(uint64_t imm, uint32_t width, LogicalImm* out) noexcept {
// Determine the element width, which must be 2, 4, 8, 16, 32, or 64 bits. // Determine the element width, which must be 2, 4, 8, 16, 32, or 64 bits.
do { do {
@@ -121,7 +121,7 @@ static bool encodeLogicalImm(uint64_t imm, uint32_t width, LogicalImm* out) noex
//! Returns true if the given `imm` value is encodable as a logical immediate. The `width` argument describes the //! Returns true if the given `imm` value is encodable as a logical immediate. The `width` argument describes the
//! width of the operation, and must be either 32 or 64. This function can be used to test whether an immediate //! width of the operation, and must be either 32 or 64. This function can be used to test whether an immediate
//! value can be used with AND, ANDS, BIC, BICS, EON, EOR, ORN, and ORR instructions. //! value can be used with AND, ANDS, BIC, BICS, EON, EOR, ORN, and ORR instructions.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static ASMJIT_INLINE_NODEBUG bool isLogicalImm(uint64_t imm, uint32_t width) noexcept { static ASMJIT_INLINE_NODEBUG bool isLogicalImm(uint64_t imm, uint32_t width) noexcept {
LogicalImm dummy; LogicalImm dummy;
return encodeLogicalImm(imm, width, &dummy); return encodeLogicalImm(imm, width, &dummy);
@@ -129,7 +129,7 @@ static ASMJIT_INLINE_NODEBUG bool isLogicalImm(uint64_t imm, uint32_t width) noe
//! Returns true if the given `imm` value is encodable as an immediate with `add` and `sub` instructions on AArch64. //! Returns true if the given `imm` value is encodable as an immediate with `add` and `sub` instructions on AArch64.
//! These two instructions can encode 12-bit immediate value optionally shifted left by 12 bits. //! These two instructions can encode 12-bit immediate value optionally shifted left by 12 bits.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static ASMJIT_INLINE_NODEBUG bool isAddSubImm(uint64_t imm) noexcept { static ASMJIT_INLINE_NODEBUG bool isAddSubImm(uint64_t imm) noexcept {
return imm <= 0xFFFu || (imm & ~uint64_t(0xFFFu << 12)) == 0; return imm <= 0xFFFu || (imm & ~uint64_t(0xFFFu << 12)) == 0;
} }
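A few illustrative checks for the two predicates above; the expected results follow the documented AArch64 rules (logical immediates are repeated, rotated runs of ones and can never be zero or all ones, add/sub immediates are 12 bits optionally shifted left by 12). Including <asmjit/a64.h> is assumed to make arm::Utils visible here:

```
// Sketch only: expected values are derived from the rules documented above.
#include <asmjit/a64.h>
using namespace asmjit;

static void immSketch() {
  bool l1 = arm::Utils::isLogicalImm(0x00FF00FF00FF00FFu, 64); // true  (16-bit element 0x00FF repeated)
  bool l2 = arm::Utils::isLogicalImm(0x00000000000000FFu, 64); // true  (single run of 8 ones)
  bool l3 = arm::Utils::isLogicalImm(0x0000000000000000u, 64); // false (zero is never encodable)
  bool l4 = arm::Utils::isLogicalImm(0xFFFFFFFFFFFFFFFFu, 64); // false (all ones is never encodable)

  bool a1 = arm::Utils::isAddSubImm(0xFFFu);                   // true  (fits 12 bits directly)
  bool a2 = arm::Utils::isAddSubImm(0xFFF000u);                // true  (12-bit value shifted left by 12)
  bool a3 = arm::Utils::isAddSubImm(0xFFF001u);                // false (needs bits in both halves)

  (void)l1; (void)l2; (void)l3; (void)l4; (void)a1; (void)a2; (void)a3;
}
```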
@@ -153,7 +153,7 @@ static ASMJIT_INLINE_NODEBUG uint32_t encodeImm64ByteMaskToImm8(uint64_t imm) no
//! \cond //! \cond
//! A generic implementation that checks whether a floating point value can be converted to ARM Imm8. //! A generic implementation that checks whether a floating point value can be converted to ARM Imm8.
template<typename T, uint32_t kNumBBits, uint32_t kNumCDEFGHBits, uint32_t kNumZeroBits> template<typename T, uint32_t kNumBBits, uint32_t kNumCDEFGHBits, uint32_t kNumZeroBits>
static ASMJIT_FORCE_INLINE bool isFPImm8Generic(T val) noexcept { static ASMJIT_INLINE bool isFPImm8Generic(T val) noexcept {
constexpr uint32_t kAllBsMask = Support::lsbMask<uint32_t>(kNumBBits); constexpr uint32_t kAllBsMask = Support::lsbMask<uint32_t>(kNumBBits);
constexpr uint32_t kB0Pattern = Support::bitMask(kNumBBits - 1); constexpr uint32_t kB0Pattern = Support::bitMask(kNumBBits - 1);
constexpr uint32_t kB1Pattern = kAllBsMask ^ kB0Pattern; constexpr uint32_t kB1Pattern = kAllBsMask ^ kB0Pattern;

View File

@@ -24,6 +24,8 @@
#ifndef ASMJIT_ASMJIT_H_INCLUDED #ifndef ASMJIT_ASMJIT_H_INCLUDED
#define ASMJIT_ASMJIT_H_INCLUDED #define ASMJIT_ASMJIT_H_INCLUDED
#pragma message("asmjit/asmjit.h is deprecated! Please use asmjit/core.h, asmjit/x86.h, or asmjit/a64.h")
#include "./core.h" #include "./core.h"
#ifndef ASMJIT_NO_X86 #ifndef ASMJIT_NO_X86

View File

@@ -102,21 +102,21 @@ namespace asmjit {
//! //!
//! - Requirements: //! - Requirements:
//! //!
//! - AsmJit won't build without C++11 enabled. If you use older GCC or Clang you would have to enable at least //! - AsmJit won't build without C++17 enabled. If you use older GCC or Clang you would have to enable at least
//! C++11 standard through compiler flags. //! C++17 standard through compiler flags.
//! //!
//! - Tested: //! - Tested:
//! //!
//! - **Clang** - Tested by GitHub Actions - Clang 10+ is officially supported and tested by CI, older Clang versions //! - **Clang** - Tested by GitHub Actions - Clang 10+ is officially supported and tested by CI, older Clang versions
//! having C++11 should work, but are not tested anymore due to upgraded CI images. //! having C++17 should work, but these versions are not tested anymore due to upgraded CI images.
//! //!
//! - **GNU** - Tested by GitHub Actions - GCC 7+ is officially supported, older GCC versions from 4.8+ having C++11 //! - **GNU** - Tested by GitHub Actions - GCC 9+ is officially supported and tested by CI, older GCC versions such
//! enabled should also work, but are not tested anymore due to upgraded CI images. //! as GCC 7 should work, but these versions are not tested anymore due to upgraded CI images.
//! //!
//! - **MINGW** - Reported to work, but not tested in our CI environment (help welcome). //! - **MINGW** - Reported to work, but not tested in our CI environment (help welcome!).
//! //!
//! - **MSVC** - Tested by GitHub Actions - VS2019+ is officially supported, VS2015 and VS2017 is reported to work, //! - **MSVC** - Tested by GitHub Actions - VS2019 and onwards are officially supported and tested by CI, VS2015 and
//! but not tested by CI anymore. //! VS2017 are not tested anymore due to upgraded CI images.
//! //!
//! ### Supported Operating Systems and Platforms //! ### Supported Operating Systems and Platforms
//! //!
@@ -173,7 +173,7 @@ namespace asmjit {
//! cmake_minimum_required(VERSION 3.30) //! cmake_minimum_required(VERSION 3.30)
//! //!
//! project(asmjit_consumer C CXX) # Both C and CXX are required. //! project(asmjit_consumer C CXX) # Both C and CXX are required.
//! set(CMAKE_CXX_STANDARD 17) # C++11 and newer is supported. //! set(CMAKE_CXX_STANDARD 17) # C++17 and newer is supported.
//! //!
//! set(ASMJIT_DIR "3rdparty/asmjit") # Location of AsmJit. //! set(ASMJIT_DIR "3rdparty/asmjit") # Location of AsmJit.
//! set(ASMJIT_STATIC TRUE) # Force static build. //! set(ASMJIT_STATIC TRUE) # Force static build.
@@ -201,12 +201,12 @@ namespace asmjit {
//! //!
//! \section build_backends AsmJit Backends //! \section build_backends AsmJit Backends
//! //!
//! AsmJit currently supports only X86/X64 backend, but the plan is to add more backends in the future. By default //! All backends AsmJit supports are included by default. To exclude a backend use the following build-type macros:
//! AsmJit builds only the host backend, which is auto-detected at compile-time, but this can be overridden.
//! //!
//! - \ref ASMJIT_NO_X86 - Disables both X86 and X86_64 backends. //! - \ref ASMJIT_NO_X86 - Disables both X86 and X86_64 backends.
//! - \ref ASMJIT_NO_AARCH64 - Disables AArch64 backend. //! - \ref ASMJIT_NO_AARCH64 - Disables AArch64 backend.
//! - \ref ASMJIT_NO_FOREIGN - Disables the support for foreign architecture backends, only keeps a native backend. //! - \ref ASMJIT_NO_FOREIGN - Disables the support for foreign architecture backends, only keeps a native backend.
//! For example if your target is X86, `ASMJIT_NO_FOREIGN` would disable every backend but X86.
//! //!
//! \section build_options Build Options //! \section build_options Build Options
//! //!
@@ -268,14 +268,24 @@ namespace asmjit {
//! - Visit our [Public Gitter Chat](https://app.gitter.im/#/room/#asmjit:gitter.im) if you need a quick help. //! - Visit our [Public Gitter Chat](https://app.gitter.im/#/room/#asmjit:gitter.im) if you need a quick help.
//! //!
//! - Build AsmJit with `ASMJIT_NO_DEPRECATED` macro defined to make sure that you are not using deprecated //! - Build AsmJit with `ASMJIT_NO_DEPRECATED` macro defined to make sure that you are not using deprecated
//! functionality at all. Deprecated functions are decorated with `ASMJIT_DEPRECATED()` macro, but sometimes //! functionality at all. Deprecated functions are decorated with `[[deprecated]]` attribute, but sometimes
//! it's not possible to decorate everything like classes, which are used by deprecated functions as well, //! it's not possible to decorate everything like classes, which are used by deprecated functions as well,
//! because some compilers would warn about that. If your project compiles fine with `ASMJIT_NO_DEPRECATED` //! because some compilers would warn about that. If your project compiles fine with `ASMJIT_NO_DEPRECATED`
//! it's not using anything that was deprecated. //! it's not using anything that was deprecated.
//! //!
//! \section api_changes API Changes //! \section api_changes API Changes
//! //!
//! ### Changes committed at XXXX-XX-XX //! ### Changes committed at 2025-05-24
//!
//! Core changes:
//!
//! - AsmJit now requires C++17 to compile.
//!
//! - Deprecated asmjit/asmjit.h header. Use asmjit/core.h to include everything except backend specific stuff,
//! and asmjit/x86.h or asmjit/a64.h to include tools of a specific architecture. At this time the asmjit.h
//! header is just deprecated, so it will still work as it used to for some time.
//!
//! ### Changes committed at 2025-05-10
//! //!
//! Core changes: //! Core changes:
//! //!
@@ -565,7 +575,7 @@ namespace asmjit {
//! using namespace asmjit; //! using namespace asmjit;
//! //!
//! // Signature of the generated function. //! // Signature of the generated function.
//! typedef int (*Func)(void); //! using Func = int (*)(void);
//! //!
//! int main() { //! int main() {
//! JitRuntime rt; // Runtime specialized for JIT code execution. //! JitRuntime rt; // Runtime specialized for JIT code execution.
@@ -712,7 +722,7 @@ namespace asmjit {
//! //!
//! using namespace asmjit; //! using namespace asmjit;
//! //!
//! typedef void (*SumIntsFunc)(int* dst, const int* a, const int* b); //! using SumIntsFunc = void (*)(int* dst, const int* a, const int* b);
//! //!
//! int main() { //! int main() {
//! // Create a custom environment that matches the current host environment. //! // Create a custom environment that matches the current host environment.

View File

@@ -16,7 +16,7 @@
#define ASMJIT_LIBRARY_MAKE_VERSION(major, minor, patch) ((major << 16) | (minor << 8) | (patch)) #define ASMJIT_LIBRARY_MAKE_VERSION(major, minor, patch) ((major << 16) | (minor << 8) | (patch))
//! AsmJit library version, see \ref ASMJIT_LIBRARY_MAKE_VERSION for a version format reference. //! AsmJit library version, see \ref ASMJIT_LIBRARY_MAKE_VERSION for a version format reference.
#define ASMJIT_LIBRARY_VERSION ASMJIT_LIBRARY_MAKE_VERSION(1, 15, 0) #define ASMJIT_LIBRARY_VERSION ASMJIT_LIBRARY_MAKE_VERSION(1, 16, 0)
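Because the version packs into a single integer, consumers can gate on it at compile time; a minimal sketch using the two macros above:

```
#include <asmjit/core.h>

// Sketch only: require the release that moved the library to C++17.
#if ASMJIT_LIBRARY_VERSION >= ASMJIT_LIBRARY_MAKE_VERSION(1, 16, 0)
  // Code relying on 1.16.0+ behavior.
#else
#  error "AsmJit 1.16.0 or newer is required"
#endif
```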
//! \def ASMJIT_ABI_NAMESPACE //! \def ASMJIT_ABI_NAMESPACE
//! //!
@@ -27,7 +27,7 @@
//! AsmJit default, which makes it possible to use multiple AsmJit libraries within a single project, totally //! AsmJit default, which makes it possible to use multiple AsmJit libraries within a single project, totally
//! controlled by users. This is especially useful when such a library comes from a third party. //! controlled by users. This is especially useful when such a library comes from a third party.
#if !defined(ASMJIT_ABI_NAMESPACE) #if !defined(ASMJIT_ABI_NAMESPACE)
#define ASMJIT_ABI_NAMESPACE v1_15 #define ASMJIT_ABI_NAMESPACE v1_16
#endif // !ASMJIT_ABI_NAMESPACE #endif // !ASMJIT_ABI_NAMESPACE
//! \} //! \}
@@ -287,27 +287,26 @@ namespace asmjit {
//! is exported. However, GCC has some strange behavior that even if one or more symbols are exported it doesn't export //! is exported. However, GCC has some strange behavior that even if one or more symbols are exported it doesn't export
//! typeinfo unless the class itself is decorated with "visibility(default)" (i.e. ASMJIT_API). //! typeinfo unless the class itself is decorated with "visibility(default)" (i.e. ASMJIT_API).
//! \def ASMJIT_FORCE_INLINE //! \def ASMJIT_INLINE
//! //!
//! Decorator to force inlining of functions, uses either `__attribute__((__always_inline__))` or __forceinline, //! Decorator to force inlining of functions, uses either `__attribute__((__always_inline__))` or __forceinline,
//! depending on C++ compiler. //! depending on C++ compiler.
//! \def ASMJIT_INLINE_NODEBUG //! \def ASMJIT_INLINE_NODEBUG
//! //!
//! Like \ref ASMJIT_FORCE_INLINE, but uses additionally `__nodebug__` or `__artificial__` attribute to make the //! Like \ref ASMJIT_INLINE, but uses additionally `__nodebug__` or `__artificial__` attribute to make the
//! debugging of some AsmJit functions easier, especially getters and one-line abstractions where usually you don't //! debugging of some AsmJit functions easier, especially getters and one-line abstractions where usually you don't
//! want to step in. //! want to step in.
//! \def ASMJIT_INLINE_CONSTEXPR
//!
//! Like \ref ASMJIT_INLINE_NODEBUG, but having an additional `constexpr` attribute.
//! \def ASMJIT_NOINLINE //! \def ASMJIT_NOINLINE
//! //!
//! Decorator to avoid inlining of functions, uses either `__attribute__((__noinline__))` or `__declspec(noinline)` //! Decorator to avoid inlining of functions, uses either `__attribute__((__noinline__))` or `__declspec(noinline)`
//! depending on C++ compiler. //! depending on C++ compiler.
//! \def ASMJIT_NORETURN
//!
//! Decorator that marks functions that should never return. Typically used to implement assertion handlers that
//! terminate, so the function never returns.
//! \def ASMJIT_CDECL //! \def ASMJIT_CDECL
//! //!
//! CDECL function attribute - either `__attribute__((__cdecl__))` or `__cdecl`. //! CDECL function attribute - either `__attribute__((__cdecl__))` or `__cdecl`.
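A small sketch of where the inlining decorators documented above are typically placed; the functions themselves are made up and asmjit/core.h is assumed to provide the macros:

```
#include <asmjit/core.h>
#include <cstdint>

// Sketch only: illustrative functions showing decorator placement.
ASMJIT_INLINE_CONSTEXPR uint32_t alignUp4(uint32_t x) noexcept { return (x + 3u) & ~uint32_t(3); }

ASMJIT_INLINE_NODEBUG uint32_t lowByte(uint32_t x) noexcept { return x & 0xFFu; }

ASMJIT_NOINLINE void coldPath() noexcept { /* rarely taken path, intentionally kept out of line */ }
```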
@@ -371,11 +370,11 @@ namespace asmjit {
// Function attributes. // Function attributes.
#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__) #if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__)
#define ASMJIT_FORCE_INLINE inline __attribute__((__always_inline__)) #define ASMJIT_INLINE inline __attribute__((__always_inline__))
#elif !defined(ASMJIT_BUILD_DEBUG) && defined(_MSC_VER) #elif !defined(ASMJIT_BUILD_DEBUG) && defined(_MSC_VER)
#define ASMJIT_FORCE_INLINE __forceinline #define ASMJIT_INLINE __forceinline
#else #else
#define ASMJIT_FORCE_INLINE inline #define ASMJIT_INLINE inline
#endif #endif
@@ -387,15 +386,14 @@ namespace asmjit {
#define ASMJIT_INLINE_NODEBUG inline #define ASMJIT_INLINE_NODEBUG inline
#endif #endif
#define ASMJIT_INLINE_CONSTEXPR constexpr ASMJIT_INLINE_NODEBUG
#if defined(__GNUC__) #if defined(__GNUC__)
#define ASMJIT_NOINLINE __attribute__((__noinline__)) #define ASMJIT_NOINLINE __attribute__((__noinline__))
#define ASMJIT_NORETURN __attribute__((__noreturn__))
#elif defined(_MSC_VER) #elif defined(_MSC_VER)
#define ASMJIT_NOINLINE __declspec(noinline) #define ASMJIT_NOINLINE __declspec(noinline)
#define ASMJIT_NORETURN __declspec(noreturn)
#else #else
#define ASMJIT_NOINLINE #define ASMJIT_NOINLINE
#define ASMJIT_NORETURN
#endif #endif
// Calling conventions. // Calling conventions.
@@ -424,7 +422,7 @@ namespace asmjit {
#define ASMJIT_VECTORCALL #define ASMJIT_VECTORCALL
#endif #endif
// Type alignment (not allowed by C++11 'alignas' keyword). // Type alignment (not allowed by C++17 'alignas' keyword).
#if defined(__GNUC__) #if defined(__GNUC__)
#define ASMJIT_ALIGN_TYPE(TYPE, N) __attribute__((__aligned__(N))) TYPE #define ASMJIT_ALIGN_TYPE(TYPE, N) __attribute__((__aligned__(N))) TYPE
#elif defined(_MSC_VER) #elif defined(_MSC_VER)
@@ -442,35 +440,15 @@ namespace asmjit {
#define ASMJIT_MAY_ALIAS #define ASMJIT_MAY_ALIAS
#endif #endif
//! \def ASMJIT_MAYBE_UNUSED #if defined(__clang__) && !defined(_DOXYGEN)
//! // NOTE: Clang allows to apply this attribute to function arguments, which is what we want. Once GCC decides
//! Expands to `[[maybe_unused]]` if supported or a compiler attribute instead. // to support this use, we will enable it for GCC as well. However, until that, it will be clang only, which
#if __cplusplus >= 201703L // is what we need for static analysis.
#define ASMJIT_MAYBE_UNUSED [[maybe_unused]]
#elif defined(__GNUC__)
#define ASMJIT_MAYBE_UNUSED __attribute__((unused))
#else
#define ASMJIT_MAYBE_UNUSED
#endif
#if defined(__clang_major__) && __clang_major__ >= 4 && !defined(_DOXYGEN)
// NOTE: Clang allows to apply this attribute to function arguments, which is what we want. Once GCC decides to
// support this use, we will enable it for GCC as well. However, until that, it will be clang only, which is
// what we need for static analysis.
#define ASMJIT_NONNULL(FUNCTION_ARGUMENT) FUNCTION_ARGUMENT __attribute__((__nonnull__)) #define ASMJIT_NONNULL(FUNCTION_ARGUMENT) FUNCTION_ARGUMENT __attribute__((__nonnull__))
#else #else
#define ASMJIT_NONNULL(FUNCTION_ARGUMENT) FUNCTION_ARGUMENT #define ASMJIT_NONNULL(FUNCTION_ARGUMENT) FUNCTION_ARGUMENT
#endif #endif
//! \def ASMJIT_NOEXCEPT_TYPE
//!
//! Defined to `noexcept` in C++17 mode or nothing otherwise. Used by function typedefs.
#if __cplusplus >= 201703L
#define ASMJIT_NOEXCEPT_TYPE noexcept
#else
#define ASMJIT_NOEXCEPT_TYPE
#endif
//! \def ASMJIT_ASSUME(...) //! \def ASMJIT_ASSUME(...)
//! //!
//! Macro that tells the C/C++ compiler that the expression `...` evaluates to true. //! Macro that tells the C/C++ compiler that the expression `...` evaluates to true.
@@ -504,35 +482,13 @@ namespace asmjit {
#define ASMJIT_UNLIKELY(...) (__VA_ARGS__) #define ASMJIT_UNLIKELY(...) (__VA_ARGS__)
#endif #endif
//! \def ASMJIT_FALLTHROUGH
//!
//! Portable [[fallthrough]] attribute.
#if defined(__clang__) && __cplusplus >= 201103L
#define ASMJIT_FALLTHROUGH [[clang::fallthrough]]
#elif defined(__GNUC__) && __GNUC__ >= 7
#define ASMJIT_FALLTHROUGH __attribute__((__fallthrough__))
#else
#define ASMJIT_FALLTHROUGH ((void)0) /* fallthrough */
#endif
//! \def ASMJIT_DEPRECATED
//!
//! Marks function, class, struct, enum, or anything else as deprecated.
#if defined(__GNUC__)
#define ASMJIT_DEPRECATED(MESSAGE) __attribute__((__deprecated__(MESSAGE)))
#elif defined(_MSC_VER)
#define ASMJIT_DEPRECATED(MESSAGE) __declspec(deprecated(MESSAGE))
#else
#define ASMJIT_DEPRECATED(MESSAGE)
#endif
// Utilities. // Utilities.
#define ASMJIT_OFFSET_OF(STRUCT, MEMBER) ((int)(intptr_t)((const char*)&((const STRUCT*)0x100)->MEMBER) - 0x100) #define ASMJIT_OFFSET_OF(STRUCT, MEMBER) ((int)(intptr_t)((const char*)&((const STRUCT*)0x100)->MEMBER) - 0x100)
#define ASMJIT_ARRAY_SIZE(X) uint32_t(sizeof(X) / sizeof(X[0])) #define ASMJIT_ARRAY_SIZE(X) uint32_t(sizeof(X) / sizeof(X[0]))
#if ASMJIT_CXX_HAS_ATTRIBUTE(no_sanitize, 0) #if ASMJIT_CXX_HAS_ATTRIBUTE(no_sanitize, 0)
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize__("undefined"))) #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize__("undefined")))
#elif defined(__GNUC__) && __GNUC__ >= 5 #elif defined(__GNUC__)
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize_undefined__)) #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize_undefined__))
#else #else
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF
@@ -541,25 +497,14 @@ namespace asmjit {
// Diagnostic Macros // Diagnostic Macros
// ====================================== // ======================================
#if !defined(__clang__) && !defined(__INTEL_COMPILER) && !defined(_DOXYGEN) #if defined(_MSC_VER) && !defined(__clang__) && !defined(_DOXYGEN)
#if defined(__GNUC__) && __GNUC__ == 4 #define ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \
// There is a bug in GCC 4.X that has been fixed in GCC 5+, so just silence the warning. __pragma(warning(push)) \
#define ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \ __pragma(warning(disable: 4127)) /* conditional expression is const */ \
_Pragma("GCC diagnostic push") \ __pragma(warning(disable: 4201)) /* nameless struct/union */
_Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"") #define ASMJIT_END_DIAGNOSTIC_SCOPE \
#define ASMJIT_END_DIAGNOSTIC_SCOPE \ __pragma(warning(pop))
_Pragma("GCC diagnostic pop") #else
#elif defined(_MSC_VER)
#define ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \
__pragma(warning(push)) \
__pragma(warning(disable: 4127)) /* conditional expression is const */ \
__pragma(warning(disable: 4201)) /* nameless struct/union */
#define ASMJIT_END_DIAGNOSTIC_SCOPE \
__pragma(warning(pop))
#endif
#endif
#if !defined(ASMJIT_BEGIN_DIAGNOSTIC_SCOPE) && !defined(ASMJIT_END_DIAGNOSTIC_SCOPE)
#define ASMJIT_BEGIN_DIAGNOSTIC_SCOPE #define ASMJIT_BEGIN_DIAGNOSTIC_SCOPE
#define ASMJIT_END_DIAGNOSTIC_SCOPE #define ASMJIT_END_DIAGNOSTIC_SCOPE
#endif #endif
@@ -568,19 +513,19 @@ namespace asmjit {
// ====================================== // ======================================
#if !defined(ASMJIT_NO_ABI_NAMESPACE) && !defined(_DOXYGEN) #if !defined(ASMJIT_NO_ABI_NAMESPACE) && !defined(_DOXYGEN)
#define ASMJIT_BEGIN_NAMESPACE \ #define ASMJIT_BEGIN_NAMESPACE \
ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \ ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \
namespace asmjit { \ namespace asmjit { \
inline namespace ASMJIT_ABI_NAMESPACE { inline namespace ASMJIT_ABI_NAMESPACE {
#define ASMJIT_END_NAMESPACE \ #define ASMJIT_END_NAMESPACE \
}} \ }} \
ASMJIT_END_DIAGNOSTIC_SCOPE ASMJIT_END_DIAGNOSTIC_SCOPE
#else #else
#define ASMJIT_BEGIN_NAMESPACE \ #define ASMJIT_BEGIN_NAMESPACE \
ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \ ASMJIT_BEGIN_DIAGNOSTIC_SCOPE \
namespace asmjit { namespace asmjit {
#define ASMJIT_END_NAMESPACE \ #define ASMJIT_END_NAMESPACE \
} \ } \
ASMJIT_END_DIAGNOSTIC_SCOPE ASMJIT_END_DIAGNOSTIC_SCOPE
#endif #endif
@@ -590,13 +535,13 @@ namespace asmjit {
// C++ Utilities // C++ Utilities
// ============= // =============
#define ASMJIT_NONCOPYABLE(Type) \ #define ASMJIT_NONCOPYABLE(Type) \
Type(const Type& other) = delete; \ Type(const Type& other) = delete; \
Type& operator=(const Type& other) = delete; Type& operator=(const Type& other) = delete;
#define ASMJIT_NONCONSTRUCTIBLE(Type) \ #define ASMJIT_NONCONSTRUCTIBLE(Type) \
Type() = delete; \ Type() = delete; \
Type(const Type& other) = delete; \ Type(const Type& other) = delete; \
Type& operator=(const Type& other) = delete; Type& operator=(const Type& other) = delete;
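For illustration, a hypothetical class using the helper above to delete its copy operations (the class name is made up):

```
// Sketch only: MyJitContext exists purely to show the macro in use.
class MyJitContext {
public:
  ASMJIT_NONCOPYABLE(MyJitContext)

  MyJitContext() = default;        // needed, the deleted copy ctor suppresses the implicit default ctor
  // MyJitContext copy = other;    // would not compile - copying is deleted
};
```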
//! \def ASMJIT_DEFINE_ENUM_FLAGS(T) //! \def ASMJIT_DEFINE_ENUM_FLAGS(T)
@@ -605,38 +550,32 @@ namespace asmjit {
#ifdef _DOXYGEN #ifdef _DOXYGEN
#define ASMJIT_DEFINE_ENUM_FLAGS(T) #define ASMJIT_DEFINE_ENUM_FLAGS(T)
#else #else
#define ASMJIT_DEFINE_ENUM_FLAGS(T) \ #define ASMJIT_DEFINE_ENUM_FLAGS(T) \
static ASMJIT_INLINE_NODEBUG constexpr T operator~(T a) noexcept { \ static ASMJIT_INLINE_CONSTEXPR T operator~(T a) noexcept { \
return T(~(std::underlying_type<T>::type)(a)); \ return T(~std::underlying_type_t<T>(a)); \
} \ } \
\ \
static ASMJIT_INLINE_NODEBUG constexpr T operator|(T a, T b) noexcept { \ static ASMJIT_INLINE_CONSTEXPR T operator|(T a, T b) noexcept { \
return T((std::underlying_type<T>::type)(a) | \ return T(std::underlying_type_t<T>(a) | std::underlying_type_t<T>(b)); \
(std::underlying_type<T>::type)(b)); \ } \
} \ static ASMJIT_INLINE_CONSTEXPR T operator&(T a, T b) noexcept { \
static ASMJIT_INLINE_NODEBUG constexpr T operator&(T a, T b) noexcept { \ return T(std::underlying_type_t<T>(a) & std::underlying_type_t<T>(b)); \
return T((std::underlying_type<T>::type)(a) & \ } \
(std::underlying_type<T>::type)(b)); \ static ASMJIT_INLINE_CONSTEXPR T operator^(T a, T b) noexcept { \
} \ return T(std::underlying_type_t<T>(a) ^ std::underlying_type_t<T>(b)); \
static ASMJIT_INLINE_NODEBUG constexpr T operator^(T a, T b) noexcept { \ } \
return T((std::underlying_type<T>::type)(a) ^ \ \
(std::underlying_type<T>::type)(b)); \ static ASMJIT_INLINE_CONSTEXPR T& operator|=(T& a, T b) noexcept { \
} \ a = T(std::underlying_type_t<T>(a) | std::underlying_type_t<T>(b)); \
\ return a; \
static ASMJIT_INLINE_NODEBUG T& operator|=(T& a, T b) noexcept { \ } \
a = T((std::underlying_type<T>::type)(a) | \ static ASMJIT_INLINE_CONSTEXPR T& operator&=(T& a, T b) noexcept { \
(std::underlying_type<T>::type)(b)); \ a = T(std::underlying_type_t<T>(a) & std::underlying_type_t<T>(b)); \
return a; \ return a; \
} \ } \
static ASMJIT_INLINE_NODEBUG T& operator&=(T& a, T b) noexcept { \ static ASMJIT_INLINE_CONSTEXPR T& operator^=(T& a, T b) noexcept { \
a = T((std::underlying_type<T>::type)(a) & \ a = T(std::underlying_type_t<T>(a) ^ std::underlying_type_t<T>(b)); \
(std::underlying_type<T>::type)(b)); \ return a; \
return a; \
} \
static ASMJIT_INLINE_NODEBUG T& operator^=(T& a, T b) noexcept { \
a = T((std::underlying_type<T>::type)(a) ^ \
(std::underlying_type<T>::type)(b)); \
return a; \
} }
#endif #endif
@@ -646,18 +585,18 @@ namespace asmjit {
#if defined(_DOXYGEN) || (defined(_MSC_VER) && _MSC_VER <= 1900) #if defined(_DOXYGEN) || (defined(_MSC_VER) && _MSC_VER <= 1900)
#define ASMJIT_DEFINE_ENUM_COMPARE(T) #define ASMJIT_DEFINE_ENUM_COMPARE(T)
#else #else
#define ASMJIT_DEFINE_ENUM_COMPARE(T) \ #define ASMJIT_DEFINE_ENUM_COMPARE(T) \
static ASMJIT_INLINE_NODEBUG bool operator<(T a, T b) noexcept { \ static ASMJIT_INLINE_CONSTEXPR bool operator<(T a, T b) noexcept { \
return (std::underlying_type<T>::type)(a) < (std::underlying_type<T>::type)(b); \ return (std::underlying_type_t<T>)(a) < (std::underlying_type_t<T>)(b); \
} \ } \
static ASMJIT_INLINE_NODEBUG bool operator<=(T a, T b) noexcept { \ static ASMJIT_INLINE_CONSTEXPR bool operator<=(T a, T b) noexcept { \
return (std::underlying_type<T>::type)(a) <= (std::underlying_type<T>::type)(b); \ return (std::underlying_type_t<T>)(a) <= (std::underlying_type_t<T>)(b); \
} \ } \
static ASMJIT_INLINE_NODEBUG bool operator>(T a, T b) noexcept { \ static ASMJIT_INLINE_CONSTEXPR bool operator>(T a, T b) noexcept { \
return (std::underlying_type<T>::type)(a) > (std::underlying_type<T>::type)(b); \ return (std::underlying_type_t<T>)(a) > (std::underlying_type_t<T>)(b); \
} \ } \
static ASMJIT_INLINE_NODEBUG bool operator>=(T a, T b) noexcept { \ static ASMJIT_INLINE_CONSTEXPR bool operator>=(T a, T b) noexcept { \
return (std::underlying_type<T>::type)(a) >= (std::underlying_type<T>::type)(b); \ return (std::underlying_type_t<T>)(a) >= (std::underlying_type_t<T>)(b); \
} }
#endif #endif

View File

@@ -101,9 +101,12 @@ static constexpr CondCode _reverseCondTable[] = {
//! \endcond //! \endcond
//! Reverses a condition code (reverses the corresponding operands of a comparison). //! Reverses a condition code (reverses the corresponding operands of a comparison).
static ASMJIT_INLINE_NODEBUG constexpr CondCode reverseCond(CondCode cond) noexcept { return _reverseCondTable[uint8_t(cond)]; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR CondCode reverseCond(CondCode cond) noexcept { return _reverseCondTable[uint8_t(cond)]; }
//! Negates a condition code. //! Negates a condition code.
static ASMJIT_INLINE_NODEBUG constexpr CondCode negateCond(CondCode cond) noexcept { return CondCode(uint8_t(cond) ^ uint8_t(1)); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR CondCode negateCond(CondCode cond) noexcept { return CondCode(uint8_t(cond) ^ uint8_t(1)); }
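A tiny sketch of the difference between the two helpers above (negation flips the predicate, reversal swaps the compared operands); the expected results follow the usual AArch64 meaning of the condition codes and assume the helpers live in the arm namespace:

```
// Sketch only: expectations follow the AArch64 semantics of the condition codes.
#include <asmjit/a64.h>
using namespace asmjit;

arm::CondCode n = arm::negateCond(arm::CondCode::kEQ);   // expected: kNE  ("a == b" negated)
arm::CondCode r = arm::reverseCond(arm::CondCode::kLT);  // expected: kGT  ("a < b" with operands swapped)
```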
//! Memory offset mode. //! Memory offset mode.
//! //!
@@ -180,20 +183,24 @@ public:
ASMJIT_INLINE_NODEBUG Shift() noexcept = default; ASMJIT_INLINE_NODEBUG Shift() noexcept = default;
//! Copy constructor (default) //! Copy constructor (default)
ASMJIT_INLINE_NODEBUG constexpr Shift(const Shift& other) noexcept = default; ASMJIT_INLINE_CONSTEXPR Shift(const Shift& other) noexcept = default;
//! Constructs Shift from operation `op` and shift `value`. //! Constructs Shift from operation `op` and shift `value`.
ASMJIT_INLINE_NODEBUG constexpr Shift(ShiftOp op, uint32_t value) noexcept ASMJIT_INLINE_CONSTEXPR Shift(ShiftOp op, uint32_t value) noexcept
: _op(op), : _op(op),
_value(value) {} _value(value) {}
//! Returns the shift operation. //! Returns the shift operation.
ASMJIT_INLINE_NODEBUG constexpr ShiftOp op() const noexcept { return _op; } [[nodiscard]]
ASMJIT_INLINE_CONSTEXPR ShiftOp op() const noexcept { return _op; }
//! Sets shift operation to `op`. //! Sets shift operation to `op`.
ASMJIT_INLINE_NODEBUG void setOp(ShiftOp op) noexcept { _op = op; } ASMJIT_INLINE_NODEBUG void setOp(ShiftOp op) noexcept { _op = op; }
//! Returns the shift amount. //! Returns the shift amount.
ASMJIT_INLINE_NODEBUG constexpr uint32_t value() const noexcept { return _value; } [[nodiscard]]
ASMJIT_INLINE_CONSTEXPR uint32_t value() const noexcept { return _value; }
//! Sets shift amount to `value`. //! Sets shift amount to `value`.
ASMJIT_INLINE_NODEBUG void setValue(uint32_t value) noexcept { _value = value; } ASMJIT_INLINE_NODEBUG void setValue(uint32_t value) noexcept { _value = value; }
}; };

View File

@@ -106,51 +106,64 @@ ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegSignature(Arch arch, TypeId typeId
// TODO: Remove this, should never be used like this. // TODO: Remove this, should never be used like this.
// Passed RegType instead of TypeId? // Passed RegType instead of TypeId?
if (uint32_t(typeId) <= uint32_t(RegType::kMaxValue)) if (uint32_t(typeId) <= uint32_t(RegType::kMaxValue)) {
typeId = archTraits.regTypeToTypeId(RegType(uint32_t(typeId))); typeId = archTraits.regTypeToTypeId(RegType(uint32_t(typeId)));
}
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(typeId))) if (ASMJIT_UNLIKELY(!TypeUtils::isValid(typeId))) {
return DebugUtils::errored(kErrorInvalidTypeId); return DebugUtils::errored(kErrorInvalidTypeId);
}
// First normalize architecture dependent types. // First normalize architecture dependent types.
if (TypeUtils::isAbstract(typeId)) { if (TypeUtils::isAbstract(typeId)) {
bool is32Bit = Environment::is32Bit(arch); bool is32Bit = Environment::is32Bit(arch);
if (typeId == TypeId::kIntPtr) if (typeId == TypeId::kIntPtr) {
typeId = is32Bit ? TypeId::kInt32 : TypeId::kInt64; typeId = is32Bit ? TypeId::kInt32 : TypeId::kInt64;
else }
else {
typeId = is32Bit ? TypeId::kUInt32 : TypeId::kUInt64; typeId = is32Bit ? TypeId::kUInt32 : TypeId::kUInt64;
}
} }
// Type size helps to construct all groups of registers. // Type size helps to construct all groups of registers.
// TypeId is invalid if the size is zero. // TypeId is invalid if the size is zero.
uint32_t size = TypeUtils::sizeOf(typeId); uint32_t size = TypeUtils::sizeOf(typeId);
if (ASMJIT_UNLIKELY(!size)) if (ASMJIT_UNLIKELY(!size)) {
return DebugUtils::errored(kErrorInvalidTypeId); return DebugUtils::errored(kErrorInvalidTypeId);
}
if (ASMJIT_UNLIKELY(typeId == TypeId::kFloat80)) if (ASMJIT_UNLIKELY(typeId == TypeId::kFloat80)) {
return DebugUtils::errored(kErrorInvalidUseOfF80); return DebugUtils::errored(kErrorInvalidUseOfF80);
}
RegType regType = RegType::kNone; RegType regType = RegType::kNone;
if (TypeUtils::isBetween(typeId, TypeId::_kBaseStart, TypeId::_kVec32Start)) { if (TypeUtils::isBetween(typeId, TypeId::_kBaseStart, TypeId::_kVec32Start)) {
regType = archTraits._typeIdToRegType[uint32_t(typeId) - uint32_t(TypeId::_kBaseStart)]; regType = archTraits._typeIdToRegType[uint32_t(typeId) - uint32_t(TypeId::_kBaseStart)];
if (regType == RegType::kNone) { if (regType == RegType::kNone) {
if (typeId == TypeId::kInt64 || typeId == TypeId::kUInt64) if (typeId == TypeId::kInt64 || typeId == TypeId::kUInt64) {
return DebugUtils::errored(kErrorInvalidUseOfGpq); return DebugUtils::errored(kErrorInvalidUseOfGpq);
else }
else {
return DebugUtils::errored(kErrorInvalidTypeId); return DebugUtils::errored(kErrorInvalidTypeId);
}
} }
} }
else { else {
if (size <= 8 && archTraits._regSignature[RegType::kVec64].isValid()) if (size <= 8 && archTraits._regSignature[RegType::kVec64].isValid()) {
regType = RegType::kVec64; regType = RegType::kVec64;
else if (size <= 16 && archTraits._regSignature[RegType::kVec128].isValid()) }
else if (size <= 16 && archTraits._regSignature[RegType::kVec128].isValid()) {
regType = RegType::kVec128; regType = RegType::kVec128;
else if (size == 32 && archTraits._regSignature[RegType::kVec256].isValid()) }
else if (size == 32 && archTraits._regSignature[RegType::kVec256].isValid()) {
regType = RegType::kVec256; regType = RegType::kVec256;
else if (archTraits._regSignature[RegType::kVec512].isValid()) }
else if (archTraits._regSignature[RegType::kVec512].isValid()) {
regType = RegType::kVec512; regType = RegType::kVec512;
else }
else {
return DebugUtils::errored(kErrorInvalidTypeId); return DebugUtils::errored(kErrorInvalidTypeId);
}
} }
*typeIdOut = typeId; *typeIdOut = typeId;

View File

@@ -205,49 +205,76 @@ struct ArchTraits {
//! \{ //! \{
//! Returns stack pointer register id. //! Returns stack pointer register id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t spRegId() const noexcept { return _spRegId; } ASMJIT_INLINE_NODEBUG uint32_t spRegId() const noexcept { return _spRegId; }
//! Returns stack frame register id. //! Returns stack frame register id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t fpRegId() const noexcept { return _fpRegId; } ASMJIT_INLINE_NODEBUG uint32_t fpRegId() const noexcept { return _fpRegId; }
//! Returns link register id, if the architecture provides it. //! Returns link register id, if the architecture provides it.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t linkRegId() const noexcept { return _linkRegId; } ASMJIT_INLINE_NODEBUG uint32_t linkRegId() const noexcept { return _linkRegId; }
//! Returns instruction pointer register id, if the architecture provides it. //! Returns instruction pointer register id, if the architecture provides it.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t ipRegId() const noexcept { return _ipRegId; } ASMJIT_INLINE_NODEBUG uint32_t ipRegId() const noexcept { return _ipRegId; }
//! Returns a hardware stack alignment requirement. //! Returns a hardware stack alignment requirement.
//! //!
//! \note This is a hardware constraint. Architectures that don't constrain it would return the lowest alignment //! \note This is a hardware constraint. Architectures that don't constrain it would return the lowest alignment
//! (1); however, some architectures may constrain the alignment, for example AArch64 requires 16-byte alignment. //! (1); however, some architectures may constrain the alignment, for example AArch64 requires 16-byte alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t hwStackAlignment() const noexcept { return _hwStackAlignment; } ASMJIT_INLINE_NODEBUG uint32_t hwStackAlignment() const noexcept { return _hwStackAlignment; }
//! Tests whether the architecture provides a link register, which is used across function calls. If the link //! Tests whether the architecture provides a link register, which is used across function calls. If the link
//! register is not provided, then a function call pushes the return address on the stack (X86/X64). //! register is not provided, then a function call pushes the return address on the stack (X86/X64).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLinkReg() const noexcept { return _linkRegId != BaseReg::kIdBad; } ASMJIT_INLINE_NODEBUG bool hasLinkReg() const noexcept { return _linkRegId != BaseReg::kIdBad; }
//! Returns minimum addressable offset on stack guaranteed for all instructions. //! Returns minimum addressable offset on stack guaranteed for all instructions.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t minStackOffset() const noexcept { return _minStackOffset; } ASMJIT_INLINE_NODEBUG uint32_t minStackOffset() const noexcept { return _minStackOffset; }
//! Returns maximum addressable offset on stack depending on specific instruction. //! Returns maximum addressable offset on stack depending on specific instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t maxStackOffset() const noexcept { return _maxStackOffset; } ASMJIT_INLINE_NODEBUG uint32_t maxStackOffset() const noexcept { return _maxStackOffset; }
//! Returns ISA flags of the given register `group`. //! Returns ISA flags of the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstHints instFeatureHints(RegGroup group) const noexcept { return _instHints[group]; } ASMJIT_INLINE_NODEBUG InstHints instFeatureHints(RegGroup group) const noexcept { return _instHints[group]; }
//! Tests whether the given register `group` has the given `flag` set. //! Tests whether the given register `group` has the given `flag` set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstHint(RegGroup group, InstHints feature) const noexcept { return Support::test(_instHints[group], feature); } ASMJIT_INLINE_NODEBUG bool hasInstHint(RegGroup group, InstHints feature) const noexcept { return Support::test(_instHints[group], feature); }
//! Tests whether the ISA provides register swap instruction for the given register `group`. //! Tests whether the ISA provides register swap instruction for the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstRegSwap(RegGroup group) const noexcept { return hasInstHint(group, InstHints::kRegSwap); } ASMJIT_INLINE_NODEBUG bool hasInstRegSwap(RegGroup group) const noexcept { return hasInstHint(group, InstHints::kRegSwap); }
//! Tests whether the ISA provides push/pop instructions for the given register `group`. //! Tests whether the ISA provides push/pop instructions for the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstPushPop(RegGroup group) const noexcept { return hasInstHint(group, InstHints::kPushPop); } ASMJIT_INLINE_NODEBUG bool hasInstPushPop(RegGroup group) const noexcept { return hasInstHint(group, InstHints::kPushPop); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRegType(RegType type) const noexcept { ASMJIT_INLINE_NODEBUG bool hasRegType(RegType type) const noexcept {
return type <= RegType::kMaxValue && _regSignature[type].isValid(); return type <= RegType::kMaxValue && _regSignature[type].isValid();
} }
//! Returns an operand signature from the given register `type` of this architecture. //! Returns an operand signature from the given register `type` of this architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OperandSignature regTypeToSignature(RegType type) const noexcept { return _regSignature[type]; } ASMJIT_INLINE_NODEBUG OperandSignature regTypeToSignature(RegType type) const noexcept { return _regSignature[type]; }
//! Returns a register group of the given register `type` of this architecture. //! Returns a register group of the given register `type` of this architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegGroup regTypeToGroup(RegType type) const noexcept { return _regSignature[type].regGroup(); } ASMJIT_INLINE_NODEBUG RegGroup regTypeToGroup(RegType type) const noexcept { return _regSignature[type].regGroup(); }
//! Returns a register size of the given register `type` of this architecture. //! Returns a register size of the given register `type` of this architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t regTypeToSize(RegType type) const noexcept { return _regSignature[type].size(); } ASMJIT_INLINE_NODEBUG uint32_t regTypeToSize(RegType type) const noexcept { return _regSignature[type].size(); }
//! Returns a corresponding `TypeId` from the given register `type` of this architecture. //! Returns a corresponding `TypeId` from the given register `type` of this architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG TypeId regTypeToTypeId(RegType type) const noexcept { return _regTypeToTypeId[type]; } ASMJIT_INLINE_NODEBUG TypeId regTypeToTypeId(RegType type) const noexcept { return _regTypeToTypeId[type]; }
//! Returns a table of ISA word names that appear in formatted text. Word names are ISA dependent. //! Returns a table of ISA word names that appear in formatted text. Word names are ISA dependent.
@@ -257,9 +284,11 @@ struct ArchTraits {
//! - [1] 16-bits //! - [1] 16-bits
//! - [2] 32-bits //! - [2] 32-bits
//! - [3] 64-bits //! - [3] 64-bits
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ArchTypeNameId* typeNameIdTable() const noexcept { return _typeNameIdTable; } ASMJIT_INLINE_NODEBUG const ArchTypeNameId* typeNameIdTable() const noexcept { return _typeNameIdTable; }
//! Returns an ISA word name identifier of the given `index`, see \ref typeNameIdTable() for more details. //! Returns an ISA word name identifier of the given `index`, see \ref typeNameIdTable() for more details.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ArchTypeNameId typeNameIdByIndex(uint32_t index) const noexcept { return _typeNameIdTable[index]; } ASMJIT_INLINE_NODEBUG ArchTypeNameId typeNameIdByIndex(uint32_t index) const noexcept { return _typeNameIdTable[index]; }
//! \} //! \}
@@ -268,6 +297,7 @@ struct ArchTraits {
//! \{ //! \{
//! Returns a const reference to `ArchTraits` for the given architecture `arch`. //! Returns a const reference to `ArchTraits` for the given architecture `arch`.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG const ArchTraits& byArch(Arch arch) noexcept; static ASMJIT_INLINE_NODEBUG const ArchTraits& byArch(Arch arch) noexcept;
//! \} //! \}
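An illustrative sketch of querying a couple of the traits above for two architectures; the expected values follow the documentation (AArch64 mandates 16-byte stack alignment and has a link register, X86_64 does not):

```
// Sketch only: assumes Arch::kX64 and Arch::kAArch64 are available as usual.
#include <asmjit/core.h>
using namespace asmjit;

static void archTraitsSketch() {
  const ArchTraits& a64Traits = ArchTraits::byArch(Arch::kAArch64);
  const ArchTraits& x64Traits = ArchTraits::byArch(Arch::kX64);

  uint32_t a64StackAlign = a64Traits.hwStackAlignment();  // expected: 16
  bool a64HasLink = a64Traits.hasLinkReg();               // expected: true (uses LR)
  bool x64HasLink = x64Traits.hasLinkReg();               // expected: false (return address on stack)

  (void)a64StackAlign; (void)a64HasLink; (void)x64HasLink;
}
```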

View File

@@ -26,12 +26,14 @@ BaseAssembler::~BaseAssembler() noexcept {}
// ================================= // =================================
Error BaseAssembler::setOffset(size_t offset) { Error BaseAssembler::setOffset(size_t offset) {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized)); return reportError(DebugUtils::errored(kErrorNotInitialized));
}
size_t size = Support::max<size_t>(_section->bufferSize(), this->offset()); size_t size = Support::max<size_t>(_section->bufferSize(), this->offset());
if (ASMJIT_UNLIKELY(offset > size)) if (ASMJIT_UNLIKELY(offset > size)) {
return reportError(DebugUtils::errored(kErrorInvalidArgument)); return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
_bufferPtr = _bufferData + offset; _bufferPtr = _bufferData + offset;
return kErrorOk; return kErrorOk;
@@ -50,15 +52,18 @@ static void BaseAssembler_initSection(BaseAssembler* self, Section* section) noe
} }
Error BaseAssembler::section(Section* section) { Error BaseAssembler::section(Section* section) {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized)); return reportError(DebugUtils::errored(kErrorNotInitialized));
}
if (!_code->isSectionValid(section->id()) || _code->_sections[section->id()] != section) if (!_code->isSectionValid(section->id()) || _code->_sections[section->id()] != section) {
return reportError(DebugUtils::errored(kErrorInvalidSection)); return reportError(DebugUtils::errored(kErrorInvalidSection));
}
#ifndef ASMJIT_NO_LOGGING #ifndef ASMJIT_NO_LOGGING
if (_logger) if (_logger) {
_logger->logf(".section %s {#%u}\n", section->name(), section->id()); _logger->logf(".section %s {#%u}\n", section->name(), section->id());
}
#endif #endif
BaseAssembler_initSection(this, section); BaseAssembler_initSection(this, section);
@@ -73,10 +78,12 @@ Label BaseAssembler::newLabel() {
if (ASMJIT_LIKELY(_code)) { if (ASMJIT_LIKELY(_code)) {
LabelEntry* le; LabelEntry* le;
Error err = _code->newLabelEntry(&le); Error err = _code->newLabelEntry(&le);
if (ASMJIT_UNLIKELY(err)) if (ASMJIT_UNLIKELY(err)) {
reportError(err); reportError(err);
else }
else {
labelId = le->id(); labelId = le->id();
}
} }
return Label(labelId); return Label(labelId);
} }
@@ -86,28 +93,33 @@ Label BaseAssembler::newNamedLabel(const char* name, size_t nameSize, LabelType
if (ASMJIT_LIKELY(_code)) { if (ASMJIT_LIKELY(_code)) {
LabelEntry* le; LabelEntry* le;
Error err = _code->newNamedLabelEntry(&le, name, nameSize, type, parentId); Error err = _code->newNamedLabelEntry(&le, name, nameSize, type, parentId);
if (ASMJIT_UNLIKELY(err)) if (ASMJIT_UNLIKELY(err)) {
reportError(err); reportError(err);
else }
else {
labelId = le->id(); labelId = le->id();
}
} }
return Label(labelId); return Label(labelId);
} }
Error BaseAssembler::bind(const Label& label) { Error BaseAssembler::bind(const Label& label) {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized)); return reportError(DebugUtils::errored(kErrorNotInitialized));
}
Error err = _code->bindLabel(label, _section->id(), offset()); Error err = _code->bindLabel(label, _section->id(), offset());
#ifndef ASMJIT_NO_LOGGING #ifndef ASMJIT_NO_LOGGING
if (_logger) if (_logger) {
EmitterUtils::logLabelBound(this, label); EmitterUtils::logLabelBound(this, label);
}
#endif #endif
resetInlineComment(); resetInlineComment();
if (err) if (err) {
return reportError(err); return reportError(err);
}
return kErrorOk; return kErrorOk;
} }
@@ -116,11 +128,13 @@ Error BaseAssembler::bind(const Label& label) {
// ===================== // =====================
Error BaseAssembler::embed(const void* data, size_t dataSize) { Error BaseAssembler::embed(const void* data, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized)); return reportError(DebugUtils::errored(kErrorNotInitialized));
}
if (dataSize == 0) if (dataSize == 0) {
return kErrorOk; return kErrorOk;
}
CodeWriter writer(this); CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize)); ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
@@ -144,11 +158,13 @@ Error BaseAssembler::embedDataArray(TypeId typeId, const void* data, size_t item
uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize()); uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize());
TypeId finalTypeId = TypeUtils::deabstract(typeId, deabstractDelta); TypeId finalTypeId = TypeUtils::deabstract(typeId, deabstractDelta);
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId))) if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId))) {
return reportError(DebugUtils::errored(kErrorInvalidArgument)); return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
if (itemCount == 0 || repeatCount == 0) if (itemCount == 0 || repeatCount == 0) {
return kErrorOk; return kErrorOk;
}
uint32_t typeSize = TypeUtils::sizeOf(finalTypeId); uint32_t typeSize = TypeUtils::sizeOf(finalTypeId);
Support::FastUInt8 of = 0; Support::FastUInt8 of = 0;
@@ -156,15 +172,16 @@ Error BaseAssembler::embedDataArray(TypeId typeId, const void* data, size_t item
size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of); size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of);
size_t totalSize = Support::mulOverflow(dataSize, repeatCount, &of); size_t totalSize = Support::mulOverflow(dataSize, repeatCount, &of);
if (ASMJIT_UNLIKELY(of)) if (ASMJIT_UNLIKELY(of)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory)); return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
CodeWriter writer(this); CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, totalSize)); ASMJIT_PROPAGATE(writer.ensureSpace(this, totalSize));
for (size_t i = 0; i < repeatCount; i++) for (size_t i = 0; i < repeatCount; i++) {
writer.emitData(data, dataSize); writer.emitData(data, dataSize);
}
writer.done(this); writer.done(this);
#ifndef ASMJIT_NO_LOGGING #ifndef ASMJIT_NO_LOGGING
@@ -194,18 +211,21 @@ static const TypeId dataTypeIdBySize[9] = {
#endif #endif
Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) { Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized)); return reportError(DebugUtils::errored(kErrorNotInitialized));
}
if (ASMJIT_UNLIKELY(!isLabelValid(label))) if (ASMJIT_UNLIKELY(!isLabelValid(label))) {
return reportError(DebugUtils::errored(kErrorInvalidLabel)); return reportError(DebugUtils::errored(kErrorInvalidLabel));
}
ASMJIT_PROPAGATE(align(AlignMode::kData, uint32_t(pool.alignment()))); ASMJIT_PROPAGATE(align(AlignMode::kData, uint32_t(pool.alignment())));
ASMJIT_PROPAGATE(bind(label)); ASMJIT_PROPAGATE(bind(label));
size_t size = pool.size(); size_t size = pool.size();
if (!size) if (!size) {
return kErrorOk; return kErrorOk;
}
CodeWriter writer(this); CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, size)); ASMJIT_PROPAGATE(writer.ensureSpace(this, size));
@@ -234,21 +254,25 @@ Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) {
} }
Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) { Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized)); return reportError(DebugUtils::errored(kErrorNotInitialized));
}
ASMJIT_ASSERT(_code != nullptr); ASMJIT_ASSERT(_code != nullptr);
RelocEntry* re; RelocEntry* re;
LabelEntry* le = _code->labelEntry(label); LabelEntry* le = _code->labelEntry(label);
if (ASMJIT_UNLIKELY(!le)) if (ASMJIT_UNLIKELY(!le)) {
return reportError(DebugUtils::errored(kErrorInvalidLabel)); return reportError(DebugUtils::errored(kErrorInvalidLabel));
}
if (dataSize == 0) if (dataSize == 0) {
dataSize = registerSize(); dataSize = registerSize();
}
if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8)) if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8)) {
return reportError(DebugUtils::errored(kErrorInvalidOperandSize)); return reportError(DebugUtils::errored(kErrorInvalidOperandSize));
}
CodeWriter writer(this); CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize)); ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
@@ -266,8 +290,9 @@ Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
#endif #endif
Error err = _code->newRelocEntry(&re, RelocType::kRelToAbs); Error err = _code->newRelocEntry(&re, RelocType::kRelToAbs);
if (ASMJIT_UNLIKELY(err)) if (ASMJIT_UNLIKELY(err)) {
return reportError(err); return reportError(err);
}
re->_sourceSectionId = _section->id(); re->_sourceSectionId = _section->id();
re->_sourceOffset = offset(); re->_sourceOffset = offset();
@@ -282,8 +307,9 @@ Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
of.resetToSimpleValue(OffsetType::kUnsignedOffset, dataSize); of.resetToSimpleValue(OffsetType::kUnsignedOffset, dataSize);
LabelLink* link = _code->newLabelLink(le, _section->id(), offset(), 0, of); LabelLink* link = _code->newLabelLink(le, _section->id(), offset(), 0, of);
if (ASMJIT_UNLIKELY(!link)) if (ASMJIT_UNLIKELY(!link)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory)); return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
link->relocId = re->id(); link->relocId = re->id();
} }
@@ -296,20 +322,24 @@ Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
} }
Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) { Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized)); return reportError(DebugUtils::errored(kErrorNotInitialized));
}
LabelEntry* labelEntry = _code->labelEntry(label); LabelEntry* labelEntry = _code->labelEntry(label);
LabelEntry* baseEntry = _code->labelEntry(base); LabelEntry* baseEntry = _code->labelEntry(base);
if (ASMJIT_UNLIKELY(!labelEntry || !baseEntry)) if (ASMJIT_UNLIKELY(!labelEntry || !baseEntry)) {
return reportError(DebugUtils::errored(kErrorInvalidLabel)); return reportError(DebugUtils::errored(kErrorInvalidLabel));
}
if (dataSize == 0) if (dataSize == 0) {
dataSize = registerSize(); dataSize = registerSize();
}
if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8)) if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8)) {
return reportError(DebugUtils::errored(kErrorInvalidOperandSize)); return reportError(DebugUtils::errored(kErrorInvalidOperandSize));
}
CodeWriter writer(this); CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize)); ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
@@ -336,12 +366,14 @@ Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size
else { else {
RelocEntry* re; RelocEntry* re;
Error err = _code->newRelocEntry(&re, RelocType::kExpression); Error err = _code->newRelocEntry(&re, RelocType::kExpression);
if (ASMJIT_UNLIKELY(err)) if (ASMJIT_UNLIKELY(err)) {
return reportError(err); return reportError(err);
}
Expression* exp = _code->_zone.newT<Expression>(); Expression* exp = _code->_zone.newT<Expression>();
if (ASMJIT_UNLIKELY(!exp)) if (ASMJIT_UNLIKELY(!exp)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory)); return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
exp->reset(); exp->reset();
exp->opType = ExpressionOpType::kSub; exp->opType = ExpressionOpType::kSub;
@@ -365,8 +397,9 @@ Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size
Error BaseAssembler::comment(const char* data, size_t size) { Error BaseAssembler::comment(const char* data, size_t size) {
if (!hasEmitterFlag(EmitterFlags::kLogComments)) { if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached)) if (!hasEmitterFlag(EmitterFlags::kAttached)) {
return reportError(DebugUtils::errored(kErrorNotInitialized)); return reportError(DebugUtils::errored(kErrorNotInitialized));
}
return kErrorOk; return kErrorOk;
} }

View File

@@ -28,7 +28,7 @@ ASMJIT_BEGIN_NAMESPACE
class ASMJIT_VIRTAPI BaseAssembler : public BaseEmitter { class ASMJIT_VIRTAPI BaseAssembler : public BaseEmitter {
public: public:
ASMJIT_NONCOPYABLE(BaseAssembler) ASMJIT_NONCOPYABLE(BaseAssembler)
typedef BaseEmitter Base; using Base = BaseEmitter;
//! Current section where the assembling happens. //! Current section where the assembling happens.
Section* _section = nullptr; Section* _section = nullptr;
@@ -53,11 +53,15 @@ public:
//! \{ //! \{
//! Returns the capacity of the current CodeBuffer. //! Returns the capacity of the current CodeBuffer.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t bufferCapacity() const noexcept { return (size_t)(_bufferEnd - _bufferData); } ASMJIT_INLINE_NODEBUG size_t bufferCapacity() const noexcept { return (size_t)(_bufferEnd - _bufferData); }
//! Returns the number of remaining bytes in the current CodeBuffer. //! Returns the number of remaining bytes in the current CodeBuffer.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t remainingSpace() const noexcept { return (size_t)(_bufferEnd - _bufferPtr); } ASMJIT_INLINE_NODEBUG size_t remainingSpace() const noexcept { return (size_t)(_bufferEnd - _bufferPtr); }
//! Returns the current position in the CodeBuffer. //! Returns the current position in the CodeBuffer.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t offset() const noexcept { return (size_t)(_bufferPtr - _bufferData); } ASMJIT_INLINE_NODEBUG size_t offset() const noexcept { return (size_t)(_bufferPtr - _bufferData); }
//! Sets the current position in the CodeBuffer to `offset`. //! Sets the current position in the CodeBuffer to `offset`.
@@ -66,10 +70,15 @@ public:
ASMJIT_API Error setOffset(size_t offset); ASMJIT_API Error setOffset(size_t offset);
//! Returns the start of the CodeBuffer in the current section. //! Returns the start of the CodeBuffer in the current section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* bufferData() const noexcept { return _bufferData; } ASMJIT_INLINE_NODEBUG uint8_t* bufferData() const noexcept { return _bufferData; }
//! Returns the end (first invalid byte) in the current section. //! Returns the end (first invalid byte) in the current section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* bufferEnd() const noexcept { return _bufferEnd; } ASMJIT_INLINE_NODEBUG uint8_t* bufferEnd() const noexcept { return _bufferEnd; }
//! Returns the current pointer in the CodeBuffer in the current section. //! Returns the current pointer in the CodeBuffer in the current section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* bufferPtr() const noexcept { return _bufferPtr; } ASMJIT_INLINE_NODEBUG uint8_t* bufferPtr() const noexcept { return _bufferPtr; }
//! \} //! \}
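A minimal sketch (not part of this commit) of how the buffer accessors above relate, assuming `a` is a BaseAssembler attached to a CodeHolder:

  // Sketch only: the three views of the CodeBuffer are always consistent.
  size_t capacity  = a.bufferCapacity();   // _bufferEnd - _bufferData
  size_t remaining = a.remainingSpace();   // _bufferEnd - _bufferPtr
  size_t offset    = a.offset();           // _bufferPtr - _bufferData
  // Invariant: offset + remaining == capacity.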
@@ -78,6 +87,7 @@ public:
//! \{ //! \{
//! Returns the current section. //! Returns the current section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Section* currentSection() const noexcept { return _section; } ASMJIT_INLINE_NODEBUG Section* currentSection() const noexcept { return _section; }
ASMJIT_API Error section(Section* section) override; ASMJIT_API Error section(Section* section) override;
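A short usage sketch (not part of this commit) of switching the active section; `a` is an attached BaseAssembler and `data` is assumed to be a Section created earlier on the CodeHolder:

  // Sketch only: emit into another section, then return to the original one.
  Section* text = a.currentSection();
  if (a.section(data) == kErrorOk) {
    // ... embed constants or data here ...
    a.section(text);   // switch back to the original section
  }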

View File

@@ -35,8 +35,9 @@ public:
// ======================= // =======================
static void BaseBuilder_deletePasses(BaseBuilder* self) noexcept { static void BaseBuilder_deletePasses(BaseBuilder* self) noexcept {
for (Pass* pass : self->_passes) for (Pass* pass : self->_passes) {
pass->~Pass(); pass->~Pass();
}
self->_passes.reset(); self->_passes.reset();
} }
@@ -62,8 +63,9 @@ Error BaseBuilder::newInstNode(InstNode** out, InstId instId, InstOptions instOp
ASMJIT_ASSERT(opCapacity >= InstNode::kBaseOpCapacity); ASMJIT_ASSERT(opCapacity >= InstNode::kBaseOpCapacity);
InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity)); InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
if (ASMJIT_UNLIKELY(!node)) if (ASMJIT_UNLIKELY(!node)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory)); return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
*out = new(Support::PlacementNew{node}) InstNode(this, instId, instOptions, opCount, opCapacity); *out = new(Support::PlacementNew{node}) InstNode(this, instId, instOptions, opCount, opCapacity);
return kErrorOk; return kErrorOk;
@@ -88,15 +90,17 @@ Error BaseBuilder::newEmbedDataNode(EmbedDataNode** out, TypeId typeId, const vo
uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize()); uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize());
TypeId finalTypeId = TypeUtils::deabstract(typeId, deabstractDelta); TypeId finalTypeId = TypeUtils::deabstract(typeId, deabstractDelta);
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId))) if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId))) {
return reportError(DebugUtils::errored(kErrorInvalidArgument)); return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
uint32_t typeSize = TypeUtils::sizeOf(finalTypeId); uint32_t typeSize = TypeUtils::sizeOf(finalTypeId);
Support::FastUInt8 of = 0; Support::FastUInt8 of = 0;
size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of); size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of);
if (ASMJIT_UNLIKELY(of)) if (ASMJIT_UNLIKELY(of)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory)); return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
EmbedDataNode* node; EmbedDataNode* node;
ASMJIT_PROPAGATE(_newNodeT<EmbedDataNode>(&node)); ASMJIT_PROPAGATE(_newNodeT<EmbedDataNode>(&node));
@@ -109,13 +113,15 @@ Error BaseBuilder::newEmbedDataNode(EmbedDataNode** out, TypeId typeId, const vo
uint8_t* dstData = node->_inlineData; uint8_t* dstData = node->_inlineData;
if (dataSize > EmbedDataNode::kInlineBufferSize) { if (dataSize > EmbedDataNode::kInlineBufferSize) {
dstData = static_cast<uint8_t*>(_dataZone.alloc(dataSize, 8)); dstData = static_cast<uint8_t*>(_dataZone.alloc(dataSize, 8));
if (ASMJIT_UNLIKELY(!dstData)) if (ASMJIT_UNLIKELY(!dstData)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory)); return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
node->_externalData = dstData; node->_externalData = dstData;
} }
if (data) if (data) {
memcpy(dstData, data, dataSize); memcpy(dstData, data, dataSize);
}
*out = node; *out = node;
return kErrorOk; return kErrorOk;
@@ -132,13 +138,15 @@ Error BaseBuilder::newCommentNode(CommentNode** out, const char* data, size_t si
*out = nullptr; *out = nullptr;
if (data) { if (data) {
if (size == SIZE_MAX) if (size == SIZE_MAX) {
size = strlen(data); size = strlen(data);
}
if (size > 0) { if (size > 0) {
data = static_cast<char*>(_dataZone.dup(data, size, true)); data = static_cast<char*>(_dataZone.dup(data, size, true));
if (ASMJIT_UNLIKELY(!data)) if (ASMJIT_UNLIKELY(!data)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory)); return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
} }
} }
@@ -168,15 +176,18 @@ BaseNode* BaseBuilder::addNode(BaseNode* node) noexcept {
node->_next = next; node->_next = next;
prev->_next = node; prev->_next = node;
if (next) if (next) {
next->_prev = node; next->_prev = node;
else }
else {
_nodeList._last = node; _nodeList._last = node;
}
} }
node->addFlags(NodeFlags::kIsActive); node->addFlags(NodeFlags::kIsActive);
if (node->isSection()) if (node->isSection()) {
_dirtySectionLinks = true; _dirtySectionLinks = true;
}
_cursor = node; _cursor = node;
return node; return node;
@@ -193,14 +204,17 @@ BaseNode* BaseBuilder::addAfter(BaseNode* node, BaseNode* ref) noexcept {
node->_next = next; node->_next = next;
node->addFlags(NodeFlags::kIsActive); node->addFlags(NodeFlags::kIsActive);
if (node->isSection()) if (node->isSection()) {
_dirtySectionLinks = true; _dirtySectionLinks = true;
}
prev->_next = node; prev->_next = node;
if (next) if (next) {
next->_prev = node; next->_prev = node;
else }
else {
_nodeList._last = node; _nodeList._last = node;
}
return node; return node;
} }
@@ -218,43 +232,54 @@ BaseNode* BaseBuilder::addBefore(BaseNode* node, BaseNode* ref) noexcept {
node->_next = next; node->_next = next;
node->addFlags(NodeFlags::kIsActive); node->addFlags(NodeFlags::kIsActive);
if (node->isSection()) if (node->isSection()) {
_dirtySectionLinks = true; _dirtySectionLinks = true;
}
next->_prev = node; next->_prev = node;
if (prev) if (prev) {
prev->_next = node; prev->_next = node;
else }
else {
_nodeList._first = node; _nodeList._first = node;
}
return node; return node;
} }
BaseNode* BaseBuilder::removeNode(BaseNode* node) noexcept { BaseNode* BaseBuilder::removeNode(BaseNode* node) noexcept {
if (!node->isActive()) if (!node->isActive()) {
return node; return node;
}
BaseNode* prev = node->prev(); BaseNode* prev = node->prev();
BaseNode* next = node->next(); BaseNode* next = node->next();
if (_nodeList._first == node) if (_nodeList._first == node) {
_nodeList._first = next; _nodeList._first = next;
else }
else {
prev->_next = next; prev->_next = next;
}
if (_nodeList._last == node) if (_nodeList._last == node) {
_nodeList._last = prev; _nodeList._last = prev;
else }
else {
next->_prev = prev; next->_prev = prev;
}
node->_prev = nullptr; node->_prev = nullptr;
node->_next = nullptr; node->_next = nullptr;
node->clearFlags(NodeFlags::kIsActive); node->clearFlags(NodeFlags::kIsActive);
if (node->isSection())
_dirtySectionLinks = true;
if (_cursor == node) if (node->isSection()) {
_dirtySectionLinks = true;
}
if (_cursor == node) {
_cursor = prev; _cursor = prev;
}
return node; return node;
} }
@@ -265,21 +290,26 @@ void BaseBuilder::removeNodes(BaseNode* first, BaseNode* last) noexcept {
return; return;
} }
if (!first->isActive()) if (!first->isActive()) {
return; return;
}
BaseNode* prev = first->prev(); BaseNode* prev = first->prev();
BaseNode* next = last->next(); BaseNode* next = last->next();
if (_nodeList._first == first) if (_nodeList._first == first) {
_nodeList._first = next; _nodeList._first = next;
else }
else {
prev->_next = next; prev->_next = next;
}
if (_nodeList._last == last) if (_nodeList._last == last) {
_nodeList._last = prev; _nodeList._last = prev;
else }
else {
next->_prev = prev; next->_prev = prev;
}
BaseNode* node = first; BaseNode* node = first;
uint32_t didRemoveSection = false; uint32_t didRemoveSection = false;
@@ -293,16 +323,19 @@ void BaseBuilder::removeNodes(BaseNode* first, BaseNode* last) noexcept {
node->clearFlags(NodeFlags::kIsActive); node->clearFlags(NodeFlags::kIsActive);
didRemoveSection |= uint32_t(node->isSection()); didRemoveSection |= uint32_t(node->isSection());
if (_cursor == node) if (_cursor == node) {
_cursor = prev; _cursor = prev;
}
if (node == last) if (node == last) {
break; break;
}
node = next; node = next;
} }
if (didRemoveSection) if (didRemoveSection) {
_dirtySectionLinks = true; _dirtySectionLinks = true;
}
} }
BaseNode* BaseBuilder::setCursor(BaseNode* node) noexcept { BaseNode* BaseBuilder::setCursor(BaseNode* node) noexcept {
@@ -317,28 +350,34 @@ BaseNode* BaseBuilder::setCursor(BaseNode* node) noexcept {
Error BaseBuilder::sectionNodeOf(SectionNode** out, uint32_t sectionId) { Error BaseBuilder::sectionNodeOf(SectionNode** out, uint32_t sectionId) {
*out = nullptr; *out = nullptr;
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
if (ASMJIT_UNLIKELY(!_code->isSectionValid(sectionId))) if (ASMJIT_UNLIKELY(!_code->isSectionValid(sectionId))) {
return reportError(DebugUtils::errored(kErrorInvalidSection)); return reportError(DebugUtils::errored(kErrorInvalidSection));
}
if (sectionId >= _sectionNodes.size()) { if (sectionId >= _sectionNodes.size()) {
Error err = _sectionNodes.reserve(&_allocator, sectionId + 1); Error err = _sectionNodes.reserve(&_allocator, sectionId + 1);
if (ASMJIT_UNLIKELY(err != kErrorOk)) if (ASMJIT_UNLIKELY(err != kErrorOk)) {
return reportError(err); return reportError(err);
}
} }
SectionNode* node = nullptr; SectionNode* node = nullptr;
if (sectionId < _sectionNodes.size()) if (sectionId < _sectionNodes.size()) {
node = _sectionNodes[sectionId]; node = _sectionNodes[sectionId];
}
if (!node) { if (!node) {
ASMJIT_PROPAGATE(_newNodeT<SectionNode>(&node, sectionId)); ASMJIT_PROPAGATE(_newNodeT<SectionNode>(&node, sectionId));
    // We have already reserved enough space, so this cannot fail now.     // We have already reserved enough space, so this cannot fail now.
if (sectionId >= _sectionNodes.size()) if (sectionId >= _sectionNodes.size()) {
_sectionNodes.resize(&_allocator, sectionId + 1); // SAFETY: No need to check for error condition as we have already reserved enough space.
(void)_sectionNodes.resize(&_allocator, sectionId + 1);
}
_sectionNodes[sectionId] = node; _sectionNodes[sectionId] = node;
} }
@@ -361,36 +400,42 @@ Error BaseBuilder::section(Section* section) {
// This is a bit tricky. We cache section links to make sure that // This is a bit tricky. We cache section links to make sure that
  // switching sections doesn't involve traversing the linked list unless   // switching sections doesn't involve traversing the linked list unless
// the position of the section has changed. // the position of the section has changed.
if (hasDirtySectionLinks()) if (hasDirtySectionLinks()) {
updateSectionLinks(); updateSectionLinks();
}
if (node->_nextSection) if (node->_nextSection) {
_cursor = node->_nextSection->_prev; _cursor = node->_nextSection->_prev;
else }
else {
_cursor = _nodeList.last(); _cursor = _nodeList.last();
}
} }
return kErrorOk; return kErrorOk;
} }
void BaseBuilder::updateSectionLinks() noexcept { void BaseBuilder::updateSectionLinks() noexcept {
if (!_dirtySectionLinks) if (!_dirtySectionLinks) {
return; return;
}
BaseNode* node_ = _nodeList.first(); BaseNode* node_ = _nodeList.first();
SectionNode* currentSection = nullptr; SectionNode* currentSection = nullptr;
while (node_) { while (node_) {
if (node_->isSection()) { if (node_->isSection()) {
if (currentSection) if (currentSection) {
currentSection->_nextSection = node_->as<SectionNode>(); currentSection->_nextSection = node_->as<SectionNode>();
}
currentSection = node_->as<SectionNode>(); currentSection = node_->as<SectionNode>();
} }
node_ = node_->next(); node_ = node_->next();
} }
if (currentSection) if (currentSection) {
currentSection->_nextSection = nullptr; currentSection->_nextSection = nullptr;
}
_dirtySectionLinks = false; _dirtySectionLinks = false;
} }
@@ -401,15 +446,18 @@ void BaseBuilder::updateSectionLinks() noexcept {
Error BaseBuilder::labelNodeOf(LabelNode** out, uint32_t labelId) { Error BaseBuilder::labelNodeOf(LabelNode** out, uint32_t labelId) {
*out = nullptr; *out = nullptr;
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
uint32_t index = labelId; uint32_t index = labelId;
if (ASMJIT_UNLIKELY(index >= _code->labelCount())) if (ASMJIT_UNLIKELY(index >= _code->labelCount())) {
return DebugUtils::errored(kErrorInvalidLabel); return DebugUtils::errored(kErrorInvalidLabel);
}
if (index >= _labelNodes.size()) if (index >= _labelNodes.size()) {
ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, index + 1)); ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, index + 1));
}
LabelNode* node = _labelNodes[index]; LabelNode* node = _labelNodes[index];
if (!node) { if (!node) {
@@ -422,8 +470,9 @@ Error BaseBuilder::labelNodeOf(LabelNode** out, uint32_t labelId) {
} }
Error BaseBuilder::registerLabelNode(LabelNode* node) { Error BaseBuilder::registerLabelNode(LabelNode* node) {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
LabelEntry* le; LabelEntry* le;
ASMJIT_PROPAGATE(_code->newLabelEntry(&le)); ASMJIT_PROPAGATE(_code->newLabelEntry(&le));
@@ -445,13 +494,15 @@ static Error BaseBuilder_newLabelInternal(BaseBuilder* self, uint32_t labelId) {
uint32_t growBy = labelId - self->_labelNodes.size(); uint32_t growBy = labelId - self->_labelNodes.size();
Error err = self->_labelNodes.willGrow(&self->_allocator, growBy); Error err = self->_labelNodes.willGrow(&self->_allocator, growBy);
if (ASMJIT_UNLIKELY(err)) if (ASMJIT_UNLIKELY(err)) {
return self->reportError(err); return self->reportError(err);
}
LabelNode* node; LabelNode* node;
ASMJIT_PROPAGATE(self->_newNodeT<LabelNode>(&node, labelId)); ASMJIT_PROPAGATE(self->_newNodeT<LabelNode>(&node, labelId));
self->_labelNodes.resize(&self->_allocator, labelId + 1); // SAFETY: No need to check for error condition as we have already reserved enough space.
(void)self->_labelNodes.resize(&self->_allocator, labelId + 1);
self->_labelNodes[labelId] = node; self->_labelNodes[labelId] = node;
node->_labelId = labelId; node->_labelId = labelId;
return kErrorOk; return kErrorOk;
@@ -495,15 +546,18 @@ Error BaseBuilder::bind(const Label& label) {
// ==================== // ====================
ASMJIT_FAVOR_SIZE Pass* BaseBuilder::passByName(const char* name) const noexcept { ASMJIT_FAVOR_SIZE Pass* BaseBuilder::passByName(const char* name) const noexcept {
for (Pass* pass : _passes) for (Pass* pass : _passes) {
if (strcmp(pass->name(), name) == 0) if (strcmp(pass->name(), name) == 0) {
return pass; return pass;
}
}
return nullptr; return nullptr;
} }
ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept { ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
if (ASMJIT_UNLIKELY(pass == nullptr)) { if (ASMJIT_UNLIKELY(pass == nullptr)) {
// Since this is directly called by `addPassT()` we treat `null` argument // Since this is directly called by `addPassT()` we treat `null` argument
@@ -512,8 +566,9 @@ ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept {
} }
else if (ASMJIT_UNLIKELY(pass->_cb)) { else if (ASMJIT_UNLIKELY(pass->_cb)) {
// Kinda weird, but okay... // Kinda weird, but okay...
if (pass->_cb == this) if (pass->_cb == this) {
return kErrorOk; return kErrorOk;
}
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
} }
@@ -523,15 +578,18 @@ ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept {
} }
ASMJIT_FAVOR_SIZE Error BaseBuilder::deletePass(Pass* pass) noexcept { ASMJIT_FAVOR_SIZE Error BaseBuilder::deletePass(Pass* pass) noexcept {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
if (ASMJIT_UNLIKELY(pass == nullptr)) if (ASMJIT_UNLIKELY(pass == nullptr)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
if (pass->_cb != nullptr) { if (pass->_cb != nullptr) {
if (pass->_cb != this) if (pass->_cb != this) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
uint32_t index = _passes.indexOf(pass); uint32_t index = _passes.indexOf(pass);
ASMJIT_ASSERT(index != Globals::kNotFound); ASMJIT_ASSERT(index != Globals::kNotFound);
@@ -545,11 +603,13 @@ ASMJIT_FAVOR_SIZE Error BaseBuilder::deletePass(Pass* pass) noexcept {
} }
Error BaseBuilder::runPasses() { Error BaseBuilder::runPasses() {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
if (_passes.empty()) if (_passes.empty()) {
return kErrorOk; return kErrorOk;
}
ErrorHandler* prev = errorHandler(); ErrorHandler* prev = errorHandler();
PostponedErrorHandler postponed; PostponedErrorHandler postponed;
@@ -560,14 +620,16 @@ Error BaseBuilder::runPasses() {
for (Pass* pass : _passes) { for (Pass* pass : _passes) {
_passZone.reset(); _passZone.reset();
err = pass->run(&_passZone, _logger); err = pass->run(&_passZone, _logger);
if (err) if (err) {
break; break;
}
} }
_passZone.reset(); _passZone.reset();
setErrorHandler(prev); setErrorHandler(prev);
if (ASMJIT_UNLIKELY(err)) if (ASMJIT_UNLIKELY(err)) {
return reportError(err, !postponed._message.empty() ? postponed._message.data() : nullptr); return reportError(err, !postponed._message.empty() ? postponed._message.data() : nullptr);
}
return kErrorOk; return kErrorOk;
} }
@@ -580,8 +642,9 @@ Error BaseBuilder::_emit(InstId instId, const Operand_& o0, const Operand_& o1,
InstOptions options = instOptions() | forcedInstOptions(); InstOptions options = instOptions() | forcedInstOptions();
if (Support::test(options, InstOptions::kReserved)) { if (Support::test(options, InstOptions::kReserved)) {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
#ifndef ASMJIT_NO_VALIDATION #ifndef ASMJIT_NO_VALIDATION
// Strict validation. // Strict validation.
@@ -626,12 +689,14 @@ Error BaseBuilder::_emit(InstId instId, const Operand_& o0, const Operand_& o1,
node->setOp(0, o0); node->setOp(0, o0);
node->setOp(1, o1); node->setOp(1, o1);
node->setOp(2, o2); node->setOp(2, o2);
for (uint32_t i = 3; i < opCount; i++) for (uint32_t i = 3; i < opCount; i++) {
node->setOp(i, opExt[i - 3]); node->setOp(i, opExt[i - 3]);
}
node->resetOpRange(opCount, opCapacity); node->resetOpRange(opCount, opCapacity);
if (comment) if (comment) {
node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true))); node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true)));
}
addNode(node); addNode(node);
resetExtraReg(); resetExtraReg();
@@ -642,8 +707,9 @@ Error BaseBuilder::_emit(InstId instId, const Operand_& o0, const Operand_& o1,
// =================== // ===================
Error BaseBuilder::align(AlignMode alignMode, uint32_t alignment) { Error BaseBuilder::align(AlignMode alignMode, uint32_t alignment) {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
AlignNode* node; AlignNode* node;
ASMJIT_PROPAGATE(newAlignNode(&node, alignMode, alignment)); ASMJIT_PROPAGATE(newAlignNode(&node, alignMode, alignment));
@@ -657,8 +723,9 @@ Error BaseBuilder::align(AlignMode alignMode, uint32_t alignment) {
// =================== // ===================
Error BaseBuilder::embed(const void* data, size_t dataSize) { Error BaseBuilder::embed(const void* data, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
EmbedDataNode* node; EmbedDataNode* node;
ASMJIT_PROPAGATE(newEmbedDataNode(&node, TypeId::kUInt8, data, dataSize)); ASMJIT_PROPAGATE(newEmbedDataNode(&node, TypeId::kUInt8, data, dataSize));
@@ -669,8 +736,9 @@ Error BaseBuilder::embed(const void* data, size_t dataSize) {
} }
Error BaseBuilder::embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t itemRepeat) { Error BaseBuilder::embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t itemRepeat) {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
EmbedDataNode* node; EmbedDataNode* node;
ASMJIT_PROPAGATE(newEmbedDataNode(&node, typeId, data, itemCount, itemRepeat)); ASMJIT_PROPAGATE(newEmbedDataNode(&node, typeId, data, itemCount, itemRepeat));
@@ -681,11 +749,13 @@ Error BaseBuilder::embedDataArray(TypeId typeId, const void* data, size_t itemCo
} }
Error BaseBuilder::embedConstPool(const Label& label, const ConstPool& pool) { Error BaseBuilder::embedConstPool(const Label& label, const ConstPool& pool) {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
if (!isLabelValid(label)) if (!isLabelValid(label)) {
return reportError(DebugUtils::errored(kErrorInvalidLabel)); return reportError(DebugUtils::errored(kErrorInvalidLabel));
}
ASMJIT_PROPAGATE(align(AlignMode::kData, uint32_t(pool.alignment()))); ASMJIT_PROPAGATE(align(AlignMode::kData, uint32_t(pool.alignment())));
ASMJIT_PROPAGATE(bind(label)); ASMJIT_PROPAGATE(bind(label));
@@ -710,11 +780,13 @@ static inline bool BaseBuilder_checkDataSize(size_t dataSize) noexcept {
} }
Error BaseBuilder::embedLabel(const Label& label, size_t dataSize) { Error BaseBuilder::embedLabel(const Label& label, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
if (!BaseBuilder_checkDataSize(dataSize)) if (!BaseBuilder_checkDataSize(dataSize)) {
return reportError(DebugUtils::errored(kErrorInvalidArgument)); return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
EmbedLabelNode* node; EmbedLabelNode* node;
ASMJIT_PROPAGATE(_newNodeT<EmbedLabelNode>(&node, label.id(), uint32_t(dataSize))); ASMJIT_PROPAGATE(_newNodeT<EmbedLabelNode>(&node, label.id(), uint32_t(dataSize)));
@@ -724,11 +796,13 @@ Error BaseBuilder::embedLabel(const Label& label, size_t dataSize) {
} }
Error BaseBuilder::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) { Error BaseBuilder::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
if (!BaseBuilder_checkDataSize(dataSize)) if (!BaseBuilder_checkDataSize(dataSize)) {
return reportError(DebugUtils::errored(kErrorInvalidArgument)); return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
EmbedLabelDeltaNode* node; EmbedLabelDeltaNode* node;
ASMJIT_PROPAGATE(_newNodeT<EmbedLabelDeltaNode>(&node, label.id(), base.id(), uint32_t(dataSize))); ASMJIT_PROPAGATE(_newNodeT<EmbedLabelDeltaNode>(&node, label.id(), base.id(), uint32_t(dataSize)));
@@ -741,8 +815,9 @@ Error BaseBuilder::embedLabelDelta(const Label& label, const Label& base, size_t
// ===================== // =====================
Error BaseBuilder::comment(const char* data, size_t size) { Error BaseBuilder::comment(const char* data, size_t size) {
if (ASMJIT_UNLIKELY(!_code)) if (ASMJIT_UNLIKELY(!_code)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
CommentNode* node; CommentNode* node;
ASMJIT_PROPAGATE(newCommentNode(&node, data, size)); ASMJIT_PROPAGATE(newCommentNode(&node, data, size));
@@ -827,7 +902,9 @@ Error BaseBuilder::serializeTo(BaseEmitter* dst) {
err = dst->comment(node->inlineComment()); err = dst->comment(node->inlineComment());
} }
if (err) break; if (err) {
break;
}
node_ = node_->next(); node_ = node_->next();
} while (node_); } while (node_);
@@ -843,8 +920,9 @@ Error BaseBuilder::onAttach(CodeHolder* code) noexcept {
SectionNode* initialSection; SectionNode* initialSection;
Error err = sectionNodeOf(&initialSection, 0); Error err = sectionNodeOf(&initialSection, 0);
if (!err) if (!err) {
err = _passes.willGrow(&_allocator, 8); err = _passes.willGrow(&_allocator, 8);
}
if (ASMJIT_UNLIKELY(err)) { if (ASMJIT_UNLIKELY(err)) {
onDetach(code); onDetach(code);

View File

@@ -163,9 +163,13 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _first == nullptr; } ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _first == nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* first() const noexcept { return _first; } ASMJIT_INLINE_NODEBUG BaseNode* first() const noexcept { return _first; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* last() const noexcept { return _last; } ASMJIT_INLINE_NODEBUG BaseNode* last() const noexcept { return _last; }
//! \} //! \}
@@ -185,7 +189,7 @@ public:
class ASMJIT_VIRTAPI BaseBuilder : public BaseEmitter { class ASMJIT_VIRTAPI BaseBuilder : public BaseEmitter {
public: public:
ASMJIT_NONCOPYABLE(BaseBuilder) ASMJIT_NONCOPYABLE(BaseBuilder)
typedef BaseEmitter Base; using Base = BaseEmitter;
//! \name Members //! \name Members
//! \{ //! \{
@@ -231,11 +235,15 @@ public:
//! \name Node Management //! \name Node Management
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeList nodeList() const noexcept { return _nodeList; } ASMJIT_INLINE_NODEBUG NodeList nodeList() const noexcept { return _nodeList; }
//! Returns the first node. //! Returns the first node.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* firstNode() const noexcept { return _nodeList.first(); } ASMJIT_INLINE_NODEBUG BaseNode* firstNode() const noexcept { return _nodeList.first(); }
//! Returns the last node. //! Returns the last node.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* lastNode() const noexcept { return _nodeList.last(); } ASMJIT_INLINE_NODEBUG BaseNode* lastNode() const noexcept { return _nodeList.last(); }
//! Allocates and instantiates a new node of type `T` and returns its instance. If the allocation fails `nullptr` //! Allocates and instantiates a new node of type `T` and returns its instance. If the allocation fails `nullptr`
@@ -282,6 +290,7 @@ public:
//! When the Builder/Compiler is created it automatically creates a '.text' \ref SectionNode, which will be the //! When the Builder/Compiler is created it automatically creates a '.text' \ref SectionNode, which will be the
//! initial one. When instructions are added they are always added after the cursor and the cursor is changed //! initial one. When instructions are added they are always added after the cursor and the cursor is changed
//! to be that newly added node. Use `setCursor()` to change where new nodes are inserted. //! to be that newly added node. Use `setCursor()` to change where new nodes are inserted.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* cursor() const noexcept { return _cursor; } ASMJIT_INLINE_NODEBUG BaseNode* cursor() const noexcept { return _cursor; }
  //! Sets the current node to `node` and returns the previous one.   //! Sets the current node to `node` and returns the previous one.
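A brief usage sketch (not part of this commit) of the cursor API, assuming `cb` is an attached Builder-based emitter and `someNode` is a node obtained earlier:

  // Sketch only: temporarily redirect where new nodes are inserted.
  BaseNode* saved = cb.setCursor(someNode);  // returns the previous cursor
  // ... nodes emitted here are appended right after `someNode` ...
  cb.setCursor(saved);                       // restore the original position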
@@ -302,11 +311,13 @@ public:
//! //!
  //! \note If a section with the given id is not associated with the Builder/Compiler, its node is null, so always   //! \note If a section with the given id is not associated with the Builder/Compiler, its node is null, so always
  //! check for null when iterating over the vector.   //! check for null when iterating over the vector.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<SectionNode*>& sectionNodes() const noexcept { ASMJIT_INLINE_NODEBUG const ZoneVector<SectionNode*>& sectionNodes() const noexcept {
return _sectionNodes; return _sectionNodes;
} }
//! Tests whether the `SectionNode` of the given `sectionId` was registered. //! Tests whether the `SectionNode` of the given `sectionId` was registered.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRegisteredSectionNode(uint32_t sectionId) const noexcept { ASMJIT_INLINE_NODEBUG bool hasRegisteredSectionNode(uint32_t sectionId) const noexcept {
return sectionId < _sectionNodes.size() && _sectionNodes[sectionId] != nullptr; return sectionId < _sectionNodes.size() && _sectionNodes[sectionId] != nullptr;
} }
@@ -321,6 +332,7 @@ public:
//! Returns whether the section links of active section nodes are dirty. You can update these links by calling //! Returns whether the section links of active section nodes are dirty. You can update these links by calling
//! `updateSectionLinks()` in such case. //! `updateSectionLinks()` in such case.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasDirtySectionLinks() const noexcept { return _dirtySectionLinks; } ASMJIT_INLINE_NODEBUG bool hasDirtySectionLinks() const noexcept { return _dirtySectionLinks; }
//! Updates links of all active section nodes. //! Updates links of all active section nodes.
@@ -335,14 +347,17 @@ public:
//! //!
  //! \note If a label with the given id is not associated with the Builder/Compiler, its node is null, so always   //! \note If a label with the given id is not associated with the Builder/Compiler, its node is null, so always
  //! check for null when iterating over the vector.   //! check for null when iterating over the vector.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<LabelNode*>& labelNodes() const noexcept { return _labelNodes; } ASMJIT_INLINE_NODEBUG const ZoneVector<LabelNode*>& labelNodes() const noexcept { return _labelNodes; }
//! Tests whether the `LabelNode` of the given `labelId` was registered. //! Tests whether the `LabelNode` of the given `labelId` was registered.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRegisteredLabelNode(uint32_t labelId) const noexcept { ASMJIT_INLINE_NODEBUG bool hasRegisteredLabelNode(uint32_t labelId) const noexcept {
return labelId < _labelNodes.size() && _labelNodes[labelId] != nullptr; return labelId < _labelNodes.size() && _labelNodes[labelId] != nullptr;
} }
//! \overload //! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRegisteredLabelNode(const Label& label) const noexcept { ASMJIT_INLINE_NODEBUG bool hasRegisteredLabelNode(const Label& label) const noexcept {
return hasRegisteredLabelNode(label.id()); return hasRegisteredLabelNode(label.id());
} }
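As the notes above say, entries can be null; a minimal iteration sketch (not part of this commit), assuming `cb` is a Builder-based emitter:

  // Sketch only: LabelNode entries are null for labels this emitter never
  // materialized, so check before dereferencing.
  for (LabelNode* labelNode : cb.labelNodes()) {
    if (labelNode) {
      // ... inspect labelNode here ...
    }
  }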
@@ -364,8 +379,12 @@ public:
//! Use \ref labelNodeOf() functions to get back \ref LabelNode from a label or its identifier. //! Use \ref labelNodeOf() functions to get back \ref LabelNode from a label or its identifier.
ASMJIT_API Error registerLabelNode(LabelNode* ASMJIT_NONNULL(node)); ASMJIT_API Error registerLabelNode(LabelNode* ASMJIT_NONNULL(node));
[[nodiscard]]
ASMJIT_API Label newLabel() override; ASMJIT_API Label newLabel() override;
[[nodiscard]]
ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parentId = Globals::kInvalidId) override; ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parentId = Globals::kInvalidId) override;
ASMJIT_API Error bind(const Label& label) override; ASMJIT_API Error bind(const Label& label) override;
//! \} //! \}
@@ -374,6 +393,7 @@ public:
//! \{ //! \{
//! Returns a vector of `Pass` instances that will be executed by `runPasses()`. //! Returns a vector of `Pass` instances that will be executed by `runPasses()`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<Pass*>& passes() const noexcept { return _passes; } ASMJIT_INLINE_NODEBUG const ZoneVector<Pass*>& passes() const noexcept { return _passes; }
//! Allocates and instantiates a new pass of type `T` and returns its instance. If the allocation fails `nullptr` is //! Allocates and instantiates a new pass of type `T` and returns its instance. If the allocation fails `nullptr` is
@@ -384,10 +404,12 @@ public:
//! \remarks The pointer returned (if non-null) is owned by the Builder or Compiler. When the Builder/Compiler is //! \remarks The pointer returned (if non-null) is owned by the Builder or Compiler. When the Builder/Compiler is
//! destroyed it destroys all passes it created so no manual memory management is required. //! destroyed it destroys all passes it created so no manual memory management is required.
template<typename T> template<typename T>
[[nodiscard]]
inline T* newPassT() noexcept { return _codeZone.newT<T>(); } inline T* newPassT() noexcept { return _codeZone.newT<T>(); }
//! \overload //! \overload
template<typename T, typename... Args> template<typename T, typename... Args>
[[nodiscard]]
inline T* newPassT(Args&&... args) noexcept { return _codeZone.newT<T>(std::forward<Args>(args)...); } inline T* newPassT(Args&&... args) noexcept { return _codeZone.newT<T>(std::forward<Args>(args)...); }
template<typename T> template<typename T>
@@ -399,9 +421,12 @@ public:
//! Returns `Pass` by name. //! Returns `Pass` by name.
//! //!
//! If the pass having the given `name` doesn't exist `nullptr` is returned. //! If the pass having the given `name` doesn't exist `nullptr` is returned.
[[nodiscard]]
ASMJIT_API Pass* passByName(const char* name) const noexcept; ASMJIT_API Pass* passByName(const char* name) const noexcept;
//! Adds `pass` to the list of passes. //! Adds `pass` to the list of passes.
ASMJIT_API Error addPass(Pass* pass) noexcept; ASMJIT_API Error addPass(Pass* pass) noexcept;
//! Removes `pass` from the list of passes and delete it. //! Removes `pass` from the list of passes and delete it.
ASMJIT_API Error deletePass(Pass* pass) noexcept; ASMJIT_API Error deletePass(Pass* pass) noexcept;
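A hedged sketch (not part of this commit) of the pass-management API above; `MyPass` is a hypothetical Pass subclass and `cb` an attached Builder-based emitter:

  // Sketch only: passes created via newPassT<T>() are owned by the emitter.
  MyPass* pass = cb.newPassT<MyPass>();   // may return nullptr on allocation failure
  if (pass) {
    Error err = cb.addPass(pass);         // registered; no manual delete needed
    (void)err;
  }
  Pass* found = cb.passByName("MyPass");  // assumes MyPass::name() returns "MyPass"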
@@ -604,17 +629,24 @@ public:
//! Casts this node to `T*`. //! Casts this node to `T*`.
template<typename T> template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* as() noexcept { return static_cast<T*>(this); } ASMJIT_INLINE_NODEBUG T* as() noexcept { return static_cast<T*>(this); }
//! Casts this node to `const T*`. //! Casts this node to `const T*`.
template<typename T> template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const T* as() const noexcept { return static_cast<const T*>(this); } ASMJIT_INLINE_NODEBUG const T* as() const noexcept { return static_cast<const T*>(this); }
//! Returns previous node or `nullptr` if this node is either first or not part of Builder/Compiler node-list. //! Returns previous node or `nullptr` if this node is either first or not part of Builder/Compiler node-list.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* prev() const noexcept { return _prev; } ASMJIT_INLINE_NODEBUG BaseNode* prev() const noexcept { return _prev; }
//! Returns next node or `nullptr` if this node is either last or not part of Builder/Compiler node-list. //! Returns next node or `nullptr` if this node is either last or not part of Builder/Compiler node-list.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* next() const noexcept { return _next; } ASMJIT_INLINE_NODEBUG BaseNode* next() const noexcept { return _next; }
//! Returns the type of the node, see \ref NodeType. //! Returns the type of the node, see \ref NodeType.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeType type() const noexcept { return _any._nodeType; } ASMJIT_INLINE_NODEBUG NodeType type() const noexcept { return _any._nodeType; }
//! Sets the type of the node, see `NodeType` (internal). //! Sets the type of the node, see `NodeType` (internal).
@@ -624,37 +656,65 @@ public:
ASMJIT_INLINE_NODEBUG void setType(NodeType type) noexcept { _any._nodeType = type; } ASMJIT_INLINE_NODEBUG void setType(NodeType type) noexcept { _any._nodeType = type; }
//! Tests whether this node is either `InstNode` or extends it. //! Tests whether this node is either `InstNode` or extends it.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInst() const noexcept { return hasFlag(NodeFlags::kActsAsInst); } ASMJIT_INLINE_NODEBUG bool isInst() const noexcept { return hasFlag(NodeFlags::kActsAsInst); }
//! Tests whether this node is `SectionNode`. //! Tests whether this node is `SectionNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isSection() const noexcept { return type() == NodeType::kSection; } ASMJIT_INLINE_NODEBUG bool isSection() const noexcept { return type() == NodeType::kSection; }
//! Tests whether this node is either `LabelNode` or extends it. //! Tests whether this node is either `LabelNode` or extends it.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLabel() const noexcept { return hasFlag(NodeFlags::kActsAsLabel); } ASMJIT_INLINE_NODEBUG bool isLabel() const noexcept { return hasFlag(NodeFlags::kActsAsLabel); }
//! Tests whether this node is `AlignNode`. //! Tests whether this node is `AlignNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAlign() const noexcept { return type() == NodeType::kAlign; } ASMJIT_INLINE_NODEBUG bool isAlign() const noexcept { return type() == NodeType::kAlign; }
//! Tests whether this node is `EmbedDataNode`. //! Tests whether this node is `EmbedDataNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isEmbedData() const noexcept { return type() == NodeType::kEmbedData; } ASMJIT_INLINE_NODEBUG bool isEmbedData() const noexcept { return type() == NodeType::kEmbedData; }
//! Tests whether this node is `EmbedLabelNode`. //! Tests whether this node is `EmbedLabelNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isEmbedLabel() const noexcept { return type() == NodeType::kEmbedLabel; } ASMJIT_INLINE_NODEBUG bool isEmbedLabel() const noexcept { return type() == NodeType::kEmbedLabel; }
//! Tests whether this node is `EmbedLabelDeltaNode`. //! Tests whether this node is `EmbedLabelDeltaNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isEmbedLabelDelta() const noexcept { return type() == NodeType::kEmbedLabelDelta; } ASMJIT_INLINE_NODEBUG bool isEmbedLabelDelta() const noexcept { return type() == NodeType::kEmbedLabelDelta; }
//! Tests whether this node is `ConstPoolNode`. //! Tests whether this node is `ConstPoolNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isConstPool() const noexcept { return type() == NodeType::kConstPool; } ASMJIT_INLINE_NODEBUG bool isConstPool() const noexcept { return type() == NodeType::kConstPool; }
//! Tests whether this node is `CommentNode`. //! Tests whether this node is `CommentNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isComment() const noexcept { return type() == NodeType::kComment; } ASMJIT_INLINE_NODEBUG bool isComment() const noexcept { return type() == NodeType::kComment; }
//! Tests whether this node is `SentinelNode`. //! Tests whether this node is `SentinelNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isSentinel() const noexcept { return type() == NodeType::kSentinel; } ASMJIT_INLINE_NODEBUG bool isSentinel() const noexcept { return type() == NodeType::kSentinel; }
//! Tests whether this node is `FuncNode`. //! Tests whether this node is `FuncNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFunc() const noexcept { return type() == NodeType::kFunc; } ASMJIT_INLINE_NODEBUG bool isFunc() const noexcept { return type() == NodeType::kFunc; }
//! Tests whether this node is `FuncRetNode`. //! Tests whether this node is `FuncRetNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFuncRet() const noexcept { return type() == NodeType::kFuncRet; } ASMJIT_INLINE_NODEBUG bool isFuncRet() const noexcept { return type() == NodeType::kFuncRet; }
//! Tests whether this node is `InvokeNode`. //! Tests whether this node is `InvokeNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInvoke() const noexcept { return type() == NodeType::kInvoke; } ASMJIT_INLINE_NODEBUG bool isInvoke() const noexcept { return type() == NodeType::kInvoke; }
//! Returns the node flags. //! Returns the node flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeFlags flags() const noexcept { return _any._nodeFlags; } ASMJIT_INLINE_NODEBUG NodeFlags flags() const noexcept { return _any._nodeFlags; }
//! Tests whether the node has the given `flag` set. //! Tests whether the node has the given `flag` set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(NodeFlags flag) const noexcept { return Support::test(_any._nodeFlags, flag); } ASMJIT_INLINE_NODEBUG bool hasFlag(NodeFlags flag) const noexcept { return Support::test(_any._nodeFlags, flag); }
//! Replaces node flags with `flags`. //! Replaces node flags with `flags`.
ASMJIT_INLINE_NODEBUG void setFlags(NodeFlags flags) noexcept { _any._nodeFlags = flags; } ASMJIT_INLINE_NODEBUG void setFlags(NodeFlags flags) noexcept { _any._nodeFlags = flags; }
//! Adds the given `flags` to node flags. //! Adds the given `flags` to node flags.
@@ -663,24 +723,39 @@ public:
ASMJIT_INLINE_NODEBUG void clearFlags(NodeFlags flags) noexcept { _any._nodeFlags &= ~flags; } ASMJIT_INLINE_NODEBUG void clearFlags(NodeFlags flags) noexcept { _any._nodeFlags &= ~flags; }
//! Tests whether the node is code that can be executed. //! Tests whether the node is code that can be executed.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isCode() const noexcept { return hasFlag(NodeFlags::kIsCode); } ASMJIT_INLINE_NODEBUG bool isCode() const noexcept { return hasFlag(NodeFlags::kIsCode); }
//! Tests whether the node is data that cannot be executed. //! Tests whether the node is data that cannot be executed.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isData() const noexcept { return hasFlag(NodeFlags::kIsData); } ASMJIT_INLINE_NODEBUG bool isData() const noexcept { return hasFlag(NodeFlags::kIsData); }
//! Tests whether the node is informative only (is never encoded like comment, etc...). //! Tests whether the node is informative only (is never encoded like comment, etc...).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInformative() const noexcept { return hasFlag(NodeFlags::kIsInformative); } ASMJIT_INLINE_NODEBUG bool isInformative() const noexcept { return hasFlag(NodeFlags::kIsInformative); }
//! Tests whether the node is removable if it's in an unreachable code block. //! Tests whether the node is removable if it's in an unreachable code block.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isRemovable() const noexcept { return hasFlag(NodeFlags::kIsRemovable); } ASMJIT_INLINE_NODEBUG bool isRemovable() const noexcept { return hasFlag(NodeFlags::kIsRemovable); }
//! Tests whether the node has no effect when executed (label, .align, nop, ...). //! Tests whether the node has no effect when executed (label, .align, nop, ...).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasNoEffect() const noexcept { return hasFlag(NodeFlags::kHasNoEffect); } ASMJIT_INLINE_NODEBUG bool hasNoEffect() const noexcept { return hasFlag(NodeFlags::kHasNoEffect); }
//! Tests whether the node is part of the code. //! Tests whether the node is part of the code.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isActive() const noexcept { return hasFlag(NodeFlags::kIsActive); } ASMJIT_INLINE_NODEBUG bool isActive() const noexcept { return hasFlag(NodeFlags::kIsActive); }
//! Tests whether the node has a position assigned. //! Tests whether the node has a position assigned.
//! //!
//! \remarks Returns `true` if node position is non-zero. //! \remarks Returns `true` if node position is non-zero.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasPosition() const noexcept { return _position != 0; } ASMJIT_INLINE_NODEBUG bool hasPosition() const noexcept { return _position != 0; }
//! Returns node position. //! Returns node position.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t position() const noexcept { return _position; } ASMJIT_INLINE_NODEBUG uint32_t position() const noexcept { return _position; }
//! Sets node position. //! Sets node position.
//! //!
//! Node position is a 32-bit unsigned integer that is used by Compiler to track where the node is relatively to //! Node position is a 32-bit unsigned integer that is used by Compiler to track where the node is relatively to
@@ -697,10 +772,15 @@ public:
//! size so you can either store a pointer or `int64_t` value through `setUserDataAsPtr()`, `setUserDataAsInt64()` //! size so you can either store a pointer or `int64_t` value through `setUserDataAsPtr()`, `setUserDataAsInt64()`
//! and `setUserDataAsUInt64()`. //! and `setUserDataAsUInt64()`.
template<typename T> template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* userDataAsPtr() const noexcept { return static_cast<T*>(_userDataPtr); } ASMJIT_INLINE_NODEBUG T* userDataAsPtr() const noexcept { return static_cast<T*>(_userDataPtr); }
  //! Returns user data cast to `int64_t`.   //! Returns user data cast to `int64_t`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int64_t userDataAsInt64() const noexcept { return int64_t(_userDataU64); } ASMJIT_INLINE_NODEBUG int64_t userDataAsInt64() const noexcept { return int64_t(_userDataU64); }
  //! Returns user data cast to `uint64_t`.   //! Returns user data cast to `uint64_t`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t userDataAsUInt64() const noexcept { return _userDataU64; } ASMJIT_INLINE_NODEBUG uint64_t userDataAsUInt64() const noexcept { return _userDataU64; }
//! Sets user data to `data`. //! Sets user data to `data`.
@@ -715,10 +795,14 @@ public:
ASMJIT_INLINE_NODEBUG void resetUserData() noexcept { _userDataU64 = 0; } ASMJIT_INLINE_NODEBUG void resetUserData() noexcept { _userDataU64 = 0; }
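A minimal sketch (not part of this commit) of the single 64-bit user-data slot described above; `node` is a BaseNode pointer and `MyData`/`myData` are hypothetical:

  // Sketch only: one 64-bit slot per node, so a pointer and an integer
  // cannot be stored at the same time.
  node->setUserDataAsUInt64(42u);
  uint64_t tag = node->userDataAsUInt64();        // 42
  node->setUserDataAsPtr(myData);                 // overwrites the integer value
  MyData* back = node->userDataAsPtr<MyData>();   // hypothetical MyData type
  node->resetUserData();                          // slot is zero again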
//! Tests whether the node has an associated pass data. //! Tests whether the node has an associated pass data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasPassData() const noexcept { return _passData != nullptr; } ASMJIT_INLINE_NODEBUG bool hasPassData() const noexcept { return _passData != nullptr; }
//! Returns the node pass data - data used during processing & transformations. //! Returns the node pass data - data used during processing & transformations.
template<typename T> template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* passData() const noexcept { return (T*)_passData; } ASMJIT_INLINE_NODEBUG T* passData() const noexcept { return (T*)_passData; }
//! Sets the node pass data to `data`. //! Sets the node pass data to `data`.
template<typename T> template<typename T>
ASMJIT_INLINE_NODEBUG void setPassData(T* data) noexcept { _passData = (void*)data; } ASMJIT_INLINE_NODEBUG void setPassData(T* data) noexcept { _passData = (void*)data; }
@@ -726,9 +810,13 @@ public:
ASMJIT_INLINE_NODEBUG void resetPassData() noexcept { _passData = nullptr; } ASMJIT_INLINE_NODEBUG void resetPassData() noexcept { _passData = nullptr; }
//! Tests whether the node has an inline comment/annotation. //! Tests whether the node has an inline comment/annotation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInlineComment() const noexcept { return _inlineComment != nullptr; } ASMJIT_INLINE_NODEBUG bool hasInlineComment() const noexcept { return _inlineComment != nullptr; }
//! Returns an inline comment/annotation string. //! Returns an inline comment/annotation string.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* inlineComment() const noexcept { return _inlineComment; } ASMJIT_INLINE_NODEBUG const char* inlineComment() const noexcept { return _inlineComment; }
//! Sets an inline comment/annotation string to `s`. //! Sets an inline comment/annotation string to `s`.
ASMJIT_INLINE_NODEBUG void setInlineComment(const char* s) noexcept { _inlineComment = s; } ASMJIT_INLINE_NODEBUG void setInlineComment(const char* s) noexcept { _inlineComment = s; }
//! Resets an inline comment/annotation string to nullptr. //! Resets an inline comment/annotation string to nullptr.
@@ -752,10 +840,10 @@ public:
//! embed 5. The rest (up to 6 operands) is considered extended. //! embed 5. The rest (up to 6 operands) is considered extended.
//! //!
//! The number of operands InstNode holds is decided when \ref InstNode is created. //! The number of operands InstNode holds is decided when \ref InstNode is created.
static constexpr uint32_t kBaseOpCapacity = uint32_t((128 - sizeof(BaseNode) - sizeof(BaseInst)) / sizeof(Operand_)); static inline constexpr uint32_t kBaseOpCapacity = uint32_t((128 - sizeof(BaseNode) - sizeof(BaseInst)) / sizeof(Operand_));
//! Count of maximum number of operands \ref InstNode can hold. //! Count of maximum number of operands \ref InstNode can hold.
static constexpr uint32_t kFullOpCapacity = Globals::kMaxOpCount; static inline constexpr uint32_t kFullOpCapacity = Globals::kMaxOpCount;
//! \} //! \}
@@ -791,7 +879,10 @@ public:
//! \name Instruction Object //! \name Instruction Object
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseInst& baseInst() noexcept { return _baseInst; } ASMJIT_INLINE_NODEBUG BaseInst& baseInst() noexcept { return _baseInst; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const BaseInst& baseInst() const noexcept { return _baseInst; } ASMJIT_INLINE_NODEBUG const BaseInst& baseInst() const noexcept { return _baseInst; }
//! \} //! \}
@@ -800,8 +891,11 @@ public:
//! \{ //! \{
//! Returns the instruction id, see `BaseInst::Id`. //! Returns the instruction id, see `BaseInst::Id`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstId id() const noexcept { return _baseInst.id(); } ASMJIT_INLINE_NODEBUG InstId id() const noexcept { return _baseInst.id(); }
//! Returns the instruction real id, see `BaseInst::Id`. //! Returns the instruction real id, see `BaseInst::Id`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstId realId() const noexcept { return _baseInst.realId(); } ASMJIT_INLINE_NODEBUG InstId realId() const noexcept { return _baseInst.realId(); }
//! Sets the instruction id to `id`, see `BaseInst::Id`. //! Sets the instruction id to `id`, see `BaseInst::Id`.
@@ -813,9 +907,13 @@ public:
//! \{ //! \{
//! Returns instruction options, see \ref InstOptions for more details. //! Returns instruction options, see \ref InstOptions for more details.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstOptions options() const noexcept { return _baseInst.options(); } ASMJIT_INLINE_NODEBUG InstOptions options() const noexcept { return _baseInst.options(); }
  //! Tests whether the instruction has the given `option` set/enabled.   //! Tests whether the instruction has the given `option` set/enabled.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOption(InstOptions option) const noexcept { return _baseInst.hasOption(option); } ASMJIT_INLINE_NODEBUG bool hasOption(InstOptions option) const noexcept { return _baseInst.hasOption(option); }
//! Sets instruction `options` to the provided value, resetting all others. //! Sets instruction `options` to the provided value, resetting all others.
ASMJIT_INLINE_NODEBUG void setOptions(InstOptions options) noexcept { _baseInst.setOptions(options); } ASMJIT_INLINE_NODEBUG void setOptions(InstOptions options) noexcept { _baseInst.setOptions(options); }
//! Adds instruction `options` to the instruction. //! Adds instruction `options` to the instruction.
@@ -831,11 +929,17 @@ public:
//! \{ //! \{
//! Tests whether the node has an extra register operand. //! Tests whether the node has an extra register operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _baseInst.hasExtraReg(); } ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _baseInst.hasExtraReg(); }
//! Returns extra register operand. //! Returns extra register operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegOnly& extraReg() noexcept { return _baseInst.extraReg(); } ASMJIT_INLINE_NODEBUG RegOnly& extraReg() noexcept { return _baseInst.extraReg(); }
//! \overload //! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _baseInst.extraReg(); } ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _baseInst.extraReg(); }
//! Sets extra register operand to `reg`. //! Sets extra register operand to `reg`.
ASMJIT_INLINE_NODEBUG void setExtraReg(const BaseReg& reg) noexcept { _baseInst.setExtraReg(reg); } ASMJIT_INLINE_NODEBUG void setExtraReg(const BaseReg& reg) noexcept { _baseInst.setExtraReg(reg); }
//! Sets extra register operand to `reg`. //! Sets extra register operand to `reg`.
@@ -849,24 +953,30 @@ public:
//! \{ //! \{
//! Returns operand count. //! Returns operand count.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t opCount() const noexcept { return _inst._opCount; } ASMJIT_INLINE_NODEBUG uint32_t opCount() const noexcept { return _inst._opCount; }
//! Returns operand capacity. //! Returns operand capacity.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t opCapacity() const noexcept { return _inst._opCapacity; } ASMJIT_INLINE_NODEBUG uint32_t opCapacity() const noexcept { return _inst._opCapacity; }
//! Sets operand count. //! Sets operand count.
ASMJIT_INLINE_NODEBUG void setOpCount(uint32_t opCount) noexcept { _inst._opCount = uint8_t(opCount); } ASMJIT_INLINE_NODEBUG void setOpCount(uint32_t opCount) noexcept { _inst._opCount = uint8_t(opCount); }
//! Returns operands array. //! Returns operands array.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Operand* operands() noexcept { ASMJIT_INLINE_NODEBUG Operand* operands() noexcept {
return reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(this) + sizeof(InstNode)); return reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(this) + sizeof(InstNode));
} }
//! Returns operands array (const). //! Returns operands array (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Operand* operands() const noexcept { ASMJIT_INLINE_NODEBUG const Operand* operands() const noexcept {
return reinterpret_cast<const Operand*>(reinterpret_cast<const uint8_t*>(this) + sizeof(InstNode)); return reinterpret_cast<const Operand*>(reinterpret_cast<const uint8_t*>(this) + sizeof(InstNode));
} }
//! Returns operand at the given `index`. //! Returns operand at the given `index`.
[[nodiscard]]
inline Operand& op(uint32_t index) noexcept { inline Operand& op(uint32_t index) noexcept {
ASMJIT_ASSERT(index < opCapacity()); ASMJIT_ASSERT(index < opCapacity());
@@ -875,6 +985,7 @@ public:
} }
//! Returns operand at the given `index` (const). //! Returns operand at the given `index` (const).
[[nodiscard]]
inline const Operand& op(uint32_t index) const noexcept { inline const Operand& op(uint32_t index) const noexcept {
ASMJIT_ASSERT(index < opCapacity()); ASMJIT_ASSERT(index < opCapacity());
@@ -911,6 +1022,7 @@ public:
//! \{ //! \{
//! Tests whether the given operand type `opType` is used by the instruction. //! Tests whether the given operand type `opType` is used by the instruction.
[[nodiscard]]
inline bool hasOpType(OperandType opType) const noexcept { inline bool hasOpType(OperandType opType) const noexcept {
const Operand* ops = operands(); const Operand* ops = operands();
for (uint32_t i = 0, count = opCount(); i < count; i++) for (uint32_t i = 0, count = opCount(); i < count; i++)
@@ -920,18 +1032,26 @@ public:
} }
//! Tests whether the instruction uses at least one register operand. //! Tests whether the instruction uses at least one register operand.
[[nodiscard]]
inline bool hasRegOp() const noexcept { return hasOpType(OperandType::kReg); } inline bool hasRegOp() const noexcept { return hasOpType(OperandType::kReg); }
//! Tests whether the instruction uses at least one memory operand. //! Tests whether the instruction uses at least one memory operand.
[[nodiscard]]
inline bool hasMemOp() const noexcept { return hasOpType(OperandType::kMem); } inline bool hasMemOp() const noexcept { return hasOpType(OperandType::kMem); }
//! Tests whether the instruction uses at least one immediate operand. //! Tests whether the instruction uses at least one immediate operand.
[[nodiscard]]
inline bool hasImmOp() const noexcept { return hasOpType(OperandType::kImm); } inline bool hasImmOp() const noexcept { return hasOpType(OperandType::kImm); }
//! Tests whether the instruction uses at least one label operand. //! Tests whether the instruction uses at least one label operand.
[[nodiscard]]
inline bool hasLabelOp() const noexcept { return hasOpType(OperandType::kLabel); } inline bool hasLabelOp() const noexcept { return hasOpType(OperandType::kLabel); }
//! Returns the index of the given operand type `opType`.
//!
//! \note If the operand type was found, the returned value represents its index in the \ref operands()
//! array; otherwise \ref Globals::kNotFound is returned to signal that the operand was not found.
[[nodiscard]]
inline uint32_t indexOfOpType(OperandType opType) const noexcept { inline uint32_t indexOfOpType(OperandType opType) const noexcept {
uint32_t i = 0; uint32_t i = 0;
uint32_t count = opCount(); uint32_t count = opCount();
@@ -947,10 +1067,15 @@ public:
} }
//! A shortcut that calls `indexOfOpType(OperandType::kMem)`. //! A shortcut that calls `indexOfOpType(OperandType::kMem)`.
[[nodiscard]]
inline uint32_t indexOfMemOp() const noexcept { return indexOfOpType(OperandType::kMem); } inline uint32_t indexOfMemOp() const noexcept { return indexOfOpType(OperandType::kMem); }
//! A shortcut that calls `indexOfOpType(OperandType::kImm)`. //! A shortcut that calls `indexOfOpType(OperandType::kImm)`.
[[nodiscard]]
inline uint32_t indexOfImmOp() const noexcept { return indexOfOpType(OperandType::kImm); } inline uint32_t indexOfImmOp() const noexcept { return indexOfOpType(OperandType::kImm); }
//! A shortcut that calls `indexOfOpType(OperandType::kLabel)`. //! A shortcut that calls `indexOfOpType(OperandType::kLabel)`.
[[nodiscard]]
inline uint32_t indexOfLabelOp() const noexcept { return indexOfOpType(OperandType::kLabel); } inline uint32_t indexOfLabelOp() const noexcept { return indexOfOpType(OperandType::kLabel); }
//! \} //! \}
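As a usage note for the operand queries above: a minimal sketch (not part of this diff) that walks a Builder's node list and inspects instruction operands, assuming the usual public BaseBuilder/InstNode API; the helper name is illustrative.

// Minimal sketch: iterate a Builder's nodes and inspect InstNode operands.
#include <asmjit/core.h>
using namespace asmjit;

static void inspectMemOperands(BaseBuilder& cb) {
  for (BaseNode* node = cb.firstNode(); node; node = node->next()) {
    if (!node->isInst()) {
      continue;
    }
    InstNode* inst = node->as<InstNode>();
    if (inst->hasMemOp()) {
      uint32_t i = inst->indexOfMemOp();   // Cannot be Globals::kNotFound here.
      const Operand& op = inst->op(i);     // Asserted to be below opCapacity().
      DebugUtils::unused(op);              // Inspect or rewrite the operand here.
    }
  }
}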
@@ -961,18 +1086,22 @@ public:
//! \cond INTERNAL //! \cond INTERNAL
//! Returns uint32_t[] view that represents BaseInst::RegOnly and instruction operands. //! Returns uint32_t[] view that represents BaseInst::RegOnly and instruction operands.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t* _getRewriteArray() noexcept { return &_baseInst._extraReg._id; } ASMJIT_INLINE_NODEBUG uint32_t* _getRewriteArray() noexcept { return &_baseInst._extraReg._id; }
//! \overload //! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const uint32_t* _getRewriteArray() const noexcept { return &_baseInst._extraReg._id; } ASMJIT_INLINE_NODEBUG const uint32_t* _getRewriteArray() const noexcept { return &_baseInst._extraReg._id; }
//! Maximum value of rewrite id - 6 operands each having 4 slots is 24, one RegOnly having 2 slots => 26. //! Maximum value of rewrite id - 6 operands each having 4 slots is 24, one RegOnly having 2 slots => 26.
static constexpr uint32_t kMaxRewriteId = 26 - 1; static inline constexpr uint32_t kMaxRewriteId = 26 - 1;
//! Returns a rewrite index of the given pointer to `id`.
//!
//! This function returns a value that can then be passed to the \ref rewriteIdAtIndex() function. It can address
//! any id from any operand that is used by the instruction, in addition to the \ref BaseInst::regOnly field, which
//! can also be used by the register allocator.
[[nodiscard]]
inline uint32_t getRewriteIndex(const uint32_t* id) const noexcept { inline uint32_t getRewriteIndex(const uint32_t* id) const noexcept {
const uint32_t* array = _getRewriteArray(); const uint32_t* array = _getRewriteArray();
ASMJIT_ASSERT(array <= id); ASMJIT_ASSERT(array <= id);
@@ -1010,14 +1139,16 @@ public:
//! There are only two capacities used - \ref kBaseOpCapacity and \ref kFullOpCapacity, so this function //! There are only two capacities used - \ref kBaseOpCapacity and \ref kFullOpCapacity, so this function
//! is used to decide between these two. The general rule is that instructions that can be represented with //! is used to decide between these two. The general rule is that instructions that can be represented with
//! \ref kBaseOpCapacity would use this value, and all others would take \ref kFullOpCapacity. //! \ref kBaseOpCapacity would use this value, and all others would take \ref kFullOpCapacity.
static ASMJIT_INLINE_NODEBUG constexpr uint32_t capacityOfOpCount(uint32_t opCount) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR uint32_t capacityOfOpCount(uint32_t opCount) noexcept {
return opCount <= kBaseOpCapacity ? kBaseOpCapacity : kFullOpCapacity; return opCount <= kBaseOpCapacity ? kBaseOpCapacity : kFullOpCapacity;
} }
//! Calculates the size of \ref InstNode required to hold at most `opCapacity` operands. //! Calculates the size of \ref InstNode required to hold at most `opCapacity` operands.
//! //!
//! This function is used internally to allocate \ref InstNode. //! This function is used internally to allocate \ref InstNode.
static ASMJIT_INLINE_NODEBUG constexpr size_t nodeSizeOfOpCapacity(uint32_t opCapacity) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR size_t nodeSizeOfOpCapacity(uint32_t opCapacity) noexcept {
return sizeof(InstNode) + opCapacity * sizeof(Operand); return sizeof(InstNode) + opCapacity * sizeof(Operand);
} }
//! \endcond //! \endcond
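To make the relationship between these two internal helpers concrete, a small compile-time sketch (the exact byte counts depend on sizeof(Operand) and the target, so they are intentionally not spelled out):

// Minimal sketch: how the two helpers relate (both are internal to AsmJit).
#include <asmjit/core.h>
using namespace asmjit;

static_assert(InstNode::capacityOfOpCount(3) == InstNode::kBaseOpCapacity);
static_assert(InstNode::capacityOfOpCount(5) == InstNode::kFullOpCapacity);

// The allocation size is the InstNode header plus one Operand slot per capacity entry.
static_assert(InstNode::nodeSizeOfOpCapacity(InstNode::kFullOpCapacity) >
              InstNode::nodeSizeOfOpCapacity(InstNode::kBaseOpCapacity));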
@@ -1107,8 +1238,11 @@ public:
//! \{ //! \{
//! Returns \ref Label representation of the \ref LabelNode. //! Returns \ref Label representation of the \ref LabelNode.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label label() const noexcept { return Label(_labelId); } ASMJIT_INLINE_NODEBUG Label label() const noexcept { return Label(_labelId); }
//! Returns the id of the label. //! Returns the id of the label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t labelId() const noexcept { return _labelId; } ASMJIT_INLINE_NODEBUG uint32_t labelId() const noexcept { return _labelId; }
//! \} //! \}
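For context, a LabelNode normally appears when a label is bound on a Builder-based emitter; a minimal sketch assuming the usual public API (bindLabelSketch is an illustrative name):

#include <asmjit/core.h>
using namespace asmjit;

static void bindLabelSketch(BaseBuilder& cb) {
  Label L = cb.newLabel();        // Creates a LabelEntry in the attached CodeHolder.
  cb.bind(L);                     // Appends a LabelNode whose labelId() == L.id().

  LabelNode* node = nullptr;
  cb.labelNodeOf(&node, L);       // Fetches the LabelNode associated with `L`.
  DebugUtils::unused(node);
}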
@@ -1146,12 +1280,16 @@ public:
//! \{ //! \{
//! Returns align mode. //! Returns align mode.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG AlignMode alignMode() const noexcept { return _alignData._alignMode; } ASMJIT_INLINE_NODEBUG AlignMode alignMode() const noexcept { return _alignData._alignMode; }
//! Sets align mode to `alignMode`. //! Sets align mode to `alignMode`.
ASMJIT_INLINE_NODEBUG void setAlignMode(AlignMode alignMode) noexcept { _alignData._alignMode = alignMode; } ASMJIT_INLINE_NODEBUG void setAlignMode(AlignMode alignMode) noexcept { _alignData._alignMode = alignMode; }
//! Returns the alignment in bytes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; } ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; }
//! Sets the alignment in bytes to `alignment`.
ASMJIT_INLINE_NODEBUG void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; } ASMJIT_INLINE_NODEBUG void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; }
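An AlignNode is typically emitted through the emitter's align() call; a short sketch (the alignment values are illustrative):

#include <asmjit/core.h>
using namespace asmjit;

static void alignSketch(BaseBuilder& cb) {
  cb.align(AlignMode::kCode, 16);  // AlignNode with alignMode() == kCode, alignment() == 16.
  cb.align(AlignMode::kData, 8);   // Data alignment uses AlignMode::kData instead.
}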
@@ -1167,9 +1305,7 @@ public:
ASMJIT_NONCOPYABLE(EmbedDataNode) ASMJIT_NONCOPYABLE(EmbedDataNode)
//! \cond INTERNAL //! \cond INTERNAL
enum : uint32_t { static inline constexpr uint32_t kInlineBufferSize = 128 - (sizeof(BaseNode) + sizeof(size_t) * 2);
kInlineBufferSize = 128 - (sizeof(BaseNode) + sizeof(size_t) * 2)
};
//! \endcond //! \endcond
//! \name Members //! \name Members
@@ -1204,30 +1340,38 @@ public:
//! \{ //! \{
//! Returns data type as \ref TypeId. //! Returns data type as \ref TypeId.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return _embed._typeId; } ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return _embed._typeId; }
//! Returns the size of a single data element. //! Returns the size of a single data element.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t typeSize() const noexcept { return _embed._typeSize; } ASMJIT_INLINE_NODEBUG uint32_t typeSize() const noexcept { return _embed._typeSize; }
//! Returns a pointer to the data cast to `uint8_t`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* data() const noexcept { ASMJIT_INLINE_NODEBUG uint8_t* data() const noexcept {
return dataSize() <= kInlineBufferSize ? const_cast<uint8_t*>(_inlineData) : _externalData; return dataSize() <= kInlineBufferSize ? const_cast<uint8_t*>(_inlineData) : _externalData;
} }
//! Returns a pointer to the data cast to `T`.
template<typename T> template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* dataAs() const noexcept { return reinterpret_cast<T*>(data()); } ASMJIT_INLINE_NODEBUG T* dataAs() const noexcept { return reinterpret_cast<T*>(data()); }
//! Returns the number of (typed) items in the array. //! Returns the number of (typed) items in the array.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t itemCount() const noexcept { return _itemCount; } ASMJIT_INLINE_NODEBUG size_t itemCount() const noexcept { return _itemCount; }
//! Returns how many times the data is repeated (default 1). //! Returns how many times the data is repeated (default 1).
//! //!
//! Repeated data is useful when defining constants for SIMD, for example. //! Repeated data is useful when defining constants for SIMD, for example.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t repeatCount() const noexcept { return _repeatCount; } ASMJIT_INLINE_NODEBUG size_t repeatCount() const noexcept { return _repeatCount; }
//! Returns the size of the data, not considering the number of times it repeats. //! Returns the size of the data, not considering the number of times it repeats.
//! //!
//! \note The returned value is the same as `typeSize() * itemCount()`. //! \note The returned value is the same as `typeSize() * itemCount()`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t dataSize() const noexcept { return typeSize() * _itemCount; } ASMJIT_INLINE_NODEBUG size_t dataSize() const noexcept { return typeSize() * _itemCount; }
//! \} //! \}
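An EmbedDataNode is usually produced by embedDataArray(); a sketch showing how the accessors above map to such a call (the data values are illustrative):

#include <asmjit/core.h>
using namespace asmjit;

static void embedDataSketch(BaseBuilder& cb) {
  uint32_t values[4] = { 1, 2, 3, 4 };
  cb.embedDataArray(TypeId::kUInt32, values, 4, 2);
  // Resulting node: typeId() == TypeId::kUInt32, typeSize() == 4, itemCount() == 4,
  // repeatCount() == 2, dataSize() == 16 - small enough to live in the inline buffer.
}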
@@ -1261,17 +1405,23 @@ public:
//! \{ //! \{
//! Returns the label to embed as \ref Label operand. //! Returns the label to embed as \ref Label operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label label() const noexcept { return Label(_labelId); } ASMJIT_INLINE_NODEBUG Label label() const noexcept { return Label(_labelId); }
//! Returns the id of the label.
ASMJIT_INLINE_NODEBUG uint32_t labelId() const noexcept { return _labelId; }
//! Sets the label id from `label` operand. //! Sets the label id from `label` operand.
ASMJIT_INLINE_NODEBUG void setLabel(const Label& label) noexcept { setLabelId(label.id()); } ASMJIT_INLINE_NODEBUG void setLabel(const Label& label) noexcept { setLabelId(label.id()); }
//! Returns the id of the label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t labelId() const noexcept { return _labelId; }
//! Sets the label id (use with caution, improper use can break a lot of things). //! Sets the label id (use with caution, improper use can break a lot of things).
ASMJIT_INLINE_NODEBUG void setLabelId(uint32_t labelId) noexcept { _labelId = labelId; } ASMJIT_INLINE_NODEBUG void setLabelId(uint32_t labelId) noexcept { _labelId = labelId; }
//! Returns the data size. //! Returns the data size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t dataSize() const noexcept { return _dataSize; } ASMJIT_INLINE_NODEBUG uint32_t dataSize() const noexcept { return _dataSize; }
//! Sets the data size. //! Sets the data size.
ASMJIT_INLINE_NODEBUG void setDataSize(uint32_t dataSize) noexcept { _dataSize = dataSize; } ASMJIT_INLINE_NODEBUG void setDataSize(uint32_t dataSize) noexcept { _dataSize = dataSize; }
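An EmbedLabelNode is typically created via embedLabel(); a brief sketch:

#include <asmjit/core.h>
using namespace asmjit;

static void embedLabelSketch(BaseBuilder& cb) {
  Label L = cb.newLabel();
  cb.bind(L);
  cb.embedLabel(L);                // dataSize() == 0 -> use the target's register size.
  cb.embedLabel(L, 8);             // Or force an explicit 8-byte slot.
}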
@@ -1308,18 +1458,25 @@ public:
//! \{ //! \{
//! Returns the label as `Label` operand. //! Returns the label as `Label` operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label label() const noexcept { return Label(_labelId); } ASMJIT_INLINE_NODEBUG Label label() const noexcept { return Label(_labelId); }
//! Returns the id of the label.
ASMJIT_INLINE_NODEBUG uint32_t labelId() const noexcept { return _labelId; }
//! Sets the label id from `label` operand. //! Sets the label id from `label` operand.
ASMJIT_INLINE_NODEBUG void setLabel(const Label& label) noexcept { setLabelId(label.id()); } ASMJIT_INLINE_NODEBUG void setLabel(const Label& label) noexcept { setLabelId(label.id()); }
//! Returns the id of the label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t labelId() const noexcept { return _labelId; }
//! Sets the label id. //! Sets the label id.
ASMJIT_INLINE_NODEBUG void setLabelId(uint32_t labelId) noexcept { _labelId = labelId; } ASMJIT_INLINE_NODEBUG void setLabelId(uint32_t labelId) noexcept { _labelId = labelId; }
//! Returns the base label as `Label` operand. //! Returns the base label as `Label` operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label baseLabel() const noexcept { return Label(_baseLabelId); } ASMJIT_INLINE_NODEBUG Label baseLabel() const noexcept { return Label(_baseLabelId); }
//! Returns the id of the base label. //! Returns the id of the base label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t baseLabelId() const noexcept { return _baseLabelId; } ASMJIT_INLINE_NODEBUG uint32_t baseLabelId() const noexcept { return _baseLabelId; }
//! Sets the base label id from `label` operand. //! Sets the base label id from `label` operand.
@@ -1328,7 +1485,9 @@ public:
ASMJIT_INLINE_NODEBUG void setBaseLabelId(uint32_t baseLabelId) noexcept { _baseLabelId = baseLabelId; } ASMJIT_INLINE_NODEBUG void setBaseLabelId(uint32_t baseLabelId) noexcept { _baseLabelId = baseLabelId; }
//! Returns the size of the embedded label address. //! Returns the size of the embedded label address.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t dataSize() const noexcept { return _dataSize; } ASMJIT_INLINE_NODEBUG uint32_t dataSize() const noexcept { return _dataSize; }
//! Sets the size of the embedded label address. //! Sets the size of the embedded label address.
ASMJIT_INLINE_NODEBUG void setDataSize(uint32_t dataSize) noexcept { _dataSize = dataSize; } ASMJIT_INLINE_NODEBUG void setDataSize(uint32_t dataSize) noexcept { _dataSize = dataSize; }
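Similarly, an EmbedLabelDeltaNode comes from embedLabelDelta(), which embeds the distance between a label and a base label; a sketch with an illustrative 4-byte slot:

#include <asmjit/core.h>
using namespace asmjit;

static void embedLabelDeltaSketch(BaseBuilder& cb) {
  Label base = cb.newLabel();
  Label target = cb.newLabel();
  cb.bind(base);
  // ... emitted code ...
  cb.bind(target);
  cb.embedLabelDelta(target, base, 4);  // label() == target, baseLabel() == base, dataSize() == 4.
}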
@@ -1366,15 +1525,23 @@ public:
//! \{ //! \{
//! Tests whether the constant-pool is empty. //! Tests whether the constant-pool is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _constPool.empty(); } ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _constPool.empty(); }
//! Returns the size of the constant-pool in bytes. //! Returns the size of the constant-pool in bytes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _constPool.size(); } ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _constPool.size(); }
//! Returns minimum alignment. //! Returns minimum alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t alignment() const noexcept { return _constPool.alignment(); } ASMJIT_INLINE_NODEBUG size_t alignment() const noexcept { return _constPool.alignment(); }
//! Returns the wrapped `ConstPool` instance. //! Returns the wrapped `ConstPool` instance.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ConstPool& constPool() noexcept { return _constPool; } ASMJIT_INLINE_NODEBUG ConstPool& constPool() noexcept { return _constPool; }
//! Returns the wrapped `ConstPool` instance (const). //! Returns the wrapped `ConstPool` instance (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ConstPool& constPool() const noexcept { return _constPool; } ASMJIT_INLINE_NODEBUG const ConstPool& constPool() const noexcept { return _constPool; }
//! \} //! \}
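A rough sketch of creating and filling a constant-pool node on a Builder; it assumes BaseBuilder::newConstPoolNode() and ConstPool::add(), which are outside this hunk, and the constant value is illustrative:

#include <asmjit/core.h>
using namespace asmjit;

static void constPoolSketch(BaseBuilder& cb) {
  ConstPoolNode* pool = nullptr;
  if (cb.newConstPoolNode(&pool) != kErrorOk) {
    return;
  }

  uint64_t k = 0x0102030405060708u;
  size_t offset;
  pool->constPool().add(&k, sizeof(k), offset);  // `offset` receives the constant's offset.
  cb.addNode(pool);                              // Insert the pool into the node list.
}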
@@ -1431,6 +1598,7 @@ public:
//! \{ //! \{
//! Returns the type of the sentinel. //! Returns the type of the sentinel.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SentinelType sentinelType() const noexcept { ASMJIT_INLINE_NODEBUG SentinelType sentinelType() const noexcept {
return _sentinel._sentinelType; return _sentinel._sentinelType;
} }
@@ -1471,8 +1639,11 @@ public:
//! \{ //! \{
//! Returns \ref BaseBuilder associated with the pass. //! Returns \ref BaseBuilder associated with the pass.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const BaseBuilder* cb() const noexcept { return _cb; } ASMJIT_INLINE_NODEBUG const BaseBuilder* cb() const noexcept { return _cb; }
//! Returns the name of the pass. //! Returns the name of the pass.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name; } ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name; }
//! \} //! \}
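A pass is normally implemented by subclassing Pass and registering it on a Builder via addPassT<>(); a rough sketch (the exact run() signature may differ slightly between AsmJit versions, and CountInstsPass is an illustrative name):

#include <asmjit/core.h>
using namespace asmjit;

class CountInstsPass : public Pass {
public:
  CountInstsPass() noexcept : Pass("CountInstsPass") {}

  Error run(Zone* zone, Logger* logger) override {
    DebugUtils::unused(zone, logger);
    uint32_t count = 0;
    for (BaseNode* node = cb()->firstNode(); node; node = node->next()) {
      count += uint32_t(node->isInst());
    }
    DebugUtils::unused(count);
    return kErrorOk;
  }
};

// Registered with: builder.addPassT<CountInstsPass>();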


@@ -45,11 +45,14 @@ struct CodeBuffer {
//! \{ //! \{
//! Returns a reference to the byte at the given `index`. //! Returns a reference to the byte at the given `index`.
[[nodiscard]]
inline uint8_t& operator[](size_t index) noexcept { inline uint8_t& operator[](size_t index) noexcept {
ASMJIT_ASSERT(index < _size); ASMJIT_ASSERT(index < _size);
return _data[index]; return _data[index];
} }
//! \overload //! \overload
[[nodiscard]]
inline const uint8_t& operator[](size_t index) const noexcept { inline const uint8_t& operator[](size_t index) const noexcept {
ASMJIT_ASSERT(index < _size); ASMJIT_ASSERT(index < _size);
return _data[index]; return _data[index];
@@ -61,34 +64,47 @@ struct CodeBuffer {
//! \{ //! \{
//! Returns code buffer flags. //! Returns code buffer flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CodeBufferFlags flags() const noexcept { return _flags; } ASMJIT_INLINE_NODEBUG CodeBufferFlags flags() const noexcept { return _flags; }
//! Tests whether the code buffer has the given `flag` set. //! Tests whether the code buffer has the given `flag` set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(CodeBufferFlags flag) const noexcept { return Support::test(_flags, flag); } ASMJIT_INLINE_NODEBUG bool hasFlag(CodeBufferFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Tests whether this code buffer has a fixed size. //! Tests whether this code buffer has a fixed size.
//! //!
//! Fixed size means that the code buffer is fixed and cannot grow. //! Fixed size means that the code buffer is fixed and cannot grow.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFixed() const noexcept { return hasFlag(CodeBufferFlags::kIsFixed); } ASMJIT_INLINE_NODEBUG bool isFixed() const noexcept { return hasFlag(CodeBufferFlags::kIsFixed); }
//! Tests whether the data in this code buffer is external. //! Tests whether the data in this code buffer is external.
//! //!
//! External data can only be provided by users, it's never used by AsmJit. //! External data can only be provided by users, it's never used by AsmJit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isExternal() const noexcept { return hasFlag(CodeBufferFlags::kIsExternal); } ASMJIT_INLINE_NODEBUG bool isExternal() const noexcept { return hasFlag(CodeBufferFlags::kIsExternal); }
//! Tests whether the data in this code buffer is allocated (non-null). //! Tests whether the data in this code buffer is allocated (non-null).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAllocated() const noexcept { return _data != nullptr; } ASMJIT_INLINE_NODEBUG bool isAllocated() const noexcept { return _data != nullptr; }
//! Tests whether the code buffer is empty. //! Tests whether the code buffer is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return !_size; } ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return !_size; }
//! Returns the size of the data. //! Returns the size of the data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; } ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
//! Returns the capacity of the data. //! Returns the capacity of the data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t capacity() const noexcept { return _capacity; } ASMJIT_INLINE_NODEBUG size_t capacity() const noexcept { return _capacity; }
//! Returns the pointer to the data the buffer references. //! Returns the pointer to the data the buffer references.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* data() noexcept { return _data; } ASMJIT_INLINE_NODEBUG uint8_t* data() noexcept { return _data; }
//! \overload //! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const uint8_t* data() const noexcept { return _data; } ASMJIT_INLINE_NODEBUG const uint8_t* data() const noexcept { return _data; }
//! \} //! \}
@@ -96,10 +112,16 @@ struct CodeBuffer {
//! \name Iterators //! \name Iterators
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* begin() noexcept { return _data; } ASMJIT_INLINE_NODEBUG uint8_t* begin() noexcept { return _data; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const uint8_t* begin() const noexcept { return _data; } ASMJIT_INLINE_NODEBUG const uint8_t* begin() const noexcept { return _data; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* end() noexcept { return _data + _size; } ASMJIT_INLINE_NODEBUG uint8_t* end() noexcept { return _data + _size; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const uint8_t* end() const noexcept { return _data + _size; } ASMJIT_INLINE_NODEBUG const uint8_t* end() const noexcept { return _data + _size; }
//! \} //! \}
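A sketch of reading these accessors and iterators on the .text section's buffer (dumpTextBuffer is an illustrative helper name):

#include <asmjit/core.h>
#include <cstdio>
using namespace asmjit;

static void dumpTextBuffer(CodeHolder& code) {
  CodeBuffer& buf = code.textSection()->buffer();
  std::printf("size=%zu capacity=%zu fixed=%d\n", buf.size(), buf.capacity(), int(buf.isFixed()));
  for (uint8_t byte : buf) {
    std::printf("%02X ", unsigned(byte));
  }
  std::printf("\n");
}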


@@ -83,8 +83,9 @@ static void CodeHolder_resetInternal(CodeHolder* self, ResetPolicy resetPolicy)
uint32_t numSections = self->_sections.size(); uint32_t numSections = self->_sections.size();
for (i = 0; i < numSections; i++) { for (i = 0; i < numSections; i++) {
Section* section = self->_sections[i]; Section* section = self->_sections[i];
if (section->_buffer.data() && !section->_buffer.isExternal()) if (section->_buffer.data() && !section->_buffer.isExternal()) {
::free(section->_buffer._data); ::free(section->_buffer._data);
}
section->_buffer._data = nullptr; section->_buffer._data = nullptr;
section->_buffer._capacity = 0; section->_buffer._capacity = 0;
} }
@@ -150,8 +151,9 @@ Error CodeHolder::init(const Environment& environment, uint64_t baseAddress) noe
Error CodeHolder::init(const Environment& environment, const CpuFeatures& cpuFeatures, uint64_t baseAddress) noexcept { Error CodeHolder::init(const Environment& environment, const CpuFeatures& cpuFeatures, uint64_t baseAddress) noexcept {
// Cannot reinitialize if it's locked or there is one or more emitter attached. // Cannot reinitialize if it's locked or there is one or more emitter attached.
if (isInitialized()) if (isInitialized()) {
return DebugUtils::errored(kErrorAlreadyInitialized); return DebugUtils::errored(kErrorAlreadyInitialized);
}
// If we are just initializing there should be no emitters attached. // If we are just initializing there should be no emitters attached.
ASMJIT_ASSERT(_emitters.empty()); ASMJIT_ASSERT(_emitters.empty());
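For reference, the life-cycle these checks guard is the usual init/attach/detach flow; a minimal sketch (JitRuntime and x86::Assembler are outside this diff):

#include <asmjit/x86.h>
using namespace asmjit;

static int initAttachSketch() {
  JitRuntime rt;                 // Supplies Environment and CpuFeatures.
  CodeHolder code;

  if (code.init(rt.environment(), rt.cpuFeatures()) != kErrorOk) {
    return 1;
  }

  x86::Assembler a(&code);       // The constructor calls code.attach(&a).
  a.ret();

  code.detach(&a);               // Optional - CodeHolder also detaches on reset/destroy.
  return 0;
}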
@@ -193,23 +195,27 @@ void CodeHolder::reset(ResetPolicy resetPolicy) noexcept {
Error CodeHolder::attach(BaseEmitter* emitter) noexcept { Error CodeHolder::attach(BaseEmitter* emitter) noexcept {
// Catch a possible misuse of the API. // Catch a possible misuse of the API.
if (ASMJIT_UNLIKELY(!emitter)) if (ASMJIT_UNLIKELY(!emitter)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
// Invalid emitter, this should not be possible. // Invalid emitter, this should not be possible.
EmitterType type = emitter->emitterType(); EmitterType type = emitter->emitterType();
if (ASMJIT_UNLIKELY(type == EmitterType::kNone || uint32_t(type) > uint32_t(EmitterType::kMaxValue))) if (ASMJIT_UNLIKELY(type == EmitterType::kNone || uint32_t(type) > uint32_t(EmitterType::kMaxValue))) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
uint64_t archMask = emitter->_archMask; uint64_t archMask = emitter->_archMask;
if (ASMJIT_UNLIKELY(!(archMask & (uint64_t(1) << uint32_t(arch()))))) if (ASMJIT_UNLIKELY(!(archMask & (uint64_t(1) << uint32_t(arch()))))) {
return DebugUtils::errored(kErrorInvalidArch); return DebugUtils::errored(kErrorInvalidArch);
}
// This is suspicious, but don't fail if `emitter` is already attached
// to this code holder. This is not an error, but it's not recommended.
if (emitter->_code != nullptr) { if (emitter->_code != nullptr) {
if (emitter->_code == this) if (emitter->_code == this) {
return kErrorOk; return kErrorOk;
}
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
} }
@@ -225,18 +231,21 @@ Error CodeHolder::attach(BaseEmitter* emitter) noexcept {
} }
Error CodeHolder::detach(BaseEmitter* emitter) noexcept { Error CodeHolder::detach(BaseEmitter* emitter) noexcept {
if (ASMJIT_UNLIKELY(!emitter)) if (ASMJIT_UNLIKELY(!emitter)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
if (ASMJIT_UNLIKELY(emitter->_code != this)) if (ASMJIT_UNLIKELY(emitter->_code != this)) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
// NOTE: We always detach if we were asked to. If an error happens during
// `emitter->onDetach()` we just propagate it, but the BaseEmitter will
// be detached.
Error err = kErrorOk; Error err = kErrorOk;
if (!emitter->isDestroyed()) if (!emitter->isDestroyed()) {
err = emitter->onDetach(this); err = emitter->onDetach(this);
}
// Disconnect CodeHolder <-> BaseEmitter. // Disconnect CodeHolder <-> BaseEmitter.
uint32_t index = _emitters.indexOf(emitter); uint32_t index = _emitters.indexOf(emitter);
@@ -275,13 +284,16 @@ static Error CodeHolder_reserveInternal(CodeHolder* self, CodeBuffer* cb, size_t
uint8_t* oldData = cb->_data; uint8_t* oldData = cb->_data;
uint8_t* newData; uint8_t* newData;
if (oldData && !cb->isExternal()) if (oldData && !cb->isExternal()) {
newData = static_cast<uint8_t*>(::realloc(oldData, n)); newData = static_cast<uint8_t*>(::realloc(oldData, n));
else }
else {
newData = static_cast<uint8_t*>(::malloc(n)); newData = static_cast<uint8_t*>(::malloc(n));
}
if (ASMJIT_UNLIKELY(!newData)) if (ASMJIT_UNLIKELY(!newData)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
cb->_data = newData; cb->_data = newData;
cb->_capacity = n; cb->_capacity = n;
@@ -306,35 +318,44 @@ static Error CodeHolder_reserveInternal(CodeHolder* self, CodeBuffer* cb, size_t
Error CodeHolder::growBuffer(CodeBuffer* cb, size_t n) noexcept { Error CodeHolder::growBuffer(CodeBuffer* cb, size_t n) noexcept {
// The size of the section must be valid. // The size of the section must be valid.
size_t size = cb->size(); size_t size = cb->size();
if (ASMJIT_UNLIKELY(n > std::numeric_limits<uintptr_t>::max() - size)) if (ASMJIT_UNLIKELY(n > std::numeric_limits<uintptr_t>::max() - size)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
// We can now check if growing the buffer is really necessary. It's unlikely // We can now check if growing the buffer is really necessary. It's unlikely
// that this function is called while there is still room for `n` bytes. // that this function is called while there is still room for `n` bytes.
size_t capacity = cb->capacity(); size_t capacity = cb->capacity();
size_t required = cb->size() + n; size_t required = cb->size() + n;
if (ASMJIT_UNLIKELY(required <= capacity))
return kErrorOk;
if (cb->isFixed()) if (ASMJIT_UNLIKELY(required <= capacity)) {
return kErrorOk;
}
if (cb->isFixed()) {
return DebugUtils::errored(kErrorTooLarge); return DebugUtils::errored(kErrorTooLarge);
}
size_t kInitialCapacity = 8096; size_t kInitialCapacity = 8096;
if (capacity < kInitialCapacity) if (capacity < kInitialCapacity) {
capacity = kInitialCapacity; capacity = kInitialCapacity;
else }
else {
capacity += Globals::kAllocOverhead; capacity += Globals::kAllocOverhead;
}
do { do {
size_t old = capacity; size_t old = capacity;
if (capacity < Globals::kGrowThreshold) if (capacity < Globals::kGrowThreshold) {
capacity *= 2; capacity *= 2;
else }
else {
capacity += Globals::kGrowThreshold; capacity += Globals::kGrowThreshold;
}
// Overflow. // Overflow.
if (ASMJIT_UNLIKELY(old > capacity)) if (ASMJIT_UNLIKELY(old > capacity)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
} while (capacity - Globals::kAllocOverhead < required); } while (capacity - Globals::kAllocOverhead < required);
return CodeHolder_reserveInternal(this, cb, capacity - Globals::kAllocOverhead); return CodeHolder_reserveInternal(this, cb, capacity - Globals::kAllocOverhead);
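The loop above doubles the capacity while it is below Globals::kGrowThreshold and then grows linearly; a standalone sketch of the same policy with the constants passed in (not the library's exact values):

#include <cstddef>

// Minimal sketch of the doubling-then-linear growth used by growBuffer().
static size_t growCapacity(size_t capacity, size_t required,
                           size_t initialCapacity, size_t growThreshold, size_t allocOverhead) {
  capacity = capacity < initialCapacity ? initialCapacity : capacity + allocOverhead;
  do {
    size_t old = capacity;
    capacity = capacity < growThreshold ? capacity * 2 : capacity + growThreshold;
    if (old > capacity) {
      return 0;                  // Overflow - growBuffer() reports kErrorOutOfMemory here.
    }
  } while (capacity - allocOverhead < required);
  return capacity - allocOverhead;  // Usable capacity handed to the reserve step.
}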
@@ -343,11 +364,13 @@ Error CodeHolder::growBuffer(CodeBuffer* cb, size_t n) noexcept {
Error CodeHolder::reserveBuffer(CodeBuffer* cb, size_t n) noexcept { Error CodeHolder::reserveBuffer(CodeBuffer* cb, size_t n) noexcept {
size_t capacity = cb->capacity(); size_t capacity = cb->capacity();
if (n <= capacity) if (n <= capacity) {
return kErrorOk; return kErrorOk;
}
if (cb->isFixed()) if (cb->isFixed()) {
return DebugUtils::errored(kErrorTooLarge); return DebugUtils::errored(kErrorTooLarge);
}
return CodeHolder_reserveInternal(this, cb, n); return CodeHolder_reserveInternal(this, cb, n);
} }
@@ -358,28 +381,34 @@ Error CodeHolder::reserveBuffer(CodeBuffer* cb, size_t n) noexcept {
Error CodeHolder::newSection(Section** sectionOut, const char* name, size_t nameSize, SectionFlags flags, uint32_t alignment, int32_t order) noexcept { Error CodeHolder::newSection(Section** sectionOut, const char* name, size_t nameSize, SectionFlags flags, uint32_t alignment, int32_t order) noexcept {
*sectionOut = nullptr; *sectionOut = nullptr;
if (nameSize == SIZE_MAX) if (nameSize == SIZE_MAX) {
nameSize = strlen(name); nameSize = strlen(name);
}
if (alignment == 0) if (alignment == 0) {
alignment = 1; alignment = 1;
}
if (ASMJIT_UNLIKELY(!Support::isPowerOf2(alignment))) if (ASMJIT_UNLIKELY(!Support::isPowerOf2(alignment))) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxSectionNameSize)) if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxSectionNameSize)) {
return DebugUtils::errored(kErrorInvalidSectionName); return DebugUtils::errored(kErrorInvalidSectionName);
}
uint32_t sectionId = _sections.size(); uint32_t sectionId = _sections.size();
if (ASMJIT_UNLIKELY(sectionId == Globals::kInvalidId)) if (ASMJIT_UNLIKELY(sectionId == Globals::kInvalidId)) {
return DebugUtils::errored(kErrorTooManySections); return DebugUtils::errored(kErrorTooManySections);
}
ASMJIT_PROPAGATE(_sections.willGrow(&_allocator)); ASMJIT_PROPAGATE(_sections.willGrow(&_allocator));
ASMJIT_PROPAGATE(_sectionsByOrder.willGrow(&_allocator)); ASMJIT_PROPAGATE(_sectionsByOrder.willGrow(&_allocator));
Section* section = _allocator.allocZeroedT<Section>(); Section* section = _allocator.allocZeroedT<Section>();
if (ASMJIT_UNLIKELY(!section)) if (ASMJIT_UNLIKELY(!section)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
section->_id = sectionId; section->_id = sectionId;
section->_flags = flags; section->_flags = flags;
@@ -399,24 +428,28 @@ Error CodeHolder::newSection(Section** sectionOut, const char* name, size_t name
} }
Section* CodeHolder::sectionByName(const char* name, size_t nameSize) const noexcept { Section* CodeHolder::sectionByName(const char* name, size_t nameSize) const noexcept {
if (nameSize == SIZE_MAX) if (nameSize == SIZE_MAX) {
nameSize = strlen(name); nameSize = strlen(name);
}
// This could also be put in a hash-table, similarly to what we do with labels;
// however, it's questionable as the number of sections should be pretty low
// in general. Create an issue if this becomes a problem.
if (nameSize <= Globals::kMaxSectionNameSize) { if (nameSize <= Globals::kMaxSectionNameSize) {
for (Section* section : _sections) for (Section* section : _sections) {
if (memcmp(section->_name.str, name, nameSize) == 0 && section->_name.str[nameSize] == '\0') if (memcmp(section->_name.str, name, nameSize) == 0 && section->_name.str[nameSize] == '\0') {
return section; return section;
}
}
} }
return nullptr; return nullptr;
} }
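A usage sketch for the two functions above; the section name, flags, and alignment are illustrative:

#include <asmjit/core.h>
using namespace asmjit;

static Section* addDataSection(CodeHolder& code) {
  Section* data = nullptr;
  if (code.newSection(&data, ".data", SIZE_MAX, SectionFlags::kNone, 8) != kErrorOk) {
    return nullptr;
  }
  return code.sectionByName(".data");  // Linear scan - fine while the section count stays small.
}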
Section* CodeHolder::ensureAddressTableSection() noexcept { Section* CodeHolder::ensureAddressTableSection() noexcept {
if (_addressTableSection) if (_addressTableSection) {
return _addressTableSection; return _addressTableSection;
}
newSection(&_addressTableSection, newSection(&_addressTableSection,
CodeHolder_addrTabName, CodeHolder_addrTabName,
@@ -429,16 +462,19 @@ Section* CodeHolder::ensureAddressTableSection() noexcept {
Error CodeHolder::addAddressToAddressTable(uint64_t address) noexcept { Error CodeHolder::addAddressToAddressTable(uint64_t address) noexcept {
AddressTableEntry* entry = _addressTableEntries.get(address); AddressTableEntry* entry = _addressTableEntries.get(address);
if (entry) if (entry) {
return kErrorOk; return kErrorOk;
}
Section* section = ensureAddressTableSection(); Section* section = ensureAddressTableSection();
if (ASMJIT_UNLIKELY(!section)) if (ASMJIT_UNLIKELY(!section)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
entry = _zone.newT<AddressTableEntry>(address); entry = _zone.newT<AddressTableEntry>(address);
if (ASMJIT_UNLIKELY(!entry)) if (ASMJIT_UNLIKELY(!entry)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
_addressTableEntries.insert(entry); _addressTableEntries.insert(entry);
section->_virtualSize += _environment.registerSize(); section->_virtualSize += _environment.registerSize();
@@ -452,24 +488,26 @@ Error CodeHolder::addAddressToAddressTable(uint64_t address) noexcept {
//! Only used to look up a label from `_namedLabels`.
class LabelByName { class LabelByName {
public: public:
const char* _key {};
uint32_t _keySize {};
uint32_t _hashCode {};
uint32_t _parentId {};
inline LabelByName(const char* key, size_t keySize, uint32_t hashCode, uint32_t parentId) noexcept inline LabelByName(const char* key, size_t keySize, uint32_t hashCode, uint32_t parentId) noexcept
: _key(key), : _key(key),
_keySize(uint32_t(keySize)), _keySize(uint32_t(keySize)),
_hashCode(hashCode), _hashCode(hashCode),
_parentId(parentId) {} _parentId(parentId) {}
[[nodiscard]]
inline uint32_t hashCode() const noexcept { return _hashCode; } inline uint32_t hashCode() const noexcept { return _hashCode; }
[[nodiscard]]
inline bool matches(const LabelEntry* entry) const noexcept { inline bool matches(const LabelEntry* entry) const noexcept {
return entry->nameSize() == _keySize && return entry->nameSize() == _keySize &&
entry->parentId() == _parentId && entry->parentId() == _parentId &&
::memcmp(entry->name(), _key, _keySize) == 0; ::memcmp(entry->name(), _key, _keySize) == 0;
} }
const char* _key;
uint32_t _keySize;
uint32_t _hashCode;
uint32_t _parentId;
}; };
// Returns a hash of `name` and fixes `nameSize` if it's `SIZE_MAX`. // Returns a hash of `name` and fixes `nameSize` if it's `SIZE_MAX`.
@@ -479,7 +517,9 @@ static uint32_t CodeHolder_hashNameAndGetSize(const char* name, size_t& nameSize
size_t i = 0; size_t i = 0;
for (;;) { for (;;) {
uint8_t c = uint8_t(name[i]); uint8_t c = uint8_t(name[i]);
if (!c) break; if (!c) {
break;
}
hashCode = Support::hashRound(hashCode, c); hashCode = Support::hashRound(hashCode, c);
i++; i++;
} }
@@ -488,7 +528,9 @@ static uint32_t CodeHolder_hashNameAndGetSize(const char* name, size_t& nameSize
else { else {
for (size_t i = 0; i < nameSize; i++) { for (size_t i = 0; i < nameSize; i++) {
uint8_t c = uint8_t(name[i]); uint8_t c = uint8_t(name[i]);
if (ASMJIT_UNLIKELY(!c)) return DebugUtils::errored(kErrorInvalidLabelName); if (ASMJIT_UNLIKELY(!c)) {
return DebugUtils::errored(kErrorInvalidLabelName);
}
hashCode = Support::hashRound(hashCode, c); hashCode = Support::hashRound(hashCode, c);
} }
} }
@@ -497,7 +539,9 @@ static uint32_t CodeHolder_hashNameAndGetSize(const char* name, size_t& nameSize
LabelLink* CodeHolder::newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel, const OffsetFormat& format) noexcept { LabelLink* CodeHolder::newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel, const OffsetFormat& format) noexcept {
LabelLink* link = _allocator.allocT<LabelLink>(); LabelLink* link = _allocator.allocT<LabelLink>();
if (ASMJIT_UNLIKELY(!link)) return nullptr; if (ASMJIT_UNLIKELY(!link)) {
return nullptr;
}
link->next = le->_links; link->next = le->_links;
le->_links = link; le->_links = link;
@@ -516,14 +560,16 @@ Error CodeHolder::newLabelEntry(LabelEntry** entryOut) noexcept {
*entryOut = nullptr; *entryOut = nullptr;
uint32_t labelId = _labelEntries.size(); uint32_t labelId = _labelEntries.size();
if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId)) if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId)) {
return DebugUtils::errored(kErrorTooManyLabels); return DebugUtils::errored(kErrorTooManyLabels);
}
ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator)); ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator));
LabelEntry* le = _allocator.allocZeroedT<LabelEntry>(); LabelEntry* le = _allocator.allocZeroedT<LabelEntry>();
if (ASMJIT_UNLIKELY(!le)) if (ASMJIT_UNLIKELY(!le)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
le->_setId(labelId); le->_setId(labelId);
le->_parentId = Globals::kInvalidId; le->_parentId = Globals::kInvalidId;
@@ -539,10 +585,12 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si
uint32_t hashCode = CodeHolder_hashNameAndGetSize(name, nameSize); uint32_t hashCode = CodeHolder_hashNameAndGetSize(name, nameSize);
if (ASMJIT_UNLIKELY(nameSize == 0)) { if (ASMJIT_UNLIKELY(nameSize == 0)) {
if (type == LabelType::kAnonymous) if (type == LabelType::kAnonymous) {
return newLabelEntry(entryOut); return newLabelEntry(entryOut);
else }
else {
return DebugUtils::errored(kErrorInvalidLabelName); return DebugUtils::errored(kErrorInvalidLabelName);
}
} }
if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxLabelNameSize)) if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxLabelNameSize))
@@ -551,18 +599,21 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si
switch (type) { switch (type) {
case LabelType::kAnonymous: { case LabelType::kAnonymous: {
// Anonymous labels cannot have a parent (or more specifically, parent is useless here). // Anonymous labels cannot have a parent (or more specifically, parent is useless here).
if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId)) if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId)) {
return DebugUtils::errored(kErrorInvalidParentLabel); return DebugUtils::errored(kErrorInvalidParentLabel);
}
uint32_t labelId = _labelEntries.size(); uint32_t labelId = _labelEntries.size();
if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId)) if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId)) {
return DebugUtils::errored(kErrorTooManyLabels); return DebugUtils::errored(kErrorTooManyLabels);
}
ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator)); ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator));
LabelEntry* le = _allocator.allocZeroedT<LabelEntry>(); LabelEntry* le = _allocator.allocZeroedT<LabelEntry>();
if (ASMJIT_UNLIKELY(!le)) if (ASMJIT_UNLIKELY(!le)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
// NOTE: This LabelEntry has a name, but we leave its hashCode as zero as it's anonymous. // NOTE: This LabelEntry has a name, but we leave its hashCode as zero as it's anonymous.
le->_setId(labelId); le->_setId(labelId);
@@ -577,8 +628,9 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si
} }
case LabelType::kLocal: { case LabelType::kLocal: {
if (ASMJIT_UNLIKELY(parentId >= _labelEntries.size())) if (ASMJIT_UNLIKELY(parentId >= _labelEntries.size())) {
return DebugUtils::errored(kErrorInvalidParentLabel); return DebugUtils::errored(kErrorInvalidParentLabel);
}
hashCode ^= parentId; hashCode ^= parentId;
break; break;
@@ -586,8 +638,9 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si
case LabelType::kGlobal: case LabelType::kGlobal:
case LabelType::kExternal: { case LabelType::kExternal: {
if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId)) if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId)) {
return DebugUtils::errored(kErrorInvalidParentLabel); return DebugUtils::errored(kErrorInvalidParentLabel);
}
break; break;
} }
@@ -600,20 +653,23 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si
// different id, this is already accomplished by having different hashes
// between the same label names having different parent labels.
LabelEntry* le = _namedLabels.get(LabelByName(name, nameSize, hashCode, parentId)); LabelEntry* le = _namedLabels.get(LabelByName(name, nameSize, hashCode, parentId));
if (ASMJIT_UNLIKELY(le)) if (ASMJIT_UNLIKELY(le)) {
return DebugUtils::errored(kErrorLabelAlreadyDefined); return DebugUtils::errored(kErrorLabelAlreadyDefined);
}
Error err = kErrorOk; Error err = kErrorOk;
uint32_t labelId = _labelEntries.size(); uint32_t labelId = _labelEntries.size();
if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId)) if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId)) {
return DebugUtils::errored(kErrorTooManyLabels); return DebugUtils::errored(kErrorTooManyLabels);
}
ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator)); ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator));
le = _allocator.allocZeroedT<LabelEntry>(); le = _allocator.allocZeroedT<LabelEntry>();
if (ASMJIT_UNLIKELY(!le)) if (ASMJIT_UNLIKELY(!le)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
le->_hashCode = hashCode; le->_hashCode = hashCode;
le->_setId(labelId); le->_setId(labelId);
@@ -631,24 +687,28 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si
uint32_t CodeHolder::labelIdByName(const char* name, size_t nameSize, uint32_t parentId) noexcept { uint32_t CodeHolder::labelIdByName(const char* name, size_t nameSize, uint32_t parentId) noexcept {
uint32_t hashCode = CodeHolder_hashNameAndGetSize(name, nameSize); uint32_t hashCode = CodeHolder_hashNameAndGetSize(name, nameSize);
if (ASMJIT_UNLIKELY(!nameSize)) if (ASMJIT_UNLIKELY(!nameSize)) {
return 0; return 0;
}
if (parentId != Globals::kInvalidId) if (parentId != Globals::kInvalidId) {
hashCode ^= parentId; hashCode ^= parentId;
}
LabelEntry* le = _namedLabels.get(LabelByName(name, nameSize, hashCode, parentId)); LabelEntry* le = _namedLabels.get(LabelByName(name, nameSize, hashCode, parentId));
return le ? le->id() : uint32_t(Globals::kInvalidId); return le ? le->id() : uint32_t(Globals::kInvalidId);
} }
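A sketch of how named-label entries are typically created and then looked up from user code; x86::Assembler is just an example front-end and not part of this hunk:

#include <asmjit/x86.h>
using namespace asmjit;

static void namedLabelSketch(CodeHolder& code, x86::Assembler& a) {
  Label entry = a.newNamedLabel("entry", SIZE_MAX, LabelType::kGlobal);
  a.bind(entry);

  uint32_t id = code.labelIdByName("entry");  // Hash lookup in _namedLabels; id == entry.id().
  DebugUtils::unused(id);
}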
ASMJIT_API Error CodeHolder::resolveUnresolvedLinks() noexcept { ASMJIT_API Error CodeHolder::resolveUnresolvedLinks() noexcept {
if (!hasUnresolvedLinks()) if (!hasUnresolvedLinks()) {
return kErrorOk; return kErrorOk;
}
Error err = kErrorOk; Error err = kErrorOk;
for (LabelEntry* le : labelEntries()) { for (LabelEntry* le : labelEntries()) {
if (!le->isBound()) if (!le->isBound()) {
continue; continue;
}
LabelLinkIterator link(le); LabelLinkIterator link(le);
if (link) { if (link) {
@@ -695,15 +755,18 @@ ASMJIT_API Error CodeHolder::resolveUnresolvedLinks() noexcept {
ASMJIT_API Error CodeHolder::bindLabel(const Label& label, uint32_t toSectionId, uint64_t toOffset) noexcept { ASMJIT_API Error CodeHolder::bindLabel(const Label& label, uint32_t toSectionId, uint64_t toOffset) noexcept {
LabelEntry* le = labelEntry(label); LabelEntry* le = labelEntry(label);
if (ASMJIT_UNLIKELY(!le)) if (ASMJIT_UNLIKELY(!le)) {
return DebugUtils::errored(kErrorInvalidLabel); return DebugUtils::errored(kErrorInvalidLabel);
}
if (ASMJIT_UNLIKELY(toSectionId > _sections.size())) if (ASMJIT_UNLIKELY(toSectionId > _sections.size())) {
return DebugUtils::errored(kErrorInvalidSection); return DebugUtils::errored(kErrorInvalidSection);
}
// Label can be bound only once. // Label can be bound only once.
if (ASMJIT_UNLIKELY(le->isBound())) if (ASMJIT_UNLIKELY(le->isBound())) {
return DebugUtils::errored(kErrorLabelAlreadyBound); return DebugUtils::errored(kErrorLabelAlreadyBound);
}
// Bind the label. // Bind the label.
Section* section = _sections[toSectionId]; Section* section = _sections[toSectionId];
@@ -761,12 +824,14 @@ Error CodeHolder::newRelocEntry(RelocEntry** dst, RelocType relocType) noexcept
ASMJIT_PROPAGATE(_relocations.willGrow(&_allocator)); ASMJIT_PROPAGATE(_relocations.willGrow(&_allocator));
uint32_t relocId = _relocations.size(); uint32_t relocId = _relocations.size();
if (ASMJIT_UNLIKELY(relocId == Globals::kInvalidId)) if (ASMJIT_UNLIKELY(relocId == Globals::kInvalidId)) {
return DebugUtils::errored(kErrorTooManyRelocations); return DebugUtils::errored(kErrorTooManyRelocations);
}
RelocEntry* re = _allocator.allocZeroedT<RelocEntry>(); RelocEntry* re = _allocator.allocZeroedT<RelocEntry>();
if (ASMJIT_UNLIKELY(!re)) if (ASMJIT_UNLIKELY(!re)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
re->_id = relocId; re->_id = relocId;
re->_relocType = relocType; re->_relocType = relocType;
@@ -798,8 +863,9 @@ static Error CodeHolder_evaluateExpression(CodeHolder* self, Expression* exp, ui
case ExpressionValueType::kLabel: { case ExpressionValueType::kLabel: {
LabelEntry* le = exp->value[i].label; LabelEntry* le = exp->value[i].label;
if (!le->isBound()) if (!le->isBound()) {
return DebugUtils::errored(kErrorExpressionLabelNotBound); return DebugUtils::errored(kErrorExpressionLabelNotBound);
}
v = le->section()->offset() + le->offset(); v = le->section()->offset() + le->offset();
break; break;
} }
@@ -863,14 +929,16 @@ Error CodeHolder::flatten() noexcept {
uint64_t realSize = section->realSize(); uint64_t realSize = section->realSize();
if (realSize) { if (realSize) {
uint64_t alignedOffset = Support::alignUp(offset, section->alignment()); uint64_t alignedOffset = Support::alignUp(offset, section->alignment());
if (ASMJIT_UNLIKELY(alignedOffset < offset)) if (ASMJIT_UNLIKELY(alignedOffset < offset)) {
return DebugUtils::errored(kErrorTooLarge); return DebugUtils::errored(kErrorTooLarge);
}
Support::FastUInt8 of = 0; Support::FastUInt8 of = 0;
offset = Support::addOverflow(alignedOffset, realSize, &of); offset = Support::addOverflow(alignedOffset, realSize, &of);
if (ASMJIT_UNLIKELY(of)) if (ASMJIT_UNLIKELY(of)) {
return DebugUtils::errored(kErrorTooLarge); return DebugUtils::errored(kErrorTooLarge);
}
} }
} }
@@ -879,13 +947,15 @@ Error CodeHolder::flatten() noexcept {
offset = 0; offset = 0;
for (Section* section : _sectionsByOrder) { for (Section* section : _sectionsByOrder) {
uint64_t realSize = section->realSize(); uint64_t realSize = section->realSize();
if (realSize) if (realSize) {
offset = Support::alignUp(offset, section->alignment()); offset = Support::alignUp(offset, section->alignment());
}
section->_offset = offset; section->_offset = offset;
// Make sure the previous section extends a bit to cover the alignment. // Make sure the previous section extends a bit to cover the alignment.
if (prev) if (prev) {
prev->_virtualSize = offset - prev->_offset; prev->_virtualSize = offset - prev->_offset;
}
prev = section; prev = section;
offset += realSize; offset += realSize;
@@ -908,16 +978,18 @@ size_t CodeHolder::codeSize() const noexcept {
} }
} }
if ((sizeof(uint64_t) > sizeof(size_t) && offset > uint64_t(SIZE_MAX)) || of) if ((sizeof(uint64_t) > sizeof(size_t) && offset > uint64_t(SIZE_MAX)) || of) {
return SIZE_MAX; return SIZE_MAX;
}
return size_t(offset); return size_t(offset);
} }
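These functions are usually driven in the following order when emitting to memory manually; a sketch with error handling omitted (real code would use VirtMem or JitAllocator rather than malloc):

#include <asmjit/core.h>
#include <cstdlib>
using namespace asmjit;

static void* emitToMemorySketch(CodeHolder& code) {
  code.flatten();                          // Assign non-overlapping section offsets.
  code.resolveUnresolvedLinks();           // Resolve cross-section label links.

  size_t size = code.codeSize();           // SIZE_MAX would indicate an overflow.
  void* p = std::malloc(size);
  if (!p) {
    return nullptr;
  }

  code.relocateToBase(uint64_t(uintptr_t(p)));
  code.copyFlattenedData(p, size, CopySectionFlags::kPadSectionBuffer);
  return p;                                // NOTE: not executable memory - sketch only.
}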
Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept { Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
// Base address must be provided. // Base address must be provided.
if (ASMJIT_UNLIKELY(baseAddress == Globals::kNoBaseAddress)) if (ASMJIT_UNLIKELY(baseAddress == Globals::kNoBaseAddress)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
_baseAddress = baseAddress; _baseAddress = baseAddress;
uint32_t addressSize = _environment.registerSize(); uint32_t addressSize = _environment.registerSize();
@@ -927,22 +999,23 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
uint8_t* addressTableEntryData = nullptr; uint8_t* addressTableEntryData = nullptr;
if (addressTableSection) { if (addressTableSection) {
ASMJIT_PROPAGATE( ASMJIT_PROPAGATE(reserveBuffer(&addressTableSection->_buffer, size_t(addressTableSection->virtualSize())));
reserveBuffer(&addressTableSection->_buffer, size_t(addressTableSection->virtualSize())));
addressTableEntryData = addressTableSection->_buffer.data(); addressTableEntryData = addressTableSection->_buffer.data();
} }
// Relocate all recorded locations. // Relocate all recorded locations.
for (const RelocEntry* re : _relocations) { for (const RelocEntry* re : _relocations) {
// Possibly deleted or optimized-out entry. // Possibly deleted or optimized-out entry.
if (re->relocType() == RelocType::kNone) if (re->relocType() == RelocType::kNone) {
continue; continue;
}
Section* sourceSection = sectionById(re->sourceSectionId()); Section* sourceSection = sectionById(re->sourceSectionId());
Section* targetSection = nullptr; Section* targetSection = nullptr;
if (re->targetSectionId() != Globals::kInvalidId) if (re->targetSectionId() != Globals::kInvalidId) {
targetSection = sectionById(re->targetSectionId()); targetSection = sectionById(re->targetSectionId());
}
uint64_t value = re->payload(); uint64_t value = re->payload();
uint64_t sectionOffset = sourceSection->offset(); uint64_t sectionOffset = sourceSection->offset();
@@ -951,8 +1024,9 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
// Make sure that the `RelocEntry` doesn't go out of bounds. // Make sure that the `RelocEntry` doesn't go out of bounds.
size_t regionSize = re->format().regionSize(); size_t regionSize = re->format().regionSize();
if (ASMJIT_UNLIKELY(re->sourceOffset() >= sourceSection->bufferSize() || if (ASMJIT_UNLIKELY(re->sourceOffset() >= sourceSection->bufferSize() ||
sourceSection->bufferSize() - size_t(re->sourceOffset()) < regionSize)) sourceSection->bufferSize() - size_t(re->sourceOffset()) < regionSize)) {
return DebugUtils::errored(kErrorInvalidRelocEntry); return DebugUtils::errored(kErrorInvalidRelocEntry);
}
uint8_t* buffer = sourceSection->data(); uint8_t* buffer = sourceSection->data();
@@ -970,8 +1044,9 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
case RelocType::kRelToAbs: { case RelocType::kRelToAbs: {
// Value is currently a relative offset from the start of its section. // Value is currently a relative offset from the start of its section.
// We have to convert it to an absolute offset (including base address). // We have to convert it to an absolute offset (including base address).
if (ASMJIT_UNLIKELY(!targetSection)) if (ASMJIT_UNLIKELY(!targetSection)) {
return DebugUtils::errored(kErrorInvalidRelocEntry); return DebugUtils::errored(kErrorInvalidRelocEntry);
}
//value += baseAddress + sectionOffset + sourceOffset + regionSize; //value += baseAddress + sectionOffset + sourceOffset + regionSize;
value += baseAddress + targetSection->offset(); value += baseAddress + targetSection->offset();
@@ -982,40 +1057,46 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
value -= baseAddress + sectionOffset + sourceOffset + regionSize; value -= baseAddress + sectionOffset + sourceOffset + regionSize;
// Sign extend as we are not interested in the high 32-bit word in a 32-bit address space. // Sign extend as we are not interested in the high 32-bit word in a 32-bit address space.
if (addressSize <= 4) if (addressSize <= 4) {
value = uint64_t(int64_t(int32_t(value & 0xFFFFFFFFu))); value = uint64_t(int64_t(int32_t(value & 0xFFFFFFFFu)));
else if (!Support::isInt32(int64_t(value))) }
else if (!Support::isInt32(int64_t(value))) {
return DebugUtils::errored(kErrorRelocOffsetOutOfRange); return DebugUtils::errored(kErrorRelocOffsetOutOfRange);
}
break; break;
} }
case RelocType::kX64AddressEntry: { case RelocType::kX64AddressEntry: {
size_t valueOffset = size_t(re->sourceOffset()) + re->format().valueOffset(); size_t valueOffset = size_t(re->sourceOffset()) + re->format().valueOffset();
if (re->format().valueSize() != 4 || valueOffset < 2) if (re->format().valueSize() != 4 || valueOffset < 2) {
return DebugUtils::errored(kErrorInvalidRelocEntry); return DebugUtils::errored(kErrorInvalidRelocEntry);
}
// First try whether a relative 32-bit displacement would work. // First try whether a relative 32-bit displacement would work.
value -= baseAddress + sectionOffset + sourceOffset + regionSize; value -= baseAddress + sectionOffset + sourceOffset + regionSize;
if (!Support::isInt32(int64_t(value))) { if (!Support::isInt32(int64_t(value))) {
// Relative 32-bit displacement is not possible, use '.addrtab' section. // Relative 32-bit displacement is not possible, use '.addrtab' section.
AddressTableEntry* atEntry = _addressTableEntries.get(re->payload()); AddressTableEntry* atEntry = _addressTableEntries.get(re->payload());
if (ASMJIT_UNLIKELY(!atEntry)) if (ASMJIT_UNLIKELY(!atEntry)) {
return DebugUtils::errored(kErrorInvalidRelocEntry); return DebugUtils::errored(kErrorInvalidRelocEntry);
}
// Cannot be null as we have just matched the `AddressTableEntry`. // Cannot be null as we have just matched the `AddressTableEntry`.
ASMJIT_ASSERT(addressTableSection != nullptr); ASMJIT_ASSERT(addressTableSection != nullptr);
if (!atEntry->hasAssignedSlot()) if (!atEntry->hasAssignedSlot()) {
atEntry->_slot = addressTableEntryCount++; atEntry->_slot = addressTableEntryCount++;
}
size_t atEntryIndex = size_t(atEntry->slot()) * addressSize; size_t atEntryIndex = size_t(atEntry->slot()) * addressSize;
uint64_t addrSrc = sectionOffset + sourceOffset + regionSize; uint64_t addrSrc = sectionOffset + sourceOffset + regionSize;
uint64_t addrDst = addressTableSection->offset() + uint64_t(atEntryIndex); uint64_t addrDst = addressTableSection->offset() + uint64_t(atEntryIndex);
value = addrDst - addrSrc; value = addrDst - addrSrc;
if (!Support::isInt32(int64_t(value))) if (!Support::isInt32(int64_t(value))) {
return DebugUtils::errored(kErrorRelocOffsetOutOfRange); return DebugUtils::errored(kErrorRelocOffsetOutOfRange);
}
// Bytes that replace [REX, OPCODE] bytes. // Bytes that replace [REX, OPCODE] bytes.
uint32_t byte0 = 0xFF; uint32_t byte0 = 0xFF;
@@ -1064,14 +1145,16 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
} }
Error CodeHolder::copySectionData(void* dst, size_t dstSize, uint32_t sectionId, CopySectionFlags copyFlags) noexcept { Error CodeHolder::copySectionData(void* dst, size_t dstSize, uint32_t sectionId, CopySectionFlags copyFlags) noexcept {
if (ASMJIT_UNLIKELY(!isSectionValid(sectionId))) if (ASMJIT_UNLIKELY(!isSectionValid(sectionId))) {
return DebugUtils::errored(kErrorInvalidSection); return DebugUtils::errored(kErrorInvalidSection);
}
Section* section = sectionById(sectionId); Section* section = sectionById(sectionId);
size_t bufferSize = section->bufferSize(); size_t bufferSize = section->bufferSize();
if (ASMJIT_UNLIKELY(dstSize < bufferSize)) if (ASMJIT_UNLIKELY(dstSize < bufferSize)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
memcpy(dst, section->data(), bufferSize); memcpy(dst, section->data(), bufferSize);
@@ -1086,14 +1169,16 @@ Error CodeHolder::copySectionData(void* dst, size_t dstSize, uint32_t sectionId,
Error CodeHolder::copyFlattenedData(void* dst, size_t dstSize, CopySectionFlags copyFlags) noexcept { Error CodeHolder::copyFlattenedData(void* dst, size_t dstSize, CopySectionFlags copyFlags) noexcept {
size_t end = 0; size_t end = 0;
for (Section* section : _sectionsByOrder) { for (Section* section : _sectionsByOrder) {
if (section->offset() > dstSize) if (section->offset() > dstSize) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
size_t bufferSize = section->bufferSize(); size_t bufferSize = section->bufferSize();
size_t offset = size_t(section->offset()); size_t offset = size_t(section->offset());
if (ASMJIT_UNLIKELY(dstSize - offset < bufferSize)) if (ASMJIT_UNLIKELY(dstSize - offset < bufferSize)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
uint8_t* dstTarget = static_cast<uint8_t*>(dst) + offset; uint8_t* dstTarget = static_cast<uint8_t*>(dst) + offset;
size_t paddingSize = 0; size_t paddingSize = 0;
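Similarly, a sketch of `copyFlattenedData()` usage, under the assumption that section offsets were already finalized (for example via `flatten()` and `relocateToBase()`):

#include <asmjit/core.h>
#include <vector>

// Sketch: copy all sections into one flat buffer laid out by their offsets.
std::vector<uint8_t> copyFlattened(asmjit::CodeHolder& code) {
  std::vector<uint8_t> buf(code.codeSize());
  asmjit::Error err = code.copyFlattenedData(buf.data(), buf.size(),
                                             asmjit::CopySectionFlags::kPadSectionBuffer);
  if (err != asmjit::kErrorOk)
    buf.clear();  // a section's offset or size did not fit into the destination
  return buf;
}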
@@ -175,34 +175,50 @@ public:
//! \{ //! \{
//! Returns the section id. //! Returns the section id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _id; } ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _id; }
//! Returns the section name, as a null terminated string. //! Returns the section name, as a null terminated string.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name.str; } ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name.str; }
//! Returns the section data. //! Returns the section data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* data() noexcept { return _buffer.data(); } ASMJIT_INLINE_NODEBUG uint8_t* data() noexcept { return _buffer.data(); }
//! \overload //! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const uint8_t* data() const noexcept { return _buffer.data(); } ASMJIT_INLINE_NODEBUG const uint8_t* data() const noexcept { return _buffer.data(); }
//! Returns the section flags. //! Returns the section flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SectionFlags flags() const noexcept { return _flags; } ASMJIT_INLINE_NODEBUG SectionFlags flags() const noexcept { return _flags; }
//! Tests whether the section has the given `flag`. //! Tests whether the section has the given `flag`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(SectionFlags flag) const noexcept { return Support::test(_flags, flag); } ASMJIT_INLINE_NODEBUG bool hasFlag(SectionFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Adds `flags` to the section flags. //! Adds `flags` to the section flags.
ASMJIT_INLINE_NODEBUG void addFlags(SectionFlags flags) noexcept { _flags |= flags; } ASMJIT_INLINE_NODEBUG void addFlags(SectionFlags flags) noexcept { _flags |= flags; }
//! Removes `flags` from the section flags. //! Removes `flags` from the section flags.
ASMJIT_INLINE_NODEBUG void clearFlags(SectionFlags flags) noexcept { _flags &= ~flags; } ASMJIT_INLINE_NODEBUG void clearFlags(SectionFlags flags) noexcept { _flags &= ~flags; }
//! Returns the minimum section alignment //! Returns the minimum section alignment
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; } ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; }
//! Sets the minimum section alignment //! Sets the minimum section alignment
ASMJIT_INLINE_NODEBUG void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; } ASMJIT_INLINE_NODEBUG void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; }
//! Returns the section order, which has a higher priority than section id. //! Returns the section order, which has a higher priority than section id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int32_t order() const noexcept { return _order; } ASMJIT_INLINE_NODEBUG int32_t order() const noexcept { return _order; }
//! Returns the section offset, relative to base. //! Returns the section offset, relative to base.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t offset() const noexcept { return _offset; } ASMJIT_INLINE_NODEBUG uint64_t offset() const noexcept { return _offset; }
//! Set the section offset. //! Set the section offset.
ASMJIT_INLINE_NODEBUG void setOffset(uint64_t offset) noexcept { _offset = offset; } ASMJIT_INLINE_NODEBUG void setOffset(uint64_t offset) noexcept { _offset = offset; }
@@ -212,18 +228,26 @@ public:
//! size returned by `bufferSize()` as the buffer stores real data emitted by assemblers or appended by users. //! size returned by `bufferSize()` as the buffer stores real data emitted by assemblers or appended by users.
//! //!
//! Use `realSize()` to get the real and final size of this section. //! Use `realSize()` to get the real and final size of this section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t virtualSize() const noexcept { return _virtualSize; } ASMJIT_INLINE_NODEBUG uint64_t virtualSize() const noexcept { return _virtualSize; }
//! Sets the virtual size of the section. //! Sets the virtual size of the section.
ASMJIT_INLINE_NODEBUG void setVirtualSize(uint64_t virtualSize) noexcept { _virtualSize = virtualSize; } ASMJIT_INLINE_NODEBUG void setVirtualSize(uint64_t virtualSize) noexcept { _virtualSize = virtualSize; }
//! Returns the buffer size of the section. //! Returns the buffer size of the section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t bufferSize() const noexcept { return _buffer.size(); } ASMJIT_INLINE_NODEBUG size_t bufferSize() const noexcept { return _buffer.size(); }
//! Returns the real size of the section calculated from virtual and buffer sizes. //! Returns the real size of the section calculated from virtual and buffer sizes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t realSize() const noexcept { return Support::max<uint64_t>(virtualSize(), bufferSize()); } ASMJIT_INLINE_NODEBUG uint64_t realSize() const noexcept { return Support::max<uint64_t>(virtualSize(), bufferSize()); }
//! Returns the `CodeBuffer` used by this section. //! Returns the `CodeBuffer` used by this section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CodeBuffer& buffer() noexcept { return _buffer; } ASMJIT_INLINE_NODEBUG CodeBuffer& buffer() noexcept { return _buffer; }
//! Returns the `CodeBuffer` used by this section (const). //! Returns the `CodeBuffer` used by this section (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const CodeBuffer& buffer() const noexcept { return _buffer; } ASMJIT_INLINE_NODEBUG const CodeBuffer& buffer() const noexcept { return _buffer; }
//! \} //! \}
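A small sketch of the size relationship described by the accessors above, assuming a section that reserved 4096 zero-initialized bytes but emitted only 100 bytes into its buffer:

#include <asmjit/core.h>
#include <algorithm>

// Sketch: realSize() is documented as max(virtualSize(), bufferSize()),
// e.g. max(4096, 100) == 4096 for the section described in the lead-in.
uint64_t computedRealSize(const asmjit::Section& s) {
  return std::max<uint64_t>(s.virtualSize(), s.bufferSize());
}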
@@ -256,15 +280,25 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t address() const noexcept { return _address; } ASMJIT_INLINE_NODEBUG uint64_t address() const noexcept { return _address; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t slot() const noexcept { return _slot; } ASMJIT_INLINE_NODEBUG uint32_t slot() const noexcept { return _slot; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAssignedSlot() const noexcept { return _slot != 0xFFFFFFFFu; } ASMJIT_INLINE_NODEBUG bool hasAssignedSlot() const noexcept { return _slot != 0xFFFFFFFFu; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator<(const AddressTableEntry& other) const noexcept { return _address < other._address; } ASMJIT_INLINE_NODEBUG bool operator<(const AddressTableEntry& other) const noexcept { return _address < other._address; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator>(const AddressTableEntry& other) const noexcept { return _address > other._address; } ASMJIT_INLINE_NODEBUG bool operator>(const AddressTableEntry& other) const noexcept { return _address > other._address; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator<(uint64_t queryAddress) const noexcept { return _address < queryAddress; } ASMJIT_INLINE_NODEBUG bool operator<(uint64_t queryAddress) const noexcept { return _address < queryAddress; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator>(uint64_t queryAddress) const noexcept { return _address > queryAddress; } ASMJIT_INLINE_NODEBUG bool operator>(uint64_t queryAddress) const noexcept { return _address > queryAddress; }
//! \} //! \}
@@ -434,19 +468,32 @@ struct OffsetFormat {
} }
//! Returns flags. //! Returns flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t flags() const noexcept { return _flags; } ASMJIT_INLINE_NODEBUG uint32_t flags() const noexcept { return _flags; }
//! Returns the size of the region/instruction where the offset is encoded. //! Returns the size of the region/instruction where the offset is encoded.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t regionSize() const noexcept { return _regionSize; } ASMJIT_INLINE_NODEBUG uint32_t regionSize() const noexcept { return _regionSize; }
//! Returns the offset of the word relative to the start of the region where the offset is. //! Returns the offset of the word relative to the start of the region where the offset is.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t valueOffset() const noexcept { return _valueOffset; } ASMJIT_INLINE_NODEBUG uint32_t valueOffset() const noexcept { return _valueOffset; }
//! Returns the size of the data-type (word) that contains the offset, in bytes. //! Returns the size of the data-type (word) that contains the offset, in bytes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t valueSize() const noexcept { return _valueSize; } ASMJIT_INLINE_NODEBUG uint32_t valueSize() const noexcept { return _valueSize; }
//! Returns the count of bits of the offset value in the data it's stored in. //! Returns the count of bits of the offset value in the data it's stored in.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t immBitCount() const noexcept { return _immBitCount; } ASMJIT_INLINE_NODEBUG uint32_t immBitCount() const noexcept { return _immBitCount; }
//! Returns the bit-shift of the offset value in the data it's stored in. //! Returns the bit-shift of the offset value in the data it's stored in.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t immBitShift() const noexcept { return _immBitShift; } ASMJIT_INLINE_NODEBUG uint32_t immBitShift() const noexcept { return _immBitShift; }
//! Returns the number of least significant bits of the offset value, that must be zero and that are not part of //! Returns the number of least significant bits of the offset value, that must be zero and that are not part of
//! the encoded data. //! the encoded data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t immDiscardLsb() const noexcept { return _immDiscardLsb; } ASMJIT_INLINE_NODEBUG uint32_t immDiscardLsb() const noexcept { return _immDiscardLsb; }
//! Resets this offset format to a simple data value of `dataSize` bytes. //! Resets this offset format to a simple data value of `dataSize` bytes.
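To make the immBitCount/immBitShift/immDiscardLsb accessors above concrete, here is a standalone illustration for a 26-bit branch-style immediate stored in a 32-bit word (the concrete field values are assumptions chosen for the example):

#include <cstdint>

// Illustration: valueSize() == 4, immBitCount() == 26, immBitShift() == 0,
// immDiscardLsb() == 2. Inserting a byte offset into such a word:
bool insertBranchImm26(uint32_t* insn, int64_t byteOffset) {
  if (byteOffset & 0x3)                          // the two discarded LSBs must be zero
    return false;
  int64_t imm = byteOffset >> 2;                 // drop immDiscardLsb bits
  if (imm < -(int64_t(1) << 25) || imm >= (int64_t(1) << 25))
    return false;                                // must fit into 26 signed bits
  *insn |= (uint32_t(imm) & 0x03FFFFFFu);        // mask to immBitCount, shift by immBitShift (0)
  return true;
}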
@@ -536,17 +583,28 @@ struct RelocEntry {
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _id; } ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _id; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RelocType relocType() const noexcept { return _relocType; } ASMJIT_INLINE_NODEBUG RelocType relocType() const noexcept { return _relocType; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const OffsetFormat& format() const noexcept { return _format; } ASMJIT_INLINE_NODEBUG const OffsetFormat& format() const noexcept { return _format; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t sourceSectionId() const noexcept { return _sourceSectionId; } ASMJIT_INLINE_NODEBUG uint32_t sourceSectionId() const noexcept { return _sourceSectionId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t targetSectionId() const noexcept { return _targetSectionId; } ASMJIT_INLINE_NODEBUG uint32_t targetSectionId() const noexcept { return _targetSectionId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t sourceOffset() const noexcept { return _sourceOffset; } ASMJIT_INLINE_NODEBUG uint64_t sourceOffset() const noexcept { return _sourceOffset; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t payload() const noexcept { return _payload; } ASMJIT_INLINE_NODEBUG uint64_t payload() const noexcept { return _payload; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Expression* payloadAsExpression() const noexcept { ASMJIT_INLINE_NODEBUG Expression* payloadAsExpression() const noexcept {
return reinterpret_cast<Expression*>(uintptr_t(_payload)); return reinterpret_cast<Expression*>(uintptr_t(_payload));
} }
@@ -603,15 +661,14 @@ public:
//! \name Constants //! \name Constants
//! \{ //! \{
enum : uint32_t { //! SSO size of \ref _name.
//! SSO size of \ref _name. //!
//! //! \cond INTERNAL
//! \cond INTERNAL //! Let's round the size of `LabelEntry` to 64 bytes (as `ZoneAllocator` has granularity of 32 bytes anyway). This
//! Let's round the size of `LabelEntry` to 64 bytes (as `ZoneAllocator` has granularity of 32 bytes anyway). This //! gives `_name` the remaining space, which should be 16 bytes on 64-bit and 28 bytes on 32-bit architectures.
//! gives `_name` the remaining space, which should be 16 bytes on 64-bit and 28 bytes on 32-bit architectures. //! \endcond
//! \endcond static inline constexpr uint32_t kStaticNameSize =
kStaticNameSize = 64 - (sizeof(ZoneHashNode) + 8 + sizeof(Section*) + sizeof(size_t) + sizeof(LabelLink*)) 64 - (sizeof(ZoneHashNode) + 8 + sizeof(Section*) + sizeof(size_t) + sizeof(LabelLink*));
};
//! \} //! \}
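A worked instance of the constant above for a 64-bit target, assuming `sizeof(ZoneHashNode) == 16` (one pointer plus two 32-bit fields) and 8-byte pointers; the sizes are illustrative assumptions, the formula itself is the one in the diff:

// 64 - (sizeof(ZoneHashNode) + 8 + sizeof(Section*) + sizeof(size_t) + sizeof(LabelLink*))
// = 64 - (16 + 8 + 8 + 8 + 8) = 16, matching the "16 bytes on 64-bit" note above.
static_assert(64 - (16 + 8 + 8 + 8 + 8) == 16, "illustrative arithmetic only");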
@@ -642,52 +699,68 @@ public:
// compiler targeting 64-bit CPU will add to align the structure to 64-bits. // compiler targeting 64-bit CPU will add to align the structure to 64-bits.
//! Returns label id. //! Returns label id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _customData; } ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _customData; }
//! Sets label id (internal, used only by `CodeHolder`). //! Sets label id (internal, used only by `CodeHolder`).
ASMJIT_INLINE_NODEBUG void _setId(uint32_t id) noexcept { _customData = id; } ASMJIT_INLINE_NODEBUG void _setId(uint32_t id) noexcept { _customData = id; }
//! Returns label type. //! Returns label type.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG LabelType type() const noexcept { return _type; } ASMJIT_INLINE_NODEBUG LabelType type() const noexcept { return _type; }
//! Tests whether the label has a parent label. //! Tests whether the label has a parent label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasParent() const noexcept { return _parentId != Globals::kInvalidId; } ASMJIT_INLINE_NODEBUG bool hasParent() const noexcept { return _parentId != Globals::kInvalidId; }
//! Returns label's parent id. //! Returns label's parent id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t parentId() const noexcept { return _parentId; } ASMJIT_INLINE_NODEBUG uint32_t parentId() const noexcept { return _parentId; }
//! Returns the section where the label was bound. //! Returns the section where the label was bound.
//! //!
//! If the label was not yet bound the return value is `nullptr`. //! If the label was not yet bound the return value is `nullptr`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Section* section() const noexcept { return _section; } ASMJIT_INLINE_NODEBUG Section* section() const noexcept { return _section; }
//! Tests whether the label has name. //! Tests whether the label has name.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasName() const noexcept { return !_name.empty(); } ASMJIT_INLINE_NODEBUG bool hasName() const noexcept { return !_name.empty(); }
//! Returns the label's name. //! Returns the label's name.
//! //!
//! \note Local labels will return their local name without their parent part, for example ".L1". //! \note Local labels will return their local name without their parent part, for example ".L1".
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name.data(); } ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name.data(); }
//! Returns size of label's name. //! Returns size of label's name.
//! //!
//! \note Label name is always null terminated, so you can use `strlen()` to get it, however, it's also cached in //! \note Label name is always null terminated, so you can use `strlen()` to get it, however, it's also cached in
//! `LabelEntry` itself, so if you want to know the size the fastest way is to call `LabelEntry::nameSize()`. //! `LabelEntry` itself, so if you want to know the size the fastest way is to call `LabelEntry::nameSize()`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t nameSize() const noexcept { return _name.size(); } ASMJIT_INLINE_NODEBUG uint32_t nameSize() const noexcept { return _name.size(); }
//! Returns links associated with this label. //! Returns links associated with this label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG LabelLink* links() const noexcept { return _links; } ASMJIT_INLINE_NODEBUG LabelLink* links() const noexcept { return _links; }
//! Tests whether the label is bound. //! Tests whether the label is bound.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isBound() const noexcept { return _section != nullptr; } ASMJIT_INLINE_NODEBUG bool isBound() const noexcept { return _section != nullptr; }
//! Tests whether the label is bound to the given `section`. //! Tests whether the label is bound to the given `section`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isBoundTo(Section* section) const noexcept { return _section == section; } ASMJIT_INLINE_NODEBUG bool isBoundTo(Section* section) const noexcept { return _section == section; }
//! Returns the label offset (only useful if the label is bound). //! Returns the label offset (only useful if the label is bound).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t offset() const noexcept { return _offset; } ASMJIT_INLINE_NODEBUG uint64_t offset() const noexcept { return _offset; }
//! Returns the hash-value of label's name and its parent label (if any). //! Returns the hash-value of label's name and its parent label (if any).
//! //!
//! Label hash is calculated as `HASH(Name) ^ ParentId`. The hash function is implemented in `Support::hashString()` //! Label hash is calculated as `HASH(Name) ^ ParentId`. The hash function is implemented in `Support::hashString()`
//! and `Support::hashRound()`. //! and `Support::hashRound()`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t hashCode() const noexcept { return _hashCode; } ASMJIT_INLINE_NODEBUG uint32_t hashCode() const noexcept { return _hashCode; }
//! \} //! \}
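A minimal sketch of the hashing scheme described above; the FNV-1a body stands in for `Support::hashString()` / `Support::hashRound()`, whose exact mixing is not shown in this diff:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Sketch: hash the label name, then mix in the parent id ("HASH(Name) ^ ParentId").
static uint32_t labelHashSketch(const char* name, uint32_t parentId) {
  uint32_t h = 2166136261u;                    // FNV-1a offset basis (stand-in hash)
  for (size_t i = 0, n = std::strlen(name); i < n; i++)
    h = (h ^ uint8_t(name[i])) * 16777619u;    // FNV-1a prime
  return h ^ parentId;
}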
@@ -776,6 +849,7 @@ public:
//! Tests whether the `CodeHolder` has been initialized. //! Tests whether the `CodeHolder` has been initialized.
//! //!
//! Emitters can be only attached to initialized `CodeHolder` instances. //! Emitters can be only attached to initialized `CodeHolder` instances.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _environment.isInitialized(); } ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _environment.isInitialized(); }
//! Initializes CodeHolder to hold code described by the given `environment` and `baseAddress`. //! Initializes CodeHolder to hold code described by the given `environment` and `baseAddress`.
@@ -805,6 +879,7 @@ public:
//! \note This should be only used for AsmJit's purposes. Code holder uses arena allocator to allocate everything, //! \note This should be only used for AsmJit's purposes. Code holder uses arena allocator to allocate everything,
//! so anything allocated through this allocator will be invalidated by \ref CodeHolder::reset() or by CodeHolder's //! so anything allocated through this allocator will be invalidated by \ref CodeHolder::reset() or by CodeHolder's
//! destructor. //! destructor.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept { return const_cast<ZoneAllocator*>(&_allocator); } ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept { return const_cast<ZoneAllocator*>(&_allocator); }
//! \} //! \}
@@ -813,19 +888,27 @@ public:
//! \{ //! \{
//! Returns the target environment information. //! Returns the target environment information.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Environment& environment() const noexcept { return _environment; } ASMJIT_INLINE_NODEBUG const Environment& environment() const noexcept { return _environment; }
//! Returns the target architecture. //! Returns the target architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return environment().arch(); } ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return environment().arch(); }
//! Returns the target sub-architecture. //! Returns the target sub-architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return environment().subArch(); } ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return environment().subArch(); }
//! Returns the minimum CPU features of the target architecture. //! Returns the minimum CPU features of the target architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const CpuFeatures& cpuFeatures() const noexcept { return _cpuFeatures; } ASMJIT_INLINE_NODEBUG const CpuFeatures& cpuFeatures() const noexcept { return _cpuFeatures; }
//! Tests whether a static base-address is set. //! Tests whether a static base-address is set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasBaseAddress() const noexcept { return _baseAddress != Globals::kNoBaseAddress; } ASMJIT_INLINE_NODEBUG bool hasBaseAddress() const noexcept { return _baseAddress != Globals::kNoBaseAddress; }
//! Returns a static base-address or \ref Globals::kNoBaseAddress, if not set. //! Returns a static base-address or \ref Globals::kNoBaseAddress, if not set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t baseAddress() const noexcept { return _baseAddress; } ASMJIT_INLINE_NODEBUG uint64_t baseAddress() const noexcept { return _baseAddress; }
//! \} //! \}
@@ -834,6 +917,7 @@ public:
//! \{ //! \{
//! Returns a vector of attached emitters. //! Returns a vector of attached emitters.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<BaseEmitter*>& emitters() const noexcept { return _emitters; } ASMJIT_INLINE_NODEBUG const ZoneVector<BaseEmitter*>& emitters() const noexcept { return _emitters; }
//! \} //! \}
@@ -842,6 +926,7 @@ public:
//! \{ //! \{
//! Returns the attached logger. //! Returns the attached logger.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Logger* logger() const noexcept { return _logger; } ASMJIT_INLINE_NODEBUG Logger* logger() const noexcept { return _logger; }
//! Attaches a `logger` to CodeHolder and propagates it to all attached emitters. //! Attaches a `logger` to CodeHolder and propagates it to all attached emitters.
ASMJIT_API void setLogger(Logger* logger) noexcept; ASMJIT_API void setLogger(Logger* logger) noexcept;
@@ -852,8 +937,10 @@ public:
//! \{ //! \{
//! Tests whether the CodeHolder has an attached error handler, see \ref ErrorHandler. //! Tests whether the CodeHolder has an attached error handler, see \ref ErrorHandler.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; } ASMJIT_INLINE_NODEBUG bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
//! Returns the attached error handler. //! Returns the attached error handler.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ErrorHandler* errorHandler() const noexcept { return _errorHandler; } ASMJIT_INLINE_NODEBUG ErrorHandler* errorHandler() const noexcept { return _errorHandler; }
//! Attach an error handler to this `CodeHolder`. //! Attach an error handler to this `CodeHolder`.
ASMJIT_API void setErrorHandler(ErrorHandler* errorHandler) noexcept; ASMJIT_API void setErrorHandler(ErrorHandler* errorHandler) noexcept;
@@ -881,13 +968,19 @@ public:
//! \{ //! \{
//! Returns an array of `Section*` records. //! Returns an array of `Section*` records.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<Section*>& sections() const noexcept { return _sections; } ASMJIT_INLINE_NODEBUG const ZoneVector<Section*>& sections() const noexcept { return _sections; }
//! Returns an array of `Section*` records sorted according to section order first, then section id. //! Returns an array of `Section*` records sorted according to section order first, then section id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<Section*>& sectionsByOrder() const noexcept { return _sectionsByOrder; } ASMJIT_INLINE_NODEBUG const ZoneVector<Section*>& sectionsByOrder() const noexcept { return _sectionsByOrder; }
//! Returns the number of sections. //! Returns the number of sections.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t sectionCount() const noexcept { return _sections.size(); } ASMJIT_INLINE_NODEBUG uint32_t sectionCount() const noexcept { return _sections.size(); }
//! Tests whether the given `sectionId` is valid. //! Tests whether the given `sectionId` is valid.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isSectionValid(uint32_t sectionId) const noexcept { return sectionId < _sections.size(); } ASMJIT_INLINE_NODEBUG bool isSectionValid(uint32_t sectionId) const noexcept { return sectionId < _sections.size(); }
//! Creates a new section and return its pointer in `sectionOut`. //! Creates a new section and return its pointer in `sectionOut`.
@@ -896,19 +989,23 @@ public:
ASMJIT_API Error newSection(Section** sectionOut, const char* name, size_t nameSize = SIZE_MAX, SectionFlags flags = SectionFlags::kNone, uint32_t alignment = 1, int32_t order = 0) noexcept; ASMJIT_API Error newSection(Section** sectionOut, const char* name, size_t nameSize = SIZE_MAX, SectionFlags flags = SectionFlags::kNone, uint32_t alignment = 1, int32_t order = 0) noexcept;
//! Returns a section entry of the given index. //! Returns a section entry of the given index.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Section* sectionById(uint32_t sectionId) const noexcept { return _sections[sectionId]; } ASMJIT_INLINE_NODEBUG Section* sectionById(uint32_t sectionId) const noexcept { return _sections[sectionId]; }
//! Returns section-id that matches the given `name`. //! Returns section-id that matches the given `name`.
//! //!
//! If there is no such section `Section::kInvalidId` is returned. //! If there is no such section `Section::kInvalidId` is returned.
[[nodiscard]]
ASMJIT_API Section* sectionByName(const char* name, size_t nameSize = SIZE_MAX) const noexcept; ASMJIT_API Section* sectionByName(const char* name, size_t nameSize = SIZE_MAX) const noexcept;
//! Returns '.text' section (section that commonly represents code). //! Returns '.text' section (section that commonly represents code).
//! //!
//! \note Text section is always the first section in \ref CodeHolder::sections() array. //! \note Text section is always the first section in \ref CodeHolder::sections() array.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Section* textSection() const noexcept { return _sections[0]; } ASMJIT_INLINE_NODEBUG Section* textSection() const noexcept { return _sections[0]; }
//! Tests whether '.addrtab' section exists. //! Tests whether '.addrtab' section exists.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAddressTable() const noexcept { return _addressTableSection != nullptr; } ASMJIT_INLINE_NODEBUG bool hasAddressTable() const noexcept { return _addressTableSection != nullptr; }
//! Returns '.addrtab' section. //! Returns '.addrtab' section.
@@ -917,10 +1014,12 @@ public:
//! addresses that cannot be encoded in instructions like 'jmp' or 'call'. //! addresses that cannot be encoded in instructions like 'jmp' or 'call'.
//! //!
//! \note This section is created on demand, the returned pointer can be null. //! \note This section is created on demand, the returned pointer can be null.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Section* addressTableSection() const noexcept { return _addressTableSection; } ASMJIT_INLINE_NODEBUG Section* addressTableSection() const noexcept { return _addressTableSection; }
//! Ensures that '.addrtab' section exists (creates it if it doesn't) and //! Ensures that '.addrtab' section exists (creates it if it doesn't) and
//! returns it. Can return `nullptr` on out of memory condition. //! returns it. Can return `nullptr` on out of memory condition.
[[nodiscard]]
ASMJIT_API Section* ensureAddressTableSection() noexcept; ASMJIT_API Section* ensureAddressTableSection() noexcept;
//! Used to add an address to an address table. //! Used to add an address to an address table.
@@ -939,22 +1038,27 @@ public:
//! \{ //! \{
//! Returns array of `LabelEntry*` records. //! Returns array of `LabelEntry*` records.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<LabelEntry*>& labelEntries() const noexcept { return _labelEntries; } ASMJIT_INLINE_NODEBUG const ZoneVector<LabelEntry*>& labelEntries() const noexcept { return _labelEntries; }
//! Returns number of labels created. //! Returns number of labels created.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t labelCount() const noexcept { return _labelEntries.size(); } ASMJIT_INLINE_NODEBUG uint32_t labelCount() const noexcept { return _labelEntries.size(); }
//! Tests whether the label having `id` is valid (i.e. created by `newLabelEntry()`). //! Tests whether the label having `id` is valid (i.e. created by `newLabelEntry()`).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLabelValid(uint32_t labelId) const noexcept { ASMJIT_INLINE_NODEBUG bool isLabelValid(uint32_t labelId) const noexcept {
return labelId < _labelEntries.size(); return labelId < _labelEntries.size();
} }
//! Tests whether the `label` is valid (i.e. created by `newLabelEntry()`). //! Tests whether the `label` is valid (i.e. created by `newLabelEntry()`).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLabelValid(const Label& label) const noexcept { ASMJIT_INLINE_NODEBUG bool isLabelValid(const Label& label) const noexcept {
return label.id() < _labelEntries.size(); return label.id() < _labelEntries.size();
} }
//! \overload //! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLabelBound(uint32_t labelId) const noexcept { ASMJIT_INLINE_NODEBUG bool isLabelBound(uint32_t labelId) const noexcept {
return isLabelValid(labelId) && _labelEntries[labelId]->isBound(); return isLabelValid(labelId) && _labelEntries[labelId]->isBound();
} }
@@ -962,16 +1066,19 @@ public:
//! Tests whether the `label` is already bound. //! Tests whether the `label` is already bound.
//! //!
//! Returns `false` if the `label` is not valid. //! Returns `false` if the `label` is not valid.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLabelBound(const Label& label) const noexcept { ASMJIT_INLINE_NODEBUG bool isLabelBound(const Label& label) const noexcept {
return isLabelBound(label.id()); return isLabelBound(label.id());
} }
//! Returns LabelEntry of the given label `id`. //! Returns LabelEntry of the given label `id`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG LabelEntry* labelEntry(uint32_t labelId) const noexcept { ASMJIT_INLINE_NODEBUG LabelEntry* labelEntry(uint32_t labelId) const noexcept {
return isLabelValid(labelId) ? _labelEntries[labelId] : static_cast<LabelEntry*>(nullptr); return isLabelValid(labelId) ? _labelEntries[labelId] : static_cast<LabelEntry*>(nullptr);
} }
//! Returns LabelEntry of the given `label`. //! Returns LabelEntry of the given `label`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG LabelEntry* labelEntry(const Label& label) const noexcept { ASMJIT_INLINE_NODEBUG LabelEntry* labelEntry(const Label& label) const noexcept {
return labelEntry(label.id()); return labelEntry(label.id());
} }
@@ -980,12 +1087,14 @@ public:
//! //!
//! The offset returned is relative to the start of the section. Zero offset is returned for unbound labels, //! The offset returned is relative to the start of the section. Zero offset is returned for unbound labels,
//! which is their initial offset value. //! which is their initial offset value.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t labelOffset(uint32_t labelId) const noexcept { ASMJIT_INLINE_NODEBUG uint64_t labelOffset(uint32_t labelId) const noexcept {
ASMJIT_ASSERT(isLabelValid(labelId)); ASMJIT_ASSERT(isLabelValid(labelId));
return _labelEntries[labelId]->offset(); return _labelEntries[labelId]->offset();
} }
//! \overload //! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t labelOffset(const Label& label) const noexcept { ASMJIT_INLINE_NODEBUG uint64_t labelOffset(const Label& label) const noexcept {
return labelOffset(label.id()); return labelOffset(label.id());
} }
@@ -994,6 +1103,7 @@ public:
//! //!
//! \remarks The offset of the section where the label is bound must be valid in order to use this function, //! \remarks The offset of the section where the label is bound must be valid in order to use this function,
//! otherwise the value returned will not be reliable. //! otherwise the value returned will not be reliable.
[[nodiscard]]
inline uint64_t labelOffsetFromBase(uint32_t labelId) const noexcept { inline uint64_t labelOffsetFromBase(uint32_t labelId) const noexcept {
ASMJIT_ASSERT(isLabelValid(labelId)); ASMJIT_ASSERT(isLabelValid(labelId));
const LabelEntry* le = _labelEntries[labelId]; const LabelEntry* le = _labelEntries[labelId];
@@ -1001,6 +1111,7 @@ public:
} }
//! \overload //! \overload
[[nodiscard]]
inline uint64_t labelOffsetFromBase(const Label& label) const noexcept { inline uint64_t labelOffsetFromBase(const Label& label) const noexcept {
return labelOffsetFromBase(label.id()); return labelOffsetFromBase(label.id());
} }
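A hedged sketch of what `labelOffsetFromBase()` computes for a bound label, assuming section offsets were already assigned (for example after `flatten()`):

#include <asmjit/core.h>

// Sketch: base-relative offset = offset of the bound section + label's offset
// within that section; unbound labels are not meaningful here.
uint64_t labelOffsetFromBaseSketch(const asmjit::CodeHolder& code, const asmjit::Label& label) {
  const asmjit::LabelEntry* le = code.labelEntry(label);
  return (le && le->isBound()) ? le->section()->offset() + le->offset() : 0;
}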
@@ -1031,6 +1142,7 @@ public:
//! //!
//! If the named label doesn't exist, a default constructed \ref Label is returned, //! If the named label doesn't exist, a default constructed \ref Label is returned,
//! which has its id set to \ref Globals::kInvalidId. //! which has its id set to \ref Globals::kInvalidId.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept { ASMJIT_INLINE_NODEBUG Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept {
return Label(labelIdByName(name, nameSize, parentId)); return Label(labelIdByName(name, nameSize, parentId));
} }
@@ -1038,16 +1150,21 @@ public:
//! Returns a label id by name. //! Returns a label id by name.
//! //!
//! If the named label doesn't exist \ref Globals::kInvalidId is returned. //! If the named label doesn't exist \ref Globals::kInvalidId is returned.
[[nodiscard]]
ASMJIT_API uint32_t labelIdByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept; ASMJIT_API uint32_t labelIdByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept;
//! Tests whether there are any unresolved label links. //! Tests whether there are any unresolved label links.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasUnresolvedLinks() const noexcept { return _unresolvedLinkCount != 0; } ASMJIT_INLINE_NODEBUG bool hasUnresolvedLinks() const noexcept { return _unresolvedLinkCount != 0; }
//! Returns the number of label links, which are unresolved. //! Returns the number of label links, which are unresolved.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t unresolvedLinkCount() const noexcept { return _unresolvedLinkCount; } ASMJIT_INLINE_NODEBUG size_t unresolvedLinkCount() const noexcept { return _unresolvedLinkCount; }
//! Creates a new label-link used to store information about yet unbound labels. //! Creates a new label-link used to store information about yet unbound labels.
//! //!
//! Returns `null` if the allocation failed. //! Returns `null` if the allocation failed.
[[nodiscard]]
ASMJIT_API LabelLink* newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel, const OffsetFormat& format) noexcept; ASMJIT_API LabelLink* newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel, const OffsetFormat& format) noexcept;
//! Resolves cross-section links (`LabelLink`) associated with each label that was used as a destination in code //! Resolves cross-section links (`LabelLink`) associated with each label that was used as a destination in code
@@ -1066,11 +1183,15 @@ public:
//! \{ //! \{
//! Tests whether the code contains relocation entries. //! Tests whether the code contains relocation entries.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRelocEntries() const noexcept { return !_relocations.empty(); } ASMJIT_INLINE_NODEBUG bool hasRelocEntries() const noexcept { return !_relocations.empty(); }
//! Returns array of `RelocEntry*` records. //! Returns array of `RelocEntry*` records.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<RelocEntry*>& relocEntries() const noexcept { return _relocations; } ASMJIT_INLINE_NODEBUG const ZoneVector<RelocEntry*>& relocEntries() const noexcept { return _relocations; }
//! Returns a RelocEntry of the given `id`. //! Returns a RelocEntry of the given `id`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RelocEntry* relocEntry(uint32_t id) const noexcept { return _relocations[id]; } ASMJIT_INLINE_NODEBUG RelocEntry* relocEntry(uint32_t id) const noexcept { return _relocations[id]; }
//! Creates a new relocation entry of type `relocType`. //! Creates a new relocation entry of type `relocType`.
@@ -1093,6 +1214,7 @@ public:
//! \note All sections will be iterated over and the code size returned would represent the minimum code size of //! \note All sections will be iterated over and the code size returned would represent the minimum code size of
//! all combined sections after applying minimum alignment. Code size may decrease after calling `flatten()` and //! all combined sections after applying minimum alignment. Code size may decrease after calling `flatten()` and
//! `relocateToBase()`. //! `relocateToBase()`.
[[nodiscard]]
ASMJIT_API size_t codeSize() const noexcept; ASMJIT_API size_t codeSize() const noexcept;
//! Relocates the code to the given `baseAddress`. //! Relocates the code to the given `baseAddress`.
@@ -16,8 +16,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
uint32_t discardLsb = format.immDiscardLsb(); uint32_t discardLsb = format.immDiscardLsb();
// Invalid offset (should not happen). // Invalid offset (should not happen).
if (!bitCount || bitCount > format.valueSize() * 8u) if (!bitCount || bitCount > format.valueSize() * 8u) {
return false; return false;
}
uint32_t value; uint32_t value;
uint32_t u = 0; uint32_t u = 0;
@@ -27,8 +28,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
// absolute value. // absolute value.
if (format.hasSignBit()) { if (format.hasSignBit()) {
u = uint32_t(offset64 >= 0); u = uint32_t(offset64 >= 0);
if (u == 0) if (u == 0) {
offset64 = -offset64; offset64 = -offset64;
}
unsignedLogic = true; unsignedLogic = true;
} }
@@ -36,30 +38,35 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
if (unsignedLogic) { if (unsignedLogic) {
if (discardLsb) { if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32); ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) {
return false; return false;
}
offset64 = int64_t(uint64_t(offset64) >> discardLsb); offset64 = int64_t(uint64_t(offset64) >> discardLsb);
} }
value = uint32_t(offset64 & Support::lsbMask<uint32_t>(bitCount)); value = uint32_t(offset64 & Support::lsbMask<uint32_t>(bitCount));
if (value != offset64) if (value != offset64) {
return false; return false;
}
} }
else { else {
// The rest of OffsetType options are all signed. // The rest of OffsetType options are all signed.
if (discardLsb) { if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32); ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) {
return false; return false;
}
offset64 >>= discardLsb; offset64 >>= discardLsb;
} }
if (!Support::isInt32(offset64)) if (!Support::isInt32(offset64)) {
return false; return false;
}
value = uint32_t(int32_t(offset64)); value = uint32_t(int32_t(offset64));
if (!Support::isEncodableOffset32(int32_t(value), bitCount)) if (!Support::isEncodableOffset32(int32_t(value), bitCount)) {
return false; return false;
}
} }
switch (format.type()) { switch (format.type()) {
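A standalone sketch of the sign-bit ('u') convention handled above: AArch32-style offsets are encoded as a magnitude plus an add/subtract bit rather than as two's complement:

#include <cstdint>

struct UOffset {
  uint32_t u;          // 1 = add, 0 = subtract
  uint32_t magnitude;  // absolute value of the offset
};

// Sketch of the split performed before the unsigned encoding path above.
static UOffset splitUOffset(int64_t offset) {
  UOffset r;
  r.u = uint32_t(offset >= 0);
  r.magnitude = uint32_t(offset >= 0 ? offset : -offset);
  return r;
}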
@@ -72,8 +79,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
// Opcode: {.....|imm:1|..N.N|......|imm:3|....|imm:8} // Opcode: {.....|imm:1|..N.N|......|imm:3|....|imm:8}
case OffsetType::kThumb32_ADR: { case OffsetType::kThumb32_ADR: {
// Sanity checks. // Sanity checks.
if (format.valueSize() != 4 || bitCount != 12 || bitShift != 0) if (format.valueSize() != 4 || bitCount != 12 || bitShift != 0) {
return false; return false;
}
uint32_t imm8 = (value & 0x00FFu); uint32_t imm8 = (value & 0x00FFu);
uint32_t imm3 = (value & 0x0700u) << (12 - 8); uint32_t imm3 = (value & 0x0700u) << (12 - 8);
@@ -88,13 +96,14 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
case OffsetType::kThumb32_BLX: case OffsetType::kThumb32_BLX:
// The calculation is the same as `B`, but the first LSB bit must be zero, so account for that. // The calculation is the same as `B`, but the first LSB bit must be zero, so account for that.
value <<= 1; value <<= 1;
ASMJIT_FALLTHROUGH; [[fallthrough]];
// Opcode: {....|.|imm[23]|imm[20:11]|..|ja|.|jb|imm[10:0]} // Opcode: {....|.|imm[23]|imm[20:11]|..|ja|.|jb|imm[10:0]}
case OffsetType::kThumb32_B: { case OffsetType::kThumb32_B: {
// Sanity checks. // Sanity checks.
if (format.valueSize() != 4) if (format.valueSize() != 4) {
return false; return false;
}
uint32_t ia = (value & 0x0007FFu); uint32_t ia = (value & 0x0007FFu);
uint32_t ib = (value & 0x1FF800u) << (16 - 11); uint32_t ib = (value & 0x1FF800u) << (16 - 11);
@@ -109,8 +118,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
// Opcode: {....|.|imm[19]|....|imm[16:11]|..|ja|.|jb|imm[10:0]} // Opcode: {....|.|imm[19]|....|imm[16:11]|..|ja|.|jb|imm[10:0]}
case OffsetType::kThumb32_BCond: { case OffsetType::kThumb32_BCond: {
// Sanity checks. // Sanity checks.
if (format.valueSize() != 4 || bitCount != 20 || bitShift != 0) if (format.valueSize() != 4 || bitCount != 20 || bitShift != 0) {
return false; return false;
}
uint32_t ia = (value & 0x0007FFu); uint32_t ia = (value & 0x0007FFu);
uint32_t ib = (value & 0x01F800u) << (16 - 11); uint32_t ib = (value & 0x01F800u) << (16 - 11);
@@ -124,8 +134,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
case OffsetType::kAArch32_ADR: { case OffsetType::kAArch32_ADR: {
uint32_t encodedImm; uint32_t encodedImm;
if (!arm::Utils::encodeAArch32Imm(value, &encodedImm)) if (!arm::Utils::encodeAArch32Imm(value, &encodedImm)) {
return false; return false;
}
*dst = (Support::bitMask(22) << u) | (encodedImm << bitShift); *dst = (Support::bitMask(22) << u) | (encodedImm << bitShift);
return true; return true;
@@ -138,8 +149,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
case OffsetType::kAArch32_U23_0To3At0_4To7At8: { case OffsetType::kAArch32_U23_0To3At0_4To7At8: {
// Sanity checks. // Sanity checks.
if (format.valueSize() != 4 || bitCount != 8 || bitShift != 0) if (format.valueSize() != 4 || bitCount != 8 || bitShift != 0) {
return false; return false;
}
uint32_t immLo = (value & 0x0Fu); uint32_t immLo = (value & 0x0Fu);
uint32_t immHi = (value & 0xF0u) << (8 - 4); uint32_t immHi = (value & 0xF0u) << (8 - 4);
@@ -150,8 +162,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
case OffsetType::kAArch32_1To24At0_0At24: { case OffsetType::kAArch32_1To24At0_0At24: {
// Sanity checks. // Sanity checks.
if (format.valueSize() != 4 || bitCount != 25 || bitShift != 0) if (format.valueSize() != 4 || bitCount != 25 || bitShift != 0) {
return false; return false;
}
uint32_t immLo = (value & 0x0000001u) << 24; uint32_t immLo = (value & 0x0000001u) << 24;
uint32_t immHi = (value & 0x1FFFFFEu) >> 1; uint32_t immHi = (value & 0x1FFFFFEu) >> 1;
@@ -163,8 +176,9 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
case OffsetType::kAArch64_ADR: case OffsetType::kAArch64_ADR:
case OffsetType::kAArch64_ADRP: { case OffsetType::kAArch64_ADRP: {
// Sanity checks. // Sanity checks.
if (format.valueSize() != 4 || bitCount != 21 || bitShift != 5) if (format.valueSize() != 4 || bitCount != 21 || bitShift != 5) {
return false; return false;
}
uint32_t immLo = value & 0x3u; uint32_t immLo = value & 0x3u;
uint32_t immHi = (value >> 2) & Support::lsbMask<uint32_t>(19); uint32_t immHi = (value >> 2) & Support::lsbMask<uint32_t>(19);
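A sketch of the ADR/ADRP immediate split computed above: a 21-bit value becomes immlo (2 bits) and immhi (19 bits). The bit positions used below (immlo at bits 29..30, immhi at bits 5..23) follow the A64 encoding and are stated here as an assumption for illustration:

#include <cstdint>

// Sketch: place a 21-bit ADR-style immediate into an instruction word.
uint32_t placeAdrImm(uint32_t value21) {
  uint32_t immLo = value21 & 0x3u;
  uint32_t immHi = (value21 >> 2) & ((1u << 19) - 1u);
  return (immLo << 29) | (immHi << 5);
}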
@@ -182,8 +196,9 @@ bool CodeWriterUtils::encodeOffset64(uint64_t* dst, int64_t offset64, const Offs
uint32_t bitCount = format.immBitCount(); uint32_t bitCount = format.immBitCount();
uint32_t discardLsb = format.immDiscardLsb(); uint32_t discardLsb = format.immDiscardLsb();
if (!bitCount || bitCount > format.valueSize() * 8u) if (!bitCount || bitCount > format.valueSize() * 8u) {
return false; return false;
}
uint64_t value; uint64_t value;
@@ -191,26 +206,30 @@ bool CodeWriterUtils::encodeOffset64(uint64_t* dst, int64_t offset64, const Offs
if (format.type() == OffsetType::kUnsignedOffset) { if (format.type() == OffsetType::kUnsignedOffset) {
if (discardLsb) { if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32); ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) {
return false; return false;
}
offset64 = int64_t(uint64_t(offset64) >> discardLsb); offset64 = int64_t(uint64_t(offset64) >> discardLsb);
} }
value = uint64_t(offset64) & Support::lsbMask<uint64_t>(bitCount); value = uint64_t(offset64) & Support::lsbMask<uint64_t>(bitCount);
if (value != uint64_t(offset64)) if (value != uint64_t(offset64)) {
return false; return false;
}
} }
else { else {
// The rest of OffsetType options are all signed. // The rest of OffsetType options are all signed.
if (discardLsb) { if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32); ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) {
return false; return false;
}
offset64 >>= discardLsb; offset64 >>= discardLsb;
} }
if (!Support::isEncodableOffset64(offset64, bitCount)) if (!Support::isEncodableOffset64(offset64, bitCount)) {
return false; return false;
}
value = uint64_t(offset64); value = uint64_t(offset64);
} }
@@ -235,8 +254,9 @@ bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetForma
switch (format.valueSize()) { switch (format.valueSize()) {
case 1: { case 1: {
uint32_t mask; uint32_t mask;
if (!encodeOffset32(&mask, offset64, format)) if (!encodeOffset32(&mask, offset64, format)) {
return false; return false;
}
Support::writeU8(dst, uint8_t(Support::readU8(dst) | mask)); Support::writeU8(dst, uint8_t(Support::readU8(dst) | mask));
return true; return true;
@@ -244,8 +264,9 @@ bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetForma
case 2: { case 2: {
uint32_t mask; uint32_t mask;
if (!encodeOffset32(&mask, offset64, format)) if (!encodeOffset32(&mask, offset64, format)) {
return false; return false;
}
Support::writeU16uLE(dst, uint16_t(Support::readU16uLE(dst) | mask)); Support::writeU16uLE(dst, uint16_t(Support::readU16uLE(dst) | mask));
return true; return true;
@@ -263,8 +284,9 @@ bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetForma
case 8: { case 8: {
uint64_t mask; uint64_t mask;
if (!encodeOffset64(&mask, offset64, format)) if (!encodeOffset64(&mask, offset64, format)) {
return false; return false;
}
Support::writeU64uLE(dst, Support::readU64uLE(dst) | mask); Support::writeU64uLE(dst, Support::readU64uLE(dst) | mask);
return true; return true;
@@ -23,10 +23,11 @@ class CodeWriter {
public: public:
uint8_t* _cursor; uint8_t* _cursor;
ASMJIT_FORCE_INLINE explicit CodeWriter(BaseAssembler* a) noexcept ASMJIT_INLINE_NODEBUG explicit CodeWriter(BaseAssembler* a) noexcept
: _cursor(a->_bufferPtr) {} : _cursor(a->_bufferPtr) {}
ASMJIT_FORCE_INLINE Error ensureSpace(BaseAssembler* a, size_t n) noexcept { [[nodiscard]]
ASMJIT_INLINE Error ensureSpace(BaseAssembler* a, size_t n) noexcept {
size_t remainingSpace = (size_t)(a->_bufferEnd - _cursor); size_t remainingSpace = (size_t)(a->_bufferEnd - _cursor);
if (ASMJIT_UNLIKELY(remainingSpace < n)) { if (ASMJIT_UNLIKELY(remainingSpace < n)) {
CodeBuffer& buffer = a->_section->_buffer; CodeBuffer& buffer = a->_section->_buffer;
@@ -38,25 +39,28 @@ public:
return kErrorOk; return kErrorOk;
} }
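`CodeWriter` is an internal helper, so the following is only an illustrative sketch of the ensure-then-emit pattern it is built around, assuming a `BaseAssembler* a` attached to a `CodeHolder`:

// Sketch: reserve space up front, emit raw bytes through the cursor, then
// publish the new buffer size back to the assembler.
asmjit::Error emitRawNops(asmjit::BaseAssembler* a, size_t count) {
  asmjit::CodeWriter writer(a);
  ASMJIT_PROPAGATE(writer.ensureSpace(a, count));
  for (size_t i = 0; i < count; i++)
    writer.emit8(uint8_t(0x90));  // 0x90 = x86 NOP, used only as an example byte
  writer.done(a);
  return asmjit::kErrorOk;
}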
ASMJIT_FORCE_INLINE uint8_t* cursor() const noexcept { return _cursor; } [[nodiscard]]
ASMJIT_FORCE_INLINE void setCursor(uint8_t* cursor) noexcept { _cursor = cursor; } ASMJIT_INLINE_NODEBUG uint8_t* cursor() const noexcept { return _cursor; }
ASMJIT_FORCE_INLINE void advance(size_t n) noexcept { _cursor += n; }
ASMJIT_FORCE_INLINE size_t offsetFrom(uint8_t* from) const noexcept { ASMJIT_INLINE_NODEBUG void setCursor(uint8_t* cursor) noexcept { _cursor = cursor; }
ASMJIT_INLINE_NODEBUG void advance(size_t n) noexcept { _cursor += n; }
[[nodiscard]]
ASMJIT_INLINE size_t offsetFrom(uint8_t* from) const noexcept {
ASMJIT_ASSERT(_cursor >= from); ASMJIT_ASSERT(_cursor >= from);
return (size_t)(_cursor - from); return (size_t)(_cursor - from);
} }
template<typename T> template<typename T>
ASMJIT_FORCE_INLINE void emit8(T val) noexcept { ASMJIT_INLINE void emit8(T val) noexcept {
typedef typename std::make_unsigned<T>::type U; using U = std::make_unsigned_t<T>;
_cursor[0] = uint8_t(U(val) & U(0xFF)); _cursor[0] = uint8_t(U(val) & U(0xFF));
_cursor++; _cursor++;
} }
template<typename T, typename Y> template<typename T, typename Y>
ASMJIT_FORCE_INLINE void emit8If(T val, Y cond) noexcept { ASMJIT_INLINE void emit8If(T val, Y cond) noexcept {
typedef typename std::make_unsigned<T>::type U; using U = std::make_unsigned_t<T>;
ASMJIT_ASSERT(size_t(cond) <= 1u); ASMJIT_ASSERT(size_t(cond) <= 1u);
_cursor[0] = uint8_t(U(val) & U(0xFF)); _cursor[0] = uint8_t(U(val) & U(0xFF));
@@ -64,42 +68,42 @@ public:
} }
template<typename T> template<typename T>
ASMJIT_FORCE_INLINE void emit16uLE(T val) noexcept { ASMJIT_INLINE void emit16uLE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U; using U = std::make_unsigned_t<T>;
Support::writeU16uLE(_cursor, uint16_t(U(val) & 0xFFFFu)); Support::writeU16uLE(_cursor, uint16_t(U(val) & 0xFFFFu));
_cursor += 2; _cursor += 2;
} }
template<typename T> template<typename T>
ASMJIT_FORCE_INLINE void emit16uBE(T val) noexcept { ASMJIT_INLINE void emit16uBE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U; using U = std::make_unsigned_t<T>;
Support::writeU16uBE(_cursor, uint16_t(U(val) & 0xFFFFu)); Support::writeU16uBE(_cursor, uint16_t(U(val) & 0xFFFFu));
_cursor += 2; _cursor += 2;
} }
template<typename T> template<typename T>
ASMJIT_FORCE_INLINE void emit32uLE(T val) noexcept { ASMJIT_INLINE void emit32uLE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U; using U = std::make_unsigned_t<T>;
Support::writeU32uLE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu)); Support::writeU32uLE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
_cursor += 4; _cursor += 4;
} }
template<typename T> template<typename T>
ASMJIT_FORCE_INLINE void emit32uBE(T val) noexcept { ASMJIT_INLINE void emit32uBE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U; using U = std::make_unsigned_t<T>;
Support::writeU32uBE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu)); Support::writeU32uBE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
_cursor += 4; _cursor += 4;
} }
ASMJIT_FORCE_INLINE void emitData(const void* data, size_t size) noexcept { ASMJIT_INLINE void emitData(const void* data, size_t size) noexcept {
ASMJIT_ASSERT(size != 0); ASMJIT_ASSERT(size != 0);
memcpy(_cursor, data, size); memcpy(_cursor, data, size);
_cursor += size; _cursor += size;
} }
template<typename T> template<typename T>
ASMJIT_FORCE_INLINE void emitValueLE(const T& value, size_t size) noexcept { ASMJIT_INLINE void emitValueLE(const T& value, size_t size) noexcept {
typedef typename std::make_unsigned<T>::type U; using U = std::make_unsigned_t<T>;
ASMJIT_ASSERT(size <= sizeof(T)); ASMJIT_ASSERT(size <= sizeof(T));
U v = U(value); U v = U(value);
@@ -111,8 +115,8 @@ public:
} }
template<typename T> template<typename T>
ASMJIT_FORCE_INLINE void emitValueBE(const T& value, size_t size) noexcept { ASMJIT_INLINE void emitValueBE(const T& value, size_t size) noexcept {
typedef typename std::make_unsigned<T>::type U; using U = std::make_unsigned_t<T>;
ASMJIT_ASSERT(size <= sizeof(T)); ASMJIT_ASSERT(size <= sizeof(T));
U v = U(value); U v = U(value);
@@ -123,13 +127,13 @@ public:
_cursor += size; _cursor += size;
} }
ASMJIT_FORCE_INLINE void emitZeros(size_t size) noexcept { ASMJIT_INLINE void emitZeros(size_t size) noexcept {
ASMJIT_ASSERT(size != 0); ASMJIT_ASSERT(size != 0);
memset(_cursor, 0, size); memset(_cursor, 0, size);
_cursor += size; _cursor += size;
} }
ASMJIT_FORCE_INLINE void remove8(uint8_t* where) noexcept { ASMJIT_INLINE void remove8(uint8_t* where) noexcept {
ASMJIT_ASSERT(where < _cursor); ASMJIT_ASSERT(where < _cursor);
uint8_t* p = where; uint8_t* p = where;
@@ -139,7 +143,7 @@ public:
} }
template<typename T> template<typename T>
ASMJIT_FORCE_INLINE void insert8(uint8_t* where, T val) noexcept { ASMJIT_INLINE void insert8(uint8_t* where, T val) noexcept {
uint8_t* p = _cursor; uint8_t* p = _cursor;
while (p != where) { while (p != where) {
@@ -151,7 +155,7 @@ public:
_cursor++; _cursor++;
} }
ASMJIT_FORCE_INLINE void done(BaseAssembler* a) noexcept { ASMJIT_INLINE void done(BaseAssembler* a) noexcept {
CodeBuffer& buffer = a->_section->_buffer; CodeBuffer& buffer = a->_section->_buffer;
size_t newSize = (size_t)(_cursor - a->_bufferData); size_t newSize = (size_t)(_cursor - a->_bufferData);
ASMJIT_ASSERT(newSize <= buffer.capacity()); ASMJIT_ASSERT(newSize <= buffer.capacity());
@@ -164,9 +168,13 @@ public:
//! Code writer utilities. //! Code writer utilities.
namespace CodeWriterUtils { namespace CodeWriterUtils {
[[nodiscard]]
bool encodeOffset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept; bool encodeOffset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
[[nodiscard]]
bool encodeOffset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept; bool encodeOffset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
[[nodiscard]]
bool writeOffset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept; bool writeOffset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept;
} // {CodeWriterUtils} } // {CodeWriterUtils}
@@ -22,11 +22,11 @@ ASMJIT_BEGIN_NAMESPACE
// =================== // ===================
class GlobalConstPoolPass : public Pass { class GlobalConstPoolPass : public Pass {
public:
typedef Pass Base;
public:
ASMJIT_NONCOPYABLE(GlobalConstPoolPass) ASMJIT_NONCOPYABLE(GlobalConstPoolPass)
public:
using Base = Pass;
GlobalConstPoolPass() noexcept : Pass("GlobalConstPoolPass") {} GlobalConstPoolPass() noexcept : Pass("GlobalConstPoolPass") {}
Error run(Zone* zone, Logger* logger) override { Error run(Zone* zone, Logger* logger) override {
@@ -73,27 +73,31 @@ Error BaseCompiler::newFuncNode(FuncNode** out, const FuncSignature& signature)
// Initialize the function's detail info. // Initialize the function's detail info.
Error err = funcNode->detail().init(signature, environment()); Error err = funcNode->detail().init(signature, environment());
if (ASMJIT_UNLIKELY(err)) if (ASMJIT_UNLIKELY(err)) {
return reportError(err); return reportError(err);
}
// If the Target guarantees greater stack alignment than required by the calling convention // If the Target guarantees greater stack alignment than required by the calling convention
// then override it as we can prevent having to perform dynamic stack alignment // then override it as we can prevent having to perform dynamic stack alignment
uint32_t environmentStackAlignment = _environment.stackAlignment(); uint32_t environmentStackAlignment = _environment.stackAlignment();
if (funcNode->_funcDetail._callConv.naturalStackAlignment() < environmentStackAlignment) if (funcNode->_funcDetail._callConv.naturalStackAlignment() < environmentStackAlignment) {
funcNode->_funcDetail._callConv.setNaturalStackAlignment(environmentStackAlignment); funcNode->_funcDetail._callConv.setNaturalStackAlignment(environmentStackAlignment);
}
// Initialize the function frame. // Initialize the function frame.
err = funcNode->_frame.init(funcNode->_funcDetail); err = funcNode->_frame.init(funcNode->_funcDetail);
if (ASMJIT_UNLIKELY(err)) if (ASMJIT_UNLIKELY(err)) {
return reportError(err); return reportError(err);
}
// Allocate space for function arguments. // Allocate space for function arguments.
funcNode->_args = nullptr; funcNode->_args = nullptr;
if (funcNode->argCount() != 0) { if (funcNode->argCount() != 0) {
funcNode->_args = _allocator.allocT<FuncNode::ArgPack>(funcNode->argCount() * sizeof(FuncNode::ArgPack)); funcNode->_args = _allocator.allocT<FuncNode::ArgPack>(funcNode->argCount() * sizeof(FuncNode::ArgPack));
if (ASMJIT_UNLIKELY(!funcNode->_args)) if (ASMJIT_UNLIKELY(!funcNode->_args)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory)); return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
memset(funcNode->_args, 0, funcNode->argCount() * sizeof(FuncNode::ArgPack)); memset(funcNode->_args, 0, funcNode->argCount() * sizeof(FuncNode::ArgPack));
} }
@@ -159,8 +163,9 @@ Error BaseCompiler::endFunc() {
FuncNode* func = _func; FuncNode* func = _func;
resetState(); resetState();
if (ASMJIT_UNLIKELY(!func)) if (ASMJIT_UNLIKELY(!func)) {
return reportError(DebugUtils::errored(kErrorInvalidState)); return reportError(DebugUtils::errored(kErrorInvalidState));
}
// Add the local constant pool at the end of the function (if exists). // Add the local constant pool at the end of the function (if exists).
ConstPoolNode* localConstPool = _constPools[uint32_t(ConstPoolScope::kLocal)]; ConstPoolNode* localConstPool = _constPools[uint32_t(ConstPoolScope::kLocal)];
@@ -191,15 +196,17 @@ Error BaseCompiler::newInvokeNode(InvokeNode** out, InstId instId, const Operand
node->resetOpRange(1, node->opCapacity()); node->resetOpRange(1, node->opCapacity());
Error err = node->detail().init(signature, environment()); Error err = node->detail().init(signature, environment());
if (ASMJIT_UNLIKELY(err)) if (ASMJIT_UNLIKELY(err)) {
return reportError(err); return reportError(err);
}
// Skip the allocation if there are no arguments. // Skip the allocation if there are no arguments.
uint32_t argCount = signature.argCount(); uint32_t argCount = signature.argCount();
if (argCount) { if (argCount) {
node->_args = static_cast<InvokeNode::OperandPack*>(_allocator.alloc(argCount * sizeof(InvokeNode::OperandPack))); node->_args = static_cast<InvokeNode::OperandPack*>(_allocator.alloc(argCount * sizeof(InvokeNode::OperandPack)));
if (!node->_args) if (!node->_args) {
return reportError(DebugUtils::errored(kErrorOutOfMemory)); return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
memset(node->_args, 0, argCount * sizeof(InvokeNode::OperandPack)); memset(node->_args, 0, argCount * sizeof(InvokeNode::OperandPack));
} }
@@ -235,15 +242,18 @@ Error BaseCompiler::newVirtReg(VirtReg** out, TypeId typeId, OperandSignature si
*out = nullptr; *out = nullptr;
uint32_t index = _vRegArray.size(); uint32_t index = _vRegArray.size();
if (ASMJIT_UNLIKELY(index >= uint32_t(Operand::kVirtIdCount))) if (ASMJIT_UNLIKELY(index >= uint32_t(Operand::kVirtIdCount))) {
return reportError(DebugUtils::errored(kErrorTooManyVirtRegs)); return reportError(DebugUtils::errored(kErrorTooManyVirtRegs));
}
if (ASMJIT_UNLIKELY(_vRegArray.willGrow(&_allocator) != kErrorOk)) if (ASMJIT_UNLIKELY(_vRegArray.willGrow(&_allocator) != kErrorOk)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory)); return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
VirtReg* vReg = _vRegZone.allocZeroedT<VirtReg>(); VirtReg* vReg = _vRegZone.allocZeroedT<VirtReg>();
if (ASMJIT_UNLIKELY(!vReg)) if (ASMJIT_UNLIKELY(!vReg)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory)); return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
uint32_t size = TypeUtils::sizeOf(typeId); uint32_t size = TypeUtils::sizeOf(typeId);
uint32_t alignment = Support::min<uint32_t>(size, 64); uint32_t alignment = Support::min<uint32_t>(size, 64);
@@ -251,10 +261,12 @@ Error BaseCompiler::newVirtReg(VirtReg** out, TypeId typeId, OperandSignature si
vReg = new(Support::PlacementNew{vReg}) VirtReg(signature, Operand::indexToVirtId(index), size, alignment, typeId); vReg = new(Support::PlacementNew{vReg}) VirtReg(signature, Operand::indexToVirtId(index), size, alignment, typeId);
#ifndef ASMJIT_NO_LOGGING #ifndef ASMJIT_NO_LOGGING
if (name && name[0] != '\0') if (name && name[0] != '\0') {
vReg->_name.setData(&_dataZone, name, SIZE_MAX); vReg->_name.setData(&_dataZone, name, SIZE_MAX);
else }
else {
BaseCompiler_assignGenericName(this, vReg); BaseCompiler_assignGenericName(this, vReg);
}
#else #else
DebugUtils::unused(name); DebugUtils::unused(name);
#endif #endif
@@ -270,8 +282,9 @@ Error BaseCompiler::_newReg(BaseReg* out, TypeId typeId, const char* name) {
out->reset(); out->reset();
Error err = ArchUtils::typeIdToRegSignature(arch(), typeId, &typeId, &regSignature); Error err = ArchUtils::typeIdToRegSignature(arch(), typeId, &typeId, &regSignature);
if (ASMJIT_UNLIKELY(err)) if (ASMJIT_UNLIKELY(err)) {
return reportError(err); return reportError(err);
}
VirtReg* vReg; VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regSignature, name)); ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regSignature, name));
@@ -345,8 +358,9 @@ Error BaseCompiler::_newReg(BaseReg* out, const BaseReg& ref, const char* name)
} }
} }
if (typeId == TypeId::kVoid) if (typeId == TypeId::kVoid) {
return reportError(DebugUtils::errored(kErrorInvalidState)); return reportError(DebugUtils::errored(kErrorInvalidState));
}
} }
} }
else { else {
@@ -354,8 +368,9 @@ Error BaseCompiler::_newReg(BaseReg* out, const BaseReg& ref, const char* name)
} }
Error err = ArchUtils::typeIdToRegSignature(arch(), typeId, &typeId, &regSignature); Error err = ArchUtils::typeIdToRegSignature(arch(), typeId, &typeId, &regSignature);
if (ASMJIT_UNLIKELY(err)) if (ASMJIT_UNLIKELY(err)) {
return reportError(err); return reportError(err);
}
VirtReg* vReg; VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regSignature, name)); ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regSignature, name));
@@ -379,17 +394,21 @@ Error BaseCompiler::_newRegFmt(BaseReg* out, const BaseReg& ref, const char* fmt
Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, const char* name) { Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, const char* name) {
out->reset(); out->reset();
if (size == 0) if (size == 0) {
return reportError(DebugUtils::errored(kErrorInvalidArgument)); return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
if (alignment == 0) if (alignment == 0) {
alignment = 1; alignment = 1;
}
if (!Support::isPowerOf2(alignment)) if (!Support::isPowerOf2(alignment)) {
return reportError(DebugUtils::errored(kErrorInvalidArgument)); return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
if (alignment > 64) if (alignment > 64) {
alignment = 64; alignment = 64;
}
VirtReg* vReg; VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, TypeId::kVoid, OperandSignature{0}, name)); ASMJIT_PROPAGATE(newVirtReg(&vReg, TypeId::kVoid, OperandSignature{0}, name));
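A standalone sketch of the argument validation performed by _newStack() above (assumed semantics of the checks; normalizeStackAlignment() is an illustrative helper, not a library function): zero alignment defaults to 1, non-power-of-2 values are rejected, and anything above 64 bytes is clamped.

#include <cassert>
#include <cstdint>

static bool isPowerOf2(uint32_t x) { return x && (x & (x - 1)) == 0; }

// Returns 0 on invalid input, otherwise the normalized alignment (1..64).
static uint32_t normalizeStackAlignment(uint32_t alignment) {
  if (alignment == 0)
    alignment = 1;            // default alignment
  if (!isPowerOf2(alignment))
    return 0;                 // would map to kErrorInvalidArgument
  if (alignment > 64)
    alignment = 64;           // clamp to the maximum supported alignment
  return alignment;
}

int main() {
  assert(normalizeStackAlignment(0)   == 1);
  assert(normalizeStackAlignment(16)  == 16);
  assert(normalizeStackAlignment(128) == 64);
  assert(normalizeStackAlignment(24)  == 0);
  return 0;
}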
@@ -408,21 +427,26 @@ Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, c
} }
Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment) { Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment) {
if (!isVirtIdValid(virtId)) if (!isVirtIdValid(virtId)) {
return DebugUtils::errored(kErrorInvalidVirtId); return DebugUtils::errored(kErrorInvalidVirtId);
}
if (newAlignment && !Support::isPowerOf2(newAlignment)) if (newAlignment && !Support::isPowerOf2(newAlignment)) {
return reportError(DebugUtils::errored(kErrorInvalidArgument)); return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
if (newAlignment > 64) if (newAlignment > 64) {
newAlignment = 64; newAlignment = 64;
}
VirtReg* vReg = virtRegById(virtId); VirtReg* vReg = virtRegById(virtId);
if (newSize) if (newSize) {
vReg->_virtSize = newSize; vReg->_virtSize = newSize;
}
if (newAlignment) if (newAlignment) {
vReg->_alignment = uint8_t(newAlignment); vReg->_alignment = uint8_t(newAlignment);
}
// This is required if the RAPass is already running. There is a chance that a stack-slot has already been // This is required if the RAPass is already running. There is a chance that a stack-slot has already been
// allocated and in that case it has to be updated as well, otherwise we would allocate the wrong amount of memory. // allocated and in that case it has to be updated as well, otherwise we would allocate the wrong amount of memory.
@@ -438,18 +462,21 @@ Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t new
Error BaseCompiler::_newConst(BaseMem* out, ConstPoolScope scope, const void* data, size_t size) { Error BaseCompiler::_newConst(BaseMem* out, ConstPoolScope scope, const void* data, size_t size) {
out->reset(); out->reset();
if (uint32_t(scope) > 1) if (uint32_t(scope) > 1) {
return reportError(DebugUtils::errored(kErrorInvalidArgument)); return reportError(DebugUtils::errored(kErrorInvalidArgument));
}
if (!_constPools[uint32_t(scope)]) if (!_constPools[uint32_t(scope)]) {
ASMJIT_PROPAGATE(newConstPoolNode(&_constPools[uint32_t(scope)])); ASMJIT_PROPAGATE(newConstPoolNode(&_constPools[uint32_t(scope)]));
}
ConstPoolNode* pool = _constPools[uint32_t(scope)]; ConstPoolNode* pool = _constPools[uint32_t(scope)];
size_t off; size_t off;
Error err = pool->add(data, size, off); Error err = pool->add(data, size, off);
if (ASMJIT_UNLIKELY(err)) if (ASMJIT_UNLIKELY(err)) {
return reportError(err); return reportError(err);
}
*out = BaseMem(OperandSignature::fromOpType(OperandType::kMem) | *out = BaseMem(OperandSignature::fromOpType(OperandType::kMem) |
OperandSignature::fromMemBaseType(RegType::kLabelTag) | OperandSignature::fromMemBaseType(RegType::kLabelTag) |
@@ -462,7 +489,9 @@ void BaseCompiler::rename(const BaseReg& reg, const char* fmt, ...) {
if (!reg.isVirtReg()) return; if (!reg.isVirtReg()) return;
VirtReg* vReg = virtRegById(reg.id()); VirtReg* vReg = virtRegById(reg.id());
if (!vReg) return; if (!vReg) {
return;
}
if (fmt && fmt[0] != '\0') { if (fmt && fmt[0] != '\0') {
char buf[128]; char buf[128];
@@ -487,8 +516,9 @@ Error BaseCompiler::newJumpNode(JumpNode** out, InstId instId, InstOptions instO
uint32_t opCount = 1; uint32_t opCount = 1;
*out = node; *out = node;
if (ASMJIT_UNLIKELY(!node)) if (ASMJIT_UNLIKELY(!node)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory)); return reportError(DebugUtils::errored(kErrorOutOfMemory));
}
node = new(Support::PlacementNew{node}) JumpNode(this, instId, instOptions, opCount, annotation); node = new(Support::PlacementNew{node}) JumpNode(this, instId, instOptions, opCount, annotation);
node->setOp(0, o0); node->setOp(0, o0);

View File

@@ -51,7 +51,7 @@ class InvokeNode;
class ASMJIT_VIRTAPI BaseCompiler : public BaseBuilder { class ASMJIT_VIRTAPI BaseCompiler : public BaseBuilder {
public: public:
ASMJIT_NONCOPYABLE(BaseCompiler) ASMJIT_NONCOPYABLE(BaseCompiler)
typedef BaseBuilder Base; using Base = BaseBuilder;
//! \name Members //! \name Members
//! \{ //! \{
@@ -96,6 +96,7 @@ public:
ASMJIT_API Error addFuncRetNode(FuncRetNode** ASMJIT_NONNULL(out), const Operand_& o0, const Operand_& o1); ASMJIT_API Error addFuncRetNode(FuncRetNode** ASMJIT_NONNULL(out), const Operand_& o0, const Operand_& o1);
//! Returns the current function. //! Returns the current function.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncNode* func() const noexcept { return _func; } ASMJIT_INLINE_NODEBUG FuncNode* func() const noexcept { return _func; }
//! Creates a new \ref FuncNode with the given `signature` and returns it. //! Creates a new \ref FuncNode with the given `signature` and returns it.
@@ -163,31 +164,38 @@ public:
ASMJIT_API Error _newRegFmt(BaseReg* ASMJIT_NONNULL(out), const BaseReg& ref, const char* fmt, ...); ASMJIT_API Error _newRegFmt(BaseReg* ASMJIT_NONNULL(out), const BaseReg& ref, const char* fmt, ...);
//! Tests whether the given `id` is a valid virtual register id. //! Tests whether the given `id` is a valid virtual register id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isVirtIdValid(uint32_t id) const noexcept { ASMJIT_INLINE_NODEBUG bool isVirtIdValid(uint32_t id) const noexcept {
uint32_t index = Operand::virtIdToIndex(id); uint32_t index = Operand::virtIdToIndex(id);
return index < _vRegArray.size(); return index < _vRegArray.size();
} }
//! Tests whether the given `reg` is a virtual register having a valid id. //! Tests whether the given `reg` is a virtual register having a valid id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isVirtRegValid(const BaseReg& reg) const noexcept { ASMJIT_INLINE_NODEBUG bool isVirtRegValid(const BaseReg& reg) const noexcept {
return isVirtIdValid(reg.id()); return isVirtIdValid(reg.id());
} }
//! Returns \ref VirtReg associated with the given `id`. //! Returns \ref VirtReg associated with the given `id`.
[[nodiscard]]
inline VirtReg* virtRegById(uint32_t id) const noexcept { inline VirtReg* virtRegById(uint32_t id) const noexcept {
ASMJIT_ASSERT(isVirtIdValid(id)); ASMJIT_ASSERT(isVirtIdValid(id));
return _vRegArray[Operand::virtIdToIndex(id)]; return _vRegArray[Operand::virtIdToIndex(id)];
} }
//! Returns \ref VirtReg associated with the given `reg`. //! Returns \ref VirtReg associated with the given `reg`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VirtReg* virtRegByReg(const BaseReg& reg) const noexcept { return virtRegById(reg.id()); } ASMJIT_INLINE_NODEBUG VirtReg* virtRegByReg(const BaseReg& reg) const noexcept { return virtRegById(reg.id()); }
//! Returns \ref VirtReg associated with the given virtual register `index`. //! Returns \ref VirtReg associated with the given virtual register `index`.
//! //!
//! \note This is not the same as virtual register id. The conversion between id and its index is implemented //! \note This is not the same as virtual register id. The conversion between id and its index is implemented
//! by \ref Operand_::virtIdToIndex() and \ref Operand_::indexToVirtId() functions. //! by \ref Operand_::virtIdToIndex() and \ref Operand_::indexToVirtId() functions.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VirtReg* virtRegByIndex(uint32_t index) const noexcept { return _vRegArray[index]; } ASMJIT_INLINE_NODEBUG VirtReg* virtRegByIndex(uint32_t index) const noexcept { return _vRegArray[index]; }
//! Returns an array of all virtual registers managed by the Compiler. //! Returns an array of all virtual registers managed by the Compiler.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<VirtReg*>& virtRegs() const noexcept { return _vRegArray; } ASMJIT_INLINE_NODEBUG const ZoneVector<VirtReg*>& virtRegs() const noexcept { return _vRegArray; }
//! \name Stack //! \name Stack
@@ -230,6 +238,7 @@ public:
//! \name Jump Annotations //! \name Jump Annotations
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<JumpAnnotation*>& jumpAnnotations() const noexcept { ASMJIT_INLINE_NODEBUG const ZoneVector<JumpAnnotation*>& jumpAnnotations() const noexcept {
return _jumpAnnotations; return _jumpAnnotations;
} }
@@ -239,6 +248,7 @@ public:
//! Returns a new `JumpAnnotation` instance, which can be used to aggregate possible targets of a jump where the //! Returns a new `JumpAnnotation` instance, which can be used to aggregate possible targets of a jump where the
//! target is not a label, for example to implement jump tables. //! target is not a label, for example to implement jump tables.
[[nodiscard]]
ASMJIT_API JumpAnnotation* newJumpAnnotation(); ASMJIT_API JumpAnnotation* newJumpAnnotation();
//! \} //! \}
@@ -287,15 +297,23 @@ public:
//! \{ //! \{
//! Returns the compiler that owns this JumpAnnotation. //! Returns the compiler that owns this JumpAnnotation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseCompiler* compiler() const noexcept { return _compiler; } ASMJIT_INLINE_NODEBUG BaseCompiler* compiler() const noexcept { return _compiler; }
//! Returns the annotation id. //! Returns the annotation id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t annotationId() const noexcept { return _annotationId; } ASMJIT_INLINE_NODEBUG uint32_t annotationId() const noexcept { return _annotationId; }
//! Returns a vector of label identifiers that lists all targets of the jump. //! Returns a vector of label identifiers that lists all targets of the jump.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<uint32_t>& labelIds() const noexcept { return _labelIds; } ASMJIT_INLINE_NODEBUG const ZoneVector<uint32_t>& labelIds() const noexcept { return _labelIds; }
//! Tests whether the given `label` is a target of this JumpAnnotation. //! Tests whether the given `label` is a target of this JumpAnnotation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLabel(const Label& label) const noexcept { return hasLabelId(label.id()); } ASMJIT_INLINE_NODEBUG bool hasLabel(const Label& label) const noexcept { return hasLabelId(label.id()); }
//! Tests whether the given `labelId` is a target of this JumpAnnotation. //! Tests whether the given `labelId` is a target of this JumpAnnotation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLabelId(uint32_t labelId) const noexcept { return _labelIds.contains(labelId); } ASMJIT_INLINE_NODEBUG bool hasLabelId(uint32_t labelId) const noexcept { return _labelIds.contains(labelId); }
//! \} //! \}
@@ -342,9 +360,13 @@ public:
//! \{ //! \{
//! Tests whether this JumpNode has associated a \ref JumpAnnotation. //! Tests whether this JumpNode has associated a \ref JumpAnnotation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAnnotation() const noexcept { return _annotation != nullptr; } ASMJIT_INLINE_NODEBUG bool hasAnnotation() const noexcept { return _annotation != nullptr; }
//! Returns the \ref JumpAnnotation associated with this jump, or `nullptr`. //! Returns the \ref JumpAnnotation associated with this jump, or `nullptr`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG JumpAnnotation* annotation() const noexcept { return _annotation; } ASMJIT_INLINE_NODEBUG JumpAnnotation* annotation() const noexcept { return _annotation; }
//! Sets the \ref JumpAnnotation associated with this jump to `annotation`. //! Sets the \ref JumpAnnotation associated with this jump to `annotation`.
ASMJIT_INLINE_NODEBUG void setAnnotation(JumpAnnotation* annotation) noexcept { _annotation = annotation; } ASMJIT_INLINE_NODEBUG void setAnnotation(JumpAnnotation* annotation) noexcept { _annotation = annotation; }
@@ -446,37 +468,54 @@ public:
//! \name Accessors //! \name Accessors
//! Returns function exit `LabelNode`. //! Returns function exit `LabelNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG LabelNode* exitNode() const noexcept { return _exitNode; } ASMJIT_INLINE_NODEBUG LabelNode* exitNode() const noexcept { return _exitNode; }
//! Returns function exit label. //! Returns function exit label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label exitLabel() const noexcept { return _exitNode->label(); } ASMJIT_INLINE_NODEBUG Label exitLabel() const noexcept { return _exitNode->label(); }
//! Returns "End of Func" sentinel node. //! Returns "End of Func" sentinel node.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SentinelNode* endNode() const noexcept { return _end; } ASMJIT_INLINE_NODEBUG SentinelNode* endNode() const noexcept { return _end; }
//! Returns function detail. //! Returns function detail.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncDetail& detail() noexcept { return _funcDetail; } ASMJIT_INLINE_NODEBUG FuncDetail& detail() noexcept { return _funcDetail; }
//! Returns function detail. //! Returns function detail.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncDetail& detail() const noexcept { return _funcDetail; } ASMJIT_INLINE_NODEBUG const FuncDetail& detail() const noexcept { return _funcDetail; }
//! Returns function frame. //! Returns function frame.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncFrame& frame() noexcept { return _frame; } ASMJIT_INLINE_NODEBUG FuncFrame& frame() noexcept { return _frame; }
//! Returns function frame. //! Returns function frame.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncFrame& frame() const noexcept { return _frame; } ASMJIT_INLINE_NODEBUG const FuncFrame& frame() const noexcept { return _frame; }
//! Returns function attributes. //! Returns function attributes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncAttributes attributes() const noexcept { return _frame.attributes(); } ASMJIT_INLINE_NODEBUG FuncAttributes attributes() const noexcept { return _frame.attributes(); }
//! Adds `attrs` to the function attributes. //! Adds `attrs` to the function attributes.
ASMJIT_INLINE_NODEBUG void addAttributes(FuncAttributes attrs) noexcept { _frame.addAttributes(attrs); } ASMJIT_INLINE_NODEBUG void addAttributes(FuncAttributes attrs) noexcept { _frame.addAttributes(attrs); }
//! Returns arguments count. //! Returns arguments count.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _funcDetail.argCount(); } ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
//! Returns argument packs. //! Returns argument packs.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ArgPack* argPacks() const noexcept { return _args; } ASMJIT_INLINE_NODEBUG ArgPack* argPacks() const noexcept { return _args; }
//! Tests whether the function has a return value. //! Tests whether the function has a return value.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return _funcDetail.hasRet(); } ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return _funcDetail.hasRet(); }
//! Returns argument pack at `argIndex`. //! Returns argument pack at `argIndex`.
[[nodiscard]]
inline ArgPack& argPack(size_t argIndex) const noexcept { inline ArgPack& argPack(size_t argIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount()); ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex]; return _args[argIndex];
@@ -557,12 +596,14 @@ public:
} }
//! Returns an operand at the given `valueIndex`. //! Returns an operand at the given `valueIndex`.
[[nodiscard]]
inline Operand& operator[](size_t valueIndex) noexcept { inline Operand& operator[](size_t valueIndex) noexcept {
ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack); ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
return _data[valueIndex].as<Operand>(); return _data[valueIndex].as<Operand>();
} }
//! Returns an operand at the given `valueIndex` (const). //! Returns an operand at the given `valueIndex` (const).
[[nodiscard]]
const inline Operand& operator[](size_t valueIndex) const noexcept { const inline Operand& operator[](size_t valueIndex) const noexcept {
ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack); ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
return _data[valueIndex].as<Operand>(); return _data[valueIndex].as<Operand>();
@@ -601,52 +642,74 @@ public:
//! \{ //! \{
//! Sets the function signature. //! Sets the function signature.
[[nodiscard]]
inline Error init(const FuncSignature& signature, const Environment& environment) noexcept { inline Error init(const FuncSignature& signature, const Environment& environment) noexcept {
return _funcDetail.init(signature, environment); return _funcDetail.init(signature, environment);
} }
//! Returns the function detail. //! Returns the function detail.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncDetail& detail() noexcept { return _funcDetail; } ASMJIT_INLINE_NODEBUG FuncDetail& detail() noexcept { return _funcDetail; }
//! Returns the function detail. //! Returns the function detail.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncDetail& detail() const noexcept { return _funcDetail; } ASMJIT_INLINE_NODEBUG const FuncDetail& detail() const noexcept { return _funcDetail; }
//! Returns the target operand. //! Returns the target operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Operand& target() noexcept { return op(0); } ASMJIT_INLINE_NODEBUG Operand& target() noexcept { return op(0); }
//! \overload //! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Operand& target() const noexcept { return op(0); } ASMJIT_INLINE_NODEBUG const Operand& target() const noexcept { return op(0); }
//! Returns the number of function return values. //! Returns the number of function return values.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return _funcDetail.hasRet(); } ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return _funcDetail.hasRet(); }
//! Returns the number of function arguments. //! Returns the number of function arguments.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _funcDetail.argCount(); } ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
//! Returns operand pack representing function return value(s). //! Returns operand pack representing function return value(s).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OperandPack& retPack() noexcept { return _rets; } ASMJIT_INLINE_NODEBUG OperandPack& retPack() noexcept { return _rets; }
//! Returns operand pack representing function return value(s). //! Returns operand pack representing function return value(s).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const OperandPack& retPack() const noexcept { return _rets; } ASMJIT_INLINE_NODEBUG const OperandPack& retPack() const noexcept { return _rets; }
//! Returns the return value at the given `valueIndex`. //! Returns the return value at the given `valueIndex`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Operand& ret(size_t valueIndex = 0) noexcept { return _rets[valueIndex]; } ASMJIT_INLINE_NODEBUG Operand& ret(size_t valueIndex = 0) noexcept { return _rets[valueIndex]; }
//! \overload //! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Operand& ret(size_t valueIndex = 0) const noexcept { return _rets[valueIndex]; } ASMJIT_INLINE_NODEBUG const Operand& ret(size_t valueIndex = 0) const noexcept { return _rets[valueIndex]; }
//! Returns operand pack representing function return value(s). //! Returns operand pack representing function return value(s).
[[nodiscard]]
inline OperandPack& argPack(size_t argIndex) noexcept { inline OperandPack& argPack(size_t argIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount()); ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex]; return _args[argIndex];
} }
//! \overload //! \overload
[[nodiscard]]
inline const OperandPack& argPack(size_t argIndex) const noexcept { inline const OperandPack& argPack(size_t argIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount()); ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex]; return _args[argIndex];
} }
//! Returns a function argument at the given `argIndex`. //! Returns a function argument at the given `argIndex`.
[[nodiscard]]
inline Operand& arg(size_t argIndex, size_t valueIndex) noexcept { inline Operand& arg(size_t argIndex, size_t valueIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount()); ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex][valueIndex]; return _args[argIndex][valueIndex];
} }
//! \overload //! \overload
[[nodiscard]]
inline const Operand& arg(size_t argIndex, size_t valueIndex) const noexcept { inline const Operand& arg(size_t argIndex, size_t valueIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount()); ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex][valueIndex]; return _args[argIndex][valueIndex];
@@ -680,7 +743,7 @@ public:
class ASMJIT_VIRTAPI FuncPass : public Pass { class ASMJIT_VIRTAPI FuncPass : public Pass {
public: public:
ASMJIT_NONCOPYABLE(FuncPass) ASMJIT_NONCOPYABLE(FuncPass)
typedef Pass Base; using Base = Pass;
//! \name Construction & Destruction //! \name Construction & Destruction
//! \{ //! \{
@@ -693,6 +756,7 @@ public:
//! \{ //! \{
//! Returns the associated `BaseCompiler`. //! Returns the associated `BaseCompiler`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseCompiler* cc() const noexcept { return static_cast<BaseCompiler*>(_cb); } ASMJIT_INLINE_NODEBUG BaseCompiler* cc() const noexcept { return static_cast<BaseCompiler*>(_cb); }
//! \} //! \}

View File

@@ -96,18 +96,27 @@ public:
//! \{ //! \{
//! Returns the virtual register id. //! Returns the virtual register id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _id; } ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _id; }
//! Returns the virtual register name. //! Returns the virtual register name.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name.data(); } ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name.data(); }
//! Returns the size of the virtual register name. //! Returns the size of the virtual register name.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t nameSize() const noexcept { return _name.size(); } ASMJIT_INLINE_NODEBUG uint32_t nameSize() const noexcept { return _name.size(); }
//! Returns a register signature of this virtual register. //! Returns a register signature of this virtual register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OperandSignature signature() const noexcept { return _signature; } ASMJIT_INLINE_NODEBUG OperandSignature signature() const noexcept { return _signature; }
//! Returns a virtual register type (maps to the physical register type as well). //! Returns a virtual register type (maps to the physical register type as well).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegType type() const noexcept { return _signature.regType(); } ASMJIT_INLINE_NODEBUG RegType type() const noexcept { return _signature.regType(); }
//! Returns a virtual register group (maps to the physical register group as well). //! Returns a virtual register group (maps to the physical register group as well).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegGroup group() const noexcept { return _signature.regGroup(); } ASMJIT_INLINE_NODEBUG RegGroup group() const noexcept { return _signature.regGroup(); }
//! Returns a real size of the register this virtual register maps to. //! Returns a real size of the register this virtual register maps to.
@@ -115,23 +124,29 @@ public:
//! For example if this is a 128-bit SIMD register used for a scalar single precision floating point value then //! For example if this is a 128-bit SIMD register used for a scalar single precision floating point value then
//! its virtSize would be 4, however, the `regSize` would still say 16 (128-bits), because it's the smallest size //! its virtSize would be 4, however, the `regSize` would still say 16 (128-bits), because it's the smallest size
//! of that register type. //! of that register type.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t regSize() const noexcept { return _signature.size(); } ASMJIT_INLINE_NODEBUG uint32_t regSize() const noexcept { return _signature.size(); }
//! Returns the virtual register size. //! Returns the virtual register size.
//! //!
//! The virtual register size describes how many bytes the virtual register needs to store its content. It can be //! The virtual register size describes how many bytes the virtual register needs to store its content. It can be
//! smaller than the physical register size, see `regSize()`. //! smaller than the physical register size, see `regSize()`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t virtSize() const noexcept { return _virtSize; } ASMJIT_INLINE_NODEBUG uint32_t virtSize() const noexcept { return _virtSize; }
//! Returns the virtual register alignment. //! Returns the virtual register alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; } ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; }
//! Returns the virtual register type id. //! Returns the virtual register type id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return _typeId; } ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return _typeId; }
//! Returns the virtual register weight - the register allocator can use it as an explicit hint for alloc/spill //! Returns the virtual register weight - the register allocator can use it as an explicit hint for alloc/spill
//! decisions. //! decisions.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t weight() const noexcept { return _weight; } ASMJIT_INLINE_NODEBUG uint32_t weight() const noexcept { return _weight; }
//! Sets the virtual register weight (0 to 255) - the register allocator can use it as an explicit hint for //! Sets the virtual register weight (0 to 255) - the register allocator can use it as an explicit hint for
//! alloc/spill decisions and initial bin-packing. //! alloc/spill decisions and initial bin-packing.
ASMJIT_INLINE_NODEBUG void setWeight(uint32_t weight) noexcept { _weight = uint8_t(weight); } ASMJIT_INLINE_NODEBUG void setWeight(uint32_t weight) noexcept { _weight = uint8_t(weight); }
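To illustrate the regSize() vs. virtSize() distinction documented above with concrete numbers (plain values, not the VirtReg class): a scalar float kept in a 128-bit SIMD register reports a 16-byte register size while only 4 bytes of content are live.

#include <cassert>
#include <cstdint>

struct SizeInfo {
  uint32_t regSize;   // smallest physical register size of that register type
  uint32_t virtSize;  // bytes actually needed to store the value
};

int main() {
  SizeInfo scalarFloatInVec128{16, 4};
  assert(scalarFloatInVec128.virtSize <= scalarFloatInVec128.regSize);
  return 0;
}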
@@ -139,17 +154,20 @@ public:
//! Returns whether the virtual register is always allocated to a fixed physical register (and never reallocated). //! Returns whether the virtual register is always allocated to a fixed physical register (and never reallocated).
//! //!
//! \note This is only used for special purposes and it's mostly internal. //! \note This is only used for special purposes and it's mostly internal.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFixed() const noexcept { return bool(_isFixed); } ASMJIT_INLINE_NODEBUG bool isFixed() const noexcept { return bool(_isFixed); }
//! Tests whether the virtual register is in fact a stack that only uses the virtual register id. //! Tests whether the virtual register is in fact a stack that only uses the virtual register id.
//! //!
//! \note It's an error if a stack is accessed as a register. //! \note It's an error if a stack is accessed as a register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isStack() const noexcept { return bool(_isStack); } ASMJIT_INLINE_NODEBUG bool isStack() const noexcept { return bool(_isStack); }
//! Tests whether this virtual register (or stack) has assigned a stack offset. //! Tests whether this virtual register (or stack) has assigned a stack offset.
//! //!
//! If this is a virtual register that was never allocated on stack, it would return false, otherwise if //! If this is a virtual register that was never allocated on stack, it would return false, otherwise if
//! it's a virtual register that was spilled or explicitly allocated stack, the return value would be true. //! it's a virtual register that was spilled or explicitly allocated stack, the return value would be true.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasStackSlot() const noexcept { return bool(_hasStackSlot); } ASMJIT_INLINE_NODEBUG bool hasStackSlot() const noexcept { return bool(_hasStackSlot); }
//! Assigns a stack offset of this virtual register to `stackOffset` and sets `_hasStackSlot` to true. //! Assigns a stack offset of this virtual register to `stackOffset` and sets `_hasStackSlot` to true.
@@ -159,9 +177,13 @@ public:
} }
//! Tests whether this virtual register has assigned a physical register as a hint to the register allocator. //! Tests whether this virtual register has assigned a physical register as a hint to the register allocator.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasHomeIdHint() const noexcept { return _homeIdHint != BaseReg::kIdBad; } ASMJIT_INLINE_NODEBUG bool hasHomeIdHint() const noexcept { return _homeIdHint != BaseReg::kIdBad; }
//! Returns a physical register hint, which will be used by the register allocator. //! Returns a physical register hint, which will be used by the register allocator.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t homeIdHint() const noexcept { return _homeIdHint; } ASMJIT_INLINE_NODEBUG uint32_t homeIdHint() const noexcept { return _homeIdHint; }
//! Assigns a physical register hint, which will be used by the register allocator. //! Assigns a physical register hint, which will be used by the register allocator.
ASMJIT_INLINE_NODEBUG void setHomeIdHint(uint32_t homeId) noexcept { _homeIdHint = uint8_t(homeId); } ASMJIT_INLINE_NODEBUG void setHomeIdHint(uint32_t homeId) noexcept { _homeIdHint = uint8_t(homeId); }
//! Resets a physical register hint. //! Resets a physical register hint.
@@ -171,14 +193,20 @@ public:
//! //!
//! \note Always verify that the stack offset has been assigned by calling \ref hasStackSlot(). The return //! \note Always verify that the stack offset has been assigned by calling \ref hasStackSlot(). The return
//! value will be zero when the stack offset was not assigned. //! value will be zero when the stack offset was not assigned.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int32_t stackOffset() const noexcept { return _stackOffset; } ASMJIT_INLINE_NODEBUG int32_t stackOffset() const noexcept { return _stackOffset; }
//! Tests whether the virtual register has an associated `RAWorkReg` at the moment. //! Tests whether the virtual register has an associated `RAWorkReg` at the moment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasWorkReg() const noexcept { return _workReg != nullptr; } ASMJIT_INLINE_NODEBUG bool hasWorkReg() const noexcept { return _workReg != nullptr; }
//! Returns an associated RAWorkReg with this virtual register (only valid during register allocation). //! Returns an associated RAWorkReg with this virtual register (only valid during register allocation).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAWorkReg* workReg() const noexcept { return _workReg; } ASMJIT_INLINE_NODEBUG RAWorkReg* workReg() const noexcept { return _workReg; }
//! Associates a RAWorkReg with this virtual register (used by register allocator). //! Associates a RAWorkReg with this virtual register (used by register allocator).
ASMJIT_INLINE_NODEBUG void setWorkReg(RAWorkReg* workReg) noexcept { _workReg = workReg; } ASMJIT_INLINE_NODEBUG void setWorkReg(RAWorkReg* workReg) noexcept { _workReg = workReg; }
//! Reset the RAWorkReg association (used by register allocator). //! Reset the RAWorkReg association (used by register allocator).
ASMJIT_INLINE_NODEBUG void resetWorkReg() noexcept { _workReg = nullptr; } ASMJIT_INLINE_NODEBUG void resetWorkReg() noexcept { _workReg = nullptr; }

View File

@@ -40,8 +40,9 @@ void ConstPool::reset(Zone* zone) noexcept {
static inline ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept { static inline ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
ConstPool::Gap* gap = self->_gapPool; ConstPool::Gap* gap = self->_gapPool;
if (!gap) if (!gap) {
return self->_zone->allocT<ConstPool::Gap>(); return self->_zone->allocT<ConstPool::Gap>();
}
self->_gapPool = gap->_next; self->_gapPool = gap->_next;
return gap; return gap;
@@ -87,8 +88,9 @@ static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexce
// We don't have to check for errors here, if this failed nothing really happened (just the gap won't be // We don't have to check for errors here, if this failed nothing really happened (just the gap won't be
// visible) and it will fail again at place where the same check would generate `kErrorOutOfMemory` error. // visible) and it will fail again at place where the same check would generate `kErrorOutOfMemory` error.
ConstPool::Gap* gap = ConstPool_allocGap(self); ConstPool::Gap* gap = ConstPool_allocGap(self);
if (!gap) if (!gap) {
return; return;
}
gap->_next = self->_gaps[gapIndex]; gap->_next = self->_gaps[gapIndex];
self->_gaps[gapIndex] = gap; self->_gaps[gapIndex] = gap;
@@ -102,24 +104,19 @@ static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexce
} }
Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept { Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept {
size_t treeIndex; constexpr size_t kMaxSize = size_t(1) << (kIndexCount - 1);
if (size == 64) // Avoid sizes outside of the supported range.
treeIndex = kIndex64; if (ASMJIT_UNLIKELY(size == 0 || size > kMaxSize)) {
else if (size == 32)
treeIndex = kIndex32;
else if (size == 16)
treeIndex = kIndex16;
else if (size == 8)
treeIndex = kIndex8;
else if (size == 4)
treeIndex = kIndex4;
else if (size == 2)
treeIndex = kIndex2;
else if (size == 1)
treeIndex = kIndex1;
else
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
size_t treeIndex = Support::ctz(size);
// Avoid sizes that are not a power of 2.
if (ASMJIT_UNLIKELY((size_t(1) << treeIndex) != size)) {
return DebugUtils::errored(kErrorInvalidArgument);
}
ConstPool::Node* node = _tree[treeIndex].get(data); ConstPool::Node* node = _tree[treeIndex].get(data);
if (node) { if (node) {
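A standalone sketch of the new size-to-tree-index mapping above (assuming kIndexCount == 7 so that sizes 1..64 map to indexes 0..6, matching kIndex1..kIndex64; the local ctz() stands in for Support::ctz()):

#include <cassert>
#include <cstddef>

static constexpr size_t kIndexCount = 7;                 // sizes 1..64 -> indexes 0..6
static constexpr size_t kMaxSize = size_t(1) << (kIndexCount - 1);

// Portable count-trailing-zeros for the demo only.
static size_t ctz(size_t x) {
  size_t n = 0;
  while ((x & 1u) == 0) { x >>= 1; n++; }
  return n;
}

static bool sizeToTreeIndex(size_t size, size_t* indexOut) {
  if (size == 0 || size > kMaxSize)
    return false;                                        // out of the supported range
  size_t index = ctz(size);
  if ((size_t(1) << index) != size)
    return false;                                        // not a power of 2
  *indexOut = index;
  return true;
}

int main() {
  size_t idx;
  assert(sizeToTreeIndex(1, &idx)  && idx == 0);         // kIndex1
  assert(sizeToTreeIndex(8, &idx)  && idx == 3);         // kIndex8
  assert(sizeToTreeIndex(64, &idx) && idx == 6);         // kIndex64
  assert(!sizeToTreeIndex(0, &idx));
  assert(!sizeToTreeIndex(24, &idx));                    // 24 is not a power of 2
  return 0;
}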
@@ -147,8 +144,9 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept
ASMJIT_ASSERT(Support::isAligned<size_t>(offset, size)); ASMJIT_ASSERT(Support::isAligned<size_t>(offset, size));
gapSize -= size; gapSize -= size;
if (gapSize > 0) if (gapSize > 0) {
ConstPool_addGap(this, gapOffset, gapSize); ConstPool_addGap(this, gapOffset, gapSize);
}
} }
gapIndex++; gapIndex++;
@@ -169,8 +167,9 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept
// Add the initial node to the right index. // Add the initial node to the right index.
node = ConstPool::Tree::_newNode(_zone, data, size, offset, false); node = ConstPool::Tree::_newNode(_zone, data, size, offset, false);
if (ASMJIT_UNLIKELY(!node)) if (ASMJIT_UNLIKELY(!node)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
_tree[treeIndex].insert(node); _tree[treeIndex].insert(node);
_alignment = Support::max<size_t>(_alignment, size); _alignment = Support::max<size_t>(_alignment, size);
@@ -192,18 +191,16 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept
const uint8_t* pData = static_cast<const uint8_t*>(data); const uint8_t* pData = static_cast<const uint8_t*>(data);
for (size_t i = 0; i < pCount; i++, pData += smallerSize) { for (size_t i = 0; i < pCount; i++, pData += smallerSize) {
node = _tree[treeIndex].get(pData); node = _tree[treeIndex].get(pData);
if (node) continue; if (node) {
continue;
}
node = ConstPool::Tree::_newNode(_zone, pData, smallerSize, offset + (i * smallerSize), true); node = ConstPool::Tree::_newNode(_zone, pData, smallerSize, offset + (i * smallerSize), true);
_tree[treeIndex].insert(node); _tree[treeIndex].insert(node);
} }
} }
if (_minItemSize == 0) _minItemSize = !_minItemSize ? size : Support::min(_minItemSize, size);
_minItemSize = size;
else
_minItemSize = Support::min(_minItemSize, size);
return kErrorOk; return kErrorOk;
} }
@@ -216,8 +213,9 @@ struct ConstPoolFill {
_dataSize(dataSize) {} _dataSize(dataSize) {}
inline void operator()(const ConstPool::Node* node) noexcept { inline void operator()(const ConstPool::Node* node) noexcept {
if (!node->_shared) if (!node->_shared) {
memcpy(_dst + node->_offset, node->data(), _dataSize); memcpy(_dst + node->_offset, node->data(), _dataSize);
}
} }
uint8_t* _dst; uint8_t* _dst;

View File

@@ -73,6 +73,7 @@ public:
_shared(shared), _shared(shared),
_offset(uint32_t(offset)) {} _offset(uint32_t(offset)) {}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG void* data() const noexcept { ASMJIT_INLINE_NODEBUG void* data() const noexcept {
return static_cast<void*>(const_cast<ConstPool::Node*>(this) + 1); return static_cast<void*>(const_cast<ConstPool::Node*>(this) + 1);
} }
@@ -86,10 +87,12 @@ public:
ASMJIT_INLINE_NODEBUG Compare(size_t dataSize) noexcept ASMJIT_INLINE_NODEBUG Compare(size_t dataSize) noexcept
: _dataSize(dataSize) {} : _dataSize(dataSize) {}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int operator()(const Node& a, const Node& b) const noexcept { ASMJIT_INLINE_NODEBUG int operator()(const Node& a, const Node& b) const noexcept {
return ::memcmp(a.data(), b.data(), _dataSize); return ::memcmp(a.data(), b.data(), _dataSize);
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int operator()(const Node& a, const void* data) const noexcept { ASMJIT_INLINE_NODEBUG int operator()(const Node& a, const void* data) const noexcept {
return ::memcmp(a.data(), data, _dataSize); return ::memcmp(a.data(), data, _dataSize);
} }
@@ -114,7 +117,10 @@ public:
_size = 0; _size = 0;
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; } ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; } ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
inline void setDataSize(size_t dataSize) noexcept { inline void setDataSize(size_t dataSize) noexcept {
@@ -122,6 +128,7 @@ public:
_dataSize = dataSize; _dataSize = dataSize;
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Node* get(const void* data) noexcept { ASMJIT_INLINE_NODEBUG Node* get(const void* data) noexcept {
Compare cmp(_dataSize); Compare cmp(_dataSize);
return _tree.get(data, cmp); return _tree.get(data, cmp);
@@ -166,6 +173,7 @@ public:
} }
} }
[[nodiscard]]
static inline Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept { static inline Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept {
Node* node = zone->allocT<Node>(Support::alignUp(sizeof(Node) + size, alignof(Node))); Node* node = zone->allocT<Node>(Support::alignUp(sizeof(Node) + size, alignof(Node)));
if (ASMJIT_UNLIKELY(!node)) return nullptr; if (ASMJIT_UNLIKELY(!node)) return nullptr;
@@ -221,12 +229,19 @@ public:
//! \{ //! \{
//! Tests whether the constant-pool is empty. //! Tests whether the constant-pool is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; } ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; }
//! Returns the size of the constant-pool in bytes. //! Returns the size of the constant-pool in bytes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; } ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
//! Returns minimum alignment. //! Returns minimum alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t alignment() const noexcept { return _alignment; } ASMJIT_INLINE_NODEBUG size_t alignment() const noexcept { return _alignment; }
//! Returns the minimum size of all items added to the constant pool. //! Returns the minimum size of all items added to the constant pool.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t minItemSize() const noexcept { return _minItemSize; } ASMJIT_INLINE_NODEBUG size_t minItemSize() const noexcept { return _minItemSize; }
//! \} //! \}

View File

@@ -58,13 +58,6 @@
#include <unistd.h> #include <unistd.h>
#endif #endif
// Unfortunately when compiling in C++11 mode MSVC would warn about unused functions as
// [[maybe_unused]] attribute is not used in that case (it's used only by C++17 mode and later).
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable: 4505) // unreferenced local function has been removed.
#endif // _MSC_VER
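The removed pragma block above existed only because C++11 builds could not use [[maybe_unused]]; a minimal standalone sketch (not library code) of how the C++17 attribute replaces it:

#include <cstdio>

[[maybe_unused]]
static void helperUsedOnlyOnSomePlatforms() {
  std::puts("only referenced behind #ifdef on some targets");
}

int main() {
  // The helper is intentionally not called; no unused-function warning
  // (GCC/Clang -Wunused-function, MSVC C4505) is expected thanks to [[maybe_unused]].
  return 0;
}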
ASMJIT_BEGIN_NAMESPACE ASMJIT_BEGIN_NAMESPACE
// CpuInfo - Detect - Compatibility // CpuInfo - Detect - Compatibility
@@ -128,7 +121,7 @@ static inline uint32_t detectHWThreadCount() noexcept {
namespace x86 { namespace x86 {
typedef CpuFeatures::X86 Ext; using Ext = CpuFeatures::X86;
struct cpuid_t { uint32_t eax, ebx, ecx, edx; }; struct cpuid_t { uint32_t eax, ebx, ecx, edx; };
struct xgetbv_t { uint32_t eax, edx; }; struct xgetbv_t { uint32_t eax, edx; };
@@ -190,9 +183,11 @@ static inline void simplifyCpuVendor(CpuInfo& cpu, uint32_t d0, uint32_t d1, uin
}; };
uint32_t i; uint32_t i;
for (i = 0; i < ASMJIT_ARRAY_SIZE(table) - 1; i++) for (i = 0; i < ASMJIT_ARRAY_SIZE(table) - 1; i++) {
if (table[i].d[0] == d0 && table[i].d[1] == d1 && table[i].d[2] == d2) if (table[i].d[0] == d0 && table[i].d[1] == d1 && table[i].d[2] == d2) {
break; break;
}
}
memcpy(cpu._vendor.str, table[i].normalized, 8); memcpy(cpu._vendor.str, table[i].normalized, 8);
} }
@@ -207,8 +202,9 @@ static ASMJIT_FAVOR_SIZE void simplifyCpuBrand(char* s) noexcept {
s[0] = '\0'; s[0] = '\0';
for (;;) { for (;;) {
if (!c) if (!c) {
break; break;
}
if (!(c == ' ' && (prev == '@' || s[1] == ' ' || s[1] == '@' || s[1] == '\0'))) { if (!(c == ' ' && (prev == '@' || s[1] == ' ' || s[1] == '@' || s[1] == '\0'))) {
*d++ = c; *d++ = c;
@@ -258,11 +254,13 @@ static ASMJIT_FAVOR_SIZE void detectX86Cpu(CpuInfo& cpu) noexcept {
uint32_t familyId = (regs.eax >> 8) & 0x0F; uint32_t familyId = (regs.eax >> 8) & 0x0F;
// Use extended family and model fields. // Use extended family and model fields.
if (familyId == 0x06u || familyId == 0x0Fu) if (familyId == 0x06u || familyId == 0x0Fu) {
modelId += (((regs.eax >> 16) & 0x0Fu) << 4); modelId += (((regs.eax >> 16) & 0x0Fu) << 4);
}
if (familyId == 0x0Fu) if (familyId == 0x0Fu) {
familyId += ((regs.eax >> 20) & 0xFFu); familyId += ((regs.eax >> 20) & 0xFFu);
}
cpu._modelId = modelId; cpu._modelId = modelId;
cpu._familyId = familyId; cpu._familyId = familyId;
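A standalone sketch of the family/model decoding above, with one worked value (decodeFamilyModel() is illustrative; the base model field in bits [7:4] of EAX is assumed from the standard CPUID leaf-1 layout, it is not shown in this hunk):

#include <cassert>
#include <cstdint>

struct FamilyModel { uint32_t familyId, modelId; };

static FamilyModel decodeFamilyModel(uint32_t eax) {
  uint32_t modelId  = (eax >> 4) & 0x0Fu;
  uint32_t familyId = (eax >> 8) & 0x0Fu;

  // Extended model applies to families 0x6 and 0xF, extended family only to 0xF.
  if (familyId == 0x06u || familyId == 0x0Fu)
    modelId += ((eax >> 16) & 0x0Fu) << 4;
  if (familyId == 0x0Fu)
    familyId += (eax >> 20) & 0xFFu;

  return FamilyModel{familyId, modelId};
}

int main() {
  // Hypothetical leaf-1 EAX: base family 0x6, base model 0xA, extended model 0x9.
  FamilyModel fm = decodeFamilyModel(0x000906A0u);
  assert(fm.familyId == 0x6 && fm.modelId == 0x9A);
  return 0;
}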
@@ -621,7 +619,7 @@ static ASMJIT_FAVOR_SIZE void detectX86Cpu(CpuInfo& cpu) noexcept {
namespace arm { namespace arm {
// ARM commonly refers to CPU features using FEAT_ prefix, we use Ext:: to make it compatible with other parts. // ARM commonly refers to CPU features using FEAT_ prefix, we use Ext:: to make it compatible with other parts.
typedef CpuFeatures::ARM Ext; using Ext = CpuFeatures::ARM;
// CpuInfo - Detect - ARM - OS Kernel Version // CpuInfo - Detect - ARM - OS Kernel Version
// ========================================== // ==========================================
@@ -632,14 +630,12 @@ struct UNameKernelVersion {
inline bool atLeast(int major, int minor, int patch = 0) const noexcept { inline bool atLeast(int major, int minor, int patch = 0) const noexcept {
if (parts[0] >= major) { if (parts[0] >= major) {
if (parts[0] > major) if (parts[0] > major) {
return true; return true;
}
if (parts[1] >= minor) { if (parts[1] >= minor) {
if (parts[1] > minor) return parts[1] > minor ? true : parts[2] >= patch;
return true;
return parts[2] >= patch;
} }
} }
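A standalone sketch with the same comparison semantics as atLeast() above (a plain struct, not the library type), showing how major, minor, and patch are ordered:

#include <cassert>

struct KernelVersion {
  int parts[3];
  bool atLeast(int major, int minor, int patch = 0) const {
    if (parts[0] < major) return false;
    if (parts[0] > major) return true;
    if (parts[1] < minor) return false;
    return parts[1] > minor ? true : parts[2] >= patch;
  }
};

int main() {
  KernelVersion v{{5, 10, 3}};
  assert(v.atLeast(5, 6));        // newer minor
  assert(v.atLeast(5, 10, 3));    // exact match
  assert(!v.atLeast(5, 10, 4));   // older patch
  assert(!v.atLeast(6, 0));       // older major
  return 0;
}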
@@ -647,14 +643,15 @@ struct UNameKernelVersion {
} }
}; };
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static UNameKernelVersion getUNameKernelVersion() noexcept { static UNameKernelVersion getUNameKernelVersion() noexcept {
UNameKernelVersion ver{}; UNameKernelVersion ver{};
ver.parts[0] = -1; ver.parts[0] = -1;
utsname buffer; utsname buffer;
if (uname(&buffer) != 0) if (uname(&buffer) != 0) {
return ver; return ver;
}
size_t count = 0; size_t count = 0;
char* p = buffer.release; char* p = buffer.release;
@@ -662,8 +659,9 @@ static UNameKernelVersion getUNameKernelVersion() noexcept {
uint32_t c = uint8_t(*p); uint32_t c = uint8_t(*p);
if (c >= uint32_t('0') && c <= uint32_t('9')) { if (c >= uint32_t('0') && c <= uint32_t('9')) {
ver.parts[count] = int(strtol(p, &p, 10)); ver.parts[count] = int(strtol(p, &p, 10));
if (++count == 3) if (++count == 3) {
break; break;
}
} }
else if (c == '.' || c == '-') { else if (c == '.' || c == '-') {
p++; p++;
@@ -680,13 +678,13 @@ static UNameKernelVersion getUNameKernelVersion() noexcept {
// CpuInfo - Detect - ARM - Baseline Features of ARM Architectures // CpuInfo - Detect - ARM - Baseline Features of ARM Architectures
// =============================================================== // ===============================================================
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static inline void populateBaseAArch32Features(CpuFeatures::ARM& features) noexcept { static inline void populateBaseAArch32Features(CpuFeatures::ARM& features) noexcept {
// No baseline flags at the moment. // No baseline flags at the moment.
DebugUtils::unused(features); DebugUtils::unused(features);
} }
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static inline void populateBaseAArch64Features(CpuFeatures::ARM& features) noexcept { static inline void populateBaseAArch64Features(CpuFeatures::ARM& features) noexcept {
// AArch64 is based on ARMv8.0 and later. // AArch64 is based on ARMv8.0 and later.
features.add(Ext::kARMv6); features.add(Ext::kARMv6);
@@ -711,40 +709,40 @@ static inline void populateBaseARMFeatures(CpuInfo& cpu) noexcept {
// ================================================================ // ================================================================
// Populates mandatory ARMv8.[v]A features. // Populates mandatory ARMv8.[v]A features.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static ASMJIT_FAVOR_SIZE void populateARMv8AFeatures(CpuFeatures::ARM& features, uint32_t v) noexcept { static ASMJIT_FAVOR_SIZE void populateARMv8AFeatures(CpuFeatures::ARM& features, uint32_t v) noexcept {
switch (v) { switch (v) {
default: default:
ASMJIT_FALLTHROUGH; [[fallthrough]];
case 9: // ARMv8.9 case 9: // ARMv8.9
features.add(Ext::kCLRBHB, Ext::kCSSC, Ext::kPRFMSLC, Ext::kSPECRES2, Ext::kRAS2); features.add(Ext::kCLRBHB, Ext::kCSSC, Ext::kPRFMSLC, Ext::kSPECRES2, Ext::kRAS2);
ASMJIT_FALLTHROUGH; [[fallthrough]];
case 8: // ARMv8.8 case 8: // ARMv8.8
features.add(Ext::kHBC, Ext::kMOPS, Ext::kNMI); features.add(Ext::kHBC, Ext::kMOPS, Ext::kNMI);
ASMJIT_FALLTHROUGH; [[fallthrough]];
case 7: // ARMv8.7 case 7: // ARMv8.7
features.add(Ext::kHCX, Ext::kPAN3, Ext::kWFXT, Ext::kXS); features.add(Ext::kHCX, Ext::kPAN3, Ext::kWFXT, Ext::kXS);
ASMJIT_FALLTHROUGH; [[fallthrough]];
case 6: // ARMv8.6 case 6: // ARMv8.6
features.add(Ext::kAMU1_1, Ext::kBF16, Ext::kECV, Ext::kFGT, Ext::kI8MM); features.add(Ext::kAMU1_1, Ext::kBF16, Ext::kECV, Ext::kFGT, Ext::kI8MM);
ASMJIT_FALLTHROUGH; [[fallthrough]];
case 5: // ARMv8.5 case 5: // ARMv8.5
features.add(Ext::kBTI, Ext::kCSV2, Ext::kDPB2, Ext::kFLAGM2, Ext::kFRINTTS, Ext::kSB, Ext::kSPECRES, Ext::kSSBS); features.add(Ext::kBTI, Ext::kCSV2, Ext::kDPB2, Ext::kFLAGM2, Ext::kFRINTTS, Ext::kSB, Ext::kSPECRES, Ext::kSSBS);
ASMJIT_FALLTHROUGH; [[fallthrough]];
case 4: // ARMv8.4 case 4: // ARMv8.4
features.add(Ext::kAMU1, Ext::kDIT, Ext::kDOTPROD, Ext::kFLAGM, features.add(Ext::kAMU1, Ext::kDIT, Ext::kDOTPROD, Ext::kFLAGM,
Ext::kLRCPC2, Ext::kLSE2, Ext::kMPAM, Ext::kNV, Ext::kLRCPC2, Ext::kLSE2, Ext::kMPAM, Ext::kNV,
Ext::kSEL2, Ext::kTLBIOS, Ext::kTLBIRANGE, Ext::kTRF); Ext::kSEL2, Ext::kTLBIOS, Ext::kTLBIRANGE, Ext::kTRF);
ASMJIT_FALLTHROUGH; [[fallthrough]];
case 3: // ARMv8.3 case 3: // ARMv8.3
features.add(Ext::kCCIDX, Ext::kFCMA, Ext::kJSCVT, Ext::kLRCPC, Ext::kPAUTH); features.add(Ext::kCCIDX, Ext::kFCMA, Ext::kJSCVT, Ext::kLRCPC, Ext::kPAUTH);
ASMJIT_FALLTHROUGH; [[fallthrough]];
case 2: // ARMv8.2 case 2: // ARMv8.2
features.add(Ext::kDPB, Ext::kPAN2, Ext::kRAS, Ext::kUAO); features.add(Ext::kDPB, Ext::kPAN2, Ext::kRAS, Ext::kUAO);
ASMJIT_FALLTHROUGH; [[fallthrough]];
case 1: // ARMv8.1 case 1: // ARMv8.1
features.add(Ext::kCRC32, Ext::kLOR, Ext::kLSE, Ext::kPAN, Ext::kRDM, Ext::kVHE); features.add(Ext::kCRC32, Ext::kLOR, Ext::kLSE, Ext::kPAN, Ext::kRDM, Ext::kVHE);
ASMJIT_FALLTHROUGH; [[fallthrough]];
case 0: // ARMv8.0 case 0: // ARMv8.0
features.add(Ext::kASIMD, Ext::kFP, Ext::kIDIVA, Ext::kVFP_D32); features.add(Ext::kASIMD, Ext::kFP, Ext::kIDIVA, Ext::kVFP_D32);
break; break;
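A compilable sketch of the cumulative switch/[[fallthrough]] pattern used above: asking for version v yields the features of v and of every lower version (the three feature names are placeholders, not the real Ext:: lists):

#include <cassert>
#include <set>
#include <string>

static std::set<std::string> armv8Features(unsigned v) {
  std::set<std::string> f;
  switch (v) {
    default:
      [[fallthrough]];
    case 2:
      f.insert("DPB");       // stand-in for the ARMv8.2 additions
      [[fallthrough]];
    case 1:
      f.insert("LSE");       // stand-in for the ARMv8.1 additions
      [[fallthrough]];
    case 0:
      f.insert("ASIMD");     // stand-in for the ARMv8.0 baseline
      break;
  }
  return f;
}

int main() {
  assert(armv8Features(0).count("ASIMD") == 1);
  assert(armv8Features(0).count("LSE") == 0);
  // Version 2 also includes everything from 8.0 and 8.1.
  auto f2 = armv8Features(2);
  assert(f2.count("DPB") && f2.count("LSE") && f2.count("ASIMD"));
  return 0;
}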
@@ -752,21 +750,21 @@ static ASMJIT_FAVOR_SIZE void populateARMv8AFeatures(CpuFeatures::ARM& features,
} }
// Populates mandatory ARMv9.[v] features. // Populates mandatory ARMv9.[v] features.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static ASMJIT_FAVOR_SIZE void populateARMv9AFeatures(CpuFeatures::ARM& features, uint32_t v) noexcept { static ASMJIT_FAVOR_SIZE void populateARMv9AFeatures(CpuFeatures::ARM& features, uint32_t v) noexcept {
populateARMv8AFeatures(features, v <= 4u ? 5u + v : 9u); populateARMv8AFeatures(features, v <= 4u ? 5u + v : 9u);
switch (v) { switch (v) {
default: default:
ASMJIT_FALLTHROUGH; [[fallthrough]];
case 4: // ARMv9.4 - based on ARMv8.9. case 4: // ARMv9.4 - based on ARMv8.9.
ASMJIT_FALLTHROUGH; [[fallthrough]];
case 3: // ARMv9.3 - based on ARMv8.8. case 3: // ARMv9.3 - based on ARMv8.8.
ASMJIT_FALLTHROUGH; [[fallthrough]];
case 2: // ARMv9.2 - based on ARMv8.7. case 2: // ARMv9.2 - based on ARMv8.7.
ASMJIT_FALLTHROUGH; [[fallthrough]];
case 1: // ARMv9.1 - based on ARMv8.6. case 1: // ARMv9.1 - based on ARMv8.6.
ASMJIT_FALLTHROUGH; [[fallthrough]];
case 0: // ARMv9.0 - based on ARMv8.5. case 0: // ARMv9.0 - based on ARMv8.5.
features.add(Ext::kRME, Ext::kSVE, Ext::kSVE2); features.add(Ext::kRME, Ext::kSVE, Ext::kSVE2);
break; break;
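The call into populateARMv8AFeatures() above encodes that ARMv9.v is based on ARMv8.(5+v), clamped at ARMv8.9; a tiny worked example of that arithmetic (the helper name is illustrative):

#include <cassert>
#include <cstdint>

static uint32_t armv8BaseOfArmv9(uint32_t v) {
  return v <= 4u ? 5u + v : 9u;
}

int main() {
  assert(armv8BaseOfArmv9(0) == 5);  // ARMv9.0 is based on ARMv8.5
  assert(armv8BaseOfArmv9(2) == 7);  // ARMv9.2 is based on ARMv8.7
  assert(armv8BaseOfArmv9(6) == 9);  // clamped to ARMv8.9
  return 0;
}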
@@ -780,44 +778,45 @@ static ASMJIT_FAVOR_SIZE void populateARMv9AFeatures(CpuFeatures::ARM& features,
// of the registers so it's an implementation that can theoretically be tested / used in mocks. // of the registers so it's an implementation that can theoretically be tested / used in mocks.
// Merges a feature whose field reads 0b1111 when the feature doesn't exist and starts at 0b0000 when it does. // Merges a feature whose field reads 0b1111 when the feature doesn't exist and starts at 0b0000 when it does.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeatureNA(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, static ASMJIT_INLINE void mergeAArch64CPUIDFeatureNA(
CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset,
Ext::Id f0, Ext::Id f0,
Ext::Id f1 = Ext::kNone, Ext::Id f1 = Ext::kNone,
Ext::Id f2 = Ext::kNone, Ext::Id f2 = Ext::kNone,
Ext::Id f3 = Ext::kNone) noexcept { Ext::Id f3 = Ext::kNone) noexcept {
uint32_t val = uint32_t((regBits >> offset) & 0xFu); uint32_t val = uint32_t((regBits >> offset) & 0xFu);
if (val == 0xFu) {
// If val == 0b1111 then the feature is not implemented in this case (some early extensions). // If val == 0b1111 then the feature is not implemented in this case (some early extensions).
if (val == 0xFu)
return; return;
}
if (f0 != Ext::kNone) features.add(f0); features.addIf(f0 != Ext::kNone, f0);
if (f1 != Ext::kNone) features.addIf(val >= 1, f1); features.addIf(f1 != Ext::kNone && val >= 1, f1);
if (f2 != Ext::kNone) features.addIf(val >= 2, f2); features.addIf(f2 != Ext::kNone && val >= 2, f2);
if (f3 != Ext::kNone) features.addIf(val >= 3, f3); features.addIf(f3 != Ext::kNone && val >= 3, f3);
} }
// Merges a feature identified by a single bit at `offset`. // Merges a feature identified by a single bit at `offset`.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature1B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, Ext::Id f1) noexcept { static ASMJIT_INLINE void mergeAArch64CPUIDFeature1B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, Ext::Id f1) noexcept {
features.addIf((regBits & (uint64_t(1) << offset)) != 0, f1); features.addIf((regBits & (uint64_t(1) << offset)) != 0, f1);
} }
// Merges a feature-list starting from 0b01 when it does (0b00 means feature not supported). // Merges a feature-list starting from 0b01 when it does (0b00 means feature not supported).
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature2B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, Ext::Id f1, Ext::Id f2, Ext::Id f3) noexcept { static ASMJIT_INLINE void mergeAArch64CPUIDFeature2B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, Ext::Id f1, Ext::Id f2, Ext::Id f3) noexcept {
uint32_t val = uint32_t((regBits >> offset) & 0x3u); uint32_t val = uint32_t((regBits >> offset) & 0x3u);
if (f1 != Ext::kNone) features.addIf(val >= 1, f1); features.addIf(f1 != Ext::kNone && val >= 1, f1);
if (f2 != Ext::kNone) features.addIf(val >= 2, f2); features.addIf(f2 != Ext::kNone && val >= 2, f2);
if (f3 != Ext::kNone) features.addIf(val == 3, f3); features.addIf(f3 != Ext::kNone && val == 3, f3);
} }
// Merges a feature-list starting from 0b0001 when it does (0b0000 means feature not supported). // Merges a feature-list starting from 0b0001 when it does (0b0000 means feature not supported).
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature4B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, static ASMJIT_INLINE void mergeAArch64CPUIDFeature4B(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset,
Ext::Id f1, Ext::Id f1,
Ext::Id f2 = Ext::kNone, Ext::Id f2 = Ext::kNone,
Ext::Id f3 = Ext::kNone, Ext::Id f3 = Ext::kNone,
@@ -826,16 +825,15 @@ static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature4B(CpuFeatures::ARM& fea
   uint32_t val = uint32_t((regBits >> offset) & 0xFu);
   // if val == 0 it means that this feature is not supported.
-  if (f1 != Ext::kNone) features.addIf(val >= 1, f1);
-  if (f2 != Ext::kNone) features.addIf(val >= 2, f2);
-  if (f3 != Ext::kNone) features.addIf(val >= 3, f3);
-  if (f4 != Ext::kNone) features.addIf(val >= 4, f4);
+  features.addIf(f1 != Ext::kNone && val >= 1, f1);
+  features.addIf(f2 != Ext::kNone && val >= 2, f2);
+  features.addIf(f3 != Ext::kNone && val >= 3, f3);
+  features.addIf(f4 != Ext::kNone && val >= 4, f4);
 }
// Merges a feature that is identified by an exact bit-combination of 4 bits. // Merges a feature that is identified by an exact bit-combination of 4 bits.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature4S(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, uint32_t value, Ext::Id f1) noexcept { static ASMJIT_INLINE void mergeAArch64CPUIDFeature4S(CpuFeatures::ARM& features, uint64_t regBits, uint32_t offset, uint32_t value, Ext::Id f1) noexcept {
features.addIf(uint32_t((regBits >> offset) & 0xFu) == value, f1); features.addIf(uint32_t((regBits >> offset) & 0xFu) == value, f1);
} }
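All of these helpers follow the same ID-register convention: a small field at a fixed bit offset whose increasing values imply progressively larger feature sets. A standalone sketch of the thresholded mapping, using a plain bitmask instead of CpuFeatures::ARM:

    #include <cstdint>

    // Sketch: a 4-bit ID-register field where value N implies features 1..N.
    static inline void mergeFeature4B(uint64_t regBits, uint32_t offset,
                                      uint64_t f1, uint64_t f2, uint64_t f3, uint64_t f4,
                                      uint64_t& featureMask) noexcept {
      uint32_t val = uint32_t((regBits >> offset) & 0xFu);
      if (val >= 1) featureMask |= f1;
      if (val >= 2) featureMask |= f2;
      if (val >= 3) featureMask |= f3;
      if (val >= 4) featureMask |= f4;
    }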
@@ -846,7 +844,7 @@ static ASMJIT_FORCE_INLINE void mergeAArch64CPUIDFeature4S(CpuFeatures::ARM& fea
#define MERGE_FEATURE_4S(identifier, reg, offset, ...) mergeAArch64CPUIDFeature4S(cpu.features().arm(), reg, offset, __VA_ARGS__) #define MERGE_FEATURE_4S(identifier, reg, offset, ...) mergeAArch64CPUIDFeature4S(cpu.features().arm(), reg, offset, __VA_ARGS__)
// Detects features based on the content of ID_AA64PFR0_EL1 and ID_AA64PFR1_EL1 registers. // Detects features based on the content of ID_AA64PFR0_EL1 and ID_AA64PFR1_EL1 registers.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(CpuInfo& cpu, uint64_t fpr0, uint64_t fpr1) noexcept { static inline void detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(CpuInfo& cpu, uint64_t fpr0, uint64_t fpr1) noexcept {
// ID_AA64PFR0_EL1 // ID_AA64PFR0_EL1
// =============== // ===============
@@ -911,12 +909,13 @@ static inline void detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(CpuInfo& cpu,
uint32_t mpamMain = uint32_t((fpr0 >> 40) & 0xFu); uint32_t mpamMain = uint32_t((fpr0 >> 40) & 0xFu);
uint32_t mpamFrac = uint32_t((fpr1 >> 16) & 0xFu); uint32_t mpamFrac = uint32_t((fpr1 >> 16) & 0xFu);
if (mpamMain || mpamFrac) if (mpamMain || mpamFrac) {
cpu.features().arm().add(Ext::kMPAM); cpu.features().arm().add(Ext::kMPAM);
}
} }
// Detects features based on the content of ID_AA64ISAR0_EL1 and ID_AA64ISAR1_EL1 registers. // Detects features based on the content of ID_AA64ISAR0_EL1 and ID_AA64ISAR1_EL1 registers.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(CpuInfo& cpu, uint64_t isar0, uint64_t isar1) noexcept { static inline void detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(CpuInfo& cpu, uint64_t isar0, uint64_t isar1) noexcept {
// ID_AA64ISAR0_EL1 // ID_AA64ISAR0_EL1
// ================ // ================
@@ -965,7 +964,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(CpuInfo& cp
} }
// Detects features based on the content of ID_AA64ISAR2_EL1 register. // Detects features based on the content of ID_AA64ISAR2_EL1 register.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64ISAR2(CpuInfo& cpu, uint64_t isar2) noexcept { static inline void detectAArch64FeaturesViaCPUID_AA64ISAR2(CpuInfo& cpu, uint64_t isar2) noexcept {
MERGE_FEATURE_4B("WFxT bits [3:0]" , isar2, 0, Ext::kNone, Ext::kWFXT); MERGE_FEATURE_4B("WFxT bits [3:0]" , isar2, 0, Ext::kNone, Ext::kWFXT);
MERGE_FEATURE_4B("RPRES bits [7:4]" , isar2, 4, Ext::kRPRES); MERGE_FEATURE_4B("RPRES bits [7:4]" , isar2, 4, Ext::kRPRES);
@@ -988,7 +987,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64ISAR2(CpuInfo& cpu, uint64_
// TODO: This register is not accessed at the moment. // TODO: This register is not accessed at the moment.
#if 0 #if 0
// Detects features based on the content of ID_AA64ISAR3_EL1register. // Detects features based on the content of ID_AA64ISAR3_EL1register.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64ISAR3(CpuInfo& cpu, uint64_t isar3) noexcept { static inline void detectAArch64FeaturesViaCPUID_AA64ISAR3(CpuInfo& cpu, uint64_t isar3) noexcept {
// ID_AA64ISAR3_EL1 // ID_AA64ISAR3_EL1
// ================ // ================
@@ -999,7 +998,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64ISAR3(CpuInfo& cpu, uint64_
} }
#endif #endif
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64MMFR0(CpuInfo& cpu, uint64_t mmfr0) noexcept { static inline void detectAArch64FeaturesViaCPUID_AA64MMFR0(CpuInfo& cpu, uint64_t mmfr0) noexcept {
// ID_AA64MMFR0_EL1 // ID_AA64MMFR0_EL1
// ================ // ================
@@ -1022,7 +1021,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64MMFR0(CpuInfo& cpu, uint64_
MERGE_FEATURE_4B("ECV bits [63:60]" , mmfr0, 60, Ext::kECV); MERGE_FEATURE_4B("ECV bits [63:60]" , mmfr0, 60, Ext::kECV);
} }
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64MMFR1(CpuInfo& cpu, uint64_t mmfr1) noexcept { static inline void detectAArch64FeaturesViaCPUID_AA64MMFR1(CpuInfo& cpu, uint64_t mmfr1) noexcept {
// ID_AA64MMFR1_EL1 // ID_AA64MMFR1_EL1
// ================ // ================
@@ -1051,7 +1050,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64MMFR1(CpuInfo& cpu, uint64_
MERGE_FEATURE_4B("ECBHB bits [63:60]" , mmfr1, 60, Ext::kECBHB); MERGE_FEATURE_4B("ECBHB bits [63:60]" , mmfr1, 60, Ext::kECBHB);
} }
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64MMFR2(CpuInfo& cpu, uint64_t mmfr2) noexcept { static inline void detectAArch64FeaturesViaCPUID_AA64MMFR2(CpuInfo& cpu, uint64_t mmfr2) noexcept {
// ID_AA64MMFR2_EL1 // ID_AA64MMFR2_EL1
// ================ // ================
@@ -1082,7 +1081,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64MMFR2(CpuInfo& cpu, uint64_
} }
// Detects features based on the content of ID_AA64ZFR0_EL1 register. // Detects features based on the content of ID_AA64ZFR0_EL1 register.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64ZFR0(CpuInfo& cpu, uint64_t zfr0) noexcept { static inline void detectAArch64FeaturesViaCPUID_AA64ZFR0(CpuInfo& cpu, uint64_t zfr0) noexcept {
MERGE_FEATURE_4B("SVEver bits [3:0]" , zfr0, 0, Ext::kSVE2, Ext::kSVE2_1); MERGE_FEATURE_4B("SVEver bits [3:0]" , zfr0, 0, Ext::kSVE2, Ext::kSVE2_1);
MERGE_FEATURE_4B("AES bits [7:4]" , zfr0, 4, Ext::kSVE_AES, Ext::kSVE_PMULL128); MERGE_FEATURE_4B("AES bits [7:4]" , zfr0, 4, Ext::kSVE_AES, Ext::kSVE_PMULL128);
@@ -1096,7 +1095,7 @@ static inline void detectAArch64FeaturesViaCPUID_AA64ZFR0(CpuInfo& cpu, uint64_t
MERGE_FEATURE_4B("F64MM bits [59:56]" , zfr0, 56, Ext::kSVE_F64MM); MERGE_FEATURE_4B("F64MM bits [59:56]" , zfr0, 56, Ext::kSVE_F64MM);
} }
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static inline void detectAArch64FeaturesViaCPUID_AA64SMFR0(CpuInfo& cpu, uint64_t smfr0) noexcept { static inline void detectAArch64FeaturesViaCPUID_AA64SMFR0(CpuInfo& cpu, uint64_t smfr0) noexcept {
MERGE_FEATURE_1B("SF8DP2 bit [28]" , smfr0, 29, Ext::kSSVE_FP8DOT2); MERGE_FEATURE_1B("SF8DP2 bit [28]" , smfr0, 29, Ext::kSSVE_FP8DOT2);
MERGE_FEATURE_1B("SF8DP4 bit [29]" , smfr0, 29, Ext::kSSVE_FP8DOT4); MERGE_FEATURE_1B("SF8DP4 bit [29]" , smfr0, 29, Ext::kSSVE_FP8DOT4);
@@ -1143,9 +1142,9 @@ enum class AppleFamilyId : uint32_t {
kEVEREST_SAWTOOTH = 0X8765EDEAu // Apple A16. kEVEREST_SAWTOOTH = 0X8765EDEAu // Apple A16.
}; };
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static ASMJIT_FAVOR_SIZE bool detectARMFeaturesViaAppleFamilyId(CpuInfo& cpu) noexcept { static ASMJIT_FAVOR_SIZE bool detectARMFeaturesViaAppleFamilyId(CpuInfo& cpu) noexcept {
typedef AppleFamilyId Id; using Id = AppleFamilyId;
CpuFeatures::ARM& features = cpu.features().arm(); CpuFeatures::ARM& features = cpu.features().arm();
switch (cpu.familyId()) { switch (cpu.familyId()) {
@@ -1219,7 +1218,7 @@ static ASMJIT_FAVOR_SIZE bool detectARMFeaturesViaAppleFamilyId(CpuInfo& cpu) no
// target it was compiled to. // target it was compiled to.
#if ASMJIT_ARCH_ARM == 32 #if ASMJIT_ARCH_ARM == 32
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static ASMJIT_FAVOR_SIZE void detectAArch32FeaturesViaCompilerFlags(CpuInfo& cpu) noexcept { static ASMJIT_FAVOR_SIZE void detectAArch32FeaturesViaCompilerFlags(CpuInfo& cpu) noexcept {
DebugUtils::unused(cpu); DebugUtils::unused(cpu);
@@ -1257,7 +1256,7 @@ static ASMJIT_FAVOR_SIZE void detectAArch32FeaturesViaCompilerFlags(CpuInfo& cpu
#endif // ASMJIT_ARCH_ARM == 32 #endif // ASMJIT_ARCH_ARM == 32
#if ASMJIT_ARCH_ARM == 64 #if ASMJIT_ARCH_ARM == 64
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static ASMJIT_FAVOR_SIZE void detectAArch64FeaturesViaCompilerFlags(CpuInfo& cpu) noexcept { static ASMJIT_FAVOR_SIZE void detectAArch64FeaturesViaCompilerFlags(CpuInfo& cpu) noexcept {
DebugUtils::unused(cpu); DebugUtils::unused(cpu);
@@ -1413,7 +1412,7 @@ static ASMJIT_FAVOR_SIZE void detectAArch64FeaturesViaCompilerFlags(CpuInfo& cpu
} }
#endif // ASMJIT_ARCH_ARM == 64 #endif // ASMJIT_ARCH_ARM == 64
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static ASMJIT_FAVOR_SIZE void detectARMFeaturesViaCompilerFlags(CpuInfo& cpu) noexcept { static ASMJIT_FAVOR_SIZE void detectARMFeaturesViaCompilerFlags(CpuInfo& cpu) noexcept {
#if ASMJIT_ARCH_ARM == 32 #if ASMJIT_ARCH_ARM == 32
detectAArch32FeaturesViaCompilerFlags(cpu); detectAArch32FeaturesViaCompilerFlags(cpu);
@@ -1426,7 +1425,7 @@ static ASMJIT_FAVOR_SIZE void detectARMFeaturesViaCompilerFlags(CpuInfo& cpu) no
// ===================================================== // =====================================================
// Postprocesses AArch32 features. // Postprocesses AArch32 features.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static ASMJIT_FAVOR_SIZE void postProcessAArch32Features(CpuFeatures::ARM& features) noexcept { static ASMJIT_FAVOR_SIZE void postProcessAArch32Features(CpuFeatures::ARM& features) noexcept {
DebugUtils::unused(features); DebugUtils::unused(features);
} }
@@ -1434,22 +1433,26 @@ static ASMJIT_FAVOR_SIZE void postProcessAArch32Features(CpuFeatures::ARM& featu
// Postprocesses AArch64 features. // Postprocesses AArch64 features.
// //
// The only reason to use this function is to deduce some flags from others. // The only reason to use this function is to deduce some flags from others.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static ASMJIT_FAVOR_SIZE void postProcessAArch64Features(CpuFeatures::ARM& features) noexcept { static ASMJIT_FAVOR_SIZE void postProcessAArch64Features(CpuFeatures::ARM& features) noexcept {
if (features.hasFP16()) if (features.hasFP16()) {
features.add(Ext::kFP16CONV); features.add(Ext::kFP16CONV);
}
if (features.hasMTE3()) if (features.hasMTE3()) {
features.add(Ext::kMTE2); features.add(Ext::kMTE2);
}
if (features.hasMTE2()) if (features.hasMTE2()) {
features.add(Ext::kMTE); features.add(Ext::kMTE);
}
if (features.hasSSBS2()) if (features.hasSSBS2()) {
features.add(Ext::kSSBS); features.add(Ext::kSSBS);
}
} }
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static ASMJIT_FAVOR_SIZE void postProcessARMCpuInfo(CpuInfo& cpu) noexcept { static ASMJIT_FAVOR_SIZE void postProcessARMCpuInfo(CpuInfo& cpu) noexcept {
#if ASMJIT_ARCH_ARM == 32 #if ASMJIT_ARCH_ARM == 32
postProcessAArch32Features(cpu.features().arm()); postProcessAArch32Features(cpu.features().arm());
@@ -1466,7 +1469,7 @@ static ASMJIT_FAVOR_SIZE void postProcessARMCpuInfo(CpuInfo& cpu) noexcept {
// Since the register ID is encoded with the instruction we have to create a function for each register ID to read. // Since the register ID is encoded with the instruction we have to create a function for each register ID to read.
#define ASMJIT_AARCH64_DEFINE_CPUID_READ_FN(func, regId) \ #define ASMJIT_AARCH64_DEFINE_CPUID_READ_FN(func, regId) \
ASMJIT_MAYBE_UNUSED \ [[maybe_unused]] \
static inline uint64_t func() noexcept { \ static inline uint64_t func() noexcept { \
uint64_t output; \ uint64_t output; \
__asm__ __volatile__("mrs %0, " #regId : "=r"(output)); \ __asm__ __volatile__("mrs %0, " #regId : "=r"(output)); \
@@ -1494,17 +1497,12 @@ ASMJIT_AARCH64_DEFINE_CPUID_READ_FN(aarch64ReadZFR0, S3_0_C0_C4_4) // ID_AA64ZFR
// //
// References: // References:
// - https://docs.kernel.org/arch/arm64/cpu-feature-registers.html // - https://docs.kernel.org/arch/arm64/cpu-feature-registers.html
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static ASMJIT_FAVOR_SIZE void detectAArch64FeaturesViaCPUID(CpuInfo& cpu) noexcept { static ASMJIT_FAVOR_SIZE void detectAArch64FeaturesViaCPUID(CpuInfo& cpu) noexcept {
populateBaseARMFeatures(cpu); populateBaseARMFeatures(cpu);
-  detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu,
-    aarch64ReadPFR0(),
-    aarch64ReadPFR1());
-  detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu,
-    aarch64ReadISAR0(),
-    aarch64ReadISAR1());
+  detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu, aarch64ReadPFR0(), aarch64ReadPFR1());
+  detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu, aarch64ReadISAR0(), aarch64ReadISAR1());
// TODO: Fix this on FreeBSD - I don't know what kernel version allows to access the registers below... // TODO: Fix this on FreeBSD - I don't know what kernel version allows to access the registers below...
@@ -1867,13 +1865,8 @@ static ASMJIT_FAVOR_SIZE void detectARMCpu(CpuInfo& cpu) noexcept {
const char sysctlCpuPath[] = "machdep.cpu0.cpu_id"; const char sysctlCpuPath[] = "machdep.cpu0.cpu_id";
if (sysctlbyname(sysctlCpuPath, &regs, &len, nullptr, 0) == 0) { if (sysctlbyname(sysctlCpuPath, &regs, &len, nullptr, 0) == 0) {
-    detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu,
-      regs.r64(Regs::k64_AA64PFR0),
-      regs.r64(Regs::k64_AA64PFR1));
-    detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu,
-      regs.r64(Regs::k64_AA64ISAR0),
-      regs.r64(Regs::k64_AA64ISAR1));
+    detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu, regs.r64(Regs::k64_AA64PFR0), regs.r64(Regs::k64_AA64PFR1));
+    detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu, regs.r64(Regs::k64_AA64ISAR0), regs.r64(Regs::k64_AA64ISAR1));
// TODO: AA64ISAR2 should be added when it's provided by NetBSD. // TODO: AA64ISAR2 should be added when it's provided by NetBSD.
// detectAArch64FeaturesViaCPUID_AA64ISAR2(cpu, regs.r64Regs::k64_AA64ISAR2)); // detectAArch64FeaturesViaCPUID_AA64ISAR2(cpu, regs.r64Regs::k64_AA64ISAR2));
@@ -1925,18 +1918,12 @@ static uint64_t openbsdReadAArch64CPUID(OpenBSDAArch64CPUID id) noexcept {
} }
static ASMJIT_FAVOR_SIZE void detectARMCpu(CpuInfo& cpu) noexcept { static ASMJIT_FAVOR_SIZE void detectARMCpu(CpuInfo& cpu) noexcept {
typedef OpenBSDAArch64CPUID ID; using ID = OpenBSDAArch64CPUID;
populateBaseARMFeatures(cpu); populateBaseARMFeatures(cpu);
-  detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu,
-    openbsdReadAArch64CPUID(ID::kAA64PFR0),
-    openbsdReadAArch64CPUID(ID::kAA64PFR1));
-  detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu,
-    openbsdReadAArch64CPUID(ID::kAA64ISAR0),
-    openbsdReadAArch64CPUID(ID::kAA64ISAR1));
+  detectAArch64FeaturesViaCPUID_AA64PFR0_AA64PFR1(cpu, openbsdReadAArch64CPUID(ID::kAA64PFR0), openbsdReadAArch64CPUID(ID::kAA64PFR1));
+  detectAArch64FeaturesViaCPUID_AA64ISAR0_AA64ISAR1(cpu, openbsdReadAArch64CPUID(ID::kAA64ISAR0), openbsdReadAArch64CPUID(ID::kAA64ISAR1));
detectAArch64FeaturesViaCPUID_AA64ISAR2(cpu, openbsdReadAArch64CPUID(ID::kAA64ISAR2)); detectAArch64FeaturesViaCPUID_AA64ISAR2(cpu, openbsdReadAArch64CPUID(ID::kAA64ISAR2));
detectAArch64FeaturesViaCPUID_AA64MMFR0(cpu, openbsdReadAArch64CPUID(ID::kAA64MMFR0)); detectAArch64FeaturesViaCPUID_AA64MMFR0(cpu, openbsdReadAArch64CPUID(ID::kAA64MMFR0));
detectAArch64FeaturesViaCPUID_AA64MMFR1(cpu, openbsdReadAArch64CPUID(ID::kAA64MMFR1)); detectAArch64FeaturesViaCPUID_AA64MMFR1(cpu, openbsdReadAArch64CPUID(ID::kAA64MMFR1));
@@ -1946,8 +1933,9 @@ static ASMJIT_FAVOR_SIZE void detectARMCpu(CpuInfo& cpu) noexcept {
if (cpu.features().arm().hasAny(Ext::kSVE, Ext::kSME)) { if (cpu.features().arm().hasAny(Ext::kSVE, Ext::kSME)) {
detectAArch64FeaturesViaCPUID_AA64ZFR0(cpu, openbsdReadAArch64CPUID(ID::kAA64ZFR0)); detectAArch64FeaturesViaCPUID_AA64ZFR0(cpu, openbsdReadAArch64CPUID(ID::kAA64ZFR0));
if (cpu.features().arm().hasSME()) if (cpu.features().arm().hasSME()) {
detectAArch64FeaturesViaCPUID_AA64SMFR0(cpu, openbsdReadAArch64CPUID(ID::kAA64SMFR0)); detectAArch64FeaturesViaCPUID_AA64SMFR0(cpu, openbsdReadAArch64CPUID(ID::kAA64SMFR0));
}
} }
postProcessARMCpuInfo(cpu); postProcessARMCpuInfo(cpu);
@@ -1989,15 +1977,16 @@ static ASMJIT_FAVOR_SIZE long appleDetectARMFeatureViaSysctl(AppleFeatureType ty
memcpy(sysctlName + prefixSize, featureName, featureNameSize + 1u); // Include NULL terminator. memcpy(sysctlName + prefixSize, featureName, featureNameSize + 1u); // Include NULL terminator.
long val = 0; long val = 0;
if (appleSysctlByName<long>(sysctlName, &val)) if (appleSysctlByName<long>(sysctlName, &val)) {
return val; return val;
}
} }
return 0; return 0;
} }
static ASMJIT_FAVOR_SIZE void appleDetectARMFeaturesViaSysctl(CpuInfo& cpu) noexcept { static ASMJIT_FAVOR_SIZE void appleDetectARMFeaturesViaSysctl(CpuInfo& cpu) noexcept {
typedef AppleFeatureType FT; using FT = AppleFeatureType;
// Based on: // Based on:
// - https://developer.apple.com/documentation/kernel/1387446-sysctlbyname/determining_instruction_set_characteristics // - https://developer.apple.com/documentation/kernel/1387446-sysctlbyname/determining_instruction_set_characteristics
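On Apple platforms the per-feature path boils down to sysctlbyname() lookups of hw.optional.* keys, per the Apple documentation linked above. A hedged sketch of a single query (the key names are taken from Apple's docs, not from this diff):

    #include <sys/sysctl.h>

    // Returns true if the given "hw.optional.*" key exists and reports 1.
    static bool appleHasFeature(const char* key) noexcept {
      int value = 0;
      size_t size = sizeof(value);
      return sysctlbyname(key, &value, &size, nullptr, 0) == 0 && value != 0;
    }

    // Example: appleHasFeature("hw.optional.arm.FEAT_LSE");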
@@ -2069,8 +2058,9 @@ static ASMJIT_FAVOR_SIZE void detectARMCpu(CpuInfo& cpu) noexcept {
memcpy(cpu._vendor.str, "APPLE", 6); memcpy(cpu._vendor.str, "APPLE", 6);
bool cpuFeaturesPopulated = detectARMFeaturesViaAppleFamilyId(cpu); bool cpuFeaturesPopulated = detectARMFeaturesViaAppleFamilyId(cpu);
if (!cpuFeaturesPopulated) if (!cpuFeaturesPopulated) {
appleDetectARMFeaturesViaSysctl(cpu); appleDetectARMFeaturesViaSysctl(cpu);
}
postProcessARMCpuInfo(cpu); postProcessARMCpuInfo(cpu);
} }
@@ -2125,8 +2115,4 @@ const CpuInfo& CpuInfo::host() noexcept {
return cpuInfoGlobal; return cpuInfoGlobal;
} }
#if defined(_MSC_VER)
#pragma warning(pop)
#endif // _MSC_VER
ASMJIT_END_NAMESPACE ASMJIT_END_NAMESPACE

View File

@@ -26,18 +26,21 @@ public:
//! \{ //! \{
//! \cond INTERNAL //! \cond INTERNAL
-  enum : uint32_t {
-    kMaxFeatures = 256,
-    kNumBitWords = kMaxFeatures / Support::kBitWordSizeInBits
-  };
+  static inline constexpr uint32_t kMaxFeatures = 256;
+  static inline constexpr uint32_t kNumBitWords = kMaxFeatures / Support::kBitWordSizeInBits;
   //! \endcond
+  //! \}
+
+  //! \name Types
+  //! \{
-  //! A word that is used to represents feature bits.
-  typedef Support::BitWord BitWord;
-  //! Iterator that can iterate all CPU features set.
-  typedef Support::BitVectorIterator<BitWord> Iterator;
-  typedef Support::Array<BitWord, kNumBitWords> Bits;
+  //! A word that is used to represents feature bits.
+  using BitWord = Support::BitWord;
+  //! Iterator that can iterate all CPU features set.
+  using Iterator = Support::BitVectorIterator<BitWord>;
+  using Bits = Support::Array<BitWord, kNumBitWords>;
   //! \}
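The old anonymous enum becomes a pair of static inline constexpr members; since C++17 such inline variables need no out-of-class definition and keep their real integer type. A short illustration (the 64-bit word size below is an assumption of this sketch, not a value taken from the diff):

    #include <cstdint>

    struct Limits {
      // C++17 inline variables: no separate .cpp definition is required.
      static inline constexpr uint32_t kMaxFeatures = 256;
      static inline constexpr uint32_t kBitsPerWord = 64;  // assumption for this sketch
      static inline constexpr uint32_t kNumBitWords = kMaxFeatures / kBitsPerWord;
    };

    static_assert(Limits::kNumBitWords == 4, "256 features fit in four 64-bit words");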
@@ -57,7 +60,10 @@ public:
//! \name Overloaded Operators //! \name Overloaded Operators
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const Data& other) const noexcept { return equals(other); } ASMJIT_INLINE_NODEBUG bool operator==(const Data& other) const noexcept { return equals(other); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const Data& other) const noexcept { return !equals(other); } ASMJIT_INLINE_NODEBUG bool operator!=(const Data& other) const noexcept { return !equals(other); }
//! \} //! \}
@@ -66,21 +72,28 @@ public:
//! \{ //! \{
//! Returns true if there are no features set. //! Returns true if there are no features set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _bits.aggregate<Support::Or>(0) == 0; } ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _bits.aggregate<Support::Or>(0) == 0; }
//! Returns all features as array of bitwords (see \ref Support::BitWord). //! Returns all features as array of bitwords (see \ref Support::BitWord).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BitWord* bits() noexcept { return _bits.data(); } ASMJIT_INLINE_NODEBUG BitWord* bits() noexcept { return _bits.data(); }
//! Returns all features as array of bitwords (const). //! Returns all features as array of bitwords (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const BitWord* bits() const noexcept { return _bits.data(); } ASMJIT_INLINE_NODEBUG const BitWord* bits() const noexcept { return _bits.data(); }
//! Returns the number of BitWords returned by \ref bits(). //! Returns the number of BitWords returned by \ref bits().
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t bitWordCount() const noexcept { return kNumBitWords; } ASMJIT_INLINE_NODEBUG size_t bitWordCount() const noexcept { return kNumBitWords; }
//! Returns \ref Support::BitVectorIterator, that can be used to iterate over all features efficiently. //! Returns \ref Support::BitVectorIterator, that can be used to iterate over all features efficiently.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Iterator iterator() const noexcept { return Iterator(_bits.data(), kNumBitWords); } ASMJIT_INLINE_NODEBUG Iterator iterator() const noexcept { return Iterator(_bits.data(), kNumBitWords); }
//! Tests whether the feature `featureId` is present. //! Tests whether the feature `featureId` is present.
template<typename FeatureId> template<typename FeatureId>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool has(const FeatureId& featureId) const noexcept { ASMJIT_INLINE_NODEBUG bool has(const FeatureId& featureId) const noexcept {
ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures); ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures);
@@ -92,6 +105,7 @@ public:
//! \cond NONE //! \cond NONE
template<typename FeatureId> template<typename FeatureId>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAny(const FeatureId& featureId) const noexcept { ASMJIT_INLINE_NODEBUG bool hasAny(const FeatureId& featureId) const noexcept {
return has(featureId); return has(featureId);
} }
@@ -101,11 +115,13 @@ public:
//! //!
//! \note This is a variadic function template that can be used with multiple features. //! \note This is a variadic function template that can be used with multiple features.
template<typename FeatureId, typename... Args> template<typename FeatureId, typename... Args>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAny(const FeatureId& featureId, Args&&... otherFeatureIds) const noexcept { ASMJIT_INLINE_NODEBUG bool hasAny(const FeatureId& featureId, Args&&... otherFeatureIds) const noexcept {
return bool(unsigned(has(featureId)) | unsigned(hasAny(std::forward<Args>(otherFeatureIds)...))); return bool(unsigned(has(featureId)) | unsigned(hasAny(std::forward<Args>(otherFeatureIds)...)));
} }
//! Tests whether all features as defined by `other` are present. //! Tests whether all features as defined by `other` are present.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAll(const Data& other) const noexcept { ASMJIT_INLINE_NODEBUG bool hasAll(const Data& other) const noexcept {
uint32_t result = 1; uint32_t result = 1;
for (uint32_t i = 0; i < kNumBitWords; i++) for (uint32_t i = 0; i < kNumBitWords; i++)
@@ -123,7 +139,7 @@ public:
//! Adds the given CPU `featureId` to the list of features. //! Adds the given CPU `featureId` to the list of features.
template<typename FeatureId> template<typename FeatureId>
ASMJIT_INLINE_NODEBUG void add(const FeatureId& featureId) noexcept { inline void add(const FeatureId& featureId) noexcept {
ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures); ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures);
uint32_t idx = uint32_t(featureId) / Support::kBitWordSizeInBits; uint32_t idx = uint32_t(featureId) / Support::kBitWordSizeInBits;
@@ -133,13 +149,13 @@ public:
} }
template<typename FeatureId, typename... Args> template<typename FeatureId, typename... Args>
ASMJIT_INLINE_NODEBUG void add(const FeatureId& featureId, Args&&... otherFeatureIds) noexcept { inline void add(const FeatureId& featureId, Args&&... otherFeatureIds) noexcept {
add(featureId); add(featureId);
add(std::forward<Args>(otherFeatureIds)...); add(std::forward<Args>(otherFeatureIds)...);
} }
template<typename FeatureId> template<typename FeatureId>
ASMJIT_INLINE_NODEBUG void addIf(bool condition, const FeatureId& featureId) noexcept { inline void addIf(bool condition, const FeatureId& featureId) noexcept {
ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures); ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures);
uint32_t idx = uint32_t(featureId) / Support::kBitWordSizeInBits; uint32_t idx = uint32_t(featureId) / Support::kBitWordSizeInBits;
@@ -149,14 +165,14 @@ public:
} }
template<typename FeatureId, typename... Args> template<typename FeatureId, typename... Args>
ASMJIT_INLINE_NODEBUG void addIf(bool condition, const FeatureId& featureId, Args&&... otherFeatureIds) noexcept { inline void addIf(bool condition, const FeatureId& featureId, Args&&... otherFeatureIds) noexcept {
addIf(condition, featureId); addIf(condition, featureId);
addIf(condition, std::forward<Args>(otherFeatureIds)...); addIf(condition, std::forward<Args>(otherFeatureIds)...);
} }
//! Removes the given CPU `featureId` from the list of features. //! Removes the given CPU `featureId` from the list of features.
template<typename FeatureId> template<typename FeatureId>
ASMJIT_INLINE_NODEBUG void remove(const FeatureId& featureId) noexcept { inline void remove(const FeatureId& featureId) noexcept {
ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures); ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures);
uint32_t idx = uint32_t(featureId) / Support::kBitWordSizeInBits; uint32_t idx = uint32_t(featureId) / Support::kBitWordSizeInBits;
@@ -166,7 +182,7 @@ public:
} }
template<typename FeatureId, typename... Args> template<typename FeatureId, typename... Args>
ASMJIT_INLINE_NODEBUG void remove(const FeatureId& featureId, Args&&... otherFeatureIds) noexcept { inline void remove(const FeatureId& featureId, Args&&... otherFeatureIds) noexcept {
remove(featureId); remove(featureId);
remove(std::forward<Args>(otherFeatureIds)...); remove(std::forward<Args>(otherFeatureIds)...);
} }
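A hedged usage sketch of the add/addIf/has interface above; the feature ids mirror the Ext::k* constants used elsewhere in this commit, and the iterator protocol (hasNext/next) is assumed from Support::BitVectorIterator:

    #include <asmjit/core.h>

    static void sketchFeatureSet(bool haveSve2) {
      asmjit::CpuFeatures features;
      features.arm().add(asmjit::CpuFeatures::ARM::kSVE);
      features.arm().addIf(haveSve2, asmjit::CpuFeatures::ARM::kSVE2);

      if (features.arm().hasAny(asmjit::CpuFeatures::ARM::kSVE, asmjit::CpuFeatures::ARM::kSVE2)) {
        auto it = features.arm().iterator();
        while (it.hasNext()) {
          uint32_t featureId = uint32_t(it.next());
          (void)featureId;  // e.g. map the id to a printable name
        }
      }
    }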
@@ -1104,6 +1120,7 @@ public:
//! Returns the host CPU information. //! Returns the host CPU information.
//! //!
//! \note The returned reference is global - it's setup only once and then shared. //! \note The returned reference is global - it's setup only once and then shared.
[[nodiscard]]
ASMJIT_API static const CpuInfo& host() noexcept; ASMJIT_API static const CpuInfo& host() noexcept;
//! \} //! \}
@@ -1134,15 +1151,18 @@ public:
//! \{ //! \{
//! Returns the CPU architecture this information relates to. //! Returns the CPU architecture this information relates to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; } ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; }
//! Returns the CPU sub-architecture this information relates to. //! Returns the CPU sub-architecture this information relates to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return _subArch; } ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return _subArch; }
//! Returns whether the CPU was detected successfully. //! Returns whether the CPU was detected successfully.
//! //!
//! If the returned value is false it means that AsmJit either failed to detect the CPU or it doesn't have //! If the returned value is false it means that AsmJit either failed to detect the CPU or it doesn't have
//! implementation targeting the host architecture and operating system. //! implementation targeting the host architecture and operating system.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool wasDetected() const noexcept { return _wasDetected; } ASMJIT_INLINE_NODEBUG bool wasDetected() const noexcept { return _wasDetected; }
//! Returns the CPU family ID. //! Returns the CPU family ID.
@@ -1152,6 +1172,7 @@ public:
//! - Family identifier matches the FamilyId read by using CPUID. //! - Family identifier matches the FamilyId read by using CPUID.
//! - ARM: //! - ARM:
//! - Apple - returns Apple Family identifier returned by sysctlbyname("hw.cpufamily"). //! - Apple - returns Apple Family identifier returned by sysctlbyname("hw.cpufamily").
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t familyId() const noexcept { return _familyId; } ASMJIT_INLINE_NODEBUG uint32_t familyId() const noexcept { return _familyId; }
//! Returns the CPU model ID. //! Returns the CPU model ID.
@@ -1159,6 +1180,7 @@ public:
//! The information provided depends on architecture and OS: //! The information provided depends on architecture and OS:
//! - X86: //! - X86:
//! - Model identifier matches the ModelId read by using CPUID. //! - Model identifier matches the ModelId read by using CPUID.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t modelId() const noexcept { return _modelId; } ASMJIT_INLINE_NODEBUG uint32_t modelId() const noexcept { return _modelId; }
//! Returns the CPU brand id. //! Returns the CPU brand id.
@@ -1166,6 +1188,7 @@ public:
//! The information provided depends on architecture and OS: //! The information provided depends on architecture and OS:
//! - X86: //! - X86:
//! - Brand identifier matches the BrandId read by using CPUID. //! - Brand identifier matches the BrandId read by using CPUID.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t brandId() const noexcept { return _brandId; } ASMJIT_INLINE_NODEBUG uint32_t brandId() const noexcept { return _brandId; }
//! Returns the CPU stepping. //! Returns the CPU stepping.
@@ -1173,6 +1196,7 @@ public:
//! The information provided depends on architecture and OS: //! The information provided depends on architecture and OS:
//! - X86: //! - X86:
//! - Stepping identifier matches the Stepping information read by using CPUID. //! - Stepping identifier matches the Stepping information read by using CPUID.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t stepping() const noexcept { return _stepping; } ASMJIT_INLINE_NODEBUG uint32_t stepping() const noexcept { return _stepping; }
//! Returns the processor type. //! Returns the processor type.
@@ -1180,34 +1204,46 @@ public:
//! The information provided depends on architecture and OS: //! The information provided depends on architecture and OS:
//! - X86: //! - X86:
//! - Processor type identifier matches the ProcessorType read by using CPUID. //! - Processor type identifier matches the ProcessorType read by using CPUID.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t processorType() const noexcept { return _processorType; } ASMJIT_INLINE_NODEBUG uint32_t processorType() const noexcept { return _processorType; }
//! Returns the maximum number of logical processors. //! Returns the maximum number of logical processors.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t maxLogicalProcessors() const noexcept { return _maxLogicalProcessors; } ASMJIT_INLINE_NODEBUG uint32_t maxLogicalProcessors() const noexcept { return _maxLogicalProcessors; }
//! Returns the size of a CPU cache line. //! Returns the size of a CPU cache line.
//! //!
//! On a multi-architecture system this should return the smallest cache line of all CPUs. //! On a multi-architecture system this should return the smallest cache line of all CPUs.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t cacheLineSize() const noexcept { return _cacheLineSize; } ASMJIT_INLINE_NODEBUG uint32_t cacheLineSize() const noexcept { return _cacheLineSize; }
//! Returns number of hardware threads available. //! Returns number of hardware threads available.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t hwThreadCount() const noexcept { return _hwThreadCount; } ASMJIT_INLINE_NODEBUG uint32_t hwThreadCount() const noexcept { return _hwThreadCount; }
//! Returns a CPU vendor string. //! Returns a CPU vendor string.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* vendor() const noexcept { return _vendor.str; } ASMJIT_INLINE_NODEBUG const char* vendor() const noexcept { return _vendor.str; }
//! Tests whether the CPU vendor string is equal to `s`. //! Tests whether the CPU vendor string is equal to `s`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isVendor(const char* s) const noexcept { return _vendor.equals(s); } ASMJIT_INLINE_NODEBUG bool isVendor(const char* s) const noexcept { return _vendor.equals(s); }
//! Returns a CPU brand string. //! Returns a CPU brand string.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* brand() const noexcept { return _brand.str; } ASMJIT_INLINE_NODEBUG const char* brand() const noexcept { return _brand.str; }
//! Returns CPU features. //! Returns CPU features.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CpuFeatures& features() noexcept { return _features; } ASMJIT_INLINE_NODEBUG CpuFeatures& features() noexcept { return _features; }
//! Returns CPU features (const). //! Returns CPU features (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const CpuFeatures& features() const noexcept { return _features; } ASMJIT_INLINE_NODEBUG const CpuFeatures& features() const noexcept { return _features; }
//! Tests whether the CPU has the given `feature`. //! Tests whether the CPU has the given `feature`.
template<typename FeatureId> template<typename FeatureId>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFeature(const FeatureId& featureId) const noexcept { return _features.has(featureId); } ASMJIT_INLINE_NODEBUG bool hasFeature(const FeatureId& featureId) const noexcept { return _features.has(featureId); }
//! Adds the given CPU `featureId` to the list of features. //! Adds the given CPU `featureId` to the list of features.
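Taken together these getters describe the one-time host snapshot returned by CpuInfo::host(). A hedged usage sketch:

    #include <asmjit/core.h>
    #include <cstdio>

    static void printHostCpu() {
      const asmjit::CpuInfo& cpu = asmjit::CpuInfo::host();
      std::printf("vendor=%s brand=%s family=%u model=%u threads=%u\n",
                  cpu.vendor(), cpu.brand(), cpu.familyId(), cpu.modelId(), cpu.hwThreadCount());

      // hasFeature() takes an architecture-specific feature id.
      if (cpu.hasFeature(asmjit::CpuFeatures::ARM::kASIMD)) {
        std::printf("AArch64 Advanced SIMD is available\n");
      }
    }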

View File

@@ -23,22 +23,27 @@ static void dumpFuncValue(String& sb, Arch arch, const FuncValue& value) noexcep
Formatter::formatTypeId(sb, value.typeId()); Formatter::formatTypeId(sb, value.typeId());
sb.append('@'); sb.append('@');
-  if (value.isIndirect())
+  if (value.isIndirect()) {
     sb.append('[');
+  }
-  if (value.isReg())
+  if (value.isReg()) {
     Formatter::formatRegister(sb, 0, nullptr, arch, value.regType(), value.regId());
-  else if (value.isStack())
+  }
+  else if (value.isStack()) {
     sb.appendFormat("[%d]", value.stackOffset());
-  else
+  }
+  else {
     sb.append("<none>");
+  }
-  if (value.isIndirect())
+  if (value.isIndirect()) {
     sb.append(']');
+  }
 }
static void dumpAssignment(String& sb, const FuncArgsContext& ctx) noexcept { static void dumpAssignment(String& sb, const FuncArgsContext& ctx) noexcept {
typedef FuncArgsContext::Var Var; using Var = FuncArgsContext::Var;
Arch arch = ctx.arch(); Arch arch = ctx.arch();
uint32_t varCount = ctx.varCount(); uint32_t varCount = ctx.varCount();
@@ -53,8 +58,9 @@ static void dumpAssignment(String& sb, const FuncArgsContext& ctx) noexcept {
sb.append(" <- "); sb.append(" <- ");
dumpFuncValue(sb, arch, cur); dumpFuncValue(sb, arch, cur);
if (var.isDone()) if (var.isDone()) {
sb.append(" {Done}"); sb.append(" {Done}");
}
sb.append('\n'); sb.append('\n');
} }
@@ -83,8 +89,8 @@ Error BaseEmitHelper::emitArgMove(const BaseReg& dst_, TypeId dstTypeId, const O
// =================================== // ===================================
ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) { ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
typedef FuncArgsContext::Var Var; using Var = FuncArgsContext::Var;
typedef FuncArgsContext::WorkData WorkData; using WorkData = FuncArgsContext::WorkData;
enum WorkFlags : uint32_t { enum WorkFlags : uint32_t {
kWorkNone = 0x00, kWorkNone = 0x00,
@@ -118,10 +124,12 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
BaseReg sa = sp; BaseReg sa = sp;
if (frame.hasDynamicAlignment()) { if (frame.hasDynamicAlignment()) {
-    if (frame.hasPreservedFP())
+    if (frame.hasPreservedFP()) {
       sa.setId(archTraits.fpRegId());
-    else
+    }
+    else {
       sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId());
+    }
} }
// Register to stack and stack to stack moves must be first as now we have // Register to stack and stack to stack moves must be first as now we have
@@ -135,8 +143,9 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
for (uint32_t varId = 0; varId < varCount; varId++) { for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId]; Var& var = ctx._vars[varId];
if (!var.out.isStack()) if (!var.out.isStack()) {
continue; continue;
}
FuncValue& cur = var.cur; FuncValue& cur = var.cur;
FuncValue& out = var.out; FuncValue& out = var.out;
@@ -169,13 +178,15 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
// we follow the rule that IntToInt moves will use GP regs with possibility to signature or zero extend, // we follow the rule that IntToInt moves will use GP regs with possibility to signature or zero extend,
// and all other moves will either use GP or VEC regs depending on the size of the move. // and all other moves will either use GP or VEC regs depending on the size of the move.
OperandSignature signature = getSuitableRegForMemToMemMove(arch, out.typeId(), cur.typeId()); OperandSignature signature = getSuitableRegForMemToMemMove(arch, out.typeId(), cur.typeId());
if (ASMJIT_UNLIKELY(!signature.isValid())) if (ASMJIT_UNLIKELY(!signature.isValid())) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
WorkData& wd = workData[signature.regGroup()]; WorkData& wd = workData[signature.regGroup()];
RegMask availableRegs = wd.availableRegs(); RegMask availableRegs = wd.availableRegs();
if (ASMJIT_UNLIKELY(!availableRegs)) if (ASMJIT_UNLIKELY(!availableRegs)) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
uint32_t availableId = Support::ctz(availableRegs); uint32_t availableId = Support::ctz(availableRegs);
reg.setSignatureAndId(signature, availableId); reg.setSignatureAndId(signature, availableId);
@@ -183,8 +194,9 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
ASMJIT_PROPAGATE(emitArgMove(reg, out.typeId(), srcStackPtr, cur.typeId())); ASMJIT_PROPAGATE(emitArgMove(reg, out.typeId(), srcStackPtr, cur.typeId()));
} }
if (cur.isIndirect() && cur.isReg()) if (cur.isIndirect() && cur.isReg()) {
workData[RegGroup::kGp].unassign(varId, cur.regId()); workData[RegGroup::kGp].unassign(varId, cur.regId());
}
// Register to stack move. // Register to stack move.
ASMJIT_PROPAGATE(emitRegMove(dstStackPtr, reg, cur.typeId())); ASMJIT_PROPAGATE(emitRegMove(dstStackPtr, reg, cur.typeId()));
@@ -198,8 +210,9 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
for (;;) { for (;;) {
for (uint32_t varId = 0; varId < varCount; varId++) { for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId]; Var& var = ctx._vars[varId];
if (var.isDone() || !var.cur.isReg()) if (var.isDone() || !var.cur.isReg()) {
continue; continue;
}
FuncValue& cur = var.cur; FuncValue& cur = var.cur;
FuncValue& out = var.out; FuncValue& out = var.out;
@@ -224,13 +237,15 @@ EmitMove:
BaseReg(archTraits.regTypeToSignature(cur.regType()), curId), cur.typeId())); BaseReg(archTraits.regTypeToSignature(cur.regType()), curId), cur.typeId()));
// Only reassign if this is not a sign/zero extension that happens on the same in/out register. // Only reassign if this is not a sign/zero extension that happens on the same in/out register.
if (curId != outId) if (curId != outId) {
wd.reassign(varId, outId, curId); wd.reassign(varId, outId, curId);
}
cur.initReg(out.regType(), outId, out.typeId()); cur.initReg(out.regType(), outId, out.typeId());
if (outId == out.regId()) if (outId == out.regId()) {
var.markDone(); var.markDone();
}
workFlags |= kWorkDidSome | kWorkPending; workFlags |= kWorkDidSome | kWorkPending;
} }
else { else {
@@ -241,20 +256,21 @@ EmitMove:
// Only few architectures provide swap operations, and only for few register groups. // Only few architectures provide swap operations, and only for few register groups.
if (archTraits.hasInstRegSwap(curGroup)) { if (archTraits.hasInstRegSwap(curGroup)) {
RegType highestType = Support::max(cur.regType(), altVar.cur.regType()); RegType highestType = Support::max(cur.regType(), altVar.cur.regType());
if (Support::isBetween(highestType, RegType::kGp8Lo, RegType::kGp16)) if (Support::isBetween(highestType, RegType::kGp8Lo, RegType::kGp16)) {
highestType = RegType::kGp32; highestType = RegType::kGp32;
}
OperandSignature signature = archTraits.regTypeToSignature(highestType); OperandSignature signature = archTraits.regTypeToSignature(highestType);
ASMJIT_PROPAGATE( ASMJIT_PROPAGATE(emitRegSwap(BaseReg(signature, outId), BaseReg(signature, curId)));
emitRegSwap(BaseReg(signature, outId), BaseReg(signature, curId)));
wd.swap(varId, curId, altId, outId); wd.swap(varId, curId, altId, outId);
cur.setRegId(outId); cur.setRegId(outId);
var.markDone(); var.markDone();
altVar.cur.setRegId(curId); altVar.cur.setRegId(curId);
if (altVar.out.isInitialized()) if (altVar.out.isInitialized()) {
altVar.markDone(); altVar.markDone();
}
workFlags |= kWorkDidSome; workFlags |= kWorkDidSome;
} }
else { else {
@@ -262,8 +278,9 @@ EmitMove:
RegMask availableRegs = wd.availableRegs(); RegMask availableRegs = wd.availableRegs();
if (availableRegs) { if (availableRegs) {
RegMask inOutRegs = wd.dstRegs(); RegMask inOutRegs = wd.dstRegs();
if (availableRegs & ~inOutRegs) if (availableRegs & ~inOutRegs) {
availableRegs &= ~inOutRegs; availableRegs &= ~inOutRegs;
}
outId = Support::ctz(availableRegs); outId = Support::ctz(availableRegs);
goto EmitMove; goto EmitMove;
} }
@@ -279,12 +296,14 @@ EmitMove:
} }
} }
if (!(workFlags & kWorkPending)) if (!(workFlags & kWorkPending)) {
break; break;
}
// If we did nothing twice it means that something is really broken. // If we did nothing twice it means that something is really broken.
if ((workFlags & (kWorkDidSome | kWorkPostponed)) == kWorkPostponed) if ((workFlags & (kWorkDidSome | kWorkPostponed)) == kWorkPostponed) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
workFlags = (workFlags & kWorkDidSome) ? kWorkNone : kWorkPostponed; workFlags = (workFlags & kWorkDidSome) ? kWorkNone : kWorkPostponed;
} }
@@ -294,8 +313,9 @@ EmitMove:
if (ctx._hasStackSrc) { if (ctx._hasStackSrc) {
uint32_t iterCount = 1; uint32_t iterCount = 1;
if (frame.hasDynamicAlignment() && !frame.hasPreservedFP()) if (frame.hasDynamicAlignment() && !frame.hasPreservedFP()) {
sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId()); sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId());
}
// Base address of all arguments passed by stack. // Base address of all arguments passed by stack.
BaseMem baseArgPtr(sa, int32_t(frame.saOffset(sa.id()))); BaseMem baseArgPtr(sa, int32_t(frame.saOffset(sa.id())));
@@ -303,8 +323,9 @@ EmitMove:
for (uint32_t iter = 0; iter < iterCount; iter++) { for (uint32_t iter = 0; iter < iterCount; iter++) {
for (uint32_t varId = 0; varId < varCount; varId++) { for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId]; Var& var = ctx._vars[varId];
if (var.isDone()) if (var.isDone()) {
continue; continue;
}
if (var.cur.isStack()) { if (var.cur.isStack()) {
ASMJIT_ASSERT(var.out.isReg()); ASMJIT_ASSERT(var.out.isReg());

View File

@@ -26,7 +26,9 @@ public:
ASMJIT_INLINE_NODEBUG virtual ~BaseEmitHelper() noexcept = default; ASMJIT_INLINE_NODEBUG virtual ~BaseEmitHelper() noexcept = default;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseEmitter* emitter() const noexcept { return _emitter; } ASMJIT_INLINE_NODEBUG BaseEmitter* emitter() const noexcept { return _emitter; }
ASMJIT_INLINE_NODEBUG void setEmitter(BaseEmitter* emitter) noexcept { _emitter = emitter; } ASMJIT_INLINE_NODEBUG void setEmitter(BaseEmitter* emitter) noexcept { _emitter = emitter; }
//! Emits a pure move operation between two registers or the same type or between a register and its home //! Emits a pure move operation between two registers or the same type or between a register and its home

View File

@@ -52,17 +52,21 @@ static ASMJIT_NOINLINE void BaseEmitter_updateForcedOptions(BaseEmitter* self) n
hasDiagnosticOptions = self->hasDiagnosticOption(DiagnosticOptions::kValidateIntermediate); hasDiagnosticOptions = self->hasDiagnosticOption(DiagnosticOptions::kValidateIntermediate);
} }
-  if (emitComments)
+  if (emitComments) {
     self->_addEmitterFlags(EmitterFlags::kLogComments);
-  else
+  }
+  else {
     self->_clearEmitterFlags(EmitterFlags::kLogComments);
+  }
   // The reserved option tells emitter (Assembler/Builder/Compiler) that there may be either a border
   // case (CodeHolder not attached, for example) or that logging or validation is required.
-  if (self->_code == nullptr || self->_logger || hasDiagnosticOptions)
+  if (self->_code == nullptr || self->_logger || hasDiagnosticOptions) {
     self->_forcedInstOptions |= InstOptions::kReserved;
-  else
+  }
+  else {
     self->_forcedInstOptions &= ~InstOptions::kReserved;
+  }
 }
// BaseEmitter - Diagnostic Options // BaseEmitter - Diagnostic Options
@@ -90,8 +94,9 @@ void BaseEmitter::setLogger(Logger* logger) noexcept {
else { else {
_logger = nullptr; _logger = nullptr;
_clearEmitterFlags(EmitterFlags::kOwnLogger); _clearEmitterFlags(EmitterFlags::kOwnLogger);
if (_code) if (_code) {
_logger = _code->logger(); _logger = _code->logger();
}
} }
BaseEmitter_updateForcedOptions(this); BaseEmitter_updateForcedOptions(this);
#else #else
@@ -110,16 +115,18 @@ void BaseEmitter::setErrorHandler(ErrorHandler* errorHandler) noexcept {
else { else {
_errorHandler = nullptr; _errorHandler = nullptr;
_clearEmitterFlags(EmitterFlags::kOwnErrorHandler); _clearEmitterFlags(EmitterFlags::kOwnErrorHandler);
if (_code) if (_code) {
_errorHandler = _code->errorHandler(); _errorHandler = _code->errorHandler();
}
} }
} }
Error BaseEmitter::reportError(Error err, const char* message) { Error BaseEmitter::reportError(Error err, const char* message) {
ErrorHandler* eh = _errorHandler; ErrorHandler* eh = _errorHandler;
if (eh) { if (eh) {
if (!message) if (!message) {
message = DebugUtils::errorAsString(err); message = DebugUtils::errorAsString(err);
}
eh->handleError(err, message, this); eh->handleError(err, message, this);
} }
return err; return err;
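The (err, message, origin) triple forwarded here is exactly what a user-supplied ErrorHandler receives. A minimal sketch:

    #include <asmjit/core.h>
    #include <cstdio>

    class PrintingErrorHandler : public asmjit::ErrorHandler {
    public:
      void handleError(asmjit::Error err, const char* message, asmjit::BaseEmitter* origin) override {
        (void)origin;
        std::printf("asmjit error %u: %s\n", unsigned(err), message);
      }
    };

    // Usage: code.setErrorHandler(&handler) for all attached emitters, or
    // emitter->setErrorHandler(&handler) to override it per emitter.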
@@ -318,8 +325,9 @@ Error BaseEmitter::comment(const char* data, size_t size) {
Error BaseEmitter::commentf(const char* fmt, ...) { Error BaseEmitter::commentf(const char* fmt, ...) {
if (!hasEmitterFlag(EmitterFlags::kLogComments)) { if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached)) if (!hasEmitterFlag(EmitterFlags::kAttached)) {
return reportError(DebugUtils::errored(kErrorNotInitialized)); return reportError(DebugUtils::errored(kErrorNotInitialized));
}
return kErrorOk; return kErrorOk;
} }
@@ -341,8 +349,9 @@ Error BaseEmitter::commentf(const char* fmt, ...) {
Error BaseEmitter::commentv(const char* fmt, va_list ap) { Error BaseEmitter::commentv(const char* fmt, va_list ap) {
if (!hasEmitterFlag(EmitterFlags::kLogComments)) { if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached)) if (!hasEmitterFlag(EmitterFlags::kAttached)) {
return reportError(DebugUtils::errored(kErrorNotInitialized)); return reportError(DebugUtils::errored(kErrorNotInitialized));
}
return kErrorOk; return kErrorOk;
} }
@@ -377,11 +386,13 @@ Error BaseEmitter::onAttach(CodeHolder* code) noexcept {
Error BaseEmitter::onDetach(CodeHolder* code) noexcept { Error BaseEmitter::onDetach(CodeHolder* code) noexcept {
DebugUtils::unused(code); DebugUtils::unused(code);
if (!hasOwnLogger()) if (!hasOwnLogger()) {
_logger = nullptr; _logger = nullptr;
}
if (!hasOwnErrorHandler()) if (!hasOwnErrorHandler()) {
_errorHandler = nullptr; _errorHandler = nullptr;
}
_clearEmitterFlags(~kEmitterPreservedFlags); _clearEmitterFlags(~kEmitterPreservedFlags);
_instructionAlignment = uint8_t(0); _instructionAlignment = uint8_t(0);
@@ -403,11 +414,13 @@ void BaseEmitter::onSettingsUpdated() noexcept {
// Only called when attached to CodeHolder by CodeHolder. // Only called when attached to CodeHolder by CodeHolder.
ASMJIT_ASSERT(_code != nullptr); ASMJIT_ASSERT(_code != nullptr);
if (!hasOwnLogger()) if (!hasOwnLogger()) {
_logger = _code->logger(); _logger = _code->logger();
}
if (!hasOwnErrorHandler()) if (!hasOwnErrorHandler()) {
_errorHandler = _code->errorHandler(); _errorHandler = _code->errorHandler();
}
BaseEmitter_updateForcedOptions(this); BaseEmitter_updateForcedOptions(this);
} }
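The ownership rules above mean a logger set directly on the emitter wins, and clearing it falls back to whatever the attached CodeHolder provides. A hedged sketch (FileLogger/StringLogger require logging support to be compiled in):

    #include <asmjit/core.h>
    #include <cstdio>

    static void attachLogging(asmjit::CodeHolder& code, asmjit::BaseEmitter* emitter) {
      static asmjit::FileLogger sharedLogger(stdout);  // shared through the CodeHolder
      code.setLogger(&sharedLogger);

      static asmjit::StringLogger ownLogger;           // emitter-specific override
      emitter->setLogger(&ownLogger);

      emitter->setLogger(nullptr);                     // revert to the CodeHolder's logger
    }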

View File

@@ -257,18 +257,18 @@ public:
//! //!
//! These are typically shared between Assembler/Builder/Compiler of a single backend. //! These are typically shared between Assembler/Builder/Compiler of a single backend.
struct Funcs { struct Funcs {
typedef Error (ASMJIT_CDECL* EmitProlog)(BaseEmitter* emitter, const FuncFrame& frame); using EmitProlog = Error (ASMJIT_CDECL*)(BaseEmitter* emitter, const FuncFrame& frame);
typedef Error (ASMJIT_CDECL* EmitEpilog)(BaseEmitter* emitter, const FuncFrame& frame); using EmitEpilog = Error (ASMJIT_CDECL*)(BaseEmitter* emitter, const FuncFrame& frame);
typedef Error (ASMJIT_CDECL* EmitArgsAssignment)(BaseEmitter* emitter, const FuncFrame& frame, const FuncArgsAssignment& args); using EmitArgsAssignment = Error (ASMJIT_CDECL*)(BaseEmitter* emitter, const FuncFrame& frame, const FuncArgsAssignment& args);
typedef Error (ASMJIT_CDECL* FormatInstruction)( using FormatInstruction = Error (ASMJIT_CDECL*)(
String& sb, String& sb,
FormatFlags formatFlags, FormatFlags formatFlags,
const BaseEmitter* emitter, const BaseEmitter* emitter,
Arch arch, Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) ASMJIT_NOEXCEPT_TYPE; const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept;
typedef Error (ASMJIT_CDECL* ValidateFunc)(const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) ASMJIT_NOEXCEPT_TYPE; using ValidateFunc = Error (ASMJIT_CDECL*)(const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept;
//! Emit prolog implementation. //! Emit prolog implementation.
EmitProlog emitProlog; EmitProlog emitProlog;
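Besides the typedef-to-using cleanup, ASMJIT_NOEXCEPT_TYPE disappears because C++17 makes the exception specification part of the function type, so noexcept is now legal in a plain alias. A short illustration:

    // Valid only since C++17: noexcept is part of the pointer-to-function type.
    using Callback = int (*)(int) noexcept;

    static int addOneNoexcept(int x) noexcept { return x + 1; }
    static int addOneThrowing(int x) { return x + 1; }

    static Callback ok = addOneNoexcept;
    // static Callback bad = addOneThrowing;  // error: would drop the noexcept guarantee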
@@ -306,9 +306,11 @@ public:
//! \{ //! \{
template<typename T> template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* as() noexcept { return reinterpret_cast<T*>(this); } ASMJIT_INLINE_NODEBUG T* as() noexcept { return reinterpret_cast<T*>(this); }
template<typename T> template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const T* as() const noexcept { return reinterpret_cast<const T*>(this); } ASMJIT_INLINE_NODEBUG const T* as() const noexcept { return reinterpret_cast<const T*>(this); }
//! \} //! \}
@@ -317,24 +319,37 @@ public:
//! \{ //! \{
//! Returns the type of this emitter, see `EmitterType`. //! Returns the type of this emitter, see `EmitterType`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG EmitterType emitterType() const noexcept { return _emitterType; } ASMJIT_INLINE_NODEBUG EmitterType emitterType() const noexcept { return _emitterType; }
//! Returns emitter flags , see `Flags`. //! Returns emitter flags , see `Flags`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG EmitterFlags emitterFlags() const noexcept { return _emitterFlags; } ASMJIT_INLINE_NODEBUG EmitterFlags emitterFlags() const noexcept { return _emitterFlags; }
//! Tests whether the emitter inherits from `BaseAssembler`. //! Tests whether the emitter inherits from `BaseAssembler`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAssembler() const noexcept { return _emitterType == EmitterType::kAssembler; } ASMJIT_INLINE_NODEBUG bool isAssembler() const noexcept { return _emitterType == EmitterType::kAssembler; }
//! Tests whether the emitter inherits from `BaseBuilder`. //! Tests whether the emitter inherits from `BaseBuilder`.
//! //!
//! \note Both Builder and Compiler emitters would return `true`. //! \note Both Builder and Compiler emitters would return `true`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isBuilder() const noexcept { return uint32_t(_emitterType) >= uint32_t(EmitterType::kBuilder); } ASMJIT_INLINE_NODEBUG bool isBuilder() const noexcept { return uint32_t(_emitterType) >= uint32_t(EmitterType::kBuilder); }
//! Tests whether the emitter inherits from `BaseCompiler`. //! Tests whether the emitter inherits from `BaseCompiler`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isCompiler() const noexcept { return _emitterType == EmitterType::kCompiler; } ASMJIT_INLINE_NODEBUG bool isCompiler() const noexcept { return _emitterType == EmitterType::kCompiler; }
//! Tests whether the emitter has the given `flag` enabled. //! Tests whether the emitter has the given `flag` enabled.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasEmitterFlag(EmitterFlags flag) const noexcept { return Support::test(_emitterFlags, flag); } ASMJIT_INLINE_NODEBUG bool hasEmitterFlag(EmitterFlags flag) const noexcept { return Support::test(_emitterFlags, flag); }
//! Tests whether the emitter is finalized. //! Tests whether the emitter is finalized.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFinalized() const noexcept { return hasEmitterFlag(EmitterFlags::kFinalized); } ASMJIT_INLINE_NODEBUG bool isFinalized() const noexcept { return hasEmitterFlag(EmitterFlags::kFinalized); }
//! Tests whether the emitter is destroyed (only used during destruction). //! Tests whether the emitter is destroyed (only used during destruction).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isDestroyed() const noexcept { return hasEmitterFlag(EmitterFlags::kDestroyed); } ASMJIT_INLINE_NODEBUG bool isDestroyed() const noexcept { return hasEmitterFlag(EmitterFlags::kDestroyed); }
//! \} //! \}
@@ -353,27 +368,37 @@ public:
//! \{ //! \{
//! Returns the CodeHolder this emitter is attached to. //! Returns the CodeHolder this emitter is attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CodeHolder* code() const noexcept { return _code; } ASMJIT_INLINE_NODEBUG CodeHolder* code() const noexcept { return _code; }
//! Returns the target environment. //! Returns the target environment.
//! //!
//! The returned \ref Environment reference matches \ref CodeHolder::environment(). //! The returned \ref Environment reference matches \ref CodeHolder::environment().
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Environment& environment() const noexcept { return _environment; } ASMJIT_INLINE_NODEBUG const Environment& environment() const noexcept { return _environment; }
//! Tests whether the target architecture is 32-bit. //! Tests whether the target architecture is 32-bit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is32Bit() const noexcept { return environment().is32Bit(); } ASMJIT_INLINE_NODEBUG bool is32Bit() const noexcept { return environment().is32Bit(); }
//! Tests whether the target architecture is 64-bit. //! Tests whether the target architecture is 64-bit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is64Bit() const noexcept { return environment().is64Bit(); } ASMJIT_INLINE_NODEBUG bool is64Bit() const noexcept { return environment().is64Bit(); }
//! Returns the target architecture type. //! Returns the target architecture type.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return environment().arch(); } ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return environment().arch(); }
//! Returns the target architecture sub-type. //! Returns the target architecture sub-type.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return environment().subArch(); } ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return environment().subArch(); }
//! Returns the target architecture's GP register size (4 or 8 bytes). //! Returns the target architecture's GP register size (4 or 8 bytes).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return environment().registerSize(); } ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return environment().registerSize(); }
//! Returns a signature of a native general purpose register (either 32-bit or 64-bit depending on the architecture). //! Returns a signature of a native general purpose register (either 32-bit or 64-bit depending on the architecture).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OperandSignature gpSignature() const noexcept { return _gpSignature; } ASMJIT_INLINE_NODEBUG OperandSignature gpSignature() const noexcept { return _gpSignature; }
//! Returns instruction alignment. //! Returns instruction alignment.
@@ -382,6 +407,7 @@ public:
//! - X86 and X86_64 - instruction alignment is 1 //! - X86 and X86_64 - instruction alignment is 1
//! - AArch32 - instruction alignment is 4 in A32 mode and 2 in THUMB mode. //! - AArch32 - instruction alignment is 4 in A32 mode and 2 in THUMB mode.
//! - AArch64 - instruction alignment is 4 //! - AArch64 - instruction alignment is 4
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t instructionAlignment() const noexcept { return _instructionAlignment; } ASMJIT_INLINE_NODEBUG uint32_t instructionAlignment() const noexcept { return _instructionAlignment; }
//! \} //! \}
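A hedged usage sketch (assumes an emitter kind that implements align(), e.g. an assembler; the helper name is illustrative) that aligns the current position to the architecture's instruction boundary:

#include <asmjit/core.h>
using namespace asmjit;

// On X86/X86_64 instructionAlignment() is 1, so this is effectively a no-op there.
static Error alignToInstructionBoundary(BaseEmitter* emitter) {
  return emitter->align(AlignMode::kCode, emitter->instructionAlignment());
}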
@@ -390,6 +416,7 @@ public:
//! \{ //! \{
//! Tests whether the emitter is initialized (i.e. attached to \ref CodeHolder). //! Tests whether the emitter is initialized (i.e. attached to \ref CodeHolder).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _code != nullptr; } ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _code != nullptr; }
//! Finalizes this emitter. //! Finalizes this emitter.
@@ -407,18 +434,21 @@ public:
//! \{ //! \{
//! Tests whether the emitter has a logger. //! Tests whether the emitter has a logger.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLogger() const noexcept { return _logger != nullptr; } ASMJIT_INLINE_NODEBUG bool hasLogger() const noexcept { return _logger != nullptr; }
//! Tests whether the emitter has its own logger. //! Tests whether the emitter has its own logger.
//! //!
//! Own logger means that it overrides the possible logger that may be used by \ref CodeHolder this emitter is //! Own logger means that it overrides the possible logger that may be used by \ref CodeHolder this emitter is
//! attached to. //! attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOwnLogger() const noexcept { return hasEmitterFlag(EmitterFlags::kOwnLogger); } ASMJIT_INLINE_NODEBUG bool hasOwnLogger() const noexcept { return hasEmitterFlag(EmitterFlags::kOwnLogger); }
//! Returns the logger this emitter uses. //! Returns the logger this emitter uses.
//! //!
//! The returned logger is either the emitter's own logger or the logger used by \ref CodeHolder this emitter //! The returned logger is either the emitter's own logger or the logger used by \ref CodeHolder this emitter
//! is attached to. //! is attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Logger* logger() const noexcept { return _logger; } ASMJIT_INLINE_NODEBUG Logger* logger() const noexcept { return _logger; }
//! Sets or resets the logger of the emitter. //! Sets or resets the logger of the emitter.
@@ -440,18 +470,21 @@ public:
//! \{ //! \{
//! Tests whether the emitter has an error handler attached. //! Tests whether the emitter has an error handler attached.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; } ASMJIT_INLINE_NODEBUG bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
//! Tests whether the emitter has its own error handler. //! Tests whether the emitter has its own error handler.
//! //!
//! Own error handler means that it overrides the possible error handler that may be used by \ref CodeHolder this //! Own error handler means that it overrides the possible error handler that may be used by \ref CodeHolder this
//! emitter is attached to. //! emitter is attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOwnErrorHandler() const noexcept { return hasEmitterFlag(EmitterFlags::kOwnErrorHandler); } ASMJIT_INLINE_NODEBUG bool hasOwnErrorHandler() const noexcept { return hasEmitterFlag(EmitterFlags::kOwnErrorHandler); }
//! Returns the error handler this emitter uses. //! Returns the error handler this emitter uses.
//! //!
//! The returned error handler is either the emitter's own error handler or the error handler used by //! The returned error handler is either the emitter's own error handler or the error handler used by
//! \ref CodeHolder this emitter is attached to. //! \ref CodeHolder this emitter is attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ErrorHandler* errorHandler() const noexcept { return _errorHandler; } ASMJIT_INLINE_NODEBUG ErrorHandler* errorHandler() const noexcept { return _errorHandler; }
//! Sets or resets the error handler of the emitter. //! Sets or resets the error handler of the emitter.
@@ -472,8 +505,11 @@ public:
//! \{ //! \{
//! Returns encoding options. //! Returns encoding options.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG EncodingOptions encodingOptions() const noexcept { return _encodingOptions; } ASMJIT_INLINE_NODEBUG EncodingOptions encodingOptions() const noexcept { return _encodingOptions; }
//! Tests whether the encoding `option` is set. //! Tests whether the encoding `option` is set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasEncodingOption(EncodingOptions option) const noexcept { return Support::test(_encodingOptions, option); } ASMJIT_INLINE_NODEBUG bool hasEncodingOption(EncodingOptions option) const noexcept { return Support::test(_encodingOptions, option); }
//! Enables the given encoding `options`. //! Enables the given encoding `options`.
@@ -487,9 +523,11 @@ public:
//! \{ //! \{
//! Returns the emitter's diagnostic options. //! Returns the emitter's diagnostic options.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG DiagnosticOptions diagnosticOptions() const noexcept { return _diagnosticOptions; } ASMJIT_INLINE_NODEBUG DiagnosticOptions diagnosticOptions() const noexcept { return _diagnosticOptions; }
//! Tests whether the given `option` is present in the emitter's diagnostic options. //! Tests whether the given `option` is present in the emitter's diagnostic options.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasDiagnosticOption(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option); } ASMJIT_INLINE_NODEBUG bool hasDiagnosticOption(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option); }
//! Activates the given diagnostic `options`. //! Activates the given diagnostic `options`.
@@ -527,35 +565,49 @@ public:
//! Forced instruction options are merged with next instruction options before the instruction is encoded. These //! Forced instruction options are merged with next instruction options before the instruction is encoded. These
//! options have some bits reserved for error handling, logging, and instruction validation purposes. //! options have some bits reserved for error handling, logging, and instruction validation purposes.
//! Other options are global and affect every instruction. //! Other options are global and affect every instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstOptions forcedInstOptions() const noexcept { return _forcedInstOptions; } ASMJIT_INLINE_NODEBUG InstOptions forcedInstOptions() const noexcept { return _forcedInstOptions; }
//! Returns options of the next instruction. //! Returns options of the next instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstOptions instOptions() const noexcept { return _instOptions; } ASMJIT_INLINE_NODEBUG InstOptions instOptions() const noexcept { return _instOptions; }
//! Sets options of the next instruction. //! Sets options of the next instruction.
ASMJIT_INLINE_NODEBUG void setInstOptions(InstOptions options) noexcept { _instOptions = options; } ASMJIT_INLINE_NODEBUG void setInstOptions(InstOptions options) noexcept { _instOptions = options; }
//! Adds options of the next instruction. //! Adds options of the next instruction.
ASMJIT_INLINE_NODEBUG void addInstOptions(InstOptions options) noexcept { _instOptions |= options; } ASMJIT_INLINE_NODEBUG void addInstOptions(InstOptions options) noexcept { _instOptions |= options; }
//! Resets options of the next instruction. //! Resets options of the next instruction.
ASMJIT_INLINE_NODEBUG void resetInstOptions() noexcept { _instOptions = InstOptions::kNone; } ASMJIT_INLINE_NODEBUG void resetInstOptions() noexcept { _instOptions = InstOptions::kNone; }
//! Tests whether the extra register operand is valid. //! Tests whether the extra register operand is valid.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _extraReg.isReg(); } ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
//! Returns an extra operand that will be used by the next instruction (architecture specific). //! Returns an extra operand that will be used by the next instruction (architecture specific).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _extraReg; } ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _extraReg; }
//! Sets an extra operand that will be used by the next instruction (architecture specific). //! Sets an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE_NODEBUG void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); } ASMJIT_INLINE_NODEBUG void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); }
//! Sets an extra operand that will be used by the next instruction (architecture specific). //! Sets an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE_NODEBUG void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); } ASMJIT_INLINE_NODEBUG void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
//! Resets an extra operand that will be used by the next instruction (architecture specific). //! Resets an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE_NODEBUG void resetExtraReg() noexcept { _extraReg.reset(); } ASMJIT_INLINE_NODEBUG void resetExtraReg() noexcept { _extraReg.reset(); }
//! Returns comment/annotation of the next instruction. //! Returns comment/annotation of the next instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* inlineComment() const noexcept { return _inlineComment; } ASMJIT_INLINE_NODEBUG const char* inlineComment() const noexcept { return _inlineComment; }
//! Sets comment/annotation of the next instruction. //! Sets comment/annotation of the next instruction.
//! //!
//! \note This string is set back to null by `_emit()`, but until then it has to remain valid as the Emitter is not //! \note This string is set back to null by `_emit()`, but until then it has to remain valid as the Emitter is not
//! required to make a copy of it (and it would be slow to do that for each instruction). //! required to make a copy of it (and it would be slow to do that for each instruction).
ASMJIT_INLINE_NODEBUG void setInlineComment(const char* s) noexcept { _inlineComment = s; } ASMJIT_INLINE_NODEBUG void setInlineComment(const char* s) noexcept { _inlineComment = s; }
//! Resets the comment/annotation to nullptr. //! Resets the comment/annotation to nullptr.
ASMJIT_INLINE_NODEBUG void resetInlineComment() noexcept { _inlineComment = nullptr; } ASMJIT_INLINE_NODEBUG void resetInlineComment() noexcept { _inlineComment = nullptr; }
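As a usage sketch (the x86 backend and instruction choice are illustrative only), the inline comment applies to the very next instruction emitted and is reset by it:

#include <asmjit/x86.h>
using namespace asmjit;

static void emitWithComment(x86::Assembler& a) {
  a.setInlineComment("clear the accumulator"); // must stay valid until _emit()
  a.xor_(x86::eax, x86::eax);                  // consumes and resets the comment
  a.ret();                                     // emitted without a comment
}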
@@ -581,6 +633,7 @@ public:
//! Grabs the current emitter state and resets the emitter state at the same time, returning the state the emitter //! Grabs the current emitter state and resets the emitter state at the same time, returning the state the emitter
//! had before the state was reset. //! had before the state was reset.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG State _grabState() noexcept { ASMJIT_INLINE_NODEBUG State _grabState() noexcept {
State s{_instOptions | _forcedInstOptions, _extraReg, _inlineComment}; State s{_instOptions | _forcedInstOptions, _extraReg, _inlineComment};
resetState(); resetState();
@@ -604,13 +657,19 @@ public:
//! \{ //! \{
//! Creates a new label. //! Creates a new label.
[[nodiscard]]
ASMJIT_API virtual Label newLabel(); ASMJIT_API virtual Label newLabel();
//! Creates a new named label. //! Creates a new named label.
[[nodiscard]]
ASMJIT_API virtual Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parentId = Globals::kInvalidId); ASMJIT_API virtual Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parentId = Globals::kInvalidId);
//! Creates a new anonymous label with a name, which can only be used for debugging purposes. //! Creates a new anonymous label with a name, which can only be used for debugging purposes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label newAnonymousLabel(const char* name, size_t nameSize = SIZE_MAX) { return newNamedLabel(name, nameSize, LabelType::kAnonymous); } ASMJIT_INLINE_NODEBUG Label newAnonymousLabel(const char* name, size_t nameSize = SIZE_MAX) { return newNamedLabel(name, nameSize, LabelType::kAnonymous); }
//! Creates a new external label. //! Creates a new external label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label newExternalLabel(const char* name, size_t nameSize = SIZE_MAX) { return newNamedLabel(name, nameSize, LabelType::kExternal); } ASMJIT_INLINE_NODEBUG Label newExternalLabel(const char* name, size_t nameSize = SIZE_MAX) { return newNamedLabel(name, nameSize, LabelType::kExternal); }
//! Returns `Label` by `name`. //! Returns `Label` by `name`.
@@ -619,6 +678,7 @@ public:
//! //!
//! \note This function doesn't trigger ErrorHandler in case the name is invalid or no such label exists. You must //! \note This function doesn't trigger ErrorHandler in case the name is invalid or no such label exists. You must
//! always check the validity of the `Label` returned. //! always check the validity of the `Label` returned.
[[nodiscard]]
ASMJIT_API Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept; ASMJIT_API Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept;
//! Binds the `label` to the current position of the current section. //! Binds the `label` to the current position of the current section.
@@ -627,8 +687,11 @@ public:
ASMJIT_API virtual Error bind(const Label& label); ASMJIT_API virtual Error bind(const Label& label);
//! Tests whether the label `id` is valid (i.e. registered). //! Tests whether the label `id` is valid (i.e. registered).
[[nodiscard]]
ASMJIT_API bool isLabelValid(uint32_t labelId) const noexcept; ASMJIT_API bool isLabelValid(uint32_t labelId) const noexcept;
//! Tests whether the `label` is valid (i.e. registered). //! Tests whether the `label` is valid (i.e. registered).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLabelValid(const Label& label) const noexcept { return isLabelValid(label.id()); } ASMJIT_INLINE_NODEBUG bool isLabelValid(const Label& label) const noexcept { return isLabelValid(label.id()); }
//! \} //! \}
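A hedged end-to-end sketch of the label API above (x86 assembler and loop body chosen purely for illustration):

#include <asmjit/x86.h>
using namespace asmjit;

static void emitCountdown(x86::Assembler& a) {
  Label top = a.newLabel();             // unbound until bind() is called
  Label done = a.newNamedLabel("done");

  a.bind(top);
  a.dec(x86::ecx);
  a.jnz(top);
  a.bind(done);
  a.ret();

  // labelByName() never triggers the ErrorHandler - validate the result manually.
  Label found = a.labelByName("done");
  if (!a.isLabelValid(found)) {
    // handle the failed lookup here
  }
}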
@@ -676,7 +739,7 @@ public:
//! Similar to \ref emit(), but emits an instruction with both instruction options and an extra register, followed //! Similar to \ref emit(), but emits an instruction with both instruction options and an extra register, followed
//! by an array of `operands`. //! by an array of `operands`.
ASMJIT_FORCE_INLINE Error emitInst(const BaseInst& inst, const Operand_* operands, size_t opCount) { ASMJIT_INLINE Error emitInst(const BaseInst& inst, const Operand_* operands, size_t opCount) {
setInstOptions(inst.options()); setInstOptions(inst.options());
setExtraReg(inst.extraReg()); setExtraReg(inst.extraReg());
return _emitOpArray(inst.id(), operands, opCount); return _emitOpArray(inst.id(), operands, opCount);
@@ -794,6 +857,7 @@ public:
//! Called after the emitter was attached to `CodeHolder`. //! Called after the emitter was attached to `CodeHolder`.
ASMJIT_API virtual Error onAttach(CodeHolder* ASMJIT_NONNULL(code)) noexcept; ASMJIT_API virtual Error onAttach(CodeHolder* ASMJIT_NONNULL(code)) noexcept;
//! Called after the emitter was detached from `CodeHolder`. //! Called after the emitter was detached from `CodeHolder`.
ASMJIT_API virtual Error onDetach(CodeHolder* ASMJIT_NONNULL(code)) noexcept; ASMJIT_API virtual Error onDetach(CodeHolder* ASMJIT_NONNULL(code)) noexcept;
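Tying the interface above together, a hedged sketch of emitInst() from the earlier hunk (x86 backend, instruction id, and operands are illustrative assumptions):

#include <asmjit/x86.h>
using namespace asmjit;

static Error emitMovViaInst(x86::Assembler& a) {
  // BaseInst bundles an instruction id with per-instruction options;
  // operands are passed separately as an array.
  Operand ops[] = { x86::eax, Imm(42) };
  BaseInst inst(x86::Inst::kIdMov, InstOptions::kNone);
  return a.emitInst(inst, ops, 2);
}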

View File

@@ -88,10 +88,12 @@ void logInstructionEmitted(
sb.appendChars(' ', logger->indentation(FormatIndentationGroup::kCode)); sb.appendChars(' ', logger->indentation(FormatIndentationGroup::kCode));
self->_funcs.formatInstruction(sb, formatFlags, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount); self->_funcs.formatInstruction(sb, formatFlags, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
if (Support::test(formatFlags, FormatFlags::kMachineCode)) if (Support::test(formatFlags, FormatFlags::kMachineCode)) {
finishFormattedLine(sb, logger->options(), self->bufferPtr(), size_t(emittedSize), relSize, immSize, self->inlineComment()); finishFormattedLine(sb, logger->options(), self->bufferPtr(), size_t(emittedSize), relSize, immSize, self->inlineComment());
else }
else {
finishFormattedLine(sb, logger->options(), nullptr, SIZE_MAX, 0, 0, self->inlineComment()); finishFormattedLine(sb, logger->options(), nullptr, SIZE_MAX, 0, 0, self->inlineComment());
}
logger->log(sb); logger->log(sb);
} }

View File

@@ -31,7 +31,8 @@ enum kOpIndex : uint32_t {
kOp5 = 2 kOp5 = 2
}; };
static ASMJIT_FORCE_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept { [[nodiscard]]
static ASMJIT_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
uint32_t opCount = 0; uint32_t opCount = 0;
if (opExt[kOp3].isNone()) { if (opExt[kOp3].isNone()) {
@@ -49,7 +50,7 @@ static ASMJIT_FORCE_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, cons
return opCount; return opCount;
} }
static ASMJIT_FORCE_INLINE void opArrayFromEmitArgs(Operand_ dst[Globals::kMaxOpCount], const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept { static ASMJIT_INLINE void opArrayFromEmitArgs(Operand_ dst[Globals::kMaxOpCount], const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
dst[0].copyFrom(o0); dst[0].copyFrom(o0);
dst[1].copyFrom(o1); dst[1].copyFrom(o1);
dst[2].copyFrom(o2); dst[2].copyFrom(o2);

View File

@@ -35,8 +35,9 @@ uint32_t Environment::stackAlignment() const noexcept {
return 16u; return 16u;
} }
if (isFamilyARM()) if (isFamilyARM()) {
return 8; return 8;
}
// Bail to 4-byte alignment if we don't know. // Bail to 4-byte alignment if we don't know.
return 4; return 4;

View File

@@ -225,13 +225,13 @@ public:
//! \{ //! \{
//! Creates a default initialized environment (all values either unknown or set to safe defaults). //! Creates a default initialized environment (all values either unknown or set to safe defaults).
ASMJIT_INLINE_NODEBUG constexpr Environment() noexcept = default; ASMJIT_INLINE_CONSTEXPR Environment() noexcept = default;
//! Creates a copy of `other` instance. //! Creates a copy of `other` instance.
ASMJIT_INLINE_NODEBUG constexpr Environment(const Environment& other) noexcept = default; ASMJIT_INLINE_CONSTEXPR Environment(const Environment& other) noexcept = default;
//! Creates \ref Environment initialized to `arch`, `subArch`, `vendor`, `platform`, `platformABI`, `objectFormat`, //! Creates \ref Environment initialized to `arch`, `subArch`, `vendor`, `platform`, `platformABI`, `objectFormat`,
//! and `floatABI`. //! and `floatABI`.
ASMJIT_INLINE_NODEBUG constexpr explicit Environment( ASMJIT_INLINE_CONSTEXPR explicit Environment(
Arch arch, Arch arch,
SubArch subArch = SubArch::kUnknown, SubArch subArch = SubArch::kUnknown,
Vendor vendor = Vendor::kUnknown, Vendor vendor = Vendor::kUnknown,
@@ -251,7 +251,7 @@ public:
//! //!
//! The returned environment should precisely match the target host architecture, sub-architecture, platform, //! The returned environment should precisely match the target host architecture, sub-architecture, platform,
//! and ABI. //! and ABI.
static ASMJIT_INLINE_NODEBUG Environment host() noexcept { static ASMJIT_INLINE_CONSTEXPR Environment host() noexcept {
return Environment(Arch::kHost, SubArch::kHost, Vendor::kHost, Platform::kHost, PlatformABI::kHost, ObjectFormat::kUnknown, FloatABI::kHost); return Environment(Arch::kHost, SubArch::kHost, Vendor::kHost, Platform::kHost, PlatformABI::kHost, ObjectFormat::kUnknown, FloatABI::kHost);
} }
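A short usage sketch (values are illustrative): host() describes the machine the process runs on, while the explicit constructor describes a cross-compilation target:

#include <asmjit/core.h>
using namespace asmjit;

static bool isCrossCompiling() noexcept {
  Environment host = Environment::host();  // constexpr-friendly after this change
  Environment target(Arch::kAArch64);      // explicit cross-target
  return host.arch() != target.arch();
}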
@@ -262,7 +262,10 @@ public:
ASMJIT_INLINE_NODEBUG Environment& operator=(const Environment& other) noexcept = default; ASMJIT_INLINE_NODEBUG Environment& operator=(const Environment& other) noexcept = default;
ASMJIT_INLINE_NODEBUG bool operator==(const Environment& other) const noexcept { return equals(other); } [[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const Environment& other) const noexcept { return equals(other); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const Environment& other) const noexcept { return !equals(other); } ASMJIT_INLINE_NODEBUG bool operator!=(const Environment& other) const noexcept { return !equals(other); }
//! \} //! \}
@@ -273,6 +276,7 @@ public:
//! Tests whether the environment is not set up. //! Tests whether the environment is not set up.
//! //!
//! Returns true if all members are zero, and thus unknown. //! Returns true if all members are zero, and thus unknown.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { ASMJIT_INLINE_NODEBUG bool empty() const noexcept {
// Unfortunately compilers won't optimize this if the fields are checked one by one... // Unfortunately compilers won't optimize this if the fields are checked one by one...
return _packed() == 0; return _packed() == 0;
@@ -280,10 +284,12 @@ public:
//! Tests whether the environment is initialized, which means it must have //! Tests whether the environment is initialized, which means it must have
//! a valid architecture. //! a valid architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept {
return _arch != Arch::kUnknown; return _arch != Arch::kUnknown;
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t _packed() const noexcept { ASMJIT_INLINE_NODEBUG uint64_t _packed() const noexcept {
uint64_t x; uint64_t x;
memcpy(&x, this, 8); memcpy(&x, this, 8);
@@ -294,21 +300,35 @@ public:
ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = Environment{}; } ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = Environment{}; }
//! Tests whether this environment is equal to `other`. //! Tests whether this environment is equal to `other`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool equals(const Environment& other) const noexcept { return _packed() == other._packed(); } ASMJIT_INLINE_NODEBUG bool equals(const Environment& other) const noexcept { return _packed() == other._packed(); }
//! Returns the architecture. //! Returns the architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; } ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; }
//! Returns the sub-architecture. //! Returns the sub-architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return _subArch; } ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return _subArch; }
//! Returns vendor. //! Returns vendor.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Vendor vendor() const noexcept { return _vendor; } ASMJIT_INLINE_NODEBUG Vendor vendor() const noexcept { return _vendor; }
//! Returns target's platform or operating system. //! Returns target's platform or operating system.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Platform platform() const noexcept { return _platform; } ASMJIT_INLINE_NODEBUG Platform platform() const noexcept { return _platform; }
//! Returns target's ABI. //! Returns target's ABI.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG PlatformABI platformABI() const noexcept { return _platformABI; } ASMJIT_INLINE_NODEBUG PlatformABI platformABI() const noexcept { return _platformABI; }
//! Returns target's object format. //! Returns target's object format.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ObjectFormat objectFormat() const noexcept { return _objectFormat; } ASMJIT_INLINE_NODEBUG ObjectFormat objectFormat() const noexcept { return _objectFormat; }
//! Returns floating point ABI. //! Returns floating point ABI.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FloatABI floatABI() const noexcept { return _floatABI; } ASMJIT_INLINE_NODEBUG FloatABI floatABI() const noexcept { return _floatABI; }
//! Initializes \ref Environment to `arch`, `subArch`, `vendor`, `platform`, `platformABI`, `objectFormat`, //! Initializes \ref Environment to `arch`, `subArch`, `vendor`, `platform`, `platformABI`, `objectFormat`,
@@ -333,57 +353,99 @@ public:
} }
//! Tests whether this environment describes a 32-bit X86. //! Tests whether this environment describes a 32-bit X86.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchX86() const noexcept { return _arch == Arch::kX86; } ASMJIT_INLINE_NODEBUG bool isArchX86() const noexcept { return _arch == Arch::kX86; }
//! Tests whether this environment describes a 64-bit X86. //! Tests whether this environment describes a 64-bit X86.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchX64() const noexcept { return _arch == Arch::kX64; } ASMJIT_INLINE_NODEBUG bool isArchX64() const noexcept { return _arch == Arch::kX64; }
//! Tests whether this environment describes a 32-bit ARM. //! Tests whether this environment describes a 32-bit ARM.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchARM() const noexcept { return isArchARM(_arch); } ASMJIT_INLINE_NODEBUG bool isArchARM() const noexcept { return isArchARM(_arch); }
//! Tests whether this environment describes a 32-bit ARM in THUMB mode. //! Tests whether this environment describes a 32-bit ARM in THUMB mode.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchThumb() const noexcept { return isArchThumb(_arch); } ASMJIT_INLINE_NODEBUG bool isArchThumb() const noexcept { return isArchThumb(_arch); }
//! Tests whether this environment describes AArch64 (64-bit ARM). //! Tests whether this environment describes AArch64 (64-bit ARM).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchAArch64() const noexcept { return isArchAArch64(_arch); } ASMJIT_INLINE_NODEBUG bool isArchAArch64() const noexcept { return isArchAArch64(_arch); }
//! Tests whether this environment describes a 32-bit MIPS. //! Tests whether this environment describes a 32-bit MIPS.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchMIPS32() const noexcept { return isArchMIPS32(_arch); } ASMJIT_INLINE_NODEBUG bool isArchMIPS32() const noexcept { return isArchMIPS32(_arch); }
//! Tests whether this environment describes a 64-bit MIPS. //! Tests whether this environment describes a 64-bit MIPS.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchMIPS64() const noexcept { return isArchMIPS64(_arch); } ASMJIT_INLINE_NODEBUG bool isArchMIPS64() const noexcept { return isArchMIPS64(_arch); }
//! Tests whether this environment describes a 32-bit RISC-V. //! Tests whether this environment describes a 32-bit RISC-V.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchRISCV32() const noexcept { return _arch == Arch::kRISCV32; } ASMJIT_INLINE_NODEBUG bool isArchRISCV32() const noexcept { return _arch == Arch::kRISCV32; }
//! Tests whether this environment describes a 64-bit RISC-V. //! Tests whether this environment describes a 64-bit RISC-V.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchRISCV64() const noexcept { return _arch == Arch::kRISCV64; } ASMJIT_INLINE_NODEBUG bool isArchRISCV64() const noexcept { return _arch == Arch::kRISCV64; }
//! Tests whether the architecture is 32-bit. //! Tests whether the architecture is 32-bit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is32Bit() const noexcept { return is32Bit(_arch); } ASMJIT_INLINE_NODEBUG bool is32Bit() const noexcept { return is32Bit(_arch); }
//! Tests whether the architecture is 64-bit. //! Tests whether the architecture is 64-bit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is64Bit() const noexcept { return is64Bit(_arch); } ASMJIT_INLINE_NODEBUG bool is64Bit() const noexcept { return is64Bit(_arch); }
//! Tests whether the architecture is little endian. //! Tests whether the architecture is little endian.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLittleEndian() const noexcept { return isLittleEndian(_arch); } ASMJIT_INLINE_NODEBUG bool isLittleEndian() const noexcept { return isLittleEndian(_arch); }
//! Tests whether the architecture is big endian. //! Tests whether the architecture is big endian.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isBigEndian() const noexcept { return isBigEndian(_arch); } ASMJIT_INLINE_NODEBUG bool isBigEndian() const noexcept { return isBigEndian(_arch); }
//! Tests whether this architecture is of X86 family. //! Tests whether this architecture is of X86 family.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyX86() const noexcept { return isFamilyX86(_arch); } ASMJIT_INLINE_NODEBUG bool isFamilyX86() const noexcept { return isFamilyX86(_arch); }
//! Tests whether this architecture family is ARM, THUMB, or AArch64. //! Tests whether this architecture family is ARM, THUMB, or AArch64.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyARM() const noexcept { return isFamilyARM(_arch); } ASMJIT_INLINE_NODEBUG bool isFamilyARM() const noexcept { return isFamilyARM(_arch); }
//! Tests whether this architecture family is AArch32 (ARM or THUMB). //! Tests whether this architecture family is AArch32 (ARM or THUMB).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyAArch32() const noexcept { return isFamilyAArch32(_arch); } ASMJIT_INLINE_NODEBUG bool isFamilyAArch32() const noexcept { return isFamilyAArch32(_arch); }
//! Tests whether this architecture family is AArch64. //! Tests whether this architecture family is AArch64.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyAArch64() const noexcept { return isFamilyAArch64(_arch); } ASMJIT_INLINE_NODEBUG bool isFamilyAArch64() const noexcept { return isFamilyAArch64(_arch); }
//! Tests whether this architecture family is MIPS or MIPS64. //! Tests whether this architecture family is MIPS or MIPS64.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyMIPS() const noexcept { return isFamilyMIPS(_arch); } ASMJIT_INLINE_NODEBUG bool isFamilyMIPS() const noexcept { return isFamilyMIPS(_arch); }
//! Tests whether this architecture family is RISC-V (both 32-bit and 64-bit). //! Tests whether this architecture family is RISC-V (both 32-bit and 64-bit).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyRISCV() const noexcept { return isFamilyRISCV(_arch); } ASMJIT_INLINE_NODEBUG bool isFamilyRISCV() const noexcept { return isFamilyRISCV(_arch); }
//! Tests whether the environment platform is Windows. //! Tests whether the environment platform is Windows.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformWindows() const noexcept { return _platform == Platform::kWindows; } ASMJIT_INLINE_NODEBUG bool isPlatformWindows() const noexcept { return _platform == Platform::kWindows; }
//! Tests whether the environment platform is Linux. //! Tests whether the environment platform is Linux.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformLinux() const noexcept { return _platform == Platform::kLinux; } ASMJIT_INLINE_NODEBUG bool isPlatformLinux() const noexcept { return _platform == Platform::kLinux; }
//! Tests whether the environment platform is Hurd. //! Tests whether the environment platform is Hurd.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformHurd() const noexcept { return _platform == Platform::kHurd; } ASMJIT_INLINE_NODEBUG bool isPlatformHurd() const noexcept { return _platform == Platform::kHurd; }
//! Tests whether the environment platform is Haiku. //! Tests whether the environment platform is Haiku.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformHaiku() const noexcept { return _platform == Platform::kHaiku; } ASMJIT_INLINE_NODEBUG bool isPlatformHaiku() const noexcept { return _platform == Platform::kHaiku; }
//! Tests whether the environment platform is any BSD. //! Tests whether the environment platform is any BSD.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformBSD() const noexcept { ASMJIT_INLINE_NODEBUG bool isPlatformBSD() const noexcept {
return _platform == Platform::kFreeBSD || return _platform == Platform::kFreeBSD ||
_platform == Platform::kOpenBSD || _platform == Platform::kOpenBSD ||
@@ -392,6 +454,7 @@ public:
} }
//! Tests whether the environment platform is any Apple platform (OSX, iOS, TVOS, WatchOS). //! Tests whether the environment platform is any Apple platform (OSX, iOS, TVOS, WatchOS).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformApple() const noexcept { ASMJIT_INLINE_NODEBUG bool isPlatformApple() const noexcept {
return _platform == Platform::kOSX || return _platform == Platform::kOSX ||
_platform == Platform::kIOS || _platform == Platform::kIOS ||
@@ -400,16 +463,23 @@ public:
} }
//! Tests whether the ABI is MSVC. //! Tests whether the ABI is MSVC.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMSVC() const noexcept { return _platformABI == PlatformABI::kMSVC; } ASMJIT_INLINE_NODEBUG bool isMSVC() const noexcept { return _platformABI == PlatformABI::kMSVC; }
//! Tests whether the ABI is GNU. //! Tests whether the ABI is GNU.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isGNU() const noexcept { return _platformABI == PlatformABI::kGNU; } ASMJIT_INLINE_NODEBUG bool isGNU() const noexcept { return _platformABI == PlatformABI::kGNU; }
//! Tests whether the ABI is Darwin. //! Tests whether the ABI is Darwin.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isDarwin() const noexcept { return _platformABI == PlatformABI::kDarwin; } ASMJIT_INLINE_NODEBUG bool isDarwin() const noexcept { return _platformABI == PlatformABI::kDarwin; }
//! Returns a calculated stack alignment for this environment. //! Returns a calculated stack alignment for this environment.
[[nodiscard]]
ASMJIT_API uint32_t stackAlignment() const noexcept; ASMJIT_API uint32_t stackAlignment() const noexcept;
//! Returns a native register size of this architecture. //! Returns a native register size of this architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return registerSizeFromArch(_arch); } ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return registerSizeFromArch(_arch); }
//! Sets the architecture to `arch`. //! Sets the architecture to `arch`.
@@ -433,90 +503,108 @@ public:
//! \name Static Utilities //! \name Static Utilities
//! \{ //! \{
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isDefinedArch(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG bool isDefinedArch(Arch arch) noexcept {
return uint32_t(arch) <= uint32_t(Arch::kMaxValue); return uint32_t(arch) <= uint32_t(Arch::kMaxValue);
} }
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isValidArch(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG bool isValidArch(Arch arch) noexcept {
return arch != Arch::kUnknown && uint32_t(arch) <= uint32_t(Arch::kMaxValue); return arch != Arch::kUnknown && uint32_t(arch) <= uint32_t(Arch::kMaxValue);
} }
//! Tests whether the given architecture `arch` is 32-bit. //! Tests whether the given architecture `arch` is 32-bit.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool is32Bit(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG bool is32Bit(Arch arch) noexcept {
return (uint32_t(arch) & uint32_t(Arch::k32BitMask)) == uint32_t(Arch::k32BitMask); return (uint32_t(arch) & uint32_t(Arch::k32BitMask)) == uint32_t(Arch::k32BitMask);
} }
//! Tests whether the given architecture `arch` is 64-bit. //! Tests whether the given architecture `arch` is 64-bit.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool is64Bit(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG bool is64Bit(Arch arch) noexcept {
return (uint32_t(arch) & uint32_t(Arch::k32BitMask)) == 0; return (uint32_t(arch) & uint32_t(Arch::k32BitMask)) == 0;
} }
//! Tests whether the given architecture `arch` is little endian. //! Tests whether the given architecture `arch` is little endian.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isLittleEndian(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG bool isLittleEndian(Arch arch) noexcept {
return uint32_t(arch) < uint32_t(Arch::kBigEndian); return uint32_t(arch) < uint32_t(Arch::kBigEndian);
} }
//! Tests whether the given architecture `arch` is big endian. //! Tests whether the given architecture `arch` is big endian.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isBigEndian(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG bool isBigEndian(Arch arch) noexcept {
return uint32_t(arch) >= uint32_t(Arch::kBigEndian); return uint32_t(arch) >= uint32_t(Arch::kBigEndian);
} }
//! Tests whether the given architecture is Thumb or Thumb_BE. //! Tests whether the given architecture is Thumb or Thumb_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchThumb(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG bool isArchThumb(Arch arch) noexcept {
return arch == Arch::kThumb || arch == Arch::kThumb_BE; return arch == Arch::kThumb || arch == Arch::kThumb_BE;
} }
//! Tests whether the given architecture is ARM or ARM_BE. //! Tests whether the given architecture is ARM or ARM_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchARM(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG bool isArchARM(Arch arch) noexcept {
return arch == Arch::kARM || arch == Arch::kARM_BE; return arch == Arch::kARM || arch == Arch::kARM_BE;
} }
//! Tests whether the given architecture is AArch64 or AArch64_BE. //! Tests whether the given architecture is AArch64 or AArch64_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchAArch64(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG bool isArchAArch64(Arch arch) noexcept {
return arch == Arch::kAArch64 || arch == Arch::kAArch64_BE; return arch == Arch::kAArch64 || arch == Arch::kAArch64_BE;
} }
//! Tests whether the given architecture is MIPS32_LE or MIPS32_BE. //! Tests whether the given architecture is MIPS32_LE or MIPS32_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchMIPS32(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG bool isArchMIPS32(Arch arch) noexcept {
return arch == Arch::kMIPS32_LE || arch == Arch::kMIPS32_BE; return arch == Arch::kMIPS32_LE || arch == Arch::kMIPS32_BE;
} }
//! Tests whether the given architecture is MIPS64_LE or MIPS64_BE. //! Tests whether the given architecture is MIPS64_LE or MIPS64_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchMIPS64(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG bool isArchMIPS64(Arch arch) noexcept {
return arch == Arch::kMIPS64_LE || arch == Arch::kMIPS64_BE; return arch == Arch::kMIPS64_LE || arch == Arch::kMIPS64_BE;
} }
//! Tests whether the given architecture family is X86 or X64. //! Tests whether the given architecture family is X86 or X64.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyX86(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG bool isFamilyX86(Arch arch) noexcept {
return arch == Arch::kX86 || arch == Arch::kX64; return arch == Arch::kX86 || arch == Arch::kX64;
} }
//! Tests whether the given architecture family is AArch32 (ARM or THUMB). //! Tests whether the given architecture family is AArch32 (ARM or THUMB).
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyAArch32(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG bool isFamilyAArch32(Arch arch) noexcept {
return isArchARM(arch) || isArchThumb(arch); return isArchARM(arch) || isArchThumb(arch);
} }
//! Tests whether the given architecture family is AArch64. //! Tests whether the given architecture family is AArch64.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyAArch64(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG bool isFamilyAArch64(Arch arch) noexcept {
return isArchAArch64(arch); return isArchAArch64(arch);
} }
//! Tests whether the given architecture family is ARM, THUMB, or AArch64. //! Tests whether the given architecture family is ARM, THUMB, or AArch64.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyARM(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG bool isFamilyARM(Arch arch) noexcept {
return isFamilyAArch32(arch) || isFamilyAArch64(arch); return isFamilyAArch32(arch) || isFamilyAArch64(arch);
} }
//! Tests whether the given architecture family is MIPS or MIPS64. //! Tests whether the given architecture family is MIPS or MIPS64.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyMIPS(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG bool isFamilyMIPS(Arch arch) noexcept {
return isArchMIPS32(arch) || isArchMIPS64(arch); return isArchMIPS32(arch) || isArchMIPS64(arch);
} }
//! Tests whether the given architecture family is RISC-V (both 32-bit and 64-bit). //! Tests whether the given architecture family is RISC-V (both 32-bit and 64-bit).
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyRISCV(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG bool isFamilyRISCV(Arch arch) noexcept {
return arch == Arch::kRISCV32 || arch == Arch::kRISCV64; return arch == Arch::kRISCV32 || arch == Arch::kRISCV64;
} }
//! Returns a native general purpose register size from the given architecture. //! Returns a native general purpose register size from the given architecture.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG uint32_t registerSizeFromArch(Arch arch) noexcept { static ASMJIT_INLINE_NODEBUG uint32_t registerSizeFromArch(Arch arch) noexcept {
return is32Bit(arch) ? 4u : 8u; return is32Bit(arch) ? 4u : 8u;
} }
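The static helpers above work without an Environment instance; a minimal sketch (the helper name is illustrative):

#include <asmjit/core.h>
using namespace asmjit;

// Returns the native GP register width in bytes for `arch`, or 0 if invalid.
static uint32_t gpRegisterWidth(Arch arch) noexcept {
  if (!Environment::isValidArch(arch))
    return 0;
  return Environment::registerSizeFromArch(arch); // 4 for 32-bit, 8 for 64-bit
}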

View File

@@ -51,11 +51,13 @@ static const char wordNameTable[][8] = {
Error formatTypeId(String& sb, TypeId typeId) noexcept { Error formatTypeId(String& sb, TypeId typeId) noexcept {
if (typeId == TypeId::kVoid) if (typeId == TypeId::kVoid) {
return sb.append("void"); return sb.append("void");
}
if (!TypeUtils::isValid(typeId)) if (!TypeUtils::isValid(typeId)) {
return sb.append("unknown"); return sb.append("unknown");
}
const char* typeName = nullptr; const char* typeName = nullptr;
uint32_t typeSize = TypeUtils::sizeOf(typeId); uint32_t typeSize = TypeUtils::sizeOf(typeId);
@@ -103,13 +105,15 @@ Error formatFeature(
uint32_t featureId) noexcept { uint32_t featureId) noexcept {
#if !defined(ASMJIT_NO_X86) #if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) if (Environment::isFamilyX86(arch)) {
return x86::FormatterInternal::formatFeature(sb, featureId); return x86::FormatterInternal::formatFeature(sb, featureId);
}
#endif #endif
#if !defined(ASMJIT_NO_AARCH64) #if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyARM(arch)) if (Environment::isFamilyARM(arch)) {
return arm::FormatterInternal::formatFeature(sb, featureId); return arm::FormatterInternal::formatFeature(sb, featureId);
}
#endif #endif
return kErrorInvalidArch; return kErrorInvalidArch;
@@ -125,26 +129,31 @@ Error formatLabel(
if (emitter && emitter->code()) { if (emitter && emitter->code()) {
const LabelEntry* le = emitter->code()->labelEntry(labelId); const LabelEntry* le = emitter->code()->labelEntry(labelId);
if (ASMJIT_UNLIKELY(!le)) if (ASMJIT_UNLIKELY(!le)) {
return sb.appendFormat("<InvalidLabel:%u>", labelId); return sb.appendFormat("<InvalidLabel:%u>", labelId);
}
if (le->hasName()) { if (le->hasName()) {
if (le->hasParent()) { if (le->hasParent()) {
uint32_t parentId = le->parentId(); uint32_t parentId = le->parentId();
const LabelEntry* pe = emitter->code()->labelEntry(parentId); const LabelEntry* pe = emitter->code()->labelEntry(parentId);
if (ASMJIT_UNLIKELY(!pe)) if (ASMJIT_UNLIKELY(!pe)) {
ASMJIT_PROPAGATE(sb.appendFormat("<InvalidLabel:%u>", labelId)); ASMJIT_PROPAGATE(sb.appendFormat("<InvalidLabel:%u>", labelId));
else if (ASMJIT_UNLIKELY(!pe->hasName())) }
else if (ASMJIT_UNLIKELY(!pe->hasName())) {
ASMJIT_PROPAGATE(sb.appendFormat("L%u", parentId)); ASMJIT_PROPAGATE(sb.appendFormat("L%u", parentId));
else }
else {
ASMJIT_PROPAGATE(sb.append(pe->name())); ASMJIT_PROPAGATE(sb.append(pe->name()));
}
ASMJIT_PROPAGATE(sb.append('.')); ASMJIT_PROPAGATE(sb.append('.'));
} }
if (le->type() == LabelType::kAnonymous) if (le->type() == LabelType::kAnonymous) {
ASMJIT_PROPAGATE(sb.appendFormat("L%u@", labelId)); ASMJIT_PROPAGATE(sb.appendFormat("L%u@", labelId));
}
return sb.append(le->name()); return sb.append(le->name());
} }
} }
@@ -161,13 +170,15 @@ Error formatRegister(
uint32_t regId) noexcept { uint32_t regId) noexcept {
#if !defined(ASMJIT_NO_X86) #if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) if (Environment::isFamilyX86(arch)) {
return x86::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId); return x86::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
}
#endif #endif
#if !defined(ASMJIT_NO_AARCH64) #if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyARM(arch)) if (Environment::isFamilyARM(arch)) {
return arm::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId); return arm::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
}
#endif #endif
return kErrorInvalidArch; return kErrorInvalidArch;
@@ -181,13 +192,15 @@ Error formatOperand(
const Operand_& op) noexcept { const Operand_& op) noexcept {
#if !defined(ASMJIT_NO_X86) #if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) if (Environment::isFamilyX86(arch)) {
return x86::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op); return x86::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
}
#endif #endif
#if !defined(ASMJIT_NO_AARCH64) #if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyARM(arch)) if (Environment::isFamilyARM(arch)) {
return arm::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op); return arm::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
}
#endif #endif
return kErrorInvalidArch; return kErrorInvalidArch;
@@ -201,12 +214,14 @@ ASMJIT_API Error formatDataType(
{ {
DebugUtils::unused(formatFlags); DebugUtils::unused(formatFlags);
if (ASMJIT_UNLIKELY(uint32_t(arch) > uint32_t(Arch::kMaxValue))) if (ASMJIT_UNLIKELY(uint32_t(arch) > uint32_t(Arch::kMaxValue))) {
return DebugUtils::errored(kErrorInvalidArch); return DebugUtils::errored(kErrorInvalidArch);
}
uint32_t typeSize = TypeUtils::sizeOf(typeId); uint32_t typeSize = TypeUtils::sizeOf(typeId);
if (typeSize == 0 || typeSize > 8) if (typeSize == 0 || typeSize > 8) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
uint32_t typeSizeLog2 = Support::ctz(typeSize); uint32_t typeSizeLog2 = Support::ctz(typeSize);
return sb.append(wordNameTable[size_t(ArchTraits::byArch(arch).typeNameIdByIndex(typeSizeLog2))]); return sb.append(wordNameTable[size_t(ArchTraits::byArch(arch).typeNameIdByIndex(typeSizeLog2))]);
@@ -220,8 +235,9 @@ static Error formatDataHelper(String& sb, const char* typeName, uint32_t typeSiz
for (size_t i = 0; i < itemCount; i++) { for (size_t i = 0; i < itemCount; i++) {
uint64_t v = 0; uint64_t v = 0;
if (i != 0) if (i != 0) {
ASMJIT_PROPAGATE(sb.append(", ", 2)); ASMJIT_PROPAGATE(sb.append(", ", 2));
}
switch (typeSize) { switch (typeSize) {
case 1: v = data[0]; break; case 1: v = data[0]; break;
@@ -241,16 +257,18 @@ Error formatData(
String& sb, String& sb,
FormatFlags formatFlags, FormatFlags formatFlags,
Arch arch, Arch arch,
TypeId typeId, const void* data, size_t itemCount, size_t repeatCount) noexcept TypeId typeId, const void* data, size_t itemCount, size_t repeatCount
{ ) noexcept {
DebugUtils::unused(formatFlags); DebugUtils::unused(formatFlags);
if (ASMJIT_UNLIKELY(!Environment::isDefinedArch(arch))) if (ASMJIT_UNLIKELY(!Environment::isDefinedArch(arch))) {
return DebugUtils::errored(kErrorInvalidArch); return DebugUtils::errored(kErrorInvalidArch);
}
uint32_t typeSize = TypeUtils::sizeOf(typeId); uint32_t typeSize = TypeUtils::sizeOf(typeId);
if (typeSize == 0) if (typeSize == 0) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
if (!Support::isPowerOf2(typeSize)) { if (!Support::isPowerOf2(typeSize)) {
itemCount *= typeSize; itemCount *= typeSize;
@@ -265,8 +283,9 @@ Error formatData(
uint32_t typeSizeLog2 = Support::ctz(typeSize); uint32_t typeSizeLog2 = Support::ctz(typeSize);
const char* wordName = wordNameTable[size_t(ArchTraits::byArch(arch).typeNameIdByIndex(typeSizeLog2))]; const char* wordName = wordNameTable[size_t(ArchTraits::byArch(arch).typeNameIdByIndex(typeSizeLog2))];
if (repeatCount > 1) if (repeatCount > 1) {
ASMJIT_PROPAGATE(sb.appendFormat(".repeat %zu ", repeatCount)); ASMJIT_PROPAGATE(sb.appendFormat(".repeat %zu ", repeatCount));
}
return formatDataHelper(sb, wordName, typeSize, static_cast<const uint8_t*>(data), itemCount); return formatDataHelper(sb, wordName, typeSize, static_cast<const uint8_t*>(data), itemCount);
} }
@@ -279,13 +298,15 @@ Error formatInstruction(
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept { const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept {
#if !defined(ASMJIT_NO_X86) #if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) if (Environment::isFamilyX86(arch)) {
return x86::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount); return x86::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount);
}
#endif #endif
#if !defined(ASMJIT_NO_AARCH64) #if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch)) if (Environment::isFamilyAArch64(arch)) {
return a64::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount); return a64::FormatterInternal::formatInstruction(sb, formatFlags, emitter, arch, inst, operands, opCount);
}
#endif #endif
return kErrorInvalidArch; return kErrorInvalidArch;
@@ -301,8 +322,9 @@ static Error formatFuncValue(String& sb, FormatFlags formatFlags, const BaseEmit
if (value.isAssigned()) { if (value.isAssigned()) {
ASMJIT_PROPAGATE(sb.append('@')); ASMJIT_PROPAGATE(sb.append('@'));
if (value.isIndirect()) if (value.isIndirect()) {
ASMJIT_PROPAGATE(sb.append('[')); ASMJIT_PROPAGATE(sb.append('['));
}
// NOTE: It should be either reg or stack, but never both. We // NOTE: It should be either reg or stack, but never both. We
// use two IFs on purpose so if the FuncValue is both it would // use two IFs on purpose so if the FuncValue is both it would
@@ -315,8 +337,9 @@ static Error formatFuncValue(String& sb, FormatFlags formatFlags, const BaseEmit
ASMJIT_PROPAGATE(sb.appendFormat("[%d]", int(value.stackOffset()))); ASMJIT_PROPAGATE(sb.appendFormat("[%d]", int(value.stackOffset())));
} }
if (value.isIndirect()) if (value.isIndirect()) {
ASMJIT_PROPAGATE(sb.append(']')); ASMJIT_PROPAGATE(sb.append(']'));
}
} }
return kErrorOk; return kErrorOk;
@@ -330,19 +353,23 @@ static Error formatFuncValuePack(
const RegOnly* vRegs) noexcept { const RegOnly* vRegs) noexcept {
size_t count = pack.count(); size_t count = pack.count();
if (!count) if (!count) {
return sb.append("void"); return sb.append("void");
}
if (count > 1) if (count > 1) {
sb.append('['); ASMJIT_PROPAGATE(sb.append('['));
}
for (uint32_t valueIndex = 0; valueIndex < count; valueIndex++) { for (uint32_t valueIndex = 0; valueIndex < count; valueIndex++) {
const FuncValue& value = pack[valueIndex]; const FuncValue& value = pack[valueIndex];
if (!value) if (!value) {
break; break;
}
if (valueIndex) if (valueIndex) {
ASMJIT_PROPAGATE(sb.append(", ")); ASMJIT_PROPAGATE(sb.append(", "));
}
ASMJIT_PROPAGATE(formatFuncValue(sb, formatFlags, cc, value)); ASMJIT_PROPAGATE(formatFuncValue(sb, formatFlags, cc, value));
@@ -350,15 +377,17 @@ static Error formatFuncValuePack(
const VirtReg* virtReg = nullptr; const VirtReg* virtReg = nullptr;
static const char nullReg[] = "<none>"; static const char nullReg[] = "<none>";
if (vRegs[valueIndex].isReg() && cc->isVirtIdValid(vRegs[valueIndex].id())) if (vRegs[valueIndex].isReg() && cc->isVirtIdValid(vRegs[valueIndex].id())) {
virtReg = cc->virtRegById(vRegs[valueIndex].id()); virtReg = cc->virtRegById(vRegs[valueIndex].id());
}
ASMJIT_PROPAGATE(sb.appendFormat(" %s", virtReg ? virtReg->name() : nullReg)); ASMJIT_PROPAGATE(sb.appendFormat(" %s", virtReg ? virtReg->name() : nullReg));
} }
} }
if (count > 1) if (count > 1) {
sb.append(']'); ASMJIT_PROPAGATE(sb.append(']'));
}
return kErrorOk; return kErrorOk;
} }
@@ -380,13 +409,14 @@ static Error formatFuncArgs(
const FuncNode::ArgPack* argPacks) noexcept { const FuncNode::ArgPack* argPacks) noexcept {
uint32_t argCount = fd.argCount(); uint32_t argCount = fd.argCount();
if (!argCount) if (!argCount) {
return sb.append("void"); return sb.append("void");
}
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) { for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
if (argIndex) if (argIndex) {
ASMJIT_PROPAGATE(sb.append(", ")); ASMJIT_PROPAGATE(sb.append(", "));
}
ASMJIT_PROPAGATE(formatFuncValuePack(sb, formatFlags, cc, fd.argPack(argIndex), argPacks[argIndex]._data)); ASMJIT_PROPAGATE(formatFuncValuePack(sb, formatFlags, cc, fd.argPack(argIndex), argPacks[argIndex]._data));
} }
@@ -400,8 +430,9 @@ Error formatNode(
const BaseBuilder* builder, const BaseBuilder* builder,
const BaseNode* node) noexcept { const BaseNode* node) noexcept {
if (node->hasPosition() && formatOptions.hasFlag(FormatFlags::kPositions)) if (node->hasPosition() && formatOptions.hasFlag(FormatFlags::kPositions)) {
ASMJIT_PROPAGATE(sb.appendFormat("<%05u> ", node->position())); ASMJIT_PROPAGATE(sb.appendFormat("<%05u> ", node->position()));
}
size_t startLineIndex = sb.size(); size_t startLineIndex = sb.size();
@@ -542,8 +573,9 @@ Error formatNode(
size_t requiredPadding = paddingFromOptions(formatOptions, FormatPaddingGroup::kRegularLine); size_t requiredPadding = paddingFromOptions(formatOptions, FormatPaddingGroup::kRegularLine);
size_t currentPadding = sb.size() - startLineIndex; size_t currentPadding = sb.size() - startLineIndex;
if (currentPadding < requiredPadding) if (currentPadding < requiredPadding) {
ASMJIT_PROPAGATE(sb.appendChars(' ', requiredPadding - currentPadding)); ASMJIT_PROPAGATE(sb.appendChars(' ', requiredPadding - currentPadding));
}
ASMJIT_PROPAGATE(sb.append("; ")); ASMJIT_PROPAGATE(sb.append("; "));
ASMJIT_PROPAGATE(sb.append(node->inlineComment())); ASMJIT_PROPAGATE(sb.append(node->inlineComment()));

View File

@@ -108,28 +108,39 @@ public:
//! \{ //! \{
//! Returns format flags. //! Returns format flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FormatFlags flags() const noexcept { return _flags; } ASMJIT_INLINE_NODEBUG FormatFlags flags() const noexcept { return _flags; }
//! Tests whether the given `flag` is set in format flags. //! Tests whether the given `flag` is set in format flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(FormatFlags flag) const noexcept { return Support::test(_flags, flag); } ASMJIT_INLINE_NODEBUG bool hasFlag(FormatFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Resets all format flags to `flags`. //! Resets all format flags to `flags`.
ASMJIT_INLINE_NODEBUG void setFlags(FormatFlags flags) noexcept { _flags = flags; } ASMJIT_INLINE_NODEBUG void setFlags(FormatFlags flags) noexcept { _flags = flags; }
//! Adds `flags` to format flags. //! Adds `flags` to format flags.
ASMJIT_INLINE_NODEBUG void addFlags(FormatFlags flags) noexcept { _flags |= flags; } ASMJIT_INLINE_NODEBUG void addFlags(FormatFlags flags) noexcept { _flags |= flags; }
//! Removes `flags` from format flags. //! Removes `flags` from format flags.
ASMJIT_INLINE_NODEBUG void clearFlags(FormatFlags flags) noexcept { _flags &= ~flags; } ASMJIT_INLINE_NODEBUG void clearFlags(FormatFlags flags) noexcept { _flags &= ~flags; }
//! Returns indentation for the given indentation `group`. //! Returns indentation for the given indentation `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t indentation(FormatIndentationGroup group) const noexcept { return _indentation[group]; } ASMJIT_INLINE_NODEBUG uint8_t indentation(FormatIndentationGroup group) const noexcept { return _indentation[group]; }
//! Sets indentation for the given indentation `group`. //! Sets indentation for the given indentation `group`.
ASMJIT_INLINE_NODEBUG void setIndentation(FormatIndentationGroup group, uint32_t n) noexcept { _indentation[group] = uint8_t(n); } ASMJIT_INLINE_NODEBUG void setIndentation(FormatIndentationGroup group, uint32_t n) noexcept { _indentation[group] = uint8_t(n); }
//! Resets indentation for the given indentation `group` to zero. //! Resets indentation for the given indentation `group` to zero.
ASMJIT_INLINE_NODEBUG void resetIndentation(FormatIndentationGroup group) noexcept { _indentation[group] = uint8_t(0); } ASMJIT_INLINE_NODEBUG void resetIndentation(FormatIndentationGroup group) noexcept { _indentation[group] = uint8_t(0); }
//! Returns padding for the given padding `group`. //! Returns padding for the given padding `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t padding(FormatPaddingGroup group) const noexcept { return _padding[group]; } ASMJIT_INLINE_NODEBUG size_t padding(FormatPaddingGroup group) const noexcept { return _padding[group]; }
//! Sets padding for the given padding `group`. //! Sets padding for the given padding `group`.
ASMJIT_INLINE_NODEBUG void setPadding(FormatPaddingGroup group, size_t n) noexcept { _padding[group] = uint16_t(n); } ASMJIT_INLINE_NODEBUG void setPadding(FormatPaddingGroup group, size_t n) noexcept { _padding[group] = uint16_t(n); }
//! Resets padding for the given padding `group` to zero, which means that a default padding will be used //! Resets padding for the given padding `group` to zero, which means that a default padding will be used
//! based on the target architecture properties. //! based on the target architecture properties.
ASMJIT_INLINE_NODEBUG void resetPadding(FormatPaddingGroup group) noexcept { _padding[group] = uint16_t(0); } ASMJIT_INLINE_NODEBUG void resetPadding(FormatPaddingGroup group) noexcept { _padding[group] = uint16_t(0); }
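The accessors above are thin wrappers over the packed _flags/_indentation/_padding fields. A minimal configuration sketch (not part of this commit; it uses only members shown in this hunk and assumes the usual <asmjit/core.h> umbrella header):

    #include <asmjit/core.h>
    using namespace asmjit;

    static FormatOptions verboseFormatOptions() {
      FormatOptions fmt {};
      fmt.addFlags(FormatFlags::kPositions);                 // prefix builder nodes with "<NNNNN> "
      fmt.setPadding(FormatPaddingGroup::kRegularLine, 48);  // widen the inline-comment column
      return fmt;
    }

A padding of zero keeps the 44/26 defaults picked by paddingFromOptions() in the next file.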


@@ -16,7 +16,7 @@ ASMJIT_BEGIN_NAMESPACE
namespace Formatter { namespace Formatter {
static ASMJIT_FORCE_INLINE size_t paddingFromOptions(const FormatOptions& formatOptions, FormatPaddingGroup group) noexcept { static ASMJIT_INLINE size_t paddingFromOptions(const FormatOptions& formatOptions, FormatPaddingGroup group) noexcept {
static constexpr uint16_t _defaultPaddingTable[uint32_t(FormatPaddingGroup::kMaxValue) + 1] = { 44, 26 }; static constexpr uint16_t _defaultPaddingTable[uint32_t(FormatPaddingGroup::kMaxValue) + 1] = { 44, 26 };
static_assert(uint32_t(FormatPaddingGroup::kMaxValue) + 1 == 2, "If a new group is defined it must be added here"); static_assert(uint32_t(FormatPaddingGroup::kMaxValue) + 1 == 2, "If a new group is defined it must be added here");


@@ -27,13 +27,15 @@ ASMJIT_FAVOR_SIZE Error CallConv::init(CallConvId ccId, const Environment& envir
reset(); reset();
#if !defined(ASMJIT_NO_X86) #if !defined(ASMJIT_NO_X86)
if (environment.isFamilyX86()) if (environment.isFamilyX86()) {
return x86::FuncInternal::initCallConv(*this, ccId, environment); return x86::FuncInternal::initCallConv(*this, ccId, environment);
}
#endif #endif
#if !defined(ASMJIT_NO_AARCH64) #if !defined(ASMJIT_NO_AARCH64)
if (environment.isFamilyAArch64()) if (environment.isFamilyAArch64()) {
return a64::FuncInternal::initCallConv(*this, ccId, environment); return a64::FuncInternal::initCallConv(*this, ccId, environment);
}
#endif #endif
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
@@ -46,8 +48,9 @@ ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const E
CallConvId ccId = signature.callConvId(); CallConvId ccId = signature.callConvId();
uint32_t argCount = signature.argCount(); uint32_t argCount = signature.argCount();
if (ASMJIT_UNLIKELY(argCount > Globals::kMaxFuncArgs)) if (ASMJIT_UNLIKELY(argCount > Globals::kMaxFuncArgs)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
CallConv& cc = _callConv; CallConv& cc = _callConv;
ASMJIT_PROPAGATE(cc.init(ccId, environment)); ASMJIT_PROPAGATE(cc.init(ccId, environment));
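A minimal sketch of driving this initialization (not part of the commit; Environment::host() is the usual way to obtain the current environment and is an assumption here, everything else appears in this diff):

    #include <asmjit/core.h>
    using namespace asmjit;

    static Error describeHostCDecl(FuncDetail& out) {
      FuncSignature sig = FuncSignature::build<int, int, int>(CallConvId::kCDecl);
      // Propagates kErrorInvalidArgument when the host family was compiled out
      // (ASMJIT_NO_X86 / ASMJIT_NO_AARCH64), as the dispatch below shows.
      return out.init(sig, Environment::host());
    }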
@@ -65,17 +68,20 @@ ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const E
_vaIndex = uint8_t(signature.vaIndex()); _vaIndex = uint8_t(signature.vaIndex());
TypeId ret = signature.ret(); TypeId ret = signature.ret();
if (ret != TypeId::kVoid) if (ret != TypeId::kVoid) {
_rets[0].initTypeId(TypeUtils::deabstract(ret, deabstractDelta)); _rets[0].initTypeId(TypeUtils::deabstract(ret, deabstractDelta));
}
#if !defined(ASMJIT_NO_X86) #if !defined(ASMJIT_NO_X86)
if (environment.isFamilyX86()) if (environment.isFamilyX86()) {
return x86::FuncInternal::initFuncDetail(*this, signature, registerSize); return x86::FuncInternal::initFuncDetail(*this, signature, registerSize);
}
#endif #endif
#if !defined(ASMJIT_NO_AARCH64) #if !defined(ASMJIT_NO_AARCH64)
if (environment.isFamilyAArch64()) if (environment.isFamilyAArch64()) {
return a64::FuncInternal::initFuncDetail(*this, signature); return a64::FuncInternal::initFuncDetail(*this, signature);
}
#endif #endif
// We should never bubble here as if `cc.init()` succeeded then there has to be an implementation for the current // We should never bubble here as if `cc.init()` succeeded then there has to be an implementation for the current
@@ -88,8 +94,9 @@ ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const E
ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept { ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept {
Arch arch = func.callConv().arch(); Arch arch = func.callConv().arch();
if (!Environment::isValidArch(arch)) if (!Environment::isValidArch(arch)) {
return DebugUtils::errored(kErrorInvalidArch); return DebugUtils::errored(kErrorInvalidArch);
}
const ArchTraits& archTraits = ArchTraits::byArch(arch); const ArchTraits& archTraits = ArchTraits::byArch(arch);
@@ -104,8 +111,9 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept {
uint32_t naturalStackAlignment = func.callConv().naturalStackAlignment(); uint32_t naturalStackAlignment = func.callConv().naturalStackAlignment();
uint32_t minDynamicAlignment = Support::max<uint32_t>(naturalStackAlignment, 16); uint32_t minDynamicAlignment = Support::max<uint32_t>(naturalStackAlignment, 16);
if (minDynamicAlignment == naturalStackAlignment) if (minDynamicAlignment == naturalStackAlignment) {
minDynamicAlignment <<= 1; minDynamicAlignment <<= 1;
}
_naturalStackAlignment = uint8_t(naturalStackAlignment); _naturalStackAlignment = uint8_t(naturalStackAlignment);
_minDynamicAlignment = uint8_t(minDynamicAlignment); _minDynamicAlignment = uint8_t(minDynamicAlignment);
@@ -137,8 +145,9 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept {
// ==================== // ====================
ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept { ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
if (!Environment::isValidArch(arch())) if (!Environment::isValidArch(arch())) {
return DebugUtils::errored(kErrorInvalidArch); return DebugUtils::errored(kErrorInvalidArch);
}
const ArchTraits& archTraits = ArchTraits::byArch(arch()); const ArchTraits& archTraits = ArchTraits::byArch(arch());
@@ -148,9 +157,7 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
// The final stack alignment must be updated accordingly to call and local stack alignments. // The final stack alignment must be updated accordingly to call and local stack alignments.
uint32_t stackAlignment = _finalStackAlignment; uint32_t stackAlignment = _finalStackAlignment;
ASMJIT_ASSERT(stackAlignment == Support::max(_naturalStackAlignment, ASMJIT_ASSERT(stackAlignment == Support::max(_naturalStackAlignment, _callStackAlignment, _localStackAlignment));
_callStackAlignment,
_localStackAlignment));
bool hasFP = hasPreservedFP(); bool hasFP = hasPreservedFP();
bool hasDA = hasDynamicAlignment(); bool hasDA = hasDynamicAlignment();
@@ -165,32 +172,37 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
// Currently required by ARM, if this works differently across architectures we would have to generalize most // Currently required by ARM, if this works differently across architectures we would have to generalize most
// likely in CallConv. // likely in CallConv.
if (kLr != BaseReg::kIdBad) if (kLr != BaseReg::kIdBad) {
_dirtyRegs[RegGroup::kGp] |= Support::bitMask(kLr); _dirtyRegs[RegGroup::kGp] |= Support::bitMask(kLr);
}
} }
// These two are identical if the function doesn't align its stack dynamically. // These two are identical if the function doesn't align its stack dynamically.
uint32_t saRegId = _saRegId; uint32_t saRegId = _saRegId;
if (saRegId == BaseReg::kIdBad) if (saRegId == BaseReg::kIdBad) {
saRegId = kSp; saRegId = kSp;
}
// Fix stack arguments base-register from SP to FP in case it was not picked before and the function performs // Fix stack arguments base-register from SP to FP in case it was not picked before and the function performs
// dynamic stack alignment. // dynamic stack alignment.
if (hasDA && saRegId == kSp) if (hasDA && saRegId == kSp) {
saRegId = kFp; saRegId = kFp;
}
// Mark as dirty any register but SP if used as SA pointer. // Mark as dirty any register but SP if used as SA pointer.
if (saRegId != kSp) if (saRegId != kSp) {
_dirtyRegs[RegGroup::kGp] |= Support::bitMask(saRegId); _dirtyRegs[RegGroup::kGp] |= Support::bitMask(saRegId);
}
_spRegId = uint8_t(kSp); _spRegId = uint8_t(kSp);
_saRegId = uint8_t(saRegId); _saRegId = uint8_t(saRegId);
// Setup stack size used to save preserved registers. // Setup stack size used to save preserved registers.
uint32_t saveRestoreSizes[2] {}; uint32_t saveRestoreSizes[2] {};
for (RegGroup group : RegGroupVirtValues{}) for (RegGroup group : RegGroupVirtValues{}) {
saveRestoreSizes[size_t(!archTraits.hasInstPushPop(group))] saveRestoreSizes[size_t(!archTraits.hasInstPushPop(group))]
+= Support::alignUp(Support::popcnt(savedRegs(group)) * saveRestoreRegSize(group), saveRestoreAlignment(group)); += Support::alignUp(Support::popcnt(savedRegs(group)) * saveRestoreRegSize(group), saveRestoreAlignment(group));
}
_pushPopSaveSize = uint16_t(saveRestoreSizes[0]); _pushPopSaveSize = uint16_t(saveRestoreSizes[0]);
_extraRegSaveSize = uint16_t(saveRestoreSizes[1]); _extraRegSaveSize = uint16_t(saveRestoreSizes[1]);
@@ -235,22 +247,25 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
// (basically the native register/pointer size). We don't adjust it now as `v` now contains the exact size // (basically the native register/pointer size). We don't adjust it now as `v` now contains the exact size
// that the function requires to adjust (call frame + stack frame, vec stack size). The stack (if we consider // that the function requires to adjust (call frame + stack frame, vec stack size). The stack (if we consider
// this size) is misaligned now, as it's always aligned before the function call - when `call()` is executed // this size) is misaligned now, as it's always aligned before the function call - when `call()` is executed
// it pushes the current EIP|RIP onto the stack, and misaligns it by 12 or 8 bytes (depending on the // it pushes the current EIP|RIP onto the stack, and unaligns it by 12 or 8 bytes (depending on the
// architecture). So count number of bytes needed to align it up to the function's CallFrame (the beginning). // architecture). So count number of bytes needed to align it up to the function's CallFrame (the beginning).
if (v || hasFuncCalls() || !returnAddressSize) if (v || hasFuncCalls() || !returnAddressSize) {
v += Support::alignUpDiff(v + pushPopSaveSize() + returnAddressSize, stackAlignment); v += Support::alignUpDiff(v + pushPopSaveSize() + returnAddressSize, stackAlignment);
}
_pushPopSaveOffset = v; // Store 'pushPopSaveOffset' <- Function's push/pop save/restore starts here. _pushPopSaveOffset = v; // Store 'pushPopSaveOffset' <- Function's push/pop save/restore starts here.
_stackAdjustment = v; // Store 'stackAdjustment' <- SA used by 'add SP, SA' and 'sub SP, SA'. _stackAdjustment = v; // Store 'stackAdjustment' <- SA used by 'add SP, SA' and 'sub SP, SA'.
v += _pushPopSaveSize; // Count 'pushPopSaveSize' <- Function's push/pop save/restore ends here. v += _pushPopSaveSize; // Count 'pushPopSaveSize' <- Function's push/pop save/restore ends here.
_finalStackSize = v; // Store 'finalStackSize' <- Final stack used by the function. _finalStackSize = v; // Store 'finalStackSize' <- Final stack used by the function.
if (!archTraits.hasLinkReg()) if (!archTraits.hasLinkReg()) {
v += registerSize; // Count 'ReturnAddress' <- As CALL pushes onto stack. v += registerSize; // Count 'ReturnAddress' <- As CALL pushes onto stack.
}
// If the function performs dynamic stack alignment then the stack-adjustment must be aligned. // If the function performs dynamic stack alignment then the stack-adjustment must be aligned.
if (hasDA) if (hasDA) {
_stackAdjustment = Support::alignUp(_stackAdjustment, stackAlignment); _stackAdjustment = Support::alignUp(_stackAdjustment, stackAlignment);
}
// Calculate where the function arguments start relative to SP. // Calculate where the function arguments start relative to SP.
_saOffsetFromSP = hasDA ? FuncFrame::kTagInvalidOffset : v; _saOffsetFromSP = hasDA ? FuncFrame::kTagInvalidOffset : v;
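The alignment step above can be followed with plain numbers (this is just the arithmetic, not asmjit API; the sizes are made up for illustration):

    #include <cstdint>

    // Same computation as Support::alignUpDiff() for a power-of-two alignment.
    static uint32_t alignUpDiff(uint32_t x, uint32_t alignment) {
      return (alignment - (x & (alignment - 1))) & (alignment - 1);
    }

    int main() {
      uint32_t v = 24;                 // call frame + local stack + vec stack
      uint32_t pushPopSaveSize = 8;    // one saved GP register
      uint32_t returnAddressSize = 8;  // pushed by CALL on x86_64
      uint32_t stackAlignment = 16;

      // 24 + 8 + 8 = 40, so 8 more bytes are needed to reach a 16-byte boundary.
      v += alignUpDiff(v + pushPopSaveSize + returnAddressSize, stackAlignment);
      return int(v);                   // 32: the stackAdjustment before counting the saves
    }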
@@ -269,8 +284,9 @@ ASMJIT_FAVOR_SIZE Error FuncArgsAssignment::updateFuncFrame(FuncFrame& frame) co
Arch arch = frame.arch(); Arch arch = frame.arch();
const FuncDetail* func = funcDetail(); const FuncDetail* func = funcDetail();
if (!func) if (!func) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
RAConstraints constraints; RAConstraints constraints;
ASMJIT_PROPAGATE(constraints.init(arch)); ASMJIT_PROPAGATE(constraints.init(arch));
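A sketch of the surrounding workflow (not part of this commit; Environment::host(), the FuncArgsAssignment constructor and assignAll() are assumptions based on the usual asmjit function API, the commented-out register names are placeholders, and the rest appears in this diff):

    #include <asmjit/core.h>
    using namespace asmjit;

    static Error setupFrame(const FuncSignature& sig, FuncFrame& frameOut) {
      FuncDetail func;
      ASMJIT_PROPAGATE(func.init(sig, Environment::host()));
      ASMJIT_PROPAGATE(frameOut.init(func));

      FuncArgsAssignment args(&func);     // maps incoming arguments to target registers
      // args.assignAll(dstReg, srcReg);  // placeholder: assign the emitter's registers here
      ASMJIT_PROPAGATE(args.updateFuncFrame(frameOut));

      return frameOut.finalize();
    }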


@@ -152,7 +152,7 @@ struct CallConv {
//! \note This is not really AsmJit's limitation, it's just the number that makes sense considering all common //! \note This is not really AsmJit's limitation, it's just the number that makes sense considering all common
//! calling conventions. Usually even conventions that use registers to pass function arguments are limited to 8 //! calling conventions. Usually even conventions that use registers to pass function arguments are limited to 8
//! or fewer arguments passed via registers per group. //! or fewer arguments passed via registers per group.
static constexpr uint32_t kMaxRegArgsPerGroup = 16; static inline constexpr uint32_t kMaxRegArgsPerGroup = 16;
//! \} //! \}
@@ -228,46 +228,66 @@ struct CallConv {
//! \{ //! \{
//! Returns the target architecture of this calling convention. //! Returns the target architecture of this calling convention.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; } ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; }
//! Sets the target architecture of this calling convention. //! Sets the target architecture of this calling convention.
ASMJIT_INLINE_NODEBUG void setArch(Arch arch) noexcept { _arch = arch; } ASMJIT_INLINE_NODEBUG void setArch(Arch arch) noexcept { _arch = arch; }
//! Returns the calling convention id. //! Returns the calling convention id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CallConvId id() const noexcept { return _id; } ASMJIT_INLINE_NODEBUG CallConvId id() const noexcept { return _id; }
//! Sets the calling convention id. //! Sets the calling convention id.
ASMJIT_INLINE_NODEBUG void setId(CallConvId ccId) noexcept { _id = ccId; } ASMJIT_INLINE_NODEBUG void setId(CallConvId ccId) noexcept { _id = ccId; }
//! Returns the strategy used to assign registers to arguments. //! Returns the strategy used to assign registers to arguments.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CallConvStrategy strategy() const noexcept { return _strategy; } ASMJIT_INLINE_NODEBUG CallConvStrategy strategy() const noexcept { return _strategy; }
//! Sets the strategy used to assign registers to arguments. //! Sets the strategy used to assign registers to arguments.
ASMJIT_INLINE_NODEBUG void setStrategy(CallConvStrategy ccStrategy) noexcept { _strategy = ccStrategy; } ASMJIT_INLINE_NODEBUG void setStrategy(CallConvStrategy ccStrategy) noexcept { _strategy = ccStrategy; }
//! Tests whether the calling convention has the given `flag` set. //! Tests whether the calling convention has the given `flag` set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(CallConvFlags flag) const noexcept { return Support::test(_flags, flag); } ASMJIT_INLINE_NODEBUG bool hasFlag(CallConvFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Returns the calling convention flags, see `Flags`. //! Returns the calling convention flags, see `Flags`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CallConvFlags flags() const noexcept { return _flags; } ASMJIT_INLINE_NODEBUG CallConvFlags flags() const noexcept { return _flags; }
//! Sets the calling convention flags, see `Flags`. //! Sets the calling convention flags, see `Flags`.
ASMJIT_INLINE_NODEBUG void setFlags(CallConvFlags flag) noexcept { _flags = flag; }; ASMJIT_INLINE_NODEBUG void setFlags(CallConvFlags flag) noexcept { _flags = flag; };
//! Adds the calling convention flags, see `Flags`. //! Adds the calling convention flags, see `Flags`.
ASMJIT_INLINE_NODEBUG void addFlags(CallConvFlags flags) noexcept { _flags |= flags; }; ASMJIT_INLINE_NODEBUG void addFlags(CallConvFlags flags) noexcept { _flags |= flags; };
//! Tests whether this calling convention specifies 'RedZone'. //! Tests whether this calling convention specifies 'RedZone'.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRedZone() const noexcept { return _redZoneSize != 0; } ASMJIT_INLINE_NODEBUG bool hasRedZone() const noexcept { return _redZoneSize != 0; }
//! Tests whether this calling convention specifies 'SpillZone'. //! Tests whether this calling convention specifies 'SpillZone'.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasSpillZone() const noexcept { return _spillZoneSize != 0; } ASMJIT_INLINE_NODEBUG bool hasSpillZone() const noexcept { return _spillZoneSize != 0; }
//! Returns size of 'RedZone'. //! Returns size of 'RedZone'.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t redZoneSize() const noexcept { return _redZoneSize; } ASMJIT_INLINE_NODEBUG uint32_t redZoneSize() const noexcept { return _redZoneSize; }
//! Returns size of 'SpillZone'.
ASMJIT_INLINE_NODEBUG uint32_t spillZoneSize() const noexcept { return _spillZoneSize; }
//! Sets size of 'RedZone'. //! Sets size of 'RedZone'.
ASMJIT_INLINE_NODEBUG void setRedZoneSize(uint32_t size) noexcept { _redZoneSize = uint8_t(size); } ASMJIT_INLINE_NODEBUG void setRedZoneSize(uint32_t size) noexcept { _redZoneSize = uint8_t(size); }
//! Returns size of 'SpillZone'.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t spillZoneSize() const noexcept { return _spillZoneSize; }
//! Sets size of 'SpillZone'. //! Sets size of 'SpillZone'.
ASMJIT_INLINE_NODEBUG void setSpillZoneSize(uint32_t size) noexcept { _spillZoneSize = uint8_t(size); } ASMJIT_INLINE_NODEBUG void setSpillZoneSize(uint32_t size) noexcept { _spillZoneSize = uint8_t(size); }
//! Returns a natural stack alignment. //! Returns a natural stack alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t naturalStackAlignment() const noexcept { return _naturalStackAlignment; } ASMJIT_INLINE_NODEBUG uint32_t naturalStackAlignment() const noexcept { return _naturalStackAlignment; }
//! Sets a natural stack alignment. //! Sets a natural stack alignment.
//! //!
//! This function can be used to override the default stack alignment in case you know that its alignment is //! This function can be used to override the default stack alignment in case you know that its alignment is
@@ -275,22 +295,28 @@ struct CallConv {
ASMJIT_INLINE_NODEBUG void setNaturalStackAlignment(uint32_t value) noexcept { _naturalStackAlignment = uint8_t(value); } ASMJIT_INLINE_NODEBUG void setNaturalStackAlignment(uint32_t value) noexcept { _naturalStackAlignment = uint8_t(value); }
//! Returns the size of a register (or its part) to be saved and restored of the given `group`. //! Returns the size of a register (or its part) to be saved and restored of the given `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t saveRestoreRegSize(RegGroup group) const noexcept { return _saveRestoreRegSize[group]; } ASMJIT_INLINE_NODEBUG uint32_t saveRestoreRegSize(RegGroup group) const noexcept { return _saveRestoreRegSize[group]; }
//! Sets the size of a vector register (or its part) to be saved and restored. //! Sets the size of a vector register (or its part) to be saved and restored.
ASMJIT_INLINE_NODEBUG void setSaveRestoreRegSize(RegGroup group, uint32_t size) noexcept { _saveRestoreRegSize[group] = uint8_t(size); } ASMJIT_INLINE_NODEBUG void setSaveRestoreRegSize(RegGroup group, uint32_t size) noexcept { _saveRestoreRegSize[group] = uint8_t(size); }
//! Returns the alignment of a save-restore area of the given `group`. //! Returns the alignment of a save-restore area of the given `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t saveRestoreAlignment(RegGroup group) const noexcept { return _saveRestoreAlignment[group]; } ASMJIT_INLINE_NODEBUG uint32_t saveRestoreAlignment(RegGroup group) const noexcept { return _saveRestoreAlignment[group]; }
//! Sets the alignment of a save-restore area of the given `group`. //! Sets the alignment of a save-restore area of the given `group`.
ASMJIT_INLINE_NODEBUG void setSaveRestoreAlignment(RegGroup group, uint32_t alignment) noexcept { _saveRestoreAlignment[group] = uint8_t(alignment); } ASMJIT_INLINE_NODEBUG void setSaveRestoreAlignment(RegGroup group, uint32_t alignment) noexcept { _saveRestoreAlignment[group] = uint8_t(alignment); }
//! Returns the order of passed registers of the given `group`. //! Returns the order of passed registers of the given `group`.
[[nodiscard]]
inline const uint8_t* passedOrder(RegGroup group) const noexcept { inline const uint8_t* passedOrder(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _passedOrder[size_t(group)].id; return _passedOrder[size_t(group)].id;
} }
//! Returns the mask of passed registers of the given `group`. //! Returns the mask of passed registers of the given `group`.
[[nodiscard]]
inline RegMask passedRegs(RegGroup group) const noexcept { inline RegMask passedRegs(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _passedRegs[size_t(group)]; return _passedRegs[size_t(group)];
@@ -335,6 +361,7 @@ struct CallConv {
} }
//! Returns preserved register mask of the given `group`. //! Returns preserved register mask of the given `group`.
[[nodiscard]]
inline RegMask preservedRegs(RegGroup group) const noexcept { inline RegMask preservedRegs(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _preservedRegs[group]; return _preservedRegs[group];
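A sketch of querying a populated CallConv with the accessors above (only members shown in this diff are used; the SysV red-zone figure in the comment is general x86_64 knowledge, not something this diff states):

    #include <asmjit/core.h>
    using namespace asmjit;

    static void inspectGpConvention(const CallConv& cc) {
      RegMask passed    = cc.passedRegs(RegGroup::kGp);      // GPs used to pass arguments
      RegMask preserved = cc.preservedRegs(RegGroup::kGp);   // callee-saved GPs
      const uint8_t* order = cc.passedOrder(RegGroup::kGp);  // physical ids, kMaxRegArgsPerGroup entries
      uint32_t redZone = cc.redZoneSize();                   // e.g. 128 bytes for SysV x86_64
      (void)passed; (void)preserved; (void)order; (void)redZone;
    }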
@@ -365,7 +392,7 @@ struct FuncSignature {
//! \{ //! \{
//! Doesn't have variable number of arguments (`...`). //! Doesn't have variable number of arguments (`...`).
static constexpr uint8_t kNoVarArgs = 0xFFu; static inline constexpr uint8_t kNoVarArgs = 0xFFu;
//! \} //! \}
@@ -391,19 +418,19 @@ struct FuncSignature {
//! \{ //! \{
//! Default constructed function signature, initialized to \ref CallConvId::kCDecl, having no return value and no arguments. //! Default constructed function signature, initialized to \ref CallConvId::kCDecl, having no return value and no arguments.
ASMJIT_FORCE_INLINE constexpr FuncSignature() = default; ASMJIT_INLINE_CONSTEXPR FuncSignature() = default;
//! Copy constructor, which is initialized to the same function signature as `other`. //! Copy constructor, which is initialized to the same function signature as `other`.
ASMJIT_FORCE_INLINE constexpr FuncSignature(const FuncSignature& other) = default; ASMJIT_INLINE_CONSTEXPR FuncSignature(const FuncSignature& other) = default;
//! Initializes the function signature with calling convention id `ccId` and variable argument's index `vaIndex`. //! Initializes the function signature with calling convention id `ccId` and variable argument's index `vaIndex`.
ASMJIT_FORCE_INLINE constexpr FuncSignature(CallConvId ccId, uint32_t vaIndex = kNoVarArgs) noexcept ASMJIT_INLINE_CONSTEXPR FuncSignature(CallConvId ccId, uint32_t vaIndex = kNoVarArgs) noexcept
: _ccId(ccId), : _ccId(ccId),
_vaIndex(uint8_t(vaIndex)) {} _vaIndex(uint8_t(vaIndex)) {}
//! Initializes the function signature with calling convention id `ccId`, `vaIndex`, return value, and function arguments. //! Initializes the function signature with calling convention id `ccId`, `vaIndex`, return value, and function arguments.
template<typename... Args> template<typename... Args>
ASMJIT_FORCE_INLINE constexpr FuncSignature(CallConvId ccId, uint32_t vaIndex, TypeId ret, Args&&...args) noexcept ASMJIT_INLINE_CONSTEXPR FuncSignature(CallConvId ccId, uint32_t vaIndex, TypeId ret, Args&&...args) noexcept
: _ccId(ccId), : _ccId(ccId),
_argCount(uint8_t(sizeof...(args))), _argCount(uint8_t(sizeof...(args))),
_vaIndex(uint8_t(vaIndex)), _vaIndex(uint8_t(vaIndex)),
@@ -417,7 +444,8 @@ struct FuncSignature {
//! a convenience function that makes it possible to build a function signature statically based on types known at compile time, //! a convenience function that makes it possible to build a function signature statically based on types known at compile time,
//! which is common in JIT code generation. //! which is common in JIT code generation.
template<typename... RetValueAndArgs> template<typename... RetValueAndArgs>
static ASMJIT_INLINE_NODEBUG constexpr FuncSignature build(CallConvId ccId = CallConvId::kCDecl, uint32_t vaIndex = kNoVarArgs) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR FuncSignature build(CallConvId ccId = CallConvId::kCDecl, uint32_t vaIndex = kNoVarArgs) noexcept {
return FuncSignature(ccId, vaIndex, (TypeId(TypeUtils::TypeIdOfT<RetValueAndArgs>::kTypeId))... ); return FuncSignature(ccId, vaIndex, (TypeId(TypeUtils::TypeIdOfT<RetValueAndArgs>::kTypeId))... );
} }
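For instance, the builder above maps a C++ prototype directly to a signature (a short sketch; CallConvId::kCDecl is simply the default shown in the declaration):

    #include <asmjit/core.h>
    using namespace asmjit;

    // Signature of `int sum(int a, int b)`: the first template argument is the
    // return type, the remaining ones are the argument types.
    FuncSignature sig = FuncSignature::build<int, int, int>();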
@@ -427,12 +455,15 @@ struct FuncSignature {
//! \{ //! \{
//! Copy assignment - function signature can be copied by value. //! Copy assignment - function signature can be copied by value.
ASMJIT_FORCE_INLINE FuncSignature& operator=(const FuncSignature& other) noexcept = default; ASMJIT_INLINE FuncSignature& operator=(const FuncSignature& other) noexcept = default;
//! Compares this function signature with `other` for equality. //! Compares this function signature with `other` for equality.
ASMJIT_FORCE_INLINE bool operator==(const FuncSignature& other) const noexcept { return equals(other); } [[nodiscard]]
ASMJIT_INLINE bool operator==(const FuncSignature& other) const noexcept { return equals(other); }
//! Compares this function signature with `other` for inequality. //! Compares this function signature with `other` for inequality.
ASMJIT_FORCE_INLINE bool operator!=(const FuncSignature& other) const noexcept { return !equals(other); } [[nodiscard]]
ASMJIT_INLINE bool operator!=(const FuncSignature& other) const noexcept { return !equals(other); }
//! \} //! \}
@@ -448,6 +479,7 @@ struct FuncSignature {
//! \{ //! \{
//! Compares this function signature with `other` for equality. //! Compares this function signature with `other` for equality.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool equals(const FuncSignature& other) const noexcept { ASMJIT_INLINE_NODEBUG bool equals(const FuncSignature& other) const noexcept {
return _ccId == other._ccId && return _ccId == other._ccId &&
_argCount == other._argCount && _argCount == other._argCount &&
@@ -462,27 +494,38 @@ struct FuncSignature {
//! \{ //! \{
//! Returns the calling convention. //! Returns the calling convention.
ASMJIT_INLINE_NODEBUG CallConvId callConvId() const noexcept { return _ccId; } [[nodiscard]]
ASMJIT_INLINE_CONSTEXPR CallConvId callConvId() const noexcept { return _ccId; }
//! Sets the calling convention to `ccId`. //! Sets the calling convention to `ccId`.
ASMJIT_INLINE_NODEBUG void setCallConvId(CallConvId ccId) noexcept { _ccId = ccId; } ASMJIT_INLINE_CONSTEXPR void setCallConvId(CallConvId ccId) noexcept { _ccId = ccId; }
//! Tests whether the function signature has a return value. //! Tests whether the function signature has a return value.
ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return _ret != TypeId::kVoid; } [[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool hasRet() const noexcept { return _ret != TypeId::kVoid; }
//! Returns the type of the return value. //! Returns the type of the return value.
ASMJIT_INLINE_NODEBUG TypeId ret() const noexcept { return _ret; } [[nodiscard]]
ASMJIT_INLINE_CONSTEXPR TypeId ret() const noexcept { return _ret; }
//! Sets the return type to `retType`. //! Sets the return type to `retType`.
ASMJIT_INLINE_NODEBUG void setRet(TypeId retType) noexcept { _ret = retType; } ASMJIT_INLINE_CONSTEXPR void setRet(TypeId retType) noexcept { _ret = retType; }
//! Sets the return type based on `T`. //! Sets the return type based on `T`.
template<typename T> template<typename T>
ASMJIT_INLINE_NODEBUG void setRetT() noexcept { setRet(TypeId(TypeUtils::TypeIdOfT<T>::kTypeId)); } ASMJIT_INLINE_CONSTEXPR void setRetT() noexcept { setRet(TypeId(TypeUtils::TypeIdOfT<T>::kTypeId)); }
//! Returns the array of function arguments' types. //! Returns the array of function arguments' types.
ASMJIT_INLINE_NODEBUG const TypeId* args() const noexcept { return _args; } [[nodiscard]]
ASMJIT_INLINE_CONSTEXPR const TypeId* args() const noexcept { return _args; }
//! Returns the number of function arguments. //! Returns the number of function arguments.
ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _argCount; } [[nodiscard]]
ASMJIT_INLINE_CONSTEXPR uint32_t argCount() const noexcept { return _argCount; }
//! Returns the type of the argument at index `i`. //! Returns the type of the argument at index `i`.
[[nodiscard]]
inline TypeId arg(uint32_t i) const noexcept { inline TypeId arg(uint32_t i) const noexcept {
ASMJIT_ASSERT(i < _argCount); ASMJIT_ASSERT(i < _argCount);
return _args[i]; return _args[i];
@@ -493,6 +536,7 @@ struct FuncSignature {
ASMJIT_ASSERT(index < _argCount); ASMJIT_ASSERT(index < _argCount);
_args[index] = argType; _args[index] = argType;
} }
//! Sets the argument at index `i` to the type based on `T`. //! Sets the argument at index `i` to the type based on `T`.
template<typename T> template<typename T>
inline void setArgT(uint32_t index) noexcept { setArg(index, TypeId(TypeUtils::TypeIdOfT<T>::kTypeId)); } inline void setArgT(uint32_t index) noexcept { setArg(index, TypeId(TypeUtils::TypeIdOfT<T>::kTypeId)); }
@@ -503,6 +547,7 @@ struct FuncSignature {
//! to use this function. However, if you are adding arguments based on user input, for example, then either check //! to use this function. However, if you are adding arguments based on user input, for example, then either check
//! the number of arguments before using the function signature or use \ref canAddArg() before actually adding them to //! the number of arguments before using the function signature or use \ref canAddArg() before actually adding them to
//! the function signature. //! the function signature.
[[nodiscard]]
inline bool canAddArg() const noexcept { return _argCount < Globals::kMaxFuncArgs; } inline bool canAddArg() const noexcept { return _argCount < Globals::kMaxFuncArgs; }
//! Appends an argument of `type` to the function prototype. //! Appends an argument of `type` to the function prototype.
@@ -516,11 +561,16 @@ struct FuncSignature {
inline void addArgT() noexcept { addArg(TypeId(TypeUtils::TypeIdOfT<T>::kTypeId)); } inline void addArgT() noexcept { addArg(TypeId(TypeUtils::TypeIdOfT<T>::kTypeId)); }
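When arguments are only known at run time, the guard described in the note above looks like this (a sketch using only members shown in this diff plus the standard <vector> header):

    #include <asmjit/core.h>
    #include <vector>
    using namespace asmjit;

    static bool makeSignature(const std::vector<TypeId>& argTypes, FuncSignature& out) {
      FuncSignature sig(CallConvId::kCDecl);
      sig.setRetT<void>();
      for (TypeId argType : argTypes) {
        if (!sig.canAddArg()) {          // at most Globals::kMaxFuncArgs arguments
          return false;
        }
        sig.addArg(argType);
      }
      out = sig;
      return true;
    }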
//! Tests whether the function has variable number of arguments (...). //! Tests whether the function has variable number of arguments (...).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasVarArgs() const noexcept { return _vaIndex != kNoVarArgs; } ASMJIT_INLINE_NODEBUG bool hasVarArgs() const noexcept { return _vaIndex != kNoVarArgs; }
//! Returns the variable arguments (...) index, `kNoVarArgs` if none. //! Returns the variable arguments (...) index, `kNoVarArgs` if none.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t vaIndex() const noexcept { return _vaIndex; } ASMJIT_INLINE_NODEBUG uint32_t vaIndex() const noexcept { return _vaIndex; }
//! Sets the variable arguments (...) index to `index`. //! Sets the variable arguments (...) index to `index`.
ASMJIT_INLINE_NODEBUG void setVaIndex(uint32_t index) noexcept { _vaIndex = uint8_t(index); } ASMJIT_INLINE_NODEBUG void setVaIndex(uint32_t index) noexcept { _vaIndex = uint8_t(index); }
//! Resets the variable arguments index (making it a non-va function). //! Resets the variable arguments index (making it a non-va function).
ASMJIT_INLINE_NODEBUG void resetVaIndex() noexcept { _vaIndex = kNoVarArgs; } ASMJIT_INLINE_NODEBUG void resetVaIndex() noexcept { _vaIndex = kNoVarArgs; }
@@ -620,45 +670,68 @@ struct FuncValue {
//! \endcond //! \endcond
//! Tests whether the `FuncValue` has a flag `flag` set. //! Tests whether the `FuncValue` has a flag `flag` set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(uint32_t flag) const noexcept { return Support::test(_data, flag); } ASMJIT_INLINE_NODEBUG bool hasFlag(uint32_t flag) const noexcept { return Support::test(_data, flag); }
//! Adds `flags` to `FuncValue`. //! Adds `flags` to `FuncValue`.
ASMJIT_INLINE_NODEBUG void addFlags(uint32_t flags) noexcept { _data |= flags; } ASMJIT_INLINE_NODEBUG void addFlags(uint32_t flags) noexcept { _data |= flags; }
//! Clears `flags` of `FuncValue`. //! Clears `flags` of `FuncValue`.
ASMJIT_INLINE_NODEBUG void clearFlags(uint32_t flags) noexcept { _data &= ~flags; } ASMJIT_INLINE_NODEBUG void clearFlags(uint32_t flags) noexcept { _data &= ~flags; }
//! Tests whether the value is initialized (i.e. contains valid data). //! Tests whether the value is initialized (i.e. contains valid data).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _data != 0; } ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _data != 0; }
//! Tests whether the argument is passed by register. //! Tests whether the argument is passed by register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isReg() const noexcept { return hasFlag(kFlagIsReg); } ASMJIT_INLINE_NODEBUG bool isReg() const noexcept { return hasFlag(kFlagIsReg); }
//! Tests whether the argument is passed by stack. //! Tests whether the argument is passed by stack.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isStack() const noexcept { return hasFlag(kFlagIsStack); } ASMJIT_INLINE_NODEBUG bool isStack() const noexcept { return hasFlag(kFlagIsStack); }
//! Tests whether the argument is passed by register. //! Tests whether the argument is passed by register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAssigned() const noexcept { return hasFlag(kFlagIsReg | kFlagIsStack); } ASMJIT_INLINE_NODEBUG bool isAssigned() const noexcept { return hasFlag(kFlagIsReg | kFlagIsStack); }
//! Tests whether the argument is passed through a pointer (used by WIN64 to pass XMM|YMM|ZMM). //! Tests whether the argument is passed through a pointer (used by WIN64 to pass XMM|YMM|ZMM).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isIndirect() const noexcept { return hasFlag(kFlagIsIndirect); } ASMJIT_INLINE_NODEBUG bool isIndirect() const noexcept { return hasFlag(kFlagIsIndirect); }
//! Tests whether the argument was already processed (used internally). //! Tests whether the argument was already processed (used internally).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isDone() const noexcept { return hasFlag(kFlagIsDone); } ASMJIT_INLINE_NODEBUG bool isDone() const noexcept { return hasFlag(kFlagIsDone); }
//! Returns a register type of the register used to pass function argument or return value. //! Returns a register type of the register used to pass function argument or return value.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegType regType() const noexcept { return RegType((_data & kRegTypeMask) >> kRegTypeShift); } ASMJIT_INLINE_NODEBUG RegType regType() const noexcept { return RegType((_data & kRegTypeMask) >> kRegTypeShift); }
//! Sets a register type of the register used to pass function argument or return value. //! Sets a register type of the register used to pass function argument or return value.
ASMJIT_INLINE_NODEBUG void setRegType(RegType regType) noexcept { _replaceValue(kRegTypeMask, uint32_t(regType) << kRegTypeShift); } ASMJIT_INLINE_NODEBUG void setRegType(RegType regType) noexcept { _replaceValue(kRegTypeMask, uint32_t(regType) << kRegTypeShift); }
//! Returns a physical id of the register used to pass function argument or return value. //! Returns a physical id of the register used to pass function argument or return value.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t regId() const noexcept { return (_data & kRegIdMask) >> kRegIdShift; } ASMJIT_INLINE_NODEBUG uint32_t regId() const noexcept { return (_data & kRegIdMask) >> kRegIdShift; }
//! Sets a physical id of the register used to pass function argument or return value. //! Sets a physical id of the register used to pass function argument or return value.
ASMJIT_INLINE_NODEBUG void setRegId(uint32_t regId) noexcept { _replaceValue(kRegIdMask, regId << kRegIdShift); } ASMJIT_INLINE_NODEBUG void setRegId(uint32_t regId) noexcept { _replaceValue(kRegIdMask, regId << kRegIdShift); }
//! Returns a stack offset of this argument. //! Returns a stack offset of this argument.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int32_t stackOffset() const noexcept { return int32_t(_data & kStackOffsetMask) >> kStackOffsetShift; } ASMJIT_INLINE_NODEBUG int32_t stackOffset() const noexcept { return int32_t(_data & kStackOffsetMask) >> kStackOffsetShift; }
//! Sets a stack offset of this argument. //! Sets a stack offset of this argument.
ASMJIT_INLINE_NODEBUG void setStackOffset(int32_t offset) noexcept { _replaceValue(kStackOffsetMask, uint32_t(offset) << kStackOffsetShift); } ASMJIT_INLINE_NODEBUG void setStackOffset(int32_t offset) noexcept { _replaceValue(kStackOffsetMask, uint32_t(offset) << kStackOffsetShift); }
//! Tests whether the argument or return value has associated `TypeId`. //! Tests whether the argument or return value has associated `TypeId`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasTypeId() const noexcept { return Support::test(_data, kTypeIdMask); } ASMJIT_INLINE_NODEBUG bool hasTypeId() const noexcept { return Support::test(_data, kTypeIdMask); }
//! Returns a TypeId of this argument or return value. //! Returns a TypeId of this argument or return value.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return TypeId((_data & kTypeIdMask) >> kTypeIdShift); } ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return TypeId((_data & kTypeIdMask) >> kTypeIdShift); }
//! Sets a TypeId of this argument or return value. //! Sets a TypeId of this argument or return value.
ASMJIT_INLINE_NODEBUG void setTypeId(TypeId typeId) noexcept { _replaceValue(kTypeIdMask, uint32_t(typeId) << kTypeIdShift); } ASMJIT_INLINE_NODEBUG void setTypeId(TypeId typeId) noexcept { _replaceValue(kTypeIdMask, uint32_t(typeId) << kTypeIdShift); }
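Decoding an assigned FuncValue then reduces to the accessors above (a sketch; the printf formatting is illustrative only):

    #include <asmjit/core.h>
    #include <cstdio>
    using namespace asmjit;

    static void printArgLocation(const FuncValue& value) {
      if (!value.isInitialized()) {
        return;                          // unassigned slot in a pack
      }
      if (value.isReg()) {
        std::printf("reg(type=%u, id=%u)\n", unsigned(value.regType()), unsigned(value.regId()));
      }
      else if (value.isStack()) {
        std::printf("stack(offset=%d)\n", int(value.stackOffset()));
      }
    }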
@@ -692,6 +765,7 @@ public:
//! \{ //! \{
//! Calculates how many values are in the pack, checking for non-values from the end. //! Calculates how many values are in the pack, checking for non-values from the end.
[[nodiscard]]
inline uint32_t count() const noexcept { inline uint32_t count() const noexcept {
uint32_t n = Globals::kMaxValuePack; uint32_t n = Globals::kMaxValuePack;
while (n && !_values[n - 1]) while (n && !_values[n - 1])
@@ -702,8 +776,11 @@ public:
//! Returns the values in this value pack. //! Returns the values in this value pack.
//! //!
//! \note The returned array has exactly \ref Globals::kMaxValuePack elements. //! \note The returned array has exactly \ref Globals::kMaxValuePack elements.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncValue* values() noexcept { return _values; } ASMJIT_INLINE_NODEBUG FuncValue* values() noexcept { return _values; }
//! \overload //! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncValue* values() const noexcept { return _values; } ASMJIT_INLINE_NODEBUG const FuncValue* values() const noexcept { return _values; }
//! Resets a value at the given `index` in the pack, which makes it unassigned. //! Resets a value at the given `index` in the pack, which makes it unassigned.
@@ -740,11 +817,14 @@ public:
//! Accesses the value in the pack at the given `index`. //! Accesses the value in the pack at the given `index`.
//! //!
//! \note The maximum index value is `Globals::kMaxValuePack - 1`. //! \note The maximum index value is `Globals::kMaxValuePack - 1`.
[[nodiscard]]
inline FuncValue& operator[](size_t index) { inline FuncValue& operator[](size_t index) {
ASMJIT_ASSERT(index < Globals::kMaxValuePack); ASMJIT_ASSERT(index < Globals::kMaxValuePack);
return _values[index]; return _values[index];
} }
//! \overload //! \overload
[[nodiscard]]
inline const FuncValue& operator[](size_t index) const { inline const FuncValue& operator[](size_t index) const {
ASMJIT_ASSERT(index < Globals::kMaxValuePack); ASMJIT_ASSERT(index < Globals::kMaxValuePack);
return _values[index]; return _values[index];
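A pack holds up to Globals::kMaxValuePack values, which is how a value split across registers (for example a 64-bit return passed in a register pair on a 32-bit target) is represented. A minimal iteration sketch, using only members shown in this diff:

    #include <asmjit/core.h>
    using namespace asmjit;

    static uint32_t countAssignedRegs(const FuncValuePack& pack) {
      uint32_t regCount = 0;
      for (uint32_t i = 0; i < pack.count(); i++) {
        if (pack[i].isReg()) {
          regCount++;
        }
      }
      return regCount;
    }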
@@ -809,7 +889,7 @@ public:
//! \{ //! \{
//! Function doesn't have a variable number of arguments (`...`). //! Function doesn't have a variable number of arguments (`...`).
static constexpr uint8_t kNoVarArgs = 0xFFu; static inline constexpr uint8_t kNoVarArgs = 0xFFu;
//! \} //! \}
@@ -871,52 +951,72 @@ public:
//! \{ //! \{
//! Returns the function's calling convention, see `CallConv`. //! Returns the function's calling convention, see `CallConv`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const CallConv& callConv() const noexcept { return _callConv; } ASMJIT_INLINE_NODEBUG const CallConv& callConv() const noexcept { return _callConv; }
//! Returns the associated calling convention flags, see `CallConv::Flags`. //! Returns the associated calling convention flags, see `CallConv::Flags`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CallConvFlags flags() const noexcept { return _callConv.flags(); } ASMJIT_INLINE_NODEBUG CallConvFlags flags() const noexcept { return _callConv.flags(); }
//! Checks whether a CallConv `flag` is set, see `CallConv::Flags`. //! Checks whether a CallConv `flag` is set, see `CallConv::Flags`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(CallConvFlags ccFlag) const noexcept { return _callConv.hasFlag(ccFlag); } ASMJIT_INLINE_NODEBUG bool hasFlag(CallConvFlags ccFlag) const noexcept { return _callConv.hasFlag(ccFlag); }
//! Tests whether the function has a return value. //! Tests whether the function has a return value.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return bool(_rets[0]); } ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return bool(_rets[0]); }
//! Returns the number of function arguments. //! Returns the number of function arguments.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _argCount; } ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _argCount; }
//! Returns function return values. //! Returns function return values.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncValuePack& retPack() noexcept { return _rets; } ASMJIT_INLINE_NODEBUG FuncValuePack& retPack() noexcept { return _rets; }
//! Returns function return values. //! Returns function return values.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncValuePack& retPack() const noexcept { return _rets; } ASMJIT_INLINE_NODEBUG const FuncValuePack& retPack() const noexcept { return _rets; }
//! Returns a function return value associated with the given `valueIndex`. //! Returns a function return value associated with the given `valueIndex`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncValue& ret(size_t valueIndex = 0) noexcept { return _rets[valueIndex]; } ASMJIT_INLINE_NODEBUG FuncValue& ret(size_t valueIndex = 0) noexcept { return _rets[valueIndex]; }
//! Returns a function return value associated with the given `valueIndex` (const). //! Returns a function return value associated with the given `valueIndex` (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncValue& ret(size_t valueIndex = 0) const noexcept { return _rets[valueIndex]; } ASMJIT_INLINE_NODEBUG const FuncValue& ret(size_t valueIndex = 0) const noexcept { return _rets[valueIndex]; }
//! Returns function argument packs array. //! Returns function argument packs array.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncValuePack* argPacks() noexcept { return _args; } ASMJIT_INLINE_NODEBUG FuncValuePack* argPacks() noexcept { return _args; }
//! Returns function argument packs array (const). //! Returns function argument packs array (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncValuePack* argPacks() const noexcept { return _args; } ASMJIT_INLINE_NODEBUG const FuncValuePack* argPacks() const noexcept { return _args; }
//! Returns function argument pack at the given `argIndex`. //! Returns function argument pack at the given `argIndex`.
[[nodiscard]]
inline FuncValuePack& argPack(size_t argIndex) noexcept { inline FuncValuePack& argPack(size_t argIndex) noexcept {
ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs); ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs);
return _args[argIndex]; return _args[argIndex];
} }
//! Returns function argument pack at the given `argIndex` (const). //! Returns function argument pack at the given `argIndex` (const).
[[nodiscard]]
inline const FuncValuePack& argPack(size_t argIndex) const noexcept { inline const FuncValuePack& argPack(size_t argIndex) const noexcept {
ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs); ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs);
return _args[argIndex]; return _args[argIndex];
} }
//! Returns an argument at `valueIndex` from the argument pack at the given `argIndex`. //! Returns an argument at `valueIndex` from the argument pack at the given `argIndex`.
[[nodiscard]]
inline FuncValue& arg(size_t argIndex, size_t valueIndex = 0) noexcept { inline FuncValue& arg(size_t argIndex, size_t valueIndex = 0) noexcept {
ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs); ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs);
return _args[argIndex][valueIndex]; return _args[argIndex][valueIndex];
} }
//! Returns an argument at `valueIndex` from the argument pack at the given `argIndex` (const). //! Returns an argument at `valueIndex` from the argument pack at the given `argIndex` (const).
[[nodiscard]]
inline const FuncValue& arg(size_t argIndex, size_t valueIndex = 0) const noexcept { inline const FuncValue& arg(size_t argIndex, size_t valueIndex = 0) const noexcept {
ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs); ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs);
return _args[argIndex][valueIndex]; return _args[argIndex][valueIndex];
@@ -931,28 +1031,43 @@ public:
} }
//! Tests whether the function has variable arguments. //! Tests whether the function has variable arguments.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasVarArgs() const noexcept { return _vaIndex != kNoVarArgs; } ASMJIT_INLINE_NODEBUG bool hasVarArgs() const noexcept { return _vaIndex != kNoVarArgs; }
//! Returns an index of a first variable argument. //! Returns an index of a first variable argument.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t vaIndex() const noexcept { return _vaIndex; } ASMJIT_INLINE_NODEBUG uint32_t vaIndex() const noexcept { return _vaIndex; }
//! Tests whether the function passes one or more argument by stack. //! Tests whether the function passes one or more argument by stack.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasStackArgs() const noexcept { return _argStackSize != 0; } ASMJIT_INLINE_NODEBUG bool hasStackArgs() const noexcept { return _argStackSize != 0; }
//! Returns stack size needed for function arguments passed on the stack. //! Returns stack size needed for function arguments passed on the stack.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t argStackSize() const noexcept { return _argStackSize; } ASMJIT_INLINE_NODEBUG uint32_t argStackSize() const noexcept { return _argStackSize; }
//! Returns red zone size. //! Returns red zone size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t redZoneSize() const noexcept { return _callConv.redZoneSize(); } ASMJIT_INLINE_NODEBUG uint32_t redZoneSize() const noexcept { return _callConv.redZoneSize(); }
//! Returns spill zone size. //! Returns spill zone size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t spillZoneSize() const noexcept { return _callConv.spillZoneSize(); } ASMJIT_INLINE_NODEBUG uint32_t spillZoneSize() const noexcept { return _callConv.spillZoneSize(); }
//! Returns natural stack alignment. //! Returns natural stack alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t naturalStackAlignment() const noexcept { return _callConv.naturalStackAlignment(); } ASMJIT_INLINE_NODEBUG uint32_t naturalStackAlignment() const noexcept { return _callConv.naturalStackAlignment(); }
//! Returns a mask of all passed registers of the given register `group`. //! Returns a mask of all passed registers of the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask passedRegs(RegGroup group) const noexcept { return _callConv.passedRegs(group); } ASMJIT_INLINE_NODEBUG RegMask passedRegs(RegGroup group) const noexcept { return _callConv.passedRegs(group); }
//! Returns a mask of all preserved registers of the given register `group`. //! Returns a mask of all preserved registers of the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask preservedRegs(RegGroup group) const noexcept { return _callConv.preservedRegs(group); } ASMJIT_INLINE_NODEBUG RegMask preservedRegs(RegGroup group) const noexcept { return _callConv.preservedRegs(group); }
//! Returns a mask of all used registers of the given register `group`. //! Returns a mask of all used registers of the given register `group`.
[[nodiscard]]
inline RegMask usedRegs(RegGroup group) const noexcept { inline RegMask usedRegs(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _usedRegs[size_t(group)]; return _usedRegs[size_t(group)];
@@ -1012,10 +1127,8 @@ public:
//! \name Constants //! \name Constants
//! \{ //! \{
enum : uint32_t { //! Tag used to inform that some offset is invalid.
//! Tag used to inform that some offset is invalid. static inline constexpr uint32_t kTagInvalidOffset = 0xFFFFFFFFu;
kTagInvalidOffset = 0xFFFFFFFFu
};
//! \} //! \}
@@ -1129,90 +1242,133 @@ public:
//! \{ //! \{
//! Returns the target architecture of the function frame. //! Returns the target architecture of the function frame.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; } ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; }
//! Returns function frame attributes, see `Attributes`. //! Returns function frame attributes, see `Attributes`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncAttributes attributes() const noexcept { return _attributes; } ASMJIT_INLINE_NODEBUG FuncAttributes attributes() const noexcept { return _attributes; }
//! Checks whether the FuncFrame contains an attribute `attr`. //! Checks whether the FuncFrame contains an attribute `attr`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAttribute(FuncAttributes attr) const noexcept { return Support::test(_attributes, attr); } ASMJIT_INLINE_NODEBUG bool hasAttribute(FuncAttributes attr) const noexcept { return Support::test(_attributes, attr); }
//! Adds attributes `attrs` to the FuncFrame. //! Adds attributes `attrs` to the FuncFrame.
ASMJIT_INLINE_NODEBUG void addAttributes(FuncAttributes attrs) noexcept { _attributes |= attrs; } ASMJIT_INLINE_NODEBUG void addAttributes(FuncAttributes attrs) noexcept { _attributes |= attrs; }
//! Clears attributes `attrs` from the FuncFrame. //! Clears attributes `attrs` from the FuncFrame.
ASMJIT_INLINE_NODEBUG void clearAttributes(FuncAttributes attrs) noexcept { _attributes &= ~attrs; } ASMJIT_INLINE_NODEBUG void clearAttributes(FuncAttributes attrs) noexcept { _attributes &= ~attrs; }
//! Tests whether the function has variable number of arguments. //! Tests whether the function has variable number of arguments.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasVarArgs() const noexcept { return hasAttribute(FuncAttributes::kHasVarArgs); } ASMJIT_INLINE_NODEBUG bool hasVarArgs() const noexcept { return hasAttribute(FuncAttributes::kHasVarArgs); }
//! Sets the variable arguments flag. //! Sets the variable arguments flag.
ASMJIT_INLINE_NODEBUG void setVarArgs() noexcept { addAttributes(FuncAttributes::kHasVarArgs); } ASMJIT_INLINE_NODEBUG void setVarArgs() noexcept { addAttributes(FuncAttributes::kHasVarArgs); }
//! Resets variable arguments flag. //! Resets variable arguments flag.
ASMJIT_INLINE_NODEBUG void resetVarArgs() noexcept { clearAttributes(FuncAttributes::kHasVarArgs); } ASMJIT_INLINE_NODEBUG void resetVarArgs() noexcept { clearAttributes(FuncAttributes::kHasVarArgs); }
//! Tests whether the function preserves frame pointer (EBP|ESP on X86). //! Tests whether the function preserves frame pointer (EBP|ESP on X86).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasPreservedFP() const noexcept { return hasAttribute(FuncAttributes::kHasPreservedFP); } ASMJIT_INLINE_NODEBUG bool hasPreservedFP() const noexcept { return hasAttribute(FuncAttributes::kHasPreservedFP); }
//! Enables preserved frame pointer. //! Enables preserved frame pointer.
ASMJIT_INLINE_NODEBUG void setPreservedFP() noexcept { addAttributes(FuncAttributes::kHasPreservedFP); } ASMJIT_INLINE_NODEBUG void setPreservedFP() noexcept { addAttributes(FuncAttributes::kHasPreservedFP); }
//! Disables preserved frame pointer. //! Disables preserved frame pointer.
ASMJIT_INLINE_NODEBUG void resetPreservedFP() noexcept { clearAttributes(FuncAttributes::kHasPreservedFP); } ASMJIT_INLINE_NODEBUG void resetPreservedFP() noexcept { clearAttributes(FuncAttributes::kHasPreservedFP); }
//! Tests whether the function calls other functions. //! Tests whether the function calls other functions.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFuncCalls() const noexcept { return hasAttribute(FuncAttributes::kHasFuncCalls); } ASMJIT_INLINE_NODEBUG bool hasFuncCalls() const noexcept { return hasAttribute(FuncAttributes::kHasFuncCalls); }
//! Sets `FuncAttributes::kHasFuncCalls` to true. //! Sets `FuncAttributes::kHasFuncCalls` to true.
ASMJIT_INLINE_NODEBUG void setFuncCalls() noexcept { addAttributes(FuncAttributes::kHasFuncCalls); } ASMJIT_INLINE_NODEBUG void setFuncCalls() noexcept { addAttributes(FuncAttributes::kHasFuncCalls); }
//! Sets `FuncAttributes::kHasFuncCalls` to false. //! Sets `FuncAttributes::kHasFuncCalls` to false.
ASMJIT_INLINE_NODEBUG void resetFuncCalls() noexcept { clearAttributes(FuncAttributes::kHasFuncCalls); } ASMJIT_INLINE_NODEBUG void resetFuncCalls() noexcept { clearAttributes(FuncAttributes::kHasFuncCalls); }
//! Tests whether the function uses indirect branch protection, see \ref FuncAttributes::kIndirectBranchProtection. //! Tests whether the function uses indirect branch protection, see \ref FuncAttributes::kIndirectBranchProtection.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasIndirectBranchProtection() const noexcept { return hasAttribute(FuncAttributes::kIndirectBranchProtection); } ASMJIT_INLINE_NODEBUG bool hasIndirectBranchProtection() const noexcept { return hasAttribute(FuncAttributes::kIndirectBranchProtection); }
//! Enables indirect branch protection (sets `FuncAttributes::kIndirectBranchProtection` attribute to true). //! Enables indirect branch protection (sets `FuncAttributes::kIndirectBranchProtection` attribute to true).
ASMJIT_INLINE_NODEBUG void setIndirectBranchProtection() noexcept { addAttributes(FuncAttributes::kIndirectBranchProtection); } ASMJIT_INLINE_NODEBUG void setIndirectBranchProtection() noexcept { addAttributes(FuncAttributes::kIndirectBranchProtection); }
//! Disables indirect branch protection (sets `FuncAttributes::kIndirectBranchProtection` attribute to false). //! Disables indirect branch protection (sets `FuncAttributes::kIndirectBranchProtection` attribute to false).
ASMJIT_INLINE_NODEBUG void resetIndirectBranchProtection() noexcept { clearAttributes(FuncAttributes::kIndirectBranchProtection); } ASMJIT_INLINE_NODEBUG void resetIndirectBranchProtection() noexcept { clearAttributes(FuncAttributes::kIndirectBranchProtection); }
//! Tests whether the function has AVX enabled. //! Tests whether the function has AVX enabled.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAvxEnabled() const noexcept { return hasAttribute(FuncAttributes::kX86_AVXEnabled); } ASMJIT_INLINE_NODEBUG bool isAvxEnabled() const noexcept { return hasAttribute(FuncAttributes::kX86_AVXEnabled); }
//! Enables AVX use. //! Enables AVX use.
ASMJIT_INLINE_NODEBUG void setAvxEnabled() noexcept { addAttributes(FuncAttributes::kX86_AVXEnabled); } ASMJIT_INLINE_NODEBUG void setAvxEnabled() noexcept { addAttributes(FuncAttributes::kX86_AVXEnabled); }
//! Disables AVX use. //! Disables AVX use.
ASMJIT_INLINE_NODEBUG void resetAvxEnabled() noexcept { clearAttributes(FuncAttributes::kX86_AVXEnabled); } ASMJIT_INLINE_NODEBUG void resetAvxEnabled() noexcept { clearAttributes(FuncAttributes::kX86_AVXEnabled); }
//! Tests whether the function has AVX-512 enabled. //! Tests whether the function has AVX-512 enabled.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAvx512Enabled() const noexcept { return hasAttribute(FuncAttributes::kX86_AVX512Enabled); } ASMJIT_INLINE_NODEBUG bool isAvx512Enabled() const noexcept { return hasAttribute(FuncAttributes::kX86_AVX512Enabled); }
//! Enables AVX-512 use. //! Enables AVX-512 use.
ASMJIT_INLINE_NODEBUG void setAvx512Enabled() noexcept { addAttributes(FuncAttributes::kX86_AVX512Enabled); } ASMJIT_INLINE_NODEBUG void setAvx512Enabled() noexcept { addAttributes(FuncAttributes::kX86_AVX512Enabled); }
//! Disables AVX-512 use. //! Disables AVX-512 use.
ASMJIT_INLINE_NODEBUG void resetAvx512Enabled() noexcept { clearAttributes(FuncAttributes::kX86_AVX512Enabled); } ASMJIT_INLINE_NODEBUG void resetAvx512Enabled() noexcept { clearAttributes(FuncAttributes::kX86_AVX512Enabled); }
//! Tests whether the function has MMX cleanup - 'emms' instruction in epilog. //! Tests whether the function has MMX cleanup - 'emms' instruction in epilog.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasMmxCleanup() const noexcept { return hasAttribute(FuncAttributes::kX86_MMXCleanup); } ASMJIT_INLINE_NODEBUG bool hasMmxCleanup() const noexcept { return hasAttribute(FuncAttributes::kX86_MMXCleanup); }
//! Enables MMX cleanup. //! Enables MMX cleanup.
ASMJIT_INLINE_NODEBUG void setMmxCleanup() noexcept { addAttributes(FuncAttributes::kX86_MMXCleanup); } ASMJIT_INLINE_NODEBUG void setMmxCleanup() noexcept { addAttributes(FuncAttributes::kX86_MMXCleanup); }
//! Disables MMX cleanup. //! Disables MMX cleanup.
ASMJIT_INLINE_NODEBUG void resetMmxCleanup() noexcept { clearAttributes(FuncAttributes::kX86_MMXCleanup); } ASMJIT_INLINE_NODEBUG void resetMmxCleanup() noexcept { clearAttributes(FuncAttributes::kX86_MMXCleanup); }
//! Tests whether the function has AVX cleanup - 'vzeroupper' instruction in epilog. //! Tests whether the function has AVX cleanup - 'vzeroupper' instruction in epilog.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAvxCleanup() const noexcept { return hasAttribute(FuncAttributes::kX86_AVXCleanup); } ASMJIT_INLINE_NODEBUG bool hasAvxCleanup() const noexcept { return hasAttribute(FuncAttributes::kX86_AVXCleanup); }
//! Enables AVX cleanup. //! Enables AVX cleanup.
ASMJIT_INLINE_NODEBUG void setAvxCleanup() noexcept { addAttributes(FuncAttributes::kX86_AVXCleanup); } ASMJIT_INLINE_NODEBUG void setAvxCleanup() noexcept { addAttributes(FuncAttributes::kX86_AVXCleanup); }
//! Disables AVX cleanup. //! Disables AVX cleanup.
ASMJIT_INLINE_NODEBUG void resetAvxCleanup() noexcept { clearAttributes(FuncAttributes::kX86_AVXCleanup); } ASMJIT_INLINE_NODEBUG void resetAvxCleanup() noexcept { clearAttributes(FuncAttributes::kX86_AVXCleanup); }
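// Example (not part of this commit): the attribute setters above are typically used between
// FuncFrame initialization and finalization. A minimal sketch - the init()/finalize() workflow
// and 'funcDetail' are assumed from the public FuncFrame API; error handling is elided.
FuncFrame frame;
frame.init(funcDetail);   // funcDetail: an already populated FuncDetail (assumed available)
frame.setPreservedFP();   // keep a dedicated frame pointer (EBP/RBP on X86)
frame.setAvxEnabled();    // prefer AVX forms when saving/restoring vector registers
frame.setAvxCleanup();    // emit 'vzeroupper' in the epilog
frame.finalize();         // computes the final stack layout from the attributes above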
//! Tests whether the function uses call stack. //! Tests whether the function uses call stack.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasCallStack() const noexcept { return _callStackSize != 0; } ASMJIT_INLINE_NODEBUG bool hasCallStack() const noexcept { return _callStackSize != 0; }
//! Tests whether the function uses local stack. //! Tests whether the function uses local stack.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLocalStack() const noexcept { return _localStackSize != 0; } ASMJIT_INLINE_NODEBUG bool hasLocalStack() const noexcept { return _localStackSize != 0; }
//! Tests whether vector registers can be saved and restored by using aligned reads and writes. //! Tests whether vector registers can be saved and restored by using aligned reads and writes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAlignedVecSR() const noexcept { return hasAttribute(FuncAttributes::kAlignedVecSR); } ASMJIT_INLINE_NODEBUG bool hasAlignedVecSR() const noexcept { return hasAttribute(FuncAttributes::kAlignedVecSR); }
//! Tests whether the function has to align stack dynamically. //! Tests whether the function has to align stack dynamically.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasDynamicAlignment() const noexcept { return _finalStackAlignment >= _minDynamicAlignment; } ASMJIT_INLINE_NODEBUG bool hasDynamicAlignment() const noexcept { return _finalStackAlignment >= _minDynamicAlignment; }
//! Tests whether the calling convention specifies 'RedZone'. //! Tests whether the calling convention specifies 'RedZone'.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRedZone() const noexcept { return _redZoneSize != 0; } ASMJIT_INLINE_NODEBUG bool hasRedZone() const noexcept { return _redZoneSize != 0; }
//! Tests whether the calling convention specifies 'SpillZone'.
ASMJIT_INLINE_NODEBUG bool hasSpillZone() const noexcept { return _spillZoneSize != 0; }
//! Returns the size of 'RedZone'. //! Returns the size of 'RedZone'.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t redZoneSize() const noexcept { return _redZoneSize; } ASMJIT_INLINE_NODEBUG uint32_t redZoneSize() const noexcept { return _redZoneSize; }
//! Tests whether the calling convention specifies 'SpillZone'.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasSpillZone() const noexcept { return _spillZoneSize != 0; }
//! Returns the size of 'SpillZone'. //! Returns the size of 'SpillZone'.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t spillZoneSize() const noexcept { return _spillZoneSize; } ASMJIT_INLINE_NODEBUG uint32_t spillZoneSize() const noexcept { return _spillZoneSize; }
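// Example (not part of this commit): an illustrative way these queries can combine - the
// policy shown is a sketch, not AsmJit's internal logic.
uint32_t locals = frame.localStackSize();
bool canUseRedZone = !frame.hasFuncCalls() && frame.hasRedZone() && locals <= frame.redZoneSize();
uint32_t spillBytes = frame.hasSpillZone() ? frame.spillZoneSize() : 0u;   // size of the convention's spill zone, if any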
//! Resets the size of red zone, which would disable it entirely. //! Resets the size of red zone, which would disable it entirely.
@@ -1224,20 +1380,31 @@ public:
ASMJIT_INLINE_NODEBUG void resetRedZone() noexcept { _redZoneSize = 0; } ASMJIT_INLINE_NODEBUG void resetRedZone() noexcept { _redZoneSize = 0; }
//! Returns natural stack alignment (guaranteed stack alignment upon entry). //! Returns natural stack alignment (guaranteed stack alignment upon entry).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t naturalStackAlignment() const noexcept { return _naturalStackAlignment; } ASMJIT_INLINE_NODEBUG uint32_t naturalStackAlignment() const noexcept { return _naturalStackAlignment; }
//! Returns natural stack alignment (guaranteed stack alignment upon entry). //! Returns natural stack alignment (guaranteed stack alignment upon entry).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t minDynamicAlignment() const noexcept { return _minDynamicAlignment; } ASMJIT_INLINE_NODEBUG uint32_t minDynamicAlignment() const noexcept { return _minDynamicAlignment; }
//! Tests whether the callee must adjust SP before returning (X86-STDCALL only) //! Tests whether the callee must adjust SP before returning (X86-STDCALL only)
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasCalleeStackCleanup() const noexcept { return _calleeStackCleanup != 0; } ASMJIT_INLINE_NODEBUG bool hasCalleeStackCleanup() const noexcept { return _calleeStackCleanup != 0; }
//! Returns how many bytes of the stack the callee must adjust before returning (X86-STDCALL only) //! Returns how many bytes of the stack the callee must adjust before returning (X86-STDCALL only)
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t calleeStackCleanup() const noexcept { return _calleeStackCleanup; } ASMJIT_INLINE_NODEBUG uint32_t calleeStackCleanup() const noexcept { return _calleeStackCleanup; }
//! Returns call stack alignment. //! Returns call stack alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t callStackAlignment() const noexcept { return _callStackAlignment; } ASMJIT_INLINE_NODEBUG uint32_t callStackAlignment() const noexcept { return _callStackAlignment; }
//! Returns local stack alignment. //! Returns local stack alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t localStackAlignment() const noexcept { return _localStackAlignment; } ASMJIT_INLINE_NODEBUG uint32_t localStackAlignment() const noexcept { return _localStackAlignment; }
//! Returns final stack alignment (the maximum value of call, local, and natural stack alignments). //! Returns final stack alignment (the maximum value of call, local, and natural stack alignments).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t finalStackAlignment() const noexcept { return _finalStackAlignment; } ASMJIT_INLINE_NODEBUG uint32_t finalStackAlignment() const noexcept { return _finalStackAlignment; }
//! Sets call stack alignment. //! Sets call stack alignment.
@@ -1273,42 +1440,57 @@ public:
} }
//! Returns call stack size. //! Returns call stack size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t callStackSize() const noexcept { return _callStackSize; } ASMJIT_INLINE_NODEBUG uint32_t callStackSize() const noexcept { return _callStackSize; }
//! Returns local stack size. //! Returns local stack size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t localStackSize() const noexcept { return _localStackSize; } ASMJIT_INLINE_NODEBUG uint32_t localStackSize() const noexcept { return _localStackSize; }
//! Sets call stack size. //! Sets call stack size.
ASMJIT_INLINE_NODEBUG void setCallStackSize(uint32_t size) noexcept { _callStackSize = size; } ASMJIT_INLINE_NODEBUG void setCallStackSize(uint32_t size) noexcept { _callStackSize = size; }
//! Sets local stack size. //! Sets local stack size.
ASMJIT_INLINE_NODEBUG void setLocalStackSize(uint32_t size) noexcept { _localStackSize = size; } ASMJIT_INLINE_NODEBUG void setLocalStackSize(uint32_t size) noexcept { _localStackSize = size; }
//! Combines call stack size with `size`, updating it to the greater value. //! Combines call stack size with `size`, updating it to the greater value.
ASMJIT_INLINE_NODEBUG void updateCallStackSize(uint32_t size) noexcept { _callStackSize = Support::max(_callStackSize, size); } ASMJIT_INLINE_NODEBUG void updateCallStackSize(uint32_t size) noexcept { _callStackSize = Support::max(_callStackSize, size); }
//! Combines local stack size with `size`, updating it to the greater value. //! Combines local stack size with `size`, updating it to the greater value.
ASMJIT_INLINE_NODEBUG void updateLocalStackSize(uint32_t size) noexcept { _localStackSize = Support::max(_localStackSize, size); } ASMJIT_INLINE_NODEBUG void updateLocalStackSize(uint32_t size) noexcept { _localStackSize = Support::max(_localStackSize, size); }
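// Example (not part of this commit): reserving stack space through the setters above before
// the frame is finalized; only members shown in this diff are used.
frame.setLocalStackSize(64);      // 64 bytes of local storage for temporaries
frame.updateCallStackSize(32);    // outgoing call area grows to at least 32 bytes
frame.updateCallStackSize(16);    // no effect - 32 is already the maximum seen so far
// finalStackSize() and localStackOffset() become meaningful only after finalize().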
//! Returns final stack size (only valid after the FuncFrame is finalized). //! Returns final stack size (only valid after the FuncFrame is finalized).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t finalStackSize() const noexcept { return _finalStackSize; } ASMJIT_INLINE_NODEBUG uint32_t finalStackSize() const noexcept { return _finalStackSize; }
//! Returns an offset to access the local stack (non-zero only if call stack is used). //! Returns an offset to access the local stack (non-zero only if call stack is used).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t localStackOffset() const noexcept { return _localStackOffset; } ASMJIT_INLINE_NODEBUG uint32_t localStackOffset() const noexcept { return _localStackOffset; }
//! Tests whether the function prolog/epilog requires a memory slot for storing unaligned SP. //! Tests whether the function prolog/epilog requires a memory slot for storing unaligned SP.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasDAOffset() const noexcept { return _daOffset != kTagInvalidOffset; } ASMJIT_INLINE_NODEBUG bool hasDAOffset() const noexcept { return _daOffset != kTagInvalidOffset; }
//! Returns a memory offset used to store DA (dynamic alignment) slot (relative to SP). //! Returns a memory offset used to store DA (dynamic alignment) slot (relative to SP).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t daOffset() const noexcept { return _daOffset; } ASMJIT_INLINE_NODEBUG uint32_t daOffset() const noexcept { return _daOffset; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t saOffset(uint32_t regId) const noexcept { ASMJIT_INLINE_NODEBUG uint32_t saOffset(uint32_t regId) const noexcept {
return regId == _spRegId ? saOffsetFromSP() return regId == _spRegId ? saOffsetFromSP()
: saOffsetFromSA(); : saOffsetFromSA();
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t saOffsetFromSP() const noexcept { return _saOffsetFromSP; } ASMJIT_INLINE_NODEBUG uint32_t saOffsetFromSP() const noexcept { return _saOffsetFromSP; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t saOffsetFromSA() const noexcept { return _saOffsetFromSA; } ASMJIT_INLINE_NODEBUG uint32_t saOffsetFromSA() const noexcept { return _saOffsetFromSA; }
//! Returns mask of registers of the given register `group` that are modified by the function. The engine would //! Returns mask of registers of the given register `group` that are modified by the function. The engine would
//! then calculate which registers must be saved & restored by the function by using the data provided by the //! then calculate which registers must be saved & restored by the function by using the data provided by the
//! calling convention. //! calling convention.
[[nodiscard]]
inline RegMask dirtyRegs(RegGroup group) const noexcept { inline RegMask dirtyRegs(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _dirtyRegs[group]; return _dirtyRegs[group];
@@ -1360,61 +1542,82 @@ public:
//! Returns a calculated mask of registers of the given `group` that will be saved and restored in the function's //! Returns a calculated mask of registers of the given `group` that will be saved and restored in the function's
//! prolog and epilog, respectively. The register mask is calculated from both `dirtyRegs` (provided by user) and //! prolog and epilog, respectively. The register mask is calculated from both `dirtyRegs` (provided by user) and
//! `preservedMask` (provided by the calling convention). //! `preservedMask` (provided by the calling convention).
[[nodiscard]]
inline RegMask savedRegs(RegGroup group) const noexcept { inline RegMask savedRegs(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _dirtyRegs[group] & _preservedRegs[group]; return _dirtyRegs[group] & _preservedRegs[group];
} }
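// Example (not part of this commit): savedRegs() is the intersection of what the user marked
// dirty and what the calling convention preserves, as the body above shows. A hedged sketch -
// addDirtyRegs() is assumed from the public FuncFrame interface and is not part of this diff.
frame.addDirtyRegs(x86::rbx, x86::r12, x86::xmm6);     // registers clobbered by the generated body
RegMask gpToSave  = frame.savedRegs(RegGroup::kGp);    // dirty & preserved GP registers
RegMask vecToSave = frame.savedRegs(RegGroup::kVec);   // dirty & preserved vector registers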
//! Returns all dirty registers as a Support::Array<> type. //! Returns all dirty registers as a Support::Array<> type.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RegMasks& dirtyRegs() const noexcept { return _dirtyRegs; } ASMJIT_INLINE_NODEBUG const RegMasks& dirtyRegs() const noexcept { return _dirtyRegs; }
//! Returns all preserved registers as a Support::Array<> type. //! Returns all preserved registers as a Support::Array<> type.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RegMasks& preservedRegs() const noexcept { return _preservedRegs; } ASMJIT_INLINE_NODEBUG const RegMasks& preservedRegs() const noexcept { return _preservedRegs; }
//! Returns the mask of preserved registers of the given register `group`. //! Returns the mask of preserved registers of the given register `group`.
//! //!
//! Preserved registers are those that must survive the function call unmodified. The function can only modify //! Preserved registers are those that must survive the function call unmodified. The function can only modify
//! preserved registers if they are saved and restored in the function's prolog and epilog, respectively. //! preserved registers if they are saved and restored in the function's prolog and epilog, respectively.
[[nodiscard]]
inline RegMask preservedRegs(RegGroup group) const noexcept { inline RegMask preservedRegs(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _preservedRegs[group]; return _preservedRegs[group];
} }
//! Returns the size of a save-restore area for the required register `group`. //! Returns the size of a save-restore area for the required register `group`.
[[nodiscard]]
inline uint32_t saveRestoreRegSize(RegGroup group) const noexcept { inline uint32_t saveRestoreRegSize(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _saveRestoreRegSize[group]; return _saveRestoreRegSize[group];
} }
//! Returns the alignment that must be guaranteed to save/restore the required register `group`.
[[nodiscard]]
inline uint32_t saveRestoreAlignment(RegGroup group) const noexcept { inline uint32_t saveRestoreAlignment(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _saveRestoreAlignment[group]; return _saveRestoreAlignment[group];
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasSARegId() const noexcept { return _saRegId != BaseReg::kIdBad; } ASMJIT_INLINE_NODEBUG bool hasSARegId() const noexcept { return _saRegId != BaseReg::kIdBad; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t saRegId() const noexcept { return _saRegId; } ASMJIT_INLINE_NODEBUG uint32_t saRegId() const noexcept { return _saRegId; }
ASMJIT_INLINE_NODEBUG void setSARegId(uint32_t regId) { _saRegId = uint8_t(regId); } ASMJIT_INLINE_NODEBUG void setSARegId(uint32_t regId) { _saRegId = uint8_t(regId); }
ASMJIT_INLINE_NODEBUG void resetSARegId() { setSARegId(BaseReg::kIdBad); } ASMJIT_INLINE_NODEBUG void resetSARegId() { setSARegId(BaseReg::kIdBad); }
//! Returns stack size required to save/restore registers via push/pop. //! Returns stack size required to save/restore registers via push/pop.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t pushPopSaveSize() const noexcept { return _pushPopSaveSize; } ASMJIT_INLINE_NODEBUG uint32_t pushPopSaveSize() const noexcept { return _pushPopSaveSize; }
//! Returns an offset to the stack where registers are saved via push/pop. //! Returns an offset to the stack where registers are saved via push/pop.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t pushPopSaveOffset() const noexcept { return _pushPopSaveOffset; } ASMJIT_INLINE_NODEBUG uint32_t pushPopSaveOffset() const noexcept { return _pushPopSaveOffset; }
//! Returns stack size required to save/restore extra registers that don't use push/pop. //! Returns stack size required to save/restore extra registers that don't use push/pop.
//! //!
//! \note On X86 this covers all registers except GP registers, on other architectures it can always be //! \note On X86 this covers all registers except GP registers, on other architectures it can always be
//! zero (for example AArch64 saves all registers via push/pop-like instructions, so this would be zero). //! zero (for example AArch64 saves all registers via push/pop-like instructions, so this would be zero).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t extraRegSaveSize() const noexcept { return _extraRegSaveSize; } ASMJIT_INLINE_NODEBUG uint32_t extraRegSaveSize() const noexcept { return _extraRegSaveSize; }
//! Returns an offset to the stack where extra registers are saved. //! Returns an offset to the stack where extra registers are saved.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t extraRegSaveOffset() const noexcept { return _extraRegSaveOffset; } ASMJIT_INLINE_NODEBUG uint32_t extraRegSaveOffset() const noexcept { return _extraRegSaveOffset; }
//! Tests whether the function contains stack adjustment. //! Tests whether the function contains stack adjustment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasStackAdjustment() const noexcept { return _stackAdjustment != 0; } ASMJIT_INLINE_NODEBUG bool hasStackAdjustment() const noexcept { return _stackAdjustment != 0; }
//! Returns function's stack adjustment used in function's prolog and epilog. //! Returns function's stack adjustment used in function's prolog and epilog.
//! //!
//! If the returned value is zero it means that the stack is not adjusted. This can mean that the stack //! If the returned value is zero it means that the stack is not adjusted. This can mean that the stack
//! is not used and/or that the stack is only adjusted by instructions that push/pop registers into/from the stack. //! is not used and/or that the stack is only adjusted by instructions that push/pop registers into/from the stack.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t stackAdjustment() const noexcept { return _stackAdjustment; } ASMJIT_INLINE_NODEBUG uint32_t stackAdjustment() const noexcept { return _stackAdjustment; }
//! \} //! \}
@@ -1477,30 +1680,41 @@ public:
//! \{ //! \{
//! Returns the associated \ref FuncDetail of this `FuncArgsAssignment`. //! Returns the associated \ref FuncDetail of this `FuncArgsAssignment`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncDetail* funcDetail() const noexcept { return _funcDetail; } ASMJIT_INLINE_NODEBUG const FuncDetail* funcDetail() const noexcept { return _funcDetail; }
//! Associates \ref FuncDetails with this `FuncArgsAssignment`. //! Associates \ref FuncDetails with this `FuncArgsAssignment`.
ASMJIT_INLINE_NODEBUG void setFuncDetail(const FuncDetail* fd) noexcept { _funcDetail = fd; } ASMJIT_INLINE_NODEBUG void setFuncDetail(const FuncDetail* fd) noexcept { _funcDetail = fd; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasSARegId() const noexcept { return _saRegId != BaseReg::kIdBad; } ASMJIT_INLINE_NODEBUG bool hasSARegId() const noexcept { return _saRegId != BaseReg::kIdBad; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t saRegId() const noexcept { return _saRegId; } ASMJIT_INLINE_NODEBUG uint32_t saRegId() const noexcept { return _saRegId; }
ASMJIT_INLINE_NODEBUG void setSARegId(uint32_t regId) { _saRegId = uint8_t(regId); } ASMJIT_INLINE_NODEBUG void setSARegId(uint32_t regId) { _saRegId = uint8_t(regId); }
ASMJIT_INLINE_NODEBUG void resetSARegId() { _saRegId = uint8_t(BaseReg::kIdBad); } ASMJIT_INLINE_NODEBUG void resetSARegId() { _saRegId = uint8_t(BaseReg::kIdBad); }
//! Returns assigned argument at `argIndex` and `valueIndex`. //! Returns assigned argument at `argIndex` and `valueIndex`.
//! //!
//! \note `argIndex` refers to the function argument and `valueIndex` refers to a value pack (in case multiple //! \note `argIndex` refers to the function argument and `valueIndex` refers to a value pack (in case multiple
//! values are passed as a single argument). //! values are passed as a single argument).
[[nodiscard]]
inline FuncValue& arg(size_t argIndex, size_t valueIndex) noexcept { inline FuncValue& arg(size_t argIndex, size_t valueIndex) noexcept {
ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks)); ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks));
return _argPacks[argIndex][valueIndex]; return _argPacks[argIndex][valueIndex];
} }
//! \overload //! \overload
[[nodiscard]]
inline const FuncValue& arg(size_t argIndex, size_t valueIndex) const noexcept { inline const FuncValue& arg(size_t argIndex, size_t valueIndex) const noexcept {
ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks)); ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks));
return _argPacks[argIndex][valueIndex]; return _argPacks[argIndex][valueIndex];
} }
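// Example (not part of this commit): a hedged sketch of the assignment workflow - assignAll()
// and updateFuncFrame() are assumed from the public FuncArgsAssignment API, and 'funcDetail',
// 'frame', and the destination registers are illustrative.
FuncArgsAssignment args(&funcDetail);
args.assignAll(dstPtr, srcPtr, counter);   // one destination register per function argument
args.updateFuncFrame(frame);               // reflects the assignment in the FuncFrame (dirty regs, SA reg, ...)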
//! Tests whether argument at `argIndex` and `valueIndex` has been assigned. //! Tests whether argument at `argIndex` and `valueIndex` has been assigned.
[[nodiscard]]
inline bool isAssigned(size_t argIndex, size_t valueIndex) const noexcept { inline bool isAssigned(size_t argIndex, size_t valueIndex) const noexcept {
ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks)); ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks));
return _argPacks[argIndex][valueIndex].isAssigned(); return _argPacks[argIndex][valueIndex].isAssigned();

View File

@@ -28,26 +28,29 @@ static inline OperandSignature getSuitableRegForMemToMemMove(Arch arch, TypeId d
uint32_t regSize = Environment::registerSizeFromArch(arch); uint32_t regSize = Environment::registerSizeFromArch(arch);
OperandSignature signature{0}; OperandSignature signature{0};
if (maxSize <= regSize || (TypeUtils::isInt(dstTypeId) && TypeUtils::isInt(srcTypeId))) if (maxSize <= regSize || (TypeUtils::isInt(dstTypeId) && TypeUtils::isInt(srcTypeId))) {
signature = maxSize <= 4 ? archTraits.regTypeToSignature(RegType::kGp32) signature = maxSize <= 4 ? archTraits.regTypeToSignature(RegType::kGp32)
: archTraits.regTypeToSignature(RegType::kGp64); : archTraits.regTypeToSignature(RegType::kGp64);
else if (maxSize <= 8 && archTraits.hasRegType(RegType::kVec64)) }
else if (maxSize <= 8 && archTraits.hasRegType(RegType::kVec64)) {
signature = archTraits.regTypeToSignature(RegType::kVec64); signature = archTraits.regTypeToSignature(RegType::kVec64);
else if (maxSize <= 16 && archTraits.hasRegType(RegType::kVec128)) }
else if (maxSize <= 16 && archTraits.hasRegType(RegType::kVec128)) {
signature = archTraits.regTypeToSignature(RegType::kVec128); signature = archTraits.regTypeToSignature(RegType::kVec128);
else if (maxSize <= 32 && archTraits.hasRegType(RegType::kVec256)) }
else if (maxSize <= 32 && archTraits.hasRegType(RegType::kVec256)) {
signature = archTraits.regTypeToSignature(RegType::kVec256); signature = archTraits.regTypeToSignature(RegType::kVec256);
else if (maxSize <= 64 && archTraits.hasRegType(RegType::kVec512)) }
else if (maxSize <= 64 && archTraits.hasRegType(RegType::kVec512)) {
signature = archTraits.regTypeToSignature(RegType::kVec512); signature = archTraits.regTypeToSignature(RegType::kVec512);
}
return signature; return signature;
} }
class FuncArgsContext { class FuncArgsContext {
public: public:
enum VarId : uint32_t { static inline constexpr uint32_t kVarIdNone = 0xFF;
kVarIdNone = 0xFF
};
//! Contains information about a single argument or SA register that may need shuffling. //! Contains information about a single argument or SA register that may need shuffling.
struct Var { struct Var {
@@ -107,6 +110,7 @@ public:
memset(_physToVarId, kVarIdNone, 32); memset(_physToVarId, kVarIdNone, 32);
} }
[[nodiscard]]
inline bool isAssigned(uint32_t regId) const noexcept { inline bool isAssigned(uint32_t regId) const noexcept {
ASMJIT_ASSERT(regId < 32); ASMJIT_ASSERT(regId < 32);
return Support::bitTest(_assignedRegs, regId); return Support::bitTest(_assignedRegs, regId);
@@ -150,11 +154,22 @@ public:
_assignedRegs ^= Support::bitMask(regId); _assignedRegs ^= Support::bitMask(regId);
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask archRegs() const noexcept { return _archRegs; } ASMJIT_INLINE_NODEBUG RegMask archRegs() const noexcept { return _archRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask workRegs() const noexcept { return _workRegs; } ASMJIT_INLINE_NODEBUG RegMask workRegs() const noexcept { return _workRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask usedRegs() const noexcept { return _usedRegs; } ASMJIT_INLINE_NODEBUG RegMask usedRegs() const noexcept { return _usedRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask assignedRegs() const noexcept { return _assignedRegs; } ASMJIT_INLINE_NODEBUG RegMask assignedRegs() const noexcept { return _assignedRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask dstRegs() const noexcept { return _dstRegs; } ASMJIT_INLINE_NODEBUG RegMask dstRegs() const noexcept { return _dstRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask availableRegs() const noexcept { return _workRegs & ~_assignedRegs; } ASMJIT_INLINE_NODEBUG RegMask availableRegs() const noexcept { return _workRegs & ~_assignedRegs; }
}; };
@@ -179,13 +194,22 @@ public:
FuncArgsContext() noexcept; FuncArgsContext() noexcept;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ArchTraits& archTraits() const noexcept { return *_archTraits; } ASMJIT_INLINE_NODEBUG const ArchTraits& archTraits() const noexcept { return *_archTraits; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; } ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t varCount() const noexcept { return _varCount; } ASMJIT_INLINE_NODEBUG uint32_t varCount() const noexcept { return _varCount; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t indexOf(const Var* var) const noexcept { return (size_t)(var - _vars); } ASMJIT_INLINE_NODEBUG size_t indexOf(const Var* var) const noexcept { return (size_t)(var - _vars); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Var& var(size_t varId) noexcept { return _vars[varId]; } ASMJIT_INLINE_NODEBUG Var& var(size_t varId) noexcept { return _vars[varId]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Var& var(size_t varId) const noexcept { return _vars[varId]; } ASMJIT_INLINE_NODEBUG const Var& var(size_t varId) const noexcept { return _vars[varId]; }
Error initWorkData(const FuncFrame& frame, const FuncArgsAssignment& args, const RAConstraints* constraints) noexcept; Error initWorkData(const FuncFrame& frame, const FuncArgsAssignment& args, const RAConstraints* constraints) noexcept;

View File

@@ -26,19 +26,19 @@ struct PlacementNew { void* ptr; };
#if defined(ASMJIT_NO_STDCXX) #if defined(ASMJIT_NO_STDCXX)
namespace Support { namespace Support {
ASMJIT_FORCE_INLINE void* operatorNew(size_t n) noexcept { return malloc(n); } ASMJIT_INLINE void* operatorNew(size_t n) noexcept { return malloc(n); }
ASMJIT_FORCE_INLINE void operatorDelete(void* p) noexcept { if (p) free(p); } ASMJIT_INLINE void operatorDelete(void* p) noexcept { if (p) free(p); }
} // {Support} } // {Support}
#define ASMJIT_BASE_CLASS(TYPE) \ #define ASMJIT_BASE_CLASS(TYPE) \
ASMJIT_FORCE_INLINE void* operator new(size_t n) noexcept { return Support::operatorNew(n); } \ ASMJIT_INLINE void* operator new(size_t n) noexcept { return Support::operatorNew(n); } \
ASMJIT_FORCE_INLINE void operator delete(void* ptr) noexcept { Support::operatorDelete(ptr); } \ ASMJIT_INLINE void operator delete(void* ptr) noexcept { Support::operatorDelete(ptr); } \
\ \
ASMJIT_FORCE_INLINE void* operator new(size_t, void* ptr) noexcept { return ptr; } \ ASMJIT_INLINE void* operator new(size_t, void* ptr) noexcept { return ptr; } \
ASMJIT_FORCE_INLINE void operator delete(void*, void*) noexcept {} \ ASMJIT_INLINE void operator delete(void*, void*) noexcept {} \
\ \
ASMJIT_FORCE_INLINE void* operator new(size_t, Support::PlacementNew ptr) noexcept { return ptr.ptr; } \ ASMJIT_INLINE void* operator new(size_t, Support::PlacementNew ptr) noexcept { return ptr.ptr; } \
ASMJIT_FORCE_INLINE void operator delete(void*, Support::PlacementNew) noexcept {} ASMJIT_INLINE void operator delete(void*, Support::PlacementNew) noexcept {}
#else #else
#define ASMJIT_BASE_CLASS(TYPE) #define ASMJIT_BASE_CLASS(TYPE)
#endif #endif
@@ -69,7 +69,7 @@ enum class ResetPolicy : uint32_t {
kHard = 1 kHard = 1
}; };
//! Contains typedefs, constants, and variables used globally by AsmJit. //! Contains constants and variables used globally across AsmJit.
namespace Globals { namespace Globals {
//! Host memory allocator overhead. //! Host memory allocator overhead.
@@ -152,7 +152,7 @@ static ASMJIT_INLINE_NODEBUG void* func_as_ptr(Func func) noexcept { return Supp
//! \{ //! \{
//! AsmJit error type (uint32_t). //! AsmJit error type (uint32_t).
typedef uint32_t Error; using Error = uint32_t;
//! AsmJit error codes. //! AsmJit error codes.
enum ErrorCode : uint32_t { enum ErrorCode : uint32_t {
@@ -357,9 +357,11 @@ static ASMJIT_INLINE_NODEBUG void unused(Args&&...) noexcept {}
//! //!
//! Provided for debugging purposes. Putting a breakpoint inside `errored` can help with tracing the origin of any //! Provided for debugging purposes. Putting a breakpoint inside `errored` can help with tracing the origin of any
//! error reported / returned by AsmJit. //! error reported / returned by AsmJit.
[[nodiscard]]
static constexpr Error errored(Error err) noexcept { return err; } static constexpr Error errored(Error err) noexcept { return err; }
//! Returns a printable version of `asmjit::Error` code. //! Returns a printable version of `asmjit::Error` code.
[[nodiscard]]
ASMJIT_API const char* errorAsString(Error err) noexcept; ASMJIT_API const char* errorAsString(Error err) noexcept;
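// Example (not part of this commit): turning an Error into a readable message. 'code' and 'rt'
// are illustrative CodeHolder / JitRuntime objects assumed from the public API.
Error err = code.init(rt.environment());
if (err != kErrorOk)
  DebugUtils::debugOutput(DebugUtils::errorAsString(err));   // logs a human-readable error name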
//! Called to output debugging message(s). //! Called to output debugging message(s).
@@ -375,7 +377,8 @@ ASMJIT_API void debugOutput(const char* str) noexcept;
//! (asmjit/core/globals.cpp). A call stack will be available when such assertion failure is triggered. AsmJit //! (asmjit/core/globals.cpp). A call stack will be available when such assertion failure is triggered. AsmJit
//! always returns errors on failures, assertions are a last resort and usually mean unrecoverable state due to out //! always returns errors on failures, assertions are a last resort and usually mean unrecoverable state due to out
//! of range array access or totally invalid arguments like nullptr where a valid pointer should be provided, etc... //! of range array access or totally invalid arguments like nullptr where a valid pointer should be provided, etc...
ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg) noexcept; [[noreturn]]
ASMJIT_API void assertionFailed(const char* file, int line, const char* msg) noexcept;
} // {DebugUtils} } // {DebugUtils}
@@ -385,9 +388,9 @@ ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, cons
#if defined(ASMJIT_BUILD_DEBUG) #if defined(ASMJIT_BUILD_DEBUG)
#define ASMJIT_ASSERT(...) \ #define ASMJIT_ASSERT(...) \
do { \ do { \
if (ASMJIT_LIKELY(__VA_ARGS__)) \ if (ASMJIT_UNLIKELY(!(__VA_ARGS__))) { \
break; \ ::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #__VA_ARGS__); \
::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #__VA_ARGS__); \ } \
} while (0) } while (0)
#else #else
#define ASMJIT_ASSERT(...) ((void)0) #define ASMJIT_ASSERT(...) ((void)0)
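// Example (not part of this commit): usage is the same as a standard assert; values are illustrative.
uint32_t regId = 7;
ASMJIT_ASSERT(regId < 32);   // debug builds: calls DebugUtils::assertionFailed(__FILE__, __LINE__, "regId < 32")
                             // release builds: compiles to ((void)0)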
@@ -399,9 +402,10 @@ ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, cons
//! internally, but kept public for users that want to use the same technique to propagate errors to the caller. //! internally, but kept public for users that want to use the same technique to propagate errors to the caller.
#define ASMJIT_PROPAGATE(...) \ #define ASMJIT_PROPAGATE(...) \
do { \ do { \
::asmjit::Error _err = __VA_ARGS__; \ ::asmjit::Error _err_ = __VA_ARGS__; \
if (ASMJIT_UNLIKELY(_err)) \ if (ASMJIT_UNLIKELY(_err_)) { \
return _err; \ return _err_; \
} \
} while (0) } while (0)
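// Example (not part of this commit): a hedged sketch of reusing ASMJIT_PROPAGATE in user code.
// The emitter calls are illustrative; emitProlog() is assumed from the public BaseEmitter API.
static Error emitPrologAndBody(x86::Assembler& a, const FuncFrame& frame) {
  ASMJIT_PROPAGATE(a.emitProlog(frame));   // returns immediately if the call yields a non-zero Error
  ASMJIT_PROPAGATE(a.mov(x86::eax, 0));
  return kErrorOk;
}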
//! \} //! \}

View File

@@ -26,7 +26,7 @@ ASMJIT_BEGIN_NAMESPACE
//! //!
//! - \ref x86::Inst (X86 and X86_64) //! - \ref x86::Inst (X86 and X86_64)
//! - \ref a64::Inst (AArch64) //! - \ref a64::Inst (AArch64)
typedef uint32_t InstId; using InstId = uint32_t;
//! Instruction id parts. //! Instruction id parts.
//! //!
@@ -267,16 +267,21 @@ public:
//! \{ //! \{
//! Returns the instruction id with modifiers. //! Returns the instruction id with modifiers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstId id() const noexcept { return _id; } ASMJIT_INLINE_NODEBUG InstId id() const noexcept { return _id; }
//! Sets the instruction id and modifiers from `id`. //! Sets the instruction id and modifiers from `id`.
ASMJIT_INLINE_NODEBUG void setId(InstId id) noexcept { _id = id; } ASMJIT_INLINE_NODEBUG void setId(InstId id) noexcept { _id = id; }
//! Resets the instruction id and modifiers to zero, see \ref kIdNone. //! Resets the instruction id and modifiers to zero, see \ref kIdNone.
ASMJIT_INLINE_NODEBUG void resetId() noexcept { _id = 0; } ASMJIT_INLINE_NODEBUG void resetId() noexcept { _id = 0; }
//! Returns a real instruction id that doesn't contain any modifiers. //! Returns a real instruction id that doesn't contain any modifiers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstId realId() const noexcept { return _id & uint32_t(InstIdParts::kRealId); } ASMJIT_INLINE_NODEBUG InstId realId() const noexcept { return _id & uint32_t(InstIdParts::kRealId); }
template<InstIdParts kPart> template<InstIdParts kPart>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t getInstIdPart() const noexcept { ASMJIT_INLINE_NODEBUG uint32_t getInstIdPart() const noexcept {
return (uint32_t(_id) & uint32_t(kPart)) >> Support::ConstCTZ<uint32_t(kPart)>::value; return (uint32_t(_id) & uint32_t(kPart)) >> Support::ConstCTZ<uint32_t(kPart)>::value;
} }
@@ -291,11 +296,24 @@ public:
//! \name Instruction Options //! \name Instruction Options
//! \{ //! \{
//! Returns instruction options associated with this instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstOptions options() const noexcept { return _options; } ASMJIT_INLINE_NODEBUG InstOptions options() const noexcept { return _options; }
//! Tests whether the given instruction `option` is enabled.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOption(InstOptions option) const noexcept { return Support::test(_options, option); } ASMJIT_INLINE_NODEBUG bool hasOption(InstOptions option) const noexcept { return Support::test(_options, option); }
//! Replaces all instruction options by the given `options`.
ASMJIT_INLINE_NODEBUG void setOptions(InstOptions options) noexcept { _options = options; } ASMJIT_INLINE_NODEBUG void setOptions(InstOptions options) noexcept { _options = options; }
//! Adds instruction options provided by `options`.
ASMJIT_INLINE_NODEBUG void addOptions(InstOptions options) noexcept { _options |= options; } ASMJIT_INLINE_NODEBUG void addOptions(InstOptions options) noexcept { _options |= options; }
//! Clears instruction options provided by `options`.
ASMJIT_INLINE_NODEBUG void clearOptions(InstOptions options) noexcept { _options &= ~options; } ASMJIT_INLINE_NODEBUG void clearOptions(InstOptions options) noexcept { _options &= ~options; }
//! Resets all instruction options to `InstOptions::kNone` (there will be no instruction options active after reset).
ASMJIT_INLINE_NODEBUG void resetOptions() noexcept { _options = InstOptions::kNone; } ASMJIT_INLINE_NODEBUG void resetOptions() noexcept { _options = InstOptions::kNone; }
//! \} //! \}
@@ -303,11 +321,23 @@ public:
//! \name Extra Register //! \name Extra Register
//! \{ //! \{
//! Tests whether the instruction has associated an extra register.
//!
//! \note Extra registers are currently only used on X86 by AVX-512 masking such as `{k}` and `{k}{z}` and by repeated
//! instructions to explicitly assign a virtual register that would be ECX/RCX.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _extraReg.isReg(); } ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegOnly& extraReg() noexcept { return _extraReg; } ASMJIT_INLINE_NODEBUG RegOnly& extraReg() noexcept { return _extraReg; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _extraReg; } ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _extraReg; }
ASMJIT_INLINE_NODEBUG void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); } ASMJIT_INLINE_NODEBUG void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); }
ASMJIT_INLINE_NODEBUG void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); } ASMJIT_INLINE_NODEBUG void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
ASMJIT_INLINE_NODEBUG void resetExtraReg() noexcept { _extraReg.reset(); } ASMJIT_INLINE_NODEBUG void resetExtraReg() noexcept { _extraReg.reset(); }
//! \} //! \}
@@ -315,10 +345,15 @@ public:
//! \name ARM Specific //! \name ARM Specific
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG arm::CondCode armCondCode() const noexcept { return (arm::CondCode)getInstIdPart<InstIdParts::kARM_Cond>(); } ASMJIT_INLINE_NODEBUG arm::CondCode armCondCode() const noexcept { return (arm::CondCode)getInstIdPart<InstIdParts::kARM_Cond>(); }
ASMJIT_INLINE_NODEBUG void setArmCondCode(arm::CondCode cc) noexcept { setInstIdPart<InstIdParts::kARM_Cond>(uint32_t(cc)); } ASMJIT_INLINE_NODEBUG void setArmCondCode(arm::CondCode cc) noexcept { setInstIdPart<InstIdParts::kARM_Cond>(uint32_t(cc)); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG a32::DataType armDt() const noexcept { return (a32::DataType)getInstIdPart<InstIdParts::kA32_DT>(); } ASMJIT_INLINE_NODEBUG a32::DataType armDt() const noexcept { return (a32::DataType)getInstIdPart<InstIdParts::kA32_DT>(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG a32::DataType armDt2() const noexcept { return (a32::DataType)getInstIdPart<InstIdParts::kA32_DT2>(); } ASMJIT_INLINE_NODEBUG a32::DataType armDt2() const noexcept { return (a32::DataType)getInstIdPart<InstIdParts::kA32_DT2>(); }
//! \} //! \}
@@ -326,26 +361,31 @@ public:
//! \name Statics //! \name Statics
//! \{ //! \{
static ASMJIT_INLINE_NODEBUG constexpr InstId composeARMInstId(uint32_t id, arm::CondCode cc) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR InstId composeARMInstId(uint32_t id, arm::CondCode cc) noexcept {
return id | (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value); return id | (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
} }
static ASMJIT_INLINE_NODEBUG constexpr InstId composeARMInstId(uint32_t id, a32::DataType dt, arm::CondCode cc = arm::CondCode::kAL) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR InstId composeARMInstId(uint32_t id, a32::DataType dt, arm::CondCode cc = arm::CondCode::kAL) noexcept {
return id | (uint32_t(dt) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT)>::value) return id | (uint32_t(dt) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT)>::value)
| (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value); | (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
} }
static ASMJIT_INLINE_NODEBUG constexpr InstId composeARMInstId(uint32_t id, a32::DataType dt, a32::DataType dt2, arm::CondCode cc = arm::CondCode::kAL) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR InstId composeARMInstId(uint32_t id, a32::DataType dt, a32::DataType dt2, arm::CondCode cc = arm::CondCode::kAL) noexcept {
return id | (uint32_t(dt) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT)>::value) return id | (uint32_t(dt) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT)>::value)
| (uint32_t(dt2) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT2)>::value) | (uint32_t(dt2) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT2)>::value)
| (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value); | (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
} }
static ASMJIT_INLINE_NODEBUG constexpr InstId extractRealId(uint32_t id) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR InstId extractRealId(uint32_t id) noexcept {
return id & uint32_t(InstIdParts::kRealId); return id & uint32_t(InstIdParts::kRealId);
} }
static ASMJIT_INLINE_NODEBUG constexpr arm::CondCode extractARMCondCode(uint32_t id) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR arm::CondCode extractARMCondCode(uint32_t id) noexcept {
return (arm::CondCode)((uint32_t(id) & uint32_t(InstIdParts::kARM_Cond)) >> Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value); return (arm::CondCode)((uint32_t(id) & uint32_t(InstIdParts::kARM_Cond)) >> Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
} }
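// Example (not part of this commit): a round-trip sketch of the statics above, assuming they are
// members of BaseInst as the surrounding class suggests; 'baseId' is any real instruction id.
InstId withCond  = BaseInst::composeARMInstId(baseId, arm::CondCode::kEQ);
arm::CondCode cc = BaseInst::extractARMCondCode(withCond);   // -> arm::CondCode::kEQ
InstId realId    = BaseInst::extractRealId(withCond);        // -> baseId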
@@ -543,39 +583,56 @@ struct OpRWInfo {
//! \{ //! \{
//! Returns operand flags. //! Returns operand flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OpRWFlags opFlags() const noexcept { return _opFlags; } ASMJIT_INLINE_NODEBUG OpRWFlags opFlags() const noexcept { return _opFlags; }
//! Tests whether operand flags contain the given `flag`. //! Tests whether operand flags contain the given `flag`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOpFlag(OpRWFlags flag) const noexcept { return Support::test(_opFlags, flag); } ASMJIT_INLINE_NODEBUG bool hasOpFlag(OpRWFlags flag) const noexcept { return Support::test(_opFlags, flag); }
//! Adds the given `flags` to operand flags. //! Adds the given `flags` to operand flags.
ASMJIT_INLINE_NODEBUG void addOpFlags(OpRWFlags flags) noexcept { _opFlags |= flags; } ASMJIT_INLINE_NODEBUG void addOpFlags(OpRWFlags flags) noexcept { _opFlags |= flags; }
//! Removes the given `flags` from operand flags. //! Removes the given `flags` from operand flags.
ASMJIT_INLINE_NODEBUG void clearOpFlags(OpRWFlags flags) noexcept { _opFlags &= ~flags; } ASMJIT_INLINE_NODEBUG void clearOpFlags(OpRWFlags flags) noexcept { _opFlags &= ~flags; }
//! Tests whether this operand is read from. //! Tests whether this operand is read from.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isRead() const noexcept { return hasOpFlag(OpRWFlags::kRead); } ASMJIT_INLINE_NODEBUG bool isRead() const noexcept { return hasOpFlag(OpRWFlags::kRead); }
//! Tests whether this operand is written to. //! Tests whether this operand is written to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isWrite() const noexcept { return hasOpFlag(OpRWFlags::kWrite); } ASMJIT_INLINE_NODEBUG bool isWrite() const noexcept { return hasOpFlag(OpRWFlags::kWrite); }
//! Tests whether this operand is both read and write. //! Tests whether this operand is both read and write.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isReadWrite() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kRW; } ASMJIT_INLINE_NODEBUG bool isReadWrite() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kRW; }
//! Tests whether this operand is read only. //! Tests whether this operand is read only.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isReadOnly() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kRead; } ASMJIT_INLINE_NODEBUG bool isReadOnly() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kRead; }
//! Tests whether this operand is write only. //! Tests whether this operand is write only.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kWrite; } ASMJIT_INLINE_NODEBUG bool isWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kWrite; }
//! Returns the consecutive lead count - non-zero for a lead register that is followed by consecutive registers. //! Returns the consecutive lead count - non-zero for a lead register that is followed by consecutive registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t consecutiveLeadCount() const noexcept { return _consecutiveLeadCount; } ASMJIT_INLINE_NODEBUG uint32_t consecutiveLeadCount() const noexcept { return _consecutiveLeadCount; }
//! Tests whether this operand is Reg/Mem //! Tests whether this operand is Reg/Mem
//! //!
//! Reg/Mem operands can use either register or memory. //! Reg/Mem operands can use either register or memory.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isRm() const noexcept { return hasOpFlag(OpRWFlags::kRegMem); } ASMJIT_INLINE_NODEBUG bool isRm() const noexcept { return hasOpFlag(OpRWFlags::kRegMem); }
//! Tests whether the operand will be zero extended. //! Tests whether the operand will be zero extended.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isZExt() const noexcept { return hasOpFlag(OpRWFlags::kZExt); } ASMJIT_INLINE_NODEBUG bool isZExt() const noexcept { return hasOpFlag(OpRWFlags::kZExt); }
//! Tests whether the operand must have allocated a unique physical id that cannot be shared with other register //! Tests whether the operand must have allocated a unique physical id that cannot be shared with other register
//! operands. //! operands.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isUnique() const noexcept { return hasOpFlag(OpRWFlags::kUnique); } ASMJIT_INLINE_NODEBUG bool isUnique() const noexcept { return hasOpFlag(OpRWFlags::kUnique); }
//! \} //! \}
@@ -585,37 +642,63 @@ struct OpRWInfo {
//! Tests whether this is a fake memory operand, which is only used because of encoding. Fake memory operands do //! Tests whether this is a fake memory operand, which is only used because of encoding. Fake memory operands do
//! not access any memory, they are only used to encode registers. //! not access any memory, they are only used to encode registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemFake() const noexcept { return hasOpFlag(OpRWFlags::kMemFake); } ASMJIT_INLINE_NODEBUG bool isMemFake() const noexcept { return hasOpFlag(OpRWFlags::kMemFake); }
//! Tests whether the instruction's memory BASE register is used. //! Tests whether the instruction's memory BASE register is used.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseUsed() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseRW); } ASMJIT_INLINE_NODEBUG bool isMemBaseUsed() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseRW); }
//! Tests whether the instruction reads from its BASE registers. //! Tests whether the instruction reads from its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseRead() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseRead); } ASMJIT_INLINE_NODEBUG bool isMemBaseRead() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseRead); }
//! Tests whether the instruction writes to its BASE registers. //! Tests whether the instruction writes to its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseWrite() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseWrite); } ASMJIT_INLINE_NODEBUG bool isMemBaseWrite() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseWrite); }
//! Tests whether the instruction reads and writes from/to its BASE registers. //! Tests whether the instruction reads and writes from/to its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseReadWrite() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRW; } ASMJIT_INLINE_NODEBUG bool isMemBaseReadWrite() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRW; }
//! Tests whether the instruction only reads from its BASE registers. //! Tests whether the instruction only reads from its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseReadOnly() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRead; } ASMJIT_INLINE_NODEBUG bool isMemBaseReadOnly() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRead; }
//! Tests whether the instruction only writes to its BASE registers. //! Tests whether the instruction only writes to its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseWrite; } ASMJIT_INLINE_NODEBUG bool isMemBaseWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseWrite; }
//! Tests whether the instruction modifies the BASE register before it uses it to calculate the target address. //! Tests whether the instruction modifies the BASE register before it uses it to calculate the target address.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBasePreModify() const noexcept { return hasOpFlag(OpRWFlags::kMemBasePreModify); } ASMJIT_INLINE_NODEBUG bool isMemBasePreModify() const noexcept { return hasOpFlag(OpRWFlags::kMemBasePreModify); }
//! Tests whether the instruction modifies the BASE register after it uses it to calculate the target address. //! Tests whether the instruction modifies the BASE register after it uses it to calculate the target address.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBasePostModify() const noexcept { return hasOpFlag(OpRWFlags::kMemBasePostModify); } ASMJIT_INLINE_NODEBUG bool isMemBasePostModify() const noexcept { return hasOpFlag(OpRWFlags::kMemBasePostModify); }
//! Tests whether the instruction's memory INDEX register is used. //! Tests whether the instruction's memory INDEX register is used.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexUsed() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexRW); } ASMJIT_INLINE_NODEBUG bool isMemIndexUsed() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexRW); }
//! Tests whether the instruction reads the INDEX registers. //! Tests whether the instruction reads the INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexRead() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexRead); } ASMJIT_INLINE_NODEBUG bool isMemIndexRead() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexRead); }
//! Tests whether the instruction writes to its INDEX registers. //! Tests whether the instruction writes to its INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexWrite() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexWrite); } ASMJIT_INLINE_NODEBUG bool isMemIndexWrite() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexWrite); }
//! Tests whether the instruction reads and writes from/to its INDEX registers. //! Tests whether the instruction reads and writes from/to its INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexReadWrite() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRW; } ASMJIT_INLINE_NODEBUG bool isMemIndexReadWrite() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRW; }
//! Tests whether the instruction only reads from its INDEX registers. //! Tests whether the instruction only reads from its INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexReadOnly() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRead; } ASMJIT_INLINE_NODEBUG bool isMemIndexReadOnly() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRead; }
//! Tests whether the instruction only writes to its INDEX registers. //! Tests whether the instruction only writes to its INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexWrite; } ASMJIT_INLINE_NODEBUG bool isMemIndexWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexWrite; }
//! \} //! \}
@@ -626,9 +709,13 @@ struct OpRWInfo {
//! Returns a physical id of the register that is fixed for this operand. //! Returns a physical id of the register that is fixed for this operand.
//! //!
//! Returns \ref BaseReg::kIdBad if any register can be used. //! Returns \ref BaseReg::kIdBad if any register can be used.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t physId() const noexcept { return _physId; } ASMJIT_INLINE_NODEBUG uint32_t physId() const noexcept { return _physId; }
//! Tests whether \ref physId() would return a valid physical register id. //! Tests whether \ref physId() would return a valid physical register id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasPhysId() const noexcept { return _physId != BaseReg::kIdBad; } ASMJIT_INLINE_NODEBUG bool hasPhysId() const noexcept { return _physId != BaseReg::kIdBad; }
//! Sets physical register id, which would be fixed for this operand. //! Sets physical register id, which would be fixed for this operand.
ASMJIT_INLINE_NODEBUG void setPhysId(uint32_t physId) noexcept { _physId = uint8_t(physId); } ASMJIT_INLINE_NODEBUG void setPhysId(uint32_t physId) noexcept { _physId = uint8_t(physId); }
@@ -638,7 +725,9 @@ struct OpRWInfo {
//! \{ //! \{
//! Returns Reg/Mem size of the operand. //! Returns Reg/Mem size of the operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t rmSize() const noexcept { return _rmSize; } ASMJIT_INLINE_NODEBUG uint32_t rmSize() const noexcept { return _rmSize; }
//! Sets Reg/Mem size of the operand. //! Sets Reg/Mem size of the operand.
ASMJIT_INLINE_NODEBUG void setRmSize(uint32_t rmSize) noexcept { _rmSize = uint8_t(rmSize); } ASMJIT_INLINE_NODEBUG void setRmSize(uint32_t rmSize) noexcept { _rmSize = uint8_t(rmSize); }
@@ -648,16 +737,23 @@ struct OpRWInfo {
//! \{ //! \{
//! Returns read mask. //! Returns read mask.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t readByteMask() const noexcept { return _readByteMask; } ASMJIT_INLINE_NODEBUG uint64_t readByteMask() const noexcept { return _readByteMask; }
//! Returns write mask.
ASMJIT_INLINE_NODEBUG uint64_t writeByteMask() const noexcept { return _writeByteMask; }
//! Returns extend mask.
ASMJIT_INLINE_NODEBUG uint64_t extendByteMask() const noexcept { return _extendByteMask; }
//! Sets read mask. //! Sets read mask.
ASMJIT_INLINE_NODEBUG void setReadByteMask(uint64_t mask) noexcept { _readByteMask = mask; } ASMJIT_INLINE_NODEBUG void setReadByteMask(uint64_t mask) noexcept { _readByteMask = mask; }
//! Returns write mask.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t writeByteMask() const noexcept { return _writeByteMask; }
//! Sets write mask. //! Sets write mask.
ASMJIT_INLINE_NODEBUG void setWriteByteMask(uint64_t mask) noexcept { _writeByteMask = mask; } ASMJIT_INLINE_NODEBUG void setWriteByteMask(uint64_t mask) noexcept { _writeByteMask = mask; }
//! Returns extend mask.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t extendByteMask() const noexcept { return _extendByteMask; }
//! Sets extend mask. //! Sets extend mask.
ASMJIT_INLINE_NODEBUG void setExtendByteMask(uint64_t mask) noexcept { _extendByteMask = mask; } ASMJIT_INLINE_NODEBUG void setExtendByteMask(uint64_t mask) noexcept { _extendByteMask = mask; }
@@ -712,12 +808,15 @@ struct InstRWInfo {
//! \{ //! \{
//! Returns flags associated with the instruction, see \ref InstRWFlags. //! Returns flags associated with the instruction, see \ref InstRWFlags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstRWFlags instFlags() const noexcept { return _instFlags; } ASMJIT_INLINE_NODEBUG InstRWFlags instFlags() const noexcept { return _instFlags; }
//! Tests whether the instruction flags contain `flag`. //! Tests whether the instruction flags contain `flag`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstFlag(InstRWFlags flag) const noexcept { return Support::test(_instFlags, flag); } ASMJIT_INLINE_NODEBUG bool hasInstFlag(InstRWFlags flag) const noexcept { return Support::test(_instFlags, flag); }
//! Tests whether the instruction flags contain \ref InstRWFlags::kMovOp. //! Tests whether the instruction flags contain \ref InstRWFlags::kMovOp.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMovOp() const noexcept { return hasInstFlag(InstRWFlags::kMovOp); } ASMJIT_INLINE_NODEBUG bool isMovOp() const noexcept { return hasInstFlag(InstRWFlags::kMovOp); }
//! \} //! \}
@@ -726,8 +825,11 @@ struct InstRWInfo {
//! \{ //! \{
//! Returns a mask of CPU flags read. //! Returns a mask of CPU flags read.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CpuRWFlags readFlags() const noexcept { return _readFlags; } ASMJIT_INLINE_NODEBUG CpuRWFlags readFlags() const noexcept { return _readFlags; }
//! Returns a mask of CPU flags written. //! Returns a mask of CPU flags written.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CpuRWFlags writeFlags() const noexcept { return _writeFlags; } ASMJIT_INLINE_NODEBUG CpuRWFlags writeFlags() const noexcept { return _writeFlags; }
//! \} //! \}
@@ -745,6 +847,7 @@ struct InstRWInfo {
//! Some AVX+ instructions may require extra features for replacing registers with memory operands, for example //! Some AVX+ instructions may require extra features for replacing registers with memory operands, for example
//! VPSLLDQ instruction only supports `vpslldq reg, reg, imm` combination on AVX/AVX2 capable CPUs and requires //! VPSLLDQ instruction only supports `vpslldq reg, reg, imm` combination on AVX/AVX2 capable CPUs and requires
//! AVX-512 for `vpslldq reg, mem, imm` combination. //! AVX-512 for `vpslldq reg, mem, imm` combination.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t rmFeature() const noexcept { return _rmFeature; } ASMJIT_INLINE_NODEBUG uint32_t rmFeature() const noexcept { return _rmFeature; }
//! \} //! \}
@@ -753,18 +856,22 @@ struct InstRWInfo {
//! \{ //! \{
//! Returns RW information of extra register operand (extraReg). //! Returns RW information of extra register operand (extraReg).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const OpRWInfo& extraReg() const noexcept { return _extraReg; } ASMJIT_INLINE_NODEBUG const OpRWInfo& extraReg() const noexcept { return _extraReg; }
//! Returns RW information of all instruction's operands. //! Returns RW information of all instruction's operands.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const OpRWInfo* operands() const noexcept { return _operands; } ASMJIT_INLINE_NODEBUG const OpRWInfo* operands() const noexcept { return _operands; }
//! Returns RW information of the operand at the given `index`. //! Returns RW information of the operand at the given `index`.
[[nodiscard]]
inline const OpRWInfo& operand(size_t index) const noexcept { inline const OpRWInfo& operand(size_t index) const noexcept {
ASMJIT_ASSERT(index < Globals::kMaxOpCount); ASMJIT_ASSERT(index < Globals::kMaxOpCount);
return _operands[index]; return _operands[index];
} }
//! Returns the number of operands this instruction has. //! Returns the number of operands this instruction has.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t opCount() const noexcept { return _opCount; } ASMJIT_INLINE_NODEBUG uint32_t opCount() const noexcept { return _opCount; }
//! \} //! \}
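For context, a short sketch of how this structure is typically consumed. It assumes InstAPI::queryRWInfo() keeps an (arch, inst, operands, opCount, out) shape and that OpRWInfo exposes isRead()/isWrite() accessors; neither appears in this hunk, so treat both as assumptions rather than quotes of the header:

#include <asmjit/core.h>
#include <cstdio>
using namespace asmjit;

// Hypothetical helper: prints which operands an instruction reads and writes.
static void dumpRWInfo(Arch arch, const BaseInst& inst, const Operand_* ops, size_t opCount) {
  InstRWInfo rw;
  if (InstAPI::queryRWInfo(arch, inst, ops, opCount, &rw) != kErrorOk)
    return;

  for (uint32_t i = 0; i < rw.opCount(); i++) {
    const OpRWInfo& op = rw.operand(i);
    printf("op #%u: read=%d write=%d\n", i, op.isRead(), op.isWrite());
  }
}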
@@ -789,7 +896,7 @@ namespace InstAPI {
//! If `instOptions` is zero then only raw instruction name (without any additional text) will be appended. //! If `instOptions` is zero then only raw instruction name (without any additional text) will be appended.
ASMJIT_API Error instIdToString(Arch arch, InstId instId, InstStringifyOptions options, String& output) noexcept; ASMJIT_API Error instIdToString(Arch arch, InstId instId, InstStringifyOptions options, String& output) noexcept;
ASMJIT_DEPRECATED("Use `instIdToString()` with `InstStringifyOptions` parameter") [[deprecated("Use `instIdToString()` with `InstStringifyOptions` parameter")]]
static inline Error instIdToString(Arch arch, InstId instId, String& output) noexcept { static inline Error instIdToString(Arch arch, InstId instId, String& output) noexcept {
return instIdToString(arch, instId, InstStringifyOptions::kNone, output); return instIdToString(arch, instId, InstStringifyOptions::kNone, output);
} }
@@ -798,11 +905,13 @@ static inline Error instIdToString(Arch arch, InstId instId, String& output) noe
//! `SIZE_MAX` if `s` is known to be null terminated. //! `SIZE_MAX` if `s` is known to be null terminated.
//! //!
//! Returns the parsed instruction id or \ref BaseInst::kIdNone if no such instruction exists. //! Returns the parsed instruction id or \ref BaseInst::kIdNone if no such instruction exists.
[[nodiscard]]
ASMJIT_API InstId stringToInstId(Arch arch, const char* s, size_t len) noexcept; ASMJIT_API InstId stringToInstId(Arch arch, const char* s, size_t len) noexcept;
#endif // !ASMJIT_NO_TEXT #endif // !ASMJIT_NO_TEXT
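A minimal usage sketch for the two text helpers above (available only when built without ASMJIT_NO_TEXT); it uses only the signatures visible in this hunk, and x86::Inst::kIdAdd serves purely as an illustrative instruction id:

#include <asmjit/core.h>
#include <asmjit/x86.h>
#include <cstdint>
using namespace asmjit;

void textRoundTrip() {
  String name;
  // New form - the InstStringifyOptions parameter is now explicit.
  Error err = InstAPI::instIdToString(Arch::kX64, x86::Inst::kIdAdd, InstStringifyOptions::kNone, name);
  (void)err;

  // Reverse direction - SIZE_MAX means the string is null terminated.
  InstId id = InstAPI::stringToInstId(Arch::kX64, "add", SIZE_MAX);
  (void)id;
}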
#ifndef ASMJIT_NO_VALIDATION #ifndef ASMJIT_NO_VALIDATION
//! Validates the given instruction considering the given `validationFlags`. //! Validates the given instruction considering the given `validationFlags`.
[[nodiscard]]
ASMJIT_API Error validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags = ValidationFlags::kNone) noexcept; ASMJIT_API Error validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags = ValidationFlags::kNone) noexcept;
#endif // !ASMJIT_NO_VALIDATION #endif // !ASMJIT_NO_VALIDATION
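And a corresponding sketch for the validation entry point (compiled only without ASMJIT_NO_VALIDATION); the operand list is illustrative and follows the usual asmjit pattern of passing an Operand array as const Operand_*:

#include <asmjit/core.h>
#include <asmjit/x86.h>
using namespace asmjit;

bool isValidAdd() {
  Operand ops[] = { x86::eax, x86::ebx };
  return InstAPI::validate(Arch::kX64, BaseInst(x86::Inst::kIdAdd), ops, 2) == kErrorOk;
}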
View File
@@ -12,12 +12,12 @@ namespace InstNameUtils {
static constexpr uint32_t kBufferSize = 32; static constexpr uint32_t kBufferSize = 32;
static ASMJIT_FORCE_INLINE char decode5BitChar(uint32_t c) noexcept { static ASMJIT_INLINE_CONSTEXPR char decode5BitChar(uint32_t c) noexcept {
uint32_t base = c <= 26 ? uint32_t('a') - 1u : uint32_t('0') - 27u; uint32_t base = c <= 26 ? uint32_t('a') - 1u : uint32_t('0') - 27u;
return char(base + c); return char(base + c);
} }
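For reference, the arithmetic above maps the 5-bit values 1..26 to 'a'..'z' and 27..31 to '0'..'4'; a few spot checks of the two branches:

// decode5BitChar(1)  == 'a'   (base = 'a' - 1  = 96;  96 + 1  = 97)
// decode5BitChar(26) == 'z'   (96 + 26 = 122)
// decode5BitChar(27) == '0'   (base = '0' - 27 = 21;  21 + 27 = 48)
// decode5BitChar(31) == '4'   (21 + 31 = 52)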
static ASMJIT_FORCE_INLINE size_t decodeToBuffer(char nameOut[kBufferSize], uint32_t nameValue, InstStringifyOptions options, const char* stringTable) noexcept { static ASMJIT_INLINE size_t decodeToBuffer(char nameOut[kBufferSize], uint32_t nameValue, InstStringifyOptions options, const char* stringTable) noexcept {
size_t i; size_t i;
if (nameValue & 0x80000000u) { if (nameValue & 0x80000000u) {
View File
@@ -57,22 +57,22 @@ public:
size_t _end; size_t _end;
T _bitWord; T _bitWord;
enum : uint32_t { kBitWordSize = Support::bitSizeOf<T>() }; static inline constexpr uint32_t kBitWordSize = Support::bitSizeOf<T>();
enum : T { kXorMask = B == 0 ? Support::allOnes<T>() : T(0) }; static inline constexpr T kXorMask = B == 0 ? Support::allOnes<T>() : T(0);
ASMJIT_FORCE_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords) noexcept { ASMJIT_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords) noexcept {
init(data, numBitWords); init(data, numBitWords);
} }
ASMJIT_FORCE_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords, size_t start, size_t end) noexcept { ASMJIT_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords, size_t start, size_t end) noexcept {
init(data, numBitWords, start, end); init(data, numBitWords, start, end);
} }
ASMJIT_FORCE_INLINE void init(const T* data, size_t numBitWords) noexcept { ASMJIT_INLINE void init(const T* data, size_t numBitWords) noexcept {
init(data, numBitWords, 0, numBitWords * kBitWordSize); init(data, numBitWords, 0, numBitWords * kBitWordSize);
} }
ASMJIT_FORCE_INLINE void init(const T* data, size_t numBitWords, size_t start, size_t end) noexcept { ASMJIT_INLINE void init(const T* data, size_t numBitWords, size_t start, size_t end) noexcept {
ASMJIT_ASSERT(numBitWords >= (end + kBitWordSize - 1) / kBitWordSize); ASMJIT_ASSERT(numBitWords >= (end + kBitWordSize - 1) / kBitWordSize);
DebugUtils::unused(numBitWords); DebugUtils::unused(numBitWords);
@@ -80,8 +80,9 @@ public:
const T* ptr = data + (idx / kBitWordSize); const T* ptr = data + (idx / kBitWordSize);
T bitWord = 0; T bitWord = 0;
if (idx < end) if (idx < end) {
bitWord = (*ptr ^ kXorMask) & (Support::allOnes<T>() << (start % kBitWordSize)); bitWord = (*ptr ^ kXorMask) & (Support::allOnes<T>() << (start % kBitWordSize));
}
_ptr = ptr; _ptr = ptr;
_idx = idx; _idx = idx;
@@ -89,12 +90,13 @@ public:
_bitWord = bitWord; _bitWord = bitWord;
} }
ASMJIT_FORCE_INLINE bool nextRange(size_t* rangeStart, size_t* rangeEnd, size_t rangeHint = std::numeric_limits<size_t>::max()) noexcept { ASMJIT_INLINE bool nextRange(size_t* rangeStart, size_t* rangeEnd, size_t rangeHint = std::numeric_limits<size_t>::max()) noexcept {
// Skip all empty BitWords. // Skip all empty BitWords.
while (_bitWord == 0) { while (_bitWord == 0) {
_idx += kBitWordSize; _idx += kBitWordSize;
if (_idx >= _end) if (_idx >= _end) {
return false; return false;
}
_bitWord = (*++_ptr) ^ kXorMask; _bitWord = (*++_ptr) ^ kXorMask;
} }
@@ -107,8 +109,9 @@ public:
*rangeEnd = Support::min(_idx + kBitWordSize, _end); *rangeEnd = Support::min(_idx + kBitWordSize, _end);
while (*rangeEnd - *rangeStart < rangeHint) { while (*rangeEnd - *rangeStart < rangeHint) {
_idx += kBitWordSize; _idx += kBitWordSize;
if (_idx >= _end) if (_idx >= _end) {
break; break;
}
_bitWord = (*++_ptr) ^ kXorMask; _bitWord = (*++_ptr) ^ kXorMask;
if (_bitWord != Support::allOnes<T>()) { if (_bitWord != Support::allOnes<T>()) {
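To make the iteration contract above concrete, a small sketch; it assumes the second template parameter selects the bit value being searched for, as the kXorMask definition suggests (this iterator is an internal helper, so the exact instantiation is an assumption):

// Illustration only - visits ranges of consecutive set bits.
Support::BitWord bits[] = { Support::BitWord(0x0F0u) };    // bits 4..7 set
BitVectorRangeIterator<Support::BitWord, 1> it(bits, 1);   // one bit-word

size_t rangeStart, rangeEnd;
while (it.nextRange(&rangeStart, &rangeEnd)) {
  // The only range reported for this pattern is [4, 8).
}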
@@ -248,7 +251,8 @@ public:
uint32_t blockFlags, uint32_t blockFlags,
Support::BitWord* usedBitVector, Support::BitWord* usedBitVector,
Support::BitWord* stopBitVector, Support::BitWord* stopBitVector,
uint32_t areaSize) noexcept uint32_t areaSize
) noexcept
: ZoneTreeNodeT(), : ZoneTreeNodeT(),
_pool(pool), _pool(pool),
_mapping(mapping), _mapping(mapping),
@@ -265,29 +269,51 @@ public:
clearBlock(); clearBlock();
} }
[[nodiscard]]
inline JitAllocatorPool* pool() const noexcept { return _pool; } inline JitAllocatorPool* pool() const noexcept { return _pool; }
[[nodiscard]]
inline uint8_t* rxPtr() const noexcept { return static_cast<uint8_t*>(_mapping.rx); } inline uint8_t* rxPtr() const noexcept { return static_cast<uint8_t*>(_mapping.rx); }
[[nodiscard]]
inline uint8_t* rwPtr() const noexcept { return static_cast<uint8_t*>(_mapping.rw); } inline uint8_t* rwPtr() const noexcept { return static_cast<uint8_t*>(_mapping.rw); }
[[nodiscard]]
inline bool hasFlag(uint32_t f) const noexcept { return (_flags & f) != 0; } inline bool hasFlag(uint32_t f) const noexcept { return (_flags & f) != 0; }
inline void addFlags(uint32_t f) noexcept { _flags |= f; } inline void addFlags(uint32_t f) noexcept { _flags |= f; }
inline void clearFlags(uint32_t f) noexcept { _flags &= ~f; } inline void clearFlags(uint32_t f) noexcept { _flags &= ~f; }
[[nodiscard]]
inline bool empty() const noexcept { return hasFlag(kFlagEmpty); } inline bool empty() const noexcept { return hasFlag(kFlagEmpty); }
[[nodiscard]]
inline bool isDirty() const noexcept { return hasFlag(kFlagDirty); } inline bool isDirty() const noexcept { return hasFlag(kFlagDirty); }
inline void makeDirty() noexcept { addFlags(kFlagDirty); } inline void makeDirty() noexcept { addFlags(kFlagDirty); }
[[nodiscard]]
inline bool hasLargePages() const noexcept { return hasFlag(kFlagLargePages); } inline bool hasLargePages() const noexcept { return hasFlag(kFlagLargePages); }
[[nodiscard]]
inline bool hasInitialPadding() const noexcept { return hasFlag(kFlagInitialPadding); } inline bool hasInitialPadding() const noexcept { return hasFlag(kFlagInitialPadding); }
[[nodiscard]]
inline uint32_t initialAreaStart() const noexcept { return initialAreaStartByFlags(_flags); } inline uint32_t initialAreaStart() const noexcept { return initialAreaStartByFlags(_flags); }
[[nodiscard]]
inline size_t blockSize() const noexcept { return _blockSize; } inline size_t blockSize() const noexcept { return _blockSize; }
[[nodiscard]]
inline uint32_t areaSize() const noexcept { return _areaSize; } inline uint32_t areaSize() const noexcept { return _areaSize; }
[[nodiscard]]
inline uint32_t areaUsed() const noexcept { return _areaUsed; } inline uint32_t areaUsed() const noexcept { return _areaUsed; }
[[nodiscard]]
inline uint32_t areaAvailable() const noexcept { return _areaSize - _areaUsed; } inline uint32_t areaAvailable() const noexcept { return _areaSize - _areaUsed; }
[[nodiscard]]
inline uint32_t largestUnusedArea() const noexcept { return _largestUnusedArea; } inline uint32_t largestUnusedArea() const noexcept { return _largestUnusedArea; }
inline void decreaseUsedArea(uint32_t value) noexcept { inline void decreaseUsedArea(uint32_t value) noexcept {
@@ -334,10 +360,13 @@ public:
clearFlags(kFlagDirty | kFlagEmpty); clearFlags(kFlagDirty | kFlagEmpty);
} }
else { else {
if (_searchStart == allocatedAreaStart) if (_searchStart == allocatedAreaStart) {
_searchStart = allocatedAreaEnd; _searchStart = allocatedAreaEnd;
if (_searchEnd == allocatedAreaEnd) }
if (_searchEnd == allocatedAreaEnd) {
_searchEnd = allocatedAreaStart; _searchEnd = allocatedAreaStart;
}
addFlags(kFlagDirty); addFlags(kFlagDirty);
clearFlags(kFlagEmpty); clearFlags(kFlagEmpty);
@@ -447,33 +476,40 @@ static inline JitAllocatorPrivateImpl* JitAllocatorImpl_new(const JitAllocator::
// Setup pool count to [1..3]. // Setup pool count to [1..3].
size_t poolCount = 1; size_t poolCount = 1;
if (Support::test(options, JitAllocatorOptions::kUseMultiplePools)) if (Support::test(options, JitAllocatorOptions::kUseMultiplePools)) {
poolCount = kJitAllocatorMultiPoolCount; poolCount = kJitAllocatorMultiPoolCount;
}
// Setup block size [64kB..256MB]. // Setup block size [64kB..256MB].
if (blockSize < 64 * 1024 || blockSize > 256 * 1024 * 1024 || !Support::isPowerOf2(blockSize)) if (blockSize < 64 * 1024 || blockSize > 256 * 1024 * 1024 || !Support::isPowerOf2(blockSize)) {
blockSize = vmInfo.pageGranularity; blockSize = vmInfo.pageGranularity;
}
// Setup granularity [64..256]. // Setup granularity [64..256].
if (granularity < 64 || granularity > 256 || !Support::isPowerOf2(granularity)) if (granularity < 64 || granularity > 256 || !Support::isPowerOf2(granularity)) {
granularity = kJitAllocatorBaseGranularity; granularity = kJitAllocatorBaseGranularity;
}
// Setup fill-pattern. // Setup fill-pattern.
if (uint32_t(options & JitAllocatorOptions::kCustomFillPattern) == 0) if (uint32_t(options & JitAllocatorOptions::kCustomFillPattern) == 0) {
fillPattern = JitAllocator_defaultFillPattern(); fillPattern = JitAllocator_defaultFillPattern();
}
size_t size = sizeof(JitAllocatorPrivateImpl) + sizeof(JitAllocatorPool) * poolCount; size_t size = sizeof(JitAllocatorPrivateImpl) + sizeof(JitAllocatorPool) * poolCount;
void* p = ::malloc(size); void* p = ::malloc(size);
if (ASMJIT_UNLIKELY(!p))
if (ASMJIT_UNLIKELY(!p)) {
return nullptr; return nullptr;
}
VirtMem::HardenedRuntimeInfo hardenedRtInfo = VirtMem::hardenedRuntimeInfo(); VirtMem::HardenedRuntimeInfo hardenedRtInfo = VirtMem::hardenedRuntimeInfo();
if (Support::test(hardenedRtInfo.flags, VirtMem::HardenedRuntimeFlags::kEnabled)) { if (Support::test(hardenedRtInfo.flags, VirtMem::HardenedRuntimeFlags::kEnabled)) {
// If we are running within a hardened environment (mapping RWX is not allowed) then we have to use dual mapping // If we are running within a hardened environment (mapping RWX is not allowed) then we have to use dual mapping
// or other runtime capabilities like Apple specific MAP_JIT. There is no point in not enabling these as otherwise // or other runtime capabilities like Apple specific MAP_JIT. There is no point in not enabling these as otherwise
// the allocation would fail and JitAllocator would not be able to allocate memory. // the allocation would fail and JitAllocator would not be able to allocate memory.
if (!Support::test(hardenedRtInfo.flags, VirtMem::HardenedRuntimeFlags::kMapJit)) if (!Support::test(hardenedRtInfo.flags, VirtMem::HardenedRuntimeFlags::kMapJit)) {
options |= JitAllocatorOptions::kUseDualMapping; options |= JitAllocatorOptions::kUseDualMapping;
}
} }
JitAllocatorPool* pools = reinterpret_cast<JitAllocatorPool*>((uint8_t*)p + sizeof(JitAllocatorPrivateImpl)); JitAllocatorPool* pools = reinterpret_cast<JitAllocatorPool*>((uint8_t*)p + sizeof(JitAllocatorPrivateImpl));
@@ -485,8 +521,9 @@ static inline JitAllocatorPrivateImpl* JitAllocatorImpl_new(const JitAllocator::
impl->fillPattern = fillPattern; impl->fillPattern = fillPattern;
impl->pageSize = vmInfo.pageSize; impl->pageSize = vmInfo.pageSize;
for (size_t poolId = 0; poolId < poolCount; poolId++) for (size_t poolId = 0; poolId < poolCount; poolId++) {
new(Support::PlacementNew{&pools[poolId]}) JitAllocatorPool(granularity << poolId); new(Support::PlacementNew{&pools[poolId]}) JitAllocatorPool(granularity << poolId);
}
return impl; return impl;
} }
@@ -501,8 +538,9 @@ static inline size_t JitAllocatorImpl_sizeToPoolId(const JitAllocatorPrivateImpl
size_t granularity = size_t(impl->granularity) << poolId; size_t granularity = size_t(impl->granularity) << poolId;
while (poolId) { while (poolId) {
if (Support::alignUp(size, granularity) == size) if (Support::alignUp(size, granularity) == size) {
break; break;
}
poolId--; poolId--;
granularity >>= 1; granularity >>= 1;
} }
@@ -522,18 +560,21 @@ static inline size_t JitAllocatorImpl_calculateIdealBlockSize(JitAllocatorPrivat
// We have to increase the allocationSize if we know that the block must provide padding. // We have to increase the allocationSize if we know that the block must provide padding.
if (!Support::test(impl->options, JitAllocatorOptions::kDisableInitialPadding)) { if (!Support::test(impl->options, JitAllocatorOptions::kDisableInitialPadding)) {
size_t granularity = pool->granularity; size_t granularity = pool->granularity;
if (SIZE_MAX - allocationSize < granularity) if (SIZE_MAX - allocationSize < granularity) {
return 0; // Overflown return 0; // Overflown
}
allocationSize += granularity; allocationSize += granularity;
} }
if (blockSize < kJitAllocatorMaxBlockSize) if (blockSize < kJitAllocatorMaxBlockSize) {
blockSize *= 2u; blockSize *= 2u;
}
if (allocationSize > blockSize) { if (allocationSize > blockSize) {
blockSize = Support::alignUp(allocationSize, impl->blockSize); blockSize = Support::alignUp(allocationSize, impl->blockSize);
if (ASMJIT_UNLIKELY(blockSize < allocationSize)) if (ASMJIT_UNLIKELY(blockSize < allocationSize)) {
return 0; // Overflown. return 0; // Overflown.
}
} }
return blockSize; return blockSize;
@@ -562,8 +603,9 @@ ASMJIT_FAVOR_SPEED static void JitAllocatorImpl_fillPattern(void* mem, uint32_t
uint32_t* mem32 = static_cast<uint32_t*>(mem); uint32_t* mem32 = static_cast<uint32_t*>(mem);
size_t n = byteSize / 4u; size_t n = byteSize / 4u;
for (size_t i = 0; i < n; i++) for (size_t i = 0; i < n; i++) {
mem32[i] = pattern; mem32[i] = pattern;
}
} }
// Allocate a new `JitAllocatorBlock` for the given `blockSize`. // Allocate a new `JitAllocatorBlock` for the given `blockSize`.
@@ -619,10 +661,12 @@ static Error JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* impl, JitAllocat
// Out of memory... // Out of memory...
if (ASMJIT_UNLIKELY(blockPtr == nullptr)) { if (ASMJIT_UNLIKELY(blockPtr == nullptr)) {
if (Support::test(impl->options, JitAllocatorOptions::kUseDualMapping)) if (Support::test(impl->options, JitAllocatorOptions::kUseDualMapping)) {
VirtMem::releaseDualMapping(&virtMem, blockSize); (void)VirtMem::releaseDualMapping(&virtMem, blockSize);
else }
VirtMem::release(virtMem.rx, blockSize); else {
(void)VirtMem::release(virtMem.rx, blockSize);
}
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
} }
@@ -640,10 +684,12 @@ static Error JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* impl, JitAllocat
static void JitAllocatorImpl_deleteBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept { static void JitAllocatorImpl_deleteBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
DebugUtils::unused(impl); DebugUtils::unused(impl);
if (block->hasFlag(JitAllocatorBlock::kFlagDualMapped)) if (block->hasFlag(JitAllocatorBlock::kFlagDualMapped)) {
VirtMem::releaseDualMapping(&block->_mapping, block->blockSize()); (void)VirtMem::releaseDualMapping(&block->_mapping, block->blockSize());
else }
VirtMem::release(block->rxPtr(), block->blockSize()); else {
(void)VirtMem::release(block->rxPtr(), block->blockSize());
}
::free(block); ::free(block);
} }
@@ -651,8 +697,9 @@ static void JitAllocatorImpl_deleteBlock(JitAllocatorPrivateImpl* impl, JitAlloc
static void JitAllocatorImpl_insertBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept { static void JitAllocatorImpl_insertBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
JitAllocatorPool* pool = block->pool(); JitAllocatorPool* pool = block->pool();
if (!pool->cursor) if (!pool->cursor) {
pool->cursor = block; pool->cursor = block;
}
// Add to RBTree and List. // Add to RBTree and List.
impl->tree.insert(block); impl->tree.insert(block);
@@ -670,8 +717,9 @@ static void JitAllocatorImpl_removeBlock(JitAllocatorPrivateImpl* impl, JitAlloc
JitAllocatorPool* pool = block->pool(); JitAllocatorPool* pool = block->pool();
// Remove from RBTree and List. // Remove from RBTree and List.
if (pool->cursor == block) if (pool->cursor == block) {
pool->cursor = block->hasPrev() ? block->prev() : block->next(); pool->cursor = block->hasPrev() ? block->prev() : block->next();
}
impl->tree.remove(block); impl->tree.remove(block);
pool->blocks.unlink(block); pool->blocks.unlink(block);
@@ -685,8 +733,9 @@ static void JitAllocatorImpl_removeBlock(JitAllocatorPrivateImpl* impl, JitAlloc
} }
static void JitAllocatorImpl_wipeOutBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept { static void JitAllocatorImpl_wipeOutBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
if (block->hasFlag(JitAllocatorBlock::kFlagEmpty)) if (block->hasFlag(JitAllocatorBlock::kFlagEmpty)) {
return; return;
}
JitAllocatorPool* pool = block->pool(); JitAllocatorPool* pool = block->pool();
if (Support::test(impl->options, JitAllocatorOptions::kFillUnusedMemory)) { if (Support::test(impl->options, JitAllocatorOptions::kFillUnusedMemory)) {
@@ -717,13 +766,15 @@ static void JitAllocatorImpl_wipeOutBlock(JitAllocatorPrivateImpl* impl, JitAllo
JitAllocator::JitAllocator(const CreateParams* params) noexcept { JitAllocator::JitAllocator(const CreateParams* params) noexcept {
_impl = JitAllocatorImpl_new(params); _impl = JitAllocatorImpl_new(params);
if (ASMJIT_UNLIKELY(!_impl)) if (ASMJIT_UNLIKELY(!_impl)) {
_impl = const_cast<JitAllocator::Impl*>(&JitAllocatorImpl_none); _impl = const_cast<JitAllocator::Impl*>(&JitAllocatorImpl_none);
}
} }
JitAllocator::~JitAllocator() noexcept { JitAllocator::~JitAllocator() noexcept {
if (_impl == &JitAllocatorImpl_none) if (_impl == &JitAllocatorImpl_none) {
return; return;
}
reset(ResetPolicy::kHard); reset(ResetPolicy::kHard);
JitAllocatorImpl_destroy(static_cast<JitAllocatorPrivateImpl*>(_impl)); JitAllocatorImpl_destroy(static_cast<JitAllocatorPrivateImpl*>(_impl));
@@ -733,8 +784,9 @@ JitAllocator::~JitAllocator() noexcept {
// ==================== // ====================
void JitAllocator::reset(ResetPolicy resetPolicy) noexcept { void JitAllocator::reset(ResetPolicy resetPolicy) noexcept {
if (_impl == &JitAllocatorImpl_none) if (_impl == &JitAllocatorImpl_none) {
return; return;
}
JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl); JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
impl->tree.reset(); impl->tree.reset();
@@ -802,19 +854,22 @@ JitAllocator::Statistics JitAllocator::statistics() const noexcept {
Error JitAllocator::alloc(Span& out, size_t size) noexcept { Error JitAllocator::alloc(Span& out, size_t size) noexcept {
out = Span{}; out = Span{};
if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl); JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
constexpr uint32_t kNoIndex = std::numeric_limits<uint32_t>::max(); constexpr uint32_t kNoIndex = std::numeric_limits<uint32_t>::max();
// Align to the minimum granularity by default. // Align to the minimum granularity by default.
size = Support::alignUp<size_t>(size, impl->granularity); size = Support::alignUp<size_t>(size, impl->granularity);
if (ASMJIT_UNLIKELY(size == 0)) if (ASMJIT_UNLIKELY(size == 0)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
if (ASMJIT_UNLIKELY(size > std::numeric_limits<uint32_t>::max() / 2)) if (ASMJIT_UNLIKELY(size > std::numeric_limits<uint32_t>::max() / 2)) {
return DebugUtils::errored(kErrorTooLarge); return DebugUtils::errored(kErrorTooLarge);
}
LockGuard guard(impl->lock); LockGuard guard(impl->lock);
JitAllocatorPool* pool = &impl->pools[JitAllocatorImpl_sizeToPoolId(impl, size)]; JitAllocatorPool* pool = &impl->pools[JitAllocatorImpl_sizeToPoolId(impl, size)];
@@ -849,8 +904,9 @@ Error JitAllocator::alloc(Span& out, size_t size) noexcept {
largestArea = Support::max(largestArea, rangeSize); largestArea = Support::max(largestArea, rangeSize);
} }
if (areaIndex != kNoIndex) if (areaIndex != kNoIndex) {
break; break;
}
if (searchStart != SIZE_MAX) { if (searchStart != SIZE_MAX) {
// Because we have iterated over the entire block, we can now mark the // Because we have iterated over the entire block, we can now mark the
@@ -872,8 +928,9 @@ Error JitAllocator::alloc(Span& out, size_t size) noexcept {
// Allocate a new block if there is no region of a required size. // Allocate a new block if there is no region of a required size.
if (areaIndex == kNoIndex) { if (areaIndex == kNoIndex) {
size_t blockSize = JitAllocatorImpl_calculateIdealBlockSize(impl, pool, size); size_t blockSize = JitAllocatorImpl_calculateIdealBlockSize(impl, pool, size);
if (ASMJIT_UNLIKELY(!blockSize)) if (ASMJIT_UNLIKELY(!blockSize)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
ASMJIT_PROPAGATE(JitAllocatorImpl_newBlock(impl, &block, pool, blockSize)); ASMJIT_PROPAGATE(JitAllocatorImpl_newBlock(impl, &block, pool, blockSize));
areaIndex = block->initialAreaStart(); areaIndex = block->initialAreaStart();
@@ -904,18 +961,21 @@ Error JitAllocator::alloc(Span& out, size_t size) noexcept {
} }
Error JitAllocator::release(void* rx) noexcept { Error JitAllocator::release(void* rx) noexcept {
if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
if (ASMJIT_UNLIKELY(!rx)) if (ASMJIT_UNLIKELY(!rx)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl); JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
LockGuard guard(impl->lock); LockGuard guard(impl->lock);
JitAllocatorBlock* block = impl->tree.get(static_cast<uint8_t*>(rx)); JitAllocatorBlock* block = impl->tree.get(static_cast<uint8_t*>(rx));
if (ASMJIT_UNLIKELY(!block)) if (ASMJIT_UNLIKELY(!block)) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
// Offset relative to the start of the block. // Offset relative to the start of the block.
JitAllocatorPool* pool = block->pool(); JitAllocatorPool* pool = block->pool();
@@ -954,8 +1014,9 @@ Error JitAllocator::release(void* rx) noexcept {
static Error JitAllocatorImpl_shrink(JitAllocatorPrivateImpl* impl, JitAllocator::Span& span, size_t newSize, bool alreadyUnderWriteScope) noexcept { static Error JitAllocatorImpl_shrink(JitAllocatorPrivateImpl* impl, JitAllocator::Span& span, size_t newSize, bool alreadyUnderWriteScope) noexcept {
JitAllocatorBlock* block = static_cast<JitAllocatorBlock*>(span._block); JitAllocatorBlock* block = static_cast<JitAllocatorBlock*>(span._block);
if (ASMJIT_UNLIKELY(!block)) if (ASMJIT_UNLIKELY(!block)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
LockGuard guard(impl->lock); LockGuard guard(impl->lock);
@@ -968,16 +1029,18 @@ static Error JitAllocatorImpl_shrink(JitAllocatorPrivateImpl* impl, JitAllocator
// Don't trust `span.size()` - if it has been already truncated we would be off... // Don't trust `span.size()` - if it has been already truncated we would be off...
bool isUsed = Support::bitVectorGetBit(block->_usedBitVector, areaStart); bool isUsed = Support::bitVectorGetBit(block->_usedBitVector, areaStart);
if (ASMJIT_UNLIKELY(!isUsed)) if (ASMJIT_UNLIKELY(!isUsed)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
uint32_t areaEnd = uint32_t(Support::bitVectorIndexOf(block->_stopBitVector, areaStart, true)) + 1; uint32_t areaEnd = uint32_t(Support::bitVectorIndexOf(block->_stopBitVector, areaStart, true)) + 1;
uint32_t areaPrevSize = areaEnd - areaStart; uint32_t areaPrevSize = areaEnd - areaStart;
uint32_t spanPrevSize = areaPrevSize * pool->granularity; uint32_t spanPrevSize = areaPrevSize * pool->granularity;
uint32_t areaShrunkSize = pool->areaSizeFromByteSize(newSize); uint32_t areaShrunkSize = pool->areaSizeFromByteSize(newSize);
if (ASMJIT_UNLIKELY(areaShrunkSize > areaPrevSize)) if (ASMJIT_UNLIKELY(areaShrunkSize > areaPrevSize)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
uint32_t areaDiff = areaPrevSize - areaShrunkSize; uint32_t areaDiff = areaPrevSize - areaShrunkSize;
if (areaDiff) { if (areaDiff) {
@@ -1003,11 +1066,13 @@ static Error JitAllocatorImpl_shrink(JitAllocatorPrivateImpl* impl, JitAllocator
} }
Error JitAllocator::shrink(Span& span, size_t newSize) noexcept { Error JitAllocator::shrink(Span& span, size_t newSize) noexcept {
if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
if (ASMJIT_UNLIKELY(!span.rx())) if (ASMJIT_UNLIKELY(!span.rx())) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
if (ASMJIT_UNLIKELY(newSize == 0)) { if (ASMJIT_UNLIKELY(newSize == 0)) {
Error err = release(span.rx()); Error err = release(span.rx());
@@ -1021,15 +1086,17 @@ Error JitAllocator::shrink(Span& span, size_t newSize) noexcept {
Error JitAllocator::query(Span& out, void* rx) const noexcept { Error JitAllocator::query(Span& out, void* rx) const noexcept {
out = Span{}; out = Span{};
if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none)) {
return DebugUtils::errored(kErrorNotInitialized); return DebugUtils::errored(kErrorNotInitialized);
}
JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl); JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
LockGuard guard(impl->lock); LockGuard guard(impl->lock);
JitAllocatorBlock* block = impl->tree.get(static_cast<uint8_t*>(rx)); JitAllocatorBlock* block = impl->tree.get(static_cast<uint8_t*>(rx));
if (ASMJIT_UNLIKELY(!block)) if (ASMJIT_UNLIKELY(!block)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
// Offset relative to the start of the block. // Offset relative to the start of the block.
JitAllocatorPool* pool = block->pool(); JitAllocatorPool* pool = block->pool();
@@ -1039,8 +1106,9 @@ Error JitAllocator::query(Span& out, void* rx) const noexcept {
uint32_t areaStart = uint32_t(offset >> pool->granularityLog2); uint32_t areaStart = uint32_t(offset >> pool->granularityLog2);
bool isUsed = Support::bitVectorGetBit(block->_usedBitVector, areaStart); bool isUsed = Support::bitVectorGetBit(block->_usedBitVector, areaStart);
if (ASMJIT_UNLIKELY(!isUsed)) if (ASMJIT_UNLIKELY(!isUsed)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
uint32_t areaEnd = uint32_t(Support::bitVectorIndexOf(block->_stopBitVector, areaStart, true)) + 1; uint32_t areaEnd = uint32_t(Support::bitVectorIndexOf(block->_stopBitVector, areaStart, true)) + 1;
size_t byteOffset = pool->byteSizeFromAreaSize(areaStart); size_t byteOffset = pool->byteSizeFromAreaSize(areaStart);
@@ -1057,22 +1125,27 @@ Error JitAllocator::query(Span& out, void* rx) const noexcept {
// JitAllocator - Write // JitAllocator - Write
// ==================== // ====================
static ASMJIT_FORCE_INLINE VirtMem::CachePolicy JitAllocator_defaultPolicyForSpan(const JitAllocator::Span& span) noexcept { static ASMJIT_INLINE VirtMem::CachePolicy JitAllocator_defaultPolicyForSpan(const JitAllocator::Span& span) noexcept {
if (Support::test(span.flags(), JitAllocator::Span::Flags::kInstructionCacheClean)) if (Support::test(span.flags(), JitAllocator::Span::Flags::kInstructionCacheClean)) {
return VirtMem::CachePolicy::kNeverFlush; return VirtMem::CachePolicy::kNeverFlush;
else }
else {
return VirtMem::CachePolicy::kFlushAfterWrite; return VirtMem::CachePolicy::kFlushAfterWrite;
}
} }
Error JitAllocator::write(Span& span, size_t offset, const void* src, size_t size, VirtMem::CachePolicy policy) noexcept { Error JitAllocator::write(Span& span, size_t offset, const void* src, size_t size, VirtMem::CachePolicy policy) noexcept {
if (ASMJIT_UNLIKELY(span._block == nullptr || offset > span.size() || span.size() - offset < size)) if (ASMJIT_UNLIKELY(span._block == nullptr || offset > span.size() || span.size() - offset < size)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
if (ASMJIT_UNLIKELY(size == 0)) if (ASMJIT_UNLIKELY(size == 0)) {
return kErrorOk; return kErrorOk;
}
if (policy == VirtMem::CachePolicy::kDefault) if (policy == VirtMem::CachePolicy::kDefault) {
policy = JitAllocator_defaultPolicyForSpan(span); policy = JitAllocator_defaultPolicyForSpan(span);
}
VirtMem::ProtectJitReadWriteScope writeScope(span.rx(), span.size(), policy); VirtMem::ProtectJitReadWriteScope writeScope(span.rx(), span.size(), policy);
memcpy(static_cast<uint8_t*>(span.rw()) + offset, src, size); memcpy(static_cast<uint8_t*>(span.rw()) + offset, src, size);
@@ -1080,15 +1153,18 @@ Error JitAllocator::write(Span& span, size_t offset, const void* src, size_t siz
} }
Error JitAllocator::write(Span& span, WriteFunc writeFunc, void* userData, VirtMem::CachePolicy policy) noexcept { Error JitAllocator::write(Span& span, WriteFunc writeFunc, void* userData, VirtMem::CachePolicy policy) noexcept {
if (ASMJIT_UNLIKELY(span._block == nullptr) || span.size() == 0) if (ASMJIT_UNLIKELY(span._block == nullptr) || span.size() == 0) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
size_t size = span.size(); size_t size = span.size();
if (ASMJIT_UNLIKELY(size == 0)) if (ASMJIT_UNLIKELY(size == 0)) {
return kErrorOk; return kErrorOk;
}
if (policy == VirtMem::CachePolicy::kDefault) if (policy == VirtMem::CachePolicy::kDefault) {
policy = JitAllocator_defaultPolicyForSpan(span); policy = JitAllocator_defaultPolicyForSpan(span);
}
VirtMem::ProtectJitReadWriteScope writeScope(span.rx(), span.size(), policy); VirtMem::ProtectJitReadWriteScope writeScope(span.rx(), span.size(), policy);
ASMJIT_PROPAGATE(writeFunc(span, userData)); ASMJIT_PROPAGATE(writeFunc(span, userData));
@@ -1113,30 +1189,34 @@ Error JitAllocator::beginWriteScope(WriteScopeData& scope, VirtMem::CachePolicy
} }
Error JitAllocator::endWriteScope(WriteScopeData& scope) noexcept { Error JitAllocator::endWriteScope(WriteScopeData& scope) noexcept {
if (ASMJIT_UNLIKELY(!scope._allocator)) if (ASMJIT_UNLIKELY(!scope._allocator)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
return kErrorOk; return kErrorOk;
} }
Error JitAllocator::flushWriteScope(WriteScopeData& scope) noexcept { Error JitAllocator::flushWriteScope(WriteScopeData& scope) noexcept {
if (ASMJIT_UNLIKELY(!scope._allocator)) if (ASMJIT_UNLIKELY(!scope._allocator)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
return kErrorOk; return kErrorOk;
} }
Error JitAllocator::scopedWrite(WriteScopeData& scope, Span& span, size_t offset, const void* src, size_t size) noexcept { Error JitAllocator::scopedWrite(WriteScopeData& scope, Span& span, size_t offset, const void* src, size_t size) noexcept {
if (ASMJIT_UNLIKELY(!scope._allocator)) if (ASMJIT_UNLIKELY(!scope._allocator)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
VirtMem::CachePolicy policy = VirtMem::CachePolicy(scope._data[0]); VirtMem::CachePolicy policy = VirtMem::CachePolicy(scope._data[0]);
return scope._allocator->write(span, offset, src, size, policy); return scope._allocator->write(span, offset, src, size, policy);
} }
Error JitAllocator::scopedWrite(WriteScopeData& scope, Span& span, WriteFunc writeFunc, void* userData) noexcept { Error JitAllocator::scopedWrite(WriteScopeData& scope, Span& span, WriteFunc writeFunc, void* userData) noexcept {
if (ASMJIT_UNLIKELY(!scope._allocator)) if (ASMJIT_UNLIKELY(!scope._allocator)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
VirtMem::CachePolicy policy = VirtMem::CachePolicy(scope._data[0]); VirtMem::CachePolicy policy = VirtMem::CachePolicy(scope._data[0]);
return scope._allocator->write(span, writeFunc, userData, policy); return scope._allocator->write(span, writeFunc, userData, policy);
@@ -1334,8 +1414,9 @@ public:
Record* record = _records.get(static_cast<uint8_t*>(p)); Record* record = _records.get(static_cast<uint8_t*>(p));
EXPECT_NOT_NULL(record); EXPECT_NOT_NULL(record);
if (!newSize) if (!newSize) {
return release(p); return release(p);
}
JitAllocator::Span span; JitAllocator::Span span;
EXPECT_EQ(_allocator.query(span, p), kErrorOk); EXPECT_EQ(_allocator.query(span, p), kErrorOk);
@@ -1375,10 +1456,12 @@ static void BitVectorRangeIterator_testRandom(Random& rnd, size_t count) noexcep
BitVectorRangeIterator<T, Bit> it(in, kPatternSize); BitVectorRangeIterator<T, Bit> it(in, kPatternSize);
size_t rangeStart, rangeEnd; size_t rangeStart, rangeEnd;
while (it.nextRange(&rangeStart, &rangeEnd)) { while (it.nextRange(&rangeStart, &rangeEnd)) {
if (Bit) if (Bit) {
Support::bitVectorFill(out, rangeStart, rangeEnd - rangeStart); Support::bitVectorFill(out, rangeStart, rangeEnd - rangeStart);
else }
else {
Support::bitVectorClear(out, rangeStart, rangeEnd - rangeStart); Support::bitVectorClear(out, rangeStart, rangeEnd - rangeStart);
}
} }
} }
@@ -1463,80 +1546,94 @@ static void test_jit_allocator_alloc_release() noexcept {
// Random blocks tests... // Random blocks tests...
INFO(" Allocating random blocks..."); INFO(" Allocating random blocks...");
for (i = 0; i < kCount; i++) for (i = 0; i < kCount; i++) {
ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8); ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8);
}
JitAllocatorTest_usage(wrapper._allocator); JitAllocatorTest_usage(wrapper._allocator);
INFO(" Releasing all allocated blocks from the beginning..."); INFO(" Releasing all allocated blocks from the beginning...");
for (i = 0; i < kCount; i++) for (i = 0; i < kCount; i++) {
wrapper.release(ptrArray[i]); wrapper.release(ptrArray[i]);
}
JitAllocatorTest_usage(wrapper._allocator); JitAllocatorTest_usage(wrapper._allocator);
INFO(" Allocating random blocks again...", kCount); INFO(" Allocating random blocks again...", kCount);
for (i = 0; i < kCount; i++) for (i = 0; i < kCount; i++) {
ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8); ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8);
}
JitAllocatorTest_usage(wrapper._allocator); JitAllocatorTest_usage(wrapper._allocator);
INFO(" Shuffling allocated blocks..."); INFO(" Shuffling allocated blocks...");
JitAllocatorTest_shuffle(ptrArray, unsigned(kCount), prng); JitAllocatorTest_shuffle(ptrArray, unsigned(kCount), prng);
INFO(" Releasing 50%% of allocated blocks..."); INFO(" Releasing 50%% of allocated blocks...");
for (i = 0; i < kCount / 2; i++) for (i = 0; i < kCount / 2; i++) {
wrapper.release(ptrArray[i]); wrapper.release(ptrArray[i]);
}
JitAllocatorTest_usage(wrapper._allocator); JitAllocatorTest_usage(wrapper._allocator);
INFO(" Allocating 50%% more blocks again..."); INFO(" Allocating 50%% more blocks again...");
for (i = 0; i < kCount / 2; i++) for (i = 0; i < kCount / 2; i++) {
ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8); ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8);
}
JitAllocatorTest_usage(wrapper._allocator); JitAllocatorTest_usage(wrapper._allocator);
INFO(" Releasing all allocated blocks from the end..."); INFO(" Releasing all allocated blocks from the end...");
for (i = 0; i < kCount; i++) for (i = 0; i < kCount; i++) {
wrapper.release(ptrArray[kCount - i - 1]); wrapper.release(ptrArray[kCount - i - 1]);
}
JitAllocatorTest_usage(wrapper._allocator); JitAllocatorTest_usage(wrapper._allocator);
// Fixed blocks tests... // Fixed blocks tests...
INFO(" Allocating %zuB blocks...", fixedBlockSize); INFO(" Allocating %zuB blocks...", fixedBlockSize);
for (i = 0; i < kCount / 2; i++) for (i = 0; i < kCount / 2; i++) {
ptrArray[i] = wrapper.alloc(fixedBlockSize); ptrArray[i] = wrapper.alloc(fixedBlockSize);
}
JitAllocatorTest_usage(wrapper._allocator); JitAllocatorTest_usage(wrapper._allocator);
INFO(" Shrinking each %zuB block to 1 byte", fixedBlockSize); INFO(" Shrinking each %zuB block to 1 byte", fixedBlockSize);
for (i = 0; i < kCount / 2; i++) for (i = 0; i < kCount / 2; i++) {
wrapper.shrink(ptrArray[i], 1); wrapper.shrink(ptrArray[i], 1);
}
JitAllocatorTest_usage(wrapper._allocator); JitAllocatorTest_usage(wrapper._allocator);
INFO(" Allocating more 64B blocks...", 64); INFO(" Allocating more 64B blocks...", 64);
for (i = kCount / 2; i < kCount; i++) for (i = kCount / 2; i < kCount; i++) {
ptrArray[i] = wrapper.alloc(64); ptrArray[i] = wrapper.alloc(64);
}
JitAllocatorTest_usage(wrapper._allocator); JitAllocatorTest_usage(wrapper._allocator);
INFO(" Releasing all blocks from the beginning..."); INFO(" Releasing all blocks from the beginning...");
for (i = 0; i < kCount; i++) for (i = 0; i < kCount; i++) {
wrapper.release(ptrArray[i]); wrapper.release(ptrArray[i]);
}
JitAllocatorTest_usage(wrapper._allocator); JitAllocatorTest_usage(wrapper._allocator);
INFO(" Allocating %zuB blocks...", fixedBlockSize); INFO(" Allocating %zuB blocks...", fixedBlockSize);
for (i = 0; i < kCount; i++) for (i = 0; i < kCount; i++) {
ptrArray[i] = wrapper.alloc(fixedBlockSize); ptrArray[i] = wrapper.alloc(fixedBlockSize);
}
JitAllocatorTest_usage(wrapper._allocator); JitAllocatorTest_usage(wrapper._allocator);
INFO(" Shuffling allocated blocks..."); INFO(" Shuffling allocated blocks...");
JitAllocatorTest_shuffle(ptrArray, unsigned(kCount), prng); JitAllocatorTest_shuffle(ptrArray, unsigned(kCount), prng);
INFO(" Releasing 50%% of allocated blocks..."); INFO(" Releasing 50%% of allocated blocks...");
for (i = 0; i < kCount / 2; i++) for (i = 0; i < kCount / 2; i++) {
wrapper.release(ptrArray[i]); wrapper.release(ptrArray[i]);
}
JitAllocatorTest_usage(wrapper._allocator); JitAllocatorTest_usage(wrapper._allocator);
INFO(" Allocating 50%% more %zuB blocks again...", fixedBlockSize); INFO(" Allocating 50%% more %zuB blocks again...", fixedBlockSize);
for (i = 0; i < kCount / 2; i++) for (i = 0; i < kCount / 2; i++) {
ptrArray[i] = wrapper.alloc(fixedBlockSize); ptrArray[i] = wrapper.alloc(fixedBlockSize);
}
JitAllocatorTest_usage(wrapper._allocator); JitAllocatorTest_usage(wrapper._allocator);
INFO(" Releasing all allocated blocks from the end..."); INFO(" Releasing all allocated blocks from the end...");
for (i = 0; i < kCount; i++) for (i = 0; i < kCount; i++) {
wrapper.release(ptrArray[kCount - i - 1]); wrapper.release(ptrArray[kCount - i - 1]);
}
JitAllocatorTest_usage(wrapper._allocator); JitAllocatorTest_usage(wrapper._allocator);
::free(ptrArray); ::free(ptrArray);
View File
@@ -182,6 +182,7 @@ public:
//! Destroys the `JitAllocator` instance and release all blocks held. //! Destroys the `JitAllocator` instance and release all blocks held.
ASMJIT_API ~JitAllocator() noexcept; ASMJIT_API ~JitAllocator() noexcept;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _impl->blockSize == 0; } ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _impl->blockSize == 0; }
//! Free all allocated memory - makes all pointers returned by `alloc()` invalid. //! Free all allocated memory - makes all pointers returned by `alloc()` invalid.
@@ -196,15 +197,23 @@ public:
//! \{ //! \{
//! Returns allocator options, see `Flags`. //! Returns allocator options, see `Flags`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG JitAllocatorOptions options() const noexcept { return _impl->options; } ASMJIT_INLINE_NODEBUG JitAllocatorOptions options() const noexcept { return _impl->options; }
//! Tests whether the allocator has the given `option` set. //! Tests whether the allocator has the given `option` set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOption(JitAllocatorOptions option) const noexcept { return uint32_t(_impl->options & option) != 0; } ASMJIT_INLINE_NODEBUG bool hasOption(JitAllocatorOptions option) const noexcept { return uint32_t(_impl->options & option) != 0; }
//! Returns a base block size (a minimum size of block that the allocator would allocate). //! Returns a base block size (a minimum size of block that the allocator would allocate).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t blockSize() const noexcept { return _impl->blockSize; } ASMJIT_INLINE_NODEBUG uint32_t blockSize() const noexcept { return _impl->blockSize; }
//! Returns granularity of the allocator. //! Returns granularity of the allocator.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t granularity() const noexcept { return _impl->granularity; } ASMJIT_INLINE_NODEBUG uint32_t granularity() const noexcept { return _impl->granularity; }
//! Returns pattern that is used to fill unused memory if `kFlagUseFillPattern` is set. //! Returns pattern that is used to fill unused memory if `kFlagUseFillPattern` is set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t fillPattern() const noexcept { return _impl->fillPattern; } ASMJIT_INLINE_NODEBUG uint32_t fillPattern() const noexcept { return _impl->fillPattern; }
//! \} //! \}
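These accessors mirror what can be configured at construction time. A hedged sketch - the CreateParams field name below is an assumption, not quoted from this hunk:

JitAllocator::CreateParams params {};                       // zero-initialized
params.options = JitAllocatorOptions::kFillUnusedMemory;    // assumed field name
JitAllocator allocator(&params);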
@@ -265,6 +274,7 @@ public:
//! Returns a pointer having Read & Execute permissions (references executable memory). //! Returns a pointer having Read & Execute permissions (references executable memory).
//! //!
//! This pointer is never NULL if the allocation succeeded, it points to an executable memory. //! This pointer is never NULL if the allocation succeeded, it points to an executable memory.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG void* rx() const noexcept { return _rx; } ASMJIT_INLINE_NODEBUG void* rx() const noexcept { return _rx; }
//! Returns a pointer having Read & Write permissions (references writable memory). //! Returns a pointer having Read & Write permissions (references writable memory).
@@ -284,12 +294,15 @@ public:
//! //!
//! If \ref VirtMem::ProtectJitReadWriteScope is not used it's important to clear the instruction cache via //! If \ref VirtMem::ProtectJitReadWriteScope is not used it's important to clear the instruction cache via
//! \ref VirtMem::flushInstructionCache() after the write is done. //! \ref VirtMem::flushInstructionCache() after the write is done.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG void* rw() const noexcept { return _rw; } ASMJIT_INLINE_NODEBUG void* rw() const noexcept { return _rw; }
//! Returns size of this span, aligned to the allocator granularity. //! Returns size of this span, aligned to the allocator granularity.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; } ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
//! Returns span flags. //! Returns span flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Flags flags() const noexcept { return _flags; } ASMJIT_INLINE_NODEBUG Flags flags() const noexcept { return _flags; }
//! Shrinks this span to `newSize`. //! Shrinks this span to `newSize`.
@@ -300,12 +313,14 @@ public:
ASMJIT_INLINE_NODEBUG void shrink(size_t newSize) noexcept { _size = Support::min(_size, newSize); } ASMJIT_INLINE_NODEBUG void shrink(size_t newSize) noexcept { _size = Support::min(_size, newSize); }
//! Returns whether \ref rw() returns a non-null pointer. //! Returns whether \ref rw() returns a non-null pointer.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isDirectlyWritable() const noexcept { return _rw != nullptr; } ASMJIT_INLINE_NODEBUG bool isDirectlyWritable() const noexcept { return _rw != nullptr; }
//! \} //! \}
}; };
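A short sketch of the direct-write path described above; VirtMem::flushInstructionCache() is the call referenced by the comment, the rest is illustrative:

#include <asmjit/core.h>
#include <cstring>
using namespace asmjit;

// Copies machine code into a span obtained from JitAllocator::alloc().
void copyIntoSpan(JitAllocator::Span& span, const void* code, size_t codeSize) {
  if (span.isDirectlyWritable()) {
    memcpy(span.rw(), code, codeSize);
    VirtMem::flushInstructionCache(span.rx(), span.size());
  }
}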
//! Allocates a new memory span of the requested `size`. //! Allocates a new memory span of the requested `size`.
[[nodiscard]]
ASMJIT_API Error alloc(Span& out, size_t size) noexcept; ASMJIT_API Error alloc(Span& out, size_t size) noexcept;
//! Releases a memory block returned by `alloc()`. //! Releases a memory block returned by `alloc()`.
@@ -321,6 +336,7 @@ public:
//! Queries information about an allocated memory block that contains the given `rx`, and writes it to `out`. //! Queries information about an allocated memory block that contains the given `rx`, and writes it to `out`.
//! //!
//! If the pointer is matched, the function returns `kErrorOk` and fills `out` with the corresponding span. //! If the pointer is matched, the function returns `kErrorOk` and fills `out` with the corresponding span.
[[nodiscard]]
ASMJIT_API Error query(Span& out, void* rx) const noexcept; ASMJIT_API Error query(Span& out, void* rx) const noexcept;
//! \} //! \}
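Putting alloc(), release() and the write() API declared below together - a minimal sketch with error handling mostly elided; the single 0xC3 byte is just an illustrative x86 'ret':

#include <asmjit/core.h>
#include <cstdint>
using namespace asmjit;

void allocWriteRelease() {
  JitAllocator allocator;            // default parameters
  JitAllocator::Span span;

  if (allocator.alloc(span, 64) == kErrorOk) {
    uint8_t code[] = { 0xC3 };                       // x86 'ret' - illustration only
    allocator.write(span, 0, code, sizeof(code));    // copies through the RW view
    // span.rx() now points to executable memory holding `code`.
    allocator.release(span.rx());
  }
}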
@@ -328,7 +344,7 @@ public:
//! \name Write Operations //! \name Write Operations
//! \{ //! \{
typedef Error (ASMJIT_CDECL* WriteFunc)(Span& span, void* userData) ASMJIT_NOEXCEPT_TYPE; using WriteFunc = Error (ASMJIT_CDECL*)(Span& span, void* userData) noexcept;
ASMJIT_API Error write( ASMJIT_API Error write(
Span& span, Span& span,
@@ -344,7 +360,7 @@ public:
VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept; VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept;
template<class Lambda> template<class Lambda>
ASMJIT_FORCE_INLINE Error write( ASMJIT_INLINE Error write(
Span& span, Span& span,
Lambda&& lambdaFunc, Lambda&& lambdaFunc,
VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept { VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept {
@@ -445,7 +461,12 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
//! Returns the \ref JitAllocator associated with this write scope.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG JitAllocator* allocator() const noexcept { return _allocator; } ASMJIT_INLINE_NODEBUG JitAllocator* allocator() const noexcept { return _allocator; }
//! Returns the cache policy this write scope is using.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VirtMem::CachePolicy policy() const noexcept { return _policy; } ASMJIT_INLINE_NODEBUG VirtMem::CachePolicy policy() const noexcept { return _policy; }
//! \} //! \}
@@ -499,27 +520,40 @@ public:
ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = Statistics{}; } ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = Statistics{}; }
//! Returns count of blocks managed by `JitAllocator` at the moment. //! Returns count of blocks managed by `JitAllocator` at the moment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t blockCount() const noexcept { return _blockCount; } ASMJIT_INLINE_NODEBUG size_t blockCount() const noexcept { return _blockCount; }
//! Returns the number of active allocations. //! Returns the number of active allocations.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t allocationCount() const noexcept { return _allocationCount; } ASMJIT_INLINE_NODEBUG size_t allocationCount() const noexcept { return _allocationCount; }
//! Returns how many bytes are currently used. //! Returns how many bytes are currently used.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t usedSize() const noexcept { return _usedSize; } ASMJIT_INLINE_NODEBUG size_t usedSize() const noexcept { return _usedSize; }
//! Returns the number of bytes unused by the allocator at the moment. //! Returns the number of bytes unused by the allocator at the moment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t unusedSize() const noexcept { return _reservedSize - _usedSize; } ASMJIT_INLINE_NODEBUG size_t unusedSize() const noexcept { return _reservedSize - _usedSize; }
//! Returns the total number of bytes reserved by the allocator (sum of sizes of all blocks). //! Returns the total number of bytes reserved by the allocator (sum of sizes of all blocks).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t reservedSize() const noexcept { return _reservedSize; } ASMJIT_INLINE_NODEBUG size_t reservedSize() const noexcept { return _reservedSize; }
//! Returns the number of bytes the allocator needs to manage the allocated memory. //! Returns the number of bytes the allocator needs to manage the allocated memory.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t overheadSize() const noexcept { return _overheadSize; } ASMJIT_INLINE_NODEBUG size_t overheadSize() const noexcept { return _overheadSize; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG double usedSizeAsPercent() const noexcept { ASMJIT_INLINE_NODEBUG double usedSizeAsPercent() const noexcept {
return (double(usedSize()) / (double(reservedSize()) + 1e-16)) * 100.0; return (double(usedSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG double unusedSizeAsPercent() const noexcept { ASMJIT_INLINE_NODEBUG double unusedSizeAsPercent() const noexcept {
return (double(unusedSize()) / (double(reservedSize()) + 1e-16)) * 100.0; return (double(unusedSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG double overheadSizeAsPercent() const noexcept { ASMJIT_INLINE_NODEBUG double overheadSizeAsPercent() const noexcept {
return (double(overheadSize()) / (double(reservedSize()) + 1e-16)) * 100.0; return (double(overheadSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
} }
@@ -528,6 +562,7 @@ public:
//! Returns JIT allocator statistics. //! Returns JIT allocator statistics.
//! //!
//! \remarks This function is thread-safe. //! \remarks This function is thread-safe.
[[nodiscard]]
ASMJIT_API Statistics statistics() const noexcept; ASMJIT_API Statistics statistics() const noexcept;
//! \} //! \}
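A quick sketch of reading the statistics getters shown above:

#include <asmjit/core.h>
#include <cstdio>
using namespace asmjit;

void printStats(const JitAllocator& allocator) {
  JitAllocator::Statistics stats = allocator.statistics();
  printf("blocks=%zu allocations=%zu used=%zu/%zu bytes (%.1f%%)\n",
         stats.blockCount(), stats.allocationCount(),
         stats.usedSize(), stats.reservedSize(), stats.usedSizeAsPercent());
}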
View File
@@ -27,8 +27,9 @@ Error JitRuntime::_add(void** dst, CodeHolder* code) noexcept {
ASMJIT_PROPAGATE(code->resolveUnresolvedLinks()); ASMJIT_PROPAGATE(code->resolveUnresolvedLinks());
size_t estimatedCodeSize = code->codeSize(); size_t estimatedCodeSize = code->codeSize();
if (ASMJIT_UNLIKELY(estimatedCodeSize == 0)) if (ASMJIT_UNLIKELY(estimatedCodeSize == 0)) {
return DebugUtils::errored(kErrorNoCodeGenerated); return DebugUtils::errored(kErrorNoCodeGenerated);
}
JitAllocator::Span span; JitAllocator::Span span;
ASMJIT_PROPAGATE(_allocator.alloc(span, estimatedCodeSize)); ASMJIT_PROPAGATE(_allocator.alloc(span, estimatedCodeSize));
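For context, the public entry point that reaches _add() above is JitRuntime::add(); a minimal sketch of that path (not part of this commit, makeFunc is a hypothetical name, and the emitter setup is elided):

#include <asmjit/core.h>

using Func = int (*)(void);

// Returns kErrorNoCodeGenerated (the condition checked above) when nothing was emitted into `code`.
static asmjit::Error makeFunc(asmjit::JitRuntime& rt, asmjit::CodeHolder& code, Func* out) {
  // ... `code` is expected to be initialized and filled by an Assembler/Builder/Compiler ...
  asmjit::Error err = rt.add(out, &code);
  if (err != asmjit::kErrorOk) {
    *out = nullptr;
  }
  return err;
}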
View File
@@ -59,6 +59,7 @@ public:
//! \{ //! \{
//! Returns the associated `JitAllocator`. //! Returns the associated `JitAllocator`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG JitAllocator* allocator() const noexcept { return const_cast<JitAllocator*>(&_allocator); } ASMJIT_INLINE_NODEBUG JitAllocator* allocator() const noexcept { return const_cast<JitAllocator*>(&_allocator); }
//! \} //! \}
View File
@@ -52,11 +52,13 @@ FileLogger::FileLogger(FILE* file) noexcept
FileLogger::~FileLogger() noexcept {} FileLogger::~FileLogger() noexcept {}
Error FileLogger::_log(const char* data, size_t size) noexcept { Error FileLogger::_log(const char* data, size_t size) noexcept {
if (!_file) if (!_file) {
return kErrorOk; return kErrorOk;
}
if (size == SIZE_MAX) if (size == SIZE_MAX) {
size = strlen(data); size = strlen(data);
}
fwrite(data, 1, size, _file); fwrite(data, 1, size, _file);
return kErrorOk; return kErrorOk;
View File
@@ -47,36 +47,53 @@ public:
//! \{ //! \{
//! Returns \ref FormatOptions of this logger. //! Returns \ref FormatOptions of this logger.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FormatOptions& options() noexcept { return _options; } ASMJIT_INLINE_NODEBUG FormatOptions& options() noexcept { return _options; }
//! \overload //! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FormatOptions& options() const noexcept { return _options; } ASMJIT_INLINE_NODEBUG const FormatOptions& options() const noexcept { return _options; }
//! Sets formatting options of this Logger to `options`. //! Sets formatting options of this Logger to `options`.
ASMJIT_INLINE_NODEBUG void setOptions(const FormatOptions& options) noexcept { _options = options; } ASMJIT_INLINE_NODEBUG void setOptions(const FormatOptions& options) noexcept { _options = options; }
//! Resets formatting options of this Logger to defaults. //! Resets formatting options of this Logger to defaults.
ASMJIT_INLINE_NODEBUG void resetOptions() noexcept { _options.reset(); } ASMJIT_INLINE_NODEBUG void resetOptions() noexcept { _options.reset(); }
//! Returns formatting flags. //! Returns formatting flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FormatFlags flags() const noexcept { return _options.flags(); } ASMJIT_INLINE_NODEBUG FormatFlags flags() const noexcept { return _options.flags(); }
//! Tests whether the logger has the given `flag` enabled. //! Tests whether the logger has the given `flag` enabled.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(FormatFlags flag) const noexcept { return _options.hasFlag(flag); } ASMJIT_INLINE_NODEBUG bool hasFlag(FormatFlags flag) const noexcept { return _options.hasFlag(flag); }
//! Sets formatting flags to `flags`. //! Sets formatting flags to `flags`.
ASMJIT_INLINE_NODEBUG void setFlags(FormatFlags flags) noexcept { _options.setFlags(flags); } ASMJIT_INLINE_NODEBUG void setFlags(FormatFlags flags) noexcept { _options.setFlags(flags); }
//! Enables the given formatting `flags`. //! Enables the given formatting `flags`.
ASMJIT_INLINE_NODEBUG void addFlags(FormatFlags flags) noexcept { _options.addFlags(flags); } ASMJIT_INLINE_NODEBUG void addFlags(FormatFlags flags) noexcept { _options.addFlags(flags); }
//! Disables the given formatting `flags`. //! Disables the given formatting `flags`.
ASMJIT_INLINE_NODEBUG void clearFlags(FormatFlags flags) noexcept { _options.clearFlags(flags); } ASMJIT_INLINE_NODEBUG void clearFlags(FormatFlags flags) noexcept { _options.clearFlags(flags); }
//! Returns indentation of a given indentation `group`. //! Returns indentation of a given indentation `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t indentation(FormatIndentationGroup type) const noexcept { return _options.indentation(type); } ASMJIT_INLINE_NODEBUG uint32_t indentation(FormatIndentationGroup type) const noexcept { return _options.indentation(type); }
//! Sets indentation of the given indentation `group` to `n` spaces. //! Sets indentation of the given indentation `group` to `n` spaces.
ASMJIT_INLINE_NODEBUG void setIndentation(FormatIndentationGroup type, uint32_t n) noexcept { _options.setIndentation(type, n); } ASMJIT_INLINE_NODEBUG void setIndentation(FormatIndentationGroup type, uint32_t n) noexcept { _options.setIndentation(type, n); }
//! Resets indentation of the given indentation `group` to 0 spaces. //! Resets indentation of the given indentation `group` to 0 spaces.
ASMJIT_INLINE_NODEBUG void resetIndentation(FormatIndentationGroup type) noexcept { _options.resetIndentation(type); } ASMJIT_INLINE_NODEBUG void resetIndentation(FormatIndentationGroup type) noexcept { _options.resetIndentation(type); }
//! Returns padding of a given padding `group`. //! Returns padding of a given padding `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t padding(FormatPaddingGroup type) const noexcept { return _options.padding(type); } ASMJIT_INLINE_NODEBUG size_t padding(FormatPaddingGroup type) const noexcept { return _options.padding(type); }
//! Sets padding of a given padding `group` to `n`. //! Sets padding of a given padding `group` to `n`.
ASMJIT_INLINE_NODEBUG void setPadding(FormatPaddingGroup type, uint32_t n) noexcept { _options.setPadding(type, n); } ASMJIT_INLINE_NODEBUG void setPadding(FormatPaddingGroup type, uint32_t n) noexcept { _options.setPadding(type, n); }
//! Resets padding of a given padding `group` to 0, which means that a default will be used. //! Resets padding of a given padding `group` to 0, which means that a default will be used.
ASMJIT_INLINE_NODEBUG void resetPadding(FormatPaddingGroup type) noexcept { _options.resetPadding(type); } ASMJIT_INLINE_NODEBUG void resetPadding(FormatPaddingGroup type) noexcept { _options.resetPadding(type); }
@@ -127,6 +144,7 @@ public:
//! \{ //! \{
//! Returns the logging output stream or null if the logger has no output stream. //! Returns the logging output stream or null if the logger has no output stream.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FILE* file() const noexcept { return _file; } ASMJIT_INLINE_NODEBUG FILE* file() const noexcept { return _file; }
//! Sets the logging output stream to `stream` or null. //! Sets the logging output stream to `stream` or null.
@@ -165,15 +183,21 @@ public:
//! Returns the content of the logger as \ref String. //! Returns the content of the logger as \ref String.
//! //!
//! It can be moved, if desired. //! It can be moved, if desired.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG String& content() noexcept { return _content; } ASMJIT_INLINE_NODEBUG String& content() noexcept { return _content; }
//! \overload //! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const String& content() const noexcept { return _content; } ASMJIT_INLINE_NODEBUG const String& content() const noexcept { return _content; }
//! Returns aggregated logger data as `char*` pointer. //! Returns aggregated logger data as `char*` pointer.
//! //!
//! The pointer is owned by `StringLogger`; it can't be modified or freed. //! The pointer is owned by `StringLogger`; it can't be modified or freed.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* data() const noexcept { return _content.data(); } ASMJIT_INLINE_NODEBUG const char* data() const noexcept { return _content.data(); }
//! Returns size of the data returned by `data()`. //! Returns size of the data returned by `data()`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t dataSize() const noexcept { return _content.size(); } ASMJIT_INLINE_NODEBUG size_t dataSize() const noexcept { return _content.size(); }
//! \} //! \}
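A brief usage sketch for the accessors above (not from this commit; it assumes the usual CodeHolder::setLogger() attachment point, and dumpLog is a made-up name):

#include <asmjit/core.h>
#include <cstdio>

// Collects formatted output in memory and prints it through data()/dataSize().
static void dumpLog(asmjit::CodeHolder& code) {
  asmjit::StringLogger logger;
  logger.addFlags(asmjit::FormatFlags::kMachineCode);  // one of the formatting flags handled above
  code.setLogger(&logger);

  // ... emit code; each logged line is appended to the logger's String ...

  std::fwrite(logger.data(), 1, logger.dataSize(), stdout);
}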
File diff suppressed because it is too large
View File
@@ -17,8 +17,9 @@ ASMJIT_BEGIN_NAMESPACE
#if !defined(_WIN32) #if !defined(_WIN32)
Error OSUtils::readFile(const char* name, String& dst, size_t maxSize) noexcept { Error OSUtils::readFile(const char* name, String& dst, size_t maxSize) noexcept {
char* buffer = dst.prepare(String::ModifyOp::kAssign, maxSize); char* buffer = dst.prepare(String::ModifyOp::kAssign, maxSize);
if (ASMJIT_UNLIKELY(!buffer)) if (ASMJIT_UNLIKELY(!buffer)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
int fd = ASMJIT_FILE64_API(::open)(name, O_RDONLY); int fd = ASMJIT_FILE64_API(::open)(name, O_RDONLY);
if (fd < 0) { if (fd < 0) {
View File
@@ -35,7 +35,7 @@ public:
Handle _handle; Handle _handle;
#pragma pack(pop) #pragma pack(pop)
#elif !defined(__EMSCRIPTEN__) #elif !defined(__EMSCRIPTEN__)
typedef pthread_mutex_t Handle; using Handle = pthread_mutex_t;
Handle _handle; Handle _handle;
#endif #endif
View File
@@ -66,6 +66,7 @@ public:
//! PhysReg to WorkReg mapping. //! PhysReg to WorkReg mapping.
uint32_t workIds[1 /* ... */]; uint32_t workIds[1 /* ... */];
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG size_t sizeOf(size_t count) noexcept { static ASMJIT_INLINE_NODEBUG size_t sizeOf(size_t count) noexcept {
return sizeof(PhysToWorkMap) - sizeof(uint32_t) + count * sizeof(uint32_t); return sizeof(PhysToWorkMap) - sizeof(uint32_t) + count * sizeof(uint32_t);
} }
@@ -74,8 +75,9 @@ public:
assigned.reset(); assigned.reset();
dirty.reset(); dirty.reset();
for (size_t i = 0; i < count; i++) for (size_t i = 0; i < count; i++) {
workIds[i] = kWorkNone; workIds[i] = kWorkNone;
}
} }
inline void copyFrom(const PhysToWorkMap* other, size_t count) noexcept { inline void copyFrom(const PhysToWorkMap* other, size_t count) noexcept {
@@ -94,19 +96,22 @@ public:
//! WorkReg to PhysReg mapping //! WorkReg to PhysReg mapping
uint8_t physIds[1 /* ... */]; uint8_t physIds[1 /* ... */];
[[nodiscard]]
static inline size_t sizeOf(size_t count) noexcept { static inline size_t sizeOf(size_t count) noexcept {
return size_t(count) * sizeof(uint8_t); return size_t(count) * sizeof(uint8_t);
} }
inline void reset(size_t count) noexcept { inline void reset(size_t count) noexcept {
for (size_t i = 0; i < count; i++) for (size_t i = 0; i < count; i++) {
physIds[i] = kPhysNone; physIds[i] = kPhysNone;
}
} }
inline void copyFrom(const WorkToPhysMap* other, size_t count) noexcept { inline void copyFrom(const WorkToPhysMap* other, size_t count) noexcept {
size_t size = sizeOf(count); size_t size = sizeOf(count);
if (ASMJIT_LIKELY(size)) if (ASMJIT_LIKELY(size)) {
memcpy(this, other, size); memcpy(this, other, size);
}
} }
}; };
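The sizeOf(count) helpers above size a header struct plus a trailing runtime-sized array carved out of a single allocation; a generic standalone sketch of that pattern (placeholder names, plain malloc instead of the zone allocator used by the pass):

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>

// Mirrors the layout trick above: subtract the one-element placeholder array,
// then add storage for `count` real entries.
struct IdMap {
  uint32_t count;
  uint32_t ids[1];  // actual storage for `count` ids follows the header

  static size_t sizeOf(size_t count) noexcept {
    return sizeof(IdMap) - sizeof(uint32_t) + count * sizeof(uint32_t);
  }
};

static IdMap* newIdMap(size_t count) {
  IdMap* map = static_cast<IdMap*>(std::malloc(IdMap::sizeOf(count)));
  if (!map) return nullptr;
  map->count = uint32_t(count);
  std::memset(map->ids, 0, count * sizeof(uint32_t));
  return map;
}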
@@ -132,7 +137,7 @@ public:
resetMaps(); resetMaps();
} }
ASMJIT_FORCE_INLINE void initLayout(const RARegCount& physCount, const RAWorkRegs& workRegs) noexcept { ASMJIT_INLINE void initLayout(const RARegCount& physCount, const RAWorkRegs& workRegs) noexcept {
// Layout must be initialized before data. // Layout must be initialized before data.
ASMJIT_ASSERT(_physToWorkMap == nullptr); ASMJIT_ASSERT(_physToWorkMap == nullptr);
ASMJIT_ASSERT(_workToPhysMap == nullptr); ASMJIT_ASSERT(_workToPhysMap == nullptr);
@@ -145,14 +150,15 @@ public:
_layout.workRegs = &workRegs; _layout.workRegs = &workRegs;
} }
ASMJIT_FORCE_INLINE void initMaps(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept { ASMJIT_INLINE void initMaps(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept {
_physToWorkMap = physToWorkMap; _physToWorkMap = physToWorkMap;
_workToPhysMap = workToPhysMap; _workToPhysMap = workToPhysMap;
for (RegGroup group : RegGroupVirtValues{}) for (RegGroup group : RegGroupVirtValues{}) {
_physToWorkIds[group] = physToWorkMap->workIds + _layout.physIndex.get(group); _physToWorkIds[group] = physToWorkMap->workIds + _layout.physIndex.get(group);
}
} }
ASMJIT_FORCE_INLINE void resetMaps() noexcept { ASMJIT_INLINE void resetMaps() noexcept {
_physToWorkMap = nullptr; _physToWorkMap = nullptr;
_workToPhysMap = nullptr; _workToPhysMap = nullptr;
_physToWorkIds.fill(nullptr); _physToWorkIds.fill(nullptr);
@@ -163,17 +169,31 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG PhysToWorkMap* physToWorkMap() const noexcept { return _physToWorkMap; } ASMJIT_INLINE_NODEBUG PhysToWorkMap* physToWorkMap() const noexcept { return _physToWorkMap; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG WorkToPhysMap* workToPhysMap() const noexcept { return _workToPhysMap; } ASMJIT_INLINE_NODEBUG WorkToPhysMap* workToPhysMap() const noexcept { return _workToPhysMap; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RARegMask& assigned() noexcept { return _physToWorkMap->assigned; } ASMJIT_INLINE_NODEBUG RARegMask& assigned() noexcept { return _physToWorkMap->assigned; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RARegMask& assigned() const noexcept { return _physToWorkMap->assigned; } ASMJIT_INLINE_NODEBUG const RARegMask& assigned() const noexcept { return _physToWorkMap->assigned; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t assigned(RegGroup group) const noexcept { return _physToWorkMap->assigned[group]; } ASMJIT_INLINE_NODEBUG uint32_t assigned(RegGroup group) const noexcept { return _physToWorkMap->assigned[group]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RARegMask& dirty() noexcept { return _physToWorkMap->dirty; } ASMJIT_INLINE_NODEBUG RARegMask& dirty() noexcept { return _physToWorkMap->dirty; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RARegMask& dirty() const noexcept { return _physToWorkMap->dirty; } ASMJIT_INLINE_NODEBUG const RARegMask& dirty() const noexcept { return _physToWorkMap->dirty; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask dirty(RegGroup group) const noexcept { return _physToWorkMap->dirty[group]; } ASMJIT_INLINE_NODEBUG RegMask dirty(RegGroup group) const noexcept { return _physToWorkMap->dirty[group]; }
[[nodiscard]]
inline uint32_t workToPhysId(RegGroup group, uint32_t workId) const noexcept { inline uint32_t workToPhysId(RegGroup group, uint32_t workId) const noexcept {
DebugUtils::unused(group); DebugUtils::unused(group);
ASMJIT_ASSERT(workId != kWorkNone); ASMJIT_ASSERT(workId != kWorkNone);
@@ -181,16 +201,19 @@ public:
return _workToPhysMap->physIds[workId]; return _workToPhysMap->physIds[workId];
} }
[[nodiscard]]
inline uint32_t physToWorkId(RegGroup group, uint32_t physId) const noexcept { inline uint32_t physToWorkId(RegGroup group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs); ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return _physToWorkIds[group][physId]; return _physToWorkIds[group][physId];
} }
[[nodiscard]]
inline bool isPhysAssigned(RegGroup group, uint32_t physId) const noexcept { inline bool isPhysAssigned(RegGroup group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs); ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return Support::bitTest(_physToWorkMap->assigned[group], physId); return Support::bitTest(_physToWorkMap->assigned[group], physId);
} }
[[nodiscard]]
inline bool isPhysDirty(RegGroup group, uint32_t physId) const noexcept { inline bool isPhysDirty(RegGroup group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs); ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return Support::bitTest(_physToWorkMap->dirty[group], physId); return Support::bitTest(_physToWorkMap->dirty[group], physId);
@@ -304,7 +327,7 @@ public:
//! \name Utilities //! \name Utilities
//! \{ //! \{
ASMJIT_FORCE_INLINE void swap(RAAssignment& other) noexcept { ASMJIT_INLINE void swap(RAAssignment& other) noexcept {
std::swap(_workToPhysMap, other._workToPhysMap); std::swap(_workToPhysMap, other._workToPhysMap);
std::swap(_physToWorkMap, other._physToWorkMap); std::swap(_physToWorkMap, other._physToWorkMap);
_physToWorkIds.swap(other._physToWorkIds); _physToWorkIds.swap(other._physToWorkIds);
@@ -342,6 +365,7 @@ public:
} }
// Not really useful outside of debugging. // Not really useful outside of debugging.
[[nodiscard]]
bool equals(const RAAssignment& other) const noexcept { bool equals(const RAAssignment& other) const noexcept {
// Layout should always match. // Layout should always match.
if (_layout.physIndex != other._layout.physIndex || if (_layout.physIndex != other._layout.physIndex ||
@@ -357,15 +381,17 @@ public:
for (uint32_t physId = 0; physId < physTotal; physId++) { for (uint32_t physId = 0; physId < physTotal; physId++) {
uint32_t thisWorkId = _physToWorkMap->workIds[physId]; uint32_t thisWorkId = _physToWorkMap->workIds[physId];
uint32_t otherWorkId = other._physToWorkMap->workIds[physId]; uint32_t otherWorkId = other._physToWorkMap->workIds[physId];
if (thisWorkId != otherWorkId) if (thisWorkId != otherWorkId) {
return false; return false;
}
} }
for (uint32_t workId = 0; workId < workCount; workId++) { for (uint32_t workId = 0; workId < workCount; workId++) {
uint32_t thisPhysId = _workToPhysMap->physIds[workId]; uint32_t thisPhysId = _workToPhysMap->physIds[workId];
uint32_t otherPhysId = other._workToPhysMap->physIds[workId]; uint32_t otherPhysId = other._workToPhysMap->physIds[workId];
if (thisPhysId != otherPhysId) if (thisPhysId != otherPhysId) {
return false; return false;
}
} }
if (_physToWorkMap->assigned != other._physToWorkMap->assigned || if (_physToWorkMap->assigned != other._physToWorkMap->assigned ||
View File
@@ -21,15 +21,18 @@ ASMJIT_BEGIN_NAMESPACE
template<typename This> template<typename This>
class RACFGBuilderT { class RACFGBuilderT {
public: public:
enum : uint32_t { //! \name Constants
kRootIndentation = 2, //! \{
kCodeIndentation = 4,
// NOTE: This is a bit hacky. There are some nodes which are processed twice (see `onBeforeInvoke()` and static inline constexpr uint32_t kRootIndentation = 2;
// `onBeforeRet()`) as they can insert some nodes around them. Since we don't have any flags to mark these static inline constexpr uint32_t kCodeIndentation = 4;
// we just use their position that is [at that time] unassigned.
kNodePositionDidOnBefore = 0xFFFFFFFFu // NOTE: This is a bit hacky. There are some nodes which are processed twice (see `onBeforeInvoke()` and
}; // `onBeforeRet()`) as they can insert some nodes around them. Since we don't have any flags to mark these
// we just use their position that is [at that time] unassigned.
static inline constexpr uint32_t kNodePositionDidOnBefore = 0xFFFFFFFFu;
//! \}
//! \name Members //! \name Members
//! \{ //! \{
@@ -60,17 +63,20 @@ public:
_cc(pass->cc()) { _cc(pass->cc()) {
#ifndef ASMJIT_NO_LOGGING #ifndef ASMJIT_NO_LOGGING
_logger = _pass->hasDiagnosticOption(DiagnosticOptions::kRADebugCFG) ? _pass->logger() : nullptr; _logger = _pass->hasDiagnosticOption(DiagnosticOptions::kRADebugCFG) ? _pass->logger() : nullptr;
if (_logger) if (_logger) {
_formatOptions = _logger->options(); _formatOptions = _logger->options();
}
#endif #endif
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseCompiler* cc() const noexcept { return _cc; } ASMJIT_INLINE_NODEBUG BaseCompiler* cc() const noexcept { return _cc; }
//! \name Run //! \name Run
//! \{ //! \{
//! Called per function by an architecture-specific CFG builder. //! Called per function by an architecture-specific CFG builder.
[[nodiscard]]
Error run() noexcept { Error run() noexcept {
log("[BuildCFG]\n"); log("[BuildCFG]\n");
ASMJIT_PROPAGATE(prepare()); ASMJIT_PROPAGATE(prepare());
@@ -289,8 +295,9 @@ public:
_hasCode = false; _hasCode = false;
_blockRegStats.reset(); _blockRegStats.reset();
if (_curBlock->isConstructed()) if (_curBlock->isConstructed()) {
break; break;
}
ASMJIT_PROPAGATE(_pass->addBlock(consecutiveBlock)); ASMJIT_PROPAGATE(_pass->addBlock(consecutiveBlock));
logBlock(_curBlock, kRootIndentation); logBlock(_curBlock, kRootIndentation);
@@ -417,16 +424,18 @@ public:
if (node->type() == NodeType::kSentinel) { if (node->type() == NodeType::kSentinel) {
if (node == _funcNode->endNode()) { if (node == _funcNode->endNode()) {
// Make sure we didn't flow here if this is the end of the function sentinel. // Make sure we didn't flow here if this is the end of the function sentinel.
if (ASMJIT_UNLIKELY(_curBlock && _hasCode)) if (ASMJIT_UNLIKELY(_curBlock && _hasCode)) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
break; break;
} }
} }
else if (node->type() == NodeType::kFunc) { else if (node->type() == NodeType::kFunc) {
// RAPass can only compile a single function at a time. If we // RAPass can only compile a single function at a time. If we
// encountered a function it must be the current one, bail if not. // encountered a function it must be the current one, bail if not.
if (ASMJIT_UNLIKELY(node != _funcNode)) if (ASMJIT_UNLIKELY(node != _funcNode)) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
// PASS if this is the first node. // PASS if this is the first node.
} }
else { else {
@@ -440,15 +449,18 @@ public:
// NOTE: We cannot encounter a NULL node, because every function must be terminated by a sentinel (`stop`) // NOTE: We cannot encounter a NULL node, because every function must be terminated by a sentinel (`stop`)
// node. If we encountered a NULL node it means that something went wrong and this node list is corrupted; // node. If we encountered a NULL node it means that something went wrong and this node list is corrupted;
// bail in such case. // bail in such case.
if (ASMJIT_UNLIKELY(!node)) if (ASMJIT_UNLIKELY(!node)) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
} }
if (_pass->hasDanglingBlocks()) if (_pass->hasDanglingBlocks()) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
for (RABlock* block : blocksWithUnknownJumps) for (RABlock* block : blocksWithUnknownJumps) {
handleBlockWithUnknownJump(block); ASMJIT_PROPAGATE(handleBlockWithUnknownJump(block));
}
return _pass->initSharedAssignments(_sharedAssignmentsMap); return _pass->initSharedAssignments(_sharedAssignmentsMap);
} }
@@ -459,6 +471,7 @@ public:
//! \{ //! \{
//! Prepares the CFG builder of the current function. //! Prepares the CFG builder of the current function.
[[nodiscard]]
Error prepare() noexcept { Error prepare() noexcept {
FuncNode* func = _pass->func(); FuncNode* func = _pass->func();
BaseNode* node = nullptr; BaseNode* node = nullptr;
@@ -508,6 +521,7 @@ public:
//! //!
//! If we encounter such block we basically insert all existing blocks as successors except the function entry //! If we encounter such block we basically insert all existing blocks as successors except the function entry
//! block and a natural successor, if such block exists. //! block and a natural successor, if such block exists.
[[nodiscard]]
Error handleBlockWithUnknownJump(RABlock* block) noexcept { Error handleBlockWithUnknownJump(RABlock* block) noexcept {
RABlocks& blocks = _pass->blocks(); RABlocks& blocks = _pass->blocks();
size_t blockCount = blocks.size(); size_t blockCount = blocks.size();
@@ -517,40 +531,48 @@ public:
RABlock* consecutive = block->consecutive(); RABlock* consecutive = block->consecutive();
for (size_t i = 1; i < blockCount; i++) { for (size_t i = 1; i < blockCount; i++) {
RABlock* candidate = blocks[i]; RABlock* candidate = blocks[i];
if (candidate == consecutive || !candidate->isTargetable()) if (candidate == consecutive || !candidate->isTargetable()) {
continue; continue;
block->appendSuccessor(candidate); }
ASMJIT_PROPAGATE(block->appendSuccessor(candidate));
} }
return shareAssignmentAcrossSuccessors(block); return shareAssignmentAcrossSuccessors(block);
} }
[[nodiscard]]
Error shareAssignmentAcrossSuccessors(RABlock* block) noexcept { Error shareAssignmentAcrossSuccessors(RABlock* block) noexcept {
if (block->successors().size() <= 1) if (block->successors().size() <= 1) {
return kErrorOk; return kErrorOk;
}
RABlock* consecutive = block->consecutive(); RABlock* consecutive = block->consecutive();
uint32_t sharedAssignmentId = Globals::kInvalidId; uint32_t sharedAssignmentId = Globals::kInvalidId;
for (RABlock* successor : block->successors()) { for (RABlock* successor : block->successors()) {
if (successor == consecutive) if (successor == consecutive) {
continue; continue;
}
if (successor->hasSharedAssignmentId()) { if (successor->hasSharedAssignmentId()) {
if (sharedAssignmentId == Globals::kInvalidId) if (sharedAssignmentId == Globals::kInvalidId) {
sharedAssignmentId = successor->sharedAssignmentId(); sharedAssignmentId = successor->sharedAssignmentId();
else }
else {
_sharedAssignmentsMap[successor->sharedAssignmentId()] = sharedAssignmentId; _sharedAssignmentsMap[successor->sharedAssignmentId()] = sharedAssignmentId;
}
} }
else { else {
if (sharedAssignmentId == Globals::kInvalidId) if (sharedAssignmentId == Globals::kInvalidId) {
ASMJIT_PROPAGATE(newSharedAssignmentId(&sharedAssignmentId)); ASMJIT_PROPAGATE(newSharedAssignmentId(&sharedAssignmentId));
}
successor->setSharedAssignmentId(sharedAssignmentId); successor->setSharedAssignmentId(sharedAssignmentId);
} }
} }
return kErrorOk; return kErrorOk;
} }
[[nodiscard]]
Error newSharedAssignmentId(uint32_t* out) noexcept { Error newSharedAssignmentId(uint32_t* out) noexcept {
uint32_t id = _sharedAssignmentsMap.size(); uint32_t id = _sharedAssignmentsMap.size();
ASMJIT_PROPAGATE(_sharedAssignmentsMap.append(_pass->allocator(), id)); ASMJIT_PROPAGATE(_sharedAssignmentsMap.append(_pass->allocator(), id));
@@ -567,18 +589,21 @@ public:
#ifndef ASMJIT_NO_LOGGING #ifndef ASMJIT_NO_LOGGING
template<typename... Args> template<typename... Args>
inline void log(const char* fmt, Args&&... args) noexcept { inline void log(const char* fmt, Args&&... args) noexcept {
if (_logger) if (_logger) {
_logger->logf(fmt, std::forward<Args>(args)...); _logger->logf(fmt, std::forward<Args>(args)...);
}
} }
inline void logBlock(RABlock* block, uint32_t indentation = 0) noexcept { inline void logBlock(RABlock* block, uint32_t indentation = 0) noexcept {
if (_logger) if (_logger) {
_logBlock(block, indentation); _logBlock(block, indentation);
}
} }
inline void logNode(BaseNode* node, uint32_t indentation = 0, const char* action = nullptr) noexcept { inline void logNode(BaseNode* node, uint32_t indentation = 0, const char* action = nullptr) noexcept {
if (_logger) if (_logger) {
_logNode(node, indentation, action); _logNode(node, indentation, action);
}
} }
void _logBlock(RABlock* block, uint32_t indentation) noexcept { void _logBlock(RABlock* block, uint32_t indentation) noexcept {
View File
@@ -44,8 +44,8 @@ class RABlock;
class BaseNode; class BaseNode;
struct RAStackSlot; struct RAStackSlot;
typedef ZoneVector<RABlock*> RABlocks; using RABlocks = ZoneVector<RABlock*>;
typedef ZoneVector<RAWorkReg*> RAWorkRegs; using RAWorkRegs = ZoneVector<RAWorkReg*>;
//! Maximum number of consecutive registers aggregated from all supported backends. //! Maximum number of consecutive registers aggregated from all supported backends.
static constexpr uint32_t kMaxConsecutiveRegs = 4; static constexpr uint32_t kMaxConsecutiveRegs = 4;
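The typedef-to-using swap above is part of the same modernization; a tiny generic illustration of why `using` is preferred (placeholder names, nothing asmjit-specific), including the alias-template form that typedef cannot express:

#include <vector>

using IntVector = std::vector<int>;   // equivalent to: typedef std::vector<int> IntVector;

template<typename T>
using VectorOf = std::vector<T>;      // alias template: has no typedef equivalent

static_assert(sizeof(VectorOf<int>) == sizeof(IntVector), "same type, different spelling");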
@@ -60,6 +60,7 @@ public:
//! \} //! \}
[[nodiscard]]
ASMJIT_NOINLINE Error init(Arch arch) noexcept { ASMJIT_NOINLINE Error init(Arch arch) noexcept {
switch (arch) { switch (arch) {
case Arch::kX86: case Arch::kX86:
@@ -85,6 +86,7 @@ public:
} }
} }
[[nodiscard]]
inline RegMask availableRegs(RegGroup group) const noexcept { return _availableRegs[group]; } inline RegMask availableRegs(RegGroup group) const noexcept { return _availableRegs[group]; }
}; };
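The [[nodiscard]] attribute added to Error-returning functions and getters throughout this commit turns a silently ignored result into a compile-time diagnostic; a minimal generic illustration (initSomething is a made-up name, not asmjit API):

#include <cstdint>

using Error = uint32_t;  // stand-in for asmjit::Error

[[nodiscard]] static Error initSomething() noexcept { return 0; }

static void caller() {
  initSomething();          // compilers typically warn: discarded [[nodiscard]] result
  (void)initSomething();    // explicit discard, no warning
  Error err = initSomething();
  (void)err;                // handled (or at least acknowledged)
}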
@@ -120,14 +122,23 @@ struct RAStrategy {
_flags = RAStrategyFlags::kNone; _flags = RAStrategyFlags::kNone;
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAStrategyType type() const noexcept { return _type; } ASMJIT_INLINE_NODEBUG RAStrategyType type() const noexcept { return _type; }
ASMJIT_INLINE_NODEBUG void setType(RAStrategyType type) noexcept { _type = type; } ASMJIT_INLINE_NODEBUG void setType(RAStrategyType type) noexcept { _type = type; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isSimple() const noexcept { return _type == RAStrategyType::kSimple; } ASMJIT_INLINE_NODEBUG bool isSimple() const noexcept { return _type == RAStrategyType::kSimple; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isComplex() const noexcept { return _type >= RAStrategyType::kComplex; } ASMJIT_INLINE_NODEBUG bool isComplex() const noexcept { return _type >= RAStrategyType::kComplex; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAStrategyFlags flags() const noexcept { return _flags; } ASMJIT_INLINE_NODEBUG RAStrategyFlags flags() const noexcept { return _flags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(RAStrategyFlags flag) const noexcept { return Support::test(_flags, flag); } ASMJIT_INLINE_NODEBUG bool hasFlag(RAStrategyFlags flag) const noexcept { return Support::test(_flags, flag); }
ASMJIT_INLINE_NODEBUG void addFlags(RAStrategyFlags flags) noexcept { _flags |= flags; } ASMJIT_INLINE_NODEBUG void addFlags(RAStrategyFlags flags) noexcept { _flags |= flags; }
//! \} //! \}
@@ -160,17 +171,22 @@ struct RARegCount {
//! \name Overloaded Operators //! \name Overloaded Operators
//! \{ //! \{
[[nodiscard]]
inline uint8_t& operator[](RegGroup group) noexcept { inline uint8_t& operator[](RegGroup group) noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _regs[size_t(group)]; return _regs[size_t(group)];
} }
[[nodiscard]]
inline const uint8_t& operator[](RegGroup group) const noexcept { inline const uint8_t& operator[](RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
return _regs[size_t(group)]; return _regs[size_t(group)];
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const RARegCount& other) const noexcept { return _packed == other._packed; } ASMJIT_INLINE_NODEBUG bool operator==(const RARegCount& other) const noexcept { return _packed == other._packed; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const RARegCount& other) const noexcept { return _packed != other._packed; } ASMJIT_INLINE_NODEBUG bool operator!=(const RARegCount& other) const noexcept { return _packed != other._packed; }
//! \} //! \}
@@ -179,6 +195,7 @@ struct RARegCount {
//! \{ //! \{
//! Returns the count of registers by the given register `group`. //! Returns the count of registers by the given register `group`.
[[nodiscard]]
inline uint32_t get(RegGroup group) const noexcept { inline uint32_t get(RegGroup group) const noexcept {
ASMJIT_ASSERT(group <= RegGroup::kMaxVirt); ASMJIT_ASSERT(group <= RegGroup::kMaxVirt);
@@ -210,7 +227,7 @@ struct RARegCount {
//! Provides mapping that can be used to fast index architecture register groups. //! Provides mapping that can be used to fast index architecture register groups.
struct RARegIndex : public RARegCount { struct RARegIndex : public RARegCount {
//! Build register indexes based on the given `count` of registers. //! Build register indexes based on the given `count` of registers.
ASMJIT_FORCE_INLINE void buildIndexes(const RARegCount& count) noexcept { ASMJIT_INLINE void buildIndexes(const RARegCount& count) noexcept {
uint32_t x = uint32_t(count._regs[0]); uint32_t x = uint32_t(count._regs[0]);
uint32_t y = uint32_t(count._regs[1]) + x; uint32_t y = uint32_t(count._regs[1]) + x;
uint32_t z = uint32_t(count._regs[2]) + y; uint32_t z = uint32_t(count._regs[2]) + y;
@@ -253,13 +270,18 @@ struct RARegMask {
//! \name Overloaded Operators //! \name Overloaded Operators
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const RARegMask& other) const noexcept { return _masks == other._masks; } ASMJIT_INLINE_NODEBUG bool operator==(const RARegMask& other) const noexcept { return _masks == other._masks; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const RARegMask& other) const noexcept { return _masks != other._masks; } ASMJIT_INLINE_NODEBUG bool operator!=(const RARegMask& other) const noexcept { return _masks != other._masks; }
template<typename Index> template<typename Index>
[[nodiscard]]
inline uint32_t& operator[](const Index& index) noexcept { return _masks[index]; } inline uint32_t& operator[](const Index& index) noexcept { return _masks[index]; }
template<typename Index> template<typename Index>
[[nodiscard]]
inline const uint32_t& operator[](const Index& index) const noexcept { return _masks[index]; } inline const uint32_t& operator[](const Index& index) const noexcept { return _masks[index]; }
//! \} //! \}
@@ -268,10 +290,12 @@ struct RARegMask {
//! \{ //! \{
//! Tests whether all register masks are zero (empty). //! Tests whether all register masks are zero (empty).
[[nodiscard]]
inline bool empty() const noexcept { inline bool empty() const noexcept {
return _masks.aggregate<Support::Or>() == 0; return _masks.aggregate<Support::Or>() == 0;
} }
[[nodiscard]]
inline bool has(RegGroup group, RegMask mask = 0xFFFFFFFFu) const noexcept { inline bool has(RegGroup group, RegMask mask = 0xFFFFFFFFu) const noexcept {
return (_masks[group] & mask) != 0; return (_masks[group] & mask) != 0;
} }
@@ -329,16 +353,28 @@ public:
ASMJIT_INLINE_NODEBUG void reset() noexcept { _packed = 0; } ASMJIT_INLINE_NODEBUG void reset() noexcept { _packed = 0; }
ASMJIT_INLINE_NODEBUG void combineWith(const RARegsStats& other) noexcept { _packed |= other._packed; } ASMJIT_INLINE_NODEBUG void combineWith(const RARegsStats& other) noexcept { _packed |= other._packed; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasUsed() const noexcept { return (_packed & kMaskUsed) != 0u; } ASMJIT_INLINE_NODEBUG bool hasUsed() const noexcept { return (_packed & kMaskUsed) != 0u; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasUsed(RegGroup group) const noexcept { return (_packed & Support::bitMask(kIndexUsed + uint32_t(group))) != 0u; } ASMJIT_INLINE_NODEBUG bool hasUsed(RegGroup group) const noexcept { return (_packed & Support::bitMask(kIndexUsed + uint32_t(group))) != 0u; }
ASMJIT_INLINE_NODEBUG void makeUsed(RegGroup group) noexcept { _packed |= Support::bitMask(kIndexUsed + uint32_t(group)); } ASMJIT_INLINE_NODEBUG void makeUsed(RegGroup group) noexcept { _packed |= Support::bitMask(kIndexUsed + uint32_t(group)); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFixed() const noexcept { return (_packed & kMaskFixed) != 0u; } ASMJIT_INLINE_NODEBUG bool hasFixed() const noexcept { return (_packed & kMaskFixed) != 0u; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFixed(RegGroup group) const noexcept { return (_packed & Support::bitMask(kIndexFixed + uint32_t(group))) != 0u; } ASMJIT_INLINE_NODEBUG bool hasFixed(RegGroup group) const noexcept { return (_packed & Support::bitMask(kIndexFixed + uint32_t(group))) != 0u; }
ASMJIT_INLINE_NODEBUG void makeFixed(RegGroup group) noexcept { _packed |= Support::bitMask(kIndexFixed + uint32_t(group)); } ASMJIT_INLINE_NODEBUG void makeFixed(RegGroup group) noexcept { _packed |= Support::bitMask(kIndexFixed + uint32_t(group)); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasClobbered() const noexcept { return (_packed & kMaskClobbered) != 0u; } ASMJIT_INLINE_NODEBUG bool hasClobbered() const noexcept { return (_packed & kMaskClobbered) != 0u; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasClobbered(RegGroup group) const noexcept { return (_packed & Support::bitMask(kIndexClobbered + uint32_t(group))) != 0u; } ASMJIT_INLINE_NODEBUG bool hasClobbered(RegGroup group) const noexcept { return (_packed & Support::bitMask(kIndexClobbered + uint32_t(group))) != 0u; }
ASMJIT_INLINE_NODEBUG void makeClobbered(RegGroup group) noexcept { _packed |= Support::bitMask(kIndexClobbered + uint32_t(group)); } ASMJIT_INLINE_NODEBUG void makeClobbered(RegGroup group) noexcept { _packed |= Support::bitMask(kIndexClobbered + uint32_t(group)); }
//! \} //! \}
@@ -370,8 +406,11 @@ public:
ASMJIT_INLINE_NODEBUG RALiveCount& operator=(const RALiveCount& other) noexcept = default; ASMJIT_INLINE_NODEBUG RALiveCount& operator=(const RALiveCount& other) noexcept = default;
inline uint32_t& operator[](RegGroup group) noexcept { return n[group]; } [[nodiscard]]
inline const uint32_t& operator[](RegGroup group) const noexcept { return n[group]; } ASMJIT_INLINE_NODEBUG uint32_t& operator[](RegGroup group) noexcept { return n[group]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const uint32_t& operator[](RegGroup group) const noexcept { return n[group]; }
//! \} //! \}
@@ -388,10 +427,8 @@ struct RALiveInterval {
//! \name Constants //! \name Constants
//! \{ //! \{
enum : uint32_t { static inline constexpr uint32_t kNaN = 0;
kNaN = 0, static inline constexpr uint32_t kInf = 0xFFFFFFFFu;
kInf = 0xFFFFFFFFu
};
//! \} //! \}
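kNaN/kInf above (and the similar constants elsewhere in this commit) move from an anonymous `enum : uint32_t` to C++17 `static inline constexpr` members; a generic before/after sketch (placeholder names):

#include <cstdint>

struct BeforeCxx17 {
  enum : uint32_t { kInf = 0xFFFFFFFFu };               // enumerator with an underlying type
};

struct SinceCxx17 {
  static inline constexpr uint32_t kInf = 0xFFFFFFFFu;  // inline variable: typed, no out-of-class definition
};

static_assert(BeforeCxx17::kInf == SinceCxx17::kInf, "same value, now a real uint32_t object");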
@@ -428,7 +465,10 @@ struct RALiveInterval {
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isValid() const noexcept { return a < b; } ASMJIT_INLINE_NODEBUG bool isValid() const noexcept { return a < b; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t width() const noexcept { return b - a; } ASMJIT_INLINE_NODEBUG uint32_t width() const noexcept { return b - a; }
//! \} //! \}
@@ -441,7 +481,7 @@ public:
//! \name Types //! \name Types
//! \{ //! \{
typedef T DataType; using DataType = T;
//! \} //! \}
@@ -488,9 +528,20 @@ class RALiveSpans {
public: public:
ASMJIT_NONCOPYABLE(RALiveSpans) ASMJIT_NONCOPYABLE(RALiveSpans)
typedef typename T::DataType DataType; //! \name Types
//! \{
using DataType = typename T::DataType;
//! \}
//! \name Members
//! \{
ZoneVector<T> _data; ZoneVector<T> _data;
//! \}
//! \name Construction & Destruction //! \name Construction & Destruction
//! \{ //! \{
@@ -504,12 +555,19 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _data.empty(); } ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _data.empty(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t size() const noexcept { return _data.size(); } ASMJIT_INLINE_NODEBUG uint32_t size() const noexcept { return _data.size(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* data() noexcept { return _data.data(); } ASMJIT_INLINE_NODEBUG T* data() noexcept { return _data.data(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const T* data() const noexcept { return _data.data(); } ASMJIT_INLINE_NODEBUG const T* data() const noexcept { return _data.data(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isOpen() const noexcept { ASMJIT_INLINE_NODEBUG bool isOpen() const noexcept {
uint32_t size = _data.size(); uint32_t size = _data.size();
return size > 0 && _data[size - 1].b == RALiveInterval::kInf; return size > 0 && _data[size - 1].b == RALiveInterval::kInf;
@@ -523,12 +581,12 @@ public:
ASMJIT_INLINE_NODEBUG void swap(RALiveSpans<T>& other) noexcept { _data.swap(other._data); } ASMJIT_INLINE_NODEBUG void swap(RALiveSpans<T>& other) noexcept { _data.swap(other._data); }
//! Open the current live span. //! Open the current live span.
ASMJIT_FORCE_INLINE Error openAt(ZoneAllocator* allocator, uint32_t start, uint32_t end) noexcept { ASMJIT_INLINE Error openAt(ZoneAllocator* allocator, uint32_t start, uint32_t end) noexcept {
bool wasOpen; bool wasOpen;
return openAt(allocator, start, end, wasOpen); return openAt(allocator, start, end, wasOpen);
} }
ASMJIT_FORCE_INLINE Error openAt(ZoneAllocator* allocator, uint32_t start, uint32_t end, bool& wasOpen) noexcept { ASMJIT_INLINE Error openAt(ZoneAllocator* allocator, uint32_t start, uint32_t end, bool& wasOpen) noexcept {
uint32_t size = _data.size(); uint32_t size = _data.size();
wasOpen = false; wasOpen = false;
@@ -544,7 +602,7 @@ public:
return _data.append(allocator, T(start, end)); return _data.append(allocator, T(start, end));
} }
ASMJIT_FORCE_INLINE void closeAt(uint32_t end) noexcept { ASMJIT_INLINE void closeAt(uint32_t end) noexcept {
ASMJIT_ASSERT(!empty()); ASMJIT_ASSERT(!empty());
uint32_t size = _data.size(); uint32_t size = _data.size();
@@ -561,14 +619,19 @@ public:
return width; return width;
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T& operator[](uint32_t index) noexcept { return _data[index]; } ASMJIT_INLINE_NODEBUG T& operator[](uint32_t index) noexcept { return _data[index]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const T& operator[](uint32_t index) const noexcept { return _data[index]; } ASMJIT_INLINE_NODEBUG const T& operator[](uint32_t index) const noexcept { return _data[index]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool intersects(const RALiveSpans<T>& other) const noexcept { ASMJIT_INLINE_NODEBUG bool intersects(const RALiveSpans<T>& other) const noexcept {
return intersects(*this, other); return intersects(*this, other);
} }
ASMJIT_FORCE_INLINE Error nonOverlappingUnionOf(ZoneAllocator* allocator, const RALiveSpans<T>& x, const RALiveSpans<T>& y, const DataType& yData) noexcept { [[nodiscard]]
ASMJIT_INLINE Error nonOverlappingUnionOf(ZoneAllocator* allocator, const RALiveSpans<T>& x, const RALiveSpans<T>& y, const DataType& yData) noexcept {
uint32_t finalSize = x.size() + y.size(); uint32_t finalSize = x.size() + y.size();
ASMJIT_PROPAGATE(_data.growingReserve(allocator, finalSize)); ASMJIT_PROPAGATE(_data.growingReserve(allocator, finalSize));
@@ -588,21 +651,24 @@ public:
while (ySpan->b <= xa) { while (ySpan->b <= xa) {
dstPtr->init(*ySpan, yData); dstPtr->init(*ySpan, yData);
dstPtr++; dstPtr++;
if (++ySpan == yEnd) if (++ySpan == yEnd) {
goto Done; goto Done;
}
} }
ya = ySpan->a; ya = ySpan->a;
while (xSpan->b <= ya) { while (xSpan->b <= ya) {
*dstPtr++ = *xSpan; *dstPtr++ = *xSpan;
if (++xSpan == xEnd) if (++xSpan == xEnd) {
goto Done; goto Done;
}
} }
// We know that `xSpan->b > ySpan->a`, so check if `ySpan->b > xSpan->a`. // We know that `xSpan->b > ySpan->a`, so check if `ySpan->b > xSpan->a`.
xa = xSpan->a; xa = xSpan->a;
if (ySpan->b > xa) if (ySpan->b > xa) {
return 0xFFFFFFFFu; return 0xFFFFFFFFu;
}
} }
} }
@@ -621,7 +687,8 @@ public:
return kErrorOk; return kErrorOk;
} }
static ASMJIT_FORCE_INLINE bool intersects(const RALiveSpans<T>& x, const RALiveSpans<T>& y) noexcept { [[nodiscard]]
static ASMJIT_INLINE bool intersects(const RALiveSpans<T>& x, const RALiveSpans<T>& y) noexcept {
const T* xSpan = x.data(); const T* xSpan = x.data();
const T* ySpan = y.data(); const T* ySpan = y.data();
@@ -630,26 +697,32 @@ public:
// Loop until we have intersection or either `xSpan == xEnd` or `ySpan == yEnd`, which means that there is no // Loop until we have intersection or either `xSpan == xEnd` or `ySpan == yEnd`, which means that there is no
// intersection. We advance either `xSpan` or `ySpan` depending on their end positions. // intersection. We advance either `xSpan` or `ySpan` depending on their end positions.
if (xSpan == xEnd || ySpan == yEnd) if (xSpan == xEnd || ySpan == yEnd) {
return false; return false;
}
uint32_t xa, ya; uint32_t xa, ya;
xa = xSpan->a; xa = xSpan->a;
for (;;) { for (;;) {
while (ySpan->b <= xa) while (ySpan->b <= xa) {
if (++ySpan == yEnd) if (++ySpan == yEnd) {
return false; return false;
}
}
ya = ySpan->a; ya = ySpan->a;
while (xSpan->b <= ya) while (xSpan->b <= ya) {
if (++xSpan == xEnd) if (++xSpan == xEnd) {
return false; return false;
}
}
// We know that `xSpan->b > ySpan->a`, so check if `ySpan->b > xSpan->a`. // We know that `xSpan->b > ySpan->a`, so check if `ySpan->b > xSpan->a`.
xa = xSpan->a; xa = xSpan->a;
if (ySpan->b > xa) if (ySpan->b > xa) {
return true; return true;
}
} }
} }
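The intersects() walk above advances two cursors over sorted, non-overlapping span lists; a simplified standalone restatement of the same idea (plain structs and std::vector instead of RALiveSpan/ZoneVector):

#include <cstddef>
#include <cstdint>
#include <vector>

struct Span { uint32_t a, b; };  // half-open interval [a, b)

static bool spansIntersect(const std::vector<Span>& x, const std::vector<Span>& y) {
  size_t i = 0, j = 0;
  while (i < x.size() && j < y.size()) {
    if (x[i].b <= y[j].a) { i++; continue; }  // x[i] ends before y[j] starts, advance x
    if (y[j].b <= x[i].a) { j++; continue; }  // y[j] ends before x[i] starts, advance y
    return true;                              // the two current spans overlap
  }
  return false;
}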
@@ -666,8 +739,13 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t width() const noexcept { return _width; } ASMJIT_INLINE_NODEBUG uint32_t width() const noexcept { return _width; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG float freq() const noexcept { return _freq; } ASMJIT_INLINE_NODEBUG float freq() const noexcept { return _freq; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG float priority() const noexcept { return _priority; } ASMJIT_INLINE_NODEBUG float priority() const noexcept { return _priority; }
//! \} //! \}
@@ -681,12 +759,15 @@ struct LiveRegData {
ASMJIT_INLINE_NODEBUG void init(const LiveRegData& other) noexcept { id = other.id; } ASMJIT_INLINE_NODEBUG void init(const LiveRegData& other) noexcept { id = other.id; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const LiveRegData& other) const noexcept { return id == other.id; } ASMJIT_INLINE_NODEBUG bool operator==(const LiveRegData& other) const noexcept { return id == other.id; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const LiveRegData& other) const noexcept { return id != other.id; } ASMJIT_INLINE_NODEBUG bool operator!=(const LiveRegData& other) const noexcept { return id != other.id; }
}; };
typedef RALiveSpan<LiveRegData> LiveRegSpan; using LiveRegSpan = RALiveSpan<LiveRegData>;
typedef RALiveSpans<LiveRegSpan> LiveRegSpans; using LiveRegSpans = RALiveSpans<LiveRegSpan>;
//! Flags used by \ref RATiedReg. //! Flags used by \ref RATiedReg.
//! //!
@@ -875,53 +956,86 @@ struct RATiedReg {
//! \{ //! \{
//! Returns the associated WorkReg id. //! Returns the associated WorkReg id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t workId() const noexcept { return _workId; } ASMJIT_INLINE_NODEBUG uint32_t workId() const noexcept { return _workId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasConsecutiveParent() const noexcept { return _consecutiveParent != Globals::kInvalidId; } ASMJIT_INLINE_NODEBUG bool hasConsecutiveParent() const noexcept { return _consecutiveParent != Globals::kInvalidId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t consecutiveParent() const noexcept { return _consecutiveParent; } ASMJIT_INLINE_NODEBUG uint32_t consecutiveParent() const noexcept { return _consecutiveParent; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t consecutiveData() const noexcept { return consecutiveDataFromFlags(_flags); } ASMJIT_INLINE_NODEBUG uint32_t consecutiveData() const noexcept { return consecutiveDataFromFlags(_flags); }
//! Returns TiedReg flags. //! Returns TiedReg flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedFlags flags() const noexcept { return _flags; } ASMJIT_INLINE_NODEBUG RATiedFlags flags() const noexcept { return _flags; }
//! Checks if the given `flag` is set. //! Checks if the given `flag` is set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(RATiedFlags flag) const noexcept { return Support::test(_flags, flag); } ASMJIT_INLINE_NODEBUG bool hasFlag(RATiedFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Adds tied register flags. //! Adds tied register flags.
ASMJIT_INLINE_NODEBUG void addFlags(RATiedFlags flags) noexcept { _flags |= flags; } ASMJIT_INLINE_NODEBUG void addFlags(RATiedFlags flags) noexcept { _flags |= flags; }
//! Tests whether the register is read (returns `true` also if it's Read/Write). //! Tests whether the register is read (returns `true` also if it's Read/Write).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isRead() const noexcept { return hasFlag(RATiedFlags::kRead); } ASMJIT_INLINE_NODEBUG bool isRead() const noexcept { return hasFlag(RATiedFlags::kRead); }
//! Tests whether the register is written (returns `true` also if it's Read/Write). //! Tests whether the register is written (returns `true` also if it's Read/Write).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isWrite() const noexcept { return hasFlag(RATiedFlags::kWrite); } ASMJIT_INLINE_NODEBUG bool isWrite() const noexcept { return hasFlag(RATiedFlags::kWrite); }
//! Tests whether the register is read only. //! Tests whether the register is read only.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isReadOnly() const noexcept { return (_flags & RATiedFlags::kRW) == RATiedFlags::kRead; } ASMJIT_INLINE_NODEBUG bool isReadOnly() const noexcept { return (_flags & RATiedFlags::kRW) == RATiedFlags::kRead; }
//! Tests whether the register is write only. //! Tests whether the register is write only.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isWriteOnly() const noexcept { return (_flags & RATiedFlags::kRW) == RATiedFlags::kWrite; } ASMJIT_INLINE_NODEBUG bool isWriteOnly() const noexcept { return (_flags & RATiedFlags::kRW) == RATiedFlags::kWrite; }
//! Tests whether the register is read and written. //! Tests whether the register is read and written.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isReadWrite() const noexcept { return (_flags & RATiedFlags::kRW) == RATiedFlags::kRW; } ASMJIT_INLINE_NODEBUG bool isReadWrite() const noexcept { return (_flags & RATiedFlags::kRW) == RATiedFlags::kRW; }
//! Tests whether the tied register has use operand (Read/ReadWrite). //! Tests whether the tied register has use operand (Read/ReadWrite).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isUse() const noexcept { return hasFlag(RATiedFlags::kUse); } ASMJIT_INLINE_NODEBUG bool isUse() const noexcept { return hasFlag(RATiedFlags::kUse); }
//! Tests whether the tied register has out operand (Write). //! Tests whether the tied register has out operand (Write).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isOut() const noexcept { return hasFlag(RATiedFlags::kOut); } ASMJIT_INLINE_NODEBUG bool isOut() const noexcept { return hasFlag(RATiedFlags::kOut); }
//! Tests whether the tied register has \ref RATiedFlags::kLeadConsecutive flag set. //! Tests whether the tied register has \ref RATiedFlags::kLeadConsecutive flag set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLeadConsecutive() const noexcept { return hasFlag(RATiedFlags::kLeadConsecutive); } ASMJIT_INLINE_NODEBUG bool isLeadConsecutive() const noexcept { return hasFlag(RATiedFlags::kLeadConsecutive); }
//! Tests whether the tied register has \ref RATiedFlags::kUseConsecutive flag set. //! Tests whether the tied register has \ref RATiedFlags::kUseConsecutive flag set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isUseConsecutive() const noexcept { return hasFlag(RATiedFlags::kUseConsecutive); } ASMJIT_INLINE_NODEBUG bool isUseConsecutive() const noexcept { return hasFlag(RATiedFlags::kUseConsecutive); }
//! Tests whether the tied register has \ref RATiedFlags::kOutConsecutive flag set. //! Tests whether the tied register has \ref RATiedFlags::kOutConsecutive flag set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isOutConsecutive() const noexcept { return hasFlag(RATiedFlags::kOutConsecutive); } ASMJIT_INLINE_NODEBUG bool isOutConsecutive() const noexcept { return hasFlag(RATiedFlags::kOutConsecutive); }
//! Tests whether the tied register must be unique (cannot be allocated to any other allocated register). //! Tests whether the tied register must be unique (cannot be allocated to any other allocated register).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isUnique() const noexcept { return hasFlag(RATiedFlags::kUnique); } ASMJIT_INLINE_NODEBUG bool isUnique() const noexcept { return hasFlag(RATiedFlags::kUnique); }
//! Tests whether the tied register has any consecutive flag. //! Tests whether the tied register has any consecutive flag.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAnyConsecutiveFlag() const noexcept { return hasFlag(RATiedFlags::kLeadConsecutive | RATiedFlags::kUseConsecutive | RATiedFlags::kOutConsecutive); } ASMJIT_INLINE_NODEBUG bool hasAnyConsecutiveFlag() const noexcept { return hasFlag(RATiedFlags::kLeadConsecutive | RATiedFlags::kUseConsecutive | RATiedFlags::kOutConsecutive); }
//! Tests whether the USE slot can be patched to memory operand. //! Tests whether the USE slot can be patched to memory operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasUseRM() const noexcept { return hasFlag(RATiedFlags::kUseRM); } ASMJIT_INLINE_NODEBUG bool hasUseRM() const noexcept { return hasFlag(RATiedFlags::kUseRM); }
//! Tests whether the OUT slot can be patched to memory operand. //! Tests whether the OUT slot can be patched to memory operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOutRM() const noexcept { return hasFlag(RATiedFlags::kOutRM); } ASMJIT_INLINE_NODEBUG bool hasOutRM() const noexcept { return hasFlag(RATiedFlags::kOutRM); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t rmSize() const noexcept { return _rmSize; } ASMJIT_INLINE_NODEBUG uint32_t rmSize() const noexcept { return _rmSize; }
inline void makeReadOnly() noexcept { inline void makeReadOnly() noexcept {
@@ -937,46 +1051,70 @@ struct RATiedReg {
} }
//! Tests whether the register would duplicate. //! Tests whether the register would duplicate.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isDuplicate() const noexcept { return hasFlag(RATiedFlags::kDuplicate); } ASMJIT_INLINE_NODEBUG bool isDuplicate() const noexcept { return hasFlag(RATiedFlags::kDuplicate); }
//! Tests whether the register (and the instruction it's part of) appears last in the basic block. //! Tests whether the register (and the instruction it's part of) appears last in the basic block.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLast() const noexcept { return hasFlag(RATiedFlags::kLast); } ASMJIT_INLINE_NODEBUG bool isLast() const noexcept { return hasFlag(RATiedFlags::kLast); }
//! Tests whether the register should be killed after USEd and/or OUTed. //! Tests whether the register should be killed after USEd and/or OUTed.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isKill() const noexcept { return hasFlag(RATiedFlags::kKill); } ASMJIT_INLINE_NODEBUG bool isKill() const noexcept { return hasFlag(RATiedFlags::kKill); }
//! Tests whether the register is OUT or KILL (used internally by local register allocator). //! Tests whether the register is OUT or KILL (used internally by local register allocator).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isOutOrKill() const noexcept { return hasFlag(RATiedFlags::kOut | RATiedFlags::kKill); } ASMJIT_INLINE_NODEBUG bool isOutOrKill() const noexcept { return hasFlag(RATiedFlags::kOut | RATiedFlags::kKill); }
//! Returns a register mask that describes allocable USE registers (Read/ReadWrite access). //! Returns a register mask that describes allocable USE registers (Read/ReadWrite access).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask useRegMask() const noexcept { return _useRegMask; } ASMJIT_INLINE_NODEBUG RegMask useRegMask() const noexcept { return _useRegMask; }
//! Returns a register mask that describes allocable OUT registers (WriteOnly access). //! Returns a register mask that describes allocable OUT registers (WriteOnly access).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask outRegMask() const noexcept { return _outRegMask; } ASMJIT_INLINE_NODEBUG RegMask outRegMask() const noexcept { return _outRegMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t refCount() const noexcept { return _refCount; } ASMJIT_INLINE_NODEBUG uint32_t refCount() const noexcept { return _refCount; }
ASMJIT_INLINE_NODEBUG void addRefCount(uint32_t n = 1) noexcept { _refCount = uint8_t(_refCount + n); } ASMJIT_INLINE_NODEBUG void addRefCount(uint32_t n = 1) noexcept { _refCount = uint8_t(_refCount + n); }
//! Tests whether the register must be allocated to a fixed physical register before it's used. //! Tests whether the register must be allocated to a fixed physical register before it's used.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasUseId() const noexcept { return _useId != BaseReg::kIdBad; } ASMJIT_INLINE_NODEBUG bool hasUseId() const noexcept { return _useId != BaseReg::kIdBad; }
//! Tests whether the register must be allocated to a fixed physical register before it's written. //! Tests whether the register must be allocated to a fixed physical register before it's written.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOutId() const noexcept { return _outId != BaseReg::kIdBad; } ASMJIT_INLINE_NODEBUG bool hasOutId() const noexcept { return _outId != BaseReg::kIdBad; }
//! Returns a physical register id used for 'use' operation. //! Returns a physical register id used for 'use' operation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t useId() const noexcept { return _useId; } ASMJIT_INLINE_NODEBUG uint32_t useId() const noexcept { return _useId; }
//! Returns a physical register id used for 'out' operation. //! Returns a physical register id used for 'out' operation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t outId() const noexcept { return _outId; } ASMJIT_INLINE_NODEBUG uint32_t outId() const noexcept { return _outId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t useRewriteMask() const noexcept { return _useRewriteMask; } ASMJIT_INLINE_NODEBUG uint32_t useRewriteMask() const noexcept { return _useRewriteMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t outRewriteMask() const noexcept { return _outRewriteMask; } ASMJIT_INLINE_NODEBUG uint32_t outRewriteMask() const noexcept { return _outRewriteMask; }
//! Sets a physical register used for 'use' operation. //! Sets a physical register used for 'use' operation.
ASMJIT_INLINE_NODEBUG void setUseId(uint32_t index) noexcept { _useId = uint8_t(index); } ASMJIT_INLINE_NODEBUG void setUseId(uint32_t index) noexcept { _useId = uint8_t(index); }
//! Sets a physical register used for 'out' operation. //! Sets a physical register used for 'out' operation.
ASMJIT_INLINE_NODEBUG void setOutId(uint32_t index) noexcept { _outId = uint8_t(index); } ASMJIT_INLINE_NODEBUG void setOutId(uint32_t index) noexcept { _outId = uint8_t(index); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isUseDone() const noexcept { return hasFlag(RATiedFlags::kUseDone); } ASMJIT_INLINE_NODEBUG bool isUseDone() const noexcept { return hasFlag(RATiedFlags::kUseDone); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isOutDone() const noexcept { return hasFlag(RATiedFlags::kOutDone); } ASMJIT_INLINE_NODEBUG bool isOutDone() const noexcept { return hasFlag(RATiedFlags::kOutDone); }
ASMJIT_INLINE_NODEBUG void markUseDone() noexcept { addFlags(RATiedFlags::kUseDone); } ASMJIT_INLINE_NODEBUG void markUseDone() noexcept { addFlags(RATiedFlags::kUseDone); }
ASMJIT_INLINE_NODEBUG void markOutDone() noexcept { addFlags(RATiedFlags::kOutDone); } ASMJIT_INLINE_NODEBUG void markOutDone() noexcept { addFlags(RATiedFlags::kOutDone); }
//! \} //! \}
@@ -1024,13 +1162,8 @@ public:
//! \name Constants //! \name Constants
//! \{ //! \{
enum : uint32_t { static inline constexpr uint32_t kIdNone = 0xFFFFFFFFu;
kIdNone = 0xFFFFFFFFu static inline constexpr uint32_t kNoArgIndex = 0xFFu;
};
enum : uint32_t {
kNoArgIndex = 0xFFu
};
//! \} //! \}
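The replacement on the right relies on C++17 inline variables: a `static inline constexpr` data member is defined at its declaration, so the old `enum : uint32_t` trick for in-class integral constants is no longer needed. A minimal standalone sketch of the two forms (illustrative code, not part of asmjit):

#include <cstdint>

struct OldStyle {
  // Pre-C++17: an unscoped enum was a common way to get an in-class integral
  // constant without needing an out-of-class definition.
  enum : uint32_t { kIdNone = 0xFFFFFFFFu };
};

struct NewStyle {
  // C++17: an inline variable is defined right here, keeps its real type, and
  // can be ODR-used (e.g. have its address taken) without a separate definition.
  static inline constexpr uint32_t kIdNone = 0xFFFFFFFFu;
};

static_assert(OldStyle::kIdNone == NewStyle::kIdNone, "same value, different mechanism");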
@@ -1128,22 +1261,38 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t workId() const noexcept { return _workId; } ASMJIT_INLINE_NODEBUG uint32_t workId() const noexcept { return _workId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t virtId() const noexcept { return _virtId; } ASMJIT_INLINE_NODEBUG uint32_t virtId() const noexcept { return _virtId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _virtReg->name(); } ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _virtReg->name(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t nameSize() const noexcept { return _virtReg->nameSize(); } ASMJIT_INLINE_NODEBUG uint32_t nameSize() const noexcept { return _virtReg->nameSize(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return _virtReg->typeId(); } ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return _virtReg->typeId(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAWorkRegFlags flags() const noexcept { return _flags; } ASMJIT_INLINE_NODEBUG RAWorkRegFlags flags() const noexcept { return _flags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(RAWorkRegFlags flag) const noexcept { return Support::test(_flags, flag); } ASMJIT_INLINE_NODEBUG bool hasFlag(RAWorkRegFlags flag) const noexcept { return Support::test(_flags, flag); }
ASMJIT_INLINE_NODEBUG void addFlags(RAWorkRegFlags flags) noexcept { _flags |= flags; } ASMJIT_INLINE_NODEBUG void addFlags(RAWorkRegFlags flags) noexcept { _flags |= flags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAllocated() const noexcept { return hasFlag(RAWorkRegFlags::kAllocated); } ASMJIT_INLINE_NODEBUG bool isAllocated() const noexcept { return hasFlag(RAWorkRegFlags::kAllocated); }
ASMJIT_INLINE_NODEBUG void markAllocated() noexcept { addFlags(RAWorkRegFlags::kAllocated); } ASMJIT_INLINE_NODEBUG void markAllocated() noexcept { addFlags(RAWorkRegFlags::kAllocated); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isWithinSingleBasicBlock() const noexcept { return !hasFlag(RAWorkRegFlags::kMultipleBasicBlocks); } ASMJIT_INLINE_NODEBUG bool isWithinSingleBasicBlock() const noexcept { return !hasFlag(RAWorkRegFlags::kMultipleBasicBlocks); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t singleBasicBlockId() const noexcept { return _singleBasicBlockId; } ASMJIT_INLINE_NODEBUG uint32_t singleBasicBlockId() const noexcept { return _singleBasicBlockId; }
//! Called when this register appeared in a basic block having `blockId`. //! Called when this register appeared in a basic block having `blockId`.
@@ -1160,43 +1309,77 @@ public:
addFlags(RAWorkRegFlags::kMultipleBasicBlocks); addFlags(RAWorkRegFlags::kMultipleBasicBlocks);
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLeadConsecutive() const noexcept { return hasFlag(RAWorkRegFlags::kLeadConsecutive); } ASMJIT_INLINE_NODEBUG bool isLeadConsecutive() const noexcept { return hasFlag(RAWorkRegFlags::kLeadConsecutive); }
ASMJIT_INLINE_NODEBUG void markLeadConsecutive() noexcept { addFlags(RAWorkRegFlags::kLeadConsecutive); } ASMJIT_INLINE_NODEBUG void markLeadConsecutive() noexcept { addFlags(RAWorkRegFlags::kLeadConsecutive); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isProcessedConsecutive() const noexcept { return hasFlag(RAWorkRegFlags::kProcessedConsecutive); } ASMJIT_INLINE_NODEBUG bool isProcessedConsecutive() const noexcept { return hasFlag(RAWorkRegFlags::kProcessedConsecutive); }
ASMJIT_INLINE_NODEBUG void markProcessedConsecutive() noexcept { addFlags(RAWorkRegFlags::kProcessedConsecutive); } ASMJIT_INLINE_NODEBUG void markProcessedConsecutive() noexcept { addFlags(RAWorkRegFlags::kProcessedConsecutive); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isStackUsed() const noexcept { return hasFlag(RAWorkRegFlags::kStackUsed); } ASMJIT_INLINE_NODEBUG bool isStackUsed() const noexcept { return hasFlag(RAWorkRegFlags::kStackUsed); }
ASMJIT_INLINE_NODEBUG void markStackUsed() noexcept { addFlags(RAWorkRegFlags::kStackUsed); } ASMJIT_INLINE_NODEBUG void markStackUsed() noexcept { addFlags(RAWorkRegFlags::kStackUsed); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isStackPreferred() const noexcept { return hasFlag(RAWorkRegFlags::kStackPreferred); } ASMJIT_INLINE_NODEBUG bool isStackPreferred() const noexcept { return hasFlag(RAWorkRegFlags::kStackPreferred); }
ASMJIT_INLINE_NODEBUG void markStackPreferred() noexcept { addFlags(RAWorkRegFlags::kStackPreferred); } ASMJIT_INLINE_NODEBUG void markStackPreferred() noexcept { addFlags(RAWorkRegFlags::kStackPreferred); }
//! Tests whether this RAWorkReg has been coalesced with another one (cannot be used anymore). //! Tests whether this RAWorkReg has been coalesced with another one (cannot be used anymore).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isCoalesced() const noexcept { return hasFlag(RAWorkRegFlags::kCoalesced); } ASMJIT_INLINE_NODEBUG bool isCoalesced() const noexcept { return hasFlag(RAWorkRegFlags::kCoalesced); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OperandSignature signature() const noexcept { return _signature; } ASMJIT_INLINE_NODEBUG OperandSignature signature() const noexcept { return _signature; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegType type() const noexcept { return _signature.regType(); } ASMJIT_INLINE_NODEBUG RegType type() const noexcept { return _signature.regType(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegGroup group() const noexcept { return _signature.regGroup(); } ASMJIT_INLINE_NODEBUG RegGroup group() const noexcept { return _signature.regGroup(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VirtReg* virtReg() const noexcept { return _virtReg; } ASMJIT_INLINE_NODEBUG VirtReg* virtReg() const noexcept { return _virtReg; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasTiedReg() const noexcept { return _tiedReg != nullptr; } ASMJIT_INLINE_NODEBUG bool hasTiedReg() const noexcept { return _tiedReg != nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedReg* tiedReg() const noexcept { return _tiedReg; } ASMJIT_INLINE_NODEBUG RATiedReg* tiedReg() const noexcept { return _tiedReg; }
ASMJIT_INLINE_NODEBUG void setTiedReg(RATiedReg* tiedReg) noexcept { _tiedReg = tiedReg; } ASMJIT_INLINE_NODEBUG void setTiedReg(RATiedReg* tiedReg) noexcept { _tiedReg = tiedReg; }
ASMJIT_INLINE_NODEBUG void resetTiedReg() noexcept { _tiedReg = nullptr; } ASMJIT_INLINE_NODEBUG void resetTiedReg() noexcept { _tiedReg = nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasStackSlot() const noexcept { return _stackSlot != nullptr; } ASMJIT_INLINE_NODEBUG bool hasStackSlot() const noexcept { return _stackSlot != nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAStackSlot* stackSlot() const noexcept { return _stackSlot; } ASMJIT_INLINE_NODEBUG RAStackSlot* stackSlot() const noexcept { return _stackSlot; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG LiveRegSpans& liveSpans() noexcept { return _liveSpans; } ASMJIT_INLINE_NODEBUG LiveRegSpans& liveSpans() noexcept { return _liveSpans; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const LiveRegSpans& liveSpans() const noexcept { return _liveSpans; } ASMJIT_INLINE_NODEBUG const LiveRegSpans& liveSpans() const noexcept { return _liveSpans; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RALiveStats& liveStats() noexcept { return _liveStats; } ASMJIT_INLINE_NODEBUG RALiveStats& liveStats() noexcept { return _liveStats; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RALiveStats& liveStats() const noexcept { return _liveStats; } ASMJIT_INLINE_NODEBUG const RALiveStats& liveStats() const noexcept { return _liveStats; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasArgIndex() const noexcept { return _argIndex != kNoArgIndex; } ASMJIT_INLINE_NODEBUG bool hasArgIndex() const noexcept { return _argIndex != kNoArgIndex; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t argIndex() const noexcept { return _argIndex; } ASMJIT_INLINE_NODEBUG uint32_t argIndex() const noexcept { return _argIndex; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t argValueIndex() const noexcept { return _argValueIndex; } ASMJIT_INLINE_NODEBUG uint32_t argValueIndex() const noexcept { return _argValueIndex; }
inline void setArgIndex(uint32_t argIndex, uint32_t valueIndex) noexcept { inline void setArgIndex(uint32_t argIndex, uint32_t valueIndex) noexcept {
@@ -1204,39 +1387,71 @@ public:
_argValueIndex = uint8_t(valueIndex); _argValueIndex = uint8_t(valueIndex);
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasHomeRegId() const noexcept { return _homeRegId != BaseReg::kIdBad; } ASMJIT_INLINE_NODEBUG bool hasHomeRegId() const noexcept { return _homeRegId != BaseReg::kIdBad; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t homeRegId() const noexcept { return _homeRegId; } ASMJIT_INLINE_NODEBUG uint32_t homeRegId() const noexcept { return _homeRegId; }
ASMJIT_INLINE_NODEBUG void setHomeRegId(uint32_t physId) noexcept { _homeRegId = uint8_t(physId); } ASMJIT_INLINE_NODEBUG void setHomeRegId(uint32_t physId) noexcept { _homeRegId = uint8_t(physId); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasHintRegId() const noexcept { return _hintRegId != BaseReg::kIdBad; } ASMJIT_INLINE_NODEBUG bool hasHintRegId() const noexcept { return _hintRegId != BaseReg::kIdBad; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t hintRegId() const noexcept { return _hintRegId; } ASMJIT_INLINE_NODEBUG uint32_t hintRegId() const noexcept { return _hintRegId; }
ASMJIT_INLINE_NODEBUG void setHintRegId(uint32_t physId) noexcept { _hintRegId = uint8_t(physId); } ASMJIT_INLINE_NODEBUG void setHintRegId(uint32_t physId) noexcept { _hintRegId = uint8_t(physId); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask useIdMask() const noexcept { return _useIdMask; } ASMJIT_INLINE_NODEBUG RegMask useIdMask() const noexcept { return _useIdMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasUseIdMask() const noexcept { return _useIdMask != 0u; } ASMJIT_INLINE_NODEBUG bool hasUseIdMask() const noexcept { return _useIdMask != 0u; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasMultipleUseIds() const noexcept { return _useIdMask != 0u && !Support::isPowerOf2(_useIdMask); } ASMJIT_INLINE_NODEBUG bool hasMultipleUseIds() const noexcept { return _useIdMask != 0u && !Support::isPowerOf2(_useIdMask); }
ASMJIT_INLINE_NODEBUG void addUseIdMask(RegMask mask) noexcept { _useIdMask |= mask; } ASMJIT_INLINE_NODEBUG void addUseIdMask(RegMask mask) noexcept { _useIdMask |= mask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask preferredMask() const noexcept { return _preferredMask; } ASMJIT_INLINE_NODEBUG RegMask preferredMask() const noexcept { return _preferredMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasPreferredMask() const noexcept { return _preferredMask != 0xFFFFFFFFu; } ASMJIT_INLINE_NODEBUG bool hasPreferredMask() const noexcept { return _preferredMask != 0xFFFFFFFFu; }
ASMJIT_INLINE_NODEBUG void restrictPreferredMask(RegMask mask) noexcept { _preferredMask &= mask; } ASMJIT_INLINE_NODEBUG void restrictPreferredMask(RegMask mask) noexcept { _preferredMask &= mask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask consecutiveMask() const noexcept { return _consecutiveMask; } ASMJIT_INLINE_NODEBUG RegMask consecutiveMask() const noexcept { return _consecutiveMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasConsecutiveMask() const noexcept { return _consecutiveMask != 0xFFFFFFFFu; } ASMJIT_INLINE_NODEBUG bool hasConsecutiveMask() const noexcept { return _consecutiveMask != 0xFFFFFFFFu; }
ASMJIT_INLINE_NODEBUG void restrictConsecutiveMask(RegMask mask) noexcept { _consecutiveMask &= mask; } ASMJIT_INLINE_NODEBUG void restrictConsecutiveMask(RegMask mask) noexcept { _consecutiveMask &= mask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask clobberSurvivalMask() const noexcept { return _clobberSurvivalMask; } ASMJIT_INLINE_NODEBUG RegMask clobberSurvivalMask() const noexcept { return _clobberSurvivalMask; }
ASMJIT_INLINE_NODEBUG void addClobberSurvivalMask(RegMask mask) noexcept { _clobberSurvivalMask |= mask; } ASMJIT_INLINE_NODEBUG void addClobberSurvivalMask(RegMask mask) noexcept { _clobberSurvivalMask |= mask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask allocatedMask() const noexcept { return _allocatedMask; } ASMJIT_INLINE_NODEBUG RegMask allocatedMask() const noexcept { return _allocatedMask; }
ASMJIT_INLINE_NODEBUG void addAllocatedMask(RegMask mask) noexcept { _allocatedMask |= mask; } ASMJIT_INLINE_NODEBUG void addAllocatedMask(RegMask mask) noexcept { _allocatedMask |= mask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t regByteMask() const noexcept { return _regByteMask; } ASMJIT_INLINE_NODEBUG uint64_t regByteMask() const noexcept { return _regByteMask; }
ASMJIT_INLINE_NODEBUG void setRegByteMask(uint64_t mask) noexcept { _regByteMask = mask; } ASMJIT_INLINE_NODEBUG void setRegByteMask(uint64_t mask) noexcept { _regByteMask = mask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasImmediateConsecutives() const noexcept { return !_immediateConsecutives.empty(); } ASMJIT_INLINE_NODEBUG bool hasImmediateConsecutives() const noexcept { return !_immediateConsecutives.empty(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneBitVector& immediateConsecutives() const noexcept { return _immediateConsecutives; } ASMJIT_INLINE_NODEBUG const ZoneBitVector& immediateConsecutives() const noexcept { return _immediateConsecutives; }
[[nodiscard]]
inline Error addImmediateConsecutive(ZoneAllocator* allocator, uint32_t workId) noexcept { inline Error addImmediateConsecutive(ZoneAllocator* allocator, uint32_t workId) noexcept {
if (_immediateConsecutives.size() <= workId) if (_immediateConsecutives.size() <= workId)
ASMJIT_PROPAGATE(_immediateConsecutives.resize(allocator, workId + 1)); ASMJIT_PROPAGATE(_immediateConsecutives.resize(allocator, workId + 1));

View File

@@ -14,7 +14,7 @@ ASMJIT_BEGIN_NAMESPACE
// RALocalAllocator - Utilities // RALocalAllocator - Utilities
// ============================ // ============================
static ASMJIT_FORCE_INLINE RATiedReg* RALocal_findTiedRegByWorkId(RATiedReg* tiedRegs, size_t count, uint32_t workId) noexcept { static ASMJIT_INLINE RATiedReg* RALocal_findTiedRegByWorkId(RATiedReg* tiedRegs, size_t count, uint32_t workId) noexcept {
for (size_t i = 0; i < count; i++) for (size_t i = 0; i < count; i++)
if (tiedRegs[i].workId() == workId) if (tiedRegs[i].workId() == workId)
return &tiedRegs[i]; return &tiedRegs[i];
@@ -65,24 +65,28 @@ Error RALocalAllocator::makeInitialAssignment() noexcept {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) { for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
// Unassigned argument. // Unassigned argument.
const RegOnly& regArg = func->argPack(argIndex)[valueIndex]; const RegOnly& regArg = func->argPack(argIndex)[valueIndex];
if (!regArg.isReg() || !_cc->isVirtIdValid(regArg.id())) if (!regArg.isReg() || !_cc->isVirtIdValid(regArg.id())) {
continue; continue;
}
VirtReg* virtReg = _cc->virtRegById(regArg.id()); VirtReg* virtReg = _cc->virtRegById(regArg.id());
// Unreferenced argument. // Unreferenced argument.
RAWorkReg* workReg = virtReg->workReg(); RAWorkReg* workReg = virtReg->workReg();
if (!workReg) if (!workReg) {
continue; continue;
}
// Overwritten argument. // Overwritten argument.
uint32_t workId = workReg->workId(); uint32_t workId = workReg->workId();
if (!liveIn.bitAt(workId)) if (!liveIn.bitAt(workId)) {
continue; continue;
}
RegGroup group = workReg->group(); RegGroup group = workReg->group();
if (_curAssignment.workToPhysId(group, workId) != RAAssignment::kPhysNone) if (_curAssignment.workToPhysId(group, workId) != RAAssignment::kPhysNone) {
continue; continue;
}
RegMask allocableRegs = _availableRegs[group] & ~_curAssignment.assigned(group); RegMask allocableRegs = _availableRegs[group] & ~_curAssignment.assigned(group);
if (iter == 0) { if (iter == 0) {
@@ -109,8 +113,9 @@ Error RALocalAllocator::makeInitialAssignment() noexcept {
// This register will definitely need a stack slot, so create it now and also assign `argIndex` // This register will definitely need a stack slot, so create it now and also assign `argIndex`
// to it. We will patch `_argsAssignment` later after RAStackAllocator finishes. // to it. We will patch `_argsAssignment` later after RAStackAllocator finishes.
RAStackSlot* slot = _pass->getOrCreateStackSlot(workReg); RAStackSlot* slot = _pass->getOrCreateStackSlot(workReg);
if (ASMJIT_UNLIKELY(!slot)) if (ASMJIT_UNLIKELY(!slot)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
// This means STACK_ARG may be moved to STACK. // This means STACK_ARG may be moved to STACK.
workReg->addFlags(RAWorkRegFlags::kStackArgToStack); workReg->addFlags(RAWorkRegFlags::kStackArgToStack);
@@ -182,8 +187,9 @@ Error RALocalAllocator::switchToAssignment(PhysToWorkMap* dstPhysToWorkMap, cons
while (affectedRegs) { while (affectedRegs) {
if (++runId == 2) { if (++runId == 2) {
if (!tryMode) if (!tryMode) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
// Stop in `tryMode` if we haven't done anything in the past two rounds. // Stop in `tryMode` if we haven't done anything in the past two rounds.
break; break;
@@ -204,12 +210,14 @@ Error RALocalAllocator::switchToAssignment(PhysToWorkMap* dstPhysToWorkMap, cons
// Both assigned. // Both assigned.
if (curWorkId != dstWorkId) { if (curWorkId != dstWorkId) {
// Wait a bit if this is the first run; we may avoid this if `curWorkId` moves out. // Wait a bit if this is the first run; we may avoid this if `curWorkId` moves out.
if (runId <= 0) if (runId <= 0) {
continue; continue;
}
uint32_t altPhysId = cur.workToPhysId(group, dstWorkId); uint32_t altPhysId = cur.workToPhysId(group, dstWorkId);
if (altPhysId == RAAssignment::kPhysNone) if (altPhysId == RAAssignment::kPhysNone) {
continue; continue;
}
// Reset as we will do some changes to the current assignment. // Reset as we will do some changes to the current assignment.
runId = -1; runId = -1;
@@ -220,14 +228,15 @@ Error RALocalAllocator::switchToAssignment(PhysToWorkMap* dstPhysToWorkMap, cons
else { else {
// SPILL the reg if it's not dirty in DST, otherwise try to MOVE. // SPILL the reg if it's not dirty in DST, otherwise try to MOVE.
if (!cur.isPhysDirty(group, physId)) { if (!cur.isPhysDirty(group, physId)) {
ASMJIT_PROPAGATE(onKillReg(group, curWorkId, physId)); onKillReg(group, curWorkId, physId);
} }
else { else {
RegMask allocableRegs = _pass->_availableRegs[group] & ~cur.assigned(group); RegMask allocableRegs = _pass->_availableRegs[group] & ~cur.assigned(group);
// If possible don't conflict with assigned regs at DST. // If possible don't conflict with assigned regs at DST.
if (allocableRegs & ~dst.assigned(group)) if (allocableRegs & ~dst.assigned(group)) {
allocableRegs &= ~dst.assigned(group); allocableRegs &= ~dst.assigned(group);
}
if (allocableRegs) { if (allocableRegs) {
// MOVE is possible, thus preferred. // MOVE is possible, thus preferred.
@@ -251,8 +260,9 @@ Cleared:
// DST assigned, CUR unassigned. // DST assigned, CUR unassigned.
uint32_t altPhysId = cur.workToPhysId(group, dstWorkId); uint32_t altPhysId = cur.workToPhysId(group, dstWorkId);
if (altPhysId == RAAssignment::kPhysNone) { if (altPhysId == RAAssignment::kPhysNone) {
if (liveIn.bitAt(dstWorkId)) if (liveIn.bitAt(dstWorkId)) {
willLoadRegs |= physMask; // Scheduled for `onLoadReg()`. willLoadRegs |= physMask; // Scheduled for `onLoadReg()`.
}
affectedRegs &= ~physMask; // Unaffected from now. affectedRegs &= ~physMask; // Unaffected from now.
continue; continue;
} }
@@ -267,10 +277,12 @@ Cleared:
// If `dstReadOnly` is true it means that the block was already processed and we cannot change from // If `dstReadOnly` is true it means that the block was already processed and we cannot change from
// CLEAN to DIRTY. In that case the register has to be saved as it cannot enter the block DIRTY. // CLEAN to DIRTY. In that case the register has to be saved as it cannot enter the block DIRTY.
if (dstReadOnly) if (dstReadOnly) {
ASMJIT_PROPAGATE(onSaveReg(group, dstWorkId, physId)); ASMJIT_PROPAGATE(onSaveReg(group, dstWorkId, physId));
else }
else {
dst.makeDirty(group, dstWorkId, physId); dst.makeDirty(group, dstWorkId, physId);
}
} }
else { else {
// DST dirty, CUR not dirty (the assert is just to visualize the condition). // DST dirty, CUR not dirty (the assert is just to visualize the condition).
@@ -306,8 +318,9 @@ Cleared:
ASMJIT_ASSERT(liveIn.bitAt(workId) == true); ASMJIT_ASSERT(liveIn.bitAt(workId) == true);
ASMJIT_PROPAGATE(onLoadReg(group, workId, physId)); ASMJIT_PROPAGATE(onLoadReg(group, workId, physId));
if (dst.isPhysDirty(group, physId)) if (dst.isPhysDirty(group, physId)) {
cur.makeDirty(group, workId, physId); cur.makeDirty(group, workId, physId);
}
ASMJIT_ASSERT(dst.isPhysDirty(group, physId) == cur.isPhysDirty(group, physId)); ASMJIT_ASSERT(dst.isPhysDirty(group, physId) == cur.isPhysDirty(group, physId));
} }
else { else {
@@ -413,19 +426,22 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
if (tiedReg->hasAnyConsecutiveFlag()) { if (tiedReg->hasAnyConsecutiveFlag()) {
uint32_t consecutiveOffset = tiedReg->isLeadConsecutive() ? uint32_t(0) : tiedReg->consecutiveData(); uint32_t consecutiveOffset = tiedReg->isLeadConsecutive() ? uint32_t(0) : tiedReg->consecutiveData();
if (ASMJIT_UNLIKELY(Support::bitTest(consecutiveMask, consecutiveOffset))) if (ASMJIT_UNLIKELY(Support::bitTest(consecutiveMask, consecutiveOffset))) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
consecutiveMask |= Support::bitMask(consecutiveOffset); consecutiveMask |= Support::bitMask(consecutiveOffset);
consecutiveRegs[consecutiveOffset] = tiedReg; consecutiveRegs[consecutiveOffset] = tiedReg;
} }
// Add OUT and KILL to `outPending` for CLOBBERing and/or OUT assignment. // Add OUT and KILL to `outPending` for CLOBBERing and/or OUT assignment.
if (tiedReg->isOutOrKill()) if (tiedReg->isOutOrKill()) {
outTiedRegs[outTiedCount++] = tiedReg; outTiedRegs[outTiedCount++] = tiedReg;
}
if (tiedReg->isDuplicate()) if (tiedReg->isDuplicate()) {
dupTiedRegs[dupTiedCount++] = tiedReg; dupTiedRegs[dupTiedCount++] = tiedReg;
}
if (!tiedReg->isUse()) { if (!tiedReg->isUse()) {
tiedReg->markUseDone(); tiedReg->markUseDone();
@@ -434,8 +450,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
} }
// Don't assign anything here if this is a consecutive USE - we will handle this in STEP 2 instead. // Don't assign anything here if this is a consecutive USE - we will handle this in STEP 2 instead.
if (tiedReg->isUseConsecutive()) if (tiedReg->isUseConsecutive()) {
continue; continue;
}
uint32_t workId = tiedReg->workId(); uint32_t workId = tiedReg->workId();
uint32_t assignedId = _curAssignment.workToPhysId(group, workId); uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
@@ -450,8 +467,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
if (assignedId == tiedReg->useId()) { if (assignedId == tiedReg->useId()) {
// If the register is already allocated in this one, mark it done and continue. // If the register is already allocated in this one, mark it done and continue.
tiedReg->markUseDone(); tiedReg->markUseDone();
if (tiedReg->isWrite()) if (tiedReg->isWrite()) {
_curAssignment.makeDirty(group, workId, assignedId); _curAssignment.makeDirty(group, workId, assignedId);
}
usePending--; usePending--;
willUse |= useMask; willUse |= useMask;
} }
@@ -467,8 +485,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
if ((allocableRegs & ~willUse) & assignedMask) { if ((allocableRegs & ~willUse) & assignedMask) {
tiedReg->setUseId(assignedId); tiedReg->setUseId(assignedId);
tiedReg->markUseDone(); tiedReg->markUseDone();
if (tiedReg->isWrite()) if (tiedReg->isWrite()) {
_curAssignment.makeDirty(group, workId, assignedId); _curAssignment.makeDirty(group, workId, assignedId);
}
usePending--; usePending--;
willUse |= assignedMask; willUse |= assignedMask;
} }
@@ -488,8 +507,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
uint32_t consecutiveCount = 0; uint32_t consecutiveCount = 0;
if (consecutiveMask) { if (consecutiveMask) {
if ((consecutiveMask & (consecutiveMask + 1u)) != 0) if ((consecutiveMask & (consecutiveMask + 1u)) != 0) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
// The count of trailing ones is the count of consecutive registers. There cannot be a gap. // The count of trailing ones is the count of consecutive registers. There cannot be a gap.
consecutiveCount = Support::ctz(~consecutiveMask); consecutiveCount = Support::ctz(~consecutiveMask);
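The validity check above is a standard bit trick: a mask whose set bits form one contiguous run starting at bit 0 satisfies mask & (mask + 1) == 0, and the length of that run equals the number of trailing ones, i.e. ctz(~mask). A small standalone illustration (plain C++, using a GCC/Clang builtin in place of Support::ctz):

#include <cassert>
#include <cstdint>

// True if `mask` is a contiguous run of ones starting at bit 0 (e.g. 0b0111).
static bool isTrailingOnesRun(uint32_t mask) {
  return mask != 0u && (mask & (mask + 1u)) == 0u;
}

// Length of that run; __builtin_ctz stands in for Support::ctz here.
static uint32_t trailingOnesCount(uint32_t mask) {
  return uint32_t(__builtin_ctz(~mask));
}

int main() {
  assert(isTrailingOnesRun(0b0111u) && trailingOnesCount(0b0111u) == 3u); // three consecutive regs
  assert(!isTrailingOnesRun(0b0101u));                                    // a gap -> invalid state
  return 0;
}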
@@ -505,8 +525,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
uint32_t assignments[kMaxConsecutiveRegs]; uint32_t assignments[kMaxConsecutiveRegs];
for (i = 0; i < consecutiveCount; i++) for (i = 0; i < consecutiveCount; i++) {
assignments[i] = _curAssignment.workToPhysId(group, consecutiveRegs[i]->workId()); assignments[i] = _curAssignment.workToPhysId(group, consecutiveRegs[i]->workId());
}
Support::BitWordIterator<uint32_t> it(lead->useRegMask()); Support::BitWordIterator<uint32_t> it(lead->useRegMask());
while (it.hasNext()) { while (it.hasNext()) {
@@ -533,8 +554,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
} }
} }
if (bestLeadReg == 0xFFFFFFFF) if (bestLeadReg == 0xFFFFFFFF) {
return DebugUtils::errored(kErrorConsecutiveRegsAllocation); return DebugUtils::errored(kErrorConsecutiveRegsAllocation);
}
for (i = 0; i < consecutiveCount; i++) { for (i = 0; i < consecutiveCount; i++) {
uint32_t consecutiveIndex = bestLeadReg + i; uint32_t consecutiveIndex = bestLeadReg + i;
@@ -550,8 +572,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
if (assignedId == consecutiveIndex) { if (assignedId == consecutiveIndex) {
// If the register is already allocated in this one, mark it done and continue. // If the register is already allocated in this one, mark it done and continue.
tiedReg->markUseDone(); tiedReg->markUseDone();
if (tiedReg->isWrite()) if (tiedReg->isWrite()) {
_curAssignment.makeDirty(group, workId, assignedId); _curAssignment.makeDirty(group, workId, assignedId);
}
usePending--; usePending--;
willUse |= useMask; willUse |= useMask;
} }
@@ -578,8 +601,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
RATiedReg* tiedReg = &tiedRegs[i]; RATiedReg* tiedReg = &tiedRegs[i];
if (tiedReg->isUseDone()) if (tiedReg->isUseDone()) {
continue; continue;
}
uint32_t workId = tiedReg->workId(); uint32_t workId = tiedReg->workId();
uint32_t assignedId = _curAssignment.workToPhysId(group, workId); uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
@@ -630,8 +654,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
if (!(liveRegs & useMask)) { if (!(liveRegs & useMask)) {
ASMJIT_PROPAGATE(onMoveReg(group, workId, useId, assignedId)); ASMJIT_PROPAGATE(onMoveReg(group, workId, useId, assignedId));
tiedReg->markUseDone(); tiedReg->markUseDone();
if (tiedReg->isWrite()) if (tiedReg->isWrite()) {
_curAssignment.makeDirty(group, workId, useId); _curAssignment.makeDirty(group, workId, useId);
}
usePending--; usePending--;
} }
} }
@@ -640,8 +665,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
if (!(liveRegs & useMask)) { if (!(liveRegs & useMask)) {
ASMJIT_PROPAGATE(onLoadReg(group, workId, useId)); ASMJIT_PROPAGATE(onLoadReg(group, workId, useId));
tiedReg->markUseDone(); tiedReg->markUseDone();
if (tiedReg->isWrite()) if (tiedReg->isWrite()) {
_curAssignment.makeDirty(group, workId, useId); _curAssignment.makeDirty(group, workId, useId);
}
usePending--; usePending--;
} }
} }
@@ -743,8 +769,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
continue; continue;
} }
if (!mustSwap) if (!mustSwap) {
continue; continue;
}
// Only branched here if the previous iteration did nothing. This is essentially a SWAP operation without // Only branched here if the previous iteration did nothing. This is essentially a SWAP operation without
// having a dedicated instruction for that purpose (vector registers, etc...). The simplest way to handle // having a dedicated instruction for that purpose (vector registers, etc...). The simplest way to handle
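The comment above describes synthesizing a swap from plain moves: when no register-to-register swap instruction exists for the register class involved, the exchange goes through a temporary location. A toy illustration of the idea (not the exact sequence the allocator emits):

#include <array>
#include <cstddef>
#include <cstdint>

// Toy model: "physical registers" are array slots. Without a dedicated swap
// instruction, exchanging two of them takes three moves through a temporary
// location (a scratch register or a spill slot in a real allocator).
static void swapViaTemporary(std::array<uint64_t, 8>& physRegs, std::size_t a, std::size_t b) {
  uint64_t temporary = physRegs[a];  // move a -> temporary
  physRegs[a] = physRegs[b];         // move b -> a
  physRegs[b] = temporary;           // move temporary -> b
}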
@@ -805,7 +832,7 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
// Must check if it's allocated, as KILL can be related to OUT (like KILL immediately after OUT, which could // Must check if it's allocated, as KILL can be related to OUT (like KILL immediately after OUT, which could
// mean the register is not assigned). // mean the register is not assigned).
if (physId != RAAssignment::kPhysNone) { if (physId != RAAssignment::kPhysNone) {
ASMJIT_PROPAGATE(onKillReg(group, workId, physId)); onKillReg(group, workId, physId);
willOut &= ~Support::bitMask(physId); willOut &= ~Support::bitMask(physId);
} }
@@ -851,7 +878,7 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
if (dstId == srcId) { if (dstId == srcId) {
continue; continue;
} }
_pass->emitMove(workId, dstId, srcId); ASMJIT_PROPAGATE(_pass->emitMove(workId, dstId, srcId));
} }
} }
@@ -947,18 +974,20 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
// Allocate OUT registers. // Allocate OUT registers.
for (i = 0; i < outTiedCount; i++) { for (i = 0; i < outTiedCount; i++) {
RATiedReg* tiedReg = outTiedRegs[i]; RATiedReg* tiedReg = outTiedRegs[i];
if (!tiedReg->isOut()) if (!tiedReg->isOut()) {
continue; continue;
}
RegMask avoidOut = avoidRegs; RegMask avoidOut = avoidRegs;
if (tiedReg->isUnique()) if (tiedReg->isUnique()) {
avoidOut |= willUse; avoidOut |= willUse;
}
uint32_t workId = tiedReg->workId(); uint32_t workId = tiedReg->workId();
uint32_t assignedId = _curAssignment.workToPhysId(group, workId); uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
if (assignedId != RAAssignment::kPhysNone) { if (assignedId != RAAssignment::kPhysNone) {
ASMJIT_PROPAGATE(onKillReg(group, workId, assignedId)); onKillReg(group, workId, assignedId);
} }
uint32_t physId = tiedReg->outId(); uint32_t physId = tiedReg->outId();
@@ -1087,16 +1116,18 @@ Error RALocalAllocator::allocJumpTable(InstNode* node, const RABlocks& targets,
// TODO: Do we really need to use `cont`? // TODO: Do we really need to use `cont`?
DebugUtils::unused(cont); DebugUtils::unused(cont);
if (targets.empty()) if (targets.empty()) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
// The cursor must point to the previous instruction for a possible instruction insertion. // The cursor must point to the previous instruction for a possible instruction insertion.
_cc->_setCursor(node->prev()); _cc->_setCursor(node->prev());
// All `targets` should have the same sharedAssignmentId, so we just read the first. // All `targets` should have the same sharedAssignmentId, so we just read the first.
RABlock* anyTarget = targets[0]; RABlock* anyTarget = targets[0];
if (!anyTarget->hasSharedAssignmentId()) if (!anyTarget->hasSharedAssignmentId()) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
RASharedAssignment& sharedAssignment = _pass->_sharedAssignments[anyTarget->sharedAssignmentId()]; RASharedAssignment& sharedAssignment = _pass->_sharedAssignments[anyTarget->sharedAssignmentId()];

View File

@@ -80,30 +80,47 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAWorkReg* workRegById(uint32_t workId) const noexcept { return _pass->workRegById(workId); } ASMJIT_INLINE_NODEBUG RAWorkReg* workRegById(uint32_t workId) const noexcept { return _pass->workRegById(workId); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG PhysToWorkMap* physToWorkMap() const noexcept { return _curAssignment.physToWorkMap(); } ASMJIT_INLINE_NODEBUG PhysToWorkMap* physToWorkMap() const noexcept { return _curAssignment.physToWorkMap(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG WorkToPhysMap* workToPhysMap() const noexcept { return _curAssignment.workToPhysMap(); } ASMJIT_INLINE_NODEBUG WorkToPhysMap* workToPhysMap() const noexcept { return _curAssignment.workToPhysMap(); }
//! Returns the currently processed block. //! Returns the currently processed block.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RABlock* block() const noexcept { return _block; } ASMJIT_INLINE_NODEBUG RABlock* block() const noexcept { return _block; }
//! Sets the currently processed block. //! Sets the currently processed block.
ASMJIT_INLINE_NODEBUG void setBlock(RABlock* block) noexcept { _block = block; } ASMJIT_INLINE_NODEBUG void setBlock(RABlock* block) noexcept { _block = block; }
//! Returns the currently processed `InstNode`. //! Returns the currently processed `InstNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstNode* node() const noexcept { return _node; } ASMJIT_INLINE_NODEBUG InstNode* node() const noexcept { return _node; }
//! Returns the currently processed `RAInst`. //! Returns the currently processed `RAInst`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAInst* raInst() const noexcept { return _raInst; } ASMJIT_INLINE_NODEBUG RAInst* raInst() const noexcept { return _raInst; }
//! Returns all tied regs as `RATiedReg` array. //! Returns all tied regs as `RATiedReg` array.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs() const noexcept { return _raInst->tiedRegs(); } ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs() const noexcept { return _raInst->tiedRegs(); }
//! Returns tied registers grouped by the given `group`. //! Returns tied registers grouped by the given `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs(RegGroup group) const noexcept { return _raInst->tiedRegs(group); } ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs(RegGroup group) const noexcept { return _raInst->tiedRegs(group); }
//! Returns count of all TiedRegs used by the instruction. //! Returns count of all TiedRegs used by the instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t tiedCount() const noexcept { return _tiedTotal; } ASMJIT_INLINE_NODEBUG uint32_t tiedCount() const noexcept { return _tiedTotal; }
//! Returns count of TiedRegs used by the given register `group`. //! Returns count of TiedRegs used by the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t tiedCount(RegGroup group) const noexcept { return _tiedCount.get(group); } ASMJIT_INLINE_NODEBUG uint32_t tiedCount(RegGroup group) const noexcept { return _tiedCount.get(group); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isGroupUsed(RegGroup group) const noexcept { return _tiedCount[group] != 0; } ASMJIT_INLINE_NODEBUG bool isGroupUsed(RegGroup group) const noexcept { return _tiedCount[group] != 0; }
//! \} //! \}
@@ -111,8 +128,10 @@ public:
//! \name Assignment //! \name Assignment
//! \{ //! \{
[[nodiscard]]
Error makeInitialAssignment() noexcept; Error makeInitialAssignment() noexcept;
[[nodiscard]]
Error replaceAssignment(const PhysToWorkMap* physToWorkMap) noexcept; Error replaceAssignment(const PhysToWorkMap* physToWorkMap) noexcept;
//! Switch to the given assignment by reassigning all registers and emitting code that reassigns them. //! Switch to the given assignment by reassigning all registers and emitting code that reassigns them.
@@ -121,12 +140,15 @@ public:
//! If `tryMode` is true then the final assignment doesn't have to be exactly the same as specified by `dstPhysToWorkMap` //! If `tryMode` is true then the final assignment doesn't have to be exactly the same as specified by `dstPhysToWorkMap`
//! and `dstWorkToPhysMap`. This mode is only used before conditional jumps that already have an assignment to generate //! and `dstWorkToPhysMap`. This mode is only used before conditional jumps that already have an assignment to generate
//! a code sequence that is always executed regardless of the flow. //! a code sequence that is always executed regardless of the flow.
[[nodiscard]]
Error switchToAssignment(PhysToWorkMap* dstPhysToWorkMap, const ZoneBitVector& liveIn, bool dstReadOnly, bool tryMode) noexcept; Error switchToAssignment(PhysToWorkMap* dstPhysToWorkMap, const ZoneBitVector& liveIn, bool dstReadOnly, bool tryMode) noexcept;
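A way to picture `tryMode`: switching assignments normally emits whatever moves, loads, and spills are required to reach the destination mapping exactly, whereas the try variant only performs the reconciliation that is cheap to do and leaves the rest untouched. A heavily simplified, self-contained toy model of that difference (names and data layout are invented for illustration and do not mirror asmjit's PhysToWorkMap):

#include <array>
#include <cstddef>
#include <utility>

// Toy model, not asmjit's algorithm: each slot holds the id of the value currently
// living in that physical register, -1 meaning empty. `dst` is assumed to hold each
// value id at most once. The goal is to make `cur` match `dst`; with `tryMode` the
// reconciliation is best-effort and slots that would need a load or spill stay as-is.
using Assignment = std::array<int, 4>;

static void switchToward(Assignment& cur, const Assignment& dst, bool tryMode) {
  for (std::size_t physId = 0; physId < cur.size(); physId++) {
    int want = dst[physId];
    if (cur[physId] == want) {
      continue;                                 // already matches the target assignment
    }
    if (want < 0) {
      if (!tryMode) {
        cur[physId] = -1;                       // target wants the slot free (kill/spill)
      }
      continue;
    }
    for (std::size_t other = 0; other < cur.size(); other++) {
      if (other != physId && cur[other] == want) {
        std::swap(cur[physId], cur[other]);     // stands in for an emitted move or swap
        break;
      }
    }
    if (cur[physId] != want && !tryMode) {
      cur[physId] = want;                       // stands in for a load from the value's spill slot
    }
  }
}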
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Error spillRegsBeforeEntry(RABlock* block) noexcept { ASMJIT_INLINE_NODEBUG Error spillRegsBeforeEntry(RABlock* block) noexcept {
return spillScratchGpRegsBeforeEntry(block->entryScratchGpRegs()); return spillScratchGpRegsBeforeEntry(block->entryScratchGpRegs());
} }
[[nodiscard]]
Error spillScratchGpRegsBeforeEntry(uint32_t scratchRegs) noexcept; Error spillScratchGpRegsBeforeEntry(uint32_t scratchRegs) noexcept;
//! \} //! \}
@@ -134,10 +156,16 @@ public:
//! \name Allocation //! \name Allocation
//! \{ //! \{
[[nodiscard]]
Error allocInst(InstNode* node) noexcept; Error allocInst(InstNode* node) noexcept;
[[nodiscard]]
Error spillAfterAllocation(InstNode* node) noexcept; Error spillAfterAllocation(InstNode* node) noexcept;
[[nodiscard]]
Error allocBranch(InstNode* node, RABlock* target, RABlock* cont) noexcept; Error allocBranch(InstNode* node, RABlock* target, RABlock* cont) noexcept;
[[nodiscard]]
Error allocJumpTable(InstNode* node, const RABlocks& targets, RABlock* cont) noexcept; Error allocJumpTable(InstNode* node, const RABlocks& targets, RABlock* cont) noexcept;
//! \} //! \}
@@ -150,11 +178,13 @@ public:
kCostOfDirtyFlag = kCostOfFrequency / 4 kCostOfDirtyFlag = kCostOfFrequency / 4
}; };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t costByFrequency(float freq) const noexcept { ASMJIT_INLINE_NODEBUG uint32_t costByFrequency(float freq) const noexcept {
return uint32_t(int32_t(freq * float(kCostOfFrequency))); return uint32_t(int32_t(freq * float(kCostOfFrequency)));
} }
ASMJIT_FORCE_INLINE uint32_t calculateSpillCost(RegGroup group, uint32_t workId, uint32_t assignedId) const noexcept { [[nodiscard]]
ASMJIT_INLINE uint32_t calculateSpillCost(RegGroup group, uint32_t workId, uint32_t assignedId) const noexcept {
RAWorkReg* workReg = workRegById(workId); RAWorkReg* workReg = workRegById(workId);
uint32_t cost = costByFrequency(workReg->liveStats().freq()); uint32_t cost = costByFrequency(workReg->liveStats().freq());
@@ -164,7 +194,8 @@ public:
return cost; return cost;
} }
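The cost model is straightforward arithmetic: the register's use frequency is scaled by kCostOfFrequency and truncated to an integer, and kCostOfDirtyFlag presumably adds a penalty when spilling would also require a store. A standalone sketch with an assumed scale (the real constant values live in RALocalAllocator and are not shown here):

#include <cstdint>

// Assumed values for illustration only; the real constants are defined in RALocalAllocator.
static constexpr uint32_t kCostOfFrequency = 1048576;
static constexpr uint32_t kCostOfDirtyFlag = kCostOfFrequency / 4;

static uint32_t costByFrequency(float freq) {
  return uint32_t(int32_t(freq * float(kCostOfFrequency)));
}

static uint32_t spillCost(float freq, bool isDirty) {
  uint32_t cost = costByFrequency(freq);   // e.g. freq 0.5 -> 524288 with the assumed scale
  if (isDirty) {
    cost += kCostOfDirtyFlag;              // a dirty register also pays for the store
  }
  return cost;
}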
ASMJIT_FORCE_INLINE uint32_t pickBestSuitableRegister(RegGroup group, RegMask allocableRegs) const noexcept { [[nodiscard]]
ASMJIT_INLINE uint32_t pickBestSuitableRegister(RegGroup group, RegMask allocableRegs) const noexcept {
// These are registers that must be preserved by the function itself. // These are registers that must be preserved by the function itself.
RegMask preservedRegs = _funcPreservedRegs[group]; RegMask preservedRegs = _funcPreservedRegs[group];
@@ -177,6 +208,7 @@ public:
} }
//! Decides on register assignment. //! Decides on register assignment.
[[nodiscard]]
uint32_t decideOnAssignment(RegGroup group, uint32_t workId, uint32_t assignedId, RegMask allocableRegs) const noexcept; uint32_t decideOnAssignment(RegGroup group, uint32_t workId, uint32_t assignedId, RegMask allocableRegs) const noexcept;
//! Decides on whether to MOVE or SPILL the given WorkReg, because it's allocated in a physical register that has //! Decides on whether to MOVE or SPILL the given WorkReg, because it's allocated in a physical register that has
@@ -185,9 +217,11 @@ public:
//! The function must return either `RAAssignment::kPhysNone`, which means that the WorkReg of `workId` should be //! The function must return either `RAAssignment::kPhysNone`, which means that the WorkReg of `workId` should be
//! spilled, or a valid physical register ID, which means that the register should be moved to that physical register //! spilled, or a valid physical register ID, which means that the register should be moved to that physical register
//! instead. //! instead.
[[nodiscard]]
uint32_t decideOnReassignment(RegGroup group, uint32_t workId, uint32_t assignedId, RegMask allocableRegs, RAInst* raInst) const noexcept; uint32_t decideOnReassignment(RegGroup group, uint32_t workId, uint32_t assignedId, RegMask allocableRegs, RAInst* raInst) const noexcept;
//! Decides on the best spill given a register mask `spillableRegs`. //! Decides on the best spill given a register mask `spillableRegs`.
[[nodiscard]]
uint32_t decideOnSpillFor(RegGroup group, uint32_t workId, RegMask spillableRegs, uint32_t* spillWorkId) const noexcept; uint32_t decideOnSpillFor(RegGroup group, uint32_t workId, RegMask spillableRegs, uint32_t* spillWorkId) const noexcept;
//! \} //! \}
@@ -197,6 +231,7 @@ public:
//! Emits a move between a destination and source register, and fixes the //! Emits a move between a destination and source register, and fixes the
//! register assignment. //! register assignment.
[[nodiscard]]
inline Error onMoveReg(RegGroup group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept { inline Error onMoveReg(RegGroup group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
if (dstPhysId == srcPhysId) { if (dstPhysId == srcPhysId) {
return kErrorOk; return kErrorOk;
@@ -209,6 +244,7 @@ public:
//! Emits a swap between two physical registers and fixes their assignment. //! Emits a swap between two physical registers and fixes their assignment.
//! //!
//! \note The target must support this operation, otherwise this would ASSERT. //! \note The target must support this operation, otherwise this would ASSERT.
[[nodiscard]]
inline Error onSwapReg(RegGroup group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept { inline Error onSwapReg(RegGroup group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
_curAssignment.swap(group, aWorkId, aPhysId, bWorkId, bPhysId); _curAssignment.swap(group, aWorkId, aPhysId, bWorkId, bPhysId);
return _pass->emitSwap(aWorkId, aPhysId, bWorkId, bPhysId); return _pass->emitSwap(aWorkId, aPhysId, bWorkId, bPhysId);
@@ -216,6 +252,7 @@ public:
//! Emits a load from [VirtReg/WorkReg]'s spill slot to a physical register //! Emits a load from [VirtReg/WorkReg]'s spill slot to a physical register
//! and makes it assigned and clean. //! and makes it assigned and clean.
[[nodiscard]]
inline Error onLoadReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept { inline Error onLoadReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.assign(group, workId, physId, RAAssignment::kClean); _curAssignment.assign(group, workId, physId, RAAssignment::kClean);
return _pass->emitLoad(workId, physId); return _pass->emitLoad(workId, physId);
@@ -223,6 +260,7 @@ public:
//! Emits a save of a physical register to a [VirtReg/WorkReg]'s spill slot, //! Emits a save of a physical register to a [VirtReg/WorkReg]'s spill slot,
//! keeps it assigned, and makes it clean. //! keeps it assigned, and makes it clean.
[[nodiscard]]
inline Error onSaveReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept { inline Error onSaveReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
ASMJIT_ASSERT(_curAssignment.workToPhysId(group, workId) == physId); ASMJIT_ASSERT(_curAssignment.workToPhysId(group, workId) == physId);
ASMJIT_ASSERT(_curAssignment.physToWorkId(group, physId) == workId); ASMJIT_ASSERT(_curAssignment.physToWorkId(group, physId) == workId);
@@ -232,26 +270,29 @@ public:
} }
//! Assigns a register; its content is undefined at this point. //! Assigns a register; its content is undefined at this point.
[[nodiscard]]
inline Error onAssignReg(RegGroup group, uint32_t workId, uint32_t physId, bool dirty) noexcept { inline Error onAssignReg(RegGroup group, uint32_t workId, uint32_t physId, bool dirty) noexcept {
_curAssignment.assign(group, workId, physId, dirty); _curAssignment.assign(group, workId, physId, dirty);
return kErrorOk; return kErrorOk;
} }
//! Spills a variable/register, saves the content to the memory-home if modified. //! Spills a variable/register, saves the content to the memory-home if modified.
[[nodiscard]]
inline Error onSpillReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept { inline Error onSpillReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
if (_curAssignment.isPhysDirty(group, physId)) if (_curAssignment.isPhysDirty(group, physId))
ASMJIT_PROPAGATE(onSaveReg(group, workId, physId)); ASMJIT_PROPAGATE(onSaveReg(group, workId, physId));
return onKillReg(group, workId, physId); onKillReg(group, workId, physId);
return kErrorOk;
} }
[[nodiscard]]
inline Error onDirtyReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept { inline Error onDirtyReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.makeDirty(group, workId, physId); _curAssignment.makeDirty(group, workId, physId);
return kErrorOk; return kErrorOk;
} }
inline Error onKillReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept { inline void onKillReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.unassign(group, workId, physId); _curAssignment.unassign(group, workId, physId);
return kErrorOk;
} }
//! \} //! \}

View File

@@ -251,8 +251,9 @@ RABlock* BaseRAPass::newBlockOrExistingAt(LabelNode* cbLabel, BaseNode** stopped
if (block) { if (block) {
// The exit node always has a block associated with it. If we got here it means that the `cbLabel` passed in // The exit node always has a block associated with it. If we got here it means that the `cbLabel` passed in
// is after the end of the function and cannot be merged with the function exit block. // is after the end of the function and cannot be merged with the function exit block.
if (node == func->exitNode()) if (node == func->exitNode()) {
block = nullptr; block = nullptr;
}
break; break;
} }
@@ -273,8 +274,9 @@ RABlock* BaseRAPass::newBlockOrExistingAt(LabelNode* cbLabel, BaseNode** stopped
if (!block) { if (!block) {
block = newBlock(); block = newBlock();
if (ASMJIT_UNLIKELY(!block)) if (ASMJIT_UNLIKELY(!block)) {
return nullptr; return nullptr;
}
} }
cbLabel->setPassData<RABlock>(block); cbLabel->setPassData<RABlock>(block);
@@ -402,13 +404,15 @@ Error BaseRAPass::buildCFGViews() noexcept {
for (;;) { for (;;) {
for (;;) { for (;;) {
if (i >= current->successors().size()) if (i >= current->successors().size()) {
break; break;
}
// Skip if already visited. // Skip if already visited.
RABlock* child = current->successors()[i++]; RABlock* child = current->successors()[i++];
if (visited.bitAt(child->blockId())) if (visited.bitAt(child->blockId())) {
continue; continue;
}
// Mark as visited to prevent visiting the same block multiple times. // Mark as visited to prevent visiting the same block multiple times.
visited.setBit(child->blockId(), true); visited.setBit(child->blockId(), true);
@@ -423,8 +427,9 @@ Error BaseRAPass::buildCFGViews() noexcept {
current->_povOrder = _pov.size(); current->_povOrder = _pov.size();
_pov.appendUnsafe(current); _pov.appendUnsafe(current);
if (stack.empty()) if (stack.empty()) {
break; break;
}
RABlockVisitItem top = stack.pop(); RABlockVisitItem top = stack.pop();
current = top.block(); current = top.block();
@@ -454,7 +459,7 @@ Error BaseRAPass::buildCFGViews() noexcept {
// BaseRAPass - CFG - Dominators // BaseRAPass - CFG - Dominators
// ============================= // =============================
static ASMJIT_FORCE_INLINE RABlock* intersectBlocks(RABlock* b1, RABlock* b2) noexcept { static ASMJIT_INLINE RABlock* intersectBlocks(RABlock* b1, RABlock* b2) noexcept {
while (b1 != b2) { while (b1 != b2) {
while (b2->povOrder() > b1->povOrder()) b1 = b1->iDom(); while (b2->povOrder() > b1->povOrder()) b1 = b1->iDom();
while (b1->povOrder() > b2->povOrder()) b2 = b2->iDom(); while (b1->povOrder() > b2->povOrder()) b2 = b2->iDom();
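intersectBlocks is the two-finger intersection step from the Cooper, Harvey and Kennedy dominance algorithm: both blocks climb their immediate-dominator chains, always advancing the one with the lower post-order index up its iDom chain (toward the entry, which has the highest index), until they meet at the nearest common dominator. A standalone sketch over a minimal block type whose fields mirror the iDom()/povOrder() accessors used above:

#include <cstdint>

// Minimal stand-in for RABlock: only the two fields the intersection needs.
struct Block {
  Block* idom;    // immediate dominator (the entry block points to itself)
  uint32_t pov;   // post-order visit index (the entry block has the highest)
};

// Two-finger intersection: climb the iDom chains until both fingers meet.
// Advancing the block with the smaller post-order index moves it toward the entry.
static Block* intersect(Block* b1, Block* b2) {
  while (b1 != b2) {
    while (b1->pov < b2->pov) b1 = b1->idom;
    while (b2->pov < b1->pov) b2 = b2->idom;
  }
  return b1;
}

int main() {
  // Tiny diamond CFG: entry -> {left, right} -> merge, post-order indices assigned bottom-up.
  Block entry{nullptr, 3};
  entry.idom = &entry;
  Block left {&entry, 2};
  Block right{&entry, 1};
  Block merge{&entry, 0};
  return (intersect(&left, &right) == &entry && intersect(&merge, &left) == &entry) ? 0 : 1;
}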
@@ -469,8 +474,9 @@ Error BaseRAPass::buildCFGDominators() noexcept {
ASMJIT_RA_LOG_FORMAT("[BuildCFGDominators]\n"); ASMJIT_RA_LOG_FORMAT("[BuildCFGDominators]\n");
#endif #endif
if (_blocks.empty()) if (_blocks.empty()) {
return kErrorOk; return kErrorOk;
}
RABlock* entryBlock = this->entryBlock(); RABlock* entryBlock = this->entryBlock();
entryBlock->setIDom(entryBlock); entryBlock->setIDom(entryBlock);
@@ -491,8 +497,9 @@ Error BaseRAPass::buildCFGDominators() noexcept {
uint32_t i = _pov.size(); uint32_t i = _pov.size();
while (i) { while (i) {
RABlock* block = _pov[--i]; RABlock* block = _pov[--i];
if (block == entryBlock) if (block == entryBlock) {
continue; continue;
}
RABlock* iDom = nullptr; RABlock* iDom = nullptr;
const RABlocks& preds = block->predecessors(); const RABlocks& preds = block->predecessors();
@@ -500,8 +507,9 @@ Error BaseRAPass::buildCFGDominators() noexcept {
uint32_t j = preds.size(); uint32_t j = preds.size();
while (j) { while (j) {
RABlock* p = preds[--j]; RABlock* p = preds[--j];
if (!p->iDom()) if (!p->iDom()) {
continue; continue;
}
iDom = !iDom ? p : intersectBlocks(iDom, p); iDom = !iDom ? p : intersectBlocks(iDom, p);
} }
@@ -525,12 +533,14 @@ bool BaseRAPass::_strictlyDominates(const RABlock* a, const RABlock* b) const no
// Nothing strictly dominates the entry block. // Nothing strictly dominates the entry block.
const RABlock* entryBlock = this->entryBlock(); const RABlock* entryBlock = this->entryBlock();
if (a == entryBlock) if (a == entryBlock) {
return false; return false;
}
const RABlock* iDom = b->iDom(); const RABlock* iDom = b->iDom();
while (iDom != a && iDom != entryBlock) while (iDom != a && iDom != entryBlock) {
iDom = iDom->iDom(); iDom = iDom->iDom();
}
return iDom != entryBlock; return iDom != entryBlock;
} }
@@ -540,16 +550,19 @@ const RABlock* BaseRAPass::_nearestCommonDominator(const RABlock* a, const RABlo
ASMJIT_ASSERT(b != nullptr); // called, as both `a` and `b` must be valid blocks. ASMJIT_ASSERT(b != nullptr); // called, as both `a` and `b` must be valid blocks.
ASMJIT_ASSERT(a != b); // Checked by `dominates()` and `properlyDominates()`. ASMJIT_ASSERT(a != b); // Checked by `dominates()` and `properlyDominates()`.
if (a == b) if (a == b) {
return a; return a;
}
// If `a` strictly dominates `b` then `a` is the nearest common dominator. // If `a` strictly dominates `b` then `a` is the nearest common dominator.
if (_strictlyDominates(a, b)) if (_strictlyDominates(a, b)) {
return a; return a;
}
// If `b` strictly dominates `a` then `b` is the nearest common dominator. // If `b` strictly dominates `a` then `b` is the nearest common dominator.
if (_strictlyDominates(b, a)) if (_strictlyDominates(b, a)) {
return b; return b;
}
const RABlock* entryBlock = this->entryBlock(); const RABlock* entryBlock = this->entryBlock();
uint64_t timestamp = nextTimestamp(); uint64_t timestamp = nextTimestamp();
@@ -564,8 +577,9 @@ const RABlock* BaseRAPass::_nearestCommonDominator(const RABlock* a, const RABlo
// Check all B's dominators against marked dominators of A. // Check all B's dominators against marked dominators of A.
block = b->iDom(); block = b->iDom();
while (block != entryBlock) { while (block != entryBlock) {
if (block->hasTimestamp(timestamp)) if (block->hasTimestamp(timestamp)) {
return block; return block;
}
block = block->iDom(); block = block->iDom();
} }
@@ -580,8 +594,9 @@ Error BaseRAPass::removeUnreachableCode() noexcept {
uint32_t numReachableBlocks = reachableBlockCount(); uint32_t numReachableBlocks = reachableBlockCount();
// All reachable -> nothing to do. // All reachable -> nothing to do.
if (numAllBlocks == numReachableBlocks) if (numAllBlocks == numReachableBlocks) {
return kErrorOk; return kErrorOk;
}
#ifndef ASMJIT_NO_LOGGING #ifndef ASMJIT_NO_LOGGING
StringTmp<256> sb; StringTmp<256> sb;
@@ -591,8 +606,9 @@ Error BaseRAPass::removeUnreachableCode() noexcept {
for (uint32_t i = 0; i < numAllBlocks; i++) { for (uint32_t i = 0; i < numAllBlocks; i++) {
RABlock* block = _blocks[i]; RABlock* block = _blocks[i];
if (block->isReachable()) if (block->isReachable()) {
continue; continue;
}
ASMJIT_RA_LOG_FORMAT(" Removing code from unreachable block {%u}\n", i); ASMJIT_RA_LOG_FORMAT(" Removing code from unreachable block {%u}\n", i);
BaseNode* first = block->first(); BaseNode* first = block->first();
@@ -632,22 +648,26 @@ Error BaseRAPass::removeUnreachableCode() noexcept {
} }
BaseNode* BaseRAPass::findSuccessorStartingAt(BaseNode* node) noexcept { BaseNode* BaseRAPass::findSuccessorStartingAt(BaseNode* node) noexcept {
while (node && (node->isInformative() || node->hasNoEffect())) while (node && (node->isInformative() || node->hasNoEffect())) {
node = node->next(); node = node->next();
}
return node; return node;
} }
bool BaseRAPass::isNextTo(BaseNode* node, BaseNode* target) noexcept { bool BaseRAPass::isNextTo(BaseNode* node, BaseNode* target) noexcept {
for (;;) { for (;;) {
node = node->next(); node = node->next();
if (node == target) if (node == target) {
return true; return true;
}
if (!node) if (!node) {
return false; return false;
}
if (node->isCode() || node->isData()) if (node->isCode() || node->isData()) {
return false; return false;
}
} }
} }
@@ -668,12 +688,14 @@ Error BaseRAPass::_asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept {
ASMJIT_PROPAGATE(wRegsByGroup.willGrow(allocator())); ASMJIT_PROPAGATE(wRegsByGroup.willGrow(allocator()));
RAWorkReg* wReg = zone()->newT<RAWorkReg>(vReg, wRegs.size()); RAWorkReg* wReg = zone()->newT<RAWorkReg>(vReg, wRegs.size());
if (ASMJIT_UNLIKELY(!wReg)) if (ASMJIT_UNLIKELY(!wReg)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
vReg->setWorkReg(wReg); vReg->setWorkReg(wReg);
if (!vReg->isStack()) if (!vReg->isStack()) {
wReg->setRegByteMask(Support::lsbMask<uint64_t>(vReg->virtSize())); wReg->setRegByteMask(Support::lsbMask<uint64_t>(vReg->virtSize()));
}
wRegs.appendUnsafe(wReg); wRegs.appendUnsafe(wReg);
wRegsByGroup.appendUnsafe(wReg); wRegsByGroup.appendUnsafe(wReg);
@@ -696,8 +718,9 @@ RAAssignment::WorkToPhysMap* BaseRAPass::newWorkToPhysMap() noexcept {
} }
WorkToPhysMap* map = zone()->allocT<WorkToPhysMap>(size); WorkToPhysMap* map = zone()->allocT<WorkToPhysMap>(size);
if (ASMJIT_UNLIKELY(!map)) if (ASMJIT_UNLIKELY(!map)) {
return nullptr; return nullptr;
}
map->reset(count); map->reset(count);
return map; return map;
@@ -708,8 +731,9 @@ RAAssignment::PhysToWorkMap* BaseRAPass::newPhysToWorkMap() noexcept {
size_t size = PhysToWorkMap::sizeOf(count); size_t size = PhysToWorkMap::sizeOf(count);
PhysToWorkMap* map = zone()->allocT<PhysToWorkMap>(size); PhysToWorkMap* map = zone()->allocT<PhysToWorkMap>(size);
if (ASMJIT_UNLIKELY(!map)) if (ASMJIT_UNLIKELY(!map)) {
return nullptr; return nullptr;
}
map->reset(count); map->reset(count);
return map; return map;
@@ -719,17 +743,17 @@ RAAssignment::PhysToWorkMap* BaseRAPass::newPhysToWorkMap() noexcept {
// ========================================================= // =========================================================
namespace LiveOps { namespace LiveOps {
typedef ZoneBitVector::BitWord BitWord; using BitWord = ZoneBitVector::BitWord;
struct In { struct In {
static ASMJIT_FORCE_INLINE BitWord op(BitWord dst, BitWord out, BitWord gen, BitWord kill) noexcept { static ASMJIT_INLINE BitWord op(BitWord dst, BitWord out, BitWord gen, BitWord kill) noexcept {
DebugUtils::unused(dst); DebugUtils::unused(dst);
return (out | gen) & ~kill; return (out | gen) & ~kill;
} }
}; };
template<typename Operator> template<typename Operator>
static ASMJIT_FORCE_INLINE bool op(BitWord* dst, const BitWord* a, uint32_t n) noexcept { static ASMJIT_INLINE bool op(BitWord* dst, const BitWord* a, uint32_t n) noexcept {
BitWord changed = 0; BitWord changed = 0;
for (uint32_t i = 0; i < n; i++) { for (uint32_t i = 0; i < n; i++) {
@@ -744,7 +768,7 @@ namespace LiveOps {
} }
template<typename Operator> template<typename Operator>
static ASMJIT_FORCE_INLINE bool op(BitWord* dst, const BitWord* a, const BitWord* b, uint32_t n) noexcept { static ASMJIT_INLINE bool op(BitWord* dst, const BitWord* a, const BitWord* b, uint32_t n) noexcept {
BitWord changed = 0; BitWord changed = 0;
for (uint32_t i = 0; i < n; i++) { for (uint32_t i = 0; i < n; i++) {
@@ -759,7 +783,7 @@ namespace LiveOps {
} }
template<typename Operator> template<typename Operator>
static ASMJIT_FORCE_INLINE bool op(BitWord* dst, const BitWord* a, const BitWord* b, const BitWord* c, uint32_t n) noexcept { static ASMJIT_INLINE bool op(BitWord* dst, const BitWord* a, const BitWord* b, const BitWord* c, uint32_t n) noexcept {
BitWord changed = 0; BitWord changed = 0;
#if defined(_MSC_VER) && _MSC_VER <= 1938 #if defined(_MSC_VER) && _MSC_VER <= 1938
@@ -787,12 +811,14 @@ namespace LiveOps {
uint32_t numSuccessors = successors.size(); uint32_t numSuccessors = successors.size();
// Calculate `OUT` based on `IN` of all successors. // Calculate `OUT` based on `IN` of all successors.
for (uint32_t i = 0; i < numSuccessors; i++) for (uint32_t i = 0; i < numSuccessors; i++) {
changed |= op<Support::Or>(block->liveOut().data(), successors[i]->liveIn().data(), numBitWords); changed |= op<Support::Or>(block->liveOut().data(), successors[i]->liveIn().data(), numBitWords);
}
// Calculate `IN` based on `OUT`, `GEN`, and `KILL` bits. // Calculate `IN` based on `OUT`, `GEN`, and `KILL` bits.
if (changed) if (changed) {
changed = op<In>(block->liveIn().data(), block->liveOut().data(), block->gen().data(), block->kill().data(), numBitWords); changed = op<In>(block->liveIn().data(), block->liveOut().data(), block->gen().data(), block->kill().data(), numBitWords);
}
return changed; return changed;
} }
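The `In` operator and the update loop above implement the standard backward liveness data-flow equations: OUT(B) is the union of IN(S) over all successors S, and IN(B) = (OUT(B) | GEN(B)) & ~KILL(B), iterated to a fixed point. A minimal standalone sketch of the same iteration, using single uint64_t bit sets in place of ZoneBitVector (Block and computeLiveness are illustrative names, not asmjit API):

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative only: one 64-bit word stands in for a full ZoneBitVector.
struct Block {
  std::vector<std::size_t> successors; // indices into the block array
  uint64_t gen = 0;                    // bits read before being written in the block
  uint64_t kill = 0;                   // bits written (killed) in the block
  uint64_t liveIn = 0;
  uint64_t liveOut = 0;
};

// OUT(B) = union of IN(S) over successors S; IN(B) = (OUT(B) | GEN(B)) & ~KILL(B).
// Iterate until nothing changes - the same equations as LiveOps::In above.
inline void computeLiveness(std::vector<Block>& blocks) {
  bool changed = true;
  while (changed) {
    changed = false;
    for (Block& b : blocks) {
      uint64_t out = 0;
      for (std::size_t s : b.successors) {
        out |= blocks[s].liveIn;
      }
      uint64_t in = (out | b.gen) & ~b.kill;
      if (out != b.liveOut || in != b.liveIn) {
        b.liveOut = out;
        b.liveIn = in;
        changed = true;
      }
    }
  }
}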
@@ -858,10 +884,12 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::buildLiveness() noexcept {
// Mark as: // Mark as:
// KILL - if this VirtReg is killed afterwards. // KILL - if this VirtReg is killed afterwards.
// LAST - if this VirtReg is last in this basic block. // LAST - if this VirtReg is last in this basic block.
if (block->kill().bitAt(workId)) if (block->kill().bitAt(workId)) {
tiedReg->addFlags(RATiedFlags::kKill); tiedReg->addFlags(RATiedFlags::kKill);
else if (!block->gen().bitAt(workId)) }
else if (!block->gen().bitAt(workId)) {
tiedReg->addFlags(RATiedFlags::kLast); tiedReg->addFlags(RATiedFlags::kLast);
}
if (tiedReg->isWriteOnly()) { if (tiedReg->isWriteOnly()) {
// KILL. // KILL.
@@ -887,8 +915,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::buildLiveness() noexcept {
nInsts++; nInsts++;
} }
if (node == stop) if (node == stop) {
break; break;
}
node = node->prev(); node = node->prev();
ASMJIT_ASSERT(node != nullptr); ASMJIT_ASSERT(node != nullptr);
@@ -976,8 +1005,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::buildLiveness() noexcept {
for (i = 0; i < numAllBlocks; i++) { for (i = 0; i < numAllBlocks; i++) {
RABlock* block = _blocks[i]; RABlock* block = _blocks[i];
if (!block->isReachable()) if (!block->isReachable()) {
continue; continue;
}
uint32_t blockId = block->blockId(); uint32_t blockId = block->blockId();
@@ -1087,8 +1117,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::buildLiveness() noexcept {
maxLiveCount.op<Support::Max>(raInst->_liveCount); maxLiveCount.op<Support::Max>(raInst->_liveCount);
} }
if (node == stop) if (node == stop) {
break; break;
}
node = node->next(); node = node->next();
ASMJIT_ASSERT(node != nullptr); ASMJIT_ASSERT(node != nullptr);
@@ -1134,22 +1165,26 @@ Error BaseRAPass::assignArgIndexToWorkRegs() noexcept {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) { for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
// Unassigned argument. // Unassigned argument.
const RegOnly& regArg = func()->argPack(argIndex)[valueIndex]; const RegOnly& regArg = func()->argPack(argIndex)[valueIndex];
if (!regArg.isReg() || !cc()->isVirtIdValid(regArg.id())) if (!regArg.isReg() || !cc()->isVirtIdValid(regArg.id())) {
continue; continue;
}
VirtReg* virtReg = cc()->virtRegById(regArg.id()); VirtReg* virtReg = cc()->virtRegById(regArg.id());
if (!virtReg) if (!virtReg) {
continue; continue;
}
// Unreferenced argument. // Unreferenced argument.
RAWorkReg* workReg = virtReg->workReg(); RAWorkReg* workReg = virtReg->workReg();
if (!workReg) if (!workReg) {
continue; continue;
}
// Overwritten argument. // Overwritten argument.
uint32_t workId = workReg->workId(); uint32_t workId = workReg->workId();
if (!liveIn.bitAt(workId)) if (!liveIn.bitAt(workId)) {
continue; continue;
}
workReg->setArgIndex(argIndex, valueIndex); workReg->setArgIndex(argIndex, valueIndex);
const FuncValue& arg = func()->detail().arg(argIndex, valueIndex); const FuncValue& arg = func()->detail().arg(argIndex, valueIndex);
@@ -1172,7 +1207,9 @@ static void RAPass_dumpSpans(String& sb, uint32_t index, const LiveRegSpans& liv
for (uint32_t i = 0; i < liveSpans.size(); i++) { for (uint32_t i = 0; i < liveSpans.size(); i++) {
const LiveRegSpan& liveSpan = liveSpans[i]; const LiveRegSpan& liveSpan = liveSpans[i];
if (i) sb.append(", "); if (i) {
sb.append(", ");
}
sb.appendFormat("[%u:%u@%u]", liveSpan.a, liveSpan.b, liveSpan.id); sb.appendFormat("[%u:%u@%u]", liveSpan.a, liveSpan.b, liveSpan.id);
} }
@@ -1197,11 +1234,13 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::initGlobalLiveSpans() noexcept {
if (physCount) { if (physCount) {
liveSpans = allocator()->allocT<LiveRegSpans>(physCount * sizeof(LiveRegSpans)); liveSpans = allocator()->allocT<LiveRegSpans>(physCount * sizeof(LiveRegSpans));
if (ASMJIT_UNLIKELY(!liveSpans)) if (ASMJIT_UNLIKELY(!liveSpans)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
for (size_t physId = 0; physId < physCount; physId++) for (size_t physId = 0; physId < physCount; physId++) {
new(Support::PlacementNew{&liveSpans[physId]}) LiveRegSpans(); new(Support::PlacementNew{&liveSpans[physId]}) LiveRegSpans();
}
} }
_globalLiveSpans[group] = liveSpans; _globalLiveSpans[group] = liveSpans;
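The loop above constructs each LiveRegSpans in raw memory obtained from the allocator, using asmjit's tagged placement new. A minimal sketch of the underlying placement-new pattern in standard C++, with a made-up Span type and plain malloc standing in for the zone allocator:

#include <cstddef>
#include <cstdlib>
#include <new>

struct Span { unsigned a = 0, b = 0; };

int main() {
  constexpr std::size_t count = 4;
  // Raw storage, e.g. from an arena; plain malloc keeps the sketch self-contained.
  void* raw = std::malloc(count * sizeof(Span));
  if (!raw) {
    return 1;
  }

  Span* spans = static_cast<Span*>(raw);
  for (std::size_t i = 0; i < count; i++) {
    new (&spans[i]) Span(); // construct in place; no per-object heap allocation
  }

  // Span is trivially destructible; otherwise call spans[i].~Span() before freeing.
  std::free(raw);
  return 0;
}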
@@ -1272,8 +1311,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
continue; continue;
} }
if (err != 0xFFFFFFFFu) if (err != 0xFFFFFFFFu) {
return err; return err;
}
} }
} }
@@ -1294,8 +1334,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
i = 0; i = 0;
for (;;) { for (;;) {
uint32_t stop = consecutiveRegs.size(); uint32_t stop = consecutiveRegs.size();
if (i == stop) if (i == stop) {
break; break;
}
while (i < stop) { while (i < stop) {
RAWorkReg* workReg = consecutiveRegs[i].workReg; RAWorkReg* workReg = consecutiveRegs[i].workReg;
@@ -1317,8 +1358,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
uint32_t numConsecutiveRegs = consecutiveRegs.size(); uint32_t numConsecutiveRegs = consecutiveRegs.size();
for (i = 0; i < numConsecutiveRegs; i++) { for (i = 0; i < numConsecutiveRegs; i++) {
RAWorkReg* workReg = consecutiveRegs[i].workReg; RAWorkReg* workReg = consecutiveRegs[i].workReg;
if (workReg->isAllocated()) if (workReg->isAllocated()) {
continue; continue;
}
RAWorkReg* parentReg = consecutiveRegs[i].parentReg; RAWorkReg* parentReg = consecutiveRegs[i].parentReg;
RegMask physRegs = 0; RegMask physRegs = 0;
@@ -1330,16 +1372,18 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
// NOTE: This should never be true as it would mean we would never allocate this virtual register // NOTE: This should never be true as it would mean we would never allocate this virtual register
// (not here, and not later when local register allocator processes RATiedReg sets). // (not here, and not later when local register allocator processes RATiedReg sets).
if (ASMJIT_UNLIKELY(!physRegs)) if (ASMJIT_UNLIKELY(!physRegs)) {
return DebugUtils::errored(kErrorConsecutiveRegsAllocation); return DebugUtils::errored(kErrorConsecutiveRegsAllocation);
}
} }
} }
else if (parentReg->hasHomeRegId()) { else if (parentReg->hasHomeRegId()) {
uint32_t consecutiveId = parentReg->homeRegId() + 1; uint32_t consecutiveId = parentReg->homeRegId() + 1;
// NOTE: We don't support wrapping. If this goes beyond all allocable registers there is something wrong. // NOTE: We don't support wrapping. If this goes beyond all allocable registers there is something wrong.
if (consecutiveId > 31 || !Support::bitTest(availableRegs, consecutiveId)) if (consecutiveId > 31 || !Support::bitTest(availableRegs, consecutiveId)) {
return DebugUtils::errored(kErrorConsecutiveRegsAllocation); return DebugUtils::errored(kErrorConsecutiveRegsAllocation);
}
workReg->setHintRegId(consecutiveId); workReg->setHintRegId(consecutiveId);
physRegs = Support::bitMask(consecutiveId); physRegs = Support::bitMask(consecutiveId);
@@ -1358,8 +1402,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
break; break;
} }
if (ASMJIT_UNLIKELY(err != 0xFFFFFFFFu)) if (ASMJIT_UNLIKELY(err != 0xFFFFFFFFu)) {
return err; return err;
}
physRegs ^= Support::bitMask(physId); physRegs ^= Support::bitMask(physId);
} }
@@ -1373,20 +1418,23 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
for (i = 0; i < numWorkRegs; i++) { for (i = 0; i < numWorkRegs; i++) {
RAWorkReg* workReg = workRegs[i]; RAWorkReg* workReg = workRegs[i];
if (workReg->isAllocated()) if (workReg->isAllocated()) {
continue; continue;
}
RegMask remainingPhysRegs = availableRegs; RegMask remainingPhysRegs = availableRegs;
if (remainingPhysRegs & workReg->preferredMask()) if (remainingPhysRegs & workReg->preferredMask()) {
remainingPhysRegs &= workReg->preferredMask(); remainingPhysRegs &= workReg->preferredMask();
}
RegMask physRegs = remainingPhysRegs & ~preservedRegs; RegMask physRegs = remainingPhysRegs & ~preservedRegs;
remainingPhysRegs &= preservedRegs; remainingPhysRegs &= preservedRegs;
for (;;) { for (;;) {
if (!physRegs) { if (!physRegs) {
if (!remainingPhysRegs) if (!remainingPhysRegs) {
break; break;
}
physRegs = remainingPhysRegs; physRegs = remainingPhysRegs;
remainingPhysRegs = 0; remainingPhysRegs = 0;
} }
@@ -1396,8 +1444,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
if (workReg->clobberSurvivalMask()) { if (workReg->clobberSurvivalMask()) {
RegMask preferredMask = (physRegs | remainingPhysRegs) & workReg->clobberSurvivalMask(); RegMask preferredMask = (physRegs | remainingPhysRegs) & workReg->clobberSurvivalMask();
if (preferredMask) { if (preferredMask) {
if (preferredMask & ~remainingPhysRegs) if (preferredMask & ~remainingPhysRegs) {
preferredMask &= ~remainingPhysRegs; preferredMask &= ~remainingPhysRegs;
}
physId = Support::ctz(preferredMask); physId = Support::ctz(preferredMask);
} }
} }
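The code above intersects the candidate mask with a preferred mask when possible and then picks the lowest set bit via Support::ctz. A small standalone sketch of that mask-then-lowest-bit selection (pickReg and lowestSetBit are illustrative helpers, not asmjit API):

#include <cstdint>

using RegMask = uint32_t;

// Stand-in for Support::ctz in this sketch: index of the lowest set bit.
// Precondition: mask != 0.
inline uint32_t lowestSetBit(RegMask mask) {
  uint32_t n = 0;
  while ((mask & 1u) == 0u) {
    mask >>= 1;
    n++;
  }
  return n;
}

// Pick the lowest-id register that is available, preferring `preferred` when possible.
// Precondition: available != 0.
inline uint32_t pickReg(RegMask available, RegMask preferred) {
  RegMask pool = available & preferred;
  if (!pool) {
    pool = available; // fall back to any free register
  }
  return lowestSetBit(pool);
}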
@@ -1412,16 +1461,18 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
break; break;
} }
if (ASMJIT_UNLIKELY(err != 0xFFFFFFFFu)) if (ASMJIT_UNLIKELY(err != 0xFFFFFFFFu)) {
return err; return err;
}
physRegs &= ~Support::bitMask(physId); physRegs &= ~Support::bitMask(physId);
remainingPhysRegs &= ~Support::bitMask(physId); remainingPhysRegs &= ~Support::bitMask(physId);
} }
// Keep it in `workRegs` if it was not allocated. // Keep it in `workRegs` if it was not allocated.
if (!physRegs) if (!physRegs) {
workRegs[dstIndex++] = workReg; workRegs[dstIndex++] = workReg;
}
} }
workRegs._setSize(dstIndex); workRegs._setSize(dstIndex);
@@ -1431,8 +1482,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
ASMJIT_RA_LOG_COMPLEX({ ASMJIT_RA_LOG_COMPLEX({
for (uint32_t physId = 0; physId < physCount; physId++) { for (uint32_t physId = 0; physId < physCount; physId++) {
LiveRegSpans& live = _globalLiveSpans[group][physId]; LiveRegSpans& live = _globalLiveSpans[group][physId];
if (live.empty()) if (live.empty()) {
continue; continue;
}
sb.clear(); sb.clear();
RAPass_dumpSpans(sb, physId, live); RAPass_dumpSpans(sb, physId, live);
@@ -1448,8 +1500,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
} }
else { else {
_strategy[group].setType(RAStrategyType::kComplex); _strategy[group].setType(RAStrategyType::kComplex);
for (RAWorkReg* workReg : workRegs) for (RAWorkReg* workReg : workRegs) {
workReg->markStackPreferred(); workReg->markStackPreferred();
}
ASMJIT_RA_LOG_COMPLEX({ ASMJIT_RA_LOG_COMPLEX({
uint32_t count = workRegs.size(); uint32_t count = workRegs.size();
@@ -1457,7 +1510,9 @@ ASMJIT_FAVOR_SPEED Error BaseRAPass::binPack(RegGroup group) noexcept {
sb.appendFormat(" Unassigned (%u): ", count); sb.appendFormat(" Unassigned (%u): ", count);
for (i = 0; i < numWorkRegs; i++) { for (i = 0; i < numWorkRegs; i++) {
RAWorkReg* workReg = workRegs[i]; RAWorkReg* workReg = workRegs[i];
if (i) sb.append(", "); if (i) {
sb.append(", ");
}
sb.append(workReg->name()); sb.append(workReg->name());
} }
sb.append('\n'); sb.append('\n');
@@ -1475,8 +1530,9 @@ Error BaseRAPass::runLocalAllocator() noexcept {
RALocalAllocator lra(this); RALocalAllocator lra(this);
ASMJIT_PROPAGATE(lra.init()); ASMJIT_PROPAGATE(lra.init());
if (!blockCount()) if (!blockCount()) {
return kErrorOk; return kErrorOk;
}
// The allocation is done when this reaches zero. // The allocation is done when this reaches zero.
uint32_t blocksRemaining = reachableBlockCount(); uint32_t blocksRemaining = reachableBlockCount();
@@ -1489,7 +1545,7 @@ Error BaseRAPass::runLocalAllocator() noexcept {
ASMJIT_ASSERT(block->isReachable()); ASMJIT_ASSERT(block->isReachable());
// Assign function arguments for the initial block. The `lra` is valid now. // Assign function arguments for the initial block. The `lra` is valid now.
lra.makeInitialAssignment(); ASMJIT_PROPAGATE(lra.makeInitialAssignment());
ASMJIT_PROPAGATE(setBlockEntryAssignment(block, block, lra._curAssignment)); ASMJIT_PROPAGATE(setBlockEntryAssignment(block, block, lra._curAssignment));
  // The loop starts from the first block and iterates blocks in order; however, the algorithm also allows jumping to   // The loop starts from the first block and iterates blocks in order; however, the algorithm also allows jumping to
@@ -1503,10 +1559,7 @@ Error BaseRAPass::runLocalAllocator() noexcept {
BaseNode* afterLast = last->next(); BaseNode* afterLast = last->next();
bool unconditionalJump = false; bool unconditionalJump = false;
RABlock* consecutive = nullptr; RABlock* consecutive = block->hasSuccessors() ? block->successors()[0] : nullptr;
if (block->hasSuccessors())
consecutive = block->successors()[0];
lra.setBlock(block); lra.setBlock(block);
block->makeAllocated(); block->makeAllocated();
@@ -1539,10 +1592,12 @@ Error BaseRAPass::runLocalAllocator() noexcept {
} }
ASMJIT_PROPAGATE(lra.allocInst(inst)); ASMJIT_PROPAGATE(lra.allocInst(inst));
if (inst->type() == NodeType::kInvoke) if (inst->type() == NodeType::kInvoke) {
ASMJIT_PROPAGATE(emitPreCall(inst->as<InvokeNode>())); ASMJIT_PROPAGATE(emitPreCall(inst->as<InvokeNode>()));
else }
else {
ASMJIT_PROPAGATE(lra.spillAfterAllocation(inst)); ASMJIT_PROPAGATE(lra.spillAfterAllocation(inst));
}
} }
node = next; node = next;
} }
@@ -1566,30 +1621,34 @@ Error BaseRAPass::runLocalAllocator() noexcept {
block->setFirst(beforeFirst->next()); block->setFirst(beforeFirst->next());
block->setLast(afterLast ? afterLast->prev() : cc()->lastNode()); block->setLast(afterLast ? afterLast->prev() : cc()->lastNode());
if (--blocksRemaining == 0) if (--blocksRemaining == 0) {
break; break;
}
// Switch to the next consecutive block, if any. // Switch to the next consecutive block, if any.
if (consecutive) { if (consecutive) {
block = consecutive; block = consecutive;
if (!block->isAllocated()) if (!block->isAllocated()) {
continue; continue;
}
} }
// Get the next block. // Get the next block.
for (;;) { for (;;) {
if (++blockId >= blockCount()) if (++blockId >= blockCount()) {
blockId = 0; blockId = 0;
}
block = _blocks[blockId]; block = _blocks[blockId];
if (!block->isReachable() || block->isAllocated() || !block->hasEntryAssignment()) if (!block->isReachable() || block->isAllocated() || !block->hasEntryAssignment()) {
continue; continue;
}
break; break;
} }
// If we switched to some block we have to update the local allocator. // If we switched to some block we have to update the local allocator.
lra.replaceAssignment(block->entryPhysToWorkMap()); ASMJIT_PROPAGATE(lra.replaceAssignment(block->entryPhysToWorkMap()));
} }
_clobberedRegs.op<Support::Or>(lra._clobberedRegs); _clobberedRegs.op<Support::Or>(lra._clobberedRegs);
@@ -1602,23 +1661,26 @@ Error BaseRAPass::setBlockEntryAssignment(RABlock* block, const RABlock* fromBlo
// Shouldn't happen. Entry assignment of a block that has a shared-state will assign to all blocks // Shouldn't happen. Entry assignment of a block that has a shared-state will assign to all blocks
// with the same sharedAssignmentId. It's a bug if the shared state has been already assigned. // with the same sharedAssignmentId. It's a bug if the shared state has been already assigned.
if (!_sharedAssignments[sharedAssignmentId].empty()) if (!_sharedAssignments[sharedAssignmentId].empty()) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
return setSharedAssignment(sharedAssignmentId, fromAssignment); return setSharedAssignment(sharedAssignmentId, fromAssignment);
} }
PhysToWorkMap* physToWorkMap = clonePhysToWorkMap(fromAssignment.physToWorkMap()); PhysToWorkMap* physToWorkMap = clonePhysToWorkMap(fromAssignment.physToWorkMap());
if (ASMJIT_UNLIKELY(!physToWorkMap)) if (ASMJIT_UNLIKELY(!physToWorkMap)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
block->setEntryAssignment(physToWorkMap); block->setEntryAssignment(physToWorkMap);
// True if this is the first (entry) block, nothing to do in this case. // True if this is the first (entry) block, nothing to do in this case.
if (block == fromBlock) { if (block == fromBlock) {
// Entry block should never have a shared state. // Entry block should never have a shared state.
if (block->hasSharedAssignmentId()) if (block->hasSharedAssignmentId()) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
return kErrorOk; return kErrorOk;
} }
@@ -1637,8 +1699,9 @@ Error BaseRAPass::setBlockEntryAssignment(RABlock* block, const RABlock* fromBlo
RegGroup group = workReg->group(); RegGroup group = workReg->group();
uint32_t physId = fromAssignment.workToPhysId(group, workId); uint32_t physId = fromAssignment.workToPhysId(group, workId);
if (physId != RAAssignment::kPhysNone) if (physId != RAAssignment::kPhysNone) {
physToWorkMap->unassign(group, physId, _physRegIndex.get(group) + physId); physToWorkMap->unassign(group, physId, _physRegIndex.get(group) + physId);
}
} }
} }
@@ -1663,8 +1726,9 @@ Error BaseRAPass::setSharedAssignment(uint32_t sharedAssignmentId, const RAAssig
ASMJIT_ASSERT(!block->hasEntryAssignment()); ASMJIT_ASSERT(!block->hasEntryAssignment());
PhysToWorkMap* entryPhysToWorkMap = clonePhysToWorkMap(fromAssignment.physToWorkMap()); PhysToWorkMap* entryPhysToWorkMap = clonePhysToWorkMap(fromAssignment.physToWorkMap());
if (ASMJIT_UNLIKELY(!entryPhysToWorkMap)) if (ASMJIT_UNLIKELY(!entryPhysToWorkMap)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
block->setEntryAssignment(entryPhysToWorkMap); block->setEntryAssignment(entryPhysToWorkMap);
@@ -1681,8 +1745,9 @@ Error BaseRAPass::setSharedAssignment(uint32_t sharedAssignmentId, const RAAssig
uint32_t physId = it.next(); uint32_t physId = it.next();
uint32_t workId = entryPhysToWorkMap->workIds[physBaseIndex + physId]; uint32_t workId = entryPhysToWorkMap->workIds[physBaseIndex + physId];
if (!liveIn.bitAt(workId)) if (!liveIn.bitAt(workId)) {
entryPhysToWorkMap->unassign(group, physId, physBaseIndex + physId); entryPhysToWorkMap->unassign(group, physId, physBaseIndex + physId);
}
} }
} }
} }
@@ -1694,8 +1759,9 @@ Error BaseRAPass::setSharedAssignment(uint32_t sharedAssignmentId, const RAAssig
while (it.hasNext()) { while (it.hasNext()) {
uint32_t physId = it.next(); uint32_t physId = it.next();
if (Support::bitTest(physToWorkMap->assigned[group], physId)) if (Support::bitTest(physToWorkMap->assigned[group], physId)) {
physToWorkMap->unassign(group, physId, physBaseIndex + physId); physToWorkMap->unassign(group, physId, physBaseIndex + physId);
}
} }
} }
@@ -1705,8 +1771,9 @@ Error BaseRAPass::setSharedAssignment(uint32_t sharedAssignmentId, const RAAssig
Error BaseRAPass::blockEntryAssigned(const PhysToWorkMap* physToWorkMap) noexcept { Error BaseRAPass::blockEntryAssigned(const PhysToWorkMap* physToWorkMap) noexcept {
// Complex allocation strategy requires to record register assignments upon block entry (or per shared state). // Complex allocation strategy requires to record register assignments upon block entry (or per shared state).
for (RegGroup group : RegGroupVirtValues{}) { for (RegGroup group : RegGroupVirtValues{}) {
if (!_strategy[group].isComplex()) if (!_strategy[group].isComplex()) {
continue; continue;
}
uint32_t physBaseIndex = _physRegIndex[group]; uint32_t physBaseIndex = _physRegIndex[group];
Support::BitWordIterator<RegMask> it(physToWorkMap->assigned[group]); Support::BitWordIterator<RegMask> it(physToWorkMap->assigned[group]);
@@ -1753,15 +1820,17 @@ Error BaseRAPass::updateStackFrame() noexcept {
// Update some StackFrame information that we updated during allocation. The only information we don't have at the // Update some StackFrame information that we updated during allocation. The only information we don't have at the
// moment is final local stack size, which is calculated last. // moment is final local stack size, which is calculated last.
FuncFrame& frame = func()->frame(); FuncFrame& frame = func()->frame();
for (RegGroup group : RegGroupVirtValues{}) for (RegGroup group : RegGroupVirtValues{}) {
frame.addDirtyRegs(group, _clobberedRegs[group]); frame.addDirtyRegs(group, _clobberedRegs[group]);
}
frame.setLocalStackAlignment(_stackAllocator.alignment()); frame.setLocalStackAlignment(_stackAllocator.alignment());
// If there are stack arguments that are not assigned to registers upon entry and the function doesn't require // If there are stack arguments that are not assigned to registers upon entry and the function doesn't require
// dynamic stack alignment we keep these arguments where they are. This will also mark all stack slots that match // dynamic stack alignment we keep these arguments where they are. This will also mark all stack slots that match
// these arguments as allocated. // these arguments as allocated.
if (_numStackArgsToStackSlots) if (_numStackArgsToStackSlots) {
ASMJIT_PROPAGATE(_markStackArgsToKeep()); ASMJIT_PROPAGATE(_markStackArgsToKeep());
}
// Calculate offsets of all stack slots and update StackSize to reflect the calculated local stack size. // Calculate offsets of all stack slots and update StackSize to reflect the calculated local stack size.
ASMJIT_PROPAGATE(_stackAllocator.calculateStackFrame()); ASMJIT_PROPAGATE(_stackAllocator.calculateStackFrame());
@@ -1773,13 +1842,15 @@ Error BaseRAPass::updateStackFrame() noexcept {
ASMJIT_PROPAGATE(frame.finalize()); ASMJIT_PROPAGATE(frame.finalize());
  // StackAllocator allocates all slots starting from [0], adjust them when necessary.   // StackAllocator allocates all slots starting from [0], adjust them when necessary.
if (frame.localStackOffset() != 0) if (frame.localStackOffset() != 0) {
ASMJIT_PROPAGATE(_stackAllocator.adjustSlotOffsets(int32_t(frame.localStackOffset()))); ASMJIT_PROPAGATE(_stackAllocator.adjustSlotOffsets(int32_t(frame.localStackOffset())));
}
// Again, if there are stack arguments allocated in function's stack we have to handle them. This handles all cases // Again, if there are stack arguments allocated in function's stack we have to handle them. This handles all cases
// (either regular or dynamic stack alignment). // (either regular or dynamic stack alignment).
if (_numStackArgsToStackSlots) if (_numStackArgsToStackSlots) {
ASMJIT_PROPAGATE(_updateStackArgs()); ASMJIT_PROPAGATE(_updateStackArgs());
}
return kErrorOk; return kErrorOk;
} }
@@ -1800,8 +1871,9 @@ Error BaseRAPass::_markStackArgsToKeep() noexcept {
  // If the register doesn't have a stack slot then we failed. It doesn't make much sense as it was marked as   // If the register doesn't have a stack slot then we failed. It doesn't make much sense as it was marked as
// `kFlagStackArgToStack`, which requires the WorkReg was live-in upon function entry. // `kFlagStackArgToStack`, which requires the WorkReg was live-in upon function entry.
RAStackSlot* slot = workReg->stackSlot(); RAStackSlot* slot = workReg->stackSlot();
if (ASMJIT_UNLIKELY(!slot)) if (ASMJIT_UNLIKELY(!slot)) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
if (hasSAReg && srcArg.isStack() && !srcArg.isIndirect()) { if (hasSAReg && srcArg.isStack() && !srcArg.isIndirect()) {
uint32_t typeSize = TypeUtils::sizeOf(srcArg.typeId()); uint32_t typeSize = TypeUtils::sizeOf(srcArg.typeId());
@@ -1832,8 +1904,9 @@ Error BaseRAPass::_updateStackArgs() noexcept {
ASMJIT_ASSERT(workReg->hasArgIndex()); ASMJIT_ASSERT(workReg->hasArgIndex());
RAStackSlot* slot = workReg->stackSlot(); RAStackSlot* slot = workReg->stackSlot();
if (ASMJIT_UNLIKELY(!slot)) if (ASMJIT_UNLIKELY(!slot)) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
if (slot->isStackArg()) { if (slot->isStackArg()) {
const FuncValue& srcArg = _func->detail().arg(workReg->argIndex()); const FuncValue& srcArg = _func->detail().arg(workReg->argIndex());
@@ -1930,38 +2003,48 @@ static void RAPass_formatLiveness(BaseRAPass* pass, String& sb, const RAInst* ra
for (uint32_t i = 0; i < tiedCount; i++) { for (uint32_t i = 0; i < tiedCount; i++) {
const RATiedReg& tiedReg = tiedRegs[i]; const RATiedReg& tiedReg = tiedRegs[i];
if (i != 0) if (i != 0) {
sb.append(' '); sb.append(' ');
}
sb.appendFormat("%s{", pass->workRegById(tiedReg.workId())->name()); sb.appendFormat("%s{", pass->workRegById(tiedReg.workId())->name());
sb.append(tiedReg.isReadWrite() ? 'X' : sb.append(tiedReg.isReadWrite() ? 'X' :
tiedReg.isRead() ? 'R' : tiedReg.isRead() ? 'R' :
tiedReg.isWrite() ? 'W' : '?'); tiedReg.isWrite() ? 'W' : '?');
if (tiedReg.isLeadConsecutive()) if (tiedReg.isLeadConsecutive()) {
sb.appendFormat("|Lead[%u]", tiedReg.consecutiveData() + 1u); sb.appendFormat("|Lead[%u]", tiedReg.consecutiveData() + 1u);
}
if (tiedReg.hasUseId()) if (tiedReg.hasUseId()) {
sb.appendFormat("|Use=%u", tiedReg.useId()); sb.appendFormat("|Use=%u", tiedReg.useId());
else if (tiedReg.isUse()) }
else if (tiedReg.isUse()) {
sb.append("|Use"); sb.append("|Use");
}
if (tiedReg.isUseConsecutive() && !tiedReg.isLeadConsecutive()) if (tiedReg.isUseConsecutive() && !tiedReg.isLeadConsecutive()) {
sb.appendFormat("+%u", tiedReg.consecutiveData()); sb.appendFormat("+%u", tiedReg.consecutiveData());
}
if (tiedReg.hasOutId()) if (tiedReg.hasOutId()) {
sb.appendFormat("|Out=%u", tiedReg.outId()); sb.appendFormat("|Out=%u", tiedReg.outId());
else if (tiedReg.isOut()) }
else if (tiedReg.isOut()) {
sb.append("|Out"); sb.append("|Out");
}
if (tiedReg.isOutConsecutive() && !tiedReg.isLeadConsecutive()) if (tiedReg.isOutConsecutive() && !tiedReg.isLeadConsecutive()) {
sb.appendFormat("+%u", tiedReg.consecutiveData()); sb.appendFormat("+%u", tiedReg.consecutiveData());
}
if (tiedReg.isLast()) if (tiedReg.isLast()) {
sb.append("|Last"); sb.append("|Last");
}
if (tiedReg.isKill()) if (tiedReg.isKill()) {
sb.append("|Kill"); sb.append("|Kill");
}
sb.append("}"); sb.append("}");
} }
@@ -1972,7 +2055,9 @@ ASMJIT_FAVOR_SIZE Error BaseRAPass::annotateCode() noexcept {
for (const RABlock* block : _blocks) { for (const RABlock* block : _blocks) {
BaseNode* node = block->first(); BaseNode* node = block->first();
if (!node) continue; if (!node) {
continue;
}
BaseNode* last = block->last(); BaseNode* last = block->last();
for (;;) { for (;;) {
@@ -1989,8 +2074,9 @@ ASMJIT_FAVOR_SIZE Error BaseRAPass::annotateCode() noexcept {
} }
node->setInlineComment(static_cast<char*>(cc()->_dataZone.dup(sb.data(), sb.size(), true))); node->setInlineComment(static_cast<char*>(cc()->_dataZone.dup(sb.data(), sb.size(), true)));
if (node == last) if (node == last) {
break; break;
}
node = node->next(); node = node->next();
} }
} }
@@ -2001,10 +2087,12 @@ ASMJIT_FAVOR_SIZE Error BaseRAPass::annotateCode() noexcept {
ASMJIT_FAVOR_SIZE Error BaseRAPass::_dumpBlockIds(String& sb, const RABlocks& blocks) noexcept { ASMJIT_FAVOR_SIZE Error BaseRAPass::_dumpBlockIds(String& sb, const RABlocks& blocks) noexcept {
for (uint32_t i = 0, size = blocks.size(); i < size; i++) { for (uint32_t i = 0, size = blocks.size(); i < size; i++) {
const RABlock* block = blocks[i]; const RABlock* block = blocks[i];
if (i != 0) if (i != 0) {
ASMJIT_PROPAGATE(sb.appendFormat(", #%u", block->blockId())); ASMJIT_PROPAGATE(sb.appendFormat(", #%u", block->blockId()));
else }
else {
ASMJIT_PROPAGATE(sb.appendFormat("#%u", block->blockId())); ASMJIT_PROPAGATE(sb.appendFormat("#%u", block->blockId()));
}
} }
return kErrorOk; return kErrorOk;
} }
@@ -2024,18 +2112,21 @@ ASMJIT_FAVOR_SIZE Error BaseRAPass::_dumpBlockLiveness(String& sb, const RABlock
if (bits.bitAt(workId)) { if (bits.bitAt(workId)) {
RAWorkReg* wReg = workRegById(workId); RAWorkReg* wReg = workRegById(workId);
if (!n) if (!n) {
sb.appendFormat(" %s [", bitsName); sb.appendFormat(" %s [", bitsName);
else }
else {
sb.append(", "); sb.append(", ");
}
sb.append(wReg->name()); sb.append(wReg->name());
n++; n++;
} }
} }
if (n) if (n) {
sb.append("]\n"); sb.append("]\n");
}
} }
return kErrorOk; return kErrorOk;
@@ -2065,8 +2156,9 @@ ASMJIT_FAVOR_SIZE Error BaseRAPass::_dumpLiveSpans(String& sb) noexcept {
LiveRegSpans& liveSpans = workReg->liveSpans(); LiveRegSpans& liveSpans = workReg->liveSpans();
for (uint32_t x = 0; x < liveSpans.size(); x++) { for (uint32_t x = 0; x < liveSpans.size(); x++) {
const LiveRegSpan& liveSpan = liveSpans[x]; const LiveRegSpan& liveSpan = liveSpans[x];
if (x) if (x) {
sb.append(", "); sb.append(", ");
}
sb.appendFormat("[%u:%u]", liveSpan.a, liveSpan.b); sb.appendFormat("[%u:%u]", liveSpan.a, liveSpan.b);
} }

View File

@@ -56,24 +56,25 @@ class RABlock {
public: public:
ASMJIT_NONCOPYABLE(RABlock) ASMJIT_NONCOPYABLE(RABlock)
typedef RAAssignment::PhysToWorkMap PhysToWorkMap; //! \name Types
typedef RAAssignment::WorkToPhysMap WorkToPhysMap; //! \{
using PhysToWorkMap = RAAssignment::PhysToWorkMap;
using WorkToPhysMap = RAAssignment::WorkToPhysMap;
//! \}
//! \name Constants //! \name Constants
//! \{ //! \{
enum : uint32_t { //! Unassigned block id.
//! Unassigned block id. static inline constexpr uint32_t kUnassignedId = 0xFFFFFFFFu;
kUnassignedId = 0xFFFFFFFFu
};
enum LiveType : uint32_t { static inline constexpr uint32_t kLiveIn = 0;
kLiveIn = 0, static inline constexpr uint32_t kLiveOut = 1;
kLiveOut = 1, static inline constexpr uint32_t kLiveGen = 2;
kLiveGen = 2, static inline constexpr uint32_t kLiveKill = 3;
kLiveKill = 3, static inline constexpr uint32_t kLiveCount = 4;
kLiveCount = 4
};
//! \} //! \}
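The enum blocks above become `static inline constexpr` members, which C++17 lets a class define in place: an inline variable has a single definition program-wide, so no out-of-line definition is needed even if the constant is odr-used. A minimal illustration using a made-up Example struct rather than asmjit code:

#include <cstdint>

struct Example {
  // C++17: a `static inline constexpr` data member is defined right here and has
  // one address across all translation units - no separate .cpp definition needed
  // even when the constant is odr-used (e.g. its address is taken).
  static inline constexpr uint32_t kUnassignedId = 0xFFFFFFFFu;
  static inline constexpr uint32_t kLiveIn = 0;
  static inline constexpr uint32_t kLiveOut = 1;
};

static_assert(Example::kUnassignedId == 0xFFFFFFFFu, "usable in constant expressions");

// Pre-C++17 code typically reached for `enum : uint32_t { ... }` to avoid the
// out-of-line definition; the constexpr members keep the exact type instead.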
@@ -145,24 +146,48 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseRAPass* pass() const noexcept { return _ra; } ASMJIT_INLINE_NODEBUG BaseRAPass* pass() const noexcept { return _ra; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept; ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t blockId() const noexcept { return _blockId; } ASMJIT_INLINE_NODEBUG uint32_t blockId() const noexcept { return _blockId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RABlockFlags flags() const noexcept { return _flags; } ASMJIT_INLINE_NODEBUG RABlockFlags flags() const noexcept { return _flags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(RABlockFlags flag) const noexcept { return Support::test(_flags, flag); } ASMJIT_INLINE_NODEBUG bool hasFlag(RABlockFlags flag) const noexcept { return Support::test(_flags, flag); }
ASMJIT_INLINE_NODEBUG void addFlags(RABlockFlags flags) noexcept { _flags |= flags; } ASMJIT_INLINE_NODEBUG void addFlags(RABlockFlags flags) noexcept { _flags |= flags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAssigned() const noexcept { return _blockId != kUnassignedId; } ASMJIT_INLINE_NODEBUG bool isAssigned() const noexcept { return _blockId != kUnassignedId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isConstructed() const noexcept { return hasFlag(RABlockFlags::kIsConstructed); } ASMJIT_INLINE_NODEBUG bool isConstructed() const noexcept { return hasFlag(RABlockFlags::kIsConstructed); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isReachable() const noexcept { return hasFlag(RABlockFlags::kIsReachable); } ASMJIT_INLINE_NODEBUG bool isReachable() const noexcept { return hasFlag(RABlockFlags::kIsReachable); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isTargetable() const noexcept { return hasFlag(RABlockFlags::kIsTargetable); } ASMJIT_INLINE_NODEBUG bool isTargetable() const noexcept { return hasFlag(RABlockFlags::kIsTargetable); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAllocated() const noexcept { return hasFlag(RABlockFlags::kIsAllocated); } ASMJIT_INLINE_NODEBUG bool isAllocated() const noexcept { return hasFlag(RABlockFlags::kIsAllocated); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFuncExit() const noexcept { return hasFlag(RABlockFlags::kIsFuncExit); } ASMJIT_INLINE_NODEBUG bool isFuncExit() const noexcept { return hasFlag(RABlockFlags::kIsFuncExit); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasTerminator() const noexcept { return hasFlag(RABlockFlags::kHasTerminator); } ASMJIT_INLINE_NODEBUG bool hasTerminator() const noexcept { return hasFlag(RABlockFlags::kHasTerminator); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasConsecutive() const noexcept { return hasFlag(RABlockFlags::kHasConsecutive); } ASMJIT_INLINE_NODEBUG bool hasConsecutive() const noexcept { return hasFlag(RABlockFlags::kHasConsecutive); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasJumpTable() const noexcept { return hasFlag(RABlockFlags::kHasJumpTable); } ASMJIT_INLINE_NODEBUG bool hasJumpTable() const noexcept { return hasFlag(RABlockFlags::kHasJumpTable); }
ASMJIT_INLINE_NODEBUG void makeConstructed(const RARegsStats& regStats) noexcept { ASMJIT_INLINE_NODEBUG void makeConstructed(const RARegsStats& regStats) noexcept {
@@ -174,11 +199,16 @@ public:
ASMJIT_INLINE_NODEBUG void makeTargetable() noexcept { _flags |= RABlockFlags::kIsTargetable; } ASMJIT_INLINE_NODEBUG void makeTargetable() noexcept { _flags |= RABlockFlags::kIsTargetable; }
ASMJIT_INLINE_NODEBUG void makeAllocated() noexcept { _flags |= RABlockFlags::kIsAllocated; } ASMJIT_INLINE_NODEBUG void makeAllocated() noexcept { _flags |= RABlockFlags::kIsAllocated; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RARegsStats& regsStats() const noexcept { return _regsStats; } ASMJIT_INLINE_NODEBUG const RARegsStats& regsStats() const noexcept { return _regsStats; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasPredecessors() const noexcept { return !_predecessors.empty(); } ASMJIT_INLINE_NODEBUG bool hasPredecessors() const noexcept { return !_predecessors.empty(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasSuccessors() const noexcept { return !_successors.empty(); } ASMJIT_INLINE_NODEBUG bool hasSuccessors() const noexcept { return !_successors.empty(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasSuccessor(RABlock* block) noexcept { ASMJIT_INLINE_NODEBUG bool hasSuccessor(RABlock* block) noexcept {
if (block->_predecessors.size() < _successors.size()) if (block->_predecessors.size() < _successors.size())
return block->_predecessors.contains(this); return block->_predecessors.contains(this);
@@ -186,56 +216,97 @@ public:
return _successors.contains(block); return _successors.contains(block);
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RABlocks& predecessors() const noexcept { return _predecessors; } ASMJIT_INLINE_NODEBUG const RABlocks& predecessors() const noexcept { return _predecessors; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RABlocks& successors() const noexcept { return _successors; } ASMJIT_INLINE_NODEBUG const RABlocks& successors() const noexcept { return _successors; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* first() const noexcept { return _first; } ASMJIT_INLINE_NODEBUG BaseNode* first() const noexcept { return _first; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* last() const noexcept { return _last; } ASMJIT_INLINE_NODEBUG BaseNode* last() const noexcept { return _last; }
ASMJIT_INLINE_NODEBUG void setFirst(BaseNode* node) noexcept { _first = node; } ASMJIT_INLINE_NODEBUG void setFirst(BaseNode* node) noexcept { _first = node; }
ASMJIT_INLINE_NODEBUG void setLast(BaseNode* node) noexcept { _last = node; } ASMJIT_INLINE_NODEBUG void setLast(BaseNode* node) noexcept { _last = node; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t firstPosition() const noexcept { return _firstPosition; } ASMJIT_INLINE_NODEBUG uint32_t firstPosition() const noexcept { return _firstPosition; }
ASMJIT_INLINE_NODEBUG void setFirstPosition(uint32_t position) noexcept { _firstPosition = position; } ASMJIT_INLINE_NODEBUG void setFirstPosition(uint32_t position) noexcept { _firstPosition = position; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t endPosition() const noexcept { return _endPosition; } ASMJIT_INLINE_NODEBUG uint32_t endPosition() const noexcept { return _endPosition; }
ASMJIT_INLINE_NODEBUG void setEndPosition(uint32_t position) noexcept { _endPosition = position; } ASMJIT_INLINE_NODEBUG void setEndPosition(uint32_t position) noexcept { _endPosition = position; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t povOrder() const noexcept { return _povOrder; } ASMJIT_INLINE_NODEBUG uint32_t povOrder() const noexcept { return _povOrder; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask entryScratchGpRegs() const noexcept; ASMJIT_INLINE_NODEBUG RegMask entryScratchGpRegs() const noexcept;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask exitScratchGpRegs() const noexcept { return _exitScratchGpRegs; } ASMJIT_INLINE_NODEBUG RegMask exitScratchGpRegs() const noexcept { return _exitScratchGpRegs; }
ASMJIT_INLINE_NODEBUG void addEntryScratchGpRegs(RegMask regMask) noexcept { _entryScratchGpRegs |= regMask; } ASMJIT_INLINE_NODEBUG void addEntryScratchGpRegs(RegMask regMask) noexcept { _entryScratchGpRegs |= regMask; }
ASMJIT_INLINE_NODEBUG void addExitScratchGpRegs(RegMask regMask) noexcept { _exitScratchGpRegs |= regMask; } ASMJIT_INLINE_NODEBUG void addExitScratchGpRegs(RegMask regMask) noexcept { _exitScratchGpRegs |= regMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasSharedAssignmentId() const noexcept { return _sharedAssignmentId != Globals::kInvalidId; } ASMJIT_INLINE_NODEBUG bool hasSharedAssignmentId() const noexcept { return _sharedAssignmentId != Globals::kInvalidId; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t sharedAssignmentId() const noexcept { return _sharedAssignmentId; } ASMJIT_INLINE_NODEBUG uint32_t sharedAssignmentId() const noexcept { return _sharedAssignmentId; }
ASMJIT_INLINE_NODEBUG void setSharedAssignmentId(uint32_t id) noexcept { _sharedAssignmentId = id; } ASMJIT_INLINE_NODEBUG void setSharedAssignmentId(uint32_t id) noexcept { _sharedAssignmentId = id; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t timestamp() const noexcept { return _timestamp; } ASMJIT_INLINE_NODEBUG uint64_t timestamp() const noexcept { return _timestamp; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasTimestamp(uint64_t ts) const noexcept { return _timestamp == ts; } ASMJIT_INLINE_NODEBUG bool hasTimestamp(uint64_t ts) const noexcept { return _timestamp == ts; }
ASMJIT_INLINE_NODEBUG void setTimestamp(uint64_t ts) const noexcept { _timestamp = ts; } ASMJIT_INLINE_NODEBUG void setTimestamp(uint64_t ts) const noexcept { _timestamp = ts; }
ASMJIT_INLINE_NODEBUG void resetTimestamp() const noexcept { _timestamp = 0; } ASMJIT_INLINE_NODEBUG void resetTimestamp() const noexcept { _timestamp = 0; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RABlock* consecutive() const noexcept { return hasConsecutive() ? _successors[0] : nullptr; } ASMJIT_INLINE_NODEBUG RABlock* consecutive() const noexcept { return hasConsecutive() ? _successors[0] : nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RABlock* iDom() noexcept { return _idom; } ASMJIT_INLINE_NODEBUG RABlock* iDom() noexcept { return _idom; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RABlock* iDom() const noexcept { return _idom; } ASMJIT_INLINE_NODEBUG const RABlock* iDom() const noexcept { return _idom; }
ASMJIT_INLINE_NODEBUG void setIDom(RABlock* block) noexcept { _idom = block; } ASMJIT_INLINE_NODEBUG void setIDom(RABlock* block) noexcept { _idom = block; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneBitVector& liveIn() noexcept { return _liveBits[kLiveIn]; } ASMJIT_INLINE_NODEBUG ZoneBitVector& liveIn() noexcept { return _liveBits[kLiveIn]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneBitVector& liveIn() const noexcept { return _liveBits[kLiveIn]; } ASMJIT_INLINE_NODEBUG const ZoneBitVector& liveIn() const noexcept { return _liveBits[kLiveIn]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneBitVector& liveOut() noexcept { return _liveBits[kLiveOut]; } ASMJIT_INLINE_NODEBUG ZoneBitVector& liveOut() noexcept { return _liveBits[kLiveOut]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneBitVector& liveOut() const noexcept { return _liveBits[kLiveOut]; } ASMJIT_INLINE_NODEBUG const ZoneBitVector& liveOut() const noexcept { return _liveBits[kLiveOut]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneBitVector& gen() noexcept { return _liveBits[kLiveGen]; } ASMJIT_INLINE_NODEBUG ZoneBitVector& gen() noexcept { return _liveBits[kLiveGen]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneBitVector& gen() const noexcept { return _liveBits[kLiveGen]; } ASMJIT_INLINE_NODEBUG const ZoneBitVector& gen() const noexcept { return _liveBits[kLiveGen]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneBitVector& kill() noexcept { return _liveBits[kLiveKill]; } ASMJIT_INLINE_NODEBUG ZoneBitVector& kill() noexcept { return _liveBits[kLiveKill]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneBitVector& kill() const noexcept { return _liveBits[kLiveKill]; } ASMJIT_INLINE_NODEBUG const ZoneBitVector& kill() const noexcept { return _liveBits[kLiveKill]; }
[[nodiscard]]
inline Error resizeLiveBits(uint32_t size) noexcept { inline Error resizeLiveBits(uint32_t size) noexcept {
ASMJIT_PROPAGATE(_liveBits[kLiveIn ].resize(allocator(), size)); ASMJIT_PROPAGATE(_liveBits[kLiveIn ].resize(allocator(), size));
ASMJIT_PROPAGATE(_liveBits[kLiveOut ].resize(allocator(), size)); ASMJIT_PROPAGATE(_liveBits[kLiveOut ].resize(allocator(), size));
@@ -244,8 +315,12 @@ public:
return kErrorOk; return kErrorOk;
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasEntryAssignment() const noexcept { return _entryPhysToWorkMap != nullptr; } ASMJIT_INLINE_NODEBUG bool hasEntryAssignment() const noexcept { return _entryPhysToWorkMap != nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG PhysToWorkMap* entryPhysToWorkMap() const noexcept { return _entryPhysToWorkMap; } ASMJIT_INLINE_NODEBUG PhysToWorkMap* entryPhysToWorkMap() const noexcept { return _entryPhysToWorkMap; }
ASMJIT_INLINE_NODEBUG void setEntryAssignment(PhysToWorkMap* physToWorkMap) noexcept { _entryPhysToWorkMap = physToWorkMap; } ASMJIT_INLINE_NODEBUG void setEntryAssignment(PhysToWorkMap* physToWorkMap) noexcept { _entryPhysToWorkMap = physToWorkMap; }
//! \} //! \}
@@ -256,11 +331,13 @@ public:
  //! Adds a successor to this block, and a predecessor to `successor`, making the connection on both sides.   //! Adds a successor to this block, and a predecessor to `successor`, making the connection on both sides.
  //!   //!
  //! This API must be used to manage successors and predecessors, never manage them manually.   //! This API must be used to manage successors and predecessors, never manage them manually.
[[nodiscard]]
Error appendSuccessor(RABlock* successor) noexcept; Error appendSuccessor(RABlock* successor) noexcept;
  //! Similar to `appendSuccessor()`, but prepends instead of appending.   //! Similar to `appendSuccessor()`, but prepends instead of appending.
//! //!
//! This function is used to add a natural flow (always first) to the block. //! This function is used to add a natural flow (always first) to the block.
[[nodiscard]]
Error prependSuccessor(RABlock* successor) noexcept; Error prependSuccessor(RABlock* successor) noexcept;
//! \} //! \}
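The `[[nodiscard]]` attributes added throughout this header ask the compiler to warn whenever an `Error`-returning call is silently discarded. A small example of the effect, with illustrative Error and doWork definitions rather than asmjit's own:

#include <cstdint>
#include <cstdio>

using Error = uint32_t;
static constexpr Error kErrorOk = 0;

[[nodiscard]] inline Error doWork() noexcept {
  return kErrorOk;
}

int main() {
  doWork(); // most compilers warn here: a [[nodiscard]] return value is ignored

  if (Error err = doWork(); err != kErrorOk) { // checking it (C++17 if-initializer) is fine
    std::printf("doWork failed: %u\n", static_cast<unsigned>(err));
  }
  return 0;
}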
@@ -318,53 +395,76 @@ public:
//! \{ //! \{
//! Returns instruction RW flags. //! Returns instruction RW flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstRWFlags instRWFlags() const noexcept { return _instRWFlags; }; ASMJIT_INLINE_NODEBUG InstRWFlags instRWFlags() const noexcept { return _instRWFlags; };
//! Tests whether the given `flag` is present in instruction RW flags. //! Tests whether the given `flag` is present in instruction RW flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstRWFlag(InstRWFlags flag) const noexcept { return Support::test(_instRWFlags, flag); } ASMJIT_INLINE_NODEBUG bool hasInstRWFlag(InstRWFlags flag) const noexcept { return Support::test(_instRWFlags, flag); }
//! Adds `flags` to instruction RW flags. //! Adds `flags` to instruction RW flags.
ASMJIT_INLINE_NODEBUG void addInstRWFlags(InstRWFlags flags) noexcept { _instRWFlags |= flags; } ASMJIT_INLINE_NODEBUG void addInstRWFlags(InstRWFlags flags) noexcept { _instRWFlags |= flags; }
//! Returns the instruction flags. //! Returns the instruction flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedFlags flags() const noexcept { return _flags; } ASMJIT_INLINE_NODEBUG RATiedFlags flags() const noexcept { return _flags; }
//! Tests whether the instruction has flag `flag`. //! Tests whether the instruction has flag `flag`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(RATiedFlags flag) const noexcept { return Support::test(_flags, flag); } ASMJIT_INLINE_NODEBUG bool hasFlag(RATiedFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Replaces the existing instruction flags with `flags`. //! Replaces the existing instruction flags with `flags`.
ASMJIT_INLINE_NODEBUG void setFlags(RATiedFlags flags) noexcept { _flags = flags; } ASMJIT_INLINE_NODEBUG void setFlags(RATiedFlags flags) noexcept { _flags = flags; }
//! Adds instruction `flags` to this RAInst. //! Adds instruction `flags` to this RAInst.
ASMJIT_INLINE_NODEBUG void addFlags(RATiedFlags flags) noexcept { _flags |= flags; } ASMJIT_INLINE_NODEBUG void addFlags(RATiedFlags flags) noexcept { _flags |= flags; }
//! Clears instruction `flags` from this RAInst. //! Clears instruction `flags` from this RAInst.
ASMJIT_INLINE_NODEBUG void clearFlags(RATiedFlags flags) noexcept { _flags &= ~flags; } ASMJIT_INLINE_NODEBUG void clearFlags(RATiedFlags flags) noexcept { _flags &= ~flags; }
//! Tests whether one operand of this instruction has been patched from Reg to Mem. //! Tests whether one operand of this instruction has been patched from Reg to Mem.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isRegToMemPatched() const noexcept { return hasFlag(RATiedFlags::kInst_RegToMemPatched); } ASMJIT_INLINE_NODEBUG bool isRegToMemPatched() const noexcept { return hasFlag(RATiedFlags::kInst_RegToMemPatched); }
//! Tests whether this instruction can be transformed to another instruction if necessary. //! Tests whether this instruction can be transformed to another instruction if necessary.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isTransformable() const noexcept { return hasFlag(RATiedFlags::kInst_IsTransformable); } ASMJIT_INLINE_NODEBUG bool isTransformable() const noexcept { return hasFlag(RATiedFlags::kInst_IsTransformable); }
//! Returns the associated block with this RAInst. //! Returns the associated block with this RAInst.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RABlock* block() const noexcept { return _block; } ASMJIT_INLINE_NODEBUG RABlock* block() const noexcept { return _block; }
//! Returns tied registers (all). //! Returns tied registers (all).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs() const noexcept { return const_cast<RATiedReg*>(_tiedRegs); } ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs() const noexcept { return const_cast<RATiedReg*>(_tiedRegs); }
//! Returns tied registers for a given `group`. //! Returns tied registers for a given `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs(RegGroup group) const noexcept { return const_cast<RATiedReg*>(_tiedRegs) + _tiedIndex.get(group); } ASMJIT_INLINE_NODEBUG RATiedReg* tiedRegs(RegGroup group) const noexcept { return const_cast<RATiedReg*>(_tiedRegs) + _tiedIndex.get(group); }
//! Returns count of all tied registers. //! Returns count of all tied registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t tiedCount() const noexcept { return _tiedTotal; } ASMJIT_INLINE_NODEBUG uint32_t tiedCount() const noexcept { return _tiedTotal; }
//! Returns count of tied registers of a given `group`. //! Returns count of tied registers of a given `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t tiedCount(RegGroup group) const noexcept { return _tiedCount[group]; } ASMJIT_INLINE_NODEBUG uint32_t tiedCount(RegGroup group) const noexcept { return _tiedCount[group]; }
//! Returns `RATiedReg` at the given `index`. //! Returns `RATiedReg` at the given `index`.
[[nodiscard]]
inline RATiedReg* tiedAt(uint32_t index) const noexcept { inline RATiedReg* tiedAt(uint32_t index) const noexcept {
ASMJIT_ASSERT(index < _tiedTotal); ASMJIT_ASSERT(index < _tiedTotal);
return tiedRegs() + index; return tiedRegs() + index;
} }
//! Returns `RATiedReg` at the given `index` of the given register `group`. //! Returns `RATiedReg` at the given `index` of the given register `group`.
[[nodiscard]]
inline RATiedReg* tiedOf(RegGroup group, uint32_t index) const noexcept { inline RATiedReg* tiedOf(RegGroup group, uint32_t index) const noexcept {
ASMJIT_ASSERT(index < _tiedCount.get(group)); ASMJIT_ASSERT(index < _tiedCount.get(group));
return tiedRegs(group) + index; return tiedRegs(group) + index;
} }
[[nodiscard]]
inline const RATiedReg* tiedRegForWorkReg(RegGroup group, uint32_t workId) const noexcept { inline const RATiedReg* tiedRegForWorkReg(RegGroup group, uint32_t workId) const noexcept {
const RATiedReg* array = tiedRegs(group); const RATiedReg* array = tiedRegs(group);
size_t count = tiedCount(group); size_t count = tiedCount(group);
@@ -387,6 +487,7 @@ public:
//! \name Static Functions //! \name Static Functions
//! \{ //! \{
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG size_t sizeOf(uint32_t tiedRegCount) noexcept { static ASMJIT_INLINE_NODEBUG size_t sizeOf(uint32_t tiedRegCount) noexcept {
return sizeof(RAInst) - sizeof(RATiedReg) + tiedRegCount * sizeof(RATiedReg); return sizeof(RAInst) - sizeof(RATiedReg) + tiedRegCount * sizeof(RATiedReg);
} }
@@ -447,33 +548,51 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstRWFlags instRWFlags() const noexcept { return _instRWFlags; } ASMJIT_INLINE_NODEBUG InstRWFlags instRWFlags() const noexcept { return _instRWFlags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstRWFlag(InstRWFlags flag) const noexcept { return Support::test(_instRWFlags, flag); } ASMJIT_INLINE_NODEBUG bool hasInstRWFlag(InstRWFlags flag) const noexcept { return Support::test(_instRWFlags, flag); }
ASMJIT_INLINE_NODEBUG void addInstRWFlags(InstRWFlags flags) noexcept { _instRWFlags |= flags; } ASMJIT_INLINE_NODEBUG void addInstRWFlags(InstRWFlags flags) noexcept { _instRWFlags |= flags; }
ASMJIT_INLINE_NODEBUG void clearInstRWFlags(InstRWFlags flags) noexcept { _instRWFlags &= ~flags; } ASMJIT_INLINE_NODEBUG void clearInstRWFlags(InstRWFlags flags) noexcept { _instRWFlags &= ~flags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedFlags aggregatedFlags() const noexcept { return _aggregatedFlags; } ASMJIT_INLINE_NODEBUG RATiedFlags aggregatedFlags() const noexcept { return _aggregatedFlags; }
ASMJIT_INLINE_NODEBUG void addAggregatedFlags(RATiedFlags flags) noexcept { _aggregatedFlags |= flags; } ASMJIT_INLINE_NODEBUG void addAggregatedFlags(RATiedFlags flags) noexcept { _aggregatedFlags |= flags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedFlags forbiddenFlags() const noexcept { return _forbiddenFlags; } ASMJIT_INLINE_NODEBUG RATiedFlags forbiddenFlags() const noexcept { return _forbiddenFlags; }
ASMJIT_INLINE_NODEBUG void addForbiddenFlags(RATiedFlags flags) noexcept { _forbiddenFlags |= flags; } ASMJIT_INLINE_NODEBUG void addForbiddenFlags(RATiedFlags flags) noexcept { _forbiddenFlags |= flags; }
//! Returns the number of tied registers added to the builder. //! Returns the number of tied registers added to the builder.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t tiedRegCount() const noexcept { return uint32_t((size_t)(_cur - _tiedRegs)); } ASMJIT_INLINE_NODEBUG uint32_t tiedRegCount() const noexcept { return uint32_t((size_t)(_cur - _tiedRegs)); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedReg* begin() noexcept { return _tiedRegs; } ASMJIT_INLINE_NODEBUG RATiedReg* begin() noexcept { return _tiedRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RATiedReg* end() noexcept { return _cur; } ASMJIT_INLINE_NODEBUG RATiedReg* end() noexcept { return _cur; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RATiedReg* begin() const noexcept { return _tiedRegs; } ASMJIT_INLINE_NODEBUG const RATiedReg* begin() const noexcept { return _tiedRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RATiedReg* end() const noexcept { return _cur; } ASMJIT_INLINE_NODEBUG const RATiedReg* end() const noexcept { return _cur; }
//! Returns `RATiedReg` at the given `index`. //! Returns `RATiedReg` at the given `index`.
[[nodiscard]]
inline RATiedReg* operator[](size_t index) noexcept { inline RATiedReg* operator[](size_t index) noexcept {
ASMJIT_ASSERT(index < tiedRegCount()); ASMJIT_ASSERT(index < tiedRegCount());
return &_tiedRegs[index]; return &_tiedRegs[index];
} }
//! Returns `RATiedReg` at the given `index`. (const). //! Returns `RATiedReg` at the given `index`. (const).
[[nodiscard]]
inline const RATiedReg* operator[](size_t index) const noexcept { inline const RATiedReg* operator[](size_t index) const noexcept {
ASMJIT_ASSERT(index < tiedRegCount()); ASMJIT_ASSERT(index < tiedRegCount());
return &_tiedRegs[index]; return &_tiedRegs[index];
@@ -484,6 +603,7 @@ public:
//! \name Utilities //! \name Utilities
//! \{ //! \{
[[nodiscard]]
Error add( Error add(
RAWorkReg* workReg, RAWorkReg* workReg,
RATiedFlags flags, RATiedFlags flags,
@@ -524,20 +644,23 @@ public:
} }
else { else {
if (consecutiveParent != tiedReg->consecutiveParent()) { if (consecutiveParent != tiedReg->consecutiveParent()) {
if (tiedReg->consecutiveParent() != Globals::kInvalidId) if (tiedReg->consecutiveParent() != Globals::kInvalidId) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
tiedReg->_consecutiveParent = consecutiveParent; tiedReg->_consecutiveParent = consecutiveParent;
} }
if (useId != BaseReg::kIdBad) { if (useId != BaseReg::kIdBad) {
if (ASMJIT_UNLIKELY(tiedReg->hasUseId())) if (ASMJIT_UNLIKELY(tiedReg->hasUseId())) {
return DebugUtils::errored(kErrorOverlappedRegs); return DebugUtils::errored(kErrorOverlappedRegs);
}
tiedReg->setUseId(useId); tiedReg->setUseId(useId);
} }
if (outId != BaseReg::kIdBad) { if (outId != BaseReg::kIdBad) {
if (ASMJIT_UNLIKELY(tiedReg->hasOutId())) if (ASMJIT_UNLIKELY(tiedReg->hasOutId())) {
return DebugUtils::errored(kErrorOverlappedRegs); return DebugUtils::errored(kErrorOverlappedRegs);
}
tiedReg->setOutId(outId); tiedReg->setOutId(outId);
} }
@@ -552,6 +675,7 @@ public:
} }
} }
[[nodiscard]]
Error addCallArg(RAWorkReg* workReg, uint32_t useId) noexcept { Error addCallArg(RAWorkReg* workReg, uint32_t useId) noexcept {
ASMJIT_ASSERT(useId != BaseReg::kIdBad); ASMJIT_ASSERT(useId != BaseReg::kIdBad);
@@ -594,6 +718,7 @@ public:
} }
} }
[[nodiscard]]
Error addCallRet(RAWorkReg* workReg, uint32_t outId) noexcept { Error addCallRet(RAWorkReg* workReg, uint32_t outId) noexcept {
ASMJIT_ASSERT(outId != BaseReg::kIdBad); ASMJIT_ASSERT(outId != BaseReg::kIdBad);
@@ -621,8 +746,9 @@ public:
return kErrorOk; return kErrorOk;
} }
else { else {
if (tiedReg->hasOutId()) if (tiedReg->hasOutId()) {
return DebugUtils::errored(kErrorOverlappedRegs); return DebugUtils::errored(kErrorOverlappedRegs);
}
tiedReg->addRefCount(); tiedReg->addRefCount();
tiedReg->addFlags(flags); tiedReg->addFlags(flags);
@@ -639,8 +765,13 @@ public:
//! See \ref RAAssignment for more information about register assignments. //! See \ref RAAssignment for more information about register assignments.
class RASharedAssignment { class RASharedAssignment {
public: public:
typedef RAAssignment::PhysToWorkMap PhysToWorkMap; //! \name Types
typedef RAAssignment::WorkToPhysMap WorkToPhysMap; //! \{
using PhysToWorkMap = RAAssignment::PhysToWorkMap;
using WorkToPhysMap = RAAssignment::WorkToPhysMap;
//! \}
//! \name Members //! \name Members
//! \{ //! \{
@@ -659,14 +790,20 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _physToWorkMap == nullptr; } ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _physToWorkMap == nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask entryScratchGpRegs() const noexcept { return _entryScratchGpRegs; } ASMJIT_INLINE_NODEBUG RegMask entryScratchGpRegs() const noexcept { return _entryScratchGpRegs; }
ASMJIT_INLINE_NODEBUG void addEntryScratchGpRegs(RegMask mask) noexcept { _entryScratchGpRegs |= mask; } ASMJIT_INLINE_NODEBUG void addEntryScratchGpRegs(RegMask mask) noexcept { _entryScratchGpRegs |= mask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneBitVector& liveIn() const noexcept { return _liveIn; } ASMJIT_INLINE_NODEBUG const ZoneBitVector& liveIn() const noexcept { return _liveIn; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG PhysToWorkMap* physToWorkMap() const noexcept { return _physToWorkMap; } ASMJIT_INLINE_NODEBUG PhysToWorkMap* physToWorkMap() const noexcept { return _physToWorkMap; }
ASMJIT_INLINE_NODEBUG void assignPhysToWorkMap(PhysToWorkMap* physToWorkMap) noexcept { _physToWorkMap = physToWorkMap; } ASMJIT_INLINE_NODEBUG void assignPhysToWorkMap(PhysToWorkMap* physToWorkMap) noexcept { _physToWorkMap = physToWorkMap; }
//! \} //! \}
@@ -676,14 +813,22 @@ public:
class BaseRAPass : public FuncPass { class BaseRAPass : public FuncPass {
public: public:
ASMJIT_NONCOPYABLE(BaseRAPass) ASMJIT_NONCOPYABLE(BaseRAPass)
typedef FuncPass Base; using Base = FuncPass;
enum : uint32_t { //! \name Constants
kCallArgWeight = 80 //! \{
};
typedef RAAssignment::PhysToWorkMap PhysToWorkMap; static inline constexpr uint32_t kCallArgWeight = 80;
typedef RAAssignment::WorkToPhysMap WorkToPhysMap;
//! \}
//! \name Types
//! \{
using PhysToWorkMap = RAAssignment::PhysToWorkMap;
using WorkToPhysMap = RAAssignment::WorkToPhysMap;
//! \}
//! \name Members //! \name Members
//! \{ //! \{
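
The move from the anonymous `enum : uint32_t { kCallArgWeight = 80 }` to `static inline constexpr uint32_t kCallArgWeight` leans on C++17 inline variables: the constant keeps a concrete type, can be ODR-used, and needs no out-of-class definition. A minimal standalone sketch of the difference (illustrative `Weights` struct, not asmjit code):

  #include <cstdint>

  struct Weights {
    // Pre-C++17 idiom: an enumerator is a value, not an object, so its
    // address cannot be taken and it has no storage of its own.
    enum : uint32_t { kOldCallArgWeight = 80 };

    // C++17: a static inline constexpr member is a real uint32_t object,
    // defined once across all translation units - no .cpp definition needed.
    static inline constexpr uint32_t kCallArgWeight = 80;
  };

  int main() {
    const uint32_t* p = &Weights::kCallArgWeight; // ODR-use compiles without an out-of-class definition
    return int(*p - Weights::kOldCallArgWeight);  // 0
  }
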
@@ -787,37 +932,55 @@ public:
//! \{ //! \{
//! Returns \ref Logger passed to \ref runOnFunction(). //! Returns \ref Logger passed to \ref runOnFunction().
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Logger* logger() const noexcept { return _logger; } ASMJIT_INLINE_NODEBUG Logger* logger() const noexcept { return _logger; }
//! Returns a valid logger if the given `option` is set and logging is enabled, or nullptr otherwise. //! Returns a valid logger if the given `option` is set and logging is enabled, or nullptr otherwise.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Logger* getLoggerIf(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option) ? _logger : nullptr; } ASMJIT_INLINE_NODEBUG Logger* getLoggerIf(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option) ? _logger : nullptr; }
//! Returns whether the diagnostic `option` is enabled. //! Returns whether the diagnostic `option` is enabled.
//! //!
//! \note Returns false if there is no logger (as diagnostics without logging make no sense). //! \note Returns false if there is no logger (as diagnostics without logging make no sense).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasDiagnosticOption(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option); } ASMJIT_INLINE_NODEBUG bool hasDiagnosticOption(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option); }
//! Returns \ref Zone passed to \ref runOnFunction(). //! Returns \ref Zone passed to \ref runOnFunction().
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Zone* zone() const noexcept { return _allocator.zone(); } ASMJIT_INLINE_NODEBUG Zone* zone() const noexcept { return _allocator.zone(); }
//! Returns \ref ZoneAllocator used by the register allocator. //! Returns \ref ZoneAllocator used by the register allocator.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept { return const_cast<ZoneAllocator*>(&_allocator); } ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept { return const_cast<ZoneAllocator*>(&_allocator); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<RASharedAssignment>& sharedAssignments() const { return _sharedAssignments; } ASMJIT_INLINE_NODEBUG const ZoneVector<RASharedAssignment>& sharedAssignments() const { return _sharedAssignments; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t sharedAssignmentCount() const noexcept { return _sharedAssignments.size(); } ASMJIT_INLINE_NODEBUG uint32_t sharedAssignmentCount() const noexcept { return _sharedAssignments.size(); }
//! Returns the current function node. //! Returns the current function node.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncNode* func() const noexcept { return _func; } ASMJIT_INLINE_NODEBUG FuncNode* func() const noexcept { return _func; }
//! Returns the stop of the current function. //! Returns the stop of the current function.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* stop() const noexcept { return _stop; } ASMJIT_INLINE_NODEBUG BaseNode* stop() const noexcept { return _stop; }
//! Returns an extra block used by the current function being processed. //! Returns an extra block used by the current function being processed.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseNode* extraBlock() const noexcept { return _extraBlock; } ASMJIT_INLINE_NODEBUG BaseNode* extraBlock() const noexcept { return _extraBlock; }
//! Sets an extra block, see `extraBlock()`. //! Sets an extra block, see `extraBlock()`.
ASMJIT_INLINE_NODEBUG void setExtraBlock(BaseNode* node) noexcept { _extraBlock = node; } ASMJIT_INLINE_NODEBUG void setExtraBlock(BaseNode* node) noexcept { _extraBlock = node; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t endPosition() const noexcept { return _instructionCount * 2; } ASMJIT_INLINE_NODEBUG uint32_t endPosition() const noexcept { return _instructionCount * 2; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RARegMask& availableRegs() const noexcept { return _availableRegs; } ASMJIT_INLINE_NODEBUG const RARegMask& availableRegs() const noexcept { return _availableRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RARegMask& clobberedRegs() const noexcept { return _clobberedRegs; } ASMJIT_INLINE_NODEBUG const RARegMask& clobberedRegs() const noexcept { return _clobberedRegs; }
//! \} //! \}
@@ -855,25 +1018,33 @@ public:
//! \{ //! \{
//! Returns the function's entry block. //! Returns the function's entry block.
[[nodiscard]]
inline RABlock* entryBlock() noexcept { inline RABlock* entryBlock() noexcept {
ASMJIT_ASSERT(!_blocks.empty()); ASMJIT_ASSERT(!_blocks.empty());
return _blocks[0]; return _blocks[0];
} }
//! \overload //! \overload
[[nodiscard]]
inline const RABlock* entryBlock() const noexcept { inline const RABlock* entryBlock() const noexcept {
ASMJIT_ASSERT(!_blocks.empty()); ASMJIT_ASSERT(!_blocks.empty());
return _blocks[0]; return _blocks[0];
} }
//! Returns all basic blocks of this function. //! Returns all basic blocks of this function.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RABlocks& blocks() noexcept { return _blocks; } ASMJIT_INLINE_NODEBUG RABlocks& blocks() noexcept { return _blocks; }
//! \overload //! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RABlocks& blocks() const noexcept { return _blocks; } ASMJIT_INLINE_NODEBUG const RABlocks& blocks() const noexcept { return _blocks; }
//! Returns the count of basic blocks (returns size of `_blocks` array). //! Returns the count of basic blocks (returns size of `_blocks` array).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t blockCount() const noexcept { return _blocks.size(); } ASMJIT_INLINE_NODEBUG uint32_t blockCount() const noexcept { return _blocks.size(); }
//! Returns the count of reachable basic blocks (returns size of `_pov` array). //! Returns the count of reachable basic blocks (returns size of `_pov` array).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t reachableBlockCount() const noexcept { return _pov.size(); } ASMJIT_INLINE_NODEBUG uint32_t reachableBlockCount() const noexcept { return _pov.size(); }
//! Tests whether the CFG has dangling blocks - these were created by `newBlock()`, but not added to CFG through //! Tests whether the CFG has dangling blocks - these were created by `newBlock()`, but not added to CFG through
@@ -881,41 +1052,51 @@ public:
//! incomplete. //! incomplete.
//! //!
//! \note This is only used to check if the number of created blocks matches the number of added blocks. //! \note This is only used to check if the number of created blocks matches the number of added blocks.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasDanglingBlocks() const noexcept { return _createdBlockCount != blockCount(); } ASMJIT_INLINE_NODEBUG bool hasDanglingBlocks() const noexcept { return _createdBlockCount != blockCount(); }
//! Gets the next timestamp to be used to mark CFG blocks. //! Gets the next timestamp to be used to mark CFG blocks.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t nextTimestamp() const noexcept { return ++_lastTimestamp; } ASMJIT_INLINE_NODEBUG uint64_t nextTimestamp() const noexcept { return ++_lastTimestamp; }
//! Creates a new `RABlock` instance. //! Creates a new `RABlock` instance.
//! //!
//! \note New blocks don't have ID assigned until they are added to the block array by calling `addBlock()`. //! \note New blocks don't have ID assigned until they are added to the block array by calling `addBlock()`.
[[nodiscard]]
RABlock* newBlock(BaseNode* initialNode = nullptr) noexcept; RABlock* newBlock(BaseNode* initialNode = nullptr) noexcept;
//! Tries to find a neighboring LabelNode (without going through code) that is already connected with `RABlock`. //! Tries to find a neighboring LabelNode (without going through code) that is already connected with `RABlock`.
//! If no label is found then a new RABlock is created and assigned to all possible labels in a backward direction. //! If no label is found then a new RABlock is created and assigned to all possible labels in a backward direction.
[[nodiscard]]
RABlock* newBlockOrExistingAt(LabelNode* cbLabel, BaseNode** stoppedAt = nullptr) noexcept; RABlock* newBlockOrExistingAt(LabelNode* cbLabel, BaseNode** stoppedAt = nullptr) noexcept;
//! Adds the given `block` to the block list and assigns it a unique block id. //! Adds the given `block` to the block list and assigns it a unique block id.
[[nodiscard]]
Error addBlock(RABlock* block) noexcept; Error addBlock(RABlock* block) noexcept;
[[nodiscard]]
inline Error addExitBlock(RABlock* block) noexcept { inline Error addExitBlock(RABlock* block) noexcept {
block->addFlags(RABlockFlags::kIsFuncExit); block->addFlags(RABlockFlags::kIsFuncExit);
return _exits.append(allocator(), block); return _exits.append(allocator(), block);
} }
ASMJIT_FORCE_INLINE RAInst* newRAInst(RABlock* block, InstRWFlags instRWFlags, RATiedFlags flags, uint32_t tiedRegCount, const RARegMask& clobberedRegs) noexcept { [[nodiscard]]
ASMJIT_INLINE RAInst* newRAInst(RABlock* block, InstRWFlags instRWFlags, RATiedFlags flags, uint32_t tiedRegCount, const RARegMask& clobberedRegs) noexcept {
void* p = zone()->alloc(RAInst::sizeOf(tiedRegCount)); void* p = zone()->alloc(RAInst::sizeOf(tiedRegCount));
if (ASMJIT_UNLIKELY(!p)) if (ASMJIT_UNLIKELY(!p)) {
return nullptr; return nullptr;
}
return new(Support::PlacementNew{p}) RAInst(block, instRWFlags, flags, tiedRegCount, clobberedRegs); return new(Support::PlacementNew{p}) RAInst(block, instRWFlags, flags, tiedRegCount, clobberedRegs);
} }
ASMJIT_FORCE_INLINE Error assignRAInst(BaseNode* node, RABlock* block, RAInstBuilder& ib) noexcept { [[nodiscard]]
ASMJIT_INLINE Error assignRAInst(BaseNode* node, RABlock* block, RAInstBuilder& ib) noexcept {
uint32_t tiedRegCount = ib.tiedRegCount(); uint32_t tiedRegCount = ib.tiedRegCount();
RAInst* raInst = newRAInst(block, ib.instRWFlags(), ib.aggregatedFlags(), tiedRegCount, ib._clobbered); RAInst* raInst = newRAInst(block, ib.instRWFlags(), ib.aggregatedFlags(), tiedRegCount, ib._clobbered);
if (ASMJIT_UNLIKELY(!raInst)) if (ASMJIT_UNLIKELY(!raInst)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
RARegIndex index; RARegIndex index;
RATiedFlags flagsFilter = ~ib.forbiddenFlags(); RATiedFlags flagsFilter = ~ib.forbiddenFlags();
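
`newRAInst()` above allocates `RAInst::sizeOf(tiedRegCount)` bytes from the zone and placement-constructs the instruction, which suggests the tied registers live inline behind the instruction header. A minimal standalone sketch of that trailing-array idiom (simplified `Inst`/`TiedReg` types and plain `malloc` instead of a zone allocator; not asmjit's actual layout):

  #include <cstdint>
  #include <cstdlib>
  #include <new>

  struct TiedReg { uint32_t workId; };

  struct Inst {
    uint32_t count;
    TiedReg regs[1]; // Trailing storage; really `count` entries follow the header.

    // Bytes needed for a header followed by `n` TiedReg entries.
    static size_t sizeOf(uint32_t n) noexcept {
      return sizeof(Inst) - sizeof(TiedReg) + n * sizeof(TiedReg);
    }
  };

  static Inst* newInst(uint32_t n) noexcept {
    void* p = std::malloc(Inst::sizeOf(n)); // the real code allocates from a Zone instead
    if (!p) return nullptr;
    Inst* inst = new (p) Inst{};            // placement-new into the raw, over-allocated block
    inst->count = n;
    return inst;
  }

  int main() {
    Inst* inst = newInst(4);
    if (inst) {
      inst->regs[3].workId = 42;            // uses the over-allocated trailing storage (classic struct hack)
      std::free(inst);
    }
    return 0;
  }
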
@@ -944,8 +1125,9 @@ public:
dst = *tiedReg; dst = *tiedReg;
dst._flags &= flagsFilter; dst._flags &= flagsFilter;
if (!tiedReg->isDuplicate()) if (!tiedReg->isDuplicate()) {
dst._useRegMask &= ~ib._used[group]; dst._useRegMask &= ~ib._used[group];
}
} }
node->setPassData<RAInst>(raInst); node->setPassData<RAInst>(raInst);
@@ -970,9 +1152,11 @@ public:
//! analysis and register allocation. //! analysis and register allocation.
//! //!
//! Use `RACFGBuilderT` template that provides the necessary boilerplate. //! Use `RACFGBuilderT` template that provides the necessary boilerplate.
[[nodiscard]]
virtual Error buildCFG() noexcept; virtual Error buildCFG() noexcept;
//! Called after the CFG is built. //! Called after the CFG is built.
[[nodiscard]]
Error initSharedAssignments(const ZoneVector<uint32_t>& sharedAssignmentsMap) noexcept; Error initSharedAssignments(const ZoneVector<uint32_t>& sharedAssignmentsMap) noexcept;
//! \} //! \}
@@ -981,6 +1165,7 @@ public:
//! \{ //! \{
//! Constructs CFG views (only POV at the moment). //! Constructs CFG views (only POV at the moment).
[[nodiscard]]
Error buildCFGViews() noexcept; Error buildCFGViews() noexcept;
//! \} //! \}
@@ -993,19 +1178,29 @@ public:
// - A node `Z` post-dominates a node `X` if any path from `X` to the end of the graph has to go through `Z`. // - A node `Z` post-dominates a node `X` if any path from `X` to the end of the graph has to go through `Z`.
//! Constructs a dominator-tree from CFG. //! Constructs a dominator-tree from CFG.
[[nodiscard]]
Error buildCFGDominators() noexcept; Error buildCFGDominators() noexcept;
[[nodiscard]]
bool _strictlyDominates(const RABlock* a, const RABlock* b) const noexcept; bool _strictlyDominates(const RABlock* a, const RABlock* b) const noexcept;
[[nodiscard]]
const RABlock* _nearestCommonDominator(const RABlock* a, const RABlock* b) const noexcept; const RABlock* _nearestCommonDominator(const RABlock* a, const RABlock* b) const noexcept;
//! Tests whether the basic block `a` dominates `b` - non-strict, returns true when `a == b`. //! Tests whether the basic block `a` dominates `b` - non-strict, returns true when `a == b`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool dominates(const RABlock* a, const RABlock* b) const noexcept { return a == b ? true : _strictlyDominates(a, b); } ASMJIT_INLINE_NODEBUG bool dominates(const RABlock* a, const RABlock* b) const noexcept { return a == b ? true : _strictlyDominates(a, b); }
//! Tests whether the basic block `a` dominates `b` - strict dominance check, returns false when `a == b`. //! Tests whether the basic block `a` dominates `b` - strict dominance check, returns false when `a == b`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool strictlyDominates(const RABlock* a, const RABlock* b) const noexcept { return a == b ? false : _strictlyDominates(a, b); } ASMJIT_INLINE_NODEBUG bool strictlyDominates(const RABlock* a, const RABlock* b) const noexcept { return a == b ? false : _strictlyDominates(a, b); }
//! Returns a nearest common dominator of `a` and `b`. //! Returns a nearest common dominator of `a` and `b`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RABlock* nearestCommonDominator(RABlock* a, RABlock* b) const noexcept { return const_cast<RABlock*>(_nearestCommonDominator(a, b)); } ASMJIT_INLINE_NODEBUG RABlock* nearestCommonDominator(RABlock* a, RABlock* b) const noexcept { return const_cast<RABlock*>(_nearestCommonDominator(a, b)); }
//! Returns a nearest common dominator of `a` and `b` (const). //! Returns a nearest common dominator of `a` and `b` (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RABlock* nearestCommonDominator(const RABlock* a, const RABlock* b) const noexcept { return _nearestCommonDominator(a, b); } ASMJIT_INLINE_NODEBUG const RABlock* nearestCommonDominator(const RABlock* a, const RABlock* b) const noexcept { return _nearestCommonDominator(a, b); }
//! \} //! \}
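
`dominates()` is the non-strict form (every block dominates itself), `strictlyDominates()` excludes equality, and a nearest common dominator can be found by walking immediate-dominator chains. A minimal standalone sketch on a hand-built dominator tree (illustrative `Block` struct and a naive chain walk, not asmjit's `RABlock` implementation):

  #include <cassert>

  struct Block {
    Block* idom = nullptr; // immediate dominator; nullptr for the entry block
  };

  // Non-strict dominance: walk up b's dominator chain looking for a.
  static bool dominates(const Block* a, const Block* b) {
    for (const Block* x = b; x; x = x->idom)
      if (x == a) return true;
    return false;
  }

  static bool strictlyDominates(const Block* a, const Block* b) {
    return a != b && dominates(a, b);
  }

  // Nearest common dominator: climb from `a` until a block dominating `b` is found.
  static const Block* nearestCommonDominator(const Block* a, const Block* b) {
    for (const Block* x = a; x; x = x->idom)
      if (dominates(x, b)) return x;
    return nullptr;
  }

  int main() {
    // entry -> {thenB, elseB} -> join
    Block entry, thenB, elseB, join;
    thenB.idom = &entry;
    elseB.idom = &entry;
    join.idom  = &entry; // neither branch dominates the join

    assert(dominates(&entry, &join));
    assert(!strictlyDominates(&join, &join));             // the strict form excludes equality
    assert(nearestCommonDominator(&thenB, &elseB) == &entry);
    return 0;
  }
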
@@ -1013,15 +1208,18 @@ public:
//! \name CFG - Utilities //! \name CFG - Utilities
//! \{ //! \{
[[nodiscard]]
Error removeUnreachableCode() noexcept; Error removeUnreachableCode() noexcept;
//! Returns `node` or some node after it that is ideal for beginning a new block. This function is mostly used after //! Returns `node` or some node after it that is ideal for beginning a new block. This function is mostly used after
//! a conditional or unconditional jump to select the successor node. In some cases the next node could be a label, //! a conditional or unconditional jump to select the successor node. In some cases the next node could be a label,
//! which means it could already have a block assigned. //! which means it could already have a block assigned.
[[nodiscard]]
BaseNode* findSuccessorStartingAt(BaseNode* node) noexcept; BaseNode* findSuccessorStartingAt(BaseNode* node) noexcept;
//! Returns `true` if the `node` can flow to `target` without reaching any code or data. It's used to eliminate jumps //! Returns `true` if the `node` can flow to `target` without reaching any code or data. It's used to eliminate jumps
//! to labels that are right next to them. //! to labels that are right next to them.
[[nodiscard]]
bool isNextTo(BaseNode* node, BaseNode* target) noexcept; bool isNextTo(BaseNode* node, BaseNode* target) noexcept;
//! \} //! \}
@@ -1030,18 +1228,31 @@ public:
//! \{ //! \{
//! Returns a native size of the general-purpose register of the target architecture. //! Returns a native size of the general-purpose register of the target architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return _sp.size(); } ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return _sp.size(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t availableRegCount(RegGroup group) const noexcept { return _availableRegCount[group]; } ASMJIT_INLINE_NODEBUG uint32_t availableRegCount(RegGroup group) const noexcept { return _availableRegCount[group]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAWorkReg* workRegById(uint32_t workId) const noexcept { return _workRegs[workId]; } ASMJIT_INLINE_NODEBUG RAWorkReg* workRegById(uint32_t workId) const noexcept { return _workRegs[workId]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAWorkRegs& workRegs() noexcept { return _workRegs; } ASMJIT_INLINE_NODEBUG RAWorkRegs& workRegs() noexcept { return _workRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAWorkRegs& workRegs(RegGroup group) noexcept { return _workRegsOfGroup[group]; } ASMJIT_INLINE_NODEBUG RAWorkRegs& workRegs(RegGroup group) noexcept { return _workRegsOfGroup[group]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RAWorkRegs& workRegs() const noexcept { return _workRegs; } ASMJIT_INLINE_NODEBUG const RAWorkRegs& workRegs() const noexcept { return _workRegs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RAWorkRegs& workRegs(RegGroup group) const noexcept { return _workRegsOfGroup[group]; } ASMJIT_INLINE_NODEBUG const RAWorkRegs& workRegs(RegGroup group) const noexcept { return _workRegsOfGroup[group]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t workRegCount() const noexcept { return _workRegs.size(); } ASMJIT_INLINE_NODEBUG uint32_t workRegCount() const noexcept { return _workRegs.size(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t workRegCount(RegGroup group) const noexcept { return _workRegsOfGroup[group].size(); } ASMJIT_INLINE_NODEBUG uint32_t workRegCount(RegGroup group) const noexcept { return _workRegsOfGroup[group].size(); }
inline void _buildPhysIndex() noexcept { inline void _buildPhysIndex() noexcept {
@@ -1049,30 +1260,39 @@ public:
_physRegTotal = uint32_t(_physRegIndex[RegGroup::kMaxVirt]) + _physRegTotal = uint32_t(_physRegIndex[RegGroup::kMaxVirt]) +
uint32_t(_physRegCount[RegGroup::kMaxVirt]) ; uint32_t(_physRegCount[RegGroup::kMaxVirt]) ;
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t physRegIndex(RegGroup group) const noexcept { return _physRegIndex[group]; } ASMJIT_INLINE_NODEBUG uint32_t physRegIndex(RegGroup group) const noexcept { return _physRegIndex[group]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t physRegTotal() const noexcept { return _physRegTotal; } ASMJIT_INLINE_NODEBUG uint32_t physRegTotal() const noexcept { return _physRegTotal; }
[[nodiscard]]
Error _asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept; Error _asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept;
//! Creates `RAWorkReg` data for the given `vReg`. The function does nothing //! Creates `RAWorkReg` data for the given `vReg`. The function does nothing
//! if `vReg` already contains a link to `RAWorkReg`. Called by `constructBlocks()`. //! if `vReg` already contains a link to `RAWorkReg`. Called by `constructBlocks()`.
[[nodiscard]]
inline Error asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept { inline Error asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept {
*out = vReg->workReg(); *out = vReg->workReg();
return *out ? kErrorOk : _asWorkReg(vReg, out); return *out ? kErrorOk : _asWorkReg(vReg, out);
} }
ASMJIT_FORCE_INLINE Error virtIndexAsWorkReg(uint32_t vIndex, RAWorkReg** out) noexcept { [[nodiscard]]
ASMJIT_INLINE Error virtIndexAsWorkReg(uint32_t vIndex, RAWorkReg** out) noexcept {
const ZoneVector<VirtReg*>& virtRegs = cc()->virtRegs(); const ZoneVector<VirtReg*>& virtRegs = cc()->virtRegs();
if (ASMJIT_UNLIKELY(vIndex >= virtRegs.size())) if (ASMJIT_UNLIKELY(vIndex >= virtRegs.size()))
return DebugUtils::errored(kErrorInvalidVirtId); return DebugUtils::errored(kErrorInvalidVirtId);
return asWorkReg(virtRegs[vIndex], out); return asWorkReg(virtRegs[vIndex], out);
} }
[[nodiscard]]
inline RAStackSlot* getOrCreateStackSlot(RAWorkReg* workReg) noexcept { inline RAStackSlot* getOrCreateStackSlot(RAWorkReg* workReg) noexcept {
RAStackSlot* slot = workReg->stackSlot(); RAStackSlot* slot = workReg->stackSlot();
if (slot) if (slot) {
return slot; return slot;
}
slot = _stackAllocator.newSlot(_sp.id(), workReg->virtReg()->virtSize(), workReg->virtReg()->alignment(), RAStackSlot::kFlagRegHome); slot = _stackAllocator.newSlot(_sp.id(), workReg->virtReg()->virtSize(), workReg->virtReg()->alignment(), RAStackSlot::kFlagRegHome);
workReg->_stackSlot = slot; workReg->_stackSlot = slot;
@@ -1080,17 +1300,22 @@ public:
return slot; return slot;
} }
[[nodiscard]]
inline BaseMem workRegAsMem(RAWorkReg* workReg) noexcept { inline BaseMem workRegAsMem(RAWorkReg* workReg) noexcept {
getOrCreateStackSlot(workReg); (void)getOrCreateStackSlot(workReg);
return BaseMem(OperandSignature::fromOpType(OperandType::kMem) | return BaseMem(OperandSignature::fromOpType(OperandType::kMem) |
OperandSignature::fromMemBaseType(_sp.type()) | OperandSignature::fromMemBaseType(_sp.type()) |
OperandSignature::fromBits(OperandSignature::kMemRegHomeFlag), OperandSignature::fromBits(OperandSignature::kMemRegHomeFlag),
workReg->virtId(), 0, 0); workReg->virtId(), 0, 0);
} }
[[nodiscard]]
WorkToPhysMap* newWorkToPhysMap() noexcept; WorkToPhysMap* newWorkToPhysMap() noexcept;
[[nodiscard]]
PhysToWorkMap* newPhysToWorkMap() noexcept; PhysToWorkMap* newPhysToWorkMap() noexcept;
[[nodiscard]]
inline PhysToWorkMap* clonePhysToWorkMap(const PhysToWorkMap* map) noexcept { inline PhysToWorkMap* clonePhysToWorkMap(const PhysToWorkMap* map) noexcept {
size_t size = PhysToWorkMap::sizeOf(_physRegTotal); size_t size = PhysToWorkMap::sizeOf(_physRegTotal);
return static_cast<PhysToWorkMap*>(zone()->dupAligned(map, size, sizeof(uint32_t))); return static_cast<PhysToWorkMap*>(zone()->dupAligned(map, size, sizeof(uint32_t)));
@@ -1101,10 +1326,12 @@ public:
//! 1. Calculates GEN/KILL/IN/OUT of each block. //! 1. Calculates GEN/KILL/IN/OUT of each block.
//! 2. Calculates live spans and basic statistics of each work register. //! 2. Calculates live spans and basic statistics of each work register.
[[nodiscard]]
Error buildLiveness() noexcept; Error buildLiveness() noexcept;
//! Assigns argIndex to WorkRegs. Must be called after the liveness analysis //! Assigns argIndex to WorkRegs. Must be called after the liveness analysis
//! finishes as it checks whether the argument is live upon entry. //! finishes as it checks whether the argument is live upon entry.
[[nodiscard]]
Error assignArgIndexToWorkRegs() noexcept; Error assignArgIndexToWorkRegs() noexcept;
//! \} //! \}
@@ -1113,11 +1340,14 @@ public:
//! \{ //! \{
//! Runs a global register allocator. //! Runs a global register allocator.
[[nodiscard]]
Error runGlobalAllocator() noexcept; Error runGlobalAllocator() noexcept;
//! Initializes data structures used for global live spans. //! Initializes data structures used for global live spans.
[[nodiscard]]
Error initGlobalLiveSpans() noexcept; Error initGlobalLiveSpans() noexcept;
[[nodiscard]]
Error binPack(RegGroup group) noexcept; Error binPack(RegGroup group) noexcept;
//! \} //! \}
@@ -1126,13 +1356,19 @@ public:
//! \{ //! \{
//! Runs a local register allocator. //! Runs a local register allocator.
[[nodiscard]]
Error runLocalAllocator() noexcept; Error runLocalAllocator() noexcept;
[[nodiscard]]
Error setBlockEntryAssignment(RABlock* block, const RABlock* fromBlock, const RAAssignment& fromAssignment) noexcept; Error setBlockEntryAssignment(RABlock* block, const RABlock* fromBlock, const RAAssignment& fromAssignment) noexcept;
[[nodiscard]]
Error setSharedAssignment(uint32_t sharedAssignmentId, const RAAssignment& fromAssignment) noexcept; Error setSharedAssignment(uint32_t sharedAssignmentId, const RAAssignment& fromAssignment) noexcept;
//! Called after the RA assignment has been assigned to a block. //! Called after the RA assignment has been assigned to a block.
//! //!
//! This cannot change the assignment, but can examine it. //! This cannot change the assignment, but can examine it.
[[nodiscard]]
Error blockEntryAssigned(const PhysToWorkMap* physToWorkMap) noexcept; Error blockEntryAssigned(const PhysToWorkMap* physToWorkMap) noexcept;
//! \} //! \}
@@ -1140,6 +1376,7 @@ public:
//! \name Register Allocation Utilities //! \name Register Allocation Utilities
//! \{ //! \{
[[nodiscard]]
Error useTemporaryMem(BaseMem& out, uint32_t size, uint32_t alignment) noexcept; Error useTemporaryMem(BaseMem& out, uint32_t size, uint32_t alignment) noexcept;
//! \} //! \}
@@ -1147,9 +1384,16 @@ public:
//! \name Function Prolog & Epilog //! \name Function Prolog & Epilog
//! \{ //! \{
[[nodiscard]]
virtual Error updateStackFrame() noexcept; virtual Error updateStackFrame() noexcept;
[[nodiscard]]
Error _markStackArgsToKeep() noexcept; Error _markStackArgsToKeep() noexcept;
[[nodiscard]]
Error _updateStackArgs() noexcept; Error _updateStackArgs() noexcept;
[[nodiscard]]
Error insertPrologEpilog() noexcept; Error insertPrologEpilog() noexcept;
//! \} //! \}
@@ -1157,7 +1401,10 @@ public:
//! \name Instruction Rewriter //! \name Instruction Rewriter
//! \{ //! \{
[[nodiscard]]
Error rewrite() noexcept; Error rewrite() noexcept;
[[nodiscard]]
virtual Error _rewrite(BaseNode* first, BaseNode* stop) noexcept; virtual Error _rewrite(BaseNode* first, BaseNode* stop) noexcept;
//! \} //! \}
@@ -1167,7 +1414,6 @@ public:
//! \{ //! \{
Error annotateCode() noexcept; Error annotateCode() noexcept;
Error _dumpBlockIds(String& sb, const RABlocks& blocks) noexcept; Error _dumpBlockIds(String& sb, const RABlocks& blocks) noexcept;
Error _dumpBlockLiveness(String& sb, const RABlock* block) noexcept; Error _dumpBlockLiveness(String& sb, const RABlock* block) noexcept;
Error _dumpLiveSpans(String& sb) noexcept; Error _dumpLiveSpans(String& sb) noexcept;
@@ -1178,13 +1424,22 @@ public:
//! \name Emit //! \name Emit
//! \{ //! \{
[[nodiscard]]
virtual Error emitMove(uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept; virtual Error emitMove(uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept;
[[nodiscard]]
virtual Error emitSwap(uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept; virtual Error emitSwap(uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept;
[[nodiscard]]
virtual Error emitLoad(uint32_t workId, uint32_t dstPhysId) noexcept; virtual Error emitLoad(uint32_t workId, uint32_t dstPhysId) noexcept;
[[nodiscard]]
virtual Error emitSave(uint32_t workId, uint32_t srcPhysId) noexcept; virtual Error emitSave(uint32_t workId, uint32_t srcPhysId) noexcept;
[[nodiscard]]
virtual Error emitJump(const Label& label) noexcept; virtual Error emitJump(const Label& label) noexcept;
[[nodiscard]]
virtual Error emitPreCall(InvokeNode* invokeNode) noexcept; virtual Error emitPreCall(InvokeNode* invokeNode) noexcept;
//! \} //! \}
@@ -1194,8 +1449,9 @@ inline ZoneAllocator* RABlock::allocator() const noexcept { return _ra->allocato
inline RegMask RABlock::entryScratchGpRegs() const noexcept { inline RegMask RABlock::entryScratchGpRegs() const noexcept {
RegMask regs = _entryScratchGpRegs; RegMask regs = _entryScratchGpRegs;
if (hasSharedAssignmentId()) if (hasSharedAssignmentId()) {
regs = _ra->_sharedAssignments[_sharedAssignmentId].entryScratchGpRegs(); regs = _ra->_sharedAssignments[_sharedAssignmentId].entryScratchGpRegs();
}
return regs; return regs;
} }

View File

@@ -15,12 +15,14 @@ ASMJIT_BEGIN_NAMESPACE
// ======================== // ========================
RAStackSlot* RAStackAllocator::newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags) noexcept { RAStackSlot* RAStackAllocator::newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags) noexcept {
if (ASMJIT_UNLIKELY(_slots.willGrow(allocator(), 1) != kErrorOk)) if (ASMJIT_UNLIKELY(_slots.willGrow(allocator(), 1) != kErrorOk)) {
return nullptr; return nullptr;
}
RAStackSlot* slot = allocator()->allocT<RAStackSlot>(); RAStackSlot* slot = allocator()->allocT<RAStackSlot>();
if (ASMJIT_UNLIKELY(!slot)) if (ASMJIT_UNLIKELY(!slot)) {
return nullptr; return nullptr;
}
slot->_baseRegId = uint8_t(baseRegId); slot->_baseRegId = uint8_t(baseRegId);
slot->_alignment = uint8_t(Support::max<uint32_t>(alignment, 1)); slot->_alignment = uint8_t(Support::max<uint32_t>(alignment, 1));
@@ -72,15 +74,18 @@ Error RAStackAllocator::calculateStackFrame() noexcept {
uint32_t power = Support::min<uint32_t>(Support::ctz(alignment), 6); uint32_t power = Support::min<uint32_t>(Support::ctz(alignment), 6);
uint64_t weight; uint64_t weight;
if (slot->isRegHome()) if (slot->isRegHome()) {
weight = kBaseRegWeight + (uint64_t(slot->useCount()) * (7 - power)); weight = kBaseRegWeight + (uint64_t(slot->useCount()) * (7 - power));
else }
else {
weight = power; weight = power;
}
// If the weight overflows, which is less likely than winning a lottery, just use the maximum possible weight. // If the weight overflows, which is less likely than winning a lottery, just use the maximum possible weight.
// In such a case it probably doesn't matter at all. // In such a case it probably doesn't matter at all.
if (weight > 0xFFFFFFFFu) if (weight > 0xFFFFFFFFu) {
weight = 0xFFFFFFFFu; weight = 0xFFFFFFFFu;
}
slot->setWeight(uint32_t(weight)); slot->setWeight(uint32_t(weight));
} }
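
The weighting above favors frequently used register-home slots with small alignment: `power = min(ctz(alignment), 6)`, a reg-home slot scores `kBaseRegWeight + useCount * (7 - power)`, a plain slot scores just `power`, and the result saturates to 32 bits. A short worked sketch of the same arithmetic (the value of `kBaseRegWeight` is not shown in this diff, so the constant below is illustrative):

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>

  // Count trailing zeros of a non-zero power-of-two alignment.
  static uint32_t ctz(uint32_t x) {
    uint32_t n = 0;
    while (!(x & 1u)) { x >>= 1; n++; }
    return n;
  }

  // Mirrors the weighting in the hunk above.
  static uint32_t slotWeight(bool isRegHome, uint32_t useCount, uint32_t alignment) {
    constexpr uint64_t kBaseRegWeight = 1000; // illustrative only; the real value lives outside this diff
    uint32_t power = std::min<uint32_t>(ctz(alignment), 6);
    uint64_t weight = isRegHome ? kBaseRegWeight + uint64_t(useCount) * (7 - power)
                                : power;
    return uint32_t(std::min<uint64_t>(weight, 0xFFFFFFFFu)); // saturate on overflow
  }

  int main() {
    // A heavily used 4-byte register home beats a rarely used 32-byte one.
    std::printf("%u\n", slotWeight(true, 25, 4));  // 1000 + 25 * (7 - 2) = 1125
    std::printf("%u\n", slotWeight(true, 2, 32));  // 1000 + 2  * (7 - 5) = 1004
    std::printf("%u\n", slotWeight(false, 0, 8));  // plain slot: power   = 3
    return 0;
  }
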
@@ -104,8 +109,9 @@ Error RAStackAllocator::calculateStackFrame() noexcept {
ZoneVector<RAStackGap> gaps[kSizeCount - 1]; ZoneVector<RAStackGap> gaps[kSizeCount - 1];
for (RAStackSlot* slot : _slots) { for (RAStackSlot* slot : _slots) {
if (slot->isStackArg()) if (slot->isStackArg()) {
continue; continue;
}
uint32_t slotAlignment = slot->alignment(); uint32_t slotAlignment = slot->alignment();
uint32_t alignedOffset = Support::alignUp(offset, slotAlignment); uint32_t alignedOffset = Support::alignUp(offset, slotAlignment);
@@ -153,8 +159,9 @@ Error RAStackAllocator::calculateStackFrame() noexcept {
uint32_t slotSize = 1u << index; uint32_t slotSize = 1u << index;
// Weird case, better to bail... // Weird case, better to bail...
if (gapEnd - gapOffset < slotSize) if (gapEnd - gapOffset < slotSize) {
break; break;
}
ASMJIT_PROPAGATE(gaps[index].append(allocator(), RAStackGap(gapOffset, slotSize))); ASMJIT_PROPAGATE(gaps[index].append(allocator(), RAStackGap(gapOffset, slotSize)));
gapOffset += slotSize; gapOffset += slotSize;
@@ -173,9 +180,11 @@ Error RAStackAllocator::calculateStackFrame() noexcept {
} }
Error RAStackAllocator::adjustSlotOffsets(int32_t offset) noexcept { Error RAStackAllocator::adjustSlotOffsets(int32_t offset) noexcept {
for (RAStackSlot* slot : _slots) for (RAStackSlot* slot : _slots) {
if (!slot->isStackArg()) if (!slot->isStackArg()) {
slot->_offset += offset; slot->_offset += offset;
}
}
return kErrorOk; return kErrorOk;
} }

View File

@@ -57,32 +57,50 @@ struct RAStackSlot {
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
inline uint32_t baseRegId() const noexcept { return _baseRegId; } inline uint32_t baseRegId() const noexcept { return _baseRegId; }
inline void setBaseRegId(uint32_t id) noexcept { _baseRegId = uint8_t(id); } inline void setBaseRegId(uint32_t id) noexcept { _baseRegId = uint8_t(id); }
[[nodiscard]]
inline uint32_t size() const noexcept { return _size; } inline uint32_t size() const noexcept { return _size; }
[[nodiscard]]
inline uint32_t alignment() const noexcept { return _alignment; } inline uint32_t alignment() const noexcept { return _alignment; }
[[nodiscard]]
inline uint32_t flags() const noexcept { return _flags; } inline uint32_t flags() const noexcept { return _flags; }
[[nodiscard]]
inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; } inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
inline void addFlags(uint32_t flags) noexcept { _flags = uint16_t(_flags | flags); } inline void addFlags(uint32_t flags) noexcept { _flags = uint16_t(_flags | flags); }
[[nodiscard]]
inline bool isRegHome() const noexcept { return hasFlag(kFlagRegHome); } inline bool isRegHome() const noexcept { return hasFlag(kFlagRegHome); }
[[nodiscard]]
inline bool isStackArg() const noexcept { return hasFlag(kFlagStackArg); } inline bool isStackArg() const noexcept { return hasFlag(kFlagStackArg); }
[[nodiscard]]
inline uint32_t useCount() const noexcept { return _useCount; } inline uint32_t useCount() const noexcept { return _useCount; }
inline void addUseCount(uint32_t n = 1) noexcept { _useCount += n; } inline void addUseCount(uint32_t n = 1) noexcept { _useCount += n; }
[[nodiscard]]
inline uint32_t weight() const noexcept { return _weight; } inline uint32_t weight() const noexcept { return _weight; }
inline void setWeight(uint32_t weight) noexcept { _weight = weight; } inline void setWeight(uint32_t weight) noexcept { _weight = weight; }
[[nodiscard]]
inline int32_t offset() const noexcept { return _offset; } inline int32_t offset() const noexcept { return _offset; }
inline void setOffset(int32_t offset) noexcept { _offset = offset; } inline void setOffset(int32_t offset) noexcept { _offset = offset; }
//! \} //! \}
}; };
typedef ZoneVector<RAStackSlot*> RAStackSlots; using RAStackSlots = ZoneVector<RAStackSlot*>;
//! Stack allocator. //! Stack allocator.
class RAStackAllocator { class RAStackAllocator {
@@ -134,14 +152,25 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept { return _allocator; } ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept { return _allocator; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t bytesUsed() const noexcept { return _bytesUsed; } ASMJIT_INLINE_NODEBUG uint32_t bytesUsed() const noexcept { return _bytesUsed; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t stackSize() const noexcept { return _stackSize; } ASMJIT_INLINE_NODEBUG uint32_t stackSize() const noexcept { return _stackSize; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; } ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAStackSlots& slots() noexcept { return _slots; } ASMJIT_INLINE_NODEBUG RAStackSlots& slots() noexcept { return _slots; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RAStackSlots& slots() const noexcept { return _slots; } ASMJIT_INLINE_NODEBUG const RAStackSlots& slots() const noexcept { return _slots; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t slotCount() const noexcept { return _slots.size(); } ASMJIT_INLINE_NODEBUG uint32_t slotCount() const noexcept { return _slots.size(); }
//! \} //! \}
@@ -149,9 +178,13 @@ public:
//! \name Utilities //! \name Utilities
//! \{ //! \{
[[nodiscard]]
RAStackSlot* newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags = 0) noexcept; RAStackSlot* newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags = 0) noexcept;
[[nodiscard]]
Error calculateStackFrame() noexcept; Error calculateStackFrame() noexcept;
[[nodiscard]]
Error adjustSlotOffsets(int32_t offset) noexcept; Error adjustSlotOffsets(int32_t offset) noexcept;
//! \} //! \}

View File

@@ -21,7 +21,7 @@ constexpr size_t kMaxAllocSize = SIZE_MAX - Globals::kGrowThreshold;
// //
// NOTE: The sizes here include null terminators - that way we can have aligned allocations that are power of 2s // NOTE: The sizes here include null terminators - that way we can have aligned allocations that are power of 2s
// initially. // initially.
static ASMJIT_FORCE_INLINE size_t String_growCapacity(size_t byteSize, size_t minimumByteSize) noexcept { static ASMJIT_INLINE size_t String_growCapacity(size_t byteSize, size_t minimumByteSize) noexcept {
static constexpr size_t kGrowThreshold = Globals::kGrowThreshold; static constexpr size_t kGrowThreshold = Globals::kGrowThreshold;
ASMJIT_ASSERT(minimumByteSize < kMaxAllocSize); ASMJIT_ASSERT(minimumByteSize < kMaxAllocSize);
@@ -51,8 +51,9 @@ static ASMJIT_FORCE_INLINE size_t String_growCapacity(size_t byteSize, size_t mi
byteSize = minimumByteSize + remainder; byteSize = minimumByteSize + remainder;
// Bail to `minimumByteSize` in case of overflow. // Bail to `minimumByteSize` in case of overflow.
if (byteSize < minimumByteSize) if (byteSize < minimumByteSize) {
return minimumByteSize; return minimumByteSize;
}
} }
} }
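
`String_growCapacity()` keeps small buffers on power-of-two sizes (the sizes include the null terminator) and switches to coarser, threshold-aligned growth for large buffers, bailing to the minimum on overflow. A minimal standalone sketch of that growth policy (simplified rounding and an illustrative threshold constant, not asmjit's exact algorithm):

  #include <cstddef>
  #include <cstdio>

  // Grow to the next power of two while small, then in threshold-sized steps.
  static size_t growCapacity(size_t minimum) {
    constexpr size_t kGrowThreshold = 1024 * 1024; // illustrative; the real constant is Globals::kGrowThreshold
    if (minimum <= kGrowThreshold) {
      size_t cap = 32;                 // smallest allocation, already a power of two
      while (cap < minimum) cap <<= 1; // next power of two that fits (null terminator included)
      return cap;
    }
    // Above the threshold, round up to a multiple of the threshold instead.
    size_t aligned = (minimum + kGrowThreshold - 1) & ~(kGrowThreshold - 1);
    return aligned >= minimum ? aligned : minimum; // bail to `minimum` on overflow
  }

  int main() {
    std::printf("%zu\n", growCapacity(100));     // 128
    std::printf("%zu\n", growCapacity(3000000)); // 3145728 (3 MiB)
    return 0;
  }
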
@@ -63,8 +64,9 @@ static ASMJIT_FORCE_INLINE size_t String_growCapacity(size_t byteSize, size_t mi
// ====================== // ======================
Error String::reset() noexcept { Error String::reset() noexcept {
if (_type == kTypeLarge) if (_type == kTypeLarge) {
::free(_large.data); ::free(_large.data);
}
_resetInternal(); _resetInternal();
return kErrorOk; return kErrorOk;
@@ -104,17 +106,20 @@ char* String::prepare(ModifyOp op, size_t size) noexcept {
if (op == ModifyOp::kAssign) { if (op == ModifyOp::kAssign) {
if (size > curCapacity) { if (size > curCapacity) {
// Prevent arithmetic overflow. // Prevent arithmetic overflow.
if (ASMJIT_UNLIKELY(size >= kMaxAllocSize)) if (ASMJIT_UNLIKELY(size >= kMaxAllocSize)) {
return nullptr; return nullptr;
}
size_t newCapacity = Support::alignUp<size_t>(size + 1, kMinAllocSize); size_t newCapacity = Support::alignUp<size_t>(size + 1, kMinAllocSize);
char* newData = static_cast<char*>(::malloc(newCapacity)); char* newData = static_cast<char*>(::malloc(newCapacity));
if (ASMJIT_UNLIKELY(!newData)) if (ASMJIT_UNLIKELY(!newData)) {
return nullptr; return nullptr;
}
if (_type == kTypeLarge) if (_type == kTypeLarge) {
::free(curData); ::free(curData);
}
_large.type = kTypeLarge; _large.type = kTypeLarge;
_large.size = size; _large.size = size;
@@ -132,8 +137,9 @@ char* String::prepare(ModifyOp op, size_t size) noexcept {
} }
else { else {
// Prevent arithmetic overflow. // Prevent arithmetic overflow.
if (ASMJIT_UNLIKELY(size >= kMaxAllocSize - curSize - 1)) if (ASMJIT_UNLIKELY(size >= kMaxAllocSize - curSize - 1)) {
return nullptr; return nullptr;
}
size_t newSize = size + curSize; size_t newSize = size + curSize;
size_t newSizePlusOne = newSize + 1; size_t newSizePlusOne = newSize + 1;
@@ -142,17 +148,20 @@ char* String::prepare(ModifyOp op, size_t size) noexcept {
size_t newCapacityPlusOne = String_growCapacity(size + 1u, newSizePlusOne); size_t newCapacityPlusOne = String_growCapacity(size + 1u, newSizePlusOne);
ASMJIT_ASSERT(newCapacityPlusOne >= newSizePlusOne); ASMJIT_ASSERT(newCapacityPlusOne >= newSizePlusOne);
if (ASMJIT_UNLIKELY(newCapacityPlusOne < newSizePlusOne)) if (ASMJIT_UNLIKELY(newCapacityPlusOne < newSizePlusOne)) {
return nullptr; return nullptr;
}
char* newData = static_cast<char*>(::malloc(newCapacityPlusOne)); char* newData = static_cast<char*>(::malloc(newCapacityPlusOne));
if (ASMJIT_UNLIKELY(!newData)) if (ASMJIT_UNLIKELY(!newData)) {
return nullptr; return nullptr;
}
memcpy(newData, curData, curSize); memcpy(newData, curData, curSize);
if (_type == kTypeLarge) if (_type == kTypeLarge) {
::free(curData); ::free(curData);
}
_large.type = kTypeLarge; _large.type = kTypeLarge;
_large.size = newSize; _large.size = newSize;
@@ -177,8 +186,9 @@ Error String::assign(const char* data, size_t size) noexcept {
char* dst = nullptr; char* dst = nullptr;
// Null terminated string without `size` specified. // Null terminated string without `size` specified.
if (size == SIZE_MAX) if (size == SIZE_MAX) {
size = data ? strlen(data) : size_t(0); size = data ? strlen(data) : size_t(0);
}
if (isLargeOrExternal()) { if (isLargeOrExternal()) {
if (size <= _large.capacity) { if (size <= _large.capacity) {
@@ -187,15 +197,18 @@ Error String::assign(const char* data, size_t size) noexcept {
} }
else { else {
size_t capacityPlusOne = Support::alignUp(size + 1, 32); size_t capacityPlusOne = Support::alignUp(size + 1, 32);
if (ASMJIT_UNLIKELY(capacityPlusOne < size)) if (ASMJIT_UNLIKELY(capacityPlusOne < size)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
dst = static_cast<char*>(::malloc(capacityPlusOne)); dst = static_cast<char*>(::malloc(capacityPlusOne));
if (ASMJIT_UNLIKELY(!dst)) if (ASMJIT_UNLIKELY(!dst)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
if (_type == kTypeLarge) if (_type == kTypeLarge) {
::free(_large.data); ::free(_large.data);
}
_large.type = kTypeLarge; _large.type = kTypeLarge;
_large.data = dst; _large.data = dst;
@@ -212,8 +225,9 @@ Error String::assign(const char* data, size_t size) noexcept {
} }
else { else {
dst = static_cast<char*>(::malloc(size + 1)); dst = static_cast<char*>(::malloc(size + 1));
if (ASMJIT_UNLIKELY(!dst)) if (ASMJIT_UNLIKELY(!dst)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
_large.type = kTypeLarge; _large.type = kTypeLarge;
_large.data = dst; _large.data = dst;
@@ -237,15 +251,18 @@ Error String::assign(const char* data, size_t size) noexcept {
// =================== // ===================
Error String::_opString(ModifyOp op, const char* str, size_t size) noexcept { Error String::_opString(ModifyOp op, const char* str, size_t size) noexcept {
if (size == SIZE_MAX) if (size == SIZE_MAX) {
size = str ? strlen(str) : size_t(0); size = str ? strlen(str) : size_t(0);
}
if (!size) if (!size) {
return kErrorOk; return kErrorOk;
}
char* p = prepare(op, size); char* p = prepare(op, size);
if (!p) if (!p) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
memcpy(p, str, size); memcpy(p, str, size);
return kErrorOk; return kErrorOk;
@@ -253,20 +270,23 @@ Error String::_opString(ModifyOp op, const char* str, size_t size) noexcept {
Error String::_opChar(ModifyOp op, char c) noexcept { Error String::_opChar(ModifyOp op, char c) noexcept {
char* p = prepare(op, 1); char* p = prepare(op, 1);
if (!p) if (!p) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
*p = c; *p = c;
return kErrorOk; return kErrorOk;
} }
Error String::_opChars(ModifyOp op, char c, size_t n) noexcept { Error String::_opChars(ModifyOp op, char c, size_t n) noexcept {
if (!n) if (!n) {
return kErrorOk; return kErrorOk;
}
char* p = prepare(op, n); char* p = prepare(op, n);
if (!p) if (!p) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
memset(p, c, n); memset(p, c, n);
return kErrorOk; return kErrorOk;
@@ -278,8 +298,9 @@ Error String::padEnd(size_t n, char c) noexcept {
} }
Error String::_opNumber(ModifyOp op, uint64_t i, uint32_t base, size_t width, StringFormatFlags flags) noexcept { Error String::_opNumber(ModifyOp op, uint64_t i, uint32_t base, size_t width, StringFormatFlags flags) noexcept {
if (base == 0) if (base == 0) {
base = 10; base = 10;
}
char buf[128]; char buf[128];
char* p = buf + ASMJIT_ARRAY_SIZE(buf); char* p = buf + ASMJIT_ARRAY_SIZE(buf);
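
`_opNumber()` renders digits backwards into a stack buffer (`char buf[128]; char* p = buf + ...;` then `*--p = digit`), so the sign, base prefix, and padding can be prepended later without knowing the length up front. A minimal standalone sketch of that idiom (a plain helper function, not asmjit's API):

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  // Formats `value` in the given base (<= 16) by writing digits from the end
  // of a local buffer towards the front, then copies the finished run out.
  static size_t formatNumber(char* out, uint64_t value, uint32_t base = 10) {
    static const char digits[] = "0123456789abcdef";
    char buf[128];
    char* p = buf + sizeof(buf); // one past the end; digits are written backwards

    do {
      *--p = digits[value % base];
      value /= base;
    } while (value != 0);

    size_t n = size_t(buf + sizeof(buf) - p);
    std::memcpy(out, p, n);
    out[n] = '\0';
    return n;
  }

  int main() {
    char out[129];
    formatNumber(out, 48879, 16); // "beef"
    std::puts(out);
    return 0;
  }
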
@@ -345,8 +366,9 @@ Error String::_opNumber(ModifyOp op, uint64_t i, uint32_t base, size_t width, St
if (Support::test(flags, StringFormatFlags::kAlternate)) { if (Support::test(flags, StringFormatFlags::kAlternate)) {
if (base == 8) { if (base == 8) {
if (orig != 0) if (orig != 0) {
*--p = '0'; *--p = '0';
}
} }
if (base == 16) { if (base == 16) {
*--p = 'x'; *--p = 'x';
@@ -357,16 +379,20 @@ Error String::_opNumber(ModifyOp op, uint64_t i, uint32_t base, size_t width, St
// String Width // String Width
// ------------ // ------------
if (sign != 0) if (sign != 0) {
*--p = sign; *--p = sign;
}
if (width > 256) if (width > 256) {
width = 256; width = 256;
}
if (width <= numberSize) if (width <= numberSize) {
width = 0; width = 0;
else }
else {
width -= numberSize; width -= numberSize;
}
// Finalize // Finalize
// -------- // --------
@@ -374,8 +400,9 @@ Error String::_opNumber(ModifyOp op, uint64_t i, uint32_t base, size_t width, St
size_t prefixSize = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p) - numberSize; size_t prefixSize = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p) - numberSize;
char* data = prepare(op, prefixSize + width + numberSize); char* data = prepare(op, prefixSize + width + numberSize);
if (!data) if (!data) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
memcpy(data, p, prefixSize); memcpy(data, p, prefixSize);
data += prefixSize; data += prefixSize;
@@ -391,23 +418,29 @@ Error String::_opHex(ModifyOp op, const void* data, size_t size, char separator)
char* dst; char* dst;
const uint8_t* src = static_cast<const uint8_t*>(data); const uint8_t* src = static_cast<const uint8_t*>(data);
if (!size) if (!size) {
return kErrorOk; return kErrorOk;
}
if (separator) { if (separator) {
if (ASMJIT_UNLIKELY(size >= SIZE_MAX / 3)) if (ASMJIT_UNLIKELY(size >= SIZE_MAX / 3)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
dst = prepare(op, size * 3 - 1); dst = prepare(op, size * 3 - 1);
if (ASMJIT_UNLIKELY(!dst)) if (ASMJIT_UNLIKELY(!dst)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
size_t i = 0; size_t i = 0;
for (;;) { for (;;) {
dst[0] = String_baseN[(src[0] >> 4) & 0xF]; dst[0] = String_baseN[(src[0] >> 4) & 0xF];
dst[1] = String_baseN[(src[0] ) & 0xF]; dst[1] = String_baseN[(src[0] ) & 0xF];
if (++i == size) if (++i == size) {
break; break;
}
// This makes sure that the separator is only put between two hexadecimal bytes. // This makes sure that the separator is only put between two hexadecimal bytes.
dst[2] = separator; dst[2] = separator;
dst += 3; dst += 3;
@@ -415,12 +448,14 @@ Error String::_opHex(ModifyOp op, const void* data, size_t size, char separator)
} }
} }
else { else {
if (ASMJIT_UNLIKELY(size >= SIZE_MAX / 2)) if (ASMJIT_UNLIKELY(size >= SIZE_MAX / 2)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
dst = prepare(op, size * 2); dst = prepare(op, size * 2);
if (ASMJIT_UNLIKELY(!dst)) if (ASMJIT_UNLIKELY(!dst)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
for (size_t i = 0; i < size; i++, dst += 2, src++) { for (size_t i = 0; i < size; i++, dst += 2, src++) {
dst[0] = String_baseN[(src[0] >> 4) & 0xF]; dst[0] = String_baseN[(src[0] >> 4) & 0xF];
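
With a separator, `_opHex()` needs `size * 3 - 1` output characters (two hex digits per byte plus a separator between bytes, but not after the last one); without a separator it needs `size * 2`. A small standalone sketch of the same layout (plain helper, illustrative only):

  #include <cstdint>
  #include <cstdio>

  static const char kHexDigits[] = "0123456789abcdef";

  // Writes `size` bytes as lowercase hex, optionally separated; returns the length.
  static size_t hexDump(char* dst, const uint8_t* src, size_t size, char separator) {
    if (size == 0) { dst[0] = '\0'; return 0; }
    char* p = dst;
    for (size_t i = 0; i < size; i++) {
      if (i != 0 && separator) *p++ = separator; // only between two hexadecimal bytes
      *p++ = kHexDigits[(src[i] >> 4) & 0xF];
      *p++ = kHexDigits[ src[i]       & 0xF];
    }
    *p = '\0';
    return size_t(p - dst); // size*3-1 with a separator, size*2 without
  }

  int main() {
    const uint8_t bytes[] = { 0xDE, 0xAD, 0xBE, 0xEF };
    char out[32];
    size_t n = hexDump(out, bytes, 4, ':');
    std::printf("%s (%zu chars)\n", out, n); // de:ad:be:ef (11 chars)
    return 0;
  }
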
@@ -466,16 +501,19 @@ Error String::_opVFormat(ModifyOp op, const char* fmt, va_list ap) noexcept {
fmtResult = vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap); fmtResult = vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
outputSize = size_t(fmtResult); outputSize = size_t(fmtResult);
if (ASMJIT_LIKELY(outputSize < ASMJIT_ARRAY_SIZE(buf))) if (ASMJIT_LIKELY(outputSize < ASMJIT_ARRAY_SIZE(buf))) {
return _opString(op, buf, outputSize); return _opString(op, buf, outputSize);
}
} }
if (ASMJIT_UNLIKELY(fmtResult < 0)) if (ASMJIT_UNLIKELY(fmtResult < 0)) {
return DebugUtils::errored(kErrorInvalidState); return DebugUtils::errored(kErrorInvalidState);
}
char* p = prepare(op, outputSize); char* p = prepare(op, outputSize);
if (ASMJIT_UNLIKELY(!p)) if (ASMJIT_UNLIKELY(!p)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
fmtResult = vsnprintf(p, outputSize + 1, fmt, apCopy); fmtResult = vsnprintf(p, outputSize + 1, fmt, apCopy);
ASMJIT_ASSERT(size_t(fmtResult) == outputSize); ASMJIT_ASSERT(size_t(fmtResult) == outputSize);
@@ -509,14 +547,17 @@ bool String::equals(const char* other, size_t size) const noexcept {
if (bSize == SIZE_MAX) { if (bSize == SIZE_MAX) {
size_t i; size_t i;
for (i = 0; i < aSize; i++) for (i = 0; i < aSize; i++) {
if (aData[i] != bData[i] || bData[i] == 0) if (aData[i] != bData[i] || bData[i] == 0) {
return false; return false;
}
}
return bData[i] == 0; return bData[i] == 0;
} }
else { else {
if (aSize != bSize) if (aSize != bSize) {
return false; return false;
}
return ::memcmp(aData, bData, aSize) == 0; return ::memcmp(aData, bData, aSize) == 0;
} }
} }
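
Passing `size == SIZE_MAX` makes `equals()` treat `other` as a NUL-terminated string and compare it against the stored size without an upfront `strlen()`; an explicit size compares lengths first and then the raw bytes. A short usage sketch (only `String`, `assign()` and `equals()` from the diffs above are used; `<asmjit/core.h>` is assumed to be the usual entry point):

  #include <asmjit/core.h>
  #include <cstdint>

  using namespace asmjit;

  int main() {
    String s;
    (void)s.assign("mov eax, 1", SIZE_MAX);  // SIZE_MAX: measure the NUL-terminated literal

    bool a = s.equals("mov eax, 1");         // default size is SIZE_MAX: NUL-terminated compare
    bool b = s.equals("mov eax, 1XYZ", 10);  // explicit size: only the first 10 bytes are compared
    bool c = s.equals("mov eax");            // shorter literal: mismatch found, returns false

    return (a && b && !c) ? 0 : 1;
  }
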

View File

@@ -36,9 +36,7 @@ union FixedString {
//! \{ //! \{
// This cannot be constexpr as GCC 4.8 refuses constexpr members of unions. // This cannot be constexpr as GCC 4.8 refuses constexpr members of unions.
enum : uint32_t { static inline constexpr uint32_t kNumUInt32Words = uint32_t((N + sizeof(uint32_t) - 1) / sizeof(uint32_t));
kNumUInt32Words = uint32_t((N + sizeof(uint32_t) - 1) / sizeof(uint32_t))
};
//! \} //! \}
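
`kNumUInt32Words` simply rounds the byte capacity `N` up to whole 32-bit words, which is what lets the union alias the character buffer as `uint32_t` words. A tiny worked check of the same rounding (standalone helper, not the actual template):

  #include <cstdint>

  // Same rounding as FixedString<N>::kNumUInt32Words: ceil(N / 4) for 32-bit words.
  static constexpr uint32_t numUInt32Words(uint32_t n) {
    return (n + uint32_t(sizeof(uint32_t)) - 1) / uint32_t(sizeof(uint32_t));
  }

  static_assert(numUInt32Words(16) == 4, "16 bytes fit exactly into 4 words");
  static_assert(numUInt32Words(17) == 5, "17 bytes need a fifth, partially used word");

  int main() { return 0; }
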
@@ -53,6 +51,7 @@ union FixedString {
//! \name Utilities //! \name Utilities
//! \{ //! \{
[[nodiscard]]
inline bool equals(const char* other) const noexcept { return strcmp(str, other) == 0; } inline bool equals(const char* other) const noexcept { return strcmp(str, other) == 0; }
//! \} //! \}
@@ -85,18 +84,13 @@ public:
}; };
//! \cond INTERNAL //! \cond INTERNAL
enum : uint32_t { static inline constexpr uint32_t kLayoutSize = 32;
kLayoutSize = 32, static inline constexpr uint32_t kSSOCapacity = kLayoutSize - 2;
kSSOCapacity = kLayoutSize - 2
};
//! String type. //! Large string (owned by String).
enum Type : uint8_t { static inline constexpr uint8_t kTypeLarge = 0x1Fu;
//! Large string (owned by String). //! External string (zone allocated or not owned by String).
kTypeLarge = 0x1Fu, static inline constexpr uint8_t kTypeExternal = 0x20u;
//! External string (zone allocated or not owned by String).
kTypeExternal = 0x20u
};
union Raw { union Raw {
uint8_t u8[kLayoutSize]; uint8_t u8[kLayoutSize];
@@ -156,10 +150,16 @@ public:
return *this; return *this;
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const char* other) const noexcept { return equals(other); } ASMJIT_INLINE_NODEBUG bool operator==(const char* other) const noexcept { return equals(other); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const char* other) const noexcept { return !equals(other); } ASMJIT_INLINE_NODEBUG bool operator!=(const char* other) const noexcept { return !equals(other); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const String& other) const noexcept { return equals(other); } ASMJIT_INLINE_NODEBUG bool operator==(const String& other) const noexcept { return equals(other); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const String& other) const noexcept { return !equals(other); } ASMJIT_INLINE_NODEBUG bool operator!=(const String& other) const noexcept { return !equals(other); }
//! \} //! \}
@@ -167,25 +167,42 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isExternal() const noexcept { return _type == kTypeExternal; } ASMJIT_INLINE_NODEBUG bool isExternal() const noexcept { return _type == kTypeExternal; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLargeOrExternal() const noexcept { return _type >= kTypeLarge; } ASMJIT_INLINE_NODEBUG bool isLargeOrExternal() const noexcept { return _type >= kTypeLarge; }
//! Tests whether the string is empty. //! Tests whether the string is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return size() == 0; } ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return size() == 0; }
//! Returns the size of the string. //! Returns the size of the string.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return isLargeOrExternal() ? size_t(_large.size) : size_t(_type); } ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return isLargeOrExternal() ? size_t(_large.size) : size_t(_type); }
//! Returns the capacity of the string. //! Returns the capacity of the string.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t capacity() const noexcept { return isLargeOrExternal() ? _large.capacity : size_t(kSSOCapacity); } ASMJIT_INLINE_NODEBUG size_t capacity() const noexcept { return isLargeOrExternal() ? _large.capacity : size_t(kSSOCapacity); }
//! Returns the data of the string. //! Returns the data of the string.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG char* data() noexcept { return isLargeOrExternal() ? _large.data : _small.data; } ASMJIT_INLINE_NODEBUG char* data() noexcept { return isLargeOrExternal() ? _large.data : _small.data; }
//! \overload //! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* data() const noexcept { return isLargeOrExternal() ? _large.data : _small.data; } ASMJIT_INLINE_NODEBUG const char* data() const noexcept { return isLargeOrExternal() ? _large.data : _small.data; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG char* start() noexcept { return data(); } ASMJIT_INLINE_NODEBUG char* start() noexcept { return data(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* start() const noexcept { return data(); } ASMJIT_INLINE_NODEBUG const char* start() const noexcept { return data(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG char* end() noexcept { return data() + size(); } ASMJIT_INLINE_NODEBUG char* end() noexcept { return data() + size(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* end() const noexcept { return data() + size(); } ASMJIT_INLINE_NODEBUG const char* end() const noexcept { return data() + size(); }
//! \} //! \}
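
The accessors above encode small-string optimization in a single byte: for strings of up to `kSSOCapacity` (30) characters the `_type` byte doubles as the size, while `0x1F` and `0x20` mark heap-allocated and external storage, so `size()` and `capacity()` just branch on `isLargeOrExternal()`. A short usage sketch (the `<asmjit/core.h>` entry point and a default-constructed `String` are assumed):

  #include <asmjit/core.h>
  #include <cstdio>

  using namespace asmjit;

  int main() {
    String s;

    (void)s.assign("short");                             // 5 <= kSSOCapacity: stays in the embedded buffer
    std::printf("%zu / %zu\n", s.size(), s.capacity());  // 5 / 30

    (void)s.assign("a string that is definitely longer than thirty characters");
    std::printf("%zu large=%d\n", s.size(), int(s.isLargeOrExternal())); // heap-allocated now

    return 0;
  }
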
@@ -201,6 +218,7 @@ public:
//! Clears the content of the string. //! Clears the content of the string.
ASMJIT_API Error clear() noexcept; ASMJIT_API Error clear() noexcept;
[[nodiscard]]
ASMJIT_API char* prepare(ModifyOp op, size_t size) noexcept; ASMJIT_API char* prepare(ModifyOp op, size_t size) noexcept;
ASMJIT_API Error _opString(ModifyOp op, const char* str, size_t size = SIZE_MAX) noexcept; ASMJIT_API Error _opString(ModifyOp op, const char* str, size_t size = SIZE_MAX) noexcept;
@@ -310,7 +328,10 @@ public:
//! Truncate the string length into `newSize`. //! Truncate the string length into `newSize`.
ASMJIT_API Error truncate(size_t newSize) noexcept; ASMJIT_API Error truncate(size_t newSize) noexcept;
[[nodiscard]]
ASMJIT_API bool equals(const char* other, size_t size = SIZE_MAX) const noexcept; ASMJIT_API bool equals(const char* other, size_t size = SIZE_MAX) const noexcept;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool equals(const String& other) const noexcept { return equals(other.data(), other.size()); } ASMJIT_INLINE_NODEBUG bool equals(const String& other) const noexcept { return equals(other.data(), other.size()); }
//! \} //! \}
@@ -323,15 +344,18 @@ public:
//! \note This is always called internally after an external buffer was released as it zeroes all bytes //! \note This is always called internally after an external buffer was released as it zeroes all bytes
//! used by String's embedded storage. //! used by String's embedded storage.
inline void _resetInternal() noexcept { inline void _resetInternal() noexcept {
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_raw.uptr); i++) for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_raw.uptr); i++) {
_raw.uptr[i] = 0; _raw.uptr[i] = 0;
}
} }
inline void _setSize(size_t newSize) noexcept { inline void _setSize(size_t newSize) noexcept {
if (isLargeOrExternal()) if (isLargeOrExternal()) {
_large.size = newSize; _large.size = newSize;
else }
else {
_small.type = uint8_t(newSize); _small.type = uint8_t(newSize);
}
} }
//! \} //! \}

File diff suppressed because it is too large

View File

@@ -40,12 +40,18 @@ public:
//! \{ //! \{
//! Returns target's environment. //! Returns target's environment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Environment& environment() const noexcept { return _environment; } ASMJIT_INLINE_NODEBUG const Environment& environment() const noexcept { return _environment; }
//! Returns the target architecture. //! Returns the target architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _environment.arch(); } ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _environment.arch(); }
//! Returns the target sub-architecture. //! Returns the target sub-architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return _environment.subArch(); } ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return _environment.subArch(); }
//! Returns target CPU features. //! Returns target CPU features.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const CpuFeatures& cpuFeatures() const noexcept { return _cpuFeatures; } ASMJIT_INLINE_NODEBUG const CpuFeatures& cpuFeatures() const noexcept { return _cpuFeatures; }

View File

@@ -13,50 +13,46 @@ namespace TypeUtils {
template<uint32_t Index> template<uint32_t Index>
struct ScalarOfTypeId { struct ScalarOfTypeId {
enum : uint32_t { static inline constexpr uint32_t kTypeId = uint32_t(
kTypeId = uint32_t( isScalar(TypeId(Index)) ? TypeId(Index) :
isScalar(TypeId(Index)) ? TypeId(Index) : isMask8 (TypeId(Index)) ? TypeId::kUInt8 :
isMask8 (TypeId(Index)) ? TypeId::kUInt8 : isMask16(TypeId(Index)) ? TypeId::kUInt16 :
isMask16(TypeId(Index)) ? TypeId::kUInt16 : isMask32(TypeId(Index)) ? TypeId::kUInt32 :
isMask32(TypeId(Index)) ? TypeId::kUInt32 : isMask64(TypeId(Index)) ? TypeId::kUInt64 :
isMask64(TypeId(Index)) ? TypeId::kUInt64 : isMmx32 (TypeId(Index)) ? TypeId::kUInt32 :
isMmx32 (TypeId(Index)) ? TypeId::kUInt32 : isMmx64 (TypeId(Index)) ? TypeId::kUInt64 :
isMmx64 (TypeId(Index)) ? TypeId::kUInt64 : isVec32 (TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec32Start ) + uint32_t(TypeId::kInt8)) & 0xFF) :
isVec32 (TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec32Start ) + uint32_t(TypeId::kInt8)) & 0xFF) : isVec64 (TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec64Start ) + uint32_t(TypeId::kInt8)) & 0xFF) :
isVec64 (TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec64Start ) + uint32_t(TypeId::kInt8)) & 0xFF) : isVec128(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec128Start) + uint32_t(TypeId::kInt8)) & 0xFF) :
isVec128(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec128Start) + uint32_t(TypeId::kInt8)) & 0xFF) : isVec256(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec256Start) + uint32_t(TypeId::kInt8)) & 0xFF) :
isVec256(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec256Start) + uint32_t(TypeId::kInt8)) & 0xFF) : isVec512(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec512Start) + uint32_t(TypeId::kInt8)) & 0xFF) : TypeId::kVoid);
isVec512(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec512Start) + uint32_t(TypeId::kInt8)) & 0xFF) : TypeId::kVoid)
};
}; };
template<uint32_t Index> template<uint32_t Index>
struct SizeOfTypeId { struct SizeOfTypeId {
enum : uint32_t { static inline constexpr uint32_t kTypeSize =
kTypeSize = isInt8 (TypeId(Index)) ? 1 :
isInt8 (TypeId(Index)) ? 1 : isUInt8 (TypeId(Index)) ? 1 :
isUInt8 (TypeId(Index)) ? 1 : isInt16 (TypeId(Index)) ? 2 :
isInt16 (TypeId(Index)) ? 2 : isUInt16 (TypeId(Index)) ? 2 :
isUInt16 (TypeId(Index)) ? 2 : isInt32 (TypeId(Index)) ? 4 :
isInt32 (TypeId(Index)) ? 4 : isUInt32 (TypeId(Index)) ? 4 :
isUInt32 (TypeId(Index)) ? 4 : isInt64 (TypeId(Index)) ? 8 :
isInt64 (TypeId(Index)) ? 8 : isUInt64 (TypeId(Index)) ? 8 :
isUInt64 (TypeId(Index)) ? 8 : isFloat32(TypeId(Index)) ? 4 :
isFloat32(TypeId(Index)) ? 4 : isFloat64(TypeId(Index)) ? 8 :
isFloat64(TypeId(Index)) ? 8 : isFloat80(TypeId(Index)) ? 10 :
isFloat80(TypeId(Index)) ? 10 : isMask8 (TypeId(Index)) ? 1 :
isMask8 (TypeId(Index)) ? 1 : isMask16 (TypeId(Index)) ? 2 :
isMask16 (TypeId(Index)) ? 2 : isMask32 (TypeId(Index)) ? 4 :
isMask32 (TypeId(Index)) ? 4 : isMask64 (TypeId(Index)) ? 8 :
isMask64 (TypeId(Index)) ? 8 : isMmx32 (TypeId(Index)) ? 4 :
isMmx32 (TypeId(Index)) ? 4 : isMmx64 (TypeId(Index)) ? 8 :
isMmx64 (TypeId(Index)) ? 8 : isVec32 (TypeId(Index)) ? 4 :
isVec32 (TypeId(Index)) ? 4 : isVec64 (TypeId(Index)) ? 8 :
isVec64 (TypeId(Index)) ? 8 : isVec128 (TypeId(Index)) ? 16 :
isVec128 (TypeId(Index)) ? 16 : isVec256 (TypeId(Index)) ? 32 :
isVec256 (TypeId(Index)) ? 32 : isVec512 (TypeId(Index)) ? 64 : 0;
isVec512 (TypeId(Index)) ? 64 : 0
};
}; };
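Both metafunctions above replace the pre-C++17 "enum hack" (enum : uint32_t { kTypeId = ... }) with static inline constexpr data members, which C++17 lets a class define in place with no out-of-line definition and no ODR headaches. A minimal sketch of the two styles (illustrative names):

#include <cstdint>

// Pre-C++17: an unscoped enum was the common way to expose an in-class
// integral constant without requiring storage or a separate definition.
template<uint32_t N>
struct SquareOld {
  enum : uint32_t { kValue = N * N };
};

// C++17: a static inline constexpr member does the same job and keeps the
// real integer type instead of an enumeration type.
template<uint32_t N>
struct SquareNew {
  static inline constexpr uint32_t kValue = N * N;
};

static_assert(SquareOld<7>::kValue == 49);
static_assert(SquareNew<7>::kValue == 49);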
const TypeData _typeData = { const TypeData _typeData = {

View File

@@ -164,98 +164,164 @@ struct TypeData {
ASMJIT_VARAPI const TypeData _typeData; ASMJIT_VARAPI const TypeData _typeData;
//! Returns the scalar type of `typeId`. //! Returns the scalar type of `typeId`.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG TypeId scalarOf(TypeId typeId) noexcept { return _typeData.scalarOf[uint32_t(typeId)]; } static ASMJIT_INLINE_NODEBUG TypeId scalarOf(TypeId typeId) noexcept { return _typeData.scalarOf[uint32_t(typeId)]; }
//! Returns the size [in bytes] of `typeId`. //! Returns the size [in bytes] of `typeId`.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG uint32_t sizeOf(TypeId typeId) noexcept { return _typeData.sizeOf[uint32_t(typeId)]; } static ASMJIT_INLINE_NODEBUG uint32_t sizeOf(TypeId typeId) noexcept { return _typeData.sizeOf[uint32_t(typeId)]; }
//! Tests whether a given type `typeId` is between `a` and `b`. //! Tests whether a given type `typeId` is between `a` and `b`.
static ASMJIT_INLINE_NODEBUG constexpr bool isBetween(TypeId typeId, TypeId a, TypeId b) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isBetween(TypeId typeId, TypeId a, TypeId b) noexcept {
return Support::isBetween(uint32_t(typeId), uint32_t(a), uint32_t(b)); return Support::isBetween(uint32_t(typeId), uint32_t(a), uint32_t(b));
} }
//! Tests whether a given type `typeId` is \ref TypeId::kVoid. //! Tests whether a given type `typeId` is \ref TypeId::kVoid.
static ASMJIT_INLINE_NODEBUG constexpr bool isVoid(TypeId typeId) noexcept { return typeId == TypeId::kVoid; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isVoid(TypeId typeId) noexcept { return typeId == TypeId::kVoid; }
//! Tests whether a given type `typeId` is a valid non-void type. //! Tests whether a given type `typeId` is a valid non-void type.
static ASMJIT_INLINE_NODEBUG constexpr bool isValid(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kIntStart, TypeId::_kVec512End); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isValid(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kIntStart, TypeId::_kVec512End); }
//! Tests whether a given type `typeId` is scalar (has no vector part). //! Tests whether a given type `typeId` is scalar (has no vector part).
static ASMJIT_INLINE_NODEBUG constexpr bool isScalar(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kBaseStart, TypeId::_kBaseEnd); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isScalar(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kBaseStart, TypeId::_kBaseEnd); }
//! Tests whether a given type `typeId` is abstract, which means that its size depends on register size. //! Tests whether a given type `typeId` is abstract, which means that its size depends on register size.
static ASMJIT_INLINE_NODEBUG constexpr bool isAbstract(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kIntPtr, TypeId::kUIntPtr); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isAbstract(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kIntPtr, TypeId::kUIntPtr); }
//! Tests whether a given type is a scalar integer (signed or unsigned) of any size. //! Tests whether a given type is a scalar integer (signed or unsigned) of any size.
static ASMJIT_INLINE_NODEBUG constexpr bool isInt(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kIntStart, TypeId::_kIntEnd); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isInt(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kIntStart, TypeId::_kIntEnd); }
//! Tests whether a given type is a scalar 8-bit integer (signed). //! Tests whether a given type is a scalar 8-bit integer (signed).
static ASMJIT_INLINE_NODEBUG constexpr bool isInt8(TypeId typeId) noexcept { return typeId == TypeId::kInt8; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isInt8(TypeId typeId) noexcept { return typeId == TypeId::kInt8; }
//! Tests whether a given type is a scalar 8-bit integer (unsigned). //! Tests whether a given type is a scalar 8-bit integer (unsigned).
static ASMJIT_INLINE_NODEBUG constexpr bool isUInt8(TypeId typeId) noexcept { return typeId == TypeId::kUInt8; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isUInt8(TypeId typeId) noexcept { return typeId == TypeId::kUInt8; }
//! Tests whether a given type is a scalar 16-bit integer (signed). //! Tests whether a given type is a scalar 16-bit integer (signed).
static ASMJIT_INLINE_NODEBUG constexpr bool isInt16(TypeId typeId) noexcept { return typeId == TypeId::kInt16; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isInt16(TypeId typeId) noexcept { return typeId == TypeId::kInt16; }
//! Tests whether a given type is a scalar 16-bit integer (unsigned). //! Tests whether a given type is a scalar 16-bit integer (unsigned).
static ASMJIT_INLINE_NODEBUG constexpr bool isUInt16(TypeId typeId) noexcept { return typeId == TypeId::kUInt16; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isUInt16(TypeId typeId) noexcept { return typeId == TypeId::kUInt16; }
//! Tests whether a given type is a scalar 32-bit integer (signed). //! Tests whether a given type is a scalar 32-bit integer (signed).
static ASMJIT_INLINE_NODEBUG constexpr bool isInt32(TypeId typeId) noexcept { return typeId == TypeId::kInt32; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isInt32(TypeId typeId) noexcept { return typeId == TypeId::kInt32; }
//! Tests whether a given type is a scalar 32-bit integer (unsigned). //! Tests whether a given type is a scalar 32-bit integer (unsigned).
static ASMJIT_INLINE_NODEBUG constexpr bool isUInt32(TypeId typeId) noexcept { return typeId == TypeId::kUInt32; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isUInt32(TypeId typeId) noexcept { return typeId == TypeId::kUInt32; }
//! Tests whether a given type is a scalar 64-bit integer (signed). //! Tests whether a given type is a scalar 64-bit integer (signed).
static ASMJIT_INLINE_NODEBUG constexpr bool isInt64(TypeId typeId) noexcept { return typeId == TypeId::kInt64; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isInt64(TypeId typeId) noexcept { return typeId == TypeId::kInt64; }
//! Tests whether a given type is a scalar 64-bit integer (unsigned). //! Tests whether a given type is a scalar 64-bit integer (unsigned).
static ASMJIT_INLINE_NODEBUG constexpr bool isUInt64(TypeId typeId) noexcept { return typeId == TypeId::kUInt64; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isUInt64(TypeId typeId) noexcept { return typeId == TypeId::kUInt64; }
//! Tests whether a given type is an 8-bit general purpose register representing either signed or unsigned 8-bit integer. //! Tests whether a given type is an 8-bit general purpose register representing either signed or unsigned 8-bit integer.
static ASMJIT_INLINE_NODEBUG constexpr bool isGp8(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt8, TypeId::kUInt8); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isGp8(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt8, TypeId::kUInt8); }
//! Tests whether a given type is a 16-bit general purpose register representing either signed or unsigned 16-bit integer //! Tests whether a given type is a 16-bit general purpose register representing either signed or unsigned 16-bit integer
static ASMJIT_INLINE_NODEBUG constexpr bool isGp16(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt16, TypeId::kUInt16); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isGp16(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt16, TypeId::kUInt16); }
//! Tests whether a given type is a 32-bit general purpose register representing either signed or unsigned 32-bit integer //! Tests whether a given type is a 32-bit general purpose register representing either signed or unsigned 32-bit integer
static ASMJIT_INLINE_NODEBUG constexpr bool isGp32(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt32, TypeId::kUInt32); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isGp32(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt32, TypeId::kUInt32); }
//! Tests whether a given type is a 64-bit general purpose register representing either signed or unsigned 64-bit integer //! Tests whether a given type is a 64-bit general purpose register representing either signed or unsigned 64-bit integer
static ASMJIT_INLINE_NODEBUG constexpr bool isGp64(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt64, TypeId::kUInt64); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isGp64(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt64, TypeId::kUInt64); }
//! Tests whether a given type is a scalar floating point of any size. //! Tests whether a given type is a scalar floating point of any size.
static ASMJIT_INLINE_NODEBUG constexpr bool isFloat(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kFloatStart, TypeId::_kFloatEnd); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isFloat(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kFloatStart, TypeId::_kFloatEnd); }
//! Tests whether a given type is a scalar 32-bit float. //! Tests whether a given type is a scalar 32-bit float.
static ASMJIT_INLINE_NODEBUG constexpr bool isFloat32(TypeId typeId) noexcept { return typeId == TypeId::kFloat32; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isFloat32(TypeId typeId) noexcept { return typeId == TypeId::kFloat32; }
//! Tests whether a given type is a scalar 64-bit float. //! Tests whether a given type is a scalar 64-bit float.
static ASMJIT_INLINE_NODEBUG constexpr bool isFloat64(TypeId typeId) noexcept { return typeId == TypeId::kFloat64; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isFloat64(TypeId typeId) noexcept { return typeId == TypeId::kFloat64; }
//! Tests whether a given type is a scalar 80-bit float. //! Tests whether a given type is a scalar 80-bit float.
static ASMJIT_INLINE_NODEBUG constexpr bool isFloat80(TypeId typeId) noexcept { return typeId == TypeId::kFloat80; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isFloat80(TypeId typeId) noexcept { return typeId == TypeId::kFloat80; }
//! Tests whether a given type is a mask register of any size. //! Tests whether a given type is a mask register of any size.
static ASMJIT_INLINE_NODEBUG constexpr bool isMask(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kMaskStart, TypeId::_kMaskEnd); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isMask(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kMaskStart, TypeId::_kMaskEnd); }
//! Tests whether a given type is an 8-bit mask register. //! Tests whether a given type is an 8-bit mask register.
static ASMJIT_INLINE_NODEBUG constexpr bool isMask8(TypeId typeId) noexcept { return typeId == TypeId::kMask8; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isMask8(TypeId typeId) noexcept { return typeId == TypeId::kMask8; }
//! Tests whether a given type is a 16-bit mask register. //! Tests whether a given type is a 16-bit mask register.
static ASMJIT_INLINE_NODEBUG constexpr bool isMask16(TypeId typeId) noexcept { return typeId == TypeId::kMask16; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isMask16(TypeId typeId) noexcept { return typeId == TypeId::kMask16; }
//! Tests whether a given type is a 32-bit mask register. //! Tests whether a given type is a 32-bit mask register.
static ASMJIT_INLINE_NODEBUG constexpr bool isMask32(TypeId typeId) noexcept { return typeId == TypeId::kMask32; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isMask32(TypeId typeId) noexcept { return typeId == TypeId::kMask32; }
//! Tests whether a given type is a 64-bit mask register. //! Tests whether a given type is a 64-bit mask register.
static ASMJIT_INLINE_NODEBUG constexpr bool isMask64(TypeId typeId) noexcept { return typeId == TypeId::kMask64; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isMask64(TypeId typeId) noexcept { return typeId == TypeId::kMask64; }
//! Tests whether a given type is an MMX register. //! Tests whether a given type is an MMX register.
//! //!
//! \note MMX functionality is in general deprecated on X86 architecture. AsmJit provides it just for completeness. //! \note MMX functionality is in general deprecated on X86 architecture. AsmJit provides it just for completeness.
static ASMJIT_INLINE_NODEBUG constexpr bool isMmx(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kMmxStart, TypeId::_kMmxEnd); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isMmx(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kMmxStart, TypeId::_kMmxEnd); }
//! Tests whether a given type is an MMX register, which only uses the low 32 bits of data (only specific cases). //! Tests whether a given type is an MMX register, which only uses the low 32 bits of data (only specific cases).
//! //!
//! \note MMX functionality is in general deprecated on X86 architecture. AsmJit provides it just for completeness. //! \note MMX functionality is in general deprecated on X86 architecture. AsmJit provides it just for completeness.
static ASMJIT_INLINE_NODEBUG constexpr bool isMmx32(TypeId typeId) noexcept { return typeId == TypeId::kMmx32; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isMmx32(TypeId typeId) noexcept { return typeId == TypeId::kMmx32; }
//! Tests whether a given type is an MMX register, which uses 64 bits of data (default). //! Tests whether a given type is an MMX register, which uses 64 bits of data (default).
//! //!
//! \note MMX functionality is in general deprecated on X86 architecture. AsmJit provides it just for completeness. //! \note MMX functionality is in general deprecated on X86 architecture. AsmJit provides it just for completeness.
static ASMJIT_INLINE_NODEBUG constexpr bool isMmx64(TypeId typeId) noexcept { return typeId == TypeId::kMmx64; } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isMmx64(TypeId typeId) noexcept { return typeId == TypeId::kMmx64; }
//! Tests whether a given type is a vector register of any size. //! Tests whether a given type is a vector register of any size.
static ASMJIT_INLINE_NODEBUG constexpr bool isVec(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec32Start, TypeId::_kVec512End); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isVec(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec32Start, TypeId::_kVec512End); }
//! Tests whether a given type is a 32-bit or 32-bit view of a vector register. //! Tests whether a given type is a 32-bit or 32-bit view of a vector register.
static ASMJIT_INLINE_NODEBUG constexpr bool isVec32(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec32Start, TypeId::_kVec32End); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isVec32(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec32Start, TypeId::_kVec32End); }
//! Tests whether a given type is a 64-bit or 64-bit view of a vector register. //! Tests whether a given type is a 64-bit or 64-bit view of a vector register.
static ASMJIT_INLINE_NODEBUG constexpr bool isVec64(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec64Start, TypeId::_kVec64End); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isVec64(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec64Start, TypeId::_kVec64End); }
//! Tests whether a given type is a 128-bit or 128-bit view of a vector register. //! Tests whether a given type is a 128-bit or 128-bit view of a vector register.
static ASMJIT_INLINE_NODEBUG constexpr bool isVec128(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec128Start, TypeId::_kVec128End); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isVec128(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec128Start, TypeId::_kVec128End); }
//! Tests whether a given type is a 256-bit or 256-bit view of a vector register. //! Tests whether a given type is a 256-bit or 256-bit view of a vector register.
static ASMJIT_INLINE_NODEBUG constexpr bool isVec256(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec256Start, TypeId::_kVec256End); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isVec256(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec256Start, TypeId::_kVec256End); }
//! Tests whether a given type is a 512-bit or 512-bit view of a vector register. //! Tests whether a given type is a 512-bit or 512-bit view of a vector register.
static ASMJIT_INLINE_NODEBUG constexpr bool isVec512(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec512Start, TypeId::_kVec512End); } [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR bool isVec512(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec512Start, TypeId::_kVec512End); }
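Every predicate in this block gains [[nodiscard]] and switches from ASMJIT_INLINE_NODEBUG constexpr to an ASMJIT_INLINE_CONSTEXPR macro whose definition is not part of this hunk; presumably it bundles the force-inline hint with constexpr. A hedged sketch of such a macro and one predicate in the same shape (the real AsmJit macro may be defined differently):

#include <cstdint>

// Assumption: a force-inline + constexpr wrapper in the spirit of
// ASMJIT_INLINE_CONSTEXPR; the actual definition lives in AsmJit's API config.
#if defined(_MSC_VER)
  #define MY_INLINE_CONSTEXPR __forceinline constexpr
#elif defined(__GNUC__)
  #define MY_INLINE_CONSTEXPR inline __attribute__((__always_inline__)) constexpr
#else
  #define MY_INLINE_CONSTEXPR inline constexpr
#endif

enum class TypeId : uint8_t { kVoid = 0, kInt8 = 32, kUInt8 = 33 };  // illustrative values

[[nodiscard]]
static MY_INLINE_CONSTEXPR bool isInt8(TypeId typeId) noexcept { return typeId == TypeId::kInt8; }

static_assert(isInt8(TypeId::kInt8));
static_assert(!isInt8(TypeId::kVoid));

Since the predicates stay constexpr, [[nodiscard]] adds no runtime cost and only catches callers that evaluate a test and ignore its result.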
//! \cond //! \cond
enum TypeCategory : uint32_t { enum TypeCategory : uint32_t {
@@ -271,38 +337,32 @@ struct TypeIdOfT_ByCategory {}; // Fails if not specialized.
template<typename T> template<typename T>
struct TypeIdOfT_ByCategory<T, kTypeCategoryIntegral> { struct TypeIdOfT_ByCategory<T, kTypeCategoryIntegral> {
enum : uint32_t { static inline constexpr uint32_t kTypeId = uint32_t(
kTypeId = uint32_t( (sizeof(T) == 1 && std::is_signed_v<T>) ? TypeId::kInt8 :
(sizeof(T) == 1 && std::is_signed<T>::value) ? TypeId::kInt8 : (sizeof(T) == 1 && !std::is_signed_v<T>) ? TypeId::kUInt8 :
(sizeof(T) == 1 && !std::is_signed<T>::value) ? TypeId::kUInt8 : (sizeof(T) == 2 && std::is_signed_v<T>) ? TypeId::kInt16 :
(sizeof(T) == 2 && std::is_signed<T>::value) ? TypeId::kInt16 : (sizeof(T) == 2 && !std::is_signed_v<T>) ? TypeId::kUInt16 :
(sizeof(T) == 2 && !std::is_signed<T>::value) ? TypeId::kUInt16 : (sizeof(T) == 4 && std::is_signed_v<T>) ? TypeId::kInt32 :
(sizeof(T) == 4 && std::is_signed<T>::value) ? TypeId::kInt32 : (sizeof(T) == 4 && !std::is_signed_v<T>) ? TypeId::kUInt32 :
(sizeof(T) == 4 && !std::is_signed<T>::value) ? TypeId::kUInt32 : (sizeof(T) == 8 && std::is_signed_v<T>) ? TypeId::kInt64 :
(sizeof(T) == 8 && std::is_signed<T>::value) ? TypeId::kInt64 : (sizeof(T) == 8 && !std::is_signed_v<T>) ? TypeId::kUInt64 : TypeId::kVoid);
(sizeof(T) == 8 && !std::is_signed<T>::value) ? TypeId::kUInt64 : TypeId::kVoid)
};
}; };
template<typename T> template<typename T>
struct TypeIdOfT_ByCategory<T, kTypeCategoryFloatingPoint> { struct TypeIdOfT_ByCategory<T, kTypeCategoryFloatingPoint> {
enum : uint32_t { static inline constexpr uint32_t kTypeId = uint32_t(
kTypeId = uint32_t( (sizeof(T) == 4 ) ? TypeId::kFloat32 :
(sizeof(T) == 4 ) ? TypeId::kFloat32 : (sizeof(T) == 8 ) ? TypeId::kFloat64 :
(sizeof(T) == 8 ) ? TypeId::kFloat64 : (sizeof(T) >= 10) ? TypeId::kFloat80 : TypeId::kVoid);
(sizeof(T) >= 10) ? TypeId::kFloat80 : TypeId::kVoid)
};
}; };
template<typename T> template<typename T>
struct TypeIdOfT_ByCategory<T, kTypeCategoryEnum> struct TypeIdOfT_ByCategory<T, kTypeCategoryEnum>
: public TypeIdOfT_ByCategory<typename std::underlying_type<T>::type, kTypeCategoryIntegral> {}; : public TypeIdOfT_ByCategory<std::underlying_type_t<T>, kTypeCategoryIntegral> {};
template<typename T> template<typename T>
struct TypeIdOfT_ByCategory<T, kTypeCategoryFunction> { struct TypeIdOfT_ByCategory<T, kTypeCategoryFunction> {
enum : uint32_t { static inline constexpr uint32_t kTypeId = uint32_t(TypeId::kUIntPtr);
kTypeId = uint32_t(TypeId::kUIntPtr)
};
}; };
//! \endcond //! \endcond
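The specializations above also move from std::is_signed<T>::value and std::underlying_type<T>::type to the _v variable templates and _t alias templates, which read much better inside long conditional chains. A small standalone comparison:

#include <cstdint>
#include <type_traits>

enum class Small : int16_t { kA, kB };

// Pre-C++17 spelling.
static_assert(std::is_signed<std::underlying_type<Small>::type>::value);

// C++14/17 shorthands used by the new code.
static_assert(std::is_signed_v<std::underlying_type_t<Small>>);
static_assert(sizeof(std::underlying_type_t<Small>) == 2);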
@@ -311,37 +371,33 @@ struct TypeIdOfT_ByCategory<T, kTypeCategoryFunction> {
template<typename T> template<typename T>
struct TypeIdOfT { struct TypeIdOfT {
//! TypeId of C++ type `T`. //! TypeId of C++ type `T`.
static constexpr TypeId kTypeId = _TypeIdDeducedAtCompileTime_; static inline constexpr TypeId kTypeId = _TypeIdDeducedAtCompileTime_;
}; };
#else #else
template<typename T> template<typename T>
struct TypeIdOfT struct TypeIdOfT
: public TypeIdOfT_ByCategory<T, : public TypeIdOfT_ByCategory<T,
std::is_enum<T>::value ? kTypeCategoryEnum : std::is_enum_v<T> ? kTypeCategoryEnum :
std::is_integral<T>::value ? kTypeCategoryIntegral : std::is_integral_v<T> ? kTypeCategoryIntegral :
std::is_floating_point<T>::value ? kTypeCategoryFloatingPoint : std::is_floating_point_v<T> ? kTypeCategoryFloatingPoint :
std::is_function<T>::value ? kTypeCategoryFunction : kTypeCategoryUnknown> {}; std::is_function_v<T> ? kTypeCategoryFunction : kTypeCategoryUnknown> {};
#endif #endif
//! \cond //! \cond
template<typename T> template<typename T>
struct TypeIdOfT<T*> { struct TypeIdOfT<T*> {
enum : uint32_t { static inline constexpr uint32_t kTypeId = uint32_t(TypeId::kUIntPtr);
kTypeId = uint32_t(TypeId::kUIntPtr)
};
}; };
template<typename T> template<typename T>
struct TypeIdOfT<T&> { struct TypeIdOfT<T&> {
enum : uint32_t { static inline constexpr uint32_t kTypeId = uint32_t(TypeId::kUIntPtr);
kTypeId = uint32_t(TypeId::kUIntPtr)
};
}; };
//! \endcond //! \endcond
//! Returns a corresponding \ref TypeId of `T` type. //! Returns a corresponding \ref TypeId of `T` type.
template<typename T> template<typename T>
static ASMJIT_INLINE_NODEBUG constexpr TypeId typeIdOfT() noexcept { return TypeId(TypeIdOfT<T>::kTypeId); } static ASMJIT_INLINE_CONSTEXPR TypeId typeIdOfT() noexcept { return TypeId(TypeIdOfT<T>::kTypeId); }
//! Returns offset needed to convert a `kIntPtr` and `kUIntPtr` TypeId into a type that matches `registerSize` //! Returns offset needed to convert a `kIntPtr` and `kUIntPtr` TypeId into a type that matches `registerSize`
//! (general-purpose register size). If you find such TypeId it's then only about adding the offset to it. //! (general-purpose register size). If you find such TypeId it's then only about adding the offset to it.
@@ -360,18 +416,21 @@ static ASMJIT_INLINE_NODEBUG constexpr TypeId typeIdOfT() noexcept { return Type
//! // The same, but by using TypeUtils::deabstract() function. //! // The same, but by using TypeUtils::deabstract() function.
//! typeId = TypeUtils::deabstract(typeId, deabstractDelta); //! typeId = TypeUtils::deabstract(typeId, deabstractDelta);
//! ``` //! ```
static ASMJIT_INLINE_NODEBUG constexpr uint32_t deabstractDeltaOfSize(uint32_t registerSize) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR uint32_t deabstractDeltaOfSize(uint32_t registerSize) noexcept {
return registerSize >= 8 ? uint32_t(TypeId::kInt64) - uint32_t(TypeId::kIntPtr) return registerSize >= 8 ? uint32_t(TypeId::kInt64) - uint32_t(TypeId::kIntPtr)
: uint32_t(TypeId::kInt32) - uint32_t(TypeId::kIntPtr); : uint32_t(TypeId::kInt32) - uint32_t(TypeId::kIntPtr);
} }
//! Deabstracts a given `typeId` into a native type by using `deabstractDelta`, which was previously //! Deabstracts a given `typeId` into a native type by using `deabstractDelta`, which was previously
//! calculated by calling \ref deabstractDeltaOfSize() with a target native register size. //! calculated by calling \ref deabstractDeltaOfSize() with a target native register size.
static ASMJIT_INLINE_NODEBUG constexpr TypeId deabstract(TypeId typeId, uint32_t deabstractDelta) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR TypeId deabstract(TypeId typeId, uint32_t deabstractDelta) noexcept {
return isAbstract(typeId) ? TypeId(uint32_t(typeId) + deabstractDelta) : typeId; return isAbstract(typeId) ? TypeId(uint32_t(typeId) + deabstractDelta) : typeId;
} }
static ASMJIT_INLINE_NODEBUG constexpr TypeId scalarToVector(TypeId scalarTypeId, TypeId vecStartId) noexcept { [[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR TypeId scalarToVector(TypeId scalarTypeId, TypeId vecStartId) noexcept {
return TypeId(uint32_t(vecStartId) + uint32_t(scalarTypeId) - uint32_t(TypeId::kInt8)); return TypeId(uint32_t(vecStartId) + uint32_t(scalarTypeId) - uint32_t(TypeId::kInt8));
} }
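A short worked example of the deabstraction helpers above, using sizeof(void*) as a stand-in for the target's general-purpose register size (a simplification that only holds when generating code for the host):

#include <asmjit/core.h>
#include <cstdio>

using namespace asmjit;

int main() {
  // 8 on 64-bit builds, 4 on 32-bit builds; real code would take this from the target environment.
  uint32_t registerSize = uint32_t(sizeof(void*));

  // Turn the abstract kUIntPtr id into a concrete integer id for this register size.
  uint32_t delta = TypeUtils::deabstractDeltaOfSize(registerSize);
  TypeId concrete = TypeUtils::deabstract(TypeId::kUIntPtr, delta);

  std::printf("kUIntPtr deabstracts to a %u-byte integer\n", unsigned(TypeUtils::sizeOf(concrete)));
  return 0;
}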
@@ -410,14 +469,12 @@ struct Float64 {};
} // {Type} } // {Type}
//! \cond //! \cond
#define ASMJIT_DEFINE_TYPE_ID(T, TYPE_ID) \ #define ASMJIT_DEFINE_TYPE_ID(T, TYPE_ID) \
namespace TypeUtils { \ namespace TypeUtils { \
template<> \ template<> \
struct TypeIdOfT<T> { \ struct TypeIdOfT<T> { \
enum : uint32_t { \ static inline constexpr uint32_t kTypeId = uint32_t(TYPE_ID); \
kTypeId = uint32_t(TYPE_ID) \ }; \
}; \
}; \
} }
ASMJIT_DEFINE_TYPE_ID(void , TypeId::kVoid); ASMJIT_DEFINE_TYPE_ID(void , TypeId::kVoid);
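For reference, with the updated macro a line such as ASMJIT_DEFINE_TYPE_ID(void, TypeId::kVoid) now expands to roughly the following specialization (whitespace adjusted):

namespace TypeUtils {
  template<>
  struct TypeIdOfT<void> {
    static inline constexpr uint32_t kTypeId = uint32_t(TypeId::kVoid);
  };
}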

View File

@@ -146,7 +146,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(VirtMem)
// Virtual Memory Utilities // Virtual Memory Utilities
// ======================== // ========================
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static const constexpr MemoryFlags dualMappingFilter[2] = { static const constexpr MemoryFlags dualMappingFilter[2] = {
MemoryFlags::kAccessWrite | MemoryFlags::kMMapMaxAccessWrite, MemoryFlags::kAccessWrite | MemoryFlags::kMMapMaxAccessWrite,
MemoryFlags::kAccessExecute | MemoryFlags::kMMapMaxAccessExecute MemoryFlags::kAccessExecute | MemoryFlags::kMMapMaxAccessExecute
@@ -162,8 +162,9 @@ struct ScopedHandle {
: value(nullptr) {} : value(nullptr) {}
inline ~ScopedHandle() noexcept { inline ~ScopedHandle() noexcept {
if (value != nullptr) if (value != nullptr) {
::CloseHandle(value); ::CloseHandle(value);
}
} }
HANDLE value; HANDLE value;
@@ -191,12 +192,15 @@ static DWORD protectFlagsFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
DWORD protectFlags; DWORD protectFlags;
// READ|WRITE|EXECUTE. // READ|WRITE|EXECUTE.
if (Support::test(memoryFlags, MemoryFlags::kAccessExecute)) if (Support::test(memoryFlags, MemoryFlags::kAccessExecute)) {
protectFlags = Support::test(memoryFlags, MemoryFlags::kAccessWrite) ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ; protectFlags = Support::test(memoryFlags, MemoryFlags::kAccessWrite) ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
else if (Support::test(memoryFlags, MemoryFlags::kAccessRW)) }
else if (Support::test(memoryFlags, MemoryFlags::kAccessRW)) {
protectFlags = Support::test(memoryFlags, MemoryFlags::kAccessWrite) ? PAGE_READWRITE : PAGE_READONLY; protectFlags = Support::test(memoryFlags, MemoryFlags::kAccessWrite) ? PAGE_READWRITE : PAGE_READONLY;
else }
else {
protectFlags = PAGE_NOACCESS; protectFlags = PAGE_NOACCESS;
}
// Any other flags to consider? // Any other flags to consider?
return protectFlags; return protectFlags;
@@ -204,24 +208,28 @@ static DWORD protectFlagsFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
static DWORD desiredAccessFromMemoryFlags(MemoryFlags memoryFlags) noexcept { static DWORD desiredAccessFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
DWORD access = Support::test(memoryFlags, MemoryFlags::kAccessWrite) ? FILE_MAP_WRITE : FILE_MAP_READ; DWORD access = Support::test(memoryFlags, MemoryFlags::kAccessWrite) ? FILE_MAP_WRITE : FILE_MAP_READ;
if (Support::test(memoryFlags, MemoryFlags::kAccessExecute)) if (Support::test(memoryFlags, MemoryFlags::kAccessExecute)) {
access |= FILE_MAP_EXECUTE; access |= FILE_MAP_EXECUTE;
}
return access; return access;
} }
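The code above (and throughout these files) queries bits of the scoped MemoryFlags enum with Support::test(). Scoped enums have no implicit integer conversions, so this relies on overloaded bitwise operators plus a small helper; a self-contained sketch of the pattern with illustrative names (AsmJit's ASMJIT_DEFINE_ENUM_FLAGS macro and Support::test() provide the real equivalents):

#include <cstdint>
#include <type_traits>

enum class MemFlags : uint32_t {
  kNone    = 0x0u,
  kRead    = 0x1u,
  kWrite   = 0x2u,
  kExecute = 0x4u
};

constexpr MemFlags operator|(MemFlags a, MemFlags b) noexcept { return MemFlags(uint32_t(a) | uint32_t(b)); }
constexpr MemFlags operator&(MemFlags a, MemFlags b) noexcept { return MemFlags(uint32_t(a) & uint32_t(b)); }

// In the spirit of Support::test(): true if any of the queried bits are set.
template<typename T>
constexpr bool test(T flags, T bits) noexcept {
  using U = std::underlying_type_t<T>;
  return (U(flags) & U(bits)) != U(0);
}

static_assert(test(MemFlags::kRead | MemFlags::kWrite, MemFlags::kWrite));
static_assert(!test(MemFlags::kRead, MemFlags::kExecute));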
static HardenedRuntimeFlags getHardenedRuntimeFlags() noexcept { static HardenedRuntimeFlags getHardenedRuntimeFlags() noexcept {
HardenedRuntimeFlags flags = HardenedRuntimeFlags::kNone; HardenedRuntimeFlags flags = HardenedRuntimeFlags::kNone;
if (hasDualMappingSupport()) if (hasDualMappingSupport()) {
flags |= HardenedRuntimeFlags::kDualMapping; flags |= HardenedRuntimeFlags::kDualMapping;
}
return flags; return flags;
} }
Error alloc(void** p, size_t size, MemoryFlags memoryFlags) noexcept { Error alloc(void** p, size_t size, MemoryFlags memoryFlags) noexcept {
*p = nullptr; *p = nullptr;
if (size == 0)
if (size == 0) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
DWORD allocationType = MEM_COMMIT | MEM_RESERVE; DWORD allocationType = MEM_COMMIT | MEM_RESERVE;
DWORD protectFlags = protectFlagsFromMemoryFlags(memoryFlags); DWORD protectFlags = protectFlagsFromMemoryFlags(memoryFlags);
@@ -230,18 +238,21 @@ Error alloc(void** p, size_t size, MemoryFlags memoryFlags) noexcept {
size_t lpSize = largePageSize(); size_t lpSize = largePageSize();
// Does it make sense to call VirtualAlloc() if we failed to query large page size? // Does it make sense to call VirtualAlloc() if we failed to query large page size?
if (lpSize == 0) if (lpSize == 0) {
return DebugUtils::errored(kErrorFeatureNotEnabled); return DebugUtils::errored(kErrorFeatureNotEnabled);
}
if (!Support::isAligned(size, lpSize)) if (!Support::isAligned(size, lpSize)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
allocationType |= MEM_LARGE_PAGES; allocationType |= MEM_LARGE_PAGES;
} }
void* result = ::VirtualAlloc(nullptr, size, allocationType, protectFlags); void* result = ::VirtualAlloc(nullptr, size, allocationType, protectFlags);
if (!result) if (!result) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
*p = result; *p = result;
return kErrorOk; return kErrorOk;
@@ -249,10 +260,14 @@ Error alloc(void** p, size_t size, MemoryFlags memoryFlags) noexcept {
Error release(void* p, size_t size) noexcept { Error release(void* p, size_t size) noexcept {
DebugUtils::unused(size); DebugUtils::unused(size);
// NOTE: If the `dwFreeType` parameter is MEM_RELEASE, `size` parameter must be zero. // NOTE: If the `dwFreeType` parameter is MEM_RELEASE, `size` parameter must be zero.
constexpr DWORD dwFreeType = MEM_RELEASE; constexpr DWORD dwFreeType = MEM_RELEASE;
if (ASMJIT_UNLIKELY(!::VirtualFree(p, 0, dwFreeType)))
if (ASMJIT_UNLIKELY(!::VirtualFree(p, 0, dwFreeType))) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
return kErrorOk; return kErrorOk;
} }
@@ -260,8 +275,9 @@ Error protect(void* p, size_t size, MemoryFlags memoryFlags) noexcept {
DWORD protectFlags = protectFlagsFromMemoryFlags(memoryFlags); DWORD protectFlags = protectFlagsFromMemoryFlags(memoryFlags);
DWORD oldFlags; DWORD oldFlags;
if (::VirtualProtect(p, size, protectFlags, &oldFlags)) if (::VirtualProtect(p, size, protectFlags, &oldFlags)) {
return kErrorOk; return kErrorOk;
}
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
} }
@@ -270,8 +286,9 @@ Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) no
dm->rx = nullptr; dm->rx = nullptr;
dm->rw = nullptr; dm->rw = nullptr;
if (size == 0) if (size == 0) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
ScopedHandle handle; ScopedHandle handle;
handle.value = ::CreateFileMappingW( handle.value = ::CreateFileMappingW(
@@ -282,8 +299,9 @@ Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) no
(DWORD)(size & 0xFFFFFFFFu), (DWORD)(size & 0xFFFFFFFFu),
nullptr); nullptr);
if (ASMJIT_UNLIKELY(!handle.value)) if (ASMJIT_UNLIKELY(!handle.value)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
void* ptr[2]; void* ptr[2];
for (uint32_t i = 0; i < 2; i++) { for (uint32_t i = 0; i < 2; i++) {
@@ -292,8 +310,9 @@ Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) no
ptr[i] = ::MapViewOfFile(handle.value, desiredAccess, 0, 0, size); ptr[i] = ::MapViewOfFile(handle.value, desiredAccess, 0, 0, size);
if (ptr[i] == nullptr) { if (ptr[i] == nullptr) {
if (i == 1u) if (i == 1u) {
::UnmapViewOfFile(ptr[0]); ::UnmapViewOfFile(ptr[0]);
}
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
} }
} }
@@ -307,14 +326,17 @@ Error releaseDualMapping(DualMapping* dm, size_t size) noexcept {
DebugUtils::unused(size); DebugUtils::unused(size);
bool failed = false; bool failed = false;
if (!::UnmapViewOfFile(dm->rx)) if (!::UnmapViewOfFile(dm->rx)) {
failed = true; failed = true;
}
if (dm->rx != dm->rw && !UnmapViewOfFile(dm->rw)) if (dm->rx != dm->rw && !UnmapViewOfFile(dm->rw)) {
failed = true; failed = true;
}
if (failed) if (failed) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
dm->rx = nullptr; dm->rx = nullptr;
dm->rw = nullptr; dm->rw = nullptr;
@@ -342,7 +364,7 @@ struct KernelVersion {
inline bool ge(long major, long minor) const noexcept { return ver[0] > major || (ver[0] == major && ver[1] >= minor); } inline bool ge(long major, long minor) const noexcept { return ver[0] > major || (ver[0] == major && ver[1] >= minor); }
}; };
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static KernelVersion getKernelVersion() noexcept { static KernelVersion getKernelVersion() noexcept {
KernelVersion out {}; KernelVersion out {};
struct utsname buf {}; struct utsname buf {};
@@ -368,7 +390,7 @@ static KernelVersion getKernelVersion() noexcept {
#endif // getKernelVersion #endif // getKernelVersion
// Translates libc errors specific to VirtualMemory mapping to `asmjit::Error`. // Translates libc errors specific to VirtualMemory mapping to `asmjit::Error`.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static Error asmjitErrorFromErrno(int e) noexcept { static Error asmjitErrorFromErrno(int e) noexcept {
switch (e) { switch (e) {
case EACCES: case EACCES:
@@ -391,20 +413,20 @@ static Error asmjitErrorFromErrno(int e) noexcept {
} }
} }
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static MemoryFlags maxAccessFlagsToRegularAccessFlags(MemoryFlags memoryFlags) noexcept { static MemoryFlags maxAccessFlagsToRegularAccessFlags(MemoryFlags memoryFlags) noexcept {
static constexpr uint32_t kMaxProtShift = Support::ConstCTZ<uint32_t(MemoryFlags::kMMapMaxAccessRead)>::value; static constexpr uint32_t kMaxProtShift = Support::ConstCTZ<uint32_t(MemoryFlags::kMMapMaxAccessRead)>::value;
return MemoryFlags(uint32_t(memoryFlags & MemoryFlags::kMMapMaxAccessRWX) >> kMaxProtShift); return MemoryFlags(uint32_t(memoryFlags & MemoryFlags::kMMapMaxAccessRWX) >> kMaxProtShift);
} }
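This file also swaps the library's ASMJIT_MAYBE_UNUSED macro for the standard C++17 [[maybe_unused]] attribute on helpers that are only referenced on some platforms. A minimal illustration; the pre-C++17 macro shown here is an assumption about how such a wrapper is typically written, not AsmJit's exact definition:

// Pre-C++17 style wrapper (illustrative).
#if defined(__GNUC__)
  #define MY_MAYBE_UNUSED __attribute__((__unused__))
#else
  #define MY_MAYBE_UNUSED
#endif

MY_MAYBE_UNUSED
static int legacyHelper() noexcept { return 1; }

// C++17: a standard attribute, no macro or compiler-specific spelling needed.
[[maybe_unused]]
static int modernHelper() noexcept { return 2; }

// Neither helper is called, yet no unused-function warnings are emitted.
int main() { return 0; }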
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static MemoryFlags regularAccessFlagsToMaxAccessFlags(MemoryFlags memoryFlags) noexcept { static MemoryFlags regularAccessFlagsToMaxAccessFlags(MemoryFlags memoryFlags) noexcept {
static constexpr uint32_t kMaxProtShift = Support::ConstCTZ<uint32_t(MemoryFlags::kMMapMaxAccessRead)>::value; static constexpr uint32_t kMaxProtShift = Support::ConstCTZ<uint32_t(MemoryFlags::kMMapMaxAccessRead)>::value;
return MemoryFlags(uint32_t(memoryFlags & MemoryFlags::kAccessRWX) << kMaxProtShift); return MemoryFlags(uint32_t(memoryFlags & MemoryFlags::kAccessRWX) << kMaxProtShift);
} }
// Returns `mmap()` protection flags from \ref MemoryFlags. // Returns `mmap()` protection flags from \ref MemoryFlags.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static int mmProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept { static int mmProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
int protection = 0; int protection = 0;
if (Support::test(memoryFlags, MemoryFlags::kAccessRead)) protection |= PROT_READ; if (Support::test(memoryFlags, MemoryFlags::kAccessRead)) protection |= PROT_READ;
@@ -418,7 +440,7 @@ static int mmProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
// Uses: // Uses:
// - `PROT_MPROTECT()` on NetBSD. // - `PROT_MPROTECT()` on NetBSD.
// - `PROT_MAX()` when available on other BSDs. // - `PROT_MAX()` when available on other BSDs.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static inline int mmMaxProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept { static inline int mmMaxProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
MemoryFlags acc = maxAccessFlagsToRegularAccessFlags(memoryFlags); MemoryFlags acc = maxAccessFlagsToRegularAccessFlags(memoryFlags);
if (acc != MemoryFlags::kNone) { if (acc != MemoryFlags::kNone) {
@@ -450,8 +472,10 @@ static size_t detectLargePageSize() noexcept {
return (getpagesizes(pageSize.data(), 2) < 2) ? 0 : uint32_t(pageSize[1]); return (getpagesizes(pageSize.data(), 2) < 2) ? 0 : uint32_t(pageSize[1]);
#elif defined(__linux__) #elif defined(__linux__)
StringTmp<128> storage; StringTmp<128> storage;
if (OSUtils::readFile("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", storage, 16) != kErrorOk || storage.empty())
if (OSUtils::readFile("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", storage, 16) != kErrorOk || storage.empty()) {
return 0u; return 0u;
}
// The first value should be the size of the page (hpage_pmd_size). // The first value should be the size of the page (hpage_pmd_size).
size_t largePageSize = 0; size_t largePageSize = 0;
@@ -461,8 +485,9 @@ static size_t detectLargePageSize() noexcept {
for (size_t i = 0; i < bufSize; i++) { for (size_t i = 0; i < bufSize; i++) {
uint32_t digit = uint32_t(uint8_t(buf[i]) - uint8_t('0')); uint32_t digit = uint32_t(uint8_t(buf[i]) - uint8_t('0'));
if (digit >= 10u) if (digit >= 10u) {
break; break;
}
largePageSize = largePageSize * 10 + digit; largePageSize = largePageSize * 10 + digit;
} }
@@ -514,7 +539,7 @@ static uint32_t getMfdExecFlag() noexcept {
// It's not fully random, just to avoid collisions when opening TMP or SHM file. // It's not fully random, just to avoid collisions when opening TMP or SHM file.
ASMJIT_MAYBE_UNUSED [[maybe_unused]]
static uint64_t generateRandomBits(uintptr_t stackPtr, uint32_t attempt) noexcept { static uint64_t generateRandomBits(uintptr_t stackPtr, uint32_t attempt) noexcept {
static std::atomic<uint32_t> internalCounter; static std::atomic<uint32_t> internalCounter;
@@ -573,14 +598,17 @@ public:
if (!memfd_create_not_supported) { if (!memfd_create_not_supported) {
_fd = (int)syscall(__NR_memfd_create, "vmem", MFD_CLOEXEC | getMfdExecFlag()); _fd = (int)syscall(__NR_memfd_create, "vmem", MFD_CLOEXEC | getMfdExecFlag());
if (ASMJIT_LIKELY(_fd >= 0)) if (ASMJIT_LIKELY(_fd >= 0)) {
return kErrorOk; return kErrorOk;
}
int e = errno; int e = errno;
if (e == ENOSYS) if (e == ENOSYS) {
memfd_create_not_supported = 1; memfd_create_not_supported = 1;
else }
else {
return DebugUtils::errored(asmjitErrorFromErrno(e)); return DebugUtils::errored(asmjitErrorFromErrno(e));
}
} }
#endif // __linux__ && __NR_memfd_create #endif // __linux__ && __NR_memfd_create
@@ -589,10 +617,12 @@ public:
DebugUtils::unused(preferTmpOverDevShm); DebugUtils::unused(preferTmpOverDevShm);
_fd = ::shm_open(SHM_ANON, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR); _fd = ::shm_open(SHM_ANON, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
if (ASMJIT_LIKELY(_fd >= 0)) if (ASMJIT_LIKELY(_fd >= 0)) {
return kErrorOk; return kErrorOk;
else }
else {
return DebugUtils::errored(asmjitErrorFromErrno(errno)); return DebugUtils::errored(asmjitErrorFromErrno(errno));
}
#else #else
// POSIX API. We have to generate somehow a unique name, so use `generateRandomBits()` helper. To prevent // POSIX API. We have to generate somehow a unique name, so use `generateRandomBits()` helper. To prevent
// having file collisions we use `shm_open()` with flags that require creation of the file so we never open // having file collisions we use `shm_open()` with flags that require creation of the file so we never open
@@ -625,8 +655,9 @@ public:
#endif #endif
int e = errno; int e = errno;
if (e != EEXIST) if (e != EEXIST) {
return DebugUtils::errored(asmjitErrorFromErrno(e)); return DebugUtils::errored(asmjitErrorFromErrno(e));
}
} }
return DebugUtils::errored(kErrorFailedToOpenAnonymousMemory); return DebugUtils::errored(kErrorFailedToOpenAnonymousMemory);
@@ -659,8 +690,9 @@ public:
Error allocate(size_t size) noexcept { Error allocate(size_t size) noexcept {
// TODO: Improve this by using `posix_fallocate()` when available. // TODO: Improve this by using `posix_fallocate()` when available.
if (ASMJIT_FILE64_API(ftruncate)(_fd, off_t(size)) != 0) if (ASMJIT_FILE64_API(ftruncate)(_fd, off_t(size)) != 0) {
return DebugUtils::errored(asmjitErrorFromErrno(errno)); return DebugUtils::errored(asmjitErrorFromErrno(errno));
}
return kErrorOk; return kErrorOk;
} }
@@ -725,11 +757,9 @@ static bool hasHardenedRuntime() noexcept {
#else #else
static std::atomic<uint32_t> cachedHardenedFlag; static std::atomic<uint32_t> cachedHardenedFlag;
enum HardenedFlag : uint32_t { constexpr uint32_t kHardenedFlagUnknown = 0;
kHardenedFlagUnknown = 0, constexpr uint32_t kHardenedFlagDisabled = 1;
kHardenedFlagDisabled = 1, constexpr uint32_t kHardenedFlagEnabled = 2;
kHardenedFlagEnabled = 2
};
uint32_t flag = cachedHardenedFlag.load(); uint32_t flag = cachedHardenedFlag.load();
if (flag == kHardenedFlagUnknown) { if (flag == kHardenedFlagUnknown) {
@@ -784,12 +814,14 @@ static inline int mmMapJitFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
// //
// MAP_JIT is not required when dual-mapping memory and is incompatible with MAP_SHARED, so it will not be // MAP_JIT is not required when dual-mapping memory and is incompatible with MAP_SHARED, so it will not be
// added when the latter is enabled. // added when the latter is enabled.
bool useMapJit = (Support::test(memoryFlags, MemoryFlags::kMMapEnableMapJit) || hasHardenedRuntime()) bool useMapJit = (Support::test(memoryFlags, MemoryFlags::kMMapEnableMapJit) || hasHardenedRuntime()) &&
&& !Support::test(memoryFlags, MemoryFlags::kMapShared); !Support::test(memoryFlags, MemoryFlags::kMapShared);
if (useMapJit) if (useMapJit) {
return hasMapJitSupport() ? int(MAP_JIT) : 0; return hasMapJitSupport() ? int(MAP_JIT) : 0;
else }
else {
return 0; return 0;
}
#else #else
DebugUtils::unused(memoryFlags); DebugUtils::unused(memoryFlags);
return 0; return 0;
@@ -807,40 +839,48 @@ static inline bool hasDualMappingSupport() noexcept {
static HardenedRuntimeFlags getHardenedRuntimeFlags() noexcept { static HardenedRuntimeFlags getHardenedRuntimeFlags() noexcept {
HardenedRuntimeFlags flags = HardenedRuntimeFlags::kNone; HardenedRuntimeFlags flags = HardenedRuntimeFlags::kNone;
if (hasHardenedRuntime()) if (hasHardenedRuntime()) {
flags |= HardenedRuntimeFlags::kEnabled; flags |= HardenedRuntimeFlags::kEnabled;
}
if (hasMapJitSupport()) if (hasMapJitSupport()) {
flags |= HardenedRuntimeFlags::kMapJit; flags |= HardenedRuntimeFlags::kMapJit;
}
if (hasDualMappingSupport()) if (hasDualMappingSupport()) {
flags |= HardenedRuntimeFlags::kDualMapping; flags |= HardenedRuntimeFlags::kDualMapping;
}
return flags; return flags;
} }
static Error mapMemory(void** p, size_t size, MemoryFlags memoryFlags, int fd = -1, off_t offset = 0) noexcept { static Error mapMemory(void** p, size_t size, MemoryFlags memoryFlags, int fd = -1, off_t offset = 0) noexcept {
*p = nullptr; *p = nullptr;
if (size == 0)
if (size == 0) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
int protection = mmProtFromMemoryFlags(memoryFlags) | mmMaxProtFromMemoryFlags(memoryFlags); int protection = mmProtFromMemoryFlags(memoryFlags) | mmMaxProtFromMemoryFlags(memoryFlags);
int mmFlags = mmMapJitFromMemoryFlags(memoryFlags); int mmFlags = mmMapJitFromMemoryFlags(memoryFlags);
mmFlags |= Support::test(memoryFlags, MemoryFlags::kMapShared) ? MAP_SHARED : MAP_PRIVATE; mmFlags |= Support::test(memoryFlags, MemoryFlags::kMapShared) ? MAP_SHARED : MAP_PRIVATE;
if (fd == -1) if (fd == -1) {
mmFlags |= MAP_ANONYMOUS; mmFlags |= MAP_ANONYMOUS;
}
bool useLargePages = Support::test(memoryFlags, VirtMem::MemoryFlags::kMMapLargePages); bool useLargePages = Support::test(memoryFlags, VirtMem::MemoryFlags::kMMapLargePages);
if (useLargePages) { if (useLargePages) {
#if defined(__linux__) #if defined(__linux__)
size_t lpSize = largePageSize(); size_t lpSize = largePageSize();
if (lpSize == 0) if (lpSize == 0) {
return DebugUtils::errored(kErrorFeatureNotEnabled); return DebugUtils::errored(kErrorFeatureNotEnabled);
}
if (!Support::isAligned(size, lpSize)) if (!Support::isAligned(size, lpSize)) {
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
}
unsigned lpSizeLog2 = Support::ctz(lpSize); unsigned lpSizeLog2 = Support::ctz(lpSize);
mmFlags |= int(unsigned(MAP_HUGETLB) | (lpSizeLog2 << MAP_HUGE_SHIFT)); mmFlags |= int(unsigned(MAP_HUGETLB) | (lpSizeLog2 << MAP_HUGE_SHIFT));
@@ -850,8 +890,9 @@ static Error mapMemory(void** p, size_t size, MemoryFlags memoryFlags, int fd =
} }
void* ptr = mmap(nullptr, size, protection, mmFlags, fd, offset); void* ptr = mmap(nullptr, size, protection, mmFlags, fd, offset);
if (ptr == MAP_FAILED) if (ptr == MAP_FAILED) {
return DebugUtils::errored(asmjitErrorFromErrno(errno)); return DebugUtils::errored(asmjitErrorFromErrno(errno));
}
#if defined(MADV_HUGEPAGE) #if defined(MADV_HUGEPAGE)
if (useLargePages) { if (useLargePages) {
@@ -864,8 +905,9 @@ static Error mapMemory(void** p, size_t size, MemoryFlags memoryFlags, int fd =
} }
static Error unmapMemory(void* p, size_t size) noexcept { static Error unmapMemory(void* p, size_t size) noexcept {
if (ASMJIT_UNLIKELY(munmap(p, size) != 0)) if (ASMJIT_UNLIKELY(munmap(p, size) != 0)) {
return DebugUtils::errored(asmjitErrorFromErrno(errno)); return DebugUtils::errored(asmjitErrorFromErrno(errno));
}
return kErrorOk; return kErrorOk;
} }
@@ -880,9 +922,9 @@ Error release(void* p, size_t size) noexcept {
Error protect(void* p, size_t size, MemoryFlags memoryFlags) noexcept { Error protect(void* p, size_t size, MemoryFlags memoryFlags) noexcept {
int protection = mmProtFromMemoryFlags(memoryFlags); int protection = mmProtFromMemoryFlags(memoryFlags);
if (mprotect(p, size, protection) == 0) if (mprotect(p, size, protection) == 0) {
return kErrorOk; return kErrorOk;
}
return DebugUtils::errored(asmjitErrorFromErrno(errno)); return DebugUtils::errored(asmjitErrorFromErrno(errno));
} }
@@ -894,12 +936,14 @@ static Error unmapDualMapping(DualMapping* dm, size_t size) noexcept {
Error err1 = unmapMemory(dm->rx, size); Error err1 = unmapMemory(dm->rx, size);
Error err2 = kErrorOk; Error err2 = kErrorOk;
if (dm->rx != dm->rw) if (dm->rx != dm->rw) {
err2 = unmapMemory(dm->rw, size); err2 = unmapMemory(dm->rw, size);
}
// We can report only one error, so report the first... // We can report only one error, so report the first...
if (err1 || err2) if (err1 || err2) {
return DebugUtils::errored(err1 ? err1 : err2); return DebugUtils::errored(err1 ? err1 : err2);
}
dm->rx = nullptr; dm->rx = nullptr;
dm->rw = nullptr; dm->rw = nullptr;
@@ -964,8 +1008,9 @@ static Error allocDualMappingUsingMachVmRemap(DualMapping* dmOut, size_t size, M
int rwProtectFlags = VM_PROT_READ | VM_PROT_WRITE; int rwProtectFlags = VM_PROT_READ | VM_PROT_WRITE;
int rxProtectFlags = VM_PROT_READ; int rxProtectFlags = VM_PROT_READ;
if (Support::test(memoryFlags, MemoryFlags::kAccessExecute)) if (Support::test(memoryFlags, MemoryFlags::kAccessExecute)) {
rxProtectFlags |= VM_PROT_EXECUTE; rxProtectFlags |= VM_PROT_EXECUTE;
}
kern_return_t result {}; kern_return_t result {};
do { do {
@@ -992,8 +1037,9 @@ static Error allocDualMappingUsingMachVmRemap(DualMapping* dmOut, size_t size, M
&maxProt, // max_protection &maxProt, // max_protection
VM_INHERIT_DEFAULT); // inheritance VM_INHERIT_DEFAULT); // inheritance
if (result != KERN_SUCCESS) if (result != KERN_SUCCESS) {
break; break;
}
dm.rw = (void*)remappedAddr; dm.rw = (void*)remappedAddr;
@@ -1011,8 +1057,9 @@ static Error allocDualMappingUsingMachVmRemap(DualMapping* dmOut, size_t size, M
setMaximum, // set_maximum setMaximum, // set_maximum
rxProtectFlags); // new_protection rxProtectFlags); // new_protection
if (result != KERN_SUCCESS) if (result != KERN_SUCCESS) {
break; break;
}
result = vm_protect(task, // target_task result = vm_protect(task, // target_task
(vm_address_t)dm.rw, // address (vm_address_t)dm.rw, // address
@@ -1020,8 +1067,9 @@ static Error allocDualMappingUsingMachVmRemap(DualMapping* dmOut, size_t size, M
setMaximum, // set_maximum setMaximum, // set_maximum
rwProtectFlags); // new_protection rwProtectFlags); // new_protection
if (result != KERN_SUCCESS) if (result != KERN_SUCCESS) {
break; break;
}
} }
} while (0); } while (0);
@@ -1053,8 +1101,9 @@ static Error allocDualMappingUsingFile(DualMapping* dm, size_t size, MemoryFlags
MemoryFlags restrictedMemoryFlags = memoryFlags & ~dualMappingFilter[i]; MemoryFlags restrictedMemoryFlags = memoryFlags & ~dualMappingFilter[i];
Error err = mapMemory(&ptr[i], size, restrictedMemoryFlags | MemoryFlags::kMapShared, anonMem.fd(), 0); Error err = mapMemory(&ptr[i], size, restrictedMemoryFlags | MemoryFlags::kMapShared, anonMem.fd(), 0);
if (err != kErrorOk) { if (err != kErrorOk) {
if (i == 1) if (i == 1) {
unmapMemory(ptr[0], size); unmapMemory(ptr[0], size);
}
return err; return err;
} }
} }
@@ -1073,8 +1122,9 @@ Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) no
DebugUtils::unused(size, memoryFlags); DebugUtils::unused(size, memoryFlags);
return DebugUtils::errored(kErrorFeatureNotEnabled); return DebugUtils::errored(kErrorFeatureNotEnabled);
#else #else
if (off_t(size) <= 0) if (off_t(size) <= 0) {
return DebugUtils::errored(size == 0 ? kErrorInvalidArgument : kErrorTooLarge); return DebugUtils::errored(size == 0 ? kErrorInvalidArgument : kErrorTooLarge);
}
#if defined(ASMJIT_ANONYMOUS_MEMORY_USE_REMAPDUP) #if defined(ASMJIT_ANONYMOUS_MEMORY_USE_REMAPDUP)
return allocDualMappingUsingRemapdup(dm, size, memoryFlags); return allocDualMappingUsingRemapdup(dm, size, memoryFlags);
@@ -1143,11 +1193,13 @@ size_t largePageSize() noexcept {
static constexpr size_t kNotAvailable = 1; static constexpr size_t kNotAvailable = 1;
size_t size = largePageSize.load(); size_t size = largePageSize.load();
if (ASMJIT_LIKELY(size > kNotAvailable)) if (ASMJIT_LIKELY(size > kNotAvailable)) {
return size; return size;
}
if (size == kNotAvailable) if (size == kNotAvailable) {
return 0; return 0;
}
size = detectLargePageSize(); size = detectLargePageSize();
largePageSize.store(size != 0 ? size : kNotAvailable); largePageSize.store(size != 0 ? size : kNotAvailable);

View File

@@ -50,6 +50,7 @@ struct Info {
}; };
//! Returns virtual memory information, see `VirtMem::Info` for more details. //! Returns virtual memory information, see `VirtMem::Info` for more details.
[[nodiscard]]
ASMJIT_API Info info() noexcept; ASMJIT_API Info info() noexcept;
//! Returns the size of the smallest large page supported. //! Returns the size of the smallest large page supported.
@@ -59,6 +60,7 @@ ASMJIT_API Info info() noexcept;
//! //!
//! Returns either the detected large page size or 0, if large page support is either not supported by AsmJit //! Returns either the detected large page size or 0, if large page support is either not supported by AsmJit
//! or not accessible to the process. //! or not accessible to the process.
[[nodiscard]]
ASMJIT_API size_t largePageSize() noexcept; ASMJIT_API size_t largePageSize() noexcept;
//! Virtual memory access and mmap-specific flags. //! Virtual memory access and mmap-specific flags.
@@ -164,15 +166,18 @@ ASMJIT_DEFINE_ENUM_FLAGS(MemoryFlags)
//! //!
//! \note `size` should be aligned to page size, use \ref VirtMem::info() to obtain it. Invalid size will not be //! \note `size` should be aligned to page size, use \ref VirtMem::info() to obtain it. Invalid size will not be
//! corrected by the implementation and the allocation would not succeed in such case. //! corrected by the implementation and the allocation would not succeed in such case.
[[nodiscard]]
ASMJIT_API Error alloc(void** p, size_t size, MemoryFlags flags) noexcept; ASMJIT_API Error alloc(void** p, size_t size, MemoryFlags flags) noexcept;
//! Releases virtual memory previously allocated by \ref VirtMem::alloc(). //! Releases virtual memory previously allocated by \ref VirtMem::alloc().
//! //!
//! \note The size must be the same as used by \ref VirtMem::alloc(). If the size is not the same value the call //! \note The size must be the same as used by \ref VirtMem::alloc(). If the size is not the same value the call
//! will fail on any POSIX system, but pass on Windows, because it's implemented differently. //! will fail on any POSIX system, but pass on Windows, because it's implemented differently.
[[nodiscard]]
ASMJIT_API Error release(void* p, size_t size) noexcept; ASMJIT_API Error release(void* p, size_t size) noexcept;
//! A cross-platform wrapper around `mprotect()` (POSIX) and `VirtualProtect()` (Windows). //! A cross-platform wrapper around `mprotect()` (POSIX) and `VirtualProtect()` (Windows).
[[nodiscard]]
ASMJIT_API Error protect(void* p, size_t size, MemoryFlags flags) noexcept; ASMJIT_API Error protect(void* p, size_t size, MemoryFlags flags) noexcept;
//! Dual memory mapping used to map an anonymous memory into two memory regions where one region is read-only, but //! Dual memory mapping used to map an anonymous memory into two memory regions where one region is read-only, but
@@ -195,11 +200,13 @@ struct DualMapping {
//! release the memory returned by `allocDualMapping()` as that would fail on Windows. //! release the memory returned by `allocDualMapping()` as that would fail on Windows.
//! //!
//! \remarks Both pointers in `dm` would be set to `nullptr` if the function fails. //! \remarks Both pointers in `dm` would be set to `nullptr` if the function fails.
[[nodiscard]]
ASMJIT_API Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags flags) noexcept; ASMJIT_API Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags flags) noexcept;
//! Releases virtual memory mapping previously allocated by \ref VirtMem::allocDualMapping(). //! Releases virtual memory mapping previously allocated by \ref VirtMem::allocDualMapping().
//! //!
//! \remarks Both pointers in `dm` would be set to `nullptr` if the function succeeds. //! \remarks Both pointers in `dm` would be set to `nullptr` if the function succeeds.
[[nodiscard]]
ASMJIT_API Error releaseDualMapping(DualMapping* dm, size_t size) noexcept; ASMJIT_API Error releaseDualMapping(DualMapping* dm, size_t size) noexcept;
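A dual mapping gives the process two views of the same anonymous memory, so JIT code can be written through one address and executed through the other without the region ever being writable and executable at once. A sketch of the round trip; the `rx`/`rw` member names of `DualMapping` and the access flag names are assumptions based on the library, not visible in this hunk:

```cpp
#include <asmjit/core.h>
#include <cstring>

using namespace asmjit;

int main() {
  VirtMem::DualMapping dm {};
  constexpr size_t kSize = 65536; // should be a multiple of the page size.

  Error err = VirtMem::allocDualMapping(
    &dm, kSize,
    VirtMem::MemoryFlags::kAccessReadWrite | VirtMem::MemoryFlags::kAccessExecute);
  if (err != kErrorOk) {
    return 1;
  }

  // All stores go through the writable view...
  std::memset(dm.rw, 0, kSize);
  // ...while dm.rx is the address handed out for execution (never written to).

  (void)VirtMem::releaseDualMapping(&dm, kSize);
  return 0;
}
```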
//! Hardened runtime flags. //! Hardened runtime flags.
@@ -238,12 +245,14 @@ struct HardenedRuntimeInfo {
//! \{ //! \{
//! Tests whether the hardened runtime `flag` is set. //! Tests whether the hardened runtime `flag` is set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(HardenedRuntimeFlags flag) const noexcept { return Support::test(flags, flag); } ASMJIT_INLINE_NODEBUG bool hasFlag(HardenedRuntimeFlags flag) const noexcept { return Support::test(flags, flag); }
//! \} //! \}
}; };
//! Returns runtime features provided by the OS. //! Returns runtime features provided by the OS.
[[nodiscard]]
ASMJIT_API HardenedRuntimeInfo hardenedRuntimeInfo() noexcept; ASMJIT_API HardenedRuntimeInfo hardenedRuntimeInfo() noexcept;
//! Values that can be used with `protectJitMemory()` function. //! Values that can be used with `protectJitMemory()` function.
@@ -296,10 +305,11 @@ public:
//! \{ //! \{
//! Makes the given memory block RW protected. //! Makes the given memory block RW protected.
ASMJIT_FORCE_INLINE ProtectJitReadWriteScope( ASMJIT_INLINE ProtectJitReadWriteScope(
void* rxPtr, void* rxPtr,
size_t size, size_t size,
CachePolicy policy = CachePolicy::kDefault) noexcept CachePolicy policy = CachePolicy::kDefault
) noexcept
: _rxPtr(rxPtr), : _rxPtr(rxPtr),
_size(size), _size(size),
_policy(policy) { _policy(policy) {
@@ -307,11 +317,12 @@ public:
} }
//! Makes the memory block RX protected again and flushes instruction cache. //! Makes the memory block RX protected again and flushes instruction cache.
ASMJIT_FORCE_INLINE ~ProtectJitReadWriteScope() noexcept { ASMJIT_INLINE ~ProtectJitReadWriteScope() noexcept {
protectJitMemory(ProtectJitAccess::kReadExecute); protectJitMemory(ProtectJitAccess::kReadExecute);
if (_policy != CachePolicy::kNeverFlush) if (_policy != CachePolicy::kNeverFlush) {
flushInstructionCache(_rxPtr, _size); flushInstructionCache(_rxPtr, _size);
}
} }
//! \} //! \}
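The RAII scope above is the intended way to patch memory that is already executable: construction makes JIT memory writable, destruction switches it back to read+execute and flushes the instruction cache (unless `kNeverFlush` is requested). A short usage sketch; `codePtr`/`codeSize` stand for a previously allocated JIT region:

```cpp
#include <asmjit/core.h>
#include <cstring>

using namespace asmjit;

// Patches a few bytes inside an existing JIT region. While `scope` is alive
// writes to the region are permitted; when it goes out of scope the protection
// is restored and the instruction cache is flushed.
void patchJitCode(void* codePtr, size_t codeSize, const void* patch, size_t patchSize) noexcept {
  VirtMem::ProtectJitReadWriteScope scope(codePtr, codeSize);
  std::memcpy(codePtr, patch, patchSize <= codeSize ? patchSize : codeSize);
} // <- kReadExecute restored + flushInstructionCache() here.
```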


@@ -210,28 +210,36 @@ void* Zone::_alloc(size_t size, size_t alignment) noexcept {
void* Zone::allocZeroed(size_t size, size_t alignment) noexcept { void* Zone::allocZeroed(size_t size, size_t alignment) noexcept {
void* p = alloc(size, alignment); void* p = alloc(size, alignment);
if (ASMJIT_UNLIKELY(!p)) if (ASMJIT_UNLIKELY(!p)) {
return p; return p;
}
return memset(p, 0, size); return memset(p, 0, size);
} }
void* Zone::dup(const void* data, size_t size, bool nullTerminate) noexcept { void* Zone::dup(const void* data, size_t size, bool nullTerminate) noexcept {
if (ASMJIT_UNLIKELY(!data || !size)) if (ASMJIT_UNLIKELY(!data || !size)) {
return nullptr; return nullptr;
}
ASMJIT_ASSERT(size != SIZE_MAX); ASMJIT_ASSERT(size != SIZE_MAX);
uint8_t* m = allocT<uint8_t>(size + nullTerminate); uint8_t* m = allocT<uint8_t>(size + nullTerminate);
if (ASMJIT_UNLIKELY(!m)) return nullptr; if (ASMJIT_UNLIKELY(!m)) {
return nullptr;
}
memcpy(m, data, size); memcpy(m, data, size);
if (nullTerminate) m[size] = '\0'; if (nullTerminate) {
m[size] = '\0';
}
return static_cast<void*>(m); return static_cast<void*>(m);
} }
char* Zone::sformat(const char* fmt, ...) noexcept { char* Zone::sformat(const char* fmt, ...) noexcept {
if (ASMJIT_UNLIKELY(!fmt)) if (ASMJIT_UNLIKELY(!fmt)) {
return nullptr; return nullptr;
}
char buf[512]; char buf[512];
size_t size; size_t size;
@@ -252,8 +260,9 @@ char* Zone::sformat(const char* fmt, ...) noexcept {
static bool ZoneAllocator_hasDynamicBlock(ZoneAllocator* self, ZoneAllocator::DynamicBlock* block) noexcept { static bool ZoneAllocator_hasDynamicBlock(ZoneAllocator* self, ZoneAllocator::DynamicBlock* block) noexcept {
ZoneAllocator::DynamicBlock* cur = self->_dynamicBlocks; ZoneAllocator::DynamicBlock* cur = self->_dynamicBlocks;
while (cur) { while (cur) {
if (cur == block) if (cur == block) {
return true; return true;
}
cur = cur->next; cur = cur->next;
} }
return false; return false;
@@ -334,8 +343,9 @@ void* ZoneAllocator::_alloc(size_t size, size_t& allocatedSize) noexcept {
size_t blockOverhead = sizeof(DynamicBlock) + sizeof(DynamicBlock*) + kBlockAlignment; size_t blockOverhead = sizeof(DynamicBlock) + sizeof(DynamicBlock*) + kBlockAlignment;
// Handle a possible overflow. // Handle a possible overflow.
if (ASMJIT_UNLIKELY(blockOverhead >= SIZE_MAX - size)) if (ASMJIT_UNLIKELY(blockOverhead >= SIZE_MAX - size)) {
return nullptr; return nullptr;
}
void* p = ::malloc(size + blockOverhead); void* p = ::malloc(size + blockOverhead);
if (ASMJIT_UNLIKELY(!p)) { if (ASMJIT_UNLIKELY(!p)) {
@@ -347,8 +357,9 @@ void* ZoneAllocator::_alloc(size_t size, size_t& allocatedSize) noexcept {
DynamicBlock* block = static_cast<DynamicBlock*>(p); DynamicBlock* block = static_cast<DynamicBlock*>(p);
DynamicBlock* next = _dynamicBlocks; DynamicBlock* next = _dynamicBlocks;
if (next) if (next) {
next->prev = block; next->prev = block;
}
block->prev = nullptr; block->prev = nullptr;
block->next = next; block->next = next;
@@ -368,7 +379,9 @@ void* ZoneAllocator::_allocZeroed(size_t size, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(isInitialized()); ASMJIT_ASSERT(isInitialized());
void* p = _alloc(size, allocatedSize); void* p = _alloc(size, allocatedSize);
if (ASMJIT_UNLIKELY(!p)) return p; if (ASMJIT_UNLIKELY(!p)) {
return p;
}
return memset(p, 0, allocatedSize); return memset(p, 0, allocatedSize);
} }
@@ -384,13 +397,16 @@ void ZoneAllocator::_releaseDynamic(void* p, size_t size) noexcept {
DynamicBlock* prev = block->prev; DynamicBlock* prev = block->prev;
DynamicBlock* next = block->next; DynamicBlock* next = block->next;
if (prev) if (prev) {
prev->next = next; prev->next = next;
else }
else {
_dynamicBlocks = next; _dynamicBlocks = next;
}
if (next) if (next) {
next->prev = prev; next->prev = prev;
}
::free(block); ::free(block);
} }
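The `Zone` functions touched above are the building blocks of AsmJit's arena allocation: everything is bump-allocated from blocks and released in one shot. A brief usage sketch, assuming the constructor still takes the minimum block size:

```cpp
#include <asmjit/core.h>
#include <cstring>

using namespace asmjit;

int main() {
  Zone zone(4096); // minimum block size (constructor argument assumed).

  // Plain and zeroed bump allocations.
  void* raw = zone.alloc(64);
  void* zeroed = zone.allocZeroed(64, 16);

  // dup() copies a buffer into the zone, optionally null-terminating it.
  const char name[] = "entry";
  char* copy = static_cast<char*>(zone.dup(name, std::strlen(name), true));

  // sformat() builds a short formatted string owned by the zone.
  char* label = zone.sformat("L%u", 42u);

  (void)raw; (void)zeroed; (void)copy; (void)label;

  // One call releases every allocation made above.
  zone.reset();
  return 0;
}
```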


@@ -41,16 +41,21 @@ public:
size_t size; size_t size;
}; };
enum Limits : size_t { static inline constexpr size_t kMinBlockSize = 256; // The number is ridiculously small, but still possible.
kMinBlockSize = 256, // The number is ridiculously small, but still possible. static inline constexpr size_t kMaxBlockSize = size_t(1) << (sizeof(size_t) * 8 - 1);
kMaxBlockSize = size_t(1) << (sizeof(size_t) * 8 - 1),
kMinAlignment = 1, static inline constexpr size_t kMinAlignment = 1;
kMaxAlignment = 64, static inline constexpr size_t kMaxAlignment = 64;
kBlockSize = sizeof(Block), static inline constexpr size_t kBlockSize = sizeof(Block);
kBlockOverhead = kBlockSize + Globals::kAllocOverhead static inline constexpr size_t kBlockOverhead = kBlockSize + Globals::kAllocOverhead;
};
static ASMJIT_API const Block _zeroBlock;
//! \endcond
//! \name Members
//! \{
//! Pointer in the current block. //! Pointer in the current block.
uint8_t* _ptr; uint8_t* _ptr;
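The change above is a pattern that repeats through the rest of this commit: anonymous `enum { ... }` blocks holding constants become `static inline constexpr` members. C++17 inline variables need no out-of-line definition and keep the constants properly typed. A generic before/after sketch (names are illustrative, not AsmJit's):

```cpp
#include <cstddef>

// Before: constants smuggled through an untyped enum.
struct OldStyle {
  enum : size_t {
    kMinBlockSize = 256,
    kMaxAlignment = 64
  };
};

// After (C++17): typed constants; `inline` gives them a single program-wide
// instance, so no separate definition in a .cpp file is needed.
struct NewStyle {
  static inline constexpr size_t kMinBlockSize = 256;
  static inline constexpr size_t kMaxAlignment = 64;
};

static_assert(OldStyle::kMinBlockSize == NewStyle::kMinBlockSize, "same values, better types");
```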
@@ -72,9 +77,7 @@ public:
//! Count of allocated blocks. //! Count of allocated blocks.
size_t _blockCount; size_t _blockCount;
static ASMJIT_API const Block _zeroBlock; //! \}
//! \endcond
//! \name Construction & Destruction //! \name Construction & Destruction
//! \{ //! \{
@@ -141,25 +144,35 @@ public:
//! \{ //! \{
//! Returns the default block alignment. //! Returns the default block alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t blockAlignment() const noexcept { return size_t(1) << _blockAlignmentShift; } ASMJIT_INLINE_NODEBUG size_t blockAlignment() const noexcept { return size_t(1) << _blockAlignmentShift; }
//! Returns a minimum block size. //! Returns a minimum block size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t minimumBlockSize() const noexcept { return size_t(1) << _minimumBlockSizeShift; } ASMJIT_INLINE_NODEBUG size_t minimumBlockSize() const noexcept { return size_t(1) << _minimumBlockSizeShift; }
//! Returns a maximum block size. //! Returns a maximum block size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t maximumBlockSize() const noexcept { return size_t(1) << _maximumBlockSizeShift; } ASMJIT_INLINE_NODEBUG size_t maximumBlockSize() const noexcept { return size_t(1) << _maximumBlockSizeShift; }
//! Tests whether this `Zone` is actually a `ZoneTmp` that uses temporary memory. //! Tests whether this `Zone` is actually a `ZoneTmp` that uses temporary memory.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t hasStaticBlock() const noexcept { return _hasStaticBlock; } ASMJIT_INLINE_NODEBUG uint8_t hasStaticBlock() const noexcept { return _hasStaticBlock; }
//! Returns remaining size of the current block. //! Returns remaining size of the current block.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t remainingSize() const noexcept { return (size_t)(_end - _ptr); } ASMJIT_INLINE_NODEBUG size_t remainingSize() const noexcept { return (size_t)(_end - _ptr); }
//! Returns the current zone cursor (dangerous). //! Returns the current zone cursor (dangerous).
//! //!
//! This is a function that can be used to get exclusive access to the current block's memory buffer. //! This is a function that can be used to get exclusive access to the current block's memory buffer.
template<typename T = uint8_t> template<typename T = uint8_t>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* ptr() noexcept { return reinterpret_cast<T*>(_ptr); } ASMJIT_INLINE_NODEBUG T* ptr() noexcept { return reinterpret_cast<T*>(_ptr); }
//! Returns the end of the current zone block, only useful if you use `ptr()`. //! Returns the end of the current zone block, only useful if you use `ptr()`.
template<typename T = uint8_t> template<typename T = uint8_t>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* end() noexcept { return reinterpret_cast<T*>(_end); } ASMJIT_INLINE_NODEBUG T* end() noexcept { return reinterpret_cast<T*>(_end); }
//! Sets the current zone pointer to `ptr` (must be within the current block). //! Sets the current zone pointer to `ptr` (must be within the current block).
@@ -209,11 +222,14 @@ public:
//! //!
//! \note This function doesn't respect any alignment. If you need to ensure there is enough room for an aligned //! \note This function doesn't respect any alignment. If you need to ensure there is enough room for an aligned
//! allocation you need to call `align()` before calling `ensure()`. //! allocation you need to call `align()` before calling `ensure()`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Error ensure(size_t size) noexcept { ASMJIT_INLINE_NODEBUG Error ensure(size_t size) noexcept {
if (ASMJIT_LIKELY(size <= remainingSize())) if (ASMJIT_LIKELY(size <= remainingSize())) {
return kErrorOk; return kErrorOk;
else }
else {
return _alloc(0, 1) ? kErrorOk : DebugUtils::errored(kErrorOutOfMemory); return _alloc(0, 1) ? kErrorOk : DebugUtils::errored(kErrorOutOfMemory);
}
} }
//! \} //! \}
@@ -252,6 +268,7 @@ public:
//! // Reset or destroy `Zone`. //! // Reset or destroy `Zone`.
//! zone.reset(); //! zone.reset();
//! ``` //! ```
[[nodiscard]]
inline void* alloc(size_t size) noexcept { inline void* alloc(size_t size) noexcept {
if (ASMJIT_UNLIKELY(size > remainingSize())) if (ASMJIT_UNLIKELY(size > remainingSize()))
return _alloc(size, 1); return _alloc(size, 1);
@@ -262,6 +279,7 @@ public:
} }
//! Allocates the requested memory specified by `size` and `alignment`. //! Allocates the requested memory specified by `size` and `alignment`.
[[nodiscard]]
inline void* alloc(size_t size, size_t alignment) noexcept { inline void* alloc(size_t size, size_t alignment) noexcept {
ASMJIT_ASSERT(Support::isPowerOf2(alignment)); ASMJIT_ASSERT(Support::isPowerOf2(alignment));
uint8_t* ptr = Support::alignUp(_ptr, alignment); uint8_t* ptr = Support::alignUp(_ptr, alignment);
@@ -276,6 +294,7 @@ public:
//! Allocates the requested memory specified by `size` without doing any checks. //! Allocates the requested memory specified by `size` without doing any checks.
//! //!
//! Can only be called if `remainingSize()` returns size at least equal to `size`. //! Can only be called if `remainingSize()` returns size at least equal to `size`.
[[nodiscard]]
inline void* allocNoCheck(size_t size) noexcept { inline void* allocNoCheck(size_t size) noexcept {
ASMJIT_ASSERT(remainingSize() >= size); ASMJIT_ASSERT(remainingSize() >= size);
@@ -287,6 +306,7 @@ public:
//! Allocates the requested memory specified by `size` and `alignment` without doing any checks. //! Allocates the requested memory specified by `size` and `alignment` without doing any checks.
//! //!
//! Performs the same operation as `Zone::allocNoCheck(size)` with `alignment` applied. //! Performs the same operation as `Zone::allocNoCheck(size)` with `alignment` applied.
[[nodiscard]]
inline void* allocNoCheck(size_t size, size_t alignment) noexcept { inline void* allocNoCheck(size_t size, size_t alignment) noexcept {
ASMJIT_ASSERT(Support::isPowerOf2(alignment)); ASMJIT_ASSERT(Support::isPowerOf2(alignment));
@@ -298,28 +318,33 @@ public:
} }
//! Allocates `size` bytes of zeroed memory. See `alloc()` for more details. //! Allocates `size` bytes of zeroed memory. See `alloc()` for more details.
[[nodiscard]]
ASMJIT_API void* allocZeroed(size_t size, size_t alignment = 1) noexcept; ASMJIT_API void* allocZeroed(size_t size, size_t alignment = 1) noexcept;
//! Like `alloc()`, but the return pointer is casted to `T*`. //! Like `alloc()`, but the return pointer is casted to `T*`.
template<typename T> template<typename T>
[[nodiscard]]
inline T* allocT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept { inline T* allocT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
return static_cast<T*>(alloc(size, alignment)); return static_cast<T*>(alloc(size, alignment));
} }
//! Like `allocNoCheck()`, but the return pointer is casted to `T*`. //! Like `allocNoCheck()`, but the return pointer is casted to `T*`.
template<typename T> template<typename T>
[[nodiscard]]
inline T* allocNoCheckT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept { inline T* allocNoCheckT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
return static_cast<T*>(allocNoCheck(size, alignment)); return static_cast<T*>(allocNoCheck(size, alignment));
} }
//! Like `allocZeroed()`, but the return pointer is casted to `T*`. //! Like `allocZeroed()`, but the return pointer is casted to `T*`.
template<typename T> template<typename T>
[[nodiscard]]
inline T* allocZeroedT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept { inline T* allocZeroedT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
return static_cast<T*>(allocZeroed(size, alignment)); return static_cast<T*>(allocZeroed(size, alignment));
} }
//! Like `new(std::nothrow) T(...)`, but allocated by `Zone`. //! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
template<typename T> template<typename T>
[[nodiscard]]
inline T* newT() noexcept { inline T* newT() noexcept {
void* p = alloc(sizeof(T), alignof(T)); void* p = alloc(sizeof(T), alignof(T));
if (ASMJIT_UNLIKELY(!p)) if (ASMJIT_UNLIKELY(!p))
@@ -329,6 +354,7 @@ public:
//! Like `new(std::nothrow) T(...)`, but allocated by `Zone`. //! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
template<typename T, typename... Args> template<typename T, typename... Args>
[[nodiscard]]
inline T* newT(Args&&... args) noexcept { inline T* newT(Args&&... args) noexcept {
void* p = alloc(sizeof(T), alignof(T)); void* p = alloc(sizeof(T), alignof(T));
if (ASMJIT_UNLIKELY(!p)) if (ASMJIT_UNLIKELY(!p))
@@ -339,28 +365,32 @@ public:
//! \cond INTERNAL //! \cond INTERNAL
//! //!
//! Internal alloc function used by other inlines. //! Internal alloc function used by other inlines.
[[nodiscard]]
ASMJIT_API void* _alloc(size_t size, size_t alignment) noexcept; ASMJIT_API void* _alloc(size_t size, size_t alignment) noexcept;
//! \endcond //! \endcond
//! Helper to duplicate data. //! Helper to duplicate data.
[[nodiscard]]
ASMJIT_API void* dup(const void* data, size_t size, bool nullTerminate = false) noexcept; ASMJIT_API void* dup(const void* data, size_t size, bool nullTerminate = false) noexcept;
//! Helper to duplicate data. //! Helper to duplicate data.
[[nodiscard]]
inline void* dupAligned(const void* data, size_t size, size_t alignment, bool nullTerminate = false) noexcept { inline void* dupAligned(const void* data, size_t size, size_t alignment, bool nullTerminate = false) noexcept {
align(alignment); align(alignment);
return dup(data, size, nullTerminate); return dup(data, size, nullTerminate);
} }
//! Helper to duplicate a formatted string, maximum size is 256 bytes. //! Helper to duplicate a formatted string, maximum size is 256 bytes.
[[nodiscard]]
ASMJIT_API char* sformat(const char* str, ...) noexcept; ASMJIT_API char* sformat(const char* str, ...) noexcept;
//! \} //! \}
#if !defined(ASMJIT_NO_DEPRECATED) #if !defined(ASMJIT_NO_DEPRECATED)
ASMJIT_DEPRECATED("Use Zone::minimumBlockSize() instead of Zone::blockSize()") [[deprecated("Use Zone::minimumBlockSize() instead of Zone::blockSize()")]]
ASMJIT_INLINE_NODEBUG size_t blockSize() const noexcept { return minimumBlockSize(); } ASMJIT_INLINE_NODEBUG size_t blockSize() const noexcept { return minimumBlockSize(); }
ASMJIT_DEPRECATED("Use Zone::hasStaticBlock() instead of Zone::isTemporary()") [[deprecated("Use Zone::hasStaticBlock() instead of Zone::isTemporary()")]]
ASMJIT_INLINE_NODEBUG bool isTemporary() const noexcept { return hasStaticBlock() != 0u; } ASMJIT_INLINE_NODEBUG bool isTemporary() const noexcept { return hasStaticBlock() != 0u; }
#endif #endif
}; };
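Swapping `ASMJIT_DEPRECATED(...)` for the standard `[[deprecated("...")]]` attribute (available since C++14, guaranteed by the new C++17 baseline) means every conforming compiler now emits the migration hint. For callers the fix is mechanical:

```cpp
#include <asmjit/core.h>

using namespace asmjit;

void queryBlockSize(const Zone& zone) {
  // size_t s = zone.blockSize();     // warns: use Zone::minimumBlockSize() instead.
  size_t s = zone.minimumBlockSize(); // the replacement named by the attribute.
  (void)s;
}
```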
@@ -402,24 +432,22 @@ public:
// In short, we pool chunks of these sizes: // In short, we pool chunks of these sizes:
// [32, 64, 96, 128, 192, 256, 320, 384, 448, 512] // [32, 64, 96, 128, 192, 256, 320, 384, 448, 512]
enum : uint32_t { //! How many bytes per a low granularity pool (has to be at least 16).
//! How many bytes per a low granularity pool (has to be at least 16). static inline constexpr uint32_t kLoGranularity = 32;
kLoGranularity = 32, //! Number of slots of a low granularity pool.
//! Number of slots of a low granularity pool. static inline constexpr uint32_t kLoCount = 4;
kLoCount = 4, //! Maximum size of a block that can be allocated in a low granularity pool.
//! Maximum size of a block that can be allocated in a low granularity pool. static inline constexpr uint32_t kLoMaxSize = kLoGranularity * kLoCount;
kLoMaxSize = kLoGranularity * kLoCount,
//! How many bytes per a high granularity pool. //! How many bytes per a high granularity pool.
kHiGranularity = 64, static inline constexpr uint32_t kHiGranularity = 64;
//! Number of slots of a high granularity pool. //! Number of slots of a high granularity pool.
kHiCount = 6, static inline constexpr uint32_t kHiCount = 6;
//! Maximum size of a block that can be allocated in a high granularity pool. //! Maximum size of a block that can be allocated in a high granularity pool.
kHiMaxSize = kLoMaxSize + kHiGranularity * kHiCount, static inline constexpr uint32_t kHiMaxSize = kLoMaxSize + kHiGranularity * kHiCount;
//! Alignment of every pointer returned by `alloc()`. //! Alignment of every pointer returned by `alloc()`.
kBlockAlignment = kLoGranularity static inline constexpr uint32_t kBlockAlignment = kLoGranularity;
};
//! Single-linked list used to store unused chunks. //! Single-linked list used to store unused chunks.
struct Slot { struct Slot {
@@ -482,6 +510,7 @@ public:
//! \{ //! \{
//! Returns the assigned `Zone` of this allocator or null if this `ZoneAllocator` is not initialized. //! Returns the assigned `Zone` of this allocator or null if this `ZoneAllocator` is not initialized.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Zone* zone() const noexcept { return _zone; } ASMJIT_INLINE_NODEBUG Zone* zone() const noexcept { return _zone; }
//! \} //! \}
@@ -493,24 +522,30 @@ public:
//! Returns the slot index to be used for `size`. Returns `true` if a valid slot has been written to `slot` and //! Returns the slot index to be used for `size`. Returns `true` if a valid slot has been written to `slot` and
//! `allocatedSize` has been filled with slot exact size (`allocatedSize` can be equal or slightly greater than //! `allocatedSize` has been filled with slot exact size (`allocatedSize` can be equal or slightly greater than
//! `size`). //! `size`).
[[nodiscard]]
static inline bool _getSlotIndex(size_t size, uint32_t& slot) noexcept { static inline bool _getSlotIndex(size_t size, uint32_t& slot) noexcept {
ASMJIT_ASSERT(size > 0); ASMJIT_ASSERT(size > 0);
if (size > kHiMaxSize) if (size > kHiMaxSize) {
return false; return false;
}
if (size <= kLoMaxSize) if (size <= kLoMaxSize) {
slot = uint32_t((size - 1) / kLoGranularity); slot = uint32_t((size - 1) / kLoGranularity);
else }
else {
slot = uint32_t((size - kLoMaxSize - 1) / kHiGranularity) + kLoCount; slot = uint32_t((size - kLoMaxSize - 1) / kHiGranularity) + kLoCount;
}
return true; return true;
} }
//! \overload //! \overload
[[nodiscard]]
static inline bool _getSlotIndex(size_t size, uint32_t& slot, size_t& allocatedSize) noexcept { static inline bool _getSlotIndex(size_t size, uint32_t& slot, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(size > 0); ASMJIT_ASSERT(size > 0);
if (size > kHiMaxSize) if (size > kHiMaxSize) {
return false; return false;
}
if (size <= kLoMaxSize) { if (size <= kLoMaxSize) {
slot = uint32_t((size - 1) / kLoGranularity); slot = uint32_t((size - 1) / kLoGranularity);
@@ -531,14 +566,19 @@ public:
//! \{ //! \{
//! \cond INTERNAL //! \cond INTERNAL
[[nodiscard]]
ASMJIT_API void* _alloc(size_t size, size_t& allocatedSize) noexcept; ASMJIT_API void* _alloc(size_t size, size_t& allocatedSize) noexcept;
[[nodiscard]]
ASMJIT_API void* _allocZeroed(size_t size, size_t& allocatedSize) noexcept; ASMJIT_API void* _allocZeroed(size_t size, size_t& allocatedSize) noexcept;
ASMJIT_API void _releaseDynamic(void* p, size_t size) noexcept; ASMJIT_API void _releaseDynamic(void* p, size_t size) noexcept;
//! \endcond //! \endcond
//! Allocates `size` bytes of memory, ideally from an available pool. //! Allocates `size` bytes of memory, ideally from an available pool.
//! //!
//! \note `size` can't be zero, it will assert in debug mode in such case. //! \note `size` can't be zero, it will assert in debug mode in such case.
[[nodiscard]]
inline void* alloc(size_t size) noexcept { inline void* alloc(size_t size) noexcept {
ASMJIT_ASSERT(isInitialized()); ASMJIT_ASSERT(isInitialized());
size_t allocatedSize; size_t allocatedSize;
@@ -547,6 +587,7 @@ public:
//! Like `alloc(size)`, but provides a second argument `allocatedSize` that provides a way to know how big //! Like `alloc(size)`, but provides a second argument `allocatedSize` that provides a way to know how big
//! the block returned actually is. This is useful for containers to prevent growing too early. //! the block returned actually is. This is useful for containers to prevent growing too early.
[[nodiscard]]
inline void* alloc(size_t size, size_t& allocatedSize) noexcept { inline void* alloc(size_t size, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(isInitialized()); ASMJIT_ASSERT(isInitialized());
return _alloc(size, allocatedSize); return _alloc(size, allocatedSize);
@@ -554,11 +595,13 @@ public:
//! Like `alloc()`, but the return pointer is casted to `T*`. //! Like `alloc()`, but the return pointer is casted to `T*`.
template<typename T> template<typename T>
[[nodiscard]]
inline T* allocT(size_t size = sizeof(T)) noexcept { inline T* allocT(size_t size = sizeof(T)) noexcept {
return static_cast<T*>(alloc(size)); return static_cast<T*>(alloc(size));
} }
//! Like `alloc(size)`, but returns zeroed memory. //! Like `alloc(size)`, but returns zeroed memory.
[[nodiscard]]
inline void* allocZeroed(size_t size) noexcept { inline void* allocZeroed(size_t size) noexcept {
ASMJIT_ASSERT(isInitialized()); ASMJIT_ASSERT(isInitialized());
size_t allocatedSize; size_t allocatedSize;
@@ -566,6 +609,7 @@ public:
} }
//! Like `alloc(size, allocatedSize)`, but returns zeroed memory. //! Like `alloc(size, allocatedSize)`, but returns zeroed memory.
[[nodiscard]]
inline void* allocZeroed(size_t size, size_t& allocatedSize) noexcept { inline void* allocZeroed(size_t size, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(isInitialized()); ASMJIT_ASSERT(isInitialized());
return _allocZeroed(size, allocatedSize); return _allocZeroed(size, allocatedSize);
@@ -573,24 +617,29 @@ public:
//! Like `allocZeroed()`, but the return pointer is casted to `T*`. //! Like `allocZeroed()`, but the return pointer is casted to `T*`.
template<typename T> template<typename T>
[[nodiscard]]
inline T* allocZeroedT(size_t size = sizeof(T)) noexcept { inline T* allocZeroedT(size_t size = sizeof(T)) noexcept {
return static_cast<T*>(allocZeroed(size)); return static_cast<T*>(allocZeroed(size));
} }
//! Like `new(std::nothrow) T(...)`, but allocated by `Zone`. //! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
template<typename T> template<typename T>
[[nodiscard]]
inline T* newT() noexcept { inline T* newT() noexcept {
void* p = allocT<T>(); void* p = allocT<T>();
if (ASMJIT_UNLIKELY(!p)) if (ASMJIT_UNLIKELY(!p)) {
return nullptr; return nullptr;
}
return new(Support::PlacementNew{p}) T(); return new(Support::PlacementNew{p}) T();
} }
//! Like `new(std::nothrow) T(...)`, but allocated by `Zone`. //! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
template<typename T, typename... Args> template<typename T, typename... Args>
[[nodiscard]]
inline T* newT(Args&&... args) noexcept { inline T* newT(Args&&... args) noexcept {
void* p = allocT<T>(); void* p = allocT<T>();
if (ASMJIT_UNLIKELY(!p)) if (ASMJIT_UNLIKELY(!p)) {
return nullptr; return nullptr;
}
return new(Support::PlacementNew{p}) T(std::forward<Args>(args)...); return new(Support::PlacementNew{p}) T(std::forward<Args>(args)...);
} }
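`ZoneAllocator` layers small-block pooling on top of a `Zone`: requests up to `kHiMaxSize` come from per-size slots and can be returned with `release()`, larger ones fall back to malloc-backed dynamic blocks. A usage sketch; the constructor taking a `Zone*` is assumed from the library:

```cpp
#include <asmjit/core.h>

using namespace asmjit;

int main() {
  Zone zone(4096);
  ZoneAllocator allocator(&zone);

  // alloc(size, allocatedSize) reports the real slot size, which lets
  // containers grow into the whole slot instead of just the requested bytes.
  size_t allocatedSize;
  void* p = allocator.alloc(40, allocatedSize);
  if (p) {
    // Pooled blocks can be handed back and reused by later allocations.
    allocator.release(p, allocatedSize);
  }

  // newT<T>() placement-constructs an object in pooled memory; there is no
  // delete - the memory disappears when the zone is reset.
  struct Point { int x, y; Point(int x, int y) noexcept : x(x), y(y) {} };
  Point* pt = allocator.newT<Point>(1, 2);
  (void)pt;

  zone.reset();
  return 0;
}
```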


@@ -172,12 +172,12 @@ void ZoneHashBase::_rehash(ZoneAllocator* allocator, uint32_t primeIndex) noexce
uint32_t newCount = ZoneHash_primeArray[primeIndex].prime; uint32_t newCount = ZoneHash_primeArray[primeIndex].prime;
ZoneHashNode** oldData = _data; ZoneHashNode** oldData = _data;
ZoneHashNode** newData = reinterpret_cast<ZoneHashNode**>( ZoneHashNode** newData = reinterpret_cast<ZoneHashNode**>(allocator->allocZeroed(size_t(newCount) * sizeof(ZoneHashNode*)));
allocator->allocZeroed(size_t(newCount) * sizeof(ZoneHashNode*)));
// We can still store nodes into the table, but it will degrade. // We can still store nodes into the table, but it will degrade.
if (ASMJIT_UNLIKELY(newData == nullptr)) if (ASMJIT_UNLIKELY(newData == nullptr)) {
return; return;
}
uint32_t i; uint32_t i;
uint32_t oldCount = _bucketsCount; uint32_t oldCount = _bucketsCount;
@@ -201,8 +201,9 @@ void ZoneHashBase::_rehash(ZoneAllocator* allocator, uint32_t primeIndex) noexce
} }
} }
if (oldData != _embedded) if (oldData != _embedded) {
allocator->release(oldData, oldCount * sizeof(ZoneHashNode*)); allocator->release(oldData, oldCount * sizeof(ZoneHashNode*));
}
} }
// ZoneHashBase - Operations // ZoneHashBase - Operations
@@ -217,8 +218,9 @@ ZoneHashNode* ZoneHashBase::_insert(ZoneAllocator* allocator, ZoneHashNode* node
if (++_size > _bucketsGrow) { if (++_size > _bucketsGrow) {
uint32_t primeIndex = Support::min<uint32_t>(_primeIndex + 2, ASMJIT_ARRAY_SIZE(ZoneHash_primeArray) - 1); uint32_t primeIndex = Support::min<uint32_t>(_primeIndex + 2, ASMJIT_ARRAY_SIZE(ZoneHash_primeArray) - 1);
if (primeIndex > _primeIndex) if (primeIndex > _primeIndex) {
_rehash(allocator, primeIndex); _rehash(allocator, primeIndex);
}
} }
return node; return node;


@@ -73,7 +73,9 @@ public:
_primeIndex = other._primeIndex; _primeIndex = other._primeIndex;
_embedded[0] = other._embedded[0]; _embedded[0] = other._embedded[0];
if (_data == other._embedded) _data = _embedded; if (_data == other._embedded) {
_data = _embedded;
}
} }
inline void reset() noexcept { inline void reset() noexcept {
@@ -89,8 +91,9 @@ public:
inline void release(ZoneAllocator* allocator) noexcept { inline void release(ZoneAllocator* allocator) noexcept {
ZoneHashNode** oldData = _data; ZoneHashNode** oldData = _data;
if (oldData != _embedded) if (oldData != _embedded) {
allocator->release(oldData, _bucketsCount * sizeof(ZoneHashNode*)); allocator->release(oldData, _bucketsCount * sizeof(ZoneHashNode*));
}
reset(); reset();
} }
@@ -99,7 +102,10 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; } ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; } ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
//! \} //! \}
@@ -117,8 +123,13 @@ public:
std::swap(_primeIndex, other._primeIndex); std::swap(_primeIndex, other._primeIndex);
std::swap(_embedded[0], other._embedded[0]); std::swap(_embedded[0], other._embedded[0]);
if (_data == other._embedded) _data = _embedded; if (_data == other._embedded) {
if (other._data == _embedded) other._data = other._embedded; _data = _embedded;
}
if (other._data == _embedded) {
other._data = other._embedded;
}
} }
//! \cond INTERNAL //! \cond INTERNAL
@@ -145,7 +156,7 @@ class ZoneHash : public ZoneHashBase {
public: public:
ASMJIT_NONCOPYABLE(ZoneHash) ASMJIT_NONCOPYABLE(ZoneHash)
typedef NodeT Node; using Node = NodeT;
//! \name Construction & Destruction //! \name Construction & Destruction
//! \{ //! \{
@@ -164,12 +175,14 @@ public:
ASMJIT_INLINE_NODEBUG void swap(ZoneHash& other) noexcept { ZoneHashBase::_swap(other); } ASMJIT_INLINE_NODEBUG void swap(ZoneHash& other) noexcept { ZoneHashBase::_swap(other); }
template<typename KeyT> template<typename KeyT>
[[nodiscard]]
inline NodeT* get(const KeyT& key) const noexcept { inline NodeT* get(const KeyT& key) const noexcept {
uint32_t hashMod = _calcMod(key.hashCode()); uint32_t hashMod = _calcMod(key.hashCode());
NodeT* node = static_cast<NodeT*>(_data[hashMod]); NodeT* node = static_cast<NodeT*>(_data[hashMod]);
while (node && !key.matches(node)) while (node && !key.matches(node)) {
node = static_cast<NodeT*>(node->_hashNext); node = static_cast<NodeT*>(node->_hashNext);
}
return node; return node;
} }
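`ZoneHash` is an intrusive hash table: nodes derive from `ZoneHashNode` and carry their own link and hash code, while lookups go through a key object providing `hashCode()` and `matches()`, exactly as `get()` above expects. A sketch under stated assumptions - the `ZoneHashNode` constructor taking a hash code and the `insert(allocator, node)` signature are recalled from the library rather than shown in this hunk, and the node/key types are hypothetical:

```cpp
#include <asmjit/core.h>
#include <cstring>

using namespace asmjit;

// Hypothetical node type: the base class stores the hash code and the
// intrusive "next" pointer, the derived class adds the payload.
class NameNode : public ZoneHashNode {
public:
  const char* _name;
  NameNode(const char* name, uint32_t hashCode) noexcept
    : ZoneHashNode(hashCode), _name(name) {}
};

// Key object: ZoneHash::get() calls key.hashCode() and key.matches(node).
struct NameKey {
  const char* name;
  uint32_t hash;

  uint32_t hashCode() const noexcept { return hash; }
  bool matches(const NameNode* node) const noexcept { return std::strcmp(node->_name, name) == 0; }
};

// Tiny FNV-1a style hash, just for the sketch.
static uint32_t hashName(const char* s) noexcept {
  uint32_t h = 2166136261u;
  while (*s) h = (h ^ uint32_t(uint8_t(*s++))) * 16777619u;
  return h;
}

void example(ZoneAllocator& allocator, Zone& zone) {
  ZoneHash<NameNode> names;

  uint32_t h = hashName("rax");
  names.insert(&allocator, zone.newT<NameNode>("rax", h));

  NameNode* found = names.get(NameKey{"rax", h});
  (void)found;
}
```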


@@ -22,10 +22,8 @@ public:
//! \name Constants //! \name Constants
//! \{ //! \{
enum : size_t { static inline constexpr size_t kNodeIndexPrev = 0;
kNodeIndexPrev = 0, static inline constexpr size_t kNodeIndexNext = 1;
kNodeIndexNext = 1
};
//! \} //! \}
@@ -50,10 +48,16 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasPrev() const noexcept { return _listNodes[kNodeIndexPrev] != nullptr; } ASMJIT_INLINE_NODEBUG bool hasPrev() const noexcept { return _listNodes[kNodeIndexPrev] != nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasNext() const noexcept { return _listNodes[kNodeIndexNext] != nullptr; } ASMJIT_INLINE_NODEBUG bool hasNext() const noexcept { return _listNodes[kNodeIndexNext] != nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* prev() const noexcept { return _listNodes[kNodeIndexPrev]; } ASMJIT_INLINE_NODEBUG NodeT* prev() const noexcept { return _listNodes[kNodeIndexPrev]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* next() const noexcept { return _listNodes[kNodeIndexNext]; } ASMJIT_INLINE_NODEBUG NodeT* next() const noexcept { return _listNodes[kNodeIndexNext]; }
//! \} //! \}
@@ -68,10 +72,8 @@ public:
//! \name Constants //! \name Constants
//! \{ //! \{
enum : size_t { static inline constexpr size_t kNodeIndexFirst = 0;
kNodeIndexFirst = 0, static inline constexpr size_t kNodeIndexLast = 1;
kNodeIndexLast = 1
};
//! \} //! \}
@@ -100,8 +102,13 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _nodes[0] == nullptr; } ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _nodes[0] == nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* first() const noexcept { return _nodes[kNodeIndexFirst]; } ASMJIT_INLINE_NODEBUG NodeT* first() const noexcept { return _nodes[kNodeIndexFirst]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* last() const noexcept { return _nodes[kNodeIndexLast]; } ASMJIT_INLINE_NODEBUG NodeT* last() const noexcept { return _nodes[kNodeIndexLast]; }
//! \} //! \}
@@ -120,10 +127,12 @@ public:
node->_listNodes[!dir] = prev; node->_listNodes[!dir] = prev;
_nodes[dir] = node; _nodes[dir] = node;
if (prev) if (prev) {
prev->_listNodes[dir] = node; prev->_listNodes[dir] = node;
else }
else {
_nodes[!dir] = node; _nodes[!dir] = node;
}
} }
// Can be used to both append and prepend. // Can be used to both append and prepend.
@@ -134,10 +143,12 @@ public:
NodeT* next = ref->_listNodes[dir]; NodeT* next = ref->_listNodes[dir];
prev->_listNodes[dir] = node; prev->_listNodes[dir] = node;
if (next) if (next) {
next->_listNodes[!dir] = node; next->_listNodes[!dir] = node;
else }
else {
_nodes[dir] = node; _nodes[dir] = node;
}
node->_listNodes[!dir] = prev; node->_listNodes[!dir] = prev;
node->_listNodes[ dir] = next; node->_listNodes[ dir] = next;
@@ -162,6 +173,7 @@ public:
return node; return node;
} }
[[nodiscard]]
inline NodeT* popFirst() noexcept { inline NodeT* popFirst() noexcept {
NodeT* node = _nodes[0]; NodeT* node = _nodes[0];
ASMJIT_ASSERT(node != nullptr); ASMJIT_ASSERT(node != nullptr);
@@ -180,6 +192,7 @@ public:
return node; return node;
} }
[[nodiscard]]
inline NodeT* pop() noexcept { inline NodeT* pop() noexcept {
NodeT* node = _nodes[1]; NodeT* node = _nodes[1];
ASMJIT_ASSERT(node != nullptr); ASMJIT_ASSERT(node != nullptr);
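`ZoneListNode`/`ZoneList` implement an intrusive doubly-linked list: the prev/next pointers live inside the items, so the list itself never allocates. A sketch; `append()`/`prepend()` are assumed to be the public wrappers around `_addNode()` shown above, and error handling for `newT()` returning nullptr is omitted for brevity:

```cpp
#include <asmjit/core.h>

using namespace asmjit;

// The item embeds its own links by deriving from ZoneListNode.
class WorkItem : public ZoneListNode<WorkItem> {
public:
  int id;
  explicit WorkItem(int id) noexcept : id(id) {}
};

void example(Zone& zone) {
  ZoneList<WorkItem> queue;

  queue.append(zone.newT<WorkItem>(1));   // assumed public name for _addNode(node, last).
  queue.append(zone.newT<WorkItem>(2));
  queue.prepend(zone.newT<WorkItem>(0));  // assumed public name for _addNode(node, first).

  while (!queue.empty()) {
    WorkItem* item = queue.popFirst();    // now [[nodiscard]], so the result must be used.
    (void)item;
  }
}
```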


@@ -30,8 +30,9 @@ Error ZoneStackBase::_init(ZoneAllocator* allocator, size_t middleIndex) noexcep
if (allocator) { if (allocator) {
Block* block = static_cast<Block*>(allocator->alloc(kBlockSize)); Block* block = static_cast<Block*>(allocator->alloc(kBlockSize));
if (ASMJIT_UNLIKELY(!block)) if (ASMJIT_UNLIKELY(!block)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
block->_link[kBlockIndexPrev] = nullptr; block->_link[kBlockIndexPrev] = nullptr;
block->_link[kBlockIndexNext] = nullptr; block->_link[kBlockIndexNext] = nullptr;
@@ -56,8 +57,9 @@ Error ZoneStackBase::_prepareBlock(uint32_t side, size_t initialIndex) noexcept
ASMJIT_ASSERT(!prev->empty()); ASMJIT_ASSERT(!prev->empty());
Block* block = _allocator->allocT<Block>(kBlockSize); Block* block = _allocator->allocT<Block>(kBlockSize);
if (ASMJIT_UNLIKELY(!block)) if (ASMJIT_UNLIKELY(!block)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
block->_link[ side] = nullptr; block->_link[ side] = nullptr;
block->_link[!side] = prev; block->_link[!side] = prev;

View File

@@ -21,15 +21,13 @@ public:
//! \name Constants //! \name Constants
//! \{ //! \{
enum : size_t { static inline constexpr size_t kBlockIndexPrev = 0;
kBlockIndexPrev = 0, static inline constexpr size_t kBlockIndexNext = 1;
kBlockIndexNext = 1,
kBlockIndexFirst = 0, static inline constexpr size_t kBlockIndexFirst = 0;
kBlockIndexLast = 1, static inline constexpr size_t kBlockIndexLast = 1;
kBlockSize = ZoneAllocator::kHiMaxSize static inline constexpr size_t kBlockSize = ZoneAllocator::kHiMaxSize;
};
//! \} //! \}
@@ -44,32 +42,46 @@ public:
//! Pointer to the end of the array. //! Pointer to the end of the array.
void* _end; void* _end;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _start == _end; } ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _start == _end; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Block* prev() const noexcept { return _link[kBlockIndexPrev]; } ASMJIT_INLINE_NODEBUG Block* prev() const noexcept { return _link[kBlockIndexPrev]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Block* next() const noexcept { return _link[kBlockIndexNext]; } ASMJIT_INLINE_NODEBUG Block* next() const noexcept { return _link[kBlockIndexNext]; }
ASMJIT_INLINE_NODEBUG void setPrev(Block* block) noexcept { _link[kBlockIndexPrev] = block; } ASMJIT_INLINE_NODEBUG void setPrev(Block* block) noexcept { _link[kBlockIndexPrev] = block; }
ASMJIT_INLINE_NODEBUG void setNext(Block* block) noexcept { _link[kBlockIndexNext] = block; } ASMJIT_INLINE_NODEBUG void setNext(Block* block) noexcept { _link[kBlockIndexNext] = block; }
template<typename T> template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* start() const noexcept { return static_cast<T*>(_start); } ASMJIT_INLINE_NODEBUG T* start() const noexcept { return static_cast<T*>(_start); }
template<typename T> template<typename T>
ASMJIT_INLINE_NODEBUG void setStart(T* start) noexcept { _start = static_cast<void*>(start); } ASMJIT_INLINE_NODEBUG void setStart(T* start) noexcept { _start = static_cast<void*>(start); }
template<typename T> template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* end() const noexcept { return (T*)_end; } ASMJIT_INLINE_NODEBUG T* end() const noexcept { return (T*)_end; }
template<typename T> template<typename T>
ASMJIT_INLINE_NODEBUG void setEnd(T* end) noexcept { _end = (void*)end; } ASMJIT_INLINE_NODEBUG void setEnd(T* end) noexcept { _end = (void*)end; }
template<typename T> template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const T* data() const noexcept { return (const T*)((const uint8_t*)(this) + sizeof(Block)); } ASMJIT_INLINE_NODEBUG const T* data() const noexcept { return (const T*)((const uint8_t*)(this) + sizeof(Block)); }
template<typename T> template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* data() noexcept { return (T*)((uint8_t*)(this) + sizeof(Block)); } ASMJIT_INLINE_NODEBUG T* data() noexcept { return (T*)((uint8_t*)(this) + sizeof(Block)); }
template<typename T> template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool canPrepend() const noexcept { return _start > data<void>(); } ASMJIT_INLINE_NODEBUG bool canPrepend() const noexcept { return _start > data<void>(); }
template<typename T> template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool canAppend() const noexcept { ASMJIT_INLINE_NODEBUG bool canAppend() const noexcept {
size_t kNumBlockItems = (kBlockSize - sizeof(Block)) / sizeof(T); size_t kNumBlockItems = (kBlockSize - sizeof(Block)) / sizeof(T);
size_t kStartBlockIndex = sizeof(Block); size_t kStartBlockIndex = sizeof(Block);
@@ -107,8 +119,10 @@ public:
//! \{ //! \{
//! Returns `ZoneAllocator` attached to this container. //! Returns `ZoneAllocator` attached to this container.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept { return _allocator; } ASMJIT_INLINE_NODEBUG ZoneAllocator* allocator() const noexcept { return _allocator; }
[[nodiscard]]
inline bool empty() const noexcept { inline bool empty() const noexcept {
ASMJIT_ASSERT(isInitialized()); ASMJIT_ASSERT(isInitialized());
return _block[0]->start<void>() == _block[1]->end<void>(); return _block[0]->start<void>() == _block[1]->end<void>();
@@ -120,7 +134,9 @@ public:
//! \name Internal //! \name Internal
//! \{ //! \{
[[nodiscard]]
ASMJIT_API Error _prepareBlock(uint32_t side, size_t initialIndex) noexcept; ASMJIT_API Error _prepareBlock(uint32_t side, size_t initialIndex) noexcept;
ASMJIT_API void _cleanupBlock(uint32_t side, size_t middleIndex) noexcept; ASMJIT_API void _cleanupBlock(uint32_t side, size_t middleIndex) noexcept;
//! \} //! \}
@@ -136,12 +152,10 @@ public:
//! \name Constants //! \name Constants
//! \{ //! \{
enum : uint32_t { static inline constexpr uint32_t kNumBlockItems = uint32_t((kBlockSize - sizeof(Block)) / sizeof(T));
kNumBlockItems = uint32_t((kBlockSize - sizeof(Block)) / sizeof(T)), static inline constexpr uint32_t kStartBlockIndex = uint32_t(sizeof(Block));
kStartBlockIndex = uint32_t(sizeof(Block)), static inline constexpr uint32_t kMidBlockIndex = uint32_t(kStartBlockIndex + (kNumBlockItems / 2) * sizeof(T));
kMidBlockIndex = uint32_t(kStartBlockIndex + (kNumBlockItems / 2) * sizeof(T)), static inline constexpr uint32_t kEndBlockIndex = uint32_t(kStartBlockIndex + (kNumBlockItems ) * sizeof(T));
kEndBlockIndex = uint32_t(kStartBlockIndex + (kNumBlockItems ) * sizeof(T))
};
//! \} //! \}
@@ -191,6 +205,7 @@ public:
return kErrorOk; return kErrorOk;
} }
[[nodiscard]]
inline T popFirst() noexcept { inline T popFirst() noexcept {
ASMJIT_ASSERT(isInitialized()); ASMJIT_ASSERT(isInitialized());
ASMJIT_ASSERT(!empty()); ASMJIT_ASSERT(!empty());
@@ -202,12 +217,14 @@ public:
T item = *ptr++; T item = *ptr++;
block->setStart(ptr); block->setStart(ptr);
if (block->empty()) if (block->empty()) {
_cleanupBlock(kBlockIndexFirst, kMidBlockIndex); _cleanupBlock(kBlockIndexFirst, kMidBlockIndex);
}
return item; return item;
} }
[[nodiscard]]
inline T pop() noexcept { inline T pop() noexcept {
ASMJIT_ASSERT(isInitialized()); ASMJIT_ASSERT(isInitialized());
ASMJIT_ASSERT(!empty()); ASMJIT_ASSERT(!empty());
@@ -221,8 +238,9 @@ public:
ASMJIT_ASSERT(ptr >= block->start<T>()); ASMJIT_ASSERT(ptr >= block->start<T>());
block->setEnd(ptr); block->setEnd(ptr);
if (block->empty()) if (block->empty()) {
_cleanupBlock(kBlockIndexLast, kMidBlockIndex); _cleanupBlock(kBlockIndexLast, kMidBlockIndex);
}
return item; return item;
} }
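`ZoneStack<T>` is a deque of trivially-copyable items stored in `ZoneAllocator`-backed blocks, growable and poppable at both ends. A usage sketch; `init()`, `append()` and `prepend()` are assumed signatures - only the pop side appears in the hunk above:

```cpp
#include <asmjit/core.h>

using namespace asmjit;

void example(ZoneAllocator& allocator) {
  ZoneStack<int> work;

  if (work.init(&allocator) != kErrorOk) {
    return;
  }

  (void)work.append(10);   // push at the back.
  (void)work.append(20);
  (void)work.prepend(5);   // push at the front.

  while (!work.empty()) {
    int value = work.popFirst(); // [[nodiscard]] now forces the popped value to be used.
    (void)value;
  }
}
```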


@@ -63,10 +63,8 @@ public:
//! \name Constants //! \name Constants
//! \{ //! \{
enum : uint32_t { static inline constexpr uint32_t kWholeSize = (N > sizeof(ZoneStringBase)) ? uint32_t(N) : uint32_t(sizeof(ZoneStringBase));
kWholeSize = (N > sizeof(ZoneStringBase)) ? uint32_t(N) : uint32_t(sizeof(ZoneStringBase)), static inline constexpr uint32_t kMaxEmbeddedSize = kWholeSize - 5;
kMaxEmbeddedSize = kWholeSize - 5
};
//! \} //! \}
@@ -92,14 +90,19 @@ public:
//! \{ //! \{
//! Tests whether the string is empty. //! Tests whether the string is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _base._size == 0; } ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _base._size == 0; }
//! Returns the string data. //! Returns the string data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* data() const noexcept { return _base._size <= kMaxEmbeddedSize ? _base._embedded : _base._external; } ASMJIT_INLINE_NODEBUG const char* data() const noexcept { return _base._size <= kMaxEmbeddedSize ? _base._embedded : _base._external; }
//! Returns the string size. //! Returns the string size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t size() const noexcept { return _base._size; } ASMJIT_INLINE_NODEBUG uint32_t size() const noexcept { return _base._size; }
//! Tests whether the string is embedded (i.e. not dynamically allocated). //! Tests whether the string is embedded (i.e. not dynamically allocated).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isEmbedded() const noexcept { return _base._size <= kMaxEmbeddedSize; } ASMJIT_INLINE_NODEBUG bool isEmbedded() const noexcept { return _base._size <= kMaxEmbeddedSize; }
//! Copies a new `data` of the given `size` to the string. //! Copies a new `data` of the given `size` to the string.


@@ -16,7 +16,7 @@ ASMJIT_BEGIN_NAMESPACE
#if defined(ASMJIT_TEST) #if defined(ASMJIT_TEST)
template<typename NodeT> template<typename NodeT>
struct ZoneRBUnit { struct ZoneRBUnit {
typedef ZoneTree<NodeT> Tree; using Tree = ZoneTree<NodeT>;
static void verifyTree(Tree& tree) noexcept { static void verifyTree(Tree& tree) noexcept {
EXPECT_GT(checkHeight(static_cast<NodeT*>(tree._root)), 0); EXPECT_GT(checkHeight(static_cast<NodeT*>(tree._root)), 0);


@@ -25,10 +25,8 @@ public:
//! \name Constants //! \name Constants
//! \{ //! \{
enum : uintptr_t { static inline constexpr uintptr_t kRedMask = 0x1;
kRedMask = 0x1, static inline constexpr uintptr_t kPtrMask = ~kRedMask;
kPtrMask = ~kRedMask
};
//! \} //! \}
@@ -49,17 +47,28 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isRed() const noexcept { return static_cast<bool>(_rbNodeData[0] & kRedMask); } ASMJIT_INLINE_NODEBUG bool isRed() const noexcept { return static_cast<bool>(_rbNodeData[0] & kRedMask); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasChild(size_t i) const noexcept { return _rbNodeData[i] > kRedMask; } ASMJIT_INLINE_NODEBUG bool hasChild(size_t i) const noexcept { return _rbNodeData[i] > kRedMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLeft() const noexcept { return _rbNodeData[0] > kRedMask; } ASMJIT_INLINE_NODEBUG bool hasLeft() const noexcept { return _rbNodeData[0] > kRedMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRight() const noexcept { return _rbNodeData[1] != 0; } ASMJIT_INLINE_NODEBUG bool hasRight() const noexcept { return _rbNodeData[1] != 0; }
template<typename T = ZoneTreeNode> template<typename T = ZoneTreeNode>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* child(size_t i) const noexcept { return static_cast<T*>(_getChild(i)); } ASMJIT_INLINE_NODEBUG T* child(size_t i) const noexcept { return static_cast<T*>(_getChild(i)); }
template<typename T = ZoneTreeNode> template<typename T = ZoneTreeNode>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* left() const noexcept { return static_cast<T*>(_getLeft()); } ASMJIT_INLINE_NODEBUG T* left() const noexcept { return static_cast<T*>(_getLeft()); }
template<typename T = ZoneTreeNode> template<typename T = ZoneTreeNode>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* right() const noexcept { return static_cast<T*>(_getRight()); } ASMJIT_INLINE_NODEBUG T* right() const noexcept { return static_cast<T*>(_getRight()); }
//! \} //! \}
@@ -68,8 +77,13 @@ public:
//! \name Internal //! \name Internal
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneTreeNode* _getChild(size_t i) const noexcept { return (ZoneTreeNode*)(_rbNodeData[i] & kPtrMask); } ASMJIT_INLINE_NODEBUG ZoneTreeNode* _getChild(size_t i) const noexcept { return (ZoneTreeNode*)(_rbNodeData[i] & kPtrMask); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneTreeNode* _getLeft() const noexcept { return (ZoneTreeNode*)(_rbNodeData[0] & kPtrMask); } ASMJIT_INLINE_NODEBUG ZoneTreeNode* _getLeft() const noexcept { return (ZoneTreeNode*)(_rbNodeData[0] & kPtrMask); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ZoneTreeNode* _getRight() const noexcept { return (ZoneTreeNode*)(_rbNodeData[1]); } ASMJIT_INLINE_NODEBUG ZoneTreeNode* _getRight() const noexcept { return (ZoneTreeNode*)(_rbNodeData[1]); }
ASMJIT_INLINE_NODEBUG void _setChild(size_t i, ZoneTreeNode* node) noexcept { _rbNodeData[i] = (_rbNodeData[i] & kRedMask) | (uintptr_t)node; } ASMJIT_INLINE_NODEBUG void _setChild(size_t i, ZoneTreeNode* node) noexcept { _rbNodeData[i] = (_rbNodeData[i] & kRedMask) | (uintptr_t)node; }
@@ -80,6 +94,7 @@ public:
ASMJIT_INLINE_NODEBUG void _makeBlack() noexcept { _rbNodeData[0] &= kPtrMask; } ASMJIT_INLINE_NODEBUG void _makeBlack() noexcept { _rbNodeData[0] &= kPtrMask; }
//! Tests whether the node is RED (RED node must be non-null and must have RED flag set). //! Tests whether the node is RED (RED node must be non-null and must have RED flag set).
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool _isValidRed(ZoneTreeNode* node) noexcept { return node && node->isRed(); } static ASMJIT_INLINE_NODEBUG bool _isValidRed(ZoneTreeNode* node) noexcept { return node && node->isRed(); }
//! \} //! \}
@@ -103,8 +118,13 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* child(size_t i) const noexcept { return static_cast<NodeT*>(_getChild(i)); } ASMJIT_INLINE_NODEBUG NodeT* child(size_t i) const noexcept { return static_cast<NodeT*>(_getChild(i)); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* left() const noexcept { return static_cast<NodeT*>(_getLeft()); } ASMJIT_INLINE_NODEBUG NodeT* left() const noexcept { return static_cast<NodeT*>(_getLeft()); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* right() const noexcept { return static_cast<NodeT*>(_getRight()); } ASMJIT_INLINE_NODEBUG NodeT* right() const noexcept { return static_cast<NodeT*>(_getRight()); }
//! \} //! \}
@@ -116,7 +136,7 @@ class ZoneTree {
public: public:
ASMJIT_NONCOPYABLE(ZoneTree) ASMJIT_NONCOPYABLE(ZoneTree)
typedef NodeT Node; using Node = NodeT;
NodeT* _root {}; NodeT* _root {};
//! \name Construction & Destruction //! \name Construction & Destruction
@@ -132,7 +152,10 @@ public:
//! \name Accessors //! \name Accessors
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _root == nullptr; } ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _root == nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* root() const noexcept { return static_cast<NodeT*>(_root); } ASMJIT_INLINE_NODEBUG NodeT* root() const noexcept { return static_cast<NodeT*>(_root); }
//! \} //! \}
@@ -192,14 +215,17 @@ public:
} }
// Stop if found. // Stop if found.
if (q == node) if (q == node) {
break; break;
}
last = dir; last = dir;
dir = cmp(*static_cast<NodeT*>(q), *static_cast<NodeT*>(node)) < 0; dir = cmp(*static_cast<NodeT*>(q), *static_cast<NodeT*>(node)) < 0;
// Update helpers. // Update helpers.
if (g) t = g; if (g) {
t = g;
}
g = p; g = p;
p = q; p = q;
@@ -319,15 +345,20 @@ public:
// Update root and make it black. // Update root and make it black.
_root = static_cast<NodeT*>(head._getRight()); _root = static_cast<NodeT*>(head._getRight());
if (_root) _root->_makeBlack(); if (_root) {
_root->_makeBlack();
}
} }
template<typename KeyT, typename CompareT = Support::Compare<Support::SortOrder::kAscending>> template<typename KeyT, typename CompareT = Support::Compare<Support::SortOrder::kAscending>>
[[nodiscard]]
inline NodeT* get(const KeyT& key, const CompareT& cmp = CompareT()) const noexcept { inline NodeT* get(const KeyT& key, const CompareT& cmp = CompareT()) const noexcept {
ZoneTreeNode* node = _root; ZoneTreeNode* node = _root;
while (node) { while (node) {
auto result = cmp(*static_cast<const NodeT*>(node), key); auto result = cmp(*static_cast<const NodeT*>(node), key);
if (result == 0) break; if (result == 0) {
break;
}
// Go left or right depending on the `result`. // Go left or right depending on the `result`.
node = node->_getChild(result < 0); node = node->_getChild(result < 0);
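`ZoneTree` is an intrusive red-black tree: nodes derive from `ZoneTreeNodeT` and the caller supplies a comparator that, as in `get()` above, returns a negative, zero or positive result for node-vs-node and node-vs-key comparisons. A sketch; `insert(node, cmp)` is assumed to mirror the `get(key, cmp)` signature shown here, the node type is hypothetical, and `newT()` error handling is omitted:

```cpp
#include <asmjit/core.h>

using namespace asmjit;

// The color bit is packed into the child pointers, so a node costs two
// pointers plus the payload.
class RangeNode : public ZoneTreeNodeT<RangeNode> {
public:
  uint64_t key;
  explicit RangeNode(uint64_t key) noexcept : key(key) {}
};

// Returns <0, 0 or >0 - exactly how the tree code interprets `result`.
struct RangeCompare {
  int operator()(const RangeNode& a, const RangeNode& b) const noexcept {
    return a.key < b.key ? -1 : a.key > b.key ? 1 : 0;
  }
  int operator()(const RangeNode& a, uint64_t key) const noexcept {
    return a.key < key ? -1 : a.key > key ? 1 : 0;
  }
};

void example(Zone& zone) {
  ZoneTree<RangeNode> tree;

  tree.insert(zone.newT<RangeNode>(uint64_t(100)), RangeCompare{});
  tree.insert(zone.newT<RangeNode>(uint64_t(200)), RangeCompare{});

  RangeNode* node = tree.get(uint64_t(150), RangeCompare{});
  (void)node; // nullptr - nothing is keyed by 150.
}
```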


@@ -16,7 +16,7 @@ ASMJIT_BEGIN_NAMESPACE
// ZoneVector is used as an array to hold short-lived data structures used during code generation. The growing // ZoneVector is used as an array to hold short-lived data structures used during code generation. The growing
// strategy is simple - use small capacity at the beginning (very good for ZoneAllocator) and then grow quicker // strategy is simple - use small capacity at the beginning (very good for ZoneAllocator) and then grow quicker
// to prevent successive reallocations. // to prevent successive reallocations.
static ASMJIT_FORCE_INLINE uint32_t ZoneVector_growCapacity(uint32_t current, uint32_t growMinimum, uint32_t sizeOfT) noexcept { static ASMJIT_INLINE uint32_t ZoneVector_growCapacity(uint32_t current, uint32_t growMinimum, uint32_t sizeOfT) noexcept {
static constexpr size_t kGrowThreshold = Globals::kGrowThreshold; static constexpr size_t kGrowThreshold = Globals::kGrowThreshold;
size_t byteSize = size_t(current) * sizeOfT; size_t byteSize = size_t(current) * sizeOfT;
@@ -53,8 +53,9 @@ static ASMJIT_FORCE_INLINE uint32_t ZoneVector_growCapacity(uint32_t current, ui
// Bail to `growMinimum` in case of overflow - should never happen as it's unlikely we would hit this on a 32-bit // Bail to `growMinimum` in case of overflow - should never happen as it's unlikely we would hit this on a 32-bit
// machine (consecutive near 4GiB allocation is impossible, and this should never happen on 64-bit machine as we // machine (consecutive near 4GiB allocation is impossible, and this should never happen on 64-bit machine as we
// use 32-bit size & capacity, so overflow of a 64-bit integer is not possible). Added just as an extreme measure. // use 32-bit size & capacity, so overflow of a 64-bit integer is not possible). Added just as an extreme measure.
if (byteSize < minimumByteSize) if (byteSize < minimumByteSize) {
return growMinimum; return growMinimum;
}
} }
} }
@@ -62,41 +63,48 @@ static ASMJIT_FORCE_INLINE uint32_t ZoneVector_growCapacity(uint32_t current, ui
return uint32_t(Support::min<size_t>(n, 0xFFFFFFFFu)); return uint32_t(Support::min<size_t>(n, 0xFFFFFFFFu));
} }
static ASMJIT_FORCE_INLINE bool ZoneVector_byteSizeIsSafe(size_t nBytes, uint32_t n) noexcept { static ASMJIT_INLINE bool ZoneVector_byteSizeIsSafe(size_t nBytes, uint32_t n) noexcept {
if (sizeof(uint32_t) < sizeof(size_t)) if constexpr (sizeof(uint32_t) < sizeof(size_t)) {
return true; // there is no problem when running on a 64-bit machine. return true; // there is no problem when running on a 64-bit machine.
else }
else {
return nBytes >= size_t(n); return nBytes >= size_t(n);
}
}; };
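`if constexpr` is another C++17 feature the new baseline unlocks: the untaken branch is discarded at compile time, so on 64-bit targets the comparison above is never even instantiated and cannot trigger tautological-comparison warnings. A standalone illustration of the same idiom:

```cpp
#include <cstddef>
#include <cstdint>

// True when a byte count computed in size_t is guaranteed to cover `n` items.
// The check only has to do real work when the count type is as wide as size_t
// (e.g. a uint32_t count on a 32-bit target).
template<typename CountT>
static bool byteSizeIsSafe(size_t nBytes, CountT n) noexcept {
  if constexpr (sizeof(CountT) < sizeof(size_t)) {
    (void)nBytes; (void)n;
    return true;                // A wider size_t cannot overflow from CountT.
  }
  else {
    return nBytes >= size_t(n); // Instantiated only when the widths match.
  }
}
```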
Error ZoneVectorBase::_grow(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept { Error ZoneVectorBase::_grow(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
uint32_t capacity = _capacity; uint32_t capacity = _capacity;
uint32_t after = _size; uint32_t after = _size;
if (ASMJIT_UNLIKELY(std::numeric_limits<uint32_t>::max() - n < after)) if (ASMJIT_UNLIKELY(std::numeric_limits<uint32_t>::max() - n < after)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
after += n; after += n;
if (capacity >= after) if (capacity >= after) {
return kErrorOk; return kErrorOk;
}
return _reserve(allocator, sizeOfT, ZoneVector_growCapacity(capacity, after, sizeOfT)); return _reserve(allocator, sizeOfT, ZoneVector_growCapacity(capacity, after, sizeOfT));
} }
Error ZoneVectorBase::_reserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept { Error ZoneVectorBase::_reserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
uint32_t oldCapacity = _capacity; uint32_t oldCapacity = _capacity;
if (oldCapacity >= n) if (oldCapacity >= n) {
return kErrorOk; return kErrorOk;
}
size_t nBytes = size_t(n) * sizeOfT; size_t nBytes = size_t(n) * sizeOfT;
if (ASMJIT_UNLIKELY(!ZoneVector_byteSizeIsSafe(nBytes, n))) if (ASMJIT_UNLIKELY(!ZoneVector_byteSizeIsSafe(nBytes, n))) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
size_t allocatedBytes; size_t allocatedBytes;
uint8_t* newData = static_cast<uint8_t*>(allocator->alloc(nBytes, allocatedBytes)); uint8_t* newData = static_cast<uint8_t*>(allocator->alloc(nBytes, allocatedBytes));
if (ASMJIT_UNLIKELY(!newData)) if (ASMJIT_UNLIKELY(!newData)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
uint32_t newCapacity = uint32_t(allocatedBytes / sizeOfT); uint32_t newCapacity = uint32_t(allocatedBytes / sizeOfT);
ASMJIT_ASSERT(newCapacity >= n); ASMJIT_ASSERT(newCapacity >= n);
@@ -115,8 +123,9 @@ Error ZoneVectorBase::_reserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint3
Error ZoneVectorBase::_growingReserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept { Error ZoneVectorBase::_growingReserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
uint32_t capacity = _capacity; uint32_t capacity = _capacity;
if (capacity >= n) if (capacity >= n) {
return kErrorOk; return kErrorOk;
}
return _reserve(allocator, sizeOfT, ZoneVector_growCapacity(capacity, n, sizeOfT)); return _reserve(allocator, sizeOfT, ZoneVector_growCapacity(capacity, n, sizeOfT));
} }
@@ -128,8 +137,9 @@ Error ZoneVectorBase::_resize(ZoneAllocator* allocator, uint32_t sizeOfT, uint32
ASMJIT_ASSERT(_capacity >= n); ASMJIT_ASSERT(_capacity >= n);
} }
if (size < n) if (size < n) {
memset(static_cast<uint8_t*>(_data) + size_t(size) * sizeOfT, 0, size_t(n - size) * sizeOfT); memset(static_cast<uint8_t*>(_data) + size_t(size) * sizeOfT, 0, size_t(n - size) * sizeOfT);
}
_size = n; _size = n;
return kErrorOk; return kErrorOk;
@@ -150,16 +160,18 @@ Error ZoneBitVector::copyFrom(ZoneAllocator* allocator, const ZoneBitVector& oth
if (newSize > _capacity) { if (newSize > _capacity) {
// Realloc needed... Calculate the minimum capacity (in bytes) required. // Realloc needed... Calculate the minimum capacity (in bytes) required.
uint32_t minimumCapacityInBits = Support::alignUp<uint32_t>(newSize, kBitWordSizeInBits); uint32_t minimumCapacityInBits = Support::alignUp<uint32_t>(newSize, kBitWordSizeInBits);
if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize)) if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
// Normalize to bytes. // Normalize to bytes.
uint32_t minimumCapacity = minimumCapacityInBits / 8; uint32_t minimumCapacity = minimumCapacityInBits / 8;
size_t allocatedCapacity; size_t allocatedCapacity;
BitWord* newData = static_cast<BitWord*>(allocator->alloc(minimumCapacity, allocatedCapacity)); BitWord* newData = static_cast<BitWord*>(allocator->alloc(minimumCapacity, allocatedCapacity));
if (ASMJIT_UNLIKELY(!newData)) if (ASMJIT_UNLIKELY(!newData)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
// `allocatedCapacity` now contains the number of bytes, but we need bits. // `allocatedCapacity` now contains the number of bytes, but we need bits.
size_t allocatedCapacityInBits = allocatedCapacity * 8; size_t allocatedCapacityInBits = allocatedCapacity * 8;
@@ -167,11 +179,13 @@ Error ZoneBitVector::copyFrom(ZoneAllocator* allocator, const ZoneBitVector& oth
// Arithmetic overflow should normally not happen. If it happens we just // Arithmetic overflow should normally not happen. If it happens we just
// change the `allocatedCapacityInBits` to the `minimumCapacityInBits` as // change the `allocatedCapacityInBits` to the `minimumCapacityInBits` as
// this value is still safe to pass to `_allocator->release(...)`. // this value is still safe to pass to `_allocator->release(...)`.
if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity)) if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity)) {
allocatedCapacityInBits = minimumCapacityInBits; allocatedCapacityInBits = minimumCapacityInBits;
}
if (data) if (data) {
allocator->release(data, _capacity / 8); allocator->release(data, _capacity / 8);
}
data = newData; data = newData;
_data = data; _data = data;
@@ -197,8 +211,9 @@ Error ZoneBitVector::_resize(ZoneAllocator* allocator, uint32_t newSize, uint32_
// happens when `newSize` is a multiple of `kBitWordSizeInBits` like 64, 128, // happens when `newSize` is a multiple of `kBitWordSizeInBits` like 64, 128,
// and so on. In that case don't change anything as that would mean setting // and so on. In that case don't change anything as that would mean setting
// bits outside of the `_size`. // bits outside of the `_size`.
if (bit) if (bit) {
_data[idx] &= (BitWord(1) << bit) - 1u; _data[idx] &= (BitWord(1) << bit) - 1u;
}
_size = newSize; _size = newSize;
return kErrorOk; return kErrorOk;
@@ -211,16 +226,18 @@ Error ZoneBitVector::_resize(ZoneAllocator* allocator, uint32_t newSize, uint32_
// Realloc needed, calculate the minimum capacity (in bytes) required. // Realloc needed, calculate the minimum capacity (in bytes) required.
uint32_t minimumCapacityInBits = Support::alignUp<uint32_t>(idealCapacity, kBitWordSizeInBits); uint32_t minimumCapacityInBits = Support::alignUp<uint32_t>(idealCapacity, kBitWordSizeInBits);
if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize)) if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
// Normalize to bytes. // Normalize to bytes.
uint32_t minimumCapacity = minimumCapacityInBits / 8; uint32_t minimumCapacity = minimumCapacityInBits / 8;
size_t allocatedCapacity; size_t allocatedCapacity;
BitWord* newData = static_cast<BitWord*>(allocator->alloc(minimumCapacity, allocatedCapacity)); BitWord* newData = static_cast<BitWord*>(allocator->alloc(minimumCapacity, allocatedCapacity));
if (ASMJIT_UNLIKELY(!newData)) if (ASMJIT_UNLIKELY(!newData)) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
// `allocatedCapacity` now contains the number of bytes, but we need bits. // `allocatedCapacity` now contains the number of bytes, but we need bits.
size_t allocatedCapacityInBits = allocatedCapacity * 8; size_t allocatedCapacityInBits = allocatedCapacity * 8;
@@ -228,13 +245,15 @@ Error ZoneBitVector::_resize(ZoneAllocator* allocator, uint32_t newSize, uint32_
// Arithmetic overflow should normally not happen. If it happens we just // Arithmetic overflow should normally not happen. If it happens we just
// change the `allocatedCapacityInBits` to the `minimumCapacityInBits` as // change the `allocatedCapacityInBits` to the `minimumCapacityInBits` as
// this value is still safe to pass to `_allocator->release(...)`. // this value is still safe to pass to `_allocator->release(...)`.
if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity)) if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity)) {
allocatedCapacityInBits = minimumCapacityInBits; allocatedCapacityInBits = minimumCapacityInBits;
}
_copyBits(newData, data, _wordsPerBits(oldSize)); _copyBits(newData, data, _wordsPerBits(oldSize));
if (data) if (data) {
allocator->release(data, _capacity / 8); allocator->release(data, _capacity / 8);
}
data = newData; data = newData;
_data = data; _data = data;
@@ -274,8 +293,9 @@ Error ZoneBitVector::_resize(ZoneAllocator* allocator, uint32_t newSize, uint32_
while (idx < endIdx) data[idx++] = pattern; while (idx < endIdx) data[idx++] = pattern;
// Clear unused bits of the last bit-word. // Clear unused bits of the last bit-word.
if (endBit) if (endBit) {
data[endIdx - 1] = pattern & ((BitWord(1) << endBit) - 1); data[endIdx - 1] = pattern & ((BitWord(1) << endBit) - 1);
}
_size = newSize; _size = newSize;
return kErrorOk; return kErrorOk;
@@ -286,16 +306,20 @@ Error ZoneBitVector::_append(ZoneAllocator* allocator, bool value) noexcept {
uint32_t newSize = _size + 1; uint32_t newSize = _size + 1;
uint32_t idealCapacity = _capacity; uint32_t idealCapacity = _capacity;
if (idealCapacity < 128) if (idealCapacity < 128) {
idealCapacity = 128; idealCapacity = 128;
else if (idealCapacity <= kThreshold) }
else if (idealCapacity <= kThreshold) {
idealCapacity *= 2; idealCapacity *= 2;
else }
else {
idealCapacity += kThreshold; idealCapacity += kThreshold;
}
if (ASMJIT_UNLIKELY(idealCapacity < _capacity)) { if (ASMJIT_UNLIKELY(idealCapacity < _capacity)) {
if (ASMJIT_UNLIKELY(_size == std::numeric_limits<uint32_t>::max())) if (ASMJIT_UNLIKELY(_size == std::numeric_limits<uint32_t>::max())) {
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
}
idealCapacity = newSize; idealCapacity = newSize;
} }
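The capacity policy visible in ZoneBitVector::_append() above (start at 128 bits, double while at or below a threshold, then grow linearly) can be summarized as a small free function. This is only a sketch: kThreshold's real value is not shown in this diff, so the constant below is a placeholder.

#include <cstdint>

// Sketch of the ideal-capacity policy from ZoneBitVector::_append() above.
// kThresholdPlaceholder stands in for the library's kThreshold (value unknown here).
static constexpr std::uint32_t kThresholdPlaceholder = 8u * 1024u * 1024u; // assumption

std::uint32_t nextIdealBitCapacity(std::uint32_t capacity) {
  if (capacity < 128u)
    return 128u;                               // minimum starting capacity
  if (capacity <= kThresholdPlaceholder)
    return capacity * 2u;                      // exponential growth while small
  return capacity + kThresholdPlaceholder;     // linear growth once large
}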
@@ -356,7 +380,7 @@ static void test_zone_vector(ZoneAllocator* allocator, const char* typeName) {
EXPECT_EQ(fsum, rsum); EXPECT_EQ(fsum, rsum);
vec.release(allocator); vec.release(allocator);
INFO("ZoneBitVector::growingReserve()"); INFO("ZoneVector<%s>::growingReserve()", typeName);
for (uint32_t j = 0; j < 40 / sizeof(T); j += 8) { for (uint32_t j = 0; j < 40 / sizeof(T); j += 8) {
EXPECT_EQ(vec.growingReserve(allocator, j * kMiB), kErrorOk); EXPECT_EQ(vec.growingReserve(allocator, j * kMiB), kErrorOk);
EXPECT_GE(vec.capacity(), j * kMiB); EXPECT_GE(vec.capacity(), j * kMiB);
@@ -379,15 +403,17 @@ static void test_zone_bitvector(ZoneAllocator* allocator) {
EXPECT_EQ(vec.resize(allocator, count, false), kErrorOk); EXPECT_EQ(vec.resize(allocator, count, false), kErrorOk);
EXPECT_EQ(vec.size(), count); EXPECT_EQ(vec.size(), count);
for (i = 0; i < count; i++) for (i = 0; i < count; i++) {
EXPECT_FALSE(vec.bitAt(i)); EXPECT_FALSE(vec.bitAt(i));
}
vec.clear(); vec.clear();
EXPECT_EQ(vec.resize(allocator, count, true), kErrorOk); EXPECT_EQ(vec.resize(allocator, count, true), kErrorOk);
EXPECT_EQ(vec.size(), count); EXPECT_EQ(vec.size(), count);
for (i = 0; i < count; i++) for (i = 0; i < count; i++) {
EXPECT_TRUE(vec.bitAt(i)); EXPECT_TRUE(vec.bitAt(i));
}
} }
INFO("ZoneBitVector::fillBits() / clearBits()"); INFO("ZoneBitVector::fillBits() / clearBits()");
@@ -398,10 +424,12 @@ static void test_zone_bitvector(ZoneAllocator* allocator) {
for (i = 0; i < (count + 1) / 2; i++) { for (i = 0; i < (count + 1) / 2; i++) {
bool value = bool(i & 1); bool value = bool(i & 1);
if (value) if (value) {
vec.fillBits(i, count - i * 2); vec.fillBits(i, count - i * 2);
else }
else {
vec.clearBits(i, count - i * 2); vec.clearBits(i, count - i * 2);
}
} }
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {

View File

@@ -19,9 +19,16 @@ class ZoneVectorBase {
public: public:
ASMJIT_NONCOPYABLE(ZoneVectorBase) ASMJIT_NONCOPYABLE(ZoneVectorBase)
// STL compatibility; //! \name Types (C++ compatibility)
typedef uint32_t size_type; //! \{
typedef ptrdiff_t difference_type;
using size_type = uint32_t;
using difference_type = ptrdiff_t;
//! \}
//! \name Members
//! \{
//! Vector data (untyped). //! Vector data (untyped).
void* _data = nullptr; void* _data = nullptr;
@@ -30,6 +37,8 @@ public:
//! Capacity of the vector. //! Capacity of the vector.
size_type _capacity = 0; size_type _capacity = 0;
//! \}
protected: protected:
//! \name Construction & Destruction //! \name Construction & Destruction
//! \{ //! \{
@@ -74,10 +83,15 @@ public:
//! \{ //! \{
//! Tests whether the vector is empty. //! Tests whether the vector is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; } ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; }
//! Returns the vector size. //! Returns the vector size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_type size() const noexcept { return _size; } ASMJIT_INLINE_NODEBUG size_type size() const noexcept { return _size; }
//! Returns the vector capacity. //! Returns the vector capacity.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_type capacity() const noexcept { return _capacity; } ASMJIT_INLINE_NODEBUG size_type capacity() const noexcept { return _capacity; }
//! \} //! \}
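The `[[nodiscard]]` attributes added in this hunk are standard C++17; they make the compiler warn when a caller silently drops a value that should be checked. A tiny illustration, unrelated to asmjit's own declarations (the function name is made up):

// Minimal [[nodiscard]] demo (C++17). Not asmjit code.
[[nodiscard]] int computeRequiredCapacity() { return 42; }

void caller() {
  computeRequiredCapacity();                   // typically warns: result discarded
  int capacity = computeRequiredCapacity();    // fine: result is consumed
  (void)capacity;
}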
@@ -120,17 +134,21 @@ class ZoneVector : public ZoneVectorBase {
public: public:
ASMJIT_NONCOPYABLE(ZoneVector) ASMJIT_NONCOPYABLE(ZoneVector)
// STL compatibility; //! \name Types (C++ compatibility)
typedef T value_type; //! \{
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef T* iterator; using value_type = T;
typedef const T* const_iterator; using pointer = T*;
typedef Support::ArrayReverseIterator<T> reverse_iterator; using const_pointer = const T*;
typedef Support::ArrayReverseIterator<const T> const_reverse_iterator; using reference = T&;
using const_reference = const T&;
using iterator = T*;
using const_iterator = const T*;
using reverse_iterator = Support::ArrayReverseIterator<T>;
using const_reverse_iterator = Support::ArrayReverseIterator<const T>;
//! \}
//! \name Construction & Destruction //! \name Construction & Destruction
//! \{ //! \{
@@ -144,11 +162,15 @@ public:
//! \{ //! \{
//! Returns vector data. //! Returns vector data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* data() noexcept { return static_cast<T*>(_data); } ASMJIT_INLINE_NODEBUG T* data() noexcept { return static_cast<T*>(_data); }
//! Returns vector data (const) //! Returns vector data (const)
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const T* data() const noexcept { return static_cast<const T*>(_data); } ASMJIT_INLINE_NODEBUG const T* data() const noexcept { return static_cast<const T*>(_data); }
//! Returns item at the given index `i` (const). //! Returns item at the given index `i` (const).
[[nodiscard]]
inline const T& at(size_t i) const noexcept { inline const T& at(size_t i) const noexcept {
ASMJIT_ASSERT(i < _size); ASMJIT_ASSERT(i < _size);
return data()[i]; return data()[i];
@@ -164,22 +186,40 @@ public:
//! \name STL Compatibility (Iterators) //! \name STL Compatibility (Iterators)
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG iterator begin() noexcept { return iterator(data()); }; ASMJIT_INLINE_NODEBUG iterator begin() noexcept { return iterator(data()); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_iterator begin() const noexcept { return const_iterator(data()); }; ASMJIT_INLINE_NODEBUG const_iterator begin() const noexcept { return const_iterator(data()); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG iterator end() noexcept { return iterator(data() + _size); }; ASMJIT_INLINE_NODEBUG iterator end() noexcept { return iterator(data() + _size); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_iterator end() const noexcept { return const_iterator(data() + _size); }; ASMJIT_INLINE_NODEBUG const_iterator end() const noexcept { return const_iterator(data() + _size); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG reverse_iterator rbegin() noexcept { return reverse_iterator(end()); }; ASMJIT_INLINE_NODEBUG reverse_iterator rbegin() noexcept { return reverse_iterator(end()); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_reverse_iterator rbegin() const noexcept { return const_reverse_iterator(end()); }; ASMJIT_INLINE_NODEBUG const_reverse_iterator rbegin() const noexcept { return const_reverse_iterator(end()); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG reverse_iterator rend() noexcept { return reverse_iterator(begin()); }; ASMJIT_INLINE_NODEBUG reverse_iterator rend() noexcept { return reverse_iterator(begin()); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_reverse_iterator rend() const noexcept { return const_reverse_iterator(begin()); }; ASMJIT_INLINE_NODEBUG const_reverse_iterator rend() const noexcept { return const_reverse_iterator(begin()); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_iterator cbegin() const noexcept { return const_iterator(data()); }; ASMJIT_INLINE_NODEBUG const_iterator cbegin() const noexcept { return const_iterator(data()); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_iterator cend() const noexcept { return const_iterator(data() + _size); }; ASMJIT_INLINE_NODEBUG const_iterator cend() const noexcept { return const_iterator(data() + _size); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_reverse_iterator crbegin() const noexcept { return const_reverse_iterator(cend()); }; ASMJIT_INLINE_NODEBUG const_reverse_iterator crbegin() const noexcept { return const_reverse_iterator(cend()); };
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_reverse_iterator crend() const noexcept { return const_reverse_iterator(cbegin()); }; ASMJIT_INLINE_NODEBUG const_reverse_iterator crend() const noexcept { return const_reverse_iterator(cbegin()); };
//! \} //! \}
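With begin()/end() and their const/reverse variants exposed as above, ZoneVector<T> works directly with range-based for. A usage sketch follows, assuming the Zone/ZoneAllocator pairing used elsewhere in asmjit; the header name and block size are illustrative, and append() error codes are ignored for brevity.

#include <asmjit/core.h>
#include <cstdio>

// Iterates a ZoneVector<int> with range-based for, which relies on the
// begin()/end() members shown above.
void dumpValues() {
  asmjit::Zone zone(4096);                 // arena backing the vector
  asmjit::ZoneAllocator allocator(&zone);  // allocator wrapper over the zone

  asmjit::ZoneVector<int> values;
  for (int i = 0; i < 4; i++)
    (void)values.append(&allocator, i * 10);

  for (int v : values)                     // uses begin()/end() from above
    std::printf("%d\n", v);
}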
@@ -188,12 +228,13 @@ public:
//! \{ //! \{
//! Swaps this vector with `other`. //! Swaps this vector with `other`.
ASMJIT_FORCE_INLINE void swap(ZoneVector<T>& other) noexcept { _swap(other); } ASMJIT_INLINE void swap(ZoneVector<T>& other) noexcept { _swap(other); }
//! Prepends `item` to the vector. //! Prepends `item` to the vector.
ASMJIT_FORCE_INLINE Error prepend(ZoneAllocator* allocator, const T& item) noexcept { ASMJIT_INLINE Error prepend(ZoneAllocator* allocator, const T& item) noexcept {
if (ASMJIT_UNLIKELY(_size == _capacity)) if (ASMJIT_UNLIKELY(_size == _capacity)) {
ASMJIT_PROPAGATE(grow(allocator, 1)); ASMJIT_PROPAGATE(grow(allocator, 1));
}
memmove(static_cast<void*>(static_cast<T*>(_data) + 1), memmove(static_cast<void*>(static_cast<T*>(_data) + 1),
static_cast<const void*>(_data), static_cast<const void*>(_data),
@@ -208,11 +249,12 @@ public:
} }
//! Inserts an `item` at the specified `index`. //! Inserts an `item` at the specified `index`.
ASMJIT_FORCE_INLINE Error insert(ZoneAllocator* allocator, size_t index, const T& item) noexcept { ASMJIT_INLINE Error insert(ZoneAllocator* allocator, size_t index, const T& item) noexcept {
ASMJIT_ASSERT(index <= _size); ASMJIT_ASSERT(index <= _size);
if (ASMJIT_UNLIKELY(_size == _capacity)) if (ASMJIT_UNLIKELY(_size == _capacity)) {
ASMJIT_PROPAGATE(grow(allocator, 1)); ASMJIT_PROPAGATE(grow(allocator, 1));
}
T* dst = static_cast<T*>(_data) + index; T* dst = static_cast<T*>(_data) + index;
memmove(static_cast<void*>(dst + 1), memmove(static_cast<void*>(dst + 1),
@@ -228,9 +270,10 @@ public:
} }
//! Appends `item` to the vector. //! Appends `item` to the vector.
ASMJIT_FORCE_INLINE Error append(ZoneAllocator* allocator, const T& item) noexcept { ASMJIT_INLINE Error append(ZoneAllocator* allocator, const T& item) noexcept {
if (ASMJIT_UNLIKELY(_size == _capacity)) if (ASMJIT_UNLIKELY(_size == _capacity)) {
ASMJIT_PROPAGATE(grow(allocator, 1)); ASMJIT_PROPAGATE(grow(allocator, 1));
}
memcpy(static_cast<void*>(static_cast<T*>(_data) + _size), memcpy(static_cast<void*>(static_cast<T*>(_data) + _size),
static_cast<const void*>(&item), static_cast<const void*>(&item),
@@ -241,10 +284,11 @@ public:
} }
//! Appends `other` vector at the end of this vector. //! Appends `other` vector at the end of this vector.
ASMJIT_FORCE_INLINE Error concat(ZoneAllocator* allocator, const ZoneVector<T>& other) noexcept { ASMJIT_INLINE Error concat(ZoneAllocator* allocator, const ZoneVector<T>& other) noexcept {
uint32_t size = other._size; uint32_t size = other._size;
if (_capacity - _size < size) if (_capacity - _size < size) {
ASMJIT_PROPAGATE(grow(allocator, size)); ASMJIT_PROPAGATE(grow(allocator, size));
}
if (size) { if (size) {
memcpy(static_cast<void*>(static_cast<T*>(_data) + _size), memcpy(static_cast<void*>(static_cast<T*>(_data) + _size),
@@ -260,7 +304,7 @@ public:
//! //!
//! Can only be used together with `willGrow()`. If `willGrow(N)` returns `kErrorOk` then N elements //! Can only be used together with `willGrow()`. If `willGrow(N)` returns `kErrorOk` then N elements
//! can be added to the vector without checking if there is a place for them. Used mostly internally. //! can be added to the vector without checking if there is a place for them. Used mostly internally.
ASMJIT_FORCE_INLINE void prependUnsafe(const T& item) noexcept { ASMJIT_INLINE void prependUnsafe(const T& item) noexcept {
ASMJIT_ASSERT(_size < _capacity); ASMJIT_ASSERT(_size < _capacity);
T* data = static_cast<T*>(_data); T* data = static_cast<T*>(_data);
@@ -280,7 +324,7 @@ public:
//! //!
//! Can only be used together with `willGrow()`. If `willGrow(N)` returns `kErrorOk` then N elements //! Can only be used together with `willGrow()`. If `willGrow(N)` returns `kErrorOk` then N elements
//! can be added to the vector without checking if there is a place for them. Used mostly internally. //! can be added to the vector without checking if there is a place for them. Used mostly internally.
ASMJIT_FORCE_INLINE void appendUnsafe(const T& item) noexcept { ASMJIT_INLINE void appendUnsafe(const T& item) noexcept {
ASMJIT_ASSERT(_size < _capacity); ASMJIT_ASSERT(_size < _capacity);
memcpy(static_cast<void*>(static_cast<T*>(_data) + _size), memcpy(static_cast<void*>(static_cast<T*>(_data) + _size),
@@ -290,7 +334,7 @@ public:
} }
//! Inserts an `item` at the specified `index` (unsafe case). //! Inserts an `item` at the specified `index` (unsafe case).
ASMJIT_FORCE_INLINE void insertUnsafe(size_t index, const T& item) noexcept { ASMJIT_INLINE void insertUnsafe(size_t index, const T& item) noexcept {
ASMJIT_ASSERT(_size < _capacity); ASMJIT_ASSERT(_size < _capacity);
ASMJIT_ASSERT(index <= _size); ASMJIT_ASSERT(index <= _size);
@@ -307,7 +351,7 @@ public:
} }
//! Concatenates all items of `other` at the end of the vector. //! Concatenates all items of `other` at the end of the vector.
ASMJIT_FORCE_INLINE void concatUnsafe(const ZoneVector<T>& other) noexcept { ASMJIT_INLINE void concatUnsafe(const ZoneVector<T>& other) noexcept {
uint32_t size = other._size; uint32_t size = other._size;
ASMJIT_ASSERT(_capacity - _size >= size); ASMJIT_ASSERT(_capacity - _size >= size);
@@ -320,7 +364,7 @@ public:
} }
//! Returns index of the given `val` or `Globals::kNotFound` if it doesn't exist. //! Returns index of the given `val` or `Globals::kNotFound` if it doesn't exist.
ASMJIT_FORCE_INLINE uint32_t indexOf(const T& val) const noexcept { ASMJIT_INLINE uint32_t indexOf(const T& val) const noexcept {
const T* data = static_cast<const T*>(_data); const T* data = static_cast<const T*>(_data);
uint32_t size = _size; uint32_t size = _size;
@@ -350,6 +394,7 @@ public:
} }
//! Pops the last element from the vector and returns it. //! Pops the last element from the vector and returns it.
[[nodiscard]]
inline T pop() noexcept { inline T pop() noexcept {
ASMJIT_ASSERT(_size > 0); ASMJIT_ASSERT(_size > 0);
@@ -363,12 +408,14 @@ public:
} }
//! Returns item at index `i`. //! Returns item at index `i`.
[[nodiscard]]
inline T& operator[](size_t i) noexcept { inline T& operator[](size_t i) noexcept {
ASMJIT_ASSERT(i < _size); ASMJIT_ASSERT(i < _size);
return data()[i]; return data()[i];
} }
//! Returns item at index `i`. //! Returns item at index `i`.
[[nodiscard]]
inline const T& operator[](size_t i) const noexcept { inline const T& operator[](size_t i) const noexcept {
ASMJIT_ASSERT(i < _size); ASMJIT_ASSERT(i < _size);
return data()[i]; return data()[i];
@@ -378,16 +425,22 @@ public:
//! //!
//! \note The vector must have at least one element. Attempting to use `first()` on an empty vector will trigger //! \note The vector must have at least one element. Attempting to use `first()` on an empty vector will trigger
//! an assertion failure in debug builds. //! an assertion failure in debug builds.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T& first() noexcept { return operator[](0); } ASMJIT_INLINE_NODEBUG T& first() noexcept { return operator[](0); }
//! \overload //! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const T& first() const noexcept { return operator[](0); } ASMJIT_INLINE_NODEBUG const T& first() const noexcept { return operator[](0); }
//! Returns a reference to the last element of the vector. //! Returns a reference to the last element of the vector.
//! //!
//! \note The vector must have at least one element. Attempting to use `last()` on an empty vector will trigger //! \note The vector must have at least one element. Attempting to use `last()` on an empty vector will trigger
//! an assertion failure in debug builds. //! an assertion failure in debug builds.
[[nodiscard]]
inline T& last() noexcept { return operator[](_size - 1); } inline T& last() noexcept { return operator[](_size - 1); }
//! \overload //! \overload
[[nodiscard]]
inline const T& last() const noexcept { return operator[](_size - 1); } inline const T& last() const noexcept { return operator[](_size - 1); }
//! \} //! \}
@@ -401,6 +454,7 @@ public:
} }
//! Called to grow the buffer to fit at least `n` elements more. //! Called to grow the buffer to fit at least `n` elements more.
[[nodiscard]]
inline Error grow(ZoneAllocator* allocator, uint32_t n) noexcept { inline Error grow(ZoneAllocator* allocator, uint32_t n) noexcept {
return ZoneVectorBase::_grow(allocator, sizeof(T), n); return ZoneVectorBase::_grow(allocator, sizeof(T), n);
} }
@@ -409,27 +463,34 @@ public:
//! //!
//! If `n` is greater than the current size then the additional elements' content will be initialized to zero. //! If `n` is greater than the current size then the additional elements' content will be initialized to zero.
//! If `n` is less than the current size then the vector will be truncated to exactly `n` elements. //! If `n` is less than the current size then the vector will be truncated to exactly `n` elements.
[[nodiscard]]
inline Error resize(ZoneAllocator* allocator, uint32_t n) noexcept { inline Error resize(ZoneAllocator* allocator, uint32_t n) noexcept {
return ZoneVectorBase::_resize(allocator, sizeof(T), n); return ZoneVectorBase::_resize(allocator, sizeof(T), n);
} }
//! Reallocates the internal array to fit at least `n` items. //! Reallocates the internal array to fit at least `n` items.
[[nodiscard]]
inline Error reserve(ZoneAllocator* allocator, uint32_t n) noexcept { inline Error reserve(ZoneAllocator* allocator, uint32_t n) noexcept {
if (ASMJIT_UNLIKELY(n > _capacity)) if (ASMJIT_UNLIKELY(n > _capacity)) {
return ZoneVectorBase::_reserve(allocator, sizeof(T), n); return ZoneVectorBase::_reserve(allocator, sizeof(T), n);
else }
else {
return Error(kErrorOk); return Error(kErrorOk);
}
} }
//! Reallocates the internal array to fit at least `n` items with growing semantics. //! Reallocates the internal array to fit at least `n` items with growing semantics.
//! //!
//! If the vector is smaller than `n`, the same growing calculations will be used as if `n` items were appended //! If the vector is smaller than `n`, the same growing calculations will be used as if `n` items were appended
//! to an empty vector, which means reserving additional space for more append operations that could follow. //! to an empty vector, which means reserving additional space for more append operations that could follow.
[[nodiscard]]
inline Error growingReserve(ZoneAllocator* allocator, uint32_t n) noexcept { inline Error growingReserve(ZoneAllocator* allocator, uint32_t n) noexcept {
if (ASMJIT_UNLIKELY(n > _capacity)) if (ASMJIT_UNLIKELY(n > _capacity)) {
return ZoneVectorBase::_growingReserve(allocator, sizeof(T), n); return ZoneVectorBase::_growingReserve(allocator, sizeof(T), n);
else }
else {
return Error(kErrorOk); return Error(kErrorOk);
}
} }
inline Error willGrow(ZoneAllocator* allocator, uint32_t n = 1) noexcept { inline Error willGrow(ZoneAllocator* allocator, uint32_t n = 1) noexcept {
@@ -442,16 +503,19 @@ public:
//! Zone-allocated bit vector. //! Zone-allocated bit vector.
class ZoneBitVector { class ZoneBitVector {
public: public:
typedef Support::BitWord BitWord;
ASMJIT_NONCOPYABLE(ZoneBitVector) ASMJIT_NONCOPYABLE(ZoneBitVector)
//! \name Types
//! \{
using BitWord = Support::BitWord;
//! \}
//! \name Constants //! \name Constants
//! \{ //! \{
enum : uint32_t { static inline constexpr uint32_t kBitWordSizeInBits = Support::kBitWordSizeInBits;
kBitWordSizeInBits = Support::kBitWordSizeInBits
};
//! \} //! \}
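Replacing the anonymous enum with `static inline constexpr` above is one of the spots that actually benefits from the new C++17 baseline: inline variables let a constant be defined inside the class with no out-of-line definition. A standalone sketch of the idiom (type and names are illustrative, not asmjit's):

#include <cstdint>

// C++17 inline-variable idiom adopted above: the constant is defined in-class,
// with no separate definition required in a .cpp file.
struct BitVectorTraitsSketch {
  static inline constexpr std::uint32_t kBitWordSizeInBits =
      std::uint32_t(sizeof(std::uintptr_t) * 8u);
};

static_assert(BitVectorTraitsSketch::kBitWordSizeInBits >= 32u,
              "expected BitWord to be at least 32 bits wide");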
@@ -476,18 +540,21 @@ public:
} }
static ASMJIT_INLINE_NODEBUG void _zeroBits(BitWord* dst, uint32_t nBitWords) noexcept { static ASMJIT_INLINE_NODEBUG void _zeroBits(BitWord* dst, uint32_t nBitWords) noexcept {
for (uint32_t i = 0; i < nBitWords; i++) for (uint32_t i = 0; i < nBitWords; i++) {
dst[i] = 0; dst[i] = 0;
}
} }
static ASMJIT_INLINE_NODEBUG void _fillBits(BitWord* dst, uint32_t nBitWords) noexcept { static ASMJIT_INLINE_NODEBUG void _fillBits(BitWord* dst, uint32_t nBitWords) noexcept {
for (uint32_t i = 0; i < nBitWords; i++) for (uint32_t i = 0; i < nBitWords; i++) {
dst[i] = ~BitWord(0); dst[i] = ~BitWord(0);
}
} }
static ASMJIT_INLINE_NODEBUG void _copyBits(BitWord* dst, const BitWord* src, uint32_t nBitWords) noexcept { static ASMJIT_INLINE_NODEBUG void _copyBits(BitWord* dst, const BitWord* src, uint32_t nBitWords) noexcept {
for (uint32_t i = 0; i < nBitWords; i++) for (uint32_t i = 0; i < nBitWords; i++) {
dst[i] = src[i]; dst[i] = src[i];
}
} }
//! \} //! \}
@@ -508,7 +575,10 @@ public:
//! \name Overloaded Operators //! \name Overloaded Operators
//! \{ //! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const ZoneBitVector& other) const noexcept { return equals(other); } ASMJIT_INLINE_NODEBUG bool operator==(const ZoneBitVector& other) const noexcept { return equals(other); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const ZoneBitVector& other) const noexcept { return !equals(other); } ASMJIT_INLINE_NODEBUG bool operator!=(const ZoneBitVector& other) const noexcept { return !equals(other); }
//! \} //! \}
@@ -517,20 +587,31 @@ public:
//! \{ //! \{
//! Tests whether the bit-vector is empty (has no bits). //! Tests whether the bit-vector is empty (has no bits).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; } ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; }
//! Returns the size of this bit-vector (in bits). //! Returns the size of this bit-vector (in bits).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t size() const noexcept { return _size; } ASMJIT_INLINE_NODEBUG uint32_t size() const noexcept { return _size; }
//! Returns the capacity of this bit-vector (in bits). //! Returns the capacity of this bit-vector (in bits).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t capacity() const noexcept { return _capacity; } ASMJIT_INLINE_NODEBUG uint32_t capacity() const noexcept { return _capacity; }
//! Returns the size of the `BitWord[]` array in `BitWord` units. //! Returns the size of the `BitWord[]` array in `BitWord` units.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t sizeInBitWords() const noexcept { return _wordsPerBits(_size); } ASMJIT_INLINE_NODEBUG uint32_t sizeInBitWords() const noexcept { return _wordsPerBits(_size); }
//! Returns the capacity of the `BitWord[]` array in `BitWord` units. //! Returns the capacity of the `BitWord[]` array in `BitWord` units.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t capacityInBitWords() const noexcept { return _wordsPerBits(_capacity); } ASMJIT_INLINE_NODEBUG uint32_t capacityInBitWords() const noexcept { return _wordsPerBits(_capacity); }
//! Returns bit-vector data as `BitWord[]`. //! Returns bit-vector data as `BitWord[]`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BitWord* data() noexcept { return _data; } ASMJIT_INLINE_NODEBUG BitWord* data() noexcept { return _data; }
//! \overload //! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const BitWord* data() const noexcept { return _data; } ASMJIT_INLINE_NODEBUG const BitWord* data() const noexcept { return _data; }
//! \} //! \}
@@ -559,6 +640,7 @@ public:
_clearUnusedBits(); _clearUnusedBits();
} }
[[nodiscard]]
inline bool bitAt(uint32_t index) const noexcept { inline bool bitAt(uint32_t index) const noexcept {
ASMJIT_ASSERT(index < _size); ASMJIT_ASSERT(index < _size);
return Support::bitVectorGetBit(_data, index); return Support::bitVectorGetBit(_data, index);
@@ -574,7 +656,7 @@ public:
Support::bitVectorFlipBit(_data, index); Support::bitVectorFlipBit(_data, index);
} }
ASMJIT_FORCE_INLINE Error append(ZoneAllocator* allocator, bool value) noexcept { ASMJIT_INLINE Error append(ZoneAllocator* allocator, bool value) noexcept {
uint32_t index = _size; uint32_t index = _size;
if (ASMJIT_UNLIKELY(index >= _capacity)) if (ASMJIT_UNLIKELY(index >= _capacity))
return _append(allocator, value); return _append(allocator, value);
@@ -593,23 +675,23 @@ public:
ASMJIT_API Error copyFrom(ZoneAllocator* allocator, const ZoneBitVector& other) noexcept; ASMJIT_API Error copyFrom(ZoneAllocator* allocator, const ZoneBitVector& other) noexcept;
ASMJIT_FORCE_INLINE void clearAll() noexcept { ASMJIT_INLINE void clearAll() noexcept {
_zeroBits(_data, _wordsPerBits(_size)); _zeroBits(_data, _wordsPerBits(_size));
} }
ASMJIT_FORCE_INLINE void fillAll() noexcept { ASMJIT_INLINE void fillAll() noexcept {
_fillBits(_data, _wordsPerBits(_size)); _fillBits(_data, _wordsPerBits(_size));
_clearUnusedBits(); _clearUnusedBits();
} }
ASMJIT_FORCE_INLINE void clearBits(uint32_t start, uint32_t count) noexcept { ASMJIT_INLINE void clearBits(uint32_t start, uint32_t count) noexcept {
ASMJIT_ASSERT(start <= _size); ASMJIT_ASSERT(start <= _size);
ASMJIT_ASSERT(_size - start >= count); ASMJIT_ASSERT(_size - start >= count);
Support::bitVectorClear(_data, start, count); Support::bitVectorClear(_data, start, count);
} }
ASMJIT_FORCE_INLINE void fillBits(uint32_t start, uint32_t count) noexcept { ASMJIT_INLINE void fillBits(uint32_t start, uint32_t count) noexcept {
ASMJIT_ASSERT(start <= _size); ASMJIT_ASSERT(start <= _size);
ASMJIT_ASSERT(_size - start >= count); ASMJIT_ASSERT(_size - start >= count);
@@ -620,7 +702,7 @@ public:
//! bits than `this` then all remaining bits are set to zero. //! bits than `this` then all remaining bits are set to zero.
//! //!
//! \note The size of the BitVector is unaffected by this operation. //! \note The size of the BitVector is unaffected by this operation.
ASMJIT_FORCE_INLINE void and_(const ZoneBitVector& other) noexcept { ASMJIT_INLINE void and_(const ZoneBitVector& other) noexcept {
BitWord* dst = _data; BitWord* dst = _data;
const BitWord* src = other._data; const BitWord* src = other._data;
@@ -644,49 +726,57 @@ public:
//! has fewer bits than `this` then all remaining bits are kept intact. //! has fewer bits than `this` then all remaining bits are kept intact.
//! //!
//! \note The size of the BitVector is unaffected by this operation. //! \note The size of the BitVector is unaffected by this operation.
ASMJIT_FORCE_INLINE void andNot(const ZoneBitVector& other) noexcept { ASMJIT_INLINE void andNot(const ZoneBitVector& other) noexcept {
BitWord* dst = _data; BitWord* dst = _data;
const BitWord* src = other._data; const BitWord* src = other._data;
uint32_t commonBitWordCount = _wordsPerBits(Support::min(_size, other._size)); uint32_t commonBitWordCount = _wordsPerBits(Support::min(_size, other._size));
for (uint32_t i = 0; i < commonBitWordCount; i++) for (uint32_t i = 0; i < commonBitWordCount; i++) {
dst[i] = dst[i] & ~src[i]; dst[i] = dst[i] & ~src[i];
}
} }
//! Performs a logical bitwise OR between bits specified in this array and bits in `other`. If `other` has fewer //! Performs a logical bitwise OR between bits specified in this array and bits in `other`. If `other` has fewer
//! bits than `this` then all remaining bits are kept intact. //! bits than `this` then all remaining bits are kept intact.
//! //!
//! \note The size of the BitVector is unaffected by this operation. //! \note The size of the BitVector is unaffected by this operation.
ASMJIT_FORCE_INLINE void or_(const ZoneBitVector& other) noexcept { ASMJIT_INLINE void or_(const ZoneBitVector& other) noexcept {
BitWord* dst = _data; BitWord* dst = _data;
const BitWord* src = other._data; const BitWord* src = other._data;
uint32_t commonBitWordCount = _wordsPerBits(Support::min(_size, other._size)); uint32_t commonBitWordCount = _wordsPerBits(Support::min(_size, other._size));
for (uint32_t i = 0; i < commonBitWordCount; i++) for (uint32_t i = 0; i < commonBitWordCount; i++) {
dst[i] = dst[i] | src[i]; dst[i] = dst[i] | src[i];
}
_clearUnusedBits(); _clearUnusedBits();
} }
ASMJIT_FORCE_INLINE void _clearUnusedBits() noexcept { ASMJIT_INLINE void _clearUnusedBits() noexcept {
uint32_t idx = _size / kBitWordSizeInBits; uint32_t idx = _size / kBitWordSizeInBits;
uint32_t bit = _size % kBitWordSizeInBits; uint32_t bit = _size % kBitWordSizeInBits;
if (!bit) if (!bit) {
return; return;
}
_data[idx] &= (BitWord(1) << bit) - 1u; _data[idx] &= (BitWord(1) << bit) - 1u;
} }
ASMJIT_FORCE_INLINE bool equals(const ZoneBitVector& other) const noexcept { [[nodiscard]]
if (_size != other._size) ASMJIT_INLINE bool equals(const ZoneBitVector& other) const noexcept {
if (_size != other._size) {
return false; return false;
}
const BitWord* aData = _data; const BitWord* aData = _data;
const BitWord* bData = other._data; const BitWord* bData = other._data;
uint32_t numBitWords = _wordsPerBits(_size); uint32_t numBitWords = _wordsPerBits(_size);
for (uint32_t i = 0; i < numBitWords; i++) for (uint32_t i = 0; i < numBitWords; i++) {
if (aData[i] != bData[i]) if (aData[i] != bData[i]) {
return false; return false;
}
}
return true; return true;
} }
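The `(BitWord(1) << bit) - 1u` expression in _clearUnusedBits() above builds a mask of exactly the low `bit` bits, so any stale bits past the logical size are zeroed before word-wise comparisons such as equals(). A standalone demonstration (not asmjit code; the 64-bit BitWord and the sizes are illustrative):

#include <cassert>
#include <cstdint>

// Demonstrates the low-bits mask used by _clearUnusedBits() above:
// (BitWord(1) << bit) - 1 keeps only the `bit` lowest bits of the last word.
int main() {
  using BitWord = std::uint64_t;
  std::uint32_t size = 70;               // logical size in bits
  std::uint32_t bit = size % 64u;        // 6 bits live in the last word
  BitWord lastWord = ~BitWord(0);        // pretend every bit was set
  lastWord &= (BitWord(1) << bit) - 1u;  // clear bits 6..63
  assert(lastWord == 0x3Fu);             // only the low 6 bits remain
  return 0;
}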
@@ -696,12 +786,15 @@ public:
//! \{ //! \{
inline void release(ZoneAllocator* allocator) noexcept { inline void release(ZoneAllocator* allocator) noexcept {
if (!_data) if (!_data) {
return; return;
}
allocator->release(_data, _capacity / 8); allocator->release(_data, _capacity / 8);
reset(); reset();
} }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Error resize(ZoneAllocator* allocator, uint32_t newSize, bool newBitsValue = false) noexcept { ASMJIT_INLINE_NODEBUG Error resize(ZoneAllocator* allocator, uint32_t newSize, bool newBitsValue = false) noexcept {
return _resize(allocator, newSize, newSize, newBitsValue); return _resize(allocator, newSize, newSize, newBitsValue);
} }
